Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 4
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 4
-rw-r--r--  drivers/clk/clk-composite.c | 9
-rw-r--r--  drivers/clk/clk-mux.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 2
-rw-r--r--  drivers/clk/clk.c | 42
-rw-r--r--  drivers/clk/hisilicon/clk-hi3620.c | 72
-rw-r--r--  drivers/clk/mmp/Makefile | 7
-rw-r--r--  drivers/clk/mmp/clk-frac.c | 74
-rw-r--r--  drivers/clk/mmp/clk-gate.c | 133
-rw-r--r--  drivers/clk/mmp/clk-mix.c | 513
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 6
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 334
-rw-r--r--  drivers/clk/mmp/clk-of-pxa168.c | 279
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 301
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 6
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 6
-rw-r--r--  drivers/clk/mmp/clk.c | 192
-rw-r--r--  drivers/clk/mmp/clk.h | 226
-rw-r--r--  drivers/clk/mmp/reset.c | 99
-rw-r--r--  drivers/clk/mmp/reset.h | 31
-rw-r--r--  drivers/clk/pxa/Makefile | 1
-rw-r--r--  drivers/clk/pxa/clk-pxa.c | 45
-rw-r--r--  drivers/clk/pxa/clk-pxa.h | 9
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 273
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 9
-rw-r--r--  drivers/clk/qcom/clk-pll.c | 2
-rw-r--r--  drivers/clk/qcom/clk-rcg.c | 20
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 28
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 154
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 81
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 79
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 246
-rw-r--r--  drivers/clk/rockchip/clk.c | 20
-rw-r--r--  drivers/clk/rockchip/clk.h | 45
-rw-r--r--  drivers/clk/samsung/Makefile | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 33
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos4415.c | 1144
-rw-r--r--  drivers/clk/samsung/clk-exynos5260.c | 185
-rw-r--r--  drivers/clk/samsung/clk-exynos7.c | 743
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 25
-rw-r--r--  drivers/clk/samsung/clk-pll.h | 4
-rw-r--r--  drivers/clk/samsung/clk.c | 102
-rw-r--r--  drivers/clk/samsung/clk.h | 43
-rw-r--r--  drivers/clk/shmobile/clk-div6.c | 113
-rw-r--r--  drivers/clk/sunxi/Makefile | 1
-rw-r--r--  drivers/clk/sunxi/clk-a20-gmac.c | 7
-rw-r--r--  drivers/clk/sunxi/clk-factors.c | 6
-rw-r--r--  drivers/clk/sunxi/clk-factors.h | 3
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-ar100.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-core.c | 271
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 85
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 328
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 112
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 1599
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 80
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 10
-rw-r--r--  drivers/macintosh/Kconfig | 10
-rw-r--r--  drivers/macintosh/Makefile | 1
-rw-r--r--  drivers/macintosh/therm_pm72.c | 2278
-rw-r--r--  drivers/macintosh/therm_pm72.h | 326
-rw-r--r--  drivers/scsi/53c700.c | 41
-rw-r--r--  drivers/scsi/Kconfig | 17
-rw-r--r--  drivers/scsi/advansys.c | 8
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 6
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 5
-rw-r--r--  drivers/scsi/esas2r/esas2r_flash.c | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 1
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 1
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 116
-rw-r--r--  drivers/scsi/ipr.h | 4
-rw-r--r--  drivers/scsi/isci/init.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 5
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r--  drivers/scsi/pmcraid.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 12
-rw-r--r--  drivers/scsi/scsi.c | 22
-rw-r--r--  drivers/scsi/scsi_debug.c | 62
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 30
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 7
-rw-r--r--  drivers/spi/spi-img-spfi.c | 4
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/include/linux/lustre_compat25.h | 24
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_lib.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 26
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 71
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 7
-rw-r--r--  drivers/target/sbp/sbp_target.c | 2
-rw-r--r--  drivers/target/target_core_configfs.c | 344
-rw-r--r--  drivers/target/target_core_device.c | 90
-rw-r--r--  drivers/target/target_core_file.c | 42
-rw-r--r--  drivers/target/target_core_hba.c | 7
-rw-r--r--  drivers/target/target_core_iblock.c | 42
-rw-r--r--  drivers/target/target_core_internal.h | 28
-rw-r--r--  drivers/target/target_core_pr.c | 125
-rw-r--r--  drivers/target/target_core_pscsi.c | 28
-rw-r--r--  drivers/target/target_core_rd.c | 41
-rw-r--r--  drivers/target/target_core_sbc.c | 2
-rw-r--r--  drivers/target/target_core_spc.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 16
-rw-r--r--  drivers/target/target_core_user.c | 42
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 14
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 10
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 2
-rw-r--r--  drivers/xen/xen-scsiback.c | 2
134 files changed, 7367 insertions(+), 4923 deletions(-)
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 62e2509f9df1..bbdb1b985c91 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -57,7 +57,7 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 static long clk_programmable_determine_rate(struct clk_hw *hw,
 					    unsigned long rate,
 					    unsigned long *best_parent_rate,
-					    struct clk **best_parent_clk)
+					    struct clk_hw **best_parent_hw)
 {
 	struct clk *parent = NULL;
 	long best_rate = -EINVAL;
@@ -84,7 +84,7 @@ static long clk_programmable_determine_rate(struct clk_hw *hw,
 		if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
 			best_rate = tmp_rate;
 			*best_parent_rate = parent_rate;
-			*best_parent_clk = parent;
+			*best_parent_hw = __clk_get_hw(parent);
 		}
 
 	if (!best_rate)
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 95af2e665dd3..1c06f6f3a8c5 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,7 +1032,7 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *best_parent_rate, struct clk **best_parent)
+		unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
 	struct clk *clk = hw->clk;
@@ -1075,7 +1075,7 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
 		if (delta < best_delta) {
 			best_delta = delta;
 			best_rate = other_rate;
-			*best_parent = parent;
+			*best_parent = __clk_get_hw(parent);
 			*best_parent_rate = parent_rate;
 		}
 	}
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index b9355daf8065..4386697236a7 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -57,7 +57,7 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 					unsigned long *best_parent_rate,
-					struct clk **best_parent_p)
+					struct clk_hw **best_parent_p)
 {
 	struct clk_composite *composite = to_clk_composite(hw);
 	const struct clk_ops *rate_ops = composite->rate_ops;
@@ -80,8 +80,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 		*best_parent_p = NULL;
 
 		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
-			*best_parent_p = clk_get_parent(mux_hw->clk);
-			*best_parent_rate = __clk_get_rate(*best_parent_p);
+			parent = clk_get_parent(mux_hw->clk);
+			*best_parent_p = __clk_get_hw(parent);
+			*best_parent_rate = __clk_get_rate(parent);
 
 			return rate_ops->round_rate(rate_hw, rate,
 						    best_parent_rate);
@@ -103,7 +104,7 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
 			if (!rate_diff || !*best_parent_p
 				       || best_rate_diff > rate_diff) {
-				*best_parent_p = parent;
+				*best_parent_p = __clk_get_hw(parent);
 				*best_parent_rate = parent_rate;
 				best_rate_diff = rate_diff;
 				best_rate = tmp_rate;
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 4f96ff3ba728..6e1ecf94bf58 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -77,7 +77,7 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
 
 	else {
 		if (mux->flags & CLK_MUX_INDEX_BIT)
-			index = (1 << ffs(index));
+			index = 1 << index;
 
 		if (mux->flags & CLK_MUX_INDEX_ONE)
 			index++;
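
With CLK_MUX_INDEX_BIT the mux register expects a one-hot value, so parent index i has to be encoded as 1 << i. The old expression (1 << ffs(index)) only happens to agree with that for indices 0-2; for index 3, ffs(3) = 1, so it wrote 2 instead of 8. A standalone userspace sketch (not part of the patch) that prints both encodings:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		/* compare the old, buggy encoding with the fixed one */
		for (int index = 0; index < 4; index++)
			printf("index %d: 1 << ffs(index) = %d, 1 << index = %d\n",
			       index, 1 << ffs(index), 1 << index);
		return 0;	/* index 3: old gives 2, fixed gives 8 */
	}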
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 87a41038237d..bfa1e64e267d 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -218,7 +218,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
 	default:
 		dev_err(&pdev->dev, "Invalid device type\n");
 		return -EINVAL;
-	};
+	}
 
 	/* Store clocks of_node in first element of s2mps11_clks array */
 	s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4896ae9e23da..f4963b7d4e17 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
 	.release	= single_release,
 };
 
-/* caller must hold prepare_lock */
 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
 {
 	struct dentry *d;
@@ -354,13 +353,13 @@ out:
 	mutex_unlock(&clk_debug_lock);
 }
 
-struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 				void *data, const struct file_operations *fops)
 {
 	struct dentry *d = NULL;
 
-	if (clk->dentry)
-		d = debugfs_create_file(name, mode, clk->dentry, data, fops);
+	if (hw->clk->dentry)
+		d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
 
 	return d;
 }
@@ -574,11 +573,6 @@ unsigned int __clk_get_enable_count(struct clk *clk)
 	return !clk ? 0 : clk->enable_count;
 }
 
-unsigned int __clk_get_prepare_count(struct clk *clk)
-{
-	return !clk ? 0 : clk->prepare_count;
-}
-
 unsigned long __clk_get_rate(struct clk *clk)
 {
 	unsigned long ret;
@@ -601,7 +595,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk *clk)
 {
 	if (!clk)
 		return 0;
@@ -707,7 +701,7 @@ struct clk *__clk_lookup(const char *name)
  */
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
 			      unsigned long *best_parent_rate,
-			      struct clk **best_parent_p)
+			      struct clk_hw **best_parent_p)
 {
 	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
 	int i, num_parents;
@@ -743,7 +737,7 @@ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
 
 out:
 	if (best_parent)
-		*best_parent_p = best_parent;
+		*best_parent_p = best_parent->hw;
 	*best_parent_rate = best;
 
 	return best;
@@ -951,6 +945,7 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long parent_rate = 0;
 	struct clk *parent;
+	struct clk_hw *parent_hw;
 
 	if (!clk)
 		return 0;
@@ -959,10 +954,11 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 	if (parent)
 		parent_rate = parent->rate;
 
-	if (clk->ops->determine_rate)
+	if (clk->ops->determine_rate) {
+		parent_hw = parent ? parent->hw : NULL;
 		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-						&parent);
-	else if (clk->ops->round_rate)
+						&parent_hw);
+	} else if (clk->ops->round_rate)
 		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
 	else if (clk->flags & CLK_SET_RATE_PARENT)
 		return __clk_round_rate(clk->parent, rate);
@@ -1350,6 +1346,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
 	struct clk *top = clk;
 	struct clk *old_parent, *parent;
+	struct clk_hw *parent_hw;
 	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
 	int p_index = 0;
@@ -1365,9 +1362,11 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 
 	/* find the closest rate and parent clk/rate */
 	if (clk->ops->determine_rate) {
+		parent_hw = parent ? parent->hw : NULL;
 		new_rate = clk->ops->determine_rate(clk->hw, rate,
 						    &best_parent_rate,
-						    &parent);
+						    &parent_hw);
+		parent = parent_hw->clk;
 	} else if (clk->ops->round_rate) {
 		new_rate = clk->ops->round_rate(clk->hw, rate,
 						&best_parent_rate);
@@ -1614,7 +1613,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
 	if (clk->num_parents == 1) {
 		if (IS_ERR_OR_NULL(clk->parent))
-			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
+			clk->parent = __clk_lookup(clk->parent_names[0]);
 		ret = clk->parent;
 		goto out;
 	}
@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
 	else
 		clk->rate = 0;
 
-	clk_debug_register(clk);
 	/*
 	 * walk the list of orphan clocks and reparent any that are children of
 	 * this clock
@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
 out:
 	clk_prepare_unlock();
 
+	if (!ret)
+		clk_debug_register(clk);
+
 	return ret;
 }
 
@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)
 
 void __clk_put(struct clk *clk)
 {
+	struct module *owner;
+
 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;
 
 	clk_prepare_lock();
+	owner = clk->owner;
 	kref_put(&clk->ref, __clk_release);
 	clk_prepare_unlock();
 
-	module_put(clk->owner);
+	module_put(owner);
 }
 
 /*** clk rate change notifiers ***/
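
Taken together, the clk.c hunks above (and the driver hunks elsewhere in this patch) change the last argument of .determine_rate from struct clk ** to struct clk_hw **. A minimal sketch of what that asks of a driver callback; the foo_ name is hypothetical, the helpers are the ones the hunks above actually use:

	static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *best_parent_rate,
				       struct clk_hw **best_parent_hw)
	{
		/* pick a parent as before ... */
		struct clk *parent = clk_get_parent(hw->clk);

		*best_parent_rate = __clk_get_rate(parent);
		/* ... but hand back its clk_hw rather than the struct clk */
		*best_parent_hw = __clk_get_hw(parent);

		return rate;
	}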
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 339945d2503b..007144f81f50 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -38,44 +38,44 @@
 #include "clk.h"
 
 /* clock parent list */
-static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
 /* share axi parent */
-static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
-static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
-static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
 						"armpll3", "armpll5", };
-static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
 						"armpll3", "armpll5", };
-static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
-static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
 
 
 /* fixed rate clocks */
@@ -296,7 +296,7 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
 			      unsigned long *best_parent_rate,
-			      struct clk **best_parent_p)
+			      struct clk_hw **best_parent_p)
 {
 	struct clk_mmc *mclk = to_mmc(hw);
 	unsigned long best = 0;
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 392d78044ce3..3caaf7cc169c 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -2,7 +2,12 @@
 # Makefile for mmp specific clk
 #
 
-obj-y += clk-apbc.o clk-apmu.o clk-frac.o
+obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
+
+obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+
+obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
+obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
 
 obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
 obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 23a56f561812..584a9927993b 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -22,19 +22,12 @@
  * numerator/denominator = Fin / (Fout * factor)
  */
 
-#define to_clk_factor(hw) container_of(hw, struct clk_factor, hw)
-struct clk_factor {
-	struct clk_hw hw;
-	void __iomem *base;
-	struct clk_factor_masks *masks;
-	struct clk_factor_tbl *ftbl;
-	unsigned int ftbl_cnt;
-};
+#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
 
 static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
 		unsigned long *prate)
 {
-	struct clk_factor *factor = to_clk_factor(hw);
+	struct mmp_clk_factor *factor = to_clk_factor(hw);
 	unsigned long rate = 0, prev_rate;
 	int i;
 
@@ -58,8 +51,8 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
 static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
 		unsigned long parent_rate)
 {
-	struct clk_factor *factor = to_clk_factor(hw);
-	struct clk_factor_masks *masks = factor->masks;
+	struct mmp_clk_factor *factor = to_clk_factor(hw);
+	struct mmp_clk_factor_masks *masks = factor->masks;
 	unsigned int val, num, den;
 
 	val = readl_relaxed(factor->base);
@@ -81,11 +74,12 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
 static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
 		unsigned long prate)
 {
-	struct clk_factor *factor = to_clk_factor(hw);
-	struct clk_factor_masks *masks = factor->masks;
+	struct mmp_clk_factor *factor = to_clk_factor(hw);
+	struct mmp_clk_factor_masks *masks = factor->masks;
 	int i;
 	unsigned long val;
 	unsigned long prev_rate, rate = 0;
+	unsigned long flags = 0;
 
 	for (i = 0; i < factor->ftbl_cnt; i++) {
 		prev_rate = rate;
@@ -97,6 +91,9 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
 	if (i > 0)
 		i--;
 
+	if (factor->lock)
+		spin_lock_irqsave(factor->lock, flags);
+
 	val = readl_relaxed(factor->base);
 
 	val &= ~(masks->num_mask << masks->num_shift);
@@ -107,21 +104,65 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	writel_relaxed(val, factor->base);
 
+	if (factor->lock)
+		spin_unlock_irqrestore(factor->lock, flags);
+
 	return 0;
 }
 
+static void clk_factor_init(struct clk_hw *hw)
+{
+	struct mmp_clk_factor *factor = to_clk_factor(hw);
+	struct mmp_clk_factor_masks *masks = factor->masks;
+	u32 val, num, den;
+	int i;
+	unsigned long flags = 0;
+
+	if (factor->lock)
+		spin_lock_irqsave(factor->lock, flags);
+
+	val = readl(factor->base);
+
+	/* calculate numerator */
+	num = (val >> masks->num_shift) & masks->num_mask;
+
+	/* calculate denominator */
+	den = (val >> masks->den_shift) & masks->den_mask;
+
+	for (i = 0; i < factor->ftbl_cnt; i++)
+		if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
+			break;
+
+	if (i >= factor->ftbl_cnt) {
+		val &= ~(masks->num_mask << masks->num_shift);
+		val |= (factor->ftbl[0].num & masks->num_mask) <<
+			masks->num_shift;
+
+		val &= ~(masks->den_mask << masks->den_shift);
+		val |= (factor->ftbl[0].den & masks->den_mask) <<
+			masks->den_shift;
+
+		writel(val, factor->base);
+	}
+
+	if (factor->lock)
+		spin_unlock_irqrestore(factor->lock, flags);
+}
+
 static struct clk_ops clk_factor_ops = {
 	.recalc_rate = clk_factor_recalc_rate,
 	.round_rate = clk_factor_round_rate,
 	.set_rate = clk_factor_set_rate,
+	.init = clk_factor_init,
 };
 
 struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
 		unsigned long flags, void __iomem *base,
-		struct clk_factor_masks *masks, struct clk_factor_tbl *ftbl,
-		unsigned int ftbl_cnt)
+		struct mmp_clk_factor_masks *masks,
+		struct mmp_clk_factor_tbl *ftbl,
+		unsigned int ftbl_cnt, spinlock_t *lock)
 {
-	struct clk_factor *factor;
+	struct mmp_clk_factor *factor;
 	struct clk_init_data init;
 	struct clk *clk;
 
@@ -142,6 +183,7 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
 	factor->ftbl = ftbl;
 	factor->ftbl_cnt = ftbl_cnt;
 	factor->hw.init = &init;
+	factor->lock = lock;
 
 	init.name = name;
 	init.ops = &clk_factor_ops;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
new file mode 100644
index 000000000000..adbd9d64ded2
--- /dev/null
+++ b/drivers/clk/mmp/clk-gate.c
@@ -0,0 +1,133 @@
+/*
+ * mmp gate clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+/*
+ * Some clocks will have mutiple bits to enable the clocks, and
+ * the bits to disable the clock is not same as enabling bits.
+ */
+
+#define to_clk_mmp_gate(hw)	container_of(hw, struct mmp_clk_gate, hw)
+
+static int mmp_clk_gate_enable(struct clk_hw *hw)
+{
+	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+	struct clk *clk = hw->clk;
+	unsigned long flags = 0;
+	unsigned long rate;
+	u32 tmp;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	tmp = readl(gate->reg);
+	tmp &= ~gate->mask;
+	tmp |= gate->val_enable;
+	writel(tmp, gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+
+	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
+		rate = __clk_get_rate(clk);
+		/* Need delay 2 cycles. */
+		udelay(2000000/rate);
+	}
+
+	return 0;
+}
+
+static void mmp_clk_gate_disable(struct clk_hw *hw)
+{
+	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+	unsigned long flags = 0;
+	u32 tmp;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	tmp = readl(gate->reg);
+	tmp &= ~gate->mask;
+	tmp |= gate->val_disable;
+	writel(tmp, gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
+{
+	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+	unsigned long flags = 0;
+	u32 tmp;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	tmp = readl(gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+
+	return (tmp & gate->mask) == gate->val_enable;
+}
+
+const struct clk_ops mmp_clk_gate_ops = {
+	.enable = mmp_clk_gate_enable,
+	.disable = mmp_clk_gate_disable,
+	.is_enabled = mmp_clk_gate_is_enabled,
+};
+
+struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
+		unsigned int gate_flags, spinlock_t *lock)
+{
+	struct mmp_clk_gate *gate;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate the gate */
+	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+	if (!gate) {
+		pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &mmp_clk_gate_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_gate assignments */
+	gate->reg = reg;
+	gate->mask = mask;
+	gate->val_enable = val_enable;
+	gate->val_disable = val_disable;
+	gate->flags = gate_flags;
+	gate->lock = lock;
+	gate->hw.init = &init;
+
+	clk = clk_register(dev, &gate->hw);
+
+	if (IS_ERR(clk))
+		kfree(gate);
+
+	return clk;
+}
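
A usage sketch for the new helper, assuming an already-ioremapped APBC base; the offset and the mask/enable/disable triplet (0x7/0x3/0x0) mirror the MMP2 TWSI gates registered in clk-of-mmp2.c below:

	static DEFINE_SPINLOCK(twsi0_lock);

	static struct clk *twsi0_gate_register(void __iomem *apbc_base)
	{
		/* mask 0x7 covers the control bits; 0x3 enables, 0x0 gates */
		return mmp_clk_register_gate(NULL, "twsi0_clk", "vctcxo",
					     CLK_SET_RATE_PARENT,
					     apbc_base + 0x4,	/* APBC_TWSI0 */
					     0x7, 0x3, 0x0, 0, &twsi0_lock);
	}

For a gate registered with MMP_CLK_GATE_NEED_DELAY, the enable path waits udelay(2000000/rate), i.e. two cycles of the clock: roughly 61 us for a 32768 Hz parent such as "clk32".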
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
new file mode 100644
index 000000000000..48fa53c7ce5e
--- /dev/null
+++ b/drivers/clk/mmp/clk-mix.c
@@ -0,0 +1,513 @@
+/*
+ * mmp mix(div and mux) clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+/*
+ * The mix clock is a clock combined mux and div type clock.
+ * Because the div field and mux field need to be set at same
+ * time, we can not divide it into 2 types of clock
+ */
+
+#define to_clk_mix(hw)	container_of(hw, struct mmp_clk_mix, hw)
+
+static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
+{
+	unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
+	unsigned int maxdiv = 0;
+	struct clk_div_table *clkt;
+
+	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+		return div_mask;
+	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << div_mask;
+	if (mix->div_table) {
+		for (clkt = mix->div_table; clkt->div; clkt++)
+			if (clkt->div > maxdiv)
+				maxdiv = clkt->div;
+		return maxdiv;
+	}
+	return div_mask + 1;
+}
+
+static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
+{
+	struct clk_div_table *clkt;
+
+	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+		return val;
+	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << val;
+	if (mix->div_table) {
+		for (clkt = mix->div_table; clkt->div; clkt++)
+			if (clkt->val == val)
+				return clkt->div;
+		if (clkt->div == 0)
+			return 0;
+	}
+	return val + 1;
+}
+
+static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
+{
+	int num_parents = __clk_get_num_parents(mix->hw.clk);
+	int i;
+
+	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
+		return ffs(val) - 1;
+	if (mix->mux_flags & CLK_MUX_INDEX_ONE)
+		return val - 1;
+	if (mix->mux_table) {
+		for (i = 0; i < num_parents; i++)
+			if (mix->mux_table[i] == val)
+				return i;
+		if (i == num_parents)
+			return 0;
+	}
+
+	return val;
+}
+static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
+{
+	struct clk_div_table *clkt;
+
+	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+		return div;
+	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+		return __ffs(div);
+	if (mix->div_table) {
+		for (clkt = mix->div_table; clkt->div; clkt++)
+			if (clkt->div == div)
+				return clkt->val;
+		if (clkt->div == 0)
+			return 0;
+	}
+
+	return div - 1;
+}
+
+static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
+{
+	if (mix->mux_table)
+		return mix->mux_table[mux];
+
+	return mux;
+}
+
+static void _filter_clk_table(struct mmp_clk_mix *mix,
+				struct mmp_clk_mix_clk_table *table,
+				unsigned int table_size)
+{
+	int i;
+	struct mmp_clk_mix_clk_table *item;
+	struct clk *parent, *clk;
+	unsigned long parent_rate;
+
+	clk = mix->hw.clk;
+
+	for (i = 0; i < table_size; i++) {
+		item = &table[i];
+		parent = clk_get_parent_by_index(clk, item->parent_index);
+		parent_rate = __clk_get_rate(parent);
+		if (parent_rate % item->rate) {
+			item->valid = 0;
+		} else {
+			item->divisor = parent_rate / item->rate;
+			item->valid = 1;
+		}
+	}
+}
+
+static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
+			unsigned int change_mux, unsigned int change_div)
+{
+	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+	u8 width, shift;
+	u32 mux_div, fc_req;
+	int ret, timeout = 50;
+	unsigned long flags = 0;
+
+	if (!change_mux && !change_div)
+		return -EINVAL;
+
+	if (mix->lock)
+		spin_lock_irqsave(mix->lock, flags);
+
+	if (mix->type == MMP_CLK_MIX_TYPE_V1
+		|| mix->type == MMP_CLK_MIX_TYPE_V2)
+		mux_div = readl(ri->reg_clk_ctrl);
+	else
+		mux_div = readl(ri->reg_clk_sel);
+
+	if (change_div) {
+		width = ri->width_div;
+		shift = ri->shift_div;
+		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
+	}
+
+	if (change_mux) {
+		width = ri->width_mux;
+		shift = ri->shift_mux;
+		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
+	}
+
+	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
+		writel(mux_div, ri->reg_clk_ctrl);
+	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
+		mux_div |= (1 << ri->bit_fc);
+		writel(mux_div, ri->reg_clk_ctrl);
+
+		do {
+			fc_req = readl(ri->reg_clk_ctrl);
+			timeout--;
+			if (!(fc_req & (1 << ri->bit_fc)))
+				break;
+		} while (timeout);
+
+		if (timeout == 0) {
+			pr_err("%s:%s cannot do frequency change\n",
+				__func__, __clk_get_name(mix->hw.clk));
+			ret = -EBUSY;
+			goto error;
+		}
+	} else {
+		fc_req = readl(ri->reg_clk_ctrl);
+		fc_req |= 1 << ri->bit_fc;
+		writel(fc_req, ri->reg_clk_ctrl);
+		writel(mux_div, ri->reg_clk_sel);
+		fc_req &= ~(1 << ri->bit_fc);
+	}
+
+	ret = 0;
+error:
+	if (mix->lock)
+		spin_unlock_irqrestore(mix->lock, flags);
+
+	return ret;
+}
+
+static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *best_parent_rate,
+					struct clk_hw **best_parent_clk)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	struct mmp_clk_mix_clk_table *item;
+	struct clk *parent, *parent_best, *mix_clk;
+	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
+	unsigned long gap, gap_best;
+	u32 div_val_max;
+	unsigned int div;
+	int i, j;
+
+	mix_clk = hw->clk;
+
+	parent = NULL;
+	mix_rate_best = 0;
+	parent_rate_best = 0;
+	gap_best = rate;
+	parent_best = NULL;
+
+	if (mix->table) {
+		for (i = 0; i < mix->table_size; i++) {
+			item = &mix->table[i];
+			if (item->valid == 0)
+				continue;
+			parent = clk_get_parent_by_index(mix_clk,
+						item->parent_index);
+			parent_rate = __clk_get_rate(parent);
+			mix_rate = parent_rate / item->divisor;
+			gap = abs(mix_rate - rate);
+			if (parent_best == NULL || gap < gap_best) {
+				parent_best = parent;
+				parent_rate_best = parent_rate;
+				mix_rate_best = mix_rate;
+				gap_best = gap;
+				if (gap_best == 0)
+					goto found;
+			}
+		}
+	} else {
+		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+			parent = clk_get_parent_by_index(mix_clk, i);
+			parent_rate = __clk_get_rate(parent);
+			div_val_max = _get_maxdiv(mix);
+			for (j = 0; j < div_val_max; j++) {
+				div = _get_div(mix, j);
+				mix_rate = parent_rate / div;
+				gap = abs(mix_rate - rate);
+				if (parent_best == NULL || gap < gap_best) {
+					parent_best = parent;
+					parent_rate_best = parent_rate;
+					mix_rate_best = mix_rate;
+					gap_best = gap;
+					if (gap_best == 0)
+						goto found;
+				}
+			}
+		}
+	}
+
+found:
+	*best_parent_rate = parent_rate_best;
+	*best_parent_clk = __clk_get_hw(parent_best);
+
+	return mix_rate_best;
+}
+
+static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
+						unsigned long rate,
+						unsigned long parent_rate,
+						u8 index)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	unsigned int div;
+	u32 div_val, mux_val;
+
+	div = parent_rate / rate;
+	div_val = _get_div_val(mix, div);
+	mux_val = _get_mux_val(mix, index);
+
+	return _set_rate(mix, mux_val, div_val, 1, 1);
+}
+
+static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+	unsigned long flags = 0;
+	u32 mux_div = 0;
+	u8 width, shift;
+	u32 mux_val;
+
+	if (mix->lock)
+		spin_lock_irqsave(mix->lock, flags);
+
+	if (mix->type == MMP_CLK_MIX_TYPE_V1
+		|| mix->type == MMP_CLK_MIX_TYPE_V2)
+		mux_div = readl(ri->reg_clk_ctrl);
+	else
+		mux_div = readl(ri->reg_clk_sel);
+
+	if (mix->lock)
+		spin_unlock_irqrestore(mix->lock, flags);
+
+	width = mix->reg_info.width_mux;
+	shift = mix->reg_info.shift_mux;
+
+	mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);
+
+	return _get_mux(mix, mux_val);
+}
+
+static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+	unsigned long flags = 0;
+	u32 mux_div = 0;
+	u8 width, shift;
+	unsigned int div;
+
+	if (mix->lock)
+		spin_lock_irqsave(mix->lock, flags);
+
+	if (mix->type == MMP_CLK_MIX_TYPE_V1
+		|| mix->type == MMP_CLK_MIX_TYPE_V2)
+		mux_div = readl(ri->reg_clk_ctrl);
+	else
+		mux_div = readl(ri->reg_clk_sel);
+
+	if (mix->lock)
+		spin_unlock_irqrestore(mix->lock, flags);
+
+	width = mix->reg_info.width_div;
+	shift = mix->reg_info.shift_div;
+
+	div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));
+
+	return parent_rate / div;
+}
+
+static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	struct mmp_clk_mix_clk_table *item;
+	int i;
+	u32 div_val, mux_val;
+
+	if (mix->table) {
+		for (i = 0; i < mix->table_size; i++) {
+			item = &mix->table[i];
+			if (item->valid == 0)
+				continue;
+			if (item->parent_index == index)
+				break;
+		}
+		if (i < mix->table_size) {
+			div_val = _get_div_val(mix, item->divisor);
+			mux_val = _get_mux_val(mix, item->parent_index);
+		} else
+			return -EINVAL;
+	} else {
+		mux_val = _get_mux_val(mix, index);
+		div_val = 0;
+	}
+
+	return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
+}
+
+static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long best_parent_rate)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+	struct mmp_clk_mix_clk_table *item;
+	unsigned long parent_rate;
+	unsigned int best_divisor;
+	struct clk *mix_clk, *parent;
+	int i;
+
+	best_divisor = best_parent_rate / rate;
+
+	mix_clk = hw->clk;
+	if (mix->table) {
+		for (i = 0; i < mix->table_size; i++) {
+			item = &mix->table[i];
+			if (item->valid == 0)
+				continue;
+			parent = clk_get_parent_by_index(mix_clk,
+						item->parent_index);
+			parent_rate = __clk_get_rate(parent);
+			if (parent_rate == best_parent_rate
+				&& item->divisor == best_divisor)
+				break;
+		}
+		if (i < mix->table_size)
+			return _set_rate(mix,
+				_get_mux_val(mix, item->parent_index),
+				_get_div_val(mix, item->divisor),
+				1, 1);
+		else
+			return -EINVAL;
+	} else {
+		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+			parent = clk_get_parent_by_index(mix_clk, i);
+			parent_rate = __clk_get_rate(parent);
+			if (parent_rate == best_parent_rate)
+				break;
+		}
+		if (i < __clk_get_num_parents(mix_clk))
+			return _set_rate(mix, _get_mux_val(mix, i),
+					_get_div_val(mix, best_divisor), 1, 1);
+		else
+			return -EINVAL;
+	}
+}
+
+static void mmp_clk_mix_init(struct clk_hw *hw)
+{
+	struct mmp_clk_mix *mix = to_clk_mix(hw);
+
+	if (mix->table)
+		_filter_clk_table(mix, mix->table, mix->table_size);
+}
+
+const struct clk_ops mmp_clk_mix_ops = {
+	.determine_rate = mmp_clk_mix_determine_rate,
+	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
+	.set_rate = mmp_clk_set_rate,
+	.set_parent = mmp_clk_set_parent,
+	.get_parent = mmp_clk_mix_get_parent,
+	.recalc_rate = mmp_clk_mix_recalc_rate,
+	.init = mmp_clk_mix_init,
+};
+
+struct clk *mmp_clk_register_mix(struct device *dev,
+					const char *name,
+					const char **parent_names,
+					u8 num_parents,
+					unsigned long flags,
+					struct mmp_clk_mix_config *config,
+					spinlock_t *lock)
+{
+	struct mmp_clk_mix *mix;
+	struct clk *clk;
+	struct clk_init_data init;
+	size_t table_bytes;
+
+	mix = kzalloc(sizeof(*mix), GFP_KERNEL);
+	if (!mix) {
+		pr_err("%s:%s: could not allocate mmp mix clk\n",
+			__func__, name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.flags = flags | CLK_GET_RATE_NOCACHE;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+	init.ops = &mmp_clk_mix_ops;
+
+	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
+	if (config->table) {
+		table_bytes = sizeof(*config->table) * config->table_size;
+		mix->table = kzalloc(table_bytes, GFP_KERNEL);
+		if (!mix->table) {
+			pr_err("%s:%s: could not allocate mmp mix table\n",
+				__func__, name);
+			kfree(mix);
+			return ERR_PTR(-ENOMEM);
+		}
+		memcpy(mix->table, config->table, table_bytes);
+		mix->table_size = config->table_size;
+	}
+
+	if (config->mux_table) {
+		table_bytes = sizeof(u32) * num_parents;
+		mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
+		if (!mix->mux_table) {
+			pr_err("%s:%s: could not allocate mmp mix mux-table\n",
+				__func__, name);
+			kfree(mix->table);
+			kfree(mix);
+			return ERR_PTR(-ENOMEM);
+		}
+		memcpy(mix->mux_table, config->mux_table, table_bytes);
+	}
+
+	mix->div_flags = config->div_flags;
+	mix->mux_flags = config->mux_flags;
+	mix->lock = lock;
+	mix->hw.init = &init;
+
+	if (config->reg_info.bit_fc >= 32)
+		mix->type = MMP_CLK_MIX_TYPE_V1;
+	else if (config->reg_info.reg_clk_sel)
+		mix->type = MMP_CLK_MIX_TYPE_V3;
+	else
+		mix->type = MMP_CLK_MIX_TYPE_V2;
+	clk = clk_register(dev, &mix->hw);
+
+	if (IS_ERR(clk)) {
+		kfree(mix->mux_table);
+		kfree(mix->table);
+		kfree(mix);
+	}
+
+	return clk;
+}
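
A registration sketch for the table-less mix path, reusing the SDH parent list and DEFINE_MIX_REG_INFO values that appear in clk-of-mmp2.c below. It assumes DEFINE_MIX_REG_INFO fills width_div/shift_div/width_mux/shift_mux/bit_fc in that order and that reg_clk_ctrl is assigned separately before registration; neither detail is shown in this patch, so treat it as illustrative:

	static DEFINE_SPINLOCK(sdh_lock);
	static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
	static struct mmp_clk_mix_config sdh_mix_config = {
		/* 4-bit divider at bit 10, 2-bit mux at bit 8, bit_fc = 32 */
		.reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
	};

	static struct clk *sdh_mix_register(void __iomem *apmu_base)
	{
		sdh_mix_config.reg_info.reg_clk_ctrl = apmu_base + 0x54; /* APMU_SDH0 */

		return mmp_clk_register_mix(NULL, "sdh_mix_clk", sdh_parent_names,
					    ARRAY_SIZE(sdh_parent_names),
					    CLK_SET_RATE_PARENT,
					    &sdh_mix_config, &sdh_lock);
	}

Because bit_fc is 32 here, mmp_clk_register_mix() classifies the clock as MMP_CLK_MIX_TYPE_V1, so _set_rate() writes reg_clk_ctrl directly and never polls a frequency-change bit.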
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index b2721cae257a..5c90a4230fa3 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -54,7 +54,7 @@
 
 static DEFINE_SPINLOCK(clk_lock);
 
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
 	.factor = 2,
 	.num_mask = 0x1fff,
 	.den_mask = 0x1fff,
@@ -62,7 +62,7 @@ static struct clk_factor_masks uart_factor_masks = {
 	.den_shift = 0,
 };
 
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
 	{.num = 14634, .den = 2165},	/*14.745MHZ */
 	{.num = 3521, .den = 689},	/*19.23MHZ */
 	{.num = 9679, .den = 5728},	/*58.9824MHZ */
@@ -191,7 +191,7 @@ void __init mmp2_clk_init(void)
 	clk = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
 				mpmu_base + MPMU_UART_PLL,
 				&uart_factor_masks, uart_factor_tbl,
-				ARRAY_SIZE(uart_factor_tbl));
+				ARRAY_SIZE(uart_factor_tbl), &clk_lock);
 	clk_set_rate(clk, 14745600);
 	clk_register_clkdev(clk, "uart_pll", NULL);
 
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
new file mode 100644
index 000000000000..2cbc2b43ae52
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -0,0 +1,334 @@
1/*
2 * mmp2 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
19
20#include <dt-bindings/clock/marvell,mmp2.h>
21
22#include "clk.h"
23#include "reset.h"
24
25#define APBC_RTC 0x0
26#define APBC_TWSI0 0x4
27#define APBC_TWSI1 0x8
28#define APBC_TWSI2 0xc
29#define APBC_TWSI3 0x10
30#define APBC_TWSI4 0x7c
31#define APBC_TWSI5 0x80
32#define APBC_KPC 0x18
33#define APBC_UART0 0x2c
34#define APBC_UART1 0x30
35#define APBC_UART2 0x34
36#define APBC_UART3 0x88
37#define APBC_GPIO 0x38
38#define APBC_PWM0 0x3c
39#define APBC_PWM1 0x40
40#define APBC_PWM2 0x44
41#define APBC_PWM3 0x48
42#define APBC_SSP0 0x50
43#define APBC_SSP1 0x54
44#define APBC_SSP2 0x58
45#define APBC_SSP3 0x5c
46#define APMU_SDH0 0x54
47#define APMU_SDH1 0x58
48#define APMU_SDH2 0xe8
49#define APMU_SDH3 0xec
50#define APMU_USB 0x5c
51#define APMU_DISP0 0x4c
52#define APMU_DISP1 0x110
53#define APMU_CCIC0 0x50
54#define APMU_CCIC1 0xf4
55#define MPMU_UART_PLL 0x14
56
57struct mmp2_clk_unit {
58 struct mmp_clk_unit unit;
59 void __iomem *mpmu_base;
60 void __iomem *apmu_base;
61 void __iomem *apbc_base;
62};
63
64static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
65 {MMP2_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
66 {MMP2_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
67 {MMP2_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 800000000},
68 {MMP2_CLK_PLL2, "pll2", NULL, CLK_IS_ROOT, 960000000},
69 {MMP2_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
70};
71
72static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
73 {MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
74 {MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
75 {MMP2_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
76 {MMP2_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
77 {MMP2_CLK_PLL1_20, "pll1_20", "pll1_4", 1, 5, 0},
78 {MMP2_CLK_PLL1_3, "pll1_3", "pll1", 1, 3, 0},
79 {MMP2_CLK_PLL1_6, "pll1_6", "pll1_3", 1, 2, 0},
80 {MMP2_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
81 {MMP2_CLK_PLL2_2, "pll2_2", "pll2", 1, 2, 0},
82 {MMP2_CLK_PLL2_4, "pll2_4", "pll2_2", 1, 2, 0},
83 {MMP2_CLK_PLL2_8, "pll2_8", "pll2_4", 1, 2, 0},
84 {MMP2_CLK_PLL2_16, "pll2_16", "pll2_8", 1, 2, 0},
85 {MMP2_CLK_PLL2_3, "pll2_3", "pll2", 1, 3, 0},
86 {MMP2_CLK_PLL2_6, "pll2_6", "pll2_3", 1, 2, 0},
87 {MMP2_CLK_PLL2_12, "pll2_12", "pll2_6", 1, 2, 0},
88 {MMP2_CLK_VCTCXO_2, "vctcxo_2", "vctcxo", 1, 2, 0},
89 {MMP2_CLK_VCTCXO_4, "vctcxo_4", "vctcxo_2", 1, 2, 0},
90};
91
92static struct mmp_clk_factor_masks uart_factor_masks = {
93 .factor = 2,
94 .num_mask = 0x1fff,
95 .den_mask = 0x1fff,
96 .num_shift = 16,
97 .den_shift = 0,
98};
99
100static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
101 {.num = 14634, .den = 2165}, /*14.745MHZ */
102 {.num = 3521, .den = 689}, /*19.23MHZ */
103 {.num = 9679, .den = 5728}, /*58.9824MHZ */
104 {.num = 15850, .den = 9451}, /*59.429MHZ */
105};
106
107static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
108{
109 struct clk *clk;
110 struct mmp_clk_unit *unit = &pxa_unit->unit;
111
112 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
113 ARRAY_SIZE(fixed_rate_clks));
114
115 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
116 ARRAY_SIZE(fixed_factor_clks));
117
118 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
119 CLK_SET_RATE_PARENT,
120 pxa_unit->mpmu_base + MPMU_UART_PLL,
121 &uart_factor_masks, uart_factor_tbl,
122 ARRAY_SIZE(uart_factor_tbl), NULL);
123 mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
124}
125
126static DEFINE_SPINLOCK(uart0_lock);
127static DEFINE_SPINLOCK(uart1_lock);
128static DEFINE_SPINLOCK(uart2_lock);
129static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
130
131static DEFINE_SPINLOCK(ssp0_lock);
132static DEFINE_SPINLOCK(ssp1_lock);
133static DEFINE_SPINLOCK(ssp2_lock);
134static DEFINE_SPINLOCK(ssp3_lock);
135static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
136
137static DEFINE_SPINLOCK(reset_lock);
138
139static struct mmp_param_mux_clk apbc_mux_clks[] = {
140 {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
141 {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
142 {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
143 {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART3, 4, 3, 0, &uart2_lock},
144 {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
145 {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
146 {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
147 {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
148};
149
150static struct mmp_param_gate_clk apbc_gate_clks[] = {
151 {MMP2_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 0x3, 0x0, 0, &reset_lock},
152 {MMP2_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 0x3, 0x0, 0, &reset_lock},
153 {MMP2_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI2, 0x7, 0x3, 0x0, 0, &reset_lock},
154 {MMP2_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 0x3, 0x0, 0, &reset_lock},
155 {MMP2_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI4, 0x7, 0x3, 0x0, 0, &reset_lock},
156 {MMP2_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI5, 0x7, 0x3, 0x0, 0, &reset_lock},
157 {MMP2_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 0x3, 0x0, 0, &reset_lock},
158 {MMP2_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
159 {MMP2_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
160 {MMP2_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x7, 0x3, 0x0, 0, &reset_lock},
161 {MMP2_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x7, 0x3, 0x0, 0, &reset_lock},
162 {MMP2_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x7, 0x3, 0x0, 0, &reset_lock},
163 {MMP2_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x7, 0x3, 0x0, 0, &reset_lock},
164	/* These gate clocks have mux parents. */
165 {MMP2_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 0x3, 0x0, 0, &uart0_lock},
166 {MMP2_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 0x3, 0x0, 0, &uart1_lock},
167 {MMP2_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
168	{MMP2_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, APBC_UART3, 0x7, 0x3, 0x0, 0, &uart3_lock},
169 {MMP2_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x7, 0x3, 0x0, 0, &ssp0_lock},
170 {MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock},
171 {MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
172 {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
173};
174
175static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
176{
177 struct mmp_clk_unit *unit = &pxa_unit->unit;
178
179 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
180 ARRAY_SIZE(apbc_mux_clks));
181
182 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
183 ARRAY_SIZE(apbc_gate_clks));
184}
185
186static DEFINE_SPINLOCK(sdh_lock);
187static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
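/* DEFINE_MIX_REG_INFO arguments are (div width, div shift, mux width, mux shift, frequency-change bit). */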
188static struct mmp_clk_mix_config sdh_mix_config = {
189 .reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
190};
191
192static DEFINE_SPINLOCK(usb_lock);
193
194static DEFINE_SPINLOCK(disp0_lock);
195static DEFINE_SPINLOCK(disp1_lock);
196static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
197
198static DEFINE_SPINLOCK(ccic0_lock);
199static DEFINE_SPINLOCK(ccic1_lock);
200static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
201static struct mmp_clk_mix_config ccic0_mix_config = {
202 .reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
203};
204static struct mmp_clk_mix_config ccic1_mix_config = {
205 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
206};
207
208static struct mmp_param_mux_clk apmu_mux_clks[] = {
209 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
210 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
211};
212
213static struct mmp_param_div_clk apmu_div_clks[] = {
214 {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
215 {0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
216 {0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, 0, &disp1_lock},
217 {0, "ccic0_sphy_div", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
218 {0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
219};
220
221static struct mmp_param_gate_clk apmu_gate_clks[] = {
222 {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
223	/* These gate clocks have mux parents. */
224 {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
225 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
226 {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
227 {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
228 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
229 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
230 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
231 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
232 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
233 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
234 {MMP2_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
235 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
236 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
237 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
238};
239
240static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
241{
242 struct clk *clk;
243 struct mmp_clk_unit *unit = &pxa_unit->unit;
244
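	/* The SDH/CCIC gate clocks below use these mix (divider + mux) clocks as parents. */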
245 sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_SDH0;
246 clk = mmp_clk_register_mix(NULL, "sdh_mix_clk", sdh_parent_names,
247 ARRAY_SIZE(sdh_parent_names),
248 CLK_SET_RATE_PARENT,
249 &sdh_mix_config, &sdh_lock);
250
251 ccic0_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC0;
252 clk = mmp_clk_register_mix(NULL, "ccic0_mix_clk", ccic_parent_names,
253 ARRAY_SIZE(ccic_parent_names),
254 CLK_SET_RATE_PARENT,
255 &ccic0_mix_config, &ccic0_lock);
256 mmp_clk_add(unit, MMP2_CLK_CCIC0_MIX, clk);
257
258 ccic1_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC1;
259 clk = mmp_clk_register_mix(NULL, "ccic1_mix_clk", ccic_parent_names,
260 ARRAY_SIZE(ccic_parent_names),
261 CLK_SET_RATE_PARENT,
262 &ccic1_mix_config, &ccic1_lock);
263 mmp_clk_add(unit, MMP2_CLK_CCIC1_MIX, clk);
264
265 mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
266 ARRAY_SIZE(apmu_mux_clks));
267
268 mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
269 ARRAY_SIZE(apmu_div_clks));
270
271 mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
272 ARRAY_SIZE(apmu_gate_clks));
273}
274
275static void mmp2_clk_reset_init(struct device_node *np,
276 struct mmp2_clk_unit *pxa_unit)
277{
278 struct mmp_clk_reset_cell *cells;
279 int i, nr_resets;
280
281 nr_resets = ARRAY_SIZE(apbc_gate_clks);
282 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
283 if (!cells)
284 return;
285
286 for (i = 0; i < nr_resets; i++) {
287 cells[i].clk_id = apbc_gate_clks[i].id;
288 cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
289 cells[i].flags = 0;
290 cells[i].lock = apbc_gate_clks[i].lock;
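		/* Bit 2 of each APBC register is the peripheral reset bit. */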
291 cells[i].bits = 0x4;
292 }
293
294 mmp_clk_reset_register(np, cells, nr_resets);
295}
296
297static void __init mmp2_clk_init(struct device_node *np)
298{
299 struct mmp2_clk_unit *pxa_unit;
300
301 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
302 if (!pxa_unit)
303 return;
304
305 pxa_unit->mpmu_base = of_iomap(np, 0);
306 if (!pxa_unit->mpmu_base) {
307 pr_err("failed to map mpmu registers\n");
308 return;
309 }
310
311 pxa_unit->apmu_base = of_iomap(np, 1);
312	if (!pxa_unit->apmu_base) {
313 pr_err("failed to map apmu registers\n");
314 return;
315 }
316
317 pxa_unit->apbc_base = of_iomap(np, 2);
318 if (!pxa_unit->apbc_base) {
319 pr_err("failed to map apbc registers\n");
320 return;
321 }
322
323 mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
324
325 mmp2_pll_init(pxa_unit);
326
327 mmp2_apb_periph_clk_init(pxa_unit);
328
329 mmp2_axi_periph_clk_init(pxa_unit);
330
331 mmp2_clk_reset_init(np, pxa_unit);
332}
333
334CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
new file mode 100644
index 000000000000..5b1810dc4bd2
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -0,0 +1,279 @@
1/*
2 * pxa168 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
#include <linux/slab.h>
19
20#include <dt-bindings/clock/marvell,pxa168.h>
21
22#include "clk.h"
23#include "reset.h"
24
25#define APBC_RTC 0x28
26#define APBC_TWSI0 0x2c
27#define APBC_KPC 0x30
28#define APBC_UART0 0x0
29#define APBC_UART1 0x4
30#define APBC_GPIO 0x8
31#define APBC_PWM0 0xc
32#define APBC_PWM1 0x10
33#define APBC_PWM2 0x14
34#define APBC_PWM3 0x18
35#define APBC_SSP0 0x81c
36#define APBC_SSP1 0x820
37#define APBC_SSP2 0x84c
38#define APBC_SSP3 0x858
39#define APBC_SSP4 0x85c
40#define APBC_TWSI1 0x6c
41#define APBC_UART2 0x70
42#define APMU_SDH0 0x54
43#define APMU_SDH1 0x58
44#define APMU_USB 0x5c
45#define APMU_DISP0 0x4c
46#define APMU_CCIC0 0x50
47#define APMU_DFC 0x60
48#define MPMU_UART_PLL 0x14
49
50struct pxa168_clk_unit {
51 struct mmp_clk_unit unit;
52 void __iomem *mpmu_base;
53 void __iomem *apmu_base;
54 void __iomem *apbc_base;
55};
56
57static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
58 {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
59 {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
60 {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
61};
62
63static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
64 {PXA168_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
65 {PXA168_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
66 {PXA168_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
67 {PXA168_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
68 {PXA168_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
69 {PXA168_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
70 {PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
71 {PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
72 {PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
73 {PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
74 {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
75 {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
76 {PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
77};
78
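/* MPMU_UART_PLL holds a 13-bit numerator at bit 16 and a 13-bit denominator at bit 0. */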
79static struct mmp_clk_factor_masks uart_factor_masks = {
80 .factor = 2,
81 .num_mask = 0x1fff,
82 .den_mask = 0x1fff,
83 .num_shift = 16,
84 .den_shift = 0,
85};
86
87static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
88	{.num = 8125, .den = 1536},	/* 14.745 MHz */
89};
90
91static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
92{
93 struct clk *clk;
94 struct mmp_clk_unit *unit = &pxa_unit->unit;
95
96 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
97 ARRAY_SIZE(fixed_rate_clks));
98
99 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
100 ARRAY_SIZE(fixed_factor_clks));
101
102 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
103 CLK_SET_RATE_PARENT,
104 pxa_unit->mpmu_base + MPMU_UART_PLL,
105 &uart_factor_masks, uart_factor_tbl,
106 ARRAY_SIZE(uart_factor_tbl), NULL);
107 mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
108}
109
110static DEFINE_SPINLOCK(uart0_lock);
111static DEFINE_SPINLOCK(uart1_lock);
112static DEFINE_SPINLOCK(uart2_lock);
113static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
114
115static DEFINE_SPINLOCK(ssp0_lock);
116static DEFINE_SPINLOCK(ssp1_lock);
117static DEFINE_SPINLOCK(ssp2_lock);
118static DEFINE_SPINLOCK(ssp3_lock);
119static DEFINE_SPINLOCK(ssp4_lock);
120static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
121
122static DEFINE_SPINLOCK(reset_lock);
123
124static struct mmp_param_mux_clk apbc_mux_clks[] = {
125 {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
126 {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
127 {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
128 {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
129 {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
130 {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
131 {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
132 {0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock},
133};
134
135static struct mmp_param_gate_clk apbc_gate_clks[] = {
136 {PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
137 {PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
138 {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
139 {PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
140 {PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
141 {PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
142 {PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
143 {PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
144 {PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
145	/* These gate clocks have mux parents. */
146 {PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
147 {PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
148 {PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
149 {PXA168_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
150 {PXA168_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
151 {PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock},
152 {PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock},
153 {PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock},
154};
155
156static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
157{
158 struct mmp_clk_unit *unit = &pxa_unit->unit;
159
160 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
161 ARRAY_SIZE(apbc_mux_clks));
162
163 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
164 ARRAY_SIZE(apbc_gate_clks));
166}
167
168static DEFINE_SPINLOCK(sdh0_lock);
169static DEFINE_SPINLOCK(sdh1_lock);
170static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
171
172static DEFINE_SPINLOCK(usb_lock);
173
174static DEFINE_SPINLOCK(disp0_lock);
175static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
176
177static DEFINE_SPINLOCK(ccic0_lock);
178static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
179static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
180
181static struct mmp_param_mux_clk apmu_mux_clks[] = {
182 {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
183 {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
184 {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
185 {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
186 {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
187};
188
189static struct mmp_param_div_clk apmu_div_clks[] = {
190 {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
191};
192
193static struct mmp_param_gate_clk apmu_gate_clks[] = {
194 {PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
195 {PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
196 {PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
197	/* These gate clocks have mux parents. */
198 {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
199 {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
200 {PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
201 {PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
202 {PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
203 {PXA168_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
204};
205
206static void pxa168_axi_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
207{
208 struct mmp_clk_unit *unit = &pxa_unit->unit;
209
210 mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
211 ARRAY_SIZE(apmu_mux_clks));
212
213 mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
214 ARRAY_SIZE(apmu_div_clks));
215
216 mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
217 ARRAY_SIZE(apmu_gate_clks));
218}
219
220static void pxa168_clk_reset_init(struct device_node *np,
221 struct pxa168_clk_unit *pxa_unit)
222{
223 struct mmp_clk_reset_cell *cells;
224 int i, nr_resets;
225
226 nr_resets = ARRAY_SIZE(apbc_gate_clks);
227 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
228 if (!cells)
229 return;
230
231 for (i = 0; i < nr_resets; i++) {
232 cells[i].clk_id = apbc_gate_clks[i].id;
233 cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
234 cells[i].flags = 0;
235 cells[i].lock = apbc_gate_clks[i].lock;
236 cells[i].bits = 0x4;
237 }
238
239 mmp_clk_reset_register(np, cells, nr_resets);
240}
241
242static void __init pxa168_clk_init(struct device_node *np)
243{
244 struct pxa168_clk_unit *pxa_unit;
245
246 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
247 if (!pxa_unit)
248 return;
249
250 pxa_unit->mpmu_base = of_iomap(np, 0);
251 if (!pxa_unit->mpmu_base) {
252 pr_err("failed to map mpmu registers\n");
253 return;
254 }
255
256 pxa_unit->apmu_base = of_iomap(np, 1);
257	if (!pxa_unit->apmu_base) {
258 pr_err("failed to map apmu registers\n");
259 return;
260 }
261
262 pxa_unit->apbc_base = of_iomap(np, 2);
263 if (!pxa_unit->apbc_base) {
264 pr_err("failed to map apbc registers\n");
265 return;
266 }
267
268 mmp_clk_init(np, &pxa_unit->unit, PXA168_NR_CLKS);
269
270 pxa168_pll_init(pxa_unit);
271
272 pxa168_apb_periph_clk_init(pxa_unit);
273
274 pxa168_axi_periph_clk_init(pxa_unit);
275
276 pxa168_clk_reset_init(np, pxa_unit);
277}
278
279CLK_OF_DECLARE(pxa168_clk, "marvell,pxa168-clock", pxa168_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
new file mode 100644
index 000000000000..5e3c80dad336
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -0,0 +1,301 @@
1/*
2 * pxa910 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
#include <linux/slab.h>
19
20#include <dt-bindings/clock/marvell,pxa910.h>
21
22#include "clk.h"
23#include "reset.h"
24
25#define APBC_RTC 0x28
26#define APBC_TWSI0 0x2c
27#define APBC_KPC 0x18
28#define APBC_UART0 0x0
29#define APBC_UART1 0x4
30#define APBC_GPIO 0x8
31#define APBC_PWM0 0xc
32#define APBC_PWM1 0x10
33#define APBC_PWM2 0x14
34#define APBC_PWM3 0x18
35#define APBC_SSP0 0x1c
36#define APBC_SSP1 0x20
37#define APBC_SSP2 0x4c
38#define APBCP_TWSI1 0x28
39#define APBCP_UART2 0x1c
40#define APMU_SDH0 0x54
41#define APMU_SDH1 0x58
42#define APMU_USB 0x5c
43#define APMU_DISP0 0x4c
44#define APMU_CCIC0 0x50
45#define APMU_DFC 0x60
46#define MPMU_UART_PLL 0x14
47
48struct pxa910_clk_unit {
49 struct mmp_clk_unit unit;
50 void __iomem *mpmu_base;
51 void __iomem *apmu_base;
52 void __iomem *apbc_base;
53 void __iomem *apbcp_base;
54};
55
56static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
57 {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
58 {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
59 {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
60};
61
62static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
63 {PXA910_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
64 {PXA910_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
65 {PXA910_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
66 {PXA910_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
67 {PXA910_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
68 {PXA910_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
69 {PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
70 {PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
71 {PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
72 {PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
73 {PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
74 {PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
75 {PXA910_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
76};
77
78static struct mmp_clk_factor_masks uart_factor_masks = {
79 .factor = 2,
80 .num_mask = 0x1fff,
81 .den_mask = 0x1fff,
82 .num_shift = 16,
83 .den_shift = 0,
84};
85
86static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
87	{.num = 8125, .den = 1536},	/* 14.745 MHz */
88};
89
90static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
91{
92 struct clk *clk;
93 struct mmp_clk_unit *unit = &pxa_unit->unit;
94
95 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
96 ARRAY_SIZE(fixed_rate_clks));
97
98 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
99 ARRAY_SIZE(fixed_factor_clks));
100
101 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
102 CLK_SET_RATE_PARENT,
103 pxa_unit->mpmu_base + MPMU_UART_PLL,
104 &uart_factor_masks, uart_factor_tbl,
105 ARRAY_SIZE(uart_factor_tbl), NULL);
106 mmp_clk_add(unit, PXA910_CLK_UART_PLL, clk);
107}
108
109static DEFINE_SPINLOCK(uart0_lock);
110static DEFINE_SPINLOCK(uart1_lock);
111static DEFINE_SPINLOCK(uart2_lock);
112static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
113
114static DEFINE_SPINLOCK(ssp0_lock);
115static DEFINE_SPINLOCK(ssp1_lock);
116static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
117
118static DEFINE_SPINLOCK(reset_lock);
119
120static struct mmp_param_mux_clk apbc_mux_clks[] = {
121 {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
122 {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
123 {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
124 {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
125};
126
127static struct mmp_param_mux_clk apbcp_mux_clks[] = {
128 {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
129};
130
131static struct mmp_param_gate_clk apbc_gate_clks[] = {
132 {PXA910_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
133 {PXA910_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
134 {PXA910_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
135 {PXA910_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
136 {PXA910_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
137 {PXA910_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
138 {PXA910_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
139 {PXA910_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
140	/* These gate clocks have mux parents. */
141 {PXA910_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
142 {PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
143 {PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
144 {PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
145};
146
147static struct mmp_param_gate_clk apbcp_gate_clks[] = {
148 {PXA910_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBCP_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
149	/* These gate clocks have mux parents. */
150 {PXA910_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
151};
152
153static void pxa910_apb_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
154{
155 struct mmp_clk_unit *unit = &pxa_unit->unit;
156
157 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
158 ARRAY_SIZE(apbc_mux_clks));
159
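	/* UART2 and TWSI1 sit in the separate APBCP register block. */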
160 mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->apbcp_base,
161 ARRAY_SIZE(apbcp_mux_clks));
162
163 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
164 ARRAY_SIZE(apbc_gate_clks));
165
166 mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->apbcp_base,
167 ARRAY_SIZE(apbcp_gate_clks));
168}
169
170static DEFINE_SPINLOCK(sdh0_lock);
171static DEFINE_SPINLOCK(sdh1_lock);
172static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
173
174static DEFINE_SPINLOCK(usb_lock);
175
176static DEFINE_SPINLOCK(disp0_lock);
177static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
178
179static DEFINE_SPINLOCK(ccic0_lock);
180static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
181static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
182
183static struct mmp_param_mux_clk apmu_mux_clks[] = {
184 {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
185 {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
186 {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
187 {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
188 {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
189};
190
191static struct mmp_param_div_clk apmu_div_clks[] = {
192 {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
193};
194
195static struct mmp_param_gate_clk apmu_gate_clks[] = {
196 {PXA910_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
197 {PXA910_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
198 {PXA910_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
199	/* These gate clocks have mux parents. */
200 {PXA910_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
201 {PXA910_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
202 {PXA910_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
203 {PXA910_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
204 {PXA910_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
205 {PXA910_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
206};
207
208static void pxa910_axi_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
209{
210 struct mmp_clk_unit *unit = &pxa_unit->unit;
211
212 mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
213 ARRAY_SIZE(apmu_mux_clks));
214
215 mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
216 ARRAY_SIZE(apmu_div_clks));
217
218 mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
219 ARRAY_SIZE(apmu_gate_clks));
220}
221
222static void pxa910_clk_reset_init(struct device_node *np,
223 struct pxa910_clk_unit *pxa_unit)
224{
225 struct mmp_clk_reset_cell *cells;
226 int i, base, nr_resets_apbc, nr_resets_apbcp, nr_resets;
227
228 nr_resets_apbc = ARRAY_SIZE(apbc_gate_clks);
229 nr_resets_apbcp = ARRAY_SIZE(apbcp_gate_clks);
230 nr_resets = nr_resets_apbc + nr_resets_apbcp;
231 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
232 if (!cells)
233 return;
234
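	/* Pack the APBC cells first, then the APBCP cells, into one table. */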
235 base = 0;
236 for (i = 0; i < nr_resets_apbc; i++) {
237 cells[base + i].clk_id = apbc_gate_clks[i].id;
238 cells[base + i].reg =
239 pxa_unit->apbc_base + apbc_gate_clks[i].offset;
240 cells[base + i].flags = 0;
241 cells[base + i].lock = apbc_gate_clks[i].lock;
242 cells[base + i].bits = 0x4;
243 }
244
245 base = nr_resets_apbc;
246 for (i = 0; i < nr_resets_apbcp; i++) {
247 cells[base + i].clk_id = apbcp_gate_clks[i].id;
238		cells[base + i].reg =
239			pxa_unit->apbcp_base + apbcp_gate_clks[i].offset;
240		cells[base + i].flags = 0;
241		cells[base + i].lock = apbcp_gate_clks[i].lock;
252 cells[base + i].bits = 0x4;
253 }
254
255 mmp_clk_reset_register(np, cells, nr_resets);
256}
257
258static void __init pxa910_clk_init(struct device_node *np)
259{
260 struct pxa910_clk_unit *pxa_unit;
261
262 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
263 if (!pxa_unit)
264 return;
265
266 pxa_unit->mpmu_base = of_iomap(np, 0);
267 if (!pxa_unit->mpmu_base) {
268 pr_err("failed to map mpmu registers\n");
269 return;
270 }
271
272 pxa_unit->apmu_base = of_iomap(np, 1);
273	if (!pxa_unit->apmu_base) {
274 pr_err("failed to map apmu registers\n");
275 return;
276 }
277
278 pxa_unit->apbc_base = of_iomap(np, 2);
279 if (!pxa_unit->apbc_base) {
280 pr_err("failed to map apbc registers\n");
281 return;
282 }
283
284 pxa_unit->apbcp_base = of_iomap(np, 3);
285	if (!pxa_unit->apbcp_base) {
286 pr_err("failed to map apbcp registers\n");
287 return;
288 }
289
290 mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
291
292 pxa910_pll_init(pxa_unit);
293
294 pxa910_apb_periph_clk_init(pxa_unit);
295
296 pxa910_axi_periph_clk_init(pxa_unit);
297
298 pxa910_clk_reset_init(np, pxa_unit);
299}
300
301CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 014396b028a2..93e967c0f972 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -47,7 +47,7 @@
47 47
48static DEFINE_SPINLOCK(clk_lock); 48static DEFINE_SPINLOCK(clk_lock);
49 49
50static struct clk_factor_masks uart_factor_masks = { 50static struct mmp_clk_factor_masks uart_factor_masks = {
51 .factor = 2, 51 .factor = 2,
52 .num_mask = 0x1fff, 52 .num_mask = 0x1fff,
53 .den_mask = 0x1fff, 53 .den_mask = 0x1fff,
@@ -55,7 +55,7 @@ static struct clk_factor_masks uart_factor_masks = {
55 .den_shift = 0, 55 .den_shift = 0,
56}; 56};
57 57
58static struct clk_factor_tbl uart_factor_tbl[] = { 58static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
59 {.num = 8125, .den = 1536}, /*14.745MHZ */ 59 {.num = 8125, .den = 1536}, /*14.745MHZ */
60}; 60};
61 61
@@ -158,7 +158,7 @@ void __init pxa168_clk_init(void)
158 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0, 158 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
159 mpmu_base + MPMU_UART_PLL, 159 mpmu_base + MPMU_UART_PLL,
160 &uart_factor_masks, uart_factor_tbl, 160 &uart_factor_masks, uart_factor_tbl,
161 ARRAY_SIZE(uart_factor_tbl)); 161 ARRAY_SIZE(uart_factor_tbl), &clk_lock);
162 clk_set_rate(uart_pll, 14745600); 162 clk_set_rate(uart_pll, 14745600);
163 clk_register_clkdev(uart_pll, "uart_pll", NULL); 163 clk_register_clkdev(uart_pll, "uart_pll", NULL);
164 164
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 9efc6a47535d..993abcdb32cc 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -45,7 +45,7 @@
45 45
46static DEFINE_SPINLOCK(clk_lock); 46static DEFINE_SPINLOCK(clk_lock);
47 47
48static struct clk_factor_masks uart_factor_masks = { 48static struct mmp_clk_factor_masks uart_factor_masks = {
49 .factor = 2, 49 .factor = 2,
50 .num_mask = 0x1fff, 50 .num_mask = 0x1fff,
51 .den_mask = 0x1fff, 51 .den_mask = 0x1fff,
@@ -53,7 +53,7 @@ static struct clk_factor_masks uart_factor_masks = {
53 .den_shift = 0, 53 .den_shift = 0,
54}; 54};
55 55
56static struct clk_factor_tbl uart_factor_tbl[] = { 56static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
57 {.num = 8125, .den = 1536}, /*14.745MHZ */ 57 {.num = 8125, .den = 1536}, /*14.745MHZ */
58}; 58};
59 59
@@ -163,7 +163,7 @@ void __init pxa910_clk_init(void)
163 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0, 163 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
164 mpmu_base + MPMU_UART_PLL, 164 mpmu_base + MPMU_UART_PLL,
165 &uart_factor_masks, uart_factor_tbl, 165 &uart_factor_masks, uart_factor_tbl,
166 ARRAY_SIZE(uart_factor_tbl)); 166 ARRAY_SIZE(uart_factor_tbl), &clk_lock);
167 clk_set_rate(uart_pll, 14745600); 167 clk_set_rate(uart_pll, 14745600);
168 clk_register_clkdev(uart_pll, "uart_pll", NULL); 168 clk_register_clkdev(uart_pll, "uart_pll", NULL);
169 169
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
new file mode 100644
index 000000000000..cf038ef54c59
--- /dev/null
+++ b/drivers/clk/mmp/clk.c
@@ -0,0 +1,192 @@
1#include <linux/io.h>
2#include <linux/clk.h>
3#include <linux/clk-provider.h>
4#include <linux/clkdev.h>
5#include <linux/of.h>
6#include <linux/of_address.h>
#include <linux/slab.h>
7
8#include "clk.h"
9
10void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
11 int nr_clks)
12{
13	struct clk **clk_table;
14
15 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
16 if (!clk_table)
17 return;
18
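	/* Expose the table as a onecell provider so DT consumers can look up clocks by index. */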
19 unit->clk_table = clk_table;
20 unit->nr_clks = nr_clks;
21 unit->clk_data.clks = clk_table;
22 unit->clk_data.clk_num = nr_clks;
23 of_clk_add_provider(np, of_clk_src_onecell_get, &unit->clk_data);
24}
25
26void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
27 struct mmp_param_fixed_rate_clk *clks,
28 int size)
29{
30 int i;
31 struct clk *clk;
32
33 for (i = 0; i < size; i++) {
34 clk = clk_register_fixed_rate(NULL, clks[i].name,
35 clks[i].parent_name,
36 clks[i].flags,
37 clks[i].fixed_rate);
38 if (IS_ERR(clk)) {
39 pr_err("%s: failed to register clock %s\n",
40 __func__, clks[i].name);
41 continue;
42 }
43 if (clks[i].id)
44 unit->clk_table[clks[i].id] = clk;
45 }
46}
47
48void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
49 struct mmp_param_fixed_factor_clk *clks,
50 int size)
51{
52 struct clk *clk;
53 int i;
54
55 for (i = 0; i < size; i++) {
56 clk = clk_register_fixed_factor(NULL, clks[i].name,
57 clks[i].parent_name,
58 clks[i].flags, clks[i].mult,
59 clks[i].div);
60 if (IS_ERR(clk)) {
61 pr_err("%s: failed to register clock %s\n",
62 __func__, clks[i].name);
63 continue;
64 }
65 if (clks[i].id)
66 unit->clk_table[clks[i].id] = clk;
67 }
68}
69
70void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
71 struct mmp_param_general_gate_clk *clks,
72 void __iomem *base, int size)
73{
74 struct clk *clk;
75 int i;
76
77 for (i = 0; i < size; i++) {
78 clk = clk_register_gate(NULL, clks[i].name,
79 clks[i].parent_name,
80 clks[i].flags,
81 base + clks[i].offset,
82 clks[i].bit_idx,
83 clks[i].gate_flags,
84 clks[i].lock);
85
86 if (IS_ERR(clk)) {
87 pr_err("%s: failed to register clock %s\n",
88 __func__, clks[i].name);
89 continue;
90 }
91 if (clks[i].id)
92 unit->clk_table[clks[i].id] = clk;
93 }
94}
95
96void mmp_register_gate_clks(struct mmp_clk_unit *unit,
97 struct mmp_param_gate_clk *clks,
98 void __iomem *base, int size)
99{
100 struct clk *clk;
101 int i;
102
103 for (i = 0; i < size; i++) {
104 clk = mmp_clk_register_gate(NULL, clks[i].name,
105 clks[i].parent_name,
106 clks[i].flags,
107 base + clks[i].offset,
108 clks[i].mask,
109 clks[i].val_enable,
110 clks[i].val_disable,
111 clks[i].gate_flags,
112 clks[i].lock);
113
114 if (IS_ERR(clk)) {
115 pr_err("%s: failed to register clock %s\n",
116 __func__, clks[i].name);
117 continue;
118 }
119 if (clks[i].id)
120 unit->clk_table[clks[i].id] = clk;
121 }
122}
123
124void mmp_register_mux_clks(struct mmp_clk_unit *unit,
125 struct mmp_param_mux_clk *clks,
126 void __iomem *base, int size)
127{
128 struct clk *clk;
129 int i;
130
131 for (i = 0; i < size; i++) {
132 clk = clk_register_mux(NULL, clks[i].name,
133 clks[i].parent_name,
134 clks[i].num_parents,
135 clks[i].flags,
136 base + clks[i].offset,
137 clks[i].shift,
138 clks[i].width,
139 clks[i].mux_flags,
140 clks[i].lock);
141
142 if (IS_ERR(clk)) {
143 pr_err("%s: failed to register clock %s\n",
144 __func__, clks[i].name);
145 continue;
146 }
147 if (clks[i].id)
148 unit->clk_table[clks[i].id] = clk;
149 }
150}
151
152void mmp_register_div_clks(struct mmp_clk_unit *unit,
153 struct mmp_param_div_clk *clks,
154 void __iomem *base, int size)
155{
156 struct clk *clk;
157 int i;
158
159 for (i = 0; i < size; i++) {
160 clk = clk_register_divider(NULL, clks[i].name,
161 clks[i].parent_name,
162 clks[i].flags,
163 base + clks[i].offset,
164 clks[i].shift,
165 clks[i].width,
166 clks[i].div_flags,
167 clks[i].lock);
168
169 if (IS_ERR(clk)) {
170 pr_err("%s: failed to register clock %s\n",
171 __func__, clks[i].name);
172 continue;
173 }
174 if (clks[i].id)
175 unit->clk_table[clks[i].id] = clk;
176 }
177}
178
179void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
180 struct clk *clk)
181{
182 if (IS_ERR_OR_NULL(clk)) {
183 pr_err("CLK %d has invalid pointer %p\n", id, clk);
184 return;
185 }
186	if (id >= unit->nr_clks) {
187 pr_err("CLK %d is invalid\n", id);
188 return;
189 }
190
191 unit->clk_table[id] = clk;
192}
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index ab86dd4a416a..adf9b711b037 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -7,19 +7,123 @@
7#define APBC_NO_BUS_CTRL BIT(0) 7#define APBC_NO_BUS_CTRL BIT(0)
8#define APBC_POWER_CTRL BIT(1) 8#define APBC_POWER_CTRL BIT(1)
9 9
10struct clk_factor_masks { 10
11 unsigned int factor; 11/* Clock type "factor" */
12 unsigned int num_mask; 12struct mmp_clk_factor_masks {
13 unsigned int den_mask; 13 unsigned int factor;
14 unsigned int num_shift; 14 unsigned int num_mask;
15 unsigned int den_shift; 15 unsigned int den_mask;
16 unsigned int num_shift;
17 unsigned int den_shift;
16}; 18};
17 19
18struct clk_factor_tbl { 20struct mmp_clk_factor_tbl {
19 unsigned int num; 21 unsigned int num;
20 unsigned int den; 22 unsigned int den;
21}; 23};
22 24
25struct mmp_clk_factor {
26 struct clk_hw hw;
27 void __iomem *base;
28 struct mmp_clk_factor_masks *masks;
29 struct mmp_clk_factor_tbl *ftbl;
30 unsigned int ftbl_cnt;
31 spinlock_t *lock;
32};
33
34extern struct clk *mmp_clk_register_factor(const char *name,
35 const char *parent_name, unsigned long flags,
36 void __iomem *base, struct mmp_clk_factor_masks *masks,
37 struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
38 spinlock_t *lock);
39
40/* Clock type "mix" */
41#define MMP_CLK_BITS_MASK(width, shift) \
42 (((1 << (width)) - 1) << (shift))
43#define MMP_CLK_BITS_GET_VAL(data, width, shift) \
44 ((data & MMP_CLK_BITS_MASK(width, shift)) >> (shift))
45#define MMP_CLK_BITS_SET_VAL(val, width, shift) \
46 (((val) << (shift)) & MMP_CLK_BITS_MASK(width, shift))
47
48enum {
49 MMP_CLK_MIX_TYPE_V1,
50 MMP_CLK_MIX_TYPE_V2,
51 MMP_CLK_MIX_TYPE_V3,
52};
53
54/* The register layout */
55struct mmp_clk_mix_reg_info {
56 void __iomem *reg_clk_ctrl;
57 void __iomem *reg_clk_sel;
58 u8 width_div;
59 u8 shift_div;
60 u8 width_mux;
61 u8 shift_mux;
62 u8 bit_fc;
63};
64
65/* The suggested clock table from user. */
66struct mmp_clk_mix_clk_table {
67 unsigned long rate;
68 u8 parent_index;
69 unsigned int divisor;
70 unsigned int valid;
71};
72
73struct mmp_clk_mix_config {
74 struct mmp_clk_mix_reg_info reg_info;
75 struct mmp_clk_mix_clk_table *table;
76 unsigned int table_size;
77 u32 *mux_table;
78 struct clk_div_table *div_table;
79 u8 div_flags;
80 u8 mux_flags;
81};
82
83struct mmp_clk_mix {
84 struct clk_hw hw;
85 struct mmp_clk_mix_reg_info reg_info;
86 struct mmp_clk_mix_clk_table *table;
87 u32 *mux_table;
88 struct clk_div_table *div_table;
89 unsigned int table_size;
90 u8 div_flags;
91 u8 mux_flags;
92 unsigned int type;
93 spinlock_t *lock;
94};
95
96extern const struct clk_ops mmp_clk_mix_ops;
97extern struct clk *mmp_clk_register_mix(struct device *dev,
98 const char *name,
99 const char **parent_names,
100 u8 num_parents,
101 unsigned long flags,
102 struct mmp_clk_mix_config *config,
103 spinlock_t *lock);
104
105
106/* Clock type "gate". MMP private gate */
107#define MMP_CLK_GATE_NEED_DELAY BIT(0)
108
109struct mmp_clk_gate {
110 struct clk_hw hw;
111 void __iomem *reg;
112 u32 mask;
113 u32 val_enable;
114 u32 val_disable;
115 unsigned int flags;
116 spinlock_t *lock;
117};
118
119extern const struct clk_ops mmp_clk_gate_ops;
120extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
121 const char *parent_name, unsigned long flags,
122 void __iomem *reg, u32 mask, u32 val_enable,
123 u32 val_disable, unsigned int gate_flags,
124 spinlock_t *lock);
125
126
23extern struct clk *mmp_clk_register_pll2(const char *name, 127extern struct clk *mmp_clk_register_pll2(const char *name,
24 const char *parent_name, unsigned long flags); 128 const char *parent_name, unsigned long flags);
25extern struct clk *mmp_clk_register_apbc(const char *name, 129extern struct clk *mmp_clk_register_apbc(const char *name,
@@ -28,8 +132,108 @@ extern struct clk *mmp_clk_register_apbc(const char *name,
28extern struct clk *mmp_clk_register_apmu(const char *name, 132extern struct clk *mmp_clk_register_apmu(const char *name,
29 const char *parent_name, void __iomem *base, u32 enable_mask, 133 const char *parent_name, void __iomem *base, u32 enable_mask,
30 spinlock_t *lock); 134 spinlock_t *lock);
31extern struct clk *mmp_clk_register_factor(const char *name, 135
32 const char *parent_name, unsigned long flags, 136struct mmp_clk_unit {
33 void __iomem *base, struct clk_factor_masks *masks, 137 unsigned int nr_clks;
34 struct clk_factor_tbl *ftbl, unsigned int ftbl_cnt); 138 struct clk **clk_table;
139 struct clk_onecell_data clk_data;
140};
141
142struct mmp_param_fixed_rate_clk {
143 unsigned int id;
144 char *name;
145 const char *parent_name;
146 unsigned long flags;
147 unsigned long fixed_rate;
148};
149void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
150 struct mmp_param_fixed_rate_clk *clks,
151 int size);
152
153struct mmp_param_fixed_factor_clk {
154 unsigned int id;
155 char *name;
156 const char *parent_name;
157 unsigned long mult;
158 unsigned long div;
159 unsigned long flags;
160};
161void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
162 struct mmp_param_fixed_factor_clk *clks,
163 int size);
164
165struct mmp_param_general_gate_clk {
166 unsigned int id;
167 const char *name;
168 const char *parent_name;
169 unsigned long flags;
170 unsigned long offset;
171 u8 bit_idx;
172 u8 gate_flags;
173 spinlock_t *lock;
174};
175void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
176 struct mmp_param_general_gate_clk *clks,
177 void __iomem *base, int size);
178
179struct mmp_param_gate_clk {
180 unsigned int id;
181 char *name;
182 const char *parent_name;
183 unsigned long flags;
184 unsigned long offset;
185 u32 mask;
186 u32 val_enable;
187 u32 val_disable;
188 unsigned int gate_flags;
189 spinlock_t *lock;
190};
191void mmp_register_gate_clks(struct mmp_clk_unit *unit,
192 struct mmp_param_gate_clk *clks,
193 void __iomem *base, int size);
194
195struct mmp_param_mux_clk {
196 unsigned int id;
197 char *name;
198 const char **parent_name;
199 u8 num_parents;
200 unsigned long flags;
201 unsigned long offset;
202 u8 shift;
203 u8 width;
204 u8 mux_flags;
205 spinlock_t *lock;
206};
207void mmp_register_mux_clks(struct mmp_clk_unit *unit,
208 struct mmp_param_mux_clk *clks,
209 void __iomem *base, int size);
210
211struct mmp_param_div_clk {
212 unsigned int id;
213 char *name;
214 const char *parent_name;
215 unsigned long flags;
216 unsigned long offset;
217 u8 shift;
218 u8 width;
219 u8 div_flags;
220 spinlock_t *lock;
221};
222void mmp_register_div_clks(struct mmp_clk_unit *unit,
223 struct mmp_param_div_clk *clks,
224 void __iomem *base, int size);
225
226#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
227{ \
228 .width_div = (w_d), \
229 .shift_div = (s_d), \
230 .width_mux = (w_m), \
231 .shift_mux = (s_m), \
232 .bit_fc = (fc), \
233}
234
235void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
236 int nr_clks);
237void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
238 struct clk *clk);
35#endif 239#endif
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
new file mode 100644
index 000000000000..b54da1fe73f0
--- /dev/null
+++ b/drivers/clk/mmp/reset.c
@@ -0,0 +1,99 @@
1#include <linux/slab.h>
2#include <linux/io.h>
3#include <linux/of.h>
4#include <linux/of_address.h>
5#include <linux/reset-controller.h>
6
7#include "reset.h"
8
9#define rcdev_to_unit(rcdev) container_of(rcdev, struct mmp_clk_reset_unit, rcdev)
10
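/* Translate a one-cell DT reset specifier (a clock id) into an index into unit->cells[]. */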
11static int mmp_of_reset_xlate(struct reset_controller_dev *rcdev,
12 const struct of_phandle_args *reset_spec)
13{
14 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
15 struct mmp_clk_reset_cell *cell;
16 int i;
17
18 if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
19 return -EINVAL;
20
21 for (i = 0; i < rcdev->nr_resets; i++) {
22 cell = &unit->cells[i];
23 if (cell->clk_id == reset_spec->args[0])
24 break;
25 }
26
27 if (i == rcdev->nr_resets)
28 return -EINVAL;
29
30 return i;
31}
32
33static int mmp_clk_reset_assert(struct reset_controller_dev *rcdev,
34 unsigned long id)
35{
36 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
37 struct mmp_clk_reset_cell *cell;
38 unsigned long flags = 0;
39 u32 val;
40
41 cell = &unit->cells[id];
42 if (cell->lock)
43 spin_lock_irqsave(cell->lock, flags);
44
45 val = readl(cell->reg);
46 val |= cell->bits;
47 writel(val, cell->reg);
48
49 if (cell->lock)
50 spin_unlock_irqrestore(cell->lock, flags);
51
52 return 0;
53}
54
55static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
56 unsigned long id)
57{
58 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
59 struct mmp_clk_reset_cell *cell;
60 unsigned long flags = 0;
61 u32 val;
62
63 cell = &unit->cells[id];
64 if (cell->lock)
65 spin_lock_irqsave(cell->lock, flags);
66
67 val = readl(cell->reg);
68 val &= ~cell->bits;
69 writel(val, cell->reg);
70
71 if (cell->lock)
72 spin_unlock_irqrestore(cell->lock, flags);
73
74 return 0;
75}
76
77static struct reset_control_ops mmp_clk_reset_ops = {
78 .assert = mmp_clk_reset_assert,
79 .deassert = mmp_clk_reset_deassert,
80};
81
82void mmp_clk_reset_register(struct device_node *np,
83 struct mmp_clk_reset_cell *cells, int nr_resets)
84{
85 struct mmp_clk_reset_unit *unit;
86
87 unit = kzalloc(sizeof(*unit), GFP_KERNEL);
88 if (!unit)
89 return;
90
91 unit->cells = cells;
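	/* One specifier cell: the clock id, matched by mmp_of_reset_xlate() above. */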
92 unit->rcdev.of_reset_n_cells = 1;
93 unit->rcdev.nr_resets = nr_resets;
94 unit->rcdev.ops = &mmp_clk_reset_ops;
95 unit->rcdev.of_node = np;
96 unit->rcdev.of_xlate = mmp_of_reset_xlate;
97
98 reset_controller_register(&unit->rcdev);
99}
diff --git a/drivers/clk/mmp/reset.h b/drivers/clk/mmp/reset.h
new file mode 100644
index 000000000000..be8b1a7000f7
--- /dev/null
+++ b/drivers/clk/mmp/reset.h
@@ -0,0 +1,31 @@
1#ifndef __MACH_MMP_CLK_RESET_H
2#define __MACH_MMP_CLK_RESET_H
3
4#include <linux/reset-controller.h>
5
6#define MMP_RESET_INVERT 1
7
8struct mmp_clk_reset_cell {
9 unsigned int clk_id;
10 void __iomem *reg;
11 u32 bits;
12 unsigned int flags;
13 spinlock_t *lock;
14};
15
16struct mmp_clk_reset_unit {
17 struct reset_controller_dev rcdev;
18 struct mmp_clk_reset_cell *cells;
19};
20
21#ifdef CONFIG_RESET_CONTROLLER
22void mmp_clk_reset_register(struct device_node *np,
23 struct mmp_clk_reset_cell *cells, int nr_resets);
24#else
25static inline void mmp_clk_reset_register(struct device_node *np,
26 struct mmp_clk_reset_cell *cells, int nr_resets)
27{
28}
29#endif
30
31#endif
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 4ff2abcd500b..38e915344605 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,2 +1,3 @@
1obj-y += clk-pxa.o 1obj-y += clk-pxa.o
2obj-$(CONFIG_PXA25x) += clk-pxa25x.o
2obj-$(CONFIG_PXA27x) += clk-pxa27x.o 3obj-$(CONFIG_PXA27x) += clk-pxa27x.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index ef3c05389c0a..4e834753ab09 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -26,12 +26,20 @@ static struct clk_onecell_data onecell_data = {
26 .clk_num = CLK_MAX, 26 .clk_num = CLK_MAX,
27}; 27};
28 28
29#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk_cken, hw) 29struct pxa_clk {
30 struct clk_hw hw;
31 struct clk_fixed_factor lp;
32 struct clk_fixed_factor hp;
33 struct clk_gate gate;
34 bool (*is_in_low_power)(void);
35};
36
37#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
30 38
31static unsigned long cken_recalc_rate(struct clk_hw *hw, 39static unsigned long cken_recalc_rate(struct clk_hw *hw,
32 unsigned long parent_rate) 40 unsigned long parent_rate)
33{ 41{
34 struct pxa_clk_cken *pclk = to_pxa_clk(hw); 42 struct pxa_clk *pclk = to_pxa_clk(hw);
35 struct clk_fixed_factor *fix; 43 struct clk_fixed_factor *fix;
36 44
37 if (!pclk->is_in_low_power || pclk->is_in_low_power()) 45 if (!pclk->is_in_low_power || pclk->is_in_low_power())
@@ -48,7 +56,7 @@ static struct clk_ops cken_rate_ops = {
48 56
49static u8 cken_get_parent(struct clk_hw *hw) 57static u8 cken_get_parent(struct clk_hw *hw)
50{ 58{
51 struct pxa_clk_cken *pclk = to_pxa_clk(hw); 59 struct pxa_clk *pclk = to_pxa_clk(hw);
52 60
53 if (!pclk->is_in_low_power) 61 if (!pclk->is_in_low_power)
54 return 0; 62 return 0;
@@ -69,29 +77,32 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
69 clk_register_clkdev(clk, con_id, dev_id); 77 clk_register_clkdev(clk, con_id, dev_id);
70} 78}
71 79
72int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks) 80int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
73{ 81{
74 int i; 82 int i;
75 struct pxa_clk_cken *pclk; 83 struct pxa_clk *pxa_clk;
76 struct clk *clk; 84 struct clk *clk;
77 85
78 for (i = 0; i < nb_clks; i++) { 86 for (i = 0; i < nb_clks; i++) {
79 pclk = clks + i; 87 pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
80 pclk->gate.lock = &lock; 88 pxa_clk->is_in_low_power = clks[i].is_in_low_power;
81 clk = clk_register_composite(NULL, pclk->name, 89 pxa_clk->lp = clks[i].lp;
82 pclk->parent_names, 2, 90 pxa_clk->hp = clks[i].hp;
83 &pclk->hw, &cken_mux_ops, 91 pxa_clk->gate = clks[i].gate;
84 &pclk->hw, &cken_rate_ops, 92 pxa_clk->gate.lock = &lock;
85 &pclk->gate.hw, &clk_gate_ops, 93 clk = clk_register_composite(NULL, clks[i].name,
86 pclk->flags); 94 clks[i].parent_names, 2,
87 clkdev_pxa_register(pclk->ckid, pclk->con_id, pclk->dev_id, 95 &pxa_clk->hw, &cken_mux_ops,
88 clk); 96 &pxa_clk->hw, &cken_rate_ops,
97 &pxa_clk->gate.hw, &clk_gate_ops,
98 clks[i].flags);
99 clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
100 clks[i].dev_id, clk);
89 } 101 }
90 return 0; 102 return 0;
91} 103}
92 104
93static void __init pxa_dt_clocks_init(struct device_node *np) 105void __init clk_pxa_dt_common_init(struct device_node *np)
94{ 106{
95 of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data); 107 of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
96} 108}
97CLK_OF_DECLARE(pxa_clks, "marvell,pxa-clocks", pxa_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 5fe219d06b49..323965430111 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -25,7 +25,7 @@
25 static struct clk_ops name ## _rate_ops = { \ 25 static struct clk_ops name ## _rate_ops = { \
26 .recalc_rate = name ## _get_rate, \ 26 .recalc_rate = name ## _get_rate, \
27 }; \ 27 }; \
28 static struct clk *clk_register_ ## name(void) \ 28 static struct clk * __init clk_register_ ## name(void) \
29 { \ 29 { \
30 return clk_register_composite(NULL, clk_name, \ 30 return clk_register_composite(NULL, clk_name, \
31 name ## _parents, \ 31 name ## _parents, \
@@ -40,7 +40,7 @@
40 static struct clk_ops name ## _rate_ops = { \ 40 static struct clk_ops name ## _rate_ops = { \
41 .recalc_rate = name ## _get_rate, \ 41 .recalc_rate = name ## _get_rate, \
42 }; \ 42 }; \
43 static struct clk *clk_register_ ## name(void) \ 43 static struct clk * __init clk_register_ ## name(void) \
44 { \ 44 { \
45 return clk_register_composite(NULL, clk_name, \ 45 return clk_register_composite(NULL, clk_name, \
46 name ## _parents, \ 46 name ## _parents, \
@@ -66,7 +66,7 @@
66 * | Clock | --- | / div_hp | 66 * | Clock | --- | / div_hp |
67 * +------------+ +-----------+ 67 * +------------+ +-----------+
68 */ 68 */
69struct pxa_clk_cken { 69struct desc_clk_cken {
70 struct clk_hw hw; 70 struct clk_hw hw;
71 int ckid; 71 int ckid;
72 const char *name; 72 const char *name;
@@ -102,6 +102,7 @@ static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
102 102
103extern void clkdev_pxa_register(int ckid, const char *con_id, 103extern void clkdev_pxa_register(int ckid, const char *con_id,
104 const char *dev_id, struct clk *clk); 104 const char *dev_id, struct clk *clk);
105extern int clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks); 105extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
106void clk_pxa_dt_common_init(struct device_node *np);
106 107
107#endif 108#endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
new file mode 100644
index 000000000000..6cd88d963a7f
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -0,0 +1,273 @@
+/*
+ * Marvell PXA25x family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa25x.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <mach/pxa25x.h>
+#include <mach/pxa2xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+	PXA_CORE_RUN = 0,
+	PXA_CORE_TURBO,
+};
+
+/*
+ * Various clock factors driven by the CCCR register.
+ */
+
+/* Crystal Frequency to Memory Frequency Multiplier (L) */
+static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
+
+/* Memory Frequency to Run Mode Frequency Multiplier (M) */
+static unsigned char M_clk_mult[4] = { 0, 1, 2, 4 };
+
+/* Run Mode Frequency to Turbo Mode Frequency Multiplier (N) */
+/* Note: we store the value N * 2 here. */
+static unsigned char N2_clk_mult[8] = { 0, 0, 2, 3, 4, 0, 6, 0 };
+
+static const char * const get_freq_khz[] = {
+	"core", "run", "cpll", "memory"
+};
+
+/*
+ * Get the clock frequency as reflected by CCCR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa25x_get_clk_frequency_khz(int info)
+{
+	struct clk *clk;
+	unsigned long clks[5];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(get_freq_khz); i++) {
+		clk = clk_get(NULL, get_freq_khz[i]);
+		if (IS_ERR(clk)) {
+			clks[i] = 0;
+		} else {
+			clks[i] = clk_get_rate(clk);
+			clk_put(clk);
+		}
+	}
+
+	if (info) {
+		pr_info("Run Mode clock: %ld.%02ldMHz\n",
+			clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+		pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+			clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+		pr_info("Memory clock: %ld.%02ldMHz\n",
+			clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+	}
+
+	return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	unsigned long cccr = CCCR;
+	unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
+
+	return parent_rate / m;
+}
+PARENTS(clk_pxa25x_memory) = { "run" };
+RATE_RO_OPS(clk_pxa25x_memory, "memory");
+
+PARENTS(pxa25x_pbus95) = { "ppll_95_85mhz", "ppll_95_85mhz" };
+PARENTS(pxa25x_pbus147) = { "ppll_147_46mhz", "ppll_147_46mhz" };
+PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
+
+#define PXA25X_CKEN(dev_id, con_id, parents, mult, div,		\
+		    bit, is_lp, flags)					\
+	PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div,	\
+		 is_lp, &CKEN, CKEN_ ## bit, flags)
+#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)	\
+	PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp,	\
+		    div_hp, bit, NULL, 0)
+#define PXA25X_PBUS147_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)\
+	PXA25X_CKEN(dev_id, con_id, pxa25x_pbus147_parents, mult_hp,	\
+		    div_hp, bit, NULL, 0)
+#define PXA25X_OSC3_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)	\
+	PXA25X_CKEN(dev_id, con_id, pxa25x_osc3_parents, mult_hp,	\
+		    div_hp, bit, NULL, 0)
+
+#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay)		\
+	PXA_CKEN_1RATE(dev_id, con_id, bit, parents,			\
+		       &CKEN, CKEN_ ## bit, 0)
+#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay)	\
+	PXA_CKEN_1RATE(dev_id, con_id, bit, parents,			\
+		       &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+
+static struct desc_clk_cken pxa25x_clocks[] __initdata = {
+	PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
+	PXA25X_PBUS95_CKEN("pxa2xx-i2c.0", NULL, I2C, 1, 3, 0),
+	PXA25X_PBUS95_CKEN("pxa2xx-ir", "FICPCLK", FICP, 1, 2, 0),
+	PXA25X_PBUS95_CKEN("pxa25x-udc", NULL, USB, 1, 2, 5),
+	PXA25X_PBUS147_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 10, 1),
+	PXA25X_PBUS147_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 10, 1),
+	PXA25X_PBUS147_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 10, 1),
+	PXA25X_PBUS147_CKEN("pxa2xx-uart.3", NULL, HWUART, 1, 10, 1),
+	PXA25X_PBUS147_CKEN("pxa2xx-i2s", NULL, I2S, 1, 10, 0),
+	PXA25X_PBUS147_CKEN(NULL, "AC97CLK", AC97, 1, 12, 0),
+	PXA25X_OSC3_CKEN("pxa25x-ssp.0", NULL, SSP, 1, 1, 0),
+	PXA25X_OSC3_CKEN("pxa25x-nssp.1", NULL, NSSP, 1, 1, 0),
+	PXA25X_OSC3_CKEN("pxa25x-nssp.2", NULL, ASSP, 1, 1, 0),
+	PXA25X_OSC3_CKEN("pxa25x-pwm.0", NULL, PWM0, 1, 1, 0),
+	PXA25X_OSC3_CKEN("pxa25x-pwm.1", NULL, PWM1, 1, 1, 0),
+
+	PXA25X_CKEN_1RATE("pxa2xx-fb", NULL, LCD, clk_pxa25x_memory_parents, 0),
+	PXA25X_CKEN_1RATE_AO("pxa2xx-pcmcia", NULL, MEMC,
+			     clk_pxa25x_memory_parents, 0),
+};
+
+static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
+{
+	unsigned long clkcfg;
+	unsigned int t;
+
+	asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+	t = clkcfg & (1 << 0);
+	if (t)
+		return PXA_CORE_TURBO;
+	return PXA_CORE_RUN;
+}
+
+static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	return parent_rate;
+}
+PARENTS(clk_pxa25x_core) = { "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
+
+static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	unsigned long cccr = CCCR;
+	unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+	return (parent_rate / n2) * 2;
+}
+PARENTS(clk_pxa25x_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa25x_run, "run");
+
+static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	unsigned long clkcfg, cccr = CCCR;
+	unsigned int l, m, n2, t;
+
+	asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+	t = clkcfg & (1 << 0);
+	l = L_clk_mult[(cccr >> 0) & 0x1f];
+	m = M_clk_mult[(cccr >> 5) & 0x03];
+	n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+	if (t)
+		return m * l * n2 * parent_rate / 2;
+	return m * l * parent_rate;
+}
+PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
+RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
+
+static void __init pxa25x_register_core(void)
+{
+	clk_register_clk_pxa25x_cpll();
+	clk_register_clk_pxa25x_run();
+	clkdev_pxa_register(CLK_CORE, "core", NULL,
+			    clk_register_clk_pxa25x_core());
+}
+
+static void __init pxa25x_register_plls(void)
+{
+	clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
+				CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+				3686400);
+	clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+				CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+				32768);
+	clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+	clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
+				  0, 26, 1);
+	clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
+				  0, 40, 1);
+}
+
+static void __init pxa25x_base_clocks_init(void)
+{
+	pxa25x_register_plls();
+	pxa25x_register_core();
+	clk_register_clk_pxa25x_memory();
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+	{ .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+	const char *con_id;
+	const char *dev_id;
+	const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+	DUMMY_CLK(NULL, "pxa25x-gpio", "osc_32_768khz"),
+	DUMMY_CLK(NULL, "pxa26x-gpio", "osc_32_768khz"),
+	DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
+	DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
+	DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+	DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+	DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+};
+
+static void __init pxa25x_dummy_clocks_init(void)
+{
+	struct clk *clk;
+	struct dummy_clk *d;
+	const char *name;
+	int i;
+
+	/*
+	 * All pinctrl logic has been wiped out of the clock driver, especially
+	 * for gpio11 and gpio12 outputs. Machine code should ensure proper pin
+	 * control (ie. pxa2xx_mfp_config() invocation).
+	 */
+	for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+		d = &dummy_clks[i];
+		name = d->dev_id ? d->dev_id : d->con_id;
+		clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+		clk_register_clkdev(clk, d->con_id, d->dev_id);
+	}
+}
+
+int __init pxa25x_clocks_init(void)
+{
+	pxa25x_base_clocks_init();
+	pxa25x_dummy_clocks_init();
+	return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+}
+
+static void __init pxa25x_dt_clocks_init(struct device_node *np)
+{
+	pxa25x_clocks_init();
+	clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
+	       pxa25x_dt_clocks_init);
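To make the rate callbacks above concrete, here is a standalone user-space sketch of the same arithmetic; the L/M/N2 multiplier values are invented for illustration, not read from a real CCCR:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long osc = 3686400;	/* "osc_3_6864mhz" */
		unsigned int l = 32;	/* L_clk_mult[2] */
		unsigned int m = 1;	/* M_clk_mult[1] */
		unsigned int n2 = 4;	/* N2_clk_mult[4], i.e. N = 2 */
		int turbo = 1;		/* CLKCFG bit 0 */

		/* mirrors clk_pxa25x_cpll_get_rate() */
		unsigned long cpll = turbo ? m * l * n2 * osc / 2 : m * l * osc;
		/* mirrors clk_pxa25x_run_get_rate(), parent is "cpll" */
		unsigned long run = (cpll / n2) * 2;
		/* mirrors clk_pxa25x_memory_get_rate(), parent is "run" */
		unsigned long memory = run / m;

		/* prints: cpll=235929600 run=117964800 memory=117964800 */
		printf("cpll=%lu run=%lu memory=%lu\n", cpll, run, memory);
		return 0;
	}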
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 88b9fe13fa44..5f9b54b024b9 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -111,7 +111,7 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
 	PXA_CKEN_1RATE(dev_id, con_id, bit, parents,		\
 		       &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
 
-static struct pxa_clk_cken pxa27x_clocks[] = {
+static struct desc_clk_cken pxa27x_clocks[] __initdata = {
 	PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
 	PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1),
 	PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1),
@@ -368,3 +368,10 @@ static int __init pxa27x_clocks_init(void)
 	return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
 }
 postcore_initcall(pxa27x_clocks_init);
+
+static void __init pxa27x_dt_clocks_init(struct device_node *np)
+{
+	pxa27x_clocks_init();
+	clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index b823bc3b6250..60873a7f45d9 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,7 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
-		       unsigned long *p_rate, struct clk **p)
+		       unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_pll *pll = to_clk_pll(hw);
 	const struct pll_freq_tbl *f;
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index b6e6959e89aa..0b93972c8807 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,16 +368,17 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
 		const struct freq_tbl *f, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	unsigned long clk_flags;
+	struct clk *p;
 
 	f = qcom_find_freq(f, rate);
 	if (!f)
 		return -EINVAL;
 
 	clk_flags = __clk_get_flags(hw->clk);
-	*p = clk_get_parent_by_index(hw->clk, f->src);
+	p = clk_get_parent_by_index(hw->clk, f->src);
 	if (clk_flags & CLK_SET_RATE_PARENT) {
 		rate = rate * f->pre_div;
 		if (f->n) {
@@ -387,15 +388,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 			rate = tmp;
 		}
 	} else {
-		rate = __clk_get_rate(*p);
+		rate = __clk_get_rate(p);
 	}
+	*p_hw = __clk_get_hw(p);
 	*p_rate = rate;
 
 	return f->freq;
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 
@@ -403,7 +405,7 @@ static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
@@ -411,13 +413,15 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
+	struct clk *p;
 
-	*p = clk_get_parent_by_index(hw->clk, f->src);
-	*p_rate = __clk_round_rate(*p, rate);
+	p = clk_get_parent_by_index(hw->clk, f->src);
+	*p_hw = __clk_get_hw(p);
+	*p_rate = __clk_round_rate(p, rate);
 
 	return *p_rate;
 }
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index cfa9eb4fe9ca..08b8b3729f53 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -175,16 +175,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
 		const struct freq_tbl *f, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	unsigned long clk_flags;
+	struct clk *p;
 
 	f = qcom_find_freq(f, rate);
 	if (!f)
 		return -EINVAL;
 
 	clk_flags = __clk_get_flags(hw->clk);
-	*p = clk_get_parent_by_index(hw->clk, f->src);
+	p = clk_get_parent_by_index(hw->clk, f->src);
 	if (clk_flags & CLK_SET_RATE_PARENT) {
 		if (f->pre_div) {
 			rate /= 2;
@@ -198,15 +199,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 			rate = tmp;
 		}
 	} else {
-		rate = __clk_get_rate(*p);
+		rate = __clk_get_rate(p);
 	}
+	*p_hw = __clk_get_hw(p);
 	*p_rate = rate;
 
 	return f->freq;
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 
@@ -359,7 +361,7 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
@@ -371,7 +373,7 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
 	u32 hid_div;
 
 	/* Force the correct parent */
-	*p = clk_get_parent_by_index(hw->clk, f->src);
+	*p = __clk_get_hw(clk_get_parent_by_index(hw->clk, f->src));
 
 	if (src_rate == 810000000)
 		frac = frac_table_810m;
@@ -410,18 +412,20 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
 	unsigned long parent_rate, div;
 	u32 mask = BIT(rcg->hid_width) - 1;
+	struct clk *p;
 
 	if (rate == 0)
 		return -EINVAL;
 
-	*p = clk_get_parent_by_index(hw->clk, f->src);
-	*p_rate = parent_rate = __clk_round_rate(*p, rate);
+	p = clk_get_parent_by_index(hw->clk, f->src);
+	*p_hw = __clk_get_hw(p);
+	*p_rate = parent_rate = __clk_round_rate(p, rate);
 
 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
 	div = min_t(u32, div, mask);
@@ -472,14 +476,16 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *p_rate, struct clk **p)
+		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	unsigned long request, src_rate;
 	int delta = 100000;
 	const struct freq_tbl *f = rcg->freq_tbl;
 	const struct frac_entry *frac = frac_table_pixel;
-	struct clk *parent = *p = clk_get_parent_by_index(hw->clk, f->src);
+	struct clk *parent = clk_get_parent_by_index(hw->clk, f->src);
+
+	*p = __clk_get_hw(parent);
 
 	for (; frac->num; frac++) {
 		request = (rate * frac->den) / frac->num;
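All the qcom hunks above apply the same mechanical conversion: the .determine_rate callback now hands its chosen parent back as a struct clk_hw *, so each implementation resolves the parent as a struct clk first and translates it with __clk_get_hw(). A minimal kernel-context sketch of the resulting shape (driver specifics elided):

	#include <linux/clk-provider.h>

	static long example_determine_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *p_rate, struct clk_hw **p_hw)
	{
		struct clk *p = clk_get_parent_by_index(hw->clk, 0);

		*p_hw = __clk_get_hw(p);		/* report parent as clk_hw */
		*p_rate = __clk_round_rate(p, rate);	/* best rate the parent gives */

		return *p_rate;
	}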
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index bd8514d63634..2714097f90db 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,6 +6,7 @@ obj-y += clk-rockchip.o
 obj-y	+= clk.o
 obj-y	+= clk-pll.o
 obj-y	+= clk-cpu.o
+obj-y	+= clk-mmc-phase.o
 obj-$(CONFIG_RESET_CONTROLLER)	+= softrst.o
 
 obj-y	+= clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
new file mode 100644
index 000000000000..c842e3b60f21
--- /dev/null
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Google, Inc
+ * Author: Alexandru M Stan <amstan@chromium.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+struct rockchip_mmc_clock {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	int		id;
+	int		shift;
+};
+
+#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
+
+#define RK3288_MMC_CLKGEN_DIV 2
+
+static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	return parent_rate / RK3288_MMC_CLKGEN_DIV;
+}
+
+#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+#define ROCKCHIP_MMC_DEGREE_MASK 0x3
+#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
+#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+
+#define PSECS_PER_SEC 1000000000000LL
+
+/*
+ * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
+ * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
+ */
+#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
+
+static int rockchip_mmc_get_phase(struct clk_hw *hw)
+{
+	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+	unsigned long rate = clk_get_rate(hw->clk);
+	u32 raw_value;
+	u16 degrees;
+	u32 delay_num = 0;
+
+	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+
+	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
+		/* degrees/delaynum * 10000 */
+		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
+					36 * (rate / 1000000);
+
+		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
+		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
+		degrees += delay_num * factor / 10000;
+	}
+
+	return degrees % 360;
+}
+
+static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+	unsigned long rate = clk_get_rate(hw->clk);
+	u8 nineties, remainder;
+	u8 delay_num;
+	u32 raw_value;
+	u64 delay;
+
+	/* allow 22 to be 22.5 */
+	degrees++;
+	/* floor to 22.5 increment */
+	degrees -= ((degrees) * 10 % 225) / 10;
+
+	nineties = degrees / 90;
+	/* 22.5 multiples */
+	remainder = (degrees % 90) / 22;
+
+	delay = PSECS_PER_SEC;
+	do_div(delay, rate);
+	/* / 360 / 22.5 */
+	do_div(delay, 16);
+	do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
+
+	delay *= remainder;
+	delay_num = (u8) min(delay, 255ULL);
+
+	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
+	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
+	raw_value |= nineties;
+	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
+
+	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
+		__clk_get_name(hw->clk), degrees, delay_num,
+		mmc_clock->reg, raw_value >> (mmc_clock->shift),
+		rockchip_mmc_get_phase(hw)
+	);
+
+	return 0;
+}
+
+static const struct clk_ops rockchip_mmc_clk_ops = {
+	.recalc_rate	= rockchip_mmc_recalc,
+	.get_phase	= rockchip_mmc_get_phase,
+	.set_phase	= rockchip_mmc_set_phase,
+};
+
+struct clk *rockchip_clk_register_mmc(const char *name,
+				const char **parent_names, u8 num_parents,
+				void __iomem *reg, int shift)
+{
+	struct clk_init_data init;
+	struct rockchip_mmc_clock *mmc_clock;
+	struct clk *clk;
+
+	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
+	if (!mmc_clock)
+		return NULL;
+
+	init.num_parents = num_parents;
+	init.parent_names = parent_names;
+	init.ops = &rockchip_mmc_clk_ops;
+
+	mmc_clock->hw.init = &init;
+	mmc_clock->reg = reg;
+	mmc_clock->shift = shift;
+
+	if (name)
+		init.name = name;
+
+	clk = clk_register(NULL, &mmc_clock->hw);
+	if (IS_ERR(clk))
+		goto err_free;
+
+	return clk;
+
+err_free:
+	kfree(mmc_clock);
+	return NULL;
+}
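To make the rockchip_mmc_set_phase() arithmetic above concrete: asking for 112 degrees at a 150 MHz card clock floors to 90 + 22.5 degrees, and the 22.5-degree remainder is backed by ~60ps delay elements. A standalone user-space sketch of the same integer math (plain division stands in for do_div()):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned long rate = 150000000;	/* example card clock */
		int degrees = 112;		/* requested phase */
		uint64_t delay;
		int nineties, remainder;

		degrees++;				/* allow 22 to mean 22.5 */
		degrees -= ((degrees) * 10 % 225) / 10;	/* floor to 22.5 steps */
		nineties = degrees / 90;		/* 1: one 90-degree step */
		remainder = (degrees % 90) / 22;	/* 1: one 22.5-degree step */

		delay = 1000000000000ULL / rate;	/* ps per clock cycle */
		delay /= 16;				/* / 360 deg * 22.5 deg */
		delay /= 60;				/* / 60ps per element */
		delay *= remainder;

		/* prints: nineties=1 delay_num=6 */
		printf("nineties=%d delay_num=%llu\n", nineties,
		       (unsigned long long)delay);
		return 0;
	}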
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index a3e886a38480..f8d3baf275b2 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -39,6 +39,7 @@ struct rockchip_clk_pll {
 	int			lock_offset;
 	unsigned int		lock_shift;
 	enum rockchip_pll_type	type;
+	u8			flags;
 	const struct rockchip_pll_rate_table *rate_table;
 	unsigned int		rate_count;
 	spinlock_t		*lock;
@@ -257,6 +258,55 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
 	return !(pllcon & RK3066_PLLCON3_PWRDOWN);
 }
 
+static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	const struct rockchip_pll_rate_table *rate;
+	unsigned int nf, nr, no, bwadj;
+	unsigned long drate;
+	u32 pllcon;
+
+	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+		return;
+
+	drate = __clk_get_rate(hw->clk);
+	rate = rockchip_get_pll_settings(pll, drate);
+
+	/* when no rate setting for the current rate, rely on clk_set_rate */
+	if (!rate)
+		return;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+	nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
+	no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+	nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
+	bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
+
+	pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
+		 __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
+		 rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
+	if (rate->nr != nr || rate->no != no || rate->nf != nf
+					     || rate->bwadj != bwadj) {
+		struct clk *parent = __clk_get_parent(hw->clk);
+		unsigned long prate;
+
+		if (!parent) {
+			pr_warn("%s: parent of %s not available\n",
+				__func__, __clk_get_name(hw->clk));
+			return;
+		}
+
+		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+			 __func__, __clk_get_name(hw->clk));
+		prate = __clk_get_rate(parent);
+		rockchip_rk3066_pll_set_rate(hw, drate, prate);
+	}
+}
+
 static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
 	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
 	.enable = rockchip_rk3066_pll_enable,
@@ -271,6 +321,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
 	.enable = rockchip_rk3066_pll_enable,
 	.disable = rockchip_rk3066_pll_disable,
 	.is_enabled = rockchip_rk3066_pll_is_enabled,
+	.init = rockchip_rk3066_pll_init,
 };
 
 /*
@@ -282,7 +333,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 		void __iomem *base, int con_offset, int grf_lock_offset,
 		int lock_shift, int mode_offset, int mode_shift,
 		struct rockchip_pll_rate_table *rate_table,
-		spinlock_t *lock)
+		u8 clk_pll_flags, spinlock_t *lock)
 {
 	const char *pll_parents[3];
 	struct clk_init_data init;
@@ -345,8 +396,22 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 	pll->reg_base = base + con_offset;
 	pll->lock_offset = grf_lock_offset;
 	pll->lock_shift = lock_shift;
+	pll->flags = clk_pll_flags;
 	pll->lock = lock;
 
+	/* create the mux on top of the real pll */
+	pll->pll_mux_ops = &clk_mux_ops;
+	pll_mux = &pll->pll_mux;
+	pll_mux->reg = base + mode_offset;
+	pll_mux->shift = mode_shift;
+	pll_mux->mask = PLL_MODE_MASK;
+	pll_mux->flags = 0;
+	pll_mux->lock = lock;
+	pll_mux->hw.init = &init;
+
+	if (pll_type == pll_rk3066)
+		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
 	pll_clk = clk_register(NULL, &pll->hw);
 	if (IS_ERR(pll_clk)) {
 		pr_err("%s: failed to register pll clock %s : %ld\n",
@@ -355,10 +420,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 		goto err_pll;
 	}
 
-	/* create the mux on top of the real pll */
-	pll->pll_mux_ops = &clk_mux_ops;
-	pll_mux = &pll->pll_mux;
-
 	/* the actual muxing is xin24m, pll-output, xin32k */
 	pll_parents[0] = parent_names[0];
 	pll_parents[1] = pll_name;
@@ -370,16 +431,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 	init.parent_names = pll_parents;
 	init.num_parents = ARRAY_SIZE(pll_parents);
 
-	pll_mux->reg = base + mode_offset;
-	pll_mux->shift = mode_shift;
-	pll_mux->mask = PLL_MODE_MASK;
-	pll_mux->flags = 0;
-	pll_mux->lock = lock;
-	pll_mux->hw.init = &init;
-
-	if (pll_type == pll_rk3066)
-		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
 	mux_clk = clk_register(NULL, &pll_mux->hw);
 	if (IS_ERR(mux_clk))
 		goto err_mux;
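The init hook above only re-applies a rate-table entry when the decoded NR/NF/NO/BWADJ fields disagree with it; the decode implies the usual rk3066-style PLL relation, rate = parent * NF / (NR * NO). A quick user-space check of the two 594 MHz parameter sets that appear in the rk3288 table below (the old entry and its new BWADJ replacement) confirms both hit the same rate:

	#include <stdio.h>

	static unsigned long pll_rate(unsigned long long parent, unsigned int nr,
				      unsigned int nf, unsigned int no)
	{
		return (unsigned long)(parent * nf / (nr * no));
	}

	int main(void)
	{
		/* old: RK3066_PLL_RATE(594000000, 2, 198, 4) -> 24 MHz * 198 / (2 * 4) */
		printf("%lu\n", pll_rate(24000000, 2, 198, 4));	/* 594000000 */
		/* new: RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1) -> 24 MHz * 198 / 8 */
		printf("%lu\n", pll_rate(24000000, 1, 198, 8));	/* 594000000 */
		return 0;
	}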
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index beed49c79126..c54078960847 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -212,13 +212,13 @@ PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
 
 static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
 	[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
-		     RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
+		     RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
 	[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
-		     RK2928_MODE_CON, 4, 5, NULL),
+		     RK2928_MODE_CON, 4, 5, 0, NULL),
 	[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
-		     RK2928_MODE_CON, 8, 7, rk3188_pll_rates),
+		     RK2928_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
 	[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
-		     RK2928_MODE_CON, 12, 8, rk3188_pll_rates),
+		     RK2928_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
 };
 
 #define MFLAGS CLK_MUX_HIWORD_MASK
@@ -257,9 +257,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
257 GATE(0, "hclk_vdpu", "aclk_vdpu", 0, 257 GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
258 RK2928_CLKGATE_CON(3), 12, GFLAGS), 258 RK2928_CLKGATE_CON(3), 12, GFLAGS),
259 259
260 GATE(0, "gpll_ddr", "gpll", 0, 260 GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
261 RK2928_CLKGATE_CON(1), 7, GFLAGS), 261 RK2928_CLKGATE_CON(1), 7, GFLAGS),
262 COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0, 262 COMPOSITE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
263 RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 263 RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
264 RK2928_CLKGATE_CON(0), 2, GFLAGS), 264 RK2928_CLKGATE_CON(0), 2, GFLAGS),
265 265
@@ -270,10 +270,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 			RK2928_CLKGATE_CON(0), 6, GFLAGS),
 	GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
 			RK2928_CLKGATE_CON(0), 5, GFLAGS),
-	GATE(0, "hclk_cpu", "hclk_cpu_pre", 0,
+	GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
 			RK2928_CLKGATE_CON(0), 4, GFLAGS),
 
-	COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0,
+	COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
 			RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
 			RK2928_CLKGATE_CON(3), 0, GFLAGS),
 	COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -304,9 +304,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	 * the 480m are generated inside the usb block from these clocks,
 	 * but they are also a source for the hsicphy clock.
 	 */
-	GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+	GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
 			RK2928_CLKGATE_CON(1), 5, GFLAGS),
-	GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+	GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
 			RK2928_CLKGATE_CON(1), 6, GFLAGS),
 
 	COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -320,9 +320,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
320 COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0, 320 COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
321 RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS, 321 RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
322 RK2928_CLKGATE_CON(2), 6, GFLAGS), 322 RK2928_CLKGATE_CON(2), 6, GFLAGS),
323 COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 323 COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
324 RK2928_CLKSEL_CON(23), 0, 324 RK2928_CLKSEL_CON(23), 0,
325 RK2928_CLKGATE_CON(2), 7, 0, GFLAGS), 325 RK2928_CLKGATE_CON(2), 7, GFLAGS),
326 MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0, 326 MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
327 RK2928_CLKSEL_CON(22), 4, 2, MFLAGS), 327 RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
328 328
@@ -330,6 +330,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 			RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
 			RK2928_CLKGATE_CON(2), 8, GFLAGS),
 
+	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 13, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+			RK2928_CLKSEL_CON(9), 0,
+			RK2928_CLKGATE_CON(0), 14, GFLAGS),
+	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
 	/*
 	 * Clock-Architecture Diagram 4
 	 */
@@ -399,8 +408,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 
 	/* aclk_cpu gates */
 	GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
-	GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS),
-	GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+	GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+	GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS),
 
 	/* hclk_cpu gates */
 	GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
@@ -410,14 +419,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	/* hclk_ahb2apb is part of a clk branch */
 	GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
 	GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
-	GATE(HCLK_LCDC1, "hclk_lcdc1", "aclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
+	GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
 	GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
 	GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
 	GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
 
 	/* hclk_peri gates */
-	GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS),
-	GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS),
+	GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+	GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
 	GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
 	GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
 	GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
@@ -457,18 +466,18 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
457 GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS), 466 GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
458 GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS), 467 GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
459 GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS), 468 GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
460 GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS), 469 GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
461 GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS), 470 GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 5, GFLAGS),
462 471
463 /* aclk_peri */ 472 /* aclk_peri */
464 GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS), 473 GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
465 GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS), 474 GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
466 GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS), 475 GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 4, GFLAGS),
467 GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS), 476 GATE(0, "aclk_cpu_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
468 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS), 477 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS),
469 478
470 /* pclk_peri gates */ 479 /* pclk_peri gates */
471 GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS), 480 GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
472 GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS), 481 GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
473 GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), 482 GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
474 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS), 483 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
@@ -511,7 +520,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
 			| CLK_DIVIDER_READ_ONLY,
 			RK2928_CLKGATE_CON(4), 9, GFLAGS),
 
-	GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0,
+	GATE(CORE_L2C, "core_l2c", "aclk_cpu", CLK_IGNORE_UNUSED,
 			RK2928_CLKGATE_CON(9), 4, GFLAGS),
 
 	COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
@@ -577,14 +586,6 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
 			RK2928_CLKGATE_CON(0), 12, GFLAGS),
 	MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
 			RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
-	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
-			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
-			RK2928_CLKGATE_CON(0), 13, GFLAGS),
-	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
-			RK2928_CLKSEL_CON(9), 0,
-			RK2928_CLKGATE_CON(0), 14, GFLAGS),
-	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
-			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 
 	GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
 	GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
@@ -618,7 +619,7 @@ PNAME(mux_hsicphy_p) = { "sclk_otgphy0", "sclk_otgphy1",
618 "gpll", "cpll" }; 619 "gpll", "cpll" };
619 620
620static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = { 621static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
621 COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0, 622 COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
622 RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 623 RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
623 div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS), 624 div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
624 625
@@ -633,7 +634,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
 			RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
 			RK2928_CLKGATE_CON(4), 9, GFLAGS),
 
-	GATE(CORE_L2C, "core_l2c", "armclk", 0,
+	GATE(CORE_L2C, "core_l2c", "armclk", CLK_IGNORE_UNUSED,
 			RK2928_CLKGATE_CON(9), 4, GFLAGS),
 
 	COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -663,7 +664,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
 			RK2928_CLKSEL_CON(30), 0, 2, DFLAGS,
 			RK2928_CLKGATE_CON(3), 6, GFLAGS),
 	DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
-			RK2928_CLKGATE_CON(11), 8, 6, DFLAGS),
+			RK2928_CLKSEL_CON(11), 8, 6, DFLAGS),
 
 	MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
 			RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
@@ -675,14 +676,6 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
 			RK2928_CLKGATE_CON(0), 10, GFLAGS),
 	MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
 			RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
-	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
-			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
-			RK2928_CLKGATE_CON(13), 13, GFLAGS),
-	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
-			RK2928_CLKSEL_CON(9), 0,
-			RK2928_CLKGATE_CON(0), 14, GFLAGS),
-	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
-			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 
 	GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
 	GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
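Most of the rk3188 branch changes above (and the rk3288 ones below) repeat one fix: gates feeding infrastructure the kernel never claims directly (DDR, PMU, GRF, bus fabrics, CoreSight) gain CLK_IGNORE_UNUSED so the late clk_disable_unused() pass leaves them running. Each GATE() entry is shorthand for an ordinary gate registration; a hedged sketch of the equivalent open-coded call, where reg_base and lock are stand-ins for the driver's private state and GFLAGS is assumed to expand to CLK_GATE_HIWORD_MASK:

	#include <linux/clk-provider.h>
	#include "clk.h"	/* rockchip clk.h, for RK2928_CLKGATE_CON() */

	static struct clk * __init example_register_gate(void __iomem *reg_base,
							 spinlock_t *lock)
	{
		/* GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED,
		 *	RK2928_CLKGATE_CON(4), 12, GFLAGS) */
		return clk_register_gate(NULL, "aclk_intmem", "aclk_cpu",
					 CLK_IGNORE_UNUSED, /* skip disable-unused */
					 reg_base + RK2928_CLKGATE_CON(4),
					 12, CLK_GATE_HIWORD_MASK, lock);
	}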
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 23278291da44..ac6be7c0132d 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -16,6 +16,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/syscore_ops.h>
 #include <dt-bindings/clock/rk3288-cru.h>
 #include "clk.h"
 
@@ -83,11 +84,13 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
 	RK3066_PLL_RATE( 742500000, 8, 495, 2),
 	RK3066_PLL_RATE( 696000000, 1, 58, 2),
 	RK3066_PLL_RATE( 600000000, 1, 50, 2),
-	RK3066_PLL_RATE( 594000000, 2, 198, 4),
+	RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
 	RK3066_PLL_RATE( 552000000, 1, 46, 2),
 	RK3066_PLL_RATE( 504000000, 1, 84, 4),
+	RK3066_PLL_RATE( 500000000, 3, 125, 2),
 	RK3066_PLL_RATE( 456000000, 1, 76, 4),
 	RK3066_PLL_RATE( 408000000, 1, 68, 4),
+	RK3066_PLL_RATE( 400000000, 3, 100, 2),
 	RK3066_PLL_RATE( 384000000, 2, 128, 4),
 	RK3066_PLL_RATE( 360000000, 1, 60, 4),
 	RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -173,14 +176,14 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
 PNAME(mux_pll_src_cpll_gpll_p)		= { "cpll", "gpll" };
 PNAME(mux_pll_src_npll_cpll_gpll_p)	= { "npll", "cpll", "gpll" };
 PNAME(mux_pll_src_cpll_gpll_npll_p)	= { "cpll", "gpll", "npll" };
-PNAME(mux_pll_src_cpll_gpll_usb480m_p)	= { "cpll", "gpll", "usb480m" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p)	= { "cpll", "gpll", "usbphy480m_src" };
+PNAME(mux_pll_src_cpll_gll_usb_npll_p)	= { "cpll", "gpll", "usbphy480m_src", "npll" };
 
 PNAME(mux_mmc_src_p)	= { "cpll", "gpll", "xin24m", "xin24m" };
 PNAME(mux_i2s_pre_p)	= { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
 PNAME(mux_i2s_clkout_p)	= { "i2s_pre", "xin12m" };
 PNAME(mux_spdif_p)	= { "spdif_pre", "spdif_frac", "xin12m" };
 PNAME(mux_spdif_8ch_p)	= { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
-PNAME(mux_uart0_pll_p)	= { "cpll", "gpll", "usbphy_480m_src", "npll" };
 PNAME(mux_uart0_p)	= { "uart0_src", "uart0_frac", "xin24m" };
 PNAME(mux_uart1_p)	= { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p)	= { "uart2_src", "uart2_frac", "xin24m" };
@@ -192,22 +195,22 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)	= { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)	= { "cpll", "gpll", "npll", "xin27m" };
 
-PNAME(mux_usbphy480m_p)	= { "sclk_otgphy0", "sclk_otgphy1",
-			    "sclk_otgphy2" };
+PNAME(mux_usbphy480m_p)	= { "sclk_otgphy1", "sclk_otgphy2",
+			    "sclk_otgphy0" };
 PNAME(mux_hsicphy480m_p)	= { "cpll", "gpll", "usbphy480m_src" };
 PNAME(mux_hsicphy12m_p)	= { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
 
 static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
 	[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
-		     RK3288_MODE_CON, 0, 6, rk3288_pll_rates),
+		     RK3288_MODE_CON, 0, 6, 0, rk3288_pll_rates),
 	[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
-		     RK3288_MODE_CON, 4, 5, NULL),
+		     RK3288_MODE_CON, 4, 5, 0, NULL),
 	[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
-		     RK3288_MODE_CON, 8, 7, rk3288_pll_rates),
+		     RK3288_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
 	[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
-		     RK3288_MODE_CON, 12, 8, rk3288_pll_rates),
+		     RK3288_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
 	[npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3288_PLL_CON(16),
-		     RK3288_MODE_CON, 14, 9, rk3288_pll_rates),
+		     RK3288_MODE_CON, 14, 9, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
 };
 
 static struct clk_div_table div_hclk_cpu_t[] = {
@@ -226,67 +229,67 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	 * Clock-Architecture Diagram 1
 	 */
 
-	GATE(0, "apll_core", "apll", 0,
+	GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 1, GFLAGS),
-	GATE(0, "gpll_core", "gpll", 0,
+	GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 2, GFLAGS),
 
-	COMPOSITE_NOMUX(0, "armcore0", "armclk", 0,
+	COMPOSITE_NOMUX(0, "armcore0", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(36), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 0, GFLAGS),
-	COMPOSITE_NOMUX(0, "armcore1", "armclk", 0,
+	COMPOSITE_NOMUX(0, "armcore1", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(36), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 1, GFLAGS),
-	COMPOSITE_NOMUX(0, "armcore2", "armclk", 0,
+	COMPOSITE_NOMUX(0, "armcore2", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(36), 8, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 2, GFLAGS),
-	COMPOSITE_NOMUX(0, "armcore3", "armclk", 0,
+	COMPOSITE_NOMUX(0, "armcore3", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(36), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 3, GFLAGS),
-	COMPOSITE_NOMUX(0, "l2ram", "armclk", 0,
+	COMPOSITE_NOMUX(0, "l2ram", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(37), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 4, GFLAGS),
-	COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0,
+	COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(0), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 5, GFLAGS),
-	COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0,
+	COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 6, GFLAGS),
 	COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
 			RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 7, GFLAGS),
-	COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0,
+	COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 8, GFLAGS),
 	GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
 			RK3288_CLKGATE_CON(12), 9, GFLAGS),
-	GATE(0, "cs_dbg", "pclk_dbg_pre", 0,
+	GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(12), 10, GFLAGS),
 	GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
 			RK3288_CLKGATE_CON(12), 11, GFLAGS),
 
-	GATE(0, "dpll_ddr", "dpll", 0,
+	GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 8, GFLAGS),
 	GATE(0, "gpll_ddr", "gpll", 0,
 			RK3288_CLKGATE_CON(0), 9, GFLAGS),
-	COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0,
+	COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
 			DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
 
-	GATE(0, "gpll_aclk_cpu", "gpll", 0,
+	GATE(0, "gpll_aclk_cpu", "gpll", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 10, GFLAGS),
-	GATE(0, "cpll_aclk_cpu", "cpll", 0,
+	GATE(0, "cpll_aclk_cpu", "cpll", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 11, GFLAGS),
-	COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+	COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
-	DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0,
+	DIV(0, "aclk_cpu_pre", "aclk_cpu_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
-	GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0,
+	GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(0), 3, GFLAGS),
-	COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", 0,
+	COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
 			RK3288_CLKGATE_CON(0), 5, GFLAGS),
-	COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", 0,
+	COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
 			RK3288_CLKGATE_CON(0), 4, GFLAGS),
 	GATE(0, "c2c_host", "aclk_cpu_src", 0,
@@ -294,7 +297,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
294 COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0, 297 COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
295 RK3288_CLKSEL_CON(26), 6, 2, DFLAGS, 298 RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
296 RK3288_CLKGATE_CON(5), 4, GFLAGS), 299 RK3288_CLKGATE_CON(5), 4, GFLAGS),
297 GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0, 300 GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
298 RK3288_CLKGATE_CON(0), 7, GFLAGS), 301 RK3288_CLKGATE_CON(0), 7, GFLAGS),
299 302
300 COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0, 303 COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
@@ -305,7 +308,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
305 RK3288_CLKGATE_CON(4), 2, GFLAGS), 308 RK3288_CLKGATE_CON(4), 2, GFLAGS),
306 MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT, 309 MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT,
307 RK3288_CLKSEL_CON(4), 8, 2, MFLAGS), 310 RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
308 COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, CLK_SET_RATE_PARENT, 311 COMPOSITE_NODIV(SCLK_I2S0_OUT, "i2s0_clkout", mux_i2s_clkout_p, 0,
309 RK3288_CLKSEL_CON(4), 12, 1, MFLAGS, 312 RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
310 RK3288_CLKGATE_CON(4), 0, GFLAGS), 313 RK3288_CLKGATE_CON(4), 0, GFLAGS),
311 GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT, 314 GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT,
@@ -325,7 +328,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
325 COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0, 328 COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
326 RK3288_CLKSEL_CON(40), 0, 7, DFLAGS, 329 RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
327 RK3288_CLKGATE_CON(4), 7, GFLAGS), 330 RK3288_CLKGATE_CON(4), 7, GFLAGS),
328 COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", 0, 331 COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_pre", 0,
329 RK3288_CLKSEL_CON(41), 0, 332 RK3288_CLKSEL_CON(41), 0,
330 RK3288_CLKGATE_CON(4), 8, GFLAGS), 333 RK3288_CLKGATE_CON(4), 8, GFLAGS),
331 COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0, 334 COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
@@ -373,12 +376,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
373 GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0, 376 GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
374 RK3288_CLKGATE_CON(9), 1, GFLAGS), 377 RK3288_CLKGATE_CON(9), 1, GFLAGS),
375 378
376 COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0, 379 COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
377 RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS, 380 RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
378 RK3288_CLKGATE_CON(3), 0, GFLAGS), 381 RK3288_CLKGATE_CON(3), 0, GFLAGS),
379 DIV(0, "hclk_vio", "aclk_vio0", 0, 382 DIV(0, "hclk_vio", "aclk_vio0", 0,
380 RK3288_CLKSEL_CON(28), 8, 5, DFLAGS), 383 RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
381 COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0, 384 COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
382 RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS, 385 RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
383 RK3288_CLKGATE_CON(3), 2, GFLAGS), 386 RK3288_CLKGATE_CON(3), 2, GFLAGS),
384 387
@@ -436,24 +439,24 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
436 439
437 DIV(0, "pclk_pd_alive", "gpll", 0, 440 DIV(0, "pclk_pd_alive", "gpll", 0,
438 RK3288_CLKSEL_CON(33), 8, 5, DFLAGS), 441 RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
439 COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0, 442 COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
440 RK3288_CLKSEL_CON(33), 0, 5, DFLAGS, 443 RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
441 RK3288_CLKGATE_CON(5), 8, GFLAGS), 444 RK3288_CLKGATE_CON(5), 8, GFLAGS),
442 445
443 COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0, 446 COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gll_usb_npll_p, 0,
444 RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS, 447 RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
445 RK3288_CLKGATE_CON(5), 7, GFLAGS), 448 RK3288_CLKGATE_CON(5), 7, GFLAGS),
446 449
447 COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0, 450 COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
448 RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS, 451 RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
449 RK3288_CLKGATE_CON(2), 0, GFLAGS), 452 RK3288_CLKGATE_CON(2), 0, GFLAGS),
450 COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0, 453 COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
451 RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 454 RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
452 RK3288_CLKGATE_CON(2), 3, GFLAGS), 455 RK3288_CLKGATE_CON(2), 3, GFLAGS),
453 COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0, 456 COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
454 RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 457 RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
455 RK3288_CLKGATE_CON(2), 2, GFLAGS), 458 RK3288_CLKGATE_CON(2), 2, GFLAGS),
456 GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0, 459 GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
457 RK3288_CLKGATE_CON(2), 1, GFLAGS), 460 RK3288_CLKGATE_CON(2), 1, GFLAGS),
458 461
459 /* 462 /*
@@ -483,6 +486,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
483 RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS, 486 RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
484 RK3288_CLKGATE_CON(13), 3, GFLAGS), 487 RK3288_CLKGATE_CON(13), 3, GFLAGS),
485 488
489 MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3288_SDMMC_CON0, 1),
490 MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, 0),
491
492 MMC(SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3288_SDIO0_CON0, 1),
493 MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3288_SDIO0_CON1, 0),
494
495 MMC(SCLK_SDIO1_DRV, "sdio1_drv", "sclk_sdio1", RK3288_SDIO1_CON0, 1),
496 MMC(SCLK_SDIO1_SAMPLE, "sdio1_sample", "sclk_sdio1", RK3288_SDIO1_CON1, 0),
497
498 MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3288_EMMC_CON0, 1),
499 MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3288_EMMC_CON1, 0),
500
486 COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0, 501 COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
487 RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS, 502 RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
488 RK3288_CLKGATE_CON(4), 11, GFLAGS), 503 RK3288_CLKGATE_CON(4), 11, GFLAGS),
@@ -490,13 +505,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
490 RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, 505 RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
491 RK3288_CLKGATE_CON(4), 10, GFLAGS), 506 RK3288_CLKGATE_CON(4), 10, GFLAGS),
492 507
493 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0, 508 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
494 RK3288_CLKGATE_CON(13), 4, GFLAGS), 509 RK3288_CLKGATE_CON(13), 4, GFLAGS),
495 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0, 510 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
496 RK3288_CLKGATE_CON(13), 5, GFLAGS), 511 RK3288_CLKGATE_CON(13), 5, GFLAGS),
497 GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0, 512 GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
498 RK3288_CLKGATE_CON(13), 6, GFLAGS), 513 RK3288_CLKGATE_CON(13), 6, GFLAGS),
499 GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0, 514 GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
500 RK3288_CLKGATE_CON(13), 7, GFLAGS), 515 RK3288_CLKGATE_CON(13), 7, GFLAGS),
501 516
502 COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0, 517 COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
@@ -517,7 +532,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
517 RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS, 532 RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
518 RK3288_CLKGATE_CON(5), 6, GFLAGS), 533 RK3288_CLKGATE_CON(5), 6, GFLAGS),
519 534
520 COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0, 535 COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
521 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS, 536 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
522 RK3288_CLKGATE_CON(1), 8, GFLAGS), 537 RK3288_CLKGATE_CON(1), 8, GFLAGS),
523 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0, 538 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
@@ -585,7 +600,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
585 600
586 COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0, 601 COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
587 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS, 602 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
588 RK3288_CLKGATE_CON(5), 15, GFLAGS), 603 RK3288_CLKGATE_CON(5), 14, GFLAGS),
589 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, 604 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
590 RK3288_CLKSEL_CON(29), 0, 2, MFLAGS, 605 RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
591 RK3288_CLKGATE_CON(3), 6, GFLAGS), 606 RK3288_CLKGATE_CON(3), 6, GFLAGS),
@@ -601,19 +616,19 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
601 */ 616 */
602 617
603 /* aclk_cpu gates */ 618 /* aclk_cpu gates */
604 GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS), 619 GATE(0, "sclk_intmem0", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 5, GFLAGS),
605 GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS), 620 GATE(0, "sclk_intmem1", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 6, GFLAGS),
606 GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS), 621 GATE(0, "sclk_intmem2", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 7, GFLAGS),
607 GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS), 622 GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
608 GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS), 623 GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 13, GFLAGS),
609 GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS), 624 GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 4, GFLAGS),
610 GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS), 625 GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
611 GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS), 626 GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
612 627
613 /* hclk_cpu gates */ 628 /* hclk_cpu gates */
614 GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS), 629 GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
615 GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS), 630 GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
616 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS), 631 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 9, GFLAGS),
617 GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS), 632 GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
618 GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS), 633 GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
619 634
@@ -622,42 +637,42 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
622 GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), 637 GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
623 GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), 638 GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
624 GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), 639 GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
625 GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), 640 GATE(PCLK_DDRUPCTL0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
626 GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), 641 GATE(PCLK_PUBL0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
627 GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), 642 GATE(PCLK_DDRUPCTL1, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
628 GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS), 643 GATE(PCLK_PUBL1, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
629 GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS), 644 GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
630 GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS), 645 GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
631 GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS), 646 GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
632 GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS), 647 GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
633 GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS), 648 GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
634 649
635 /* ddrctrl [DDR Controller PHY clock] gates */ 650 /* ddrctrl [DDR Controller PHY clock] gates */
636 GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS), 651 GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
637 GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS), 652 GATE(0, "nclk_ddrupctl1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 5, GFLAGS),
638 653
639 /* ddrphy gates */ 654 /* ddrphy gates */
640 GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS), 655 GATE(0, "sclk_ddrphy0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 12, GFLAGS),
641 GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS), 656 GATE(0, "sclk_ddrphy1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 13, GFLAGS),
642 657
643 /* aclk_peri gates */ 658 /* aclk_peri gates */
644 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS), 659 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 2, GFLAGS),
645 GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS), 660 GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
646 GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS), 661 GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 11, GFLAGS),
647 GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS), 662 GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(8), 12, GFLAGS),
648 GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS), 663 GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
649 GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS), 664 GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
650 665
651 /* hclk_peri gates */ 666 /* hclk_peri gates */
652 GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS), 667 GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 0, GFLAGS),
653 GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS), 668 GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 4, GFLAGS),
654 GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS), 669 GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
655 GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS), 670 GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 7, GFLAGS),
656 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS), 671 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
657 GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS), 672 GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 9, GFLAGS),
658 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS), 673 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 10, GFLAGS),
659 GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS), 674 GATE(0, "hclk_emem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 12, GFLAGS),
660 GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS), 675 GATE(0, "hclk_mem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 13, GFLAGS),
661 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS), 676 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
662 GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS), 677 GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
663 GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS), 678 GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
@@ -669,7 +684,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
669 GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS), 684 GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
670 685
671 /* pclk_peri gates */ 686 /* pclk_peri gates */
672 GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS), 687 GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 1, GFLAGS),
673 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS), 688 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
674 GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS), 689 GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
675 GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS), 690 GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
@@ -705,48 +720,48 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
705 GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS), 720 GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
706 GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS), 721 GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
707 GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS), 722 GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
708 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS), 723 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS),
709 GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS), 724 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 12, GFLAGS),
710 725
711 /* pclk_pd_pmu gates */ 726 /* pclk_pd_pmu gates */
712 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS), 727 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS),
713 GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS), 728 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS),
714 GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS), 729 GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 2, GFLAGS),
715 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS), 730 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 3, GFLAGS),
716 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS), 731 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
717 732
718 /* hclk_vio gates */ 733 /* hclk_vio gates */
719 GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS), 734 GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
720 GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS), 735 GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
721 GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS), 736 GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
722 GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS), 737 GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 9, GFLAGS),
723 GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS), 738 GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 10, GFLAGS),
724 GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS), 739 GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
725 GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS), 740 GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
726 GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS), 741 GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
727 GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS), 742 GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 10, GFLAGS),
728 GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS), 743 GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
729 GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS), 744 GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
730 GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS), 745 GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
731 GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS), 746 GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
732 GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS), 747 GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 8, GFLAGS),
733 GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS), 748 GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
734 GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS), 749 GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 11, GFLAGS),
735 750
736 /* aclk_vio0 gates */ 751 /* aclk_vio0 gates */
737 GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS), 752 GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
738 GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS), 753 GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
739 GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS), 754 GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 11, GFLAGS),
740 GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS), 755 GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
741 756
742 /* aclk_vio1 gates */ 757 /* aclk_vio1 gates */
743 GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS), 758 GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
744 GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS), 759 GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
745 GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS), 760 GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 12, GFLAGS),
746 761
747 /* aclk_rga_pre gates */ 762 /* aclk_rga_pre gates */
748 GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS), 763 GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
749 GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS), 764 GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 13, GFLAGS),
750 765
751 /* 766 /*
752 * Other ungrouped clocks. 767 * Other ungrouped clocks.
@@ -762,6 +777,64 @@ static const char *rk3288_critical_clocks[] __initconst = {
762 "hclk_peri", 777 "hclk_peri",
763}; 778};
764 779
780#ifdef CONFIG_PM_SLEEP
781static void __iomem *rk3288_cru_base;
782
783/* Some CRU registers will be reset in maskrom when the system
784 * wakes up from fastboot.
785 * So save them before suspend, restore them after resume.
786 */
787static const int rk3288_saved_cru_reg_ids[] = {
788 RK3288_MODE_CON,
789 RK3288_CLKSEL_CON(0),
790 RK3288_CLKSEL_CON(1),
791 RK3288_CLKSEL_CON(10),
792 RK3288_CLKSEL_CON(33),
793 RK3288_CLKSEL_CON(37),
794};
795
796static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
797
798static int rk3288_clk_suspend(void)
799{
800 int i, reg_id;
801
802 for (i = 0; i < ARRAY_SIZE(rk3288_saved_cru_reg_ids); i++) {
803 reg_id = rk3288_saved_cru_reg_ids[i];
804
805 rk3288_saved_cru_regs[i] =
806 readl_relaxed(rk3288_cru_base + reg_id);
807 }
808 return 0;
809}
810
811static void rk3288_clk_resume(void)
812{
813 int i, reg_id;
814
815 for (i = ARRAY_SIZE(rk3288_saved_cru_reg_ids) - 1; i >= 0; i--) {
816 reg_id = rk3288_saved_cru_reg_ids[i];
817
818 writel_relaxed(rk3288_saved_cru_regs[i] | 0xffff0000,
819 rk3288_cru_base + reg_id);
820 }
821}
822
823static struct syscore_ops rk3288_clk_syscore_ops = {
824 .suspend = rk3288_clk_suspend,
825 .resume = rk3288_clk_resume,
826};
827
828static void rk3288_clk_sleep_init(void __iomem *reg_base)
829{
830 rk3288_cru_base = reg_base;
831 register_syscore_ops(&rk3288_clk_syscore_ops);
832}
833
834#else /* CONFIG_PM_SLEEP */
835static void rk3288_clk_sleep_init(void __iomem *reg_base) {}
836#endif
837
765static void __init rk3288_clk_init(struct device_node *np) 838static void __init rk3288_clk_init(struct device_node *np)
766{ 839{
767 void __iomem *reg_base; 840 void __iomem *reg_base;
@@ -810,5 +883,6 @@ static void __init rk3288_clk_init(struct device_node *np)
810 ROCKCHIP_SOFTRST_HIWORD_MASK); 883 ROCKCHIP_SOFTRST_HIWORD_MASK);
811 884
812 rockchip_register_restart_notifier(RK3288_GLB_SRST_FST); 885 rockchip_register_restart_notifier(RK3288_GLB_SRST_FST);
886 rk3288_clk_sleep_init(reg_base);
813} 887}
814CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init); 888CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
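
The resume loop above ORs 0xffff0000 into each saved value because RK3288 CRU registers use the "HIWORD mask" write scheme: the upper 16 bits of a write select which of the lower 16 bits actually change (the same convention the driver passes to the reset controller as ROCKCHIP_SOFTRST_HIWORD_MASK). A minimal illustrative sketch; the helper name below is hypothetical and not part of the patch:

/* Hypothetical helper, for illustration only: with all mask bits set,
 * one relaxed write restores the full 16-bit payload captured at
 * suspend, with no read-modify-write cycle needed.
 */
static inline void rk3288_cru_restore_one(void __iomem *base, int reg, u32 saved)
{
	writel_relaxed(saved | 0xffff0000, base + reg);
}
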
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 880a266f0143..20e05bbb3a67 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -197,7 +197,8 @@ void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
 			list->parent_names, list->num_parents,
 			reg_base, list->con_offset, grf_lock_offset,
 			list->lock_shift, list->mode_offset,
-			list->mode_shift, list->rate_table, &clk_lock);
+			list->mode_shift, list->rate_table,
+			list->pll_flags, &clk_lock);
 		if (IS_ERR(clk)) {
 			pr_err("%s: failed to register clock %s\n", __func__,
 				list->name);
@@ -244,9 +245,6 @@ void __init rockchip_clk_register_branches(
 				list->div_flags, &clk_lock);
 			break;
 		case branch_fraction_divider:
-			/* keep all gates untouched for now */
-			flags |= CLK_IGNORE_UNUSED;
-
 			clk = rockchip_clk_register_frac_branch(list->name,
 				list->parent_names, list->num_parents,
 				reg_base, list->muxdiv_offset, list->div_flags,
@@ -256,18 +254,12 @@ void __init rockchip_clk_register_branches(
 		case branch_gate:
 			flags |= CLK_SET_RATE_PARENT;
 
-			/* keep all gates untouched for now */
-			flags |= CLK_IGNORE_UNUSED;
-
 			clk = clk_register_gate(NULL, list->name,
 				list->parent_names[0], flags,
 				reg_base + list->gate_offset,
 				list->gate_shift, list->gate_flags, &clk_lock);
 			break;
 		case branch_composite:
-			/* keep all gates untouched for now */
-			flags |= CLK_IGNORE_UNUSED;
-
 			clk = rockchip_clk_register_branch(list->name,
 				list->parent_names, list->num_parents,
 				reg_base, list->muxdiv_offset, list->mux_shift,
@@ -277,6 +269,14 @@ void __init rockchip_clk_register_branches(
 				list->gate_offset, list->gate_shift,
 				list->gate_flags, flags, &clk_lock);
 			break;
+		case branch_mmc:
+			clk = rockchip_clk_register_mmc(
+				list->name,
+				list->parent_names, list->num_parents,
+				reg_base + list->muxdiv_offset,
+				list->div_shift
+			);
+			break;
 		}
 
 		/* none of the cases above matched */
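
For orientation, the switch above is driven by the flat per-SoC branch tables; a minimal caller sketch, assuming the rk3288 table from the hunk earlier in this diff:

/* Sketch: each rockchip_clk_branch entry's branch_type selects the
 * matching clk_register_*() / rockchip_clk_register_*() backend.
 */
rockchip_clk_register_branches(rk3288_clk_branches,
			       ARRAY_SIZE(rk3288_clk_branches));
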
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ca009ab0a33a..58d2e3bdf22f 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -48,6 +48,14 @@
 #define RK3288_GLB_SRST_SND		0x1b4
 #define RK3288_SOFTRST_CON(x)		(x * 0x4 + 0x1b8)
 #define RK3288_MISC_CON			0x1e8
+#define RK3288_SDMMC_CON0		0x200
+#define RK3288_SDMMC_CON1		0x204
+#define RK3288_SDIO0_CON0		0x208
+#define RK3288_SDIO0_CON1		0x20c
+#define RK3288_SDIO1_CON0		0x210
+#define RK3288_SDIO1_CON1		0x214
+#define RK3288_EMMC_CON0		0x218
+#define RK3288_EMMC_CON1		0x21c
 
 enum rockchip_pll_type {
 	pll_rk3066,
@@ -62,6 +70,15 @@ enum rockchip_pll_type {
 	.bwadj = (_nf >> 1),						\
 }
 
+#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw)		\
+{									\
+	.rate	= _rate##U,						\
+	.nr	= _nr,							\
+	.nf	= _nf,							\
+	.no	= _no,							\
+	.bwadj	= _bw,							\
+}
+
 struct rockchip_pll_rate_table {
 	unsigned long rate;
 	unsigned int nr;
@@ -81,7 +98,12 @@ struct rockchip_pll_rate_table {
  * @mode_shift: offset inside the mode-register for the mode of this pll.
  * @lock_shift: offset inside the lock register for the lock status.
  * @type: Type of PLL to be registered.
+ * @pll_flags: hardware-specific flags
  * @rate_table: Table of usable pll rates
+ *
+ * Flags:
+ * ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
+ *	rate_table parameters and adjust them if necessary.
  */
 struct rockchip_pll_clock {
 	unsigned int id;
@@ -94,11 +116,14 @@ struct rockchip_pll_clock {
 	int mode_shift;
 	int lock_shift;
 	enum rockchip_pll_type type;
+	u8 pll_flags;
 	struct rockchip_pll_rate_table *rate_table;
 };
 
+#define ROCKCHIP_PLL_SYNC_RATE		BIT(0)
+
 #define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift,	\
-		_lshift, _rtable)					\
+		_lshift, _pflags, _rtable)				\
 	{								\
 		.id		= _id,					\
 		.type		= _type,				\
@@ -110,6 +135,7 @@ struct rockchip_pll_clock {
 		.mode_offset	= _mode,				\
 		.mode_shift	= _mshift,				\
 		.lock_shift	= _lshift,				\
+		.pll_flags	= _pflags,				\
 		.rate_table	= _rtable,				\
 	}
 
@@ -118,7 +144,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 		void __iomem *base, int con_offset, int grf_lock_offset,
 		int lock_shift, int reg_mode, int mode_shift,
 		struct rockchip_pll_rate_table *rate_table,
-		spinlock_t *lock);
+		u8 clk_pll_flags, spinlock_t *lock);
 
 struct rockchip_cpuclk_clksel {
 	int reg;
@@ -152,6 +178,10 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
 		const struct rockchip_cpuclk_rate_table *rates,
 		int nrates, void __iomem *reg_base, spinlock_t *lock);
 
+struct clk *rockchip_clk_register_mmc(const char *name,
+		const char **parent_names, u8 num_parents,
+		void __iomem *reg, int shift);
+
 #define PNAME(x) static const char *x[] __initconst
 
 enum rockchip_clk_branch_type {
@@ -160,6 +190,7 @@ enum rockchip_clk_branch_type {
 	branch_divider,
 	branch_fraction_divider,
 	branch_gate,
+	branch_mmc,
 };
 
 struct rockchip_clk_branch {
@@ -352,6 +383,16 @@ struct rockchip_clk_branch {
 		.gate_flags	= gf,					\
 	}
 
+#define MMC(_id, cname, pname, offset, shift)				\
+	{								\
+		.id		= _id,					\
+		.branch_type	= branch_mmc,				\
+		.name		= cname,				\
+		.parent_names	= (const char *[]){ pname },		\
+		.num_parents	= 1,					\
+		.muxdiv_offset	= offset,				\
+		.div_shift	= shift,				\
+	}
 
 void rockchip_clk_init(struct device_node *np, void __iomem *base,
 		       unsigned long nr_clks);
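
Putting the pieces together: an MMC() entry from the rk3288 table expands to a branch_mmc record, and the branch_mmc case in rockchip_clk_register_branches() turns it into a rockchip_clk_register_mmc() call. An illustrative expansion of one entry added above (a sketch, not generated output):

/* MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc",
 *     RK3288_SDMMC_CON1, 0)
 * is, after dispatch through the registration loop in clk.c,
 * roughly equivalent to:
 */
clk = rockchip_clk_register_mmc("sdmmc_sample",
				(const char *[]){ "sclk_sdmmc" }, 1,
				reg_base + RK3288_SDMMC_CON1, 0);
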
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 6fb4bc602e8a..006c6f294310 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_COMMON_CLK)	+= clk.o clk-pll.o
 obj-$(CONFIG_SOC_EXYNOS3250)	+= clk-exynos3250.o
 obj-$(CONFIG_ARCH_EXYNOS4)	+= clk-exynos4.o
+obj-$(CONFIG_SOC_EXYNOS4415)	+= clk-exynos4415.o
 obj-$(CONFIG_SOC_EXYNOS5250)	+= clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260)	+= clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410)	+= clk-exynos5410.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_SOC_EXYNOS5420)	+= clk-exynos5420.o
 obj-$(CONFIG_SOC_EXYNOS5440)	+= clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)	+= clk-exynos-audss.o
 obj-$(CONFIG_ARCH_EXYNOS)	+= clk-exynos-clkout.o
+obj-$(CONFIG_ARCH_EXYNOS7)	+= clk-exynos7.o
 obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
 obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
 obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index acce708ace18..f2c2ccce49bb 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -29,6 +29,13 @@ static DEFINE_SPINLOCK(lock);
 static struct clk **clk_table;
 static void __iomem *reg_base;
 static struct clk_onecell_data clk_data;
+/*
+ * On Exynos5420 this will be a clock which has to be enabled before any
+ * access to audss registers. Typically a child of EPLL.
+ *
+ * On other platforms this will be -ENODEV.
+ */
+static struct clk *epll;
 
 #define ASS_CLK_SRC 0x0
 #define ASS_CLK_DIV 0x4
@@ -98,6 +105,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to map audss registers\n");
 		return PTR_ERR(reg_base);
 	}
+	/* EPLL doesn't have to be enabled for boards other than Exynos5420 */
+	epll = ERR_PTR(-ENODEV);
 
 	clk_table = devm_kzalloc(&pdev->dev,
 				sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
@@ -115,8 +124,20 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
 	pll_in = devm_clk_get(&pdev->dev, "pll_in");
 	if (!IS_ERR(pll_ref))
 		mout_audss_p[0] = __clk_get_name(pll_ref);
-	if (!IS_ERR(pll_in))
+	if (!IS_ERR(pll_in)) {
 		mout_audss_p[1] = __clk_get_name(pll_in);
+
+		if (variant == TYPE_EXYNOS5420) {
+			epll = pll_in;
+
+			ret = clk_prepare_enable(epll);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"failed to prepare the epll clock\n");
+				return ret;
+			}
+		}
+	}
 	clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
 				mout_audss_p, ARRAY_SIZE(mout_audss_p),
 				CLK_SET_RATE_NO_REPARENT,
@@ -203,6 +224,9 @@ unregister:
 			clk_unregister(clk_table[i]);
 	}
 
+	if (!IS_ERR(epll))
+		clk_disable_unprepare(epll);
+
 	return ret;
 }
 
@@ -210,6 +234,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
 	int i;
 
+#ifdef CONFIG_PM_SLEEP
+	unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
+#endif
+
 	of_clk_del_provider(pdev->dev.of_node);
 
 	for (i = 0; i < clk_data.clk_num; i++) {
@@ -217,6 +245,9 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
 			clk_unregister(clk_table[i]);
 	}
 
+	if (!IS_ERR(epll))
+		clk_disable_unprepare(epll);
+
 	return 0;
 }
 
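
The epll handling above leans on the kernel's ERR_PTR encoding: a struct clk pointer either refers to a real clock or carries a negative errno, and IS_ERR() tells the two apart, so ERR_PTR(-ENODEV) doubles as a cheap "no EPLL on this variant" marker. A self-contained sketch of the idiom (names below are for illustration only):

#include <linux/clk.h>
#include <linux/err.h>

/* Sketch of the sentinel pattern used above: start with an encoded
 * errno, replace it with a real clock only on the variant that needs
 * it, and the teardown test stays a single IS_ERR() check.
 */
static void epll_sentinel_demo(struct clk *pll_in, bool is_exynos5420)
{
	struct clk *epll = ERR_PTR(-ENODEV);	/* "not present" */

	if (is_exynos5420)
		epll = pll_in;

	/* ... use the clock ... */

	if (!IS_ERR(epll))			/* only a real clock */
		clk_disable_unprepare(epll);
}
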
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 940f02837b82..88e8c6bbd77f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -505,7 +505,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
 /* fixed rate clocks generated inside the soc */
 static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
 	FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
-	FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+	FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
 	FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
 };
 
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
new file mode 100644
index 000000000000..2123fc251e0f
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -0,0 +1,1144 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Chanwoo Choi <cw00.choi@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Common Clock Framework support for Exynos4415 SoC.
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/clk-provider.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/platform_device.h>
18#include <linux/syscore_ops.h>
19
20#include <dt-bindings/clock/exynos4415.h>
21
22#include "clk.h"
23#include "clk-pll.h"
24
25#define SRC_LEFTBUS 0x4200
26#define DIV_LEFTBUS 0x4500
27#define GATE_IP_LEFTBUS 0x4800
28#define GATE_IP_IMAGE 0x4930
29#define SRC_RIGHTBUS 0x8200
30#define DIV_RIGHTBUS 0x8500
31#define GATE_IP_RIGHTBUS 0x8800
32#define GATE_IP_PERIR 0x8960
33#define EPLL_LOCK 0xc010
34#define G3D_PLL_LOCK 0xc020
35#define DISP_PLL_LOCK 0xc030
36#define ISP_PLL_LOCK 0xc040
37#define EPLL_CON0 0xc110
38#define EPLL_CON1 0xc114
39#define EPLL_CON2 0xc118
40#define G3D_PLL_CON0 0xc120
41#define G3D_PLL_CON1 0xc124
42#define G3D_PLL_CON2 0xc128
43#define ISP_PLL_CON0 0xc130
44#define ISP_PLL_CON1 0xc134
45#define ISP_PLL_CON2 0xc138
46#define DISP_PLL_CON0 0xc140
47#define DISP_PLL_CON1 0xc144
48#define DISP_PLL_CON2 0xc148
49#define SRC_TOP0 0xc210
50#define SRC_TOP1 0xc214
51#define SRC_CAM 0xc220
52#define SRC_TV 0xc224
53#define SRC_MFC 0xc228
54#define SRC_G3D 0xc22c
55#define SRC_LCD 0xc234
56#define SRC_ISP 0xc238
57#define SRC_MAUDIO 0xc23c
58#define SRC_FSYS 0xc240
59#define SRC_PERIL0 0xc250
60#define SRC_PERIL1 0xc254
61#define SRC_CAM1 0xc258
62#define SRC_TOP_ISP0 0xc25c
63#define SRC_TOP_ISP1 0xc260
64#define SRC_MASK_TOP 0xc310
65#define SRC_MASK_CAM 0xc320
66#define SRC_MASK_TV 0xc324
67#define SRC_MASK_LCD 0xc334
68#define SRC_MASK_ISP 0xc338
69#define SRC_MASK_MAUDIO 0xc33c
70#define SRC_MASK_FSYS 0xc340
71#define SRC_MASK_PERIL0 0xc350
72#define SRC_MASK_PERIL1 0xc354
73#define DIV_TOP 0xc510
74#define DIV_CAM 0xc520
75#define DIV_TV 0xc524
76#define DIV_MFC 0xc528
77#define DIV_G3D 0xc52c
78#define DIV_LCD 0xc534
79#define DIV_ISP 0xc538
80#define DIV_MAUDIO 0xc53c
81#define DIV_FSYS0 0xc540
82#define DIV_FSYS1 0xc544
83#define DIV_FSYS2 0xc548
84#define DIV_PERIL0 0xc550
85#define DIV_PERIL1 0xc554
86#define DIV_PERIL2 0xc558
87#define DIV_PERIL3 0xc55c
88#define DIV_PERIL4 0xc560
89#define DIV_PERIL5 0xc564
90#define DIV_CAM1 0xc568
91#define DIV_TOP_ISP1 0xc56c
92#define DIV_TOP_ISP0 0xc570
93#define CLKDIV2_RATIO 0xc580
94#define GATE_SCLK_CAM 0xc820
95#define GATE_SCLK_TV 0xc824
96#define GATE_SCLK_MFC 0xc828
97#define GATE_SCLK_G3D 0xc82c
98#define GATE_SCLK_LCD 0xc834
99#define GATE_SCLK_MAUDIO 0xc83c
100#define GATE_SCLK_FSYS 0xc840
101#define GATE_SCLK_PERIL 0xc850
102#define GATE_IP_CAM 0xc920
103#define GATE_IP_TV 0xc924
104#define GATE_IP_MFC 0xc928
105#define GATE_IP_G3D 0xc92c
106#define GATE_IP_LCD 0xc934
107#define GATE_IP_FSYS 0xc940
108#define GATE_IP_PERIL 0xc950
109#define GATE_BLOCK 0xc970
110#define APLL_LOCK 0x14000
111#define APLL_CON0 0x14100
112#define SRC_CPU 0x14200
113#define DIV_CPU0 0x14500
114#define DIV_CPU1 0x14504
115
116enum exynos4415_plls {
117 apll, epll, g3d_pll, isp_pll, disp_pll,
118 nr_plls,
119};
120
121static struct samsung_clk_provider *exynos4415_ctx;
122
123/*
124 * Support for CMU save/restore across system suspends
125 */
126#ifdef CONFIG_PM_SLEEP
127static struct samsung_clk_reg_dump *exynos4415_clk_regs;
128
129static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
130 SRC_LEFTBUS,
131 DIV_LEFTBUS,
132 GATE_IP_LEFTBUS,
133 GATE_IP_IMAGE,
134 SRC_RIGHTBUS,
135 DIV_RIGHTBUS,
136 GATE_IP_RIGHTBUS,
137 GATE_IP_PERIR,
138 EPLL_LOCK,
139 G3D_PLL_LOCK,
140 DISP_PLL_LOCK,
141 ISP_PLL_LOCK,
142 EPLL_CON0,
143 EPLL_CON1,
144 EPLL_CON2,
145 G3D_PLL_CON0,
146 G3D_PLL_CON1,
147 G3D_PLL_CON2,
148 ISP_PLL_CON0,
149 ISP_PLL_CON1,
150 ISP_PLL_CON2,
151 DISP_PLL_CON0,
152 DISP_PLL_CON1,
153 DISP_PLL_CON2,
154 SRC_TOP0,
155 SRC_TOP1,
156 SRC_CAM,
157 SRC_TV,
158 SRC_MFC,
159 SRC_G3D,
160 SRC_LCD,
161 SRC_ISP,
162 SRC_MAUDIO,
163 SRC_FSYS,
164 SRC_PERIL0,
165 SRC_PERIL1,
166 SRC_CAM1,
167 SRC_TOP_ISP0,
168 SRC_TOP_ISP1,
169 SRC_MASK_TOP,
170 SRC_MASK_CAM,
171 SRC_MASK_TV,
172 SRC_MASK_LCD,
173 SRC_MASK_ISP,
174 SRC_MASK_MAUDIO,
175 SRC_MASK_FSYS,
176 SRC_MASK_PERIL0,
177 SRC_MASK_PERIL1,
178 DIV_TOP,
179 DIV_CAM,
180 DIV_TV,
181 DIV_MFC,
182 DIV_G3D,
183 DIV_LCD,
184 DIV_ISP,
185 DIV_MAUDIO,
186 DIV_FSYS0,
187 DIV_FSYS1,
188 DIV_FSYS2,
189 DIV_PERIL0,
190 DIV_PERIL1,
191 DIV_PERIL2,
192 DIV_PERIL3,
193 DIV_PERIL4,
194 DIV_PERIL5,
195 DIV_CAM1,
196 DIV_TOP_ISP1,
197 DIV_TOP_ISP0,
198 CLKDIV2_RATIO,
199 GATE_SCLK_CAM,
200 GATE_SCLK_TV,
201 GATE_SCLK_MFC,
202 GATE_SCLK_G3D,
203 GATE_SCLK_LCD,
204 GATE_SCLK_MAUDIO,
205 GATE_SCLK_FSYS,
206 GATE_SCLK_PERIL,
207 GATE_IP_CAM,
208 GATE_IP_TV,
209 GATE_IP_MFC,
210 GATE_IP_G3D,
211 GATE_IP_LCD,
212 GATE_IP_FSYS,
213 GATE_IP_PERIL,
214 GATE_BLOCK,
215 APLL_LOCK,
216 APLL_CON0,
217 SRC_CPU,
218 DIV_CPU0,
219 DIV_CPU1,
220};
221
222static int exynos4415_clk_suspend(void)
223{
224 samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
225 ARRAY_SIZE(exynos4415_cmu_clk_regs));
226
227 return 0;
228}
229
230static void exynos4415_clk_resume(void)
231{
232 samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
233 ARRAY_SIZE(exynos4415_cmu_clk_regs));
234}
235
236static struct syscore_ops exynos4415_clk_syscore_ops = {
237 .suspend = exynos4415_clk_suspend,
238 .resume = exynos4415_clk_resume,
239};
240
241static void exynos4415_clk_sleep_init(void)
242{
243 exynos4415_clk_regs =
244 samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
245 ARRAY_SIZE(exynos4415_cmu_clk_regs));
246 if (!exynos4415_clk_regs) {
247 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
248 return;
249 }
250
251 register_syscore_ops(&exynos4415_clk_syscore_ops);
252}
253#else
254static inline void exynos4415_clk_sleep_init(void) { }
255#endif
256
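/*
 * Sketch (an assumption: mirroring the samsung_clk_save() and
 * samsung_clk_restore() helpers in drivers/clk/samsung/clk.c used by
 * the suspend/resume hooks above): each dump entry pairs a register
 * offset with the value captured at suspend, roughly:
 *
 *	for (; num_regs > 0; --num_regs, ++rd)
 *		rd->value = readl(reg_base + rd->offset);	// save
 *
 *	for (; num_regs > 0; --num_regs, ++rd)
 *		writel(rd->value, reg_base + rd->offset);	// restore
 */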
257/* list of all parent clocks */
258PNAME(mout_g3d_pllsrc_p) = { "fin_pll", };
259
260PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
261PNAME(mout_g3d_pll_p) = { "fin_pll", "fout_g3d_pll", };
262PNAME(mout_isp_pll_p) = { "fin_pll", "fout_isp_pll", };
263PNAME(mout_disp_pll_p) = { "fin_pll", "fout_disp_pll", };
264
265PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", };
266PNAME(mout_epll_p) = { "fin_pll", "fout_epll", };
267PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", };
268PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", };
269
270PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", };
271PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_g3d_pll", };
272
273PNAME(mout_gdl_p) = { "mout_mpll_user_l", };
274PNAME(mout_gdr_p) = { "mout_mpll_user_r", };
275
276PNAME(mout_aclk_266_p) = { "mout_mpll_user_t", "mout_g3d_pll", };
277
278PNAME(group_epll_g3dpll_p) = { "mout_epll", "mout_g3d_pll" };
279PNAME(group_sclk_p) = { "xxti", "xusbxti",
280 "none", "mout_isp_pll",
281 "none", "none", "div_mpll_pre",
282 "mout_epll", "mout_g3d_pll", };
283PNAME(group_spdif_p) = { "mout_audio0", "mout_audio1",
284 "mout_audio2", "spdif_extclk", };
285PNAME(group_sclk_audio2_p) = { "audiocdclk2", "none",
286 "none", "mout_isp_pll",
287 "mout_disp_pll", "xusbxti",
288 "div_mpll_pre", "mout_epll",
289 "mout_g3d_pll", };
290PNAME(group_sclk_audio1_p) = { "audiocdclk1", "none",
291 "none", "mout_isp_pll",
292 "mout_disp_pll", "xusbxti",
293 "div_mpll_pre", "mout_epll",
294 "mout_g3d_pll", };
295PNAME(group_sclk_audio0_p) = { "audiocdclk0", "none",
296 "none", "mout_isp_pll",
297 "mout_disp_pll", "xusbxti",
298 "div_mpll_pre", "mout_epll",
299 "mout_g3d_pll", };
300PNAME(group_fimc_lclk_p) = { "xxti", "xusbxti",
301 "none", "mout_isp_pll",
302 "none", "mout_disp_pll",
303 "mout_mpll_user_t", "mout_epll",
304 "mout_g3d_pll", };
305PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti",
306 "m_bitclkhsdiv4_4l", "mout_isp_pll",
307 "mout_disp_pll", "sclk_hdmiphy",
308 "div_mpll_pre", "mout_epll",
309 "mout_g3d_pll", };
310PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy" };
311PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" };
312PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" };
313PNAME(mout_jpeg_p) = { "mout_jpeg_0", "mout_jpeg_1" };
314PNAME(mout_jpeg1_p) = { "mout_epll", "mout_g3d_pll" };
315PNAME(group_aclk_isp0_300_p) = { "mout_isp_pll", "div_mpll_pre" };
316PNAME(group_aclk_isp0_400_user_p) = { "fin_pll", "div_aclk_400_mcuisp" };
317PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" };
318PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" };
319PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" };
320
321static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = {
322 /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
323 FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
324};
325
326static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
327 FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
328};
329
330static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
331 /*
332 * NOTE: Following table is sorted by register address in ascending
333 * order and then bitfield shift in descending order, as it is done
334 * in the User's Manual. When adding new entries, please make sure
335 * that the order is preserved, to avoid merge conflicts and make
336 * further work with defined data easier.
337 */
338
339 /* SRC_LEFTBUS */
340 MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p,
341 SRC_LEFTBUS, 4, 1),
342 MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1),
343
344 /* SRC_RIGHTBUS */
345 MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p,
346 SRC_RIGHTBUS, 4, 1),
347 MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1),
348
349 /* SRC_TOP0 */
350 MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
351 MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_mout_mpll_user_t_p,
352 SRC_TOP0, 24, 1),
353 MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_mout_mpll_user_t_p,
354 SRC_TOP0, 20, 1),
355 MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_mout_mpll_user_t_p,
356 SRC_TOP0, 16, 1),
357 MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p,
358 SRC_TOP0, 12, 1),
359 MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
360 SRC_TOP0, 8, 1),
361 MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_TOP0, 4, 1),
362 MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1),
363
364 /* SRC_TOP1 */
365 MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p,
366 SRC_TOP1, 28, 1),
367 MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p,
368 SRC_TOP1, 16, 1),
369 MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p,
370 SRC_TOP1, 12, 1),
371 MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp",
372 group_mout_mpll_user_t_p, SRC_TOP1, 8, 1),
373 MUX(CLK_MOUT_G3D_PLLSRC, "mout_g3d_pllsrc", mout_g3d_pllsrc_p,
374 SRC_TOP1, 0, 1),
375
376 /* SRC_CAM */
377 MUX(CLK_MOUT_CSIS1, "mout_csis1", group_fimc_lclk_p, SRC_CAM, 28, 4),
378 MUX(CLK_MOUT_CSIS0, "mout_csis0", group_fimc_lclk_p, SRC_CAM, 24, 4),
379 MUX(CLK_MOUT_CAM1, "mout_cam1", group_fimc_lclk_p, SRC_CAM, 20, 4),
380 MUX(CLK_MOUT_FIMC3_LCLK, "mout_fimc3_lclk", group_fimc_lclk_p, SRC_CAM,
381 12, 4),
382 MUX(CLK_MOUT_FIMC2_LCLK, "mout_fimc2_lclk", group_fimc_lclk_p, SRC_CAM,
383 8, 4),
384 MUX(CLK_MOUT_FIMC1_LCLK, "mout_fimc1_lclk", group_fimc_lclk_p, SRC_CAM,
385 4, 4),
386 MUX(CLK_MOUT_FIMC0_LCLK, "mout_fimc0_lclk", group_fimc_lclk_p, SRC_CAM,
387 0, 4),
388
389 /* SRC_TV */
390 MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
391
392 /* SRC_MFC */
393 MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
394 MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_g3dpll_p, SRC_MFC, 4, 1),
395 MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_mout_mpll_user_t_p, SRC_MFC, 0,
396 1),
397
398 /* SRC_G3D */
399 MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1),
400 MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_g3dpll_p, SRC_G3D, 4, 1),
401 MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_mout_mpll_user_t_p, SRC_G3D, 0,
402 1),
403
404 /* SRC_LCD */
405 MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_fimc_lclk_p, SRC_LCD, 12, 4),
406 MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4),
407
408 /* SRC_ISP */
409 MUX(CLK_MOUT_TSADC_ISP, "mout_tsadc_isp", group_fimc_lclk_p, SRC_ISP,
410 16, 4),
411 MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_fimc_lclk_p, SRC_ISP,
412 12, 4),
413 MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_fimc_lclk_p, SRC_ISP,
414 8, 4),
415 MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_fimc_lclk_p, SRC_ISP,
416 4, 4),
417 MUX(CLK_MOUT_PWM_ISP, "mout_pwm_isp", group_fimc_lclk_p, SRC_ISP,
418 0, 4),
419
420 /* SRC_MAUDIO */
421 MUX(CLK_MOUT_AUDIO0, "mout_audio0", group_sclk_audio0_p, SRC_MAUDIO,
422 0, 4),
423
424 /* SRC_FSYS */
425 MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
426 MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
427 MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
428 MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
429
430 /* SRC_PERIL0 */
431 MUX(CLK_MOUT_UART3, "mout_uart3", group_sclk_p, SRC_PERIL0, 12, 4),
432 MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
433 MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
434 MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
435
436 /* SRC_PERIL1 */
437 MUX(CLK_MOUT_SPI2, "mout_spi2", group_sclk_p, SRC_PERIL1, 24, 4),
438 MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4),
439 MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4),
440 MUX(CLK_MOUT_SPDIF, "mout_spdif", group_spdif_p, SRC_PERIL1, 8, 4),
441 MUX(CLK_MOUT_AUDIO2, "mout_audio2", group_sclk_audio2_p, SRC_PERIL1,
442 4, 4),
443 MUX(CLK_MOUT_AUDIO1, "mout_audio1", group_sclk_audio1_p, SRC_PERIL1,
444 0, 4),
445
446 /* SRC_CPU */
447 MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
448 SRC_CPU, 24, 1),
449 MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
450 MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, 0,
451 CLK_MUX_READ_ONLY),
452 MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
453 CLK_SET_RATE_PARENT, 0),
454
455 /* SRC_CAM1 */
456 MUX(CLK_MOUT_PXLASYNC_CSIS1_FIMC, "mout_pxlasync_csis1",
457 group_fimc_lclk_p, SRC_CAM1, 20, 1),
458 MUX(CLK_MOUT_PXLASYNC_CSIS0_FIMC, "mout_pxlasync_csis0",
459 group_fimc_lclk_p, SRC_CAM1, 16, 1),
460 MUX(CLK_MOUT_JPEG, "mout_jpeg", mout_jpeg_p, SRC_CAM1, 8, 1),
461 MUX(CLK_MOUT_JPEG1, "mout_jpeg_1", mout_jpeg1_p, SRC_CAM1, 4, 1),
462 MUX(CLK_MOUT_JPEG0, "mout_jpeg_0", group_mout_mpll_user_t_p, SRC_CAM1,
463 0, 1),
464
465 /* SRC_TOP_ISP0 */
466 MUX(CLK_MOUT_ACLK_ISP0_300, "mout_aclk_isp0_300",
467 group_aclk_isp0_300_p, SRC_TOP_ISP0, 8, 1),
468 MUX(CLK_MOUT_ACLK_ISP0_400, "mout_aclk_isp0_400_user",
469 group_aclk_isp0_400_user_p, SRC_TOP_ISP0, 4, 1),
470 MUX(CLK_MOUT_ACLK_ISP0_300_USER, "mout_aclk_isp0_300_user",
471 group_aclk_isp0_300_user_p, SRC_TOP_ISP0, 0, 1),
472
473 /* SRC_TOP_ISP1 */
474 MUX(CLK_MOUT_ACLK_ISP1_300, "mout_aclk_isp1_300",
475 group_aclk_isp0_300_p, SRC_TOP_ISP1, 4, 1),
476 MUX(CLK_MOUT_ACLK_ISP1_300_USER, "mout_aclk_isp1_300_user",
477 group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1),
478};
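
Each MUX(id, name, parents, reg, shift, width) entry above describes one parent-select bitfield: the generic mux code reads width bits at bit shift of register reg and uses the value as an index into the parents array. A minimal sketch of that decode (the helper below is illustrative, not part of the driver):

/* Illustrative only: how a MUX(..., reg, shift, width) entry selects its
 * parent; the actual decode lives in the common clock mux code. */
static unsigned int mux_parent_index(unsigned int reg_val,
				     unsigned int shift, unsigned int width)
{
	return (reg_val >> shift) & ((1U << width) - 1);
}

/* e.g. for MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
 * bit 8 of SRC_MFC picks between the two mout_mfc_* parents:
 * index = mux_parent_index(readl(reg_base + SRC_MFC), 8, 1); */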
479
480static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
481 /*
482 * NOTE: The following table is sorted by register address in ascending
483 * order and then by bitfield shift in descending order, as in the
484 * User's Manual. When adding new entries, please make sure that the
485 * order is preserved, to avoid merge conflicts and to make further
486 * work with the defined data easier.
487 */
488
489 /* DIV_LEFTBUS */
490 DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
491 DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4),
492
493 /* DIV_RIGHTBUS */
494 DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
495 DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4),
496
497 /* DIV_TOP */
498 DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp",
499 "mout_aclk_400_mcuisp", DIV_TOP, 24, 3),
500 DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3),
501 DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3),
502 DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3),
503 DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4),
504 DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3),
505
506 /* DIV_CAM */
507 DIV(CLK_DIV_CSIS1, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
508 DIV(CLK_DIV_CSIS0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
509 DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
510 DIV(CLK_DIV_FIMC3_LCLK, "div_fimc3_lclk", "mout_fimc3_lclk", DIV_CAM,
511 12, 4),
512 DIV(CLK_DIV_FIMC2_LCLK, "div_fimc2_lclk", "mout_fimc2_lclk", DIV_CAM,
513 8, 4),
514 DIV(CLK_DIV_FIMC1_LCLK, "div_fimc1_lclk", "mout_fimc1_lclk", DIV_CAM,
515 4, 4),
516 DIV(CLK_DIV_FIMC0_LCLK, "div_fimc0_lclk", "mout_fimc0_lclk", DIV_CAM,
517 0, 4),
518
519 /* DIV_TV */
520 DIV(CLK_DIV_TV_BLK, "div_tv_blk", "mout_g3d_pll", DIV_TV, 0, 4),
521
522 /* DIV_MFC */
523 DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4),
524
525 /* DIV_G3D */
526 DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4),
527
528 /* DIV_LCD */
529 DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4,
530 CLK_SET_RATE_PARENT, 0),
531 DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4),
532 DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4),
533
534 /* DIV_ISP */
535 DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4),
536 DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp",
537 DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0),
538 DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
539 DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
540 DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
541 DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4),
542 DIV(CLK_DIV_PWM_ISP, "div_pwm_isp", "mout_pwm_isp", DIV_ISP, 0, 4),
543
544 /* DIV_MAUDIO */
545 DIV(CLK_DIV_PCM0, "div_pcm0", "div_audio0", DIV_MAUDIO, 4, 8),
546 DIV(CLK_DIV_AUDIO0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
547
548 /* DIV_FSYS0 */
549 DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
550 CLK_SET_RATE_PARENT, 0),
551 DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4),
552
553 /* DIV_FSYS1 */
554 DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8,
555 CLK_SET_RATE_PARENT, 0),
556 DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
557 DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8,
558 CLK_SET_RATE_PARENT, 0),
559 DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
560
561 /* DIV_FSYS2 */
562 DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
563 CLK_SET_RATE_PARENT, 0),
564 DIV_F(CLK_DIV_MMC2, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4,
565 CLK_SET_RATE_PARENT, 0),
566
567 /* DIV_PERIL0 */
568 DIV(CLK_DIV_UART3, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
569 DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
570 DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
571 DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
572
573 /* DIV_PERIL1 */
574 DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8,
575 CLK_SET_RATE_PARENT, 0),
576 DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
577 DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8,
578 CLK_SET_RATE_PARENT, 0),
579 DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
580
581 /* DIV_PERIL2 */
582 DIV_F(CLK_DIV_SPI2_PRE, "div_spi2_pre", "div_spi2", DIV_PERIL2, 8, 8,
583 CLK_SET_RATE_PARENT, 0),
584 DIV(CLK_DIV_SPI2, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
585
586 /* DIV_PERIL4 */
587 DIV(CLK_DIV_PCM2, "div_pcm2", "div_audio2", DIV_PERIL4, 20, 8),
588 DIV(CLK_DIV_AUDIO2, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
589 DIV(CLK_DIV_PCM1, "div_pcm1", "div_audio1", DIV_PERIL4, 4, 8),
590 DIV(CLK_DIV_AUDIO1, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
591
592 /* DIV_PERIL5 */
593 DIV(CLK_DIV_I2S1, "div_i2s1", "div_audio1", DIV_PERIL5, 0, 6),
594
595 /* DIV_CAM1 */
596 DIV(CLK_DIV_PXLASYNC_CSIS1_FIMC, "div_pxlasync_csis1_fimc",
597 "mout_pxlasync_csis1", DIV_CAM1, 24, 4),
598 DIV(CLK_DIV_PXLASYNC_CSIS0_FIMC, "div_pxlasync_csis0_fimc",
599 "mout_pxlasync_csis0", DIV_CAM1, 20, 4),
600 DIV(CLK_DIV_JPEG, "div_jpeg", "mout_jpeg", DIV_CAM1, 0, 4),
601
602 /* DIV_CPU0 */
603 DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
604 DIV_F(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3,
605 CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
606 DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3),
607 DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3),
608 DIV(CLK_DIV_PERIPH, "div_periph", "div_core2", DIV_CPU0, 12, 3),
609 DIV(CLK_DIV_COREM1, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
610 DIV(CLK_DIV_COREM0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
611 DIV_F(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3,
612 CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
613
614 /* DIV_CPU1 */
615 DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
616 DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
617};
618
619static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
620 /*
621 * NOTE: The following table is sorted by register address in ascending
622 * order and then by bitfield shift in descending order, as in the
623 * User's Manual. When adding new entries, please make sure that the
624 * order is preserved, to avoid merge conflicts and to make further
625 * work with the defined data easier.
626 */
627
628 /* GATE_IP_LEFTBUS */
629 GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6,
630 CLK_IGNORE_UNUSED, 0),
631 GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4,
632 CLK_IGNORE_UNUSED, 0),
633 GATE(CLK_ASYNC_TVX, "async_tvx", "div_aclk_100", GATE_IP_LEFTBUS, 3,
634 CLK_IGNORE_UNUSED, 0),
635 GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1,
636 CLK_IGNORE_UNUSED, 0),
637 GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0,
638 CLK_IGNORE_UNUSED, 0),
639
640 /* GATE_IP_IMAGE */
641 GATE(CLK_PPMUIMAGE, "ppmuimage", "div_aclk_100", GATE_IP_IMAGE,
642 9, 0, 0),
643 GATE(CLK_QEMDMA2, "qe_mdma2", "div_aclk_100", GATE_IP_IMAGE,
644 8, 0, 0),
645 GATE(CLK_QEROTATOR, "qe_rotator", "div_aclk_100", GATE_IP_IMAGE,
646 7, 0, 0),
647 GATE(CLK_SMMUMDMA2, "smmu_mdma2", "div_aclk_100", GATE_IP_IMAGE,
648 5, 0, 0),
649 GATE(CLK_SMMUROTATOR, "smmu_rotator", "div_aclk_100", GATE_IP_IMAGE,
650 4, 0, 0),
651 GATE(CLK_MDMA2, "mdma2", "div_aclk_100", GATE_IP_IMAGE, 2, 0, 0),
652 GATE(CLK_ROTATOR, "rotator", "div_aclk_100", GATE_IP_IMAGE, 1, 0, 0),
653
654 /* GATE_IP_RIGHTBUS */
655 GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100",
656 GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0),
657 GATE(CLK_ASYNC_MAUDIOX, "async_maudiox", "div_aclk_100",
658 GATE_IP_RIGHTBUS, 7, CLK_IGNORE_UNUSED, 0),
659 GATE(CLK_ASYNC_MFCR, "async_mfcr", "div_aclk_100",
660 GATE_IP_RIGHTBUS, 6, CLK_IGNORE_UNUSED, 0),
661 GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100",
662 GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0),
663 GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100",
664 GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0),
665 GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100",
666 GATE_IP_RIGHTBUS, 2, CLK_IGNORE_UNUSED, 0),
667 GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100",
668 GATE_IP_RIGHTBUS, 1, CLK_IGNORE_UNUSED, 0),
669 GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100",
670 GATE_IP_RIGHTBUS, 0, CLK_IGNORE_UNUSED, 0),
671
672 /* GATE_IP_PERIR */
673 GATE(CLK_ANTIRBK_APBIF, "antirbk_apbif", "div_aclk_100",
674 GATE_IP_PERIR, 24, CLK_IGNORE_UNUSED, 0),
675 GATE(CLK_EFUSE_WRITER_APBIF, "efuse_writer_apbif", "div_aclk_100",
676 GATE_IP_PERIR, 23, CLK_IGNORE_UNUSED, 0),
677 GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22,
678 CLK_IGNORE_UNUSED, 0),
679 GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21,
680 CLK_IGNORE_UNUSED, 0),
681 GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100",
682 GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0),
683 GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100",
684 GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0),
685 GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18,
686 CLK_IGNORE_UNUSED, 0),
687 GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100",
688 GATE_IP_PERIR, 17, 0, 0),
689 GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0),
690 GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0),
691 GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0),
692 GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0),
693 GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12,
694 CLK_IGNORE_UNUSED, 0),
695 GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk_100", GATE_IP_PERIR, 11,
696 CLK_IGNORE_UNUSED, 0),
697 GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10,
698 CLK_IGNORE_UNUSED, 0),
699 GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9,
700 CLK_IGNORE_UNUSED, 0),
701 GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8,
702 CLK_IGNORE_UNUSED, 0),
703 GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7,
704 CLK_IGNORE_UNUSED, 0),
705 GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6,
706 CLK_IGNORE_UNUSED, 0),
707 GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5,
708 CLK_IGNORE_UNUSED, 0),
709 GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4,
710 CLK_IGNORE_UNUSED, 0),
711 GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3,
712 CLK_IGNORE_UNUSED, 0),
713 GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2,
714 CLK_IGNORE_UNUSED, 0),
715 GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1,
716 CLK_IGNORE_UNUSED, 0),
717 GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0,
718 CLK_IGNORE_UNUSED, 0),
719
720 /* GATE_SCLK_CAM - incomplete */
721 GATE(CLK_SCLK_PXLAYSNC_CSIS1_FIMC, "sclk_pxlasync_csis1_fimc",
722 "div_pxlasync_csis1_fimc", GATE_SCLK_CAM, 11,
723 CLK_SET_RATE_PARENT, 0),
724 GATE(CLK_SCLK_PXLAYSNC_CSIS0_FIMC, "sclk_pxlasync_csis0_fimc",
725 "div_pxlasync_csis0_fimc", GATE_SCLK_CAM,
726 10, CLK_SET_RATE_PARENT, 0),
727 GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
728 GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0),
729 GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1",
730 GATE_SCLK_CAM, 7, CLK_SET_RATE_PARENT, 0),
731 GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0",
732 GATE_SCLK_CAM, 6, CLK_SET_RATE_PARENT, 0),
733 GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
734 GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0),
735 GATE(CLK_SCLK_FIMC3_LCLK, "sclk_fimc3_lclk", "div_fimc3_lclk",
736 GATE_SCLK_CAM, 3, CLK_SET_RATE_PARENT, 0),
737 GATE(CLK_SCLK_FIMC2_LCLK, "sclk_fimc2_lclk", "div_fimc2_lclk",
738 GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0),
739 GATE(CLK_SCLK_FIMC1_LCLK, "sclk_fimc1_lclk", "div_fimc1_lclk",
740 GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0),
741 GATE(CLK_SCLK_FIMC0_LCLK, "sclk_fimc0_lclk", "div_fimc0_lclk",
742 GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0),
743
744 /* GATE_SCLK_TV */
745 GATE(CLK_SCLK_PIXEL, "sclk_pixel", "div_tv_blk",
746 GATE_SCLK_TV, 3, CLK_SET_RATE_PARENT, 0),
747 GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
748 GATE_SCLK_TV, 2, CLK_SET_RATE_PARENT, 0),
749 GATE(CLK_SCLK_MIXER, "sclk_mixer", "div_tv_blk",
750 GATE_SCLK_TV, 0, CLK_SET_RATE_PARENT, 0),
751
752 /* GATE_SCLK_MFC */
753 GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc",
754 GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0),
755
756 /* GATE_SCLK_G3D */
757 GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d",
758 GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0),
759
760 /* GATE_SCLK_LCD */
761 GATE(CLK_SCLK_MIPIDPHY4L, "sclk_mipidphy4l", "div_mipi0",
762 GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0),
763 GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre",
764 GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0),
765 GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_fimd0",
766 GATE_SCLK_LCD, 1, CLK_SET_RATE_PARENT, 0),
767 GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0",
768 GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0),
769
770 /* GATE_SCLK_MAUDIO */
771 GATE(CLK_SCLK_PCM0, "sclk_pcm0", "div_pcm0",
772 GATE_SCLK_MAUDIO, 1, CLK_SET_RATE_PARENT, 0),
773 GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
774 GATE_SCLK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0),
775
776 /* GATE_SCLK_FSYS */
777 GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre",
778 GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
779 GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
780 GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
781 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
782 GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
783 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
784 GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
785 GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
786 GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
787
788 /* GATE_SCLK_PERIL */
789 GATE(CLK_SCLK_I2S, "sclk_i2s1", "div_i2s1",
790 GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0),
791 GATE(CLK_SCLK_PCM2, "sclk_pcm2", "div_pcm2",
792 GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0),
793 GATE(CLK_SCLK_PCM1, "sclk_pcm1", "div_pcm1",
794 GATE_SCLK_PERIL, 15, CLK_SET_RATE_PARENT, 0),
795 GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
796 GATE_SCLK_PERIL, 14, CLK_SET_RATE_PARENT, 0),
797 GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
798 GATE_SCLK_PERIL, 13, CLK_SET_RATE_PARENT, 0),
799 GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
800 GATE_SCLK_PERIL, 10, CLK_SET_RATE_PARENT, 0),
801 GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi2_pre",
802 GATE_SCLK_PERIL, 8, CLK_SET_RATE_PARENT, 0),
803 GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre",
804 GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
805 GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
806 GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
807 GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
808 GATE_SCLK_PERIL, 3, CLK_SET_RATE_PARENT, 0),
809 GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
810 GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
811 GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
812 GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
813 GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
814 GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0),
815
816 /* GATE_IP_CAM */
817 GATE(CLK_SMMUFIMC_LITE2, "smmufimc_lite2", "div_aclk_160", GATE_IP_CAM,
818 22, CLK_IGNORE_UNUSED, 0),
819 GATE(CLK_FIMC_LITE2, "fimc_lite2", "div_aclk_160", GATE_IP_CAM,
820 20, CLK_IGNORE_UNUSED, 0),
821 GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_aclk_160", GATE_IP_CAM,
822 18, CLK_IGNORE_UNUSED, 0),
823 GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_aclk_160", GATE_IP_CAM,
824 17, CLK_IGNORE_UNUSED, 0),
825 GATE(CLK_PPMUCAMIF, "ppmucamif", "div_aclk_160", GATE_IP_CAM,
826 16, CLK_IGNORE_UNUSED, 0),
827 GATE(CLK_SMMUJPEG, "smmujpeg", "div_aclk_160", GATE_IP_CAM, 11, 0, 0),
828 GATE(CLK_SMMUFIMC3, "smmufimc3", "div_aclk_160", GATE_IP_CAM, 10, 0, 0),
829 GATE(CLK_SMMUFIMC2, "smmufimc2", "div_aclk_160", GATE_IP_CAM, 9, 0, 0),
830 GATE(CLK_SMMUFIMC1, "smmufimc1", "div_aclk_160", GATE_IP_CAM, 8, 0, 0),
831 GATE(CLK_SMMUFIMC0, "smmufimc0", "div_aclk_160", GATE_IP_CAM, 7, 0, 0),
832 GATE(CLK_JPEG, "jpeg", "div_aclk_160", GATE_IP_CAM, 6, 0, 0),
833 GATE(CLK_CSIS1, "csis1", "div_aclk_160", GATE_IP_CAM, 5, 0, 0),
834 GATE(CLK_CSIS0, "csis0", "div_aclk_160", GATE_IP_CAM, 4, 0, 0),
835 GATE(CLK_FIMC3, "fimc3", "div_aclk_160", GATE_IP_CAM, 3, 0, 0),
836 GATE(CLK_FIMC2, "fimc2", "div_aclk_160", GATE_IP_CAM, 2, 0, 0),
837 GATE(CLK_FIMC1, "fimc1", "div_aclk_160", GATE_IP_CAM, 1, 0, 0),
838 GATE(CLK_FIMC0, "fimc0", "div_aclk_160", GATE_IP_CAM, 0, 0, 0),
839
840 /* GATE_IP_TV */
841 GATE(CLK_PPMUTV, "ppmutv", "div_aclk_100", GATE_IP_TV, 5, 0, 0),
842 GATE(CLK_SMMUTV, "smmutv", "div_aclk_100", GATE_IP_TV, 4, 0, 0),
843 GATE(CLK_HDMI, "hdmi", "div_aclk_100", GATE_IP_TV, 3, 0, 0),
844 GATE(CLK_MIXER, "mixer", "div_aclk_100", GATE_IP_TV, 1, 0, 0),
845 GATE(CLK_VP, "vp", "div_aclk_100", GATE_IP_TV, 0, 0, 0),
846
847 /* GATE_IP_MFC */
848 GATE(CLK_PPMUMFC_R, "ppmumfc_r", "div_aclk_200", GATE_IP_MFC, 4,
849 CLK_IGNORE_UNUSED, 0),
850 GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3,
851 CLK_IGNORE_UNUSED, 0),
852 GATE(CLK_SMMUMFC_R, "smmumfc_r", "div_aclk_200", GATE_IP_MFC, 2, 0, 0),
853 GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0),
854 GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0),
855
856 /* GATE_IP_G3D */
857 GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1,
858 CLK_IGNORE_UNUSED, 0),
859 GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0),
860
861 /* GATE_IP_LCD */
862 GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5,
863 CLK_IGNORE_UNUSED, 0),
864 GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0),
865 GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0),
866 GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0),
867 GATE(CLK_MIE0, "mie0", "div_aclk_160", GATE_IP_LCD, 1, 0, 0),
868 GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0),
869
870 /* GATE_IP_FSYS */
871 GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0),
872 GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17,
873 CLK_IGNORE_UNUSED, 0),
874 GATE(CLK_NFCON, "nfcon", "div_aclk_200", GATE_IP_FSYS, 16, 0, 0),
875 GATE(CLK_USBDEVICE, "usbdevice", "div_aclk_200", GATE_IP_FSYS, 13,
876 0, 0),
877 GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
878 GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
879 GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
880 GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
881 GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
882 GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
883 GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0),
884
885 /* GATE_IP_PERIL */
886 GATE(CLK_SPDIF, "spdif", "div_aclk_100", GATE_IP_PERIL, 26, 0, 0),
887 GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0),
888 GATE(CLK_PCM2, "pcm2", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0),
889 GATE(CLK_PCM1, "pcm1", "div_aclk_100", GATE_IP_PERIL, 22, 0, 0),
890 GATE(CLK_I2S1, "i2s1", "div_aclk_100", GATE_IP_PERIL, 20, 0, 0),
891 GATE(CLK_SPI2, "spi2", "div_aclk_100", GATE_IP_PERIL, 18, 0, 0),
892 GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0),
893 GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0),
894 GATE(CLK_I2CHDMI, "i2chdmi", "div_aclk_100", GATE_IP_PERIL, 14, 0, 0),
895 GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0),
896 GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0),
897 GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0),
898 GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0),
899 GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0),
900 GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
901 GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
902 GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
903 GATE(CLK_UART3, "uart3", "div_aclk_100", GATE_IP_PERIL, 3, 0, 0),
904 GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
905 GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
906 GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
907};
908
909/*
910 * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL
911 */
912static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
913 PLL_35XX_RATE(1600000000, 400, 3, 1),
914 PLL_35XX_RATE(1500000000, 250, 2, 1),
915 PLL_35XX_RATE(1400000000, 175, 3, 0),
916 PLL_35XX_RATE(1300000000, 325, 3, 1),
917 PLL_35XX_RATE(1200000000, 400, 4, 1),
918 PLL_35XX_RATE(1100000000, 275, 3, 1),
919 PLL_35XX_RATE(1066000000, 533, 6, 1),
920 PLL_35XX_RATE(1000000000, 250, 3, 1),
921 PLL_35XX_RATE(960000000, 320, 4, 1),
922 PLL_35XX_RATE(900000000, 300, 4, 1),
923 PLL_35XX_RATE(850000000, 425, 6, 1),
924 PLL_35XX_RATE(800000000, 200, 3, 1),
925 PLL_35XX_RATE(700000000, 175, 3, 1),
926 PLL_35XX_RATE(667000000, 667, 12, 1),
927 PLL_35XX_RATE(600000000, 400, 4, 2),
928 PLL_35XX_RATE(550000000, 275, 3, 2),
929 PLL_35XX_RATE(533000000, 533, 6, 2),
930 PLL_35XX_RATE(520000000, 260, 3, 2),
931 PLL_35XX_RATE(500000000, 250, 3, 2),
932 PLL_35XX_RATE(440000000, 220, 3, 2),
933 PLL_35XX_RATE(400000000, 200, 3, 2),
934 PLL_35XX_RATE(350000000, 175, 3, 2),
935 PLL_35XX_RATE(300000000, 300, 3, 3),
936 PLL_35XX_RATE(266000000, 266, 3, 3),
937 PLL_35XX_RATE(200000000, 200, 3, 3),
938 PLL_35XX_RATE(160000000, 160, 3, 3),
939 PLL_35XX_RATE(100000000, 200, 3, 4),
940 { /* sentinel */ }
941};
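
As a sanity check on the (rate, m, p, s) tuples above: for pll_35xx-type PLLs the common Samsung PLL code derives the output as FOUT = FIN * m / (p << s). A small standalone sketch, assuming the usual 24 MHz fin_pll on these boards:

#include <stdio.h>

/* Illustrative only: mirrors the pll_35xx output formula,
 * fout = fin * mdiv / (pdiv << sdiv). */
static unsigned long pll35xx_rate(unsigned long fin, unsigned int m,
				  unsigned int p, unsigned int s)
{
	return (unsigned long)((unsigned long long)fin * m / (p << s));
}

int main(void)
{
	/* PLL_35XX_RATE(1600000000, 400, 3, 1) from the table above */
	printf("%lu\n", pll35xx_rate(24000000UL, 400, 3, 1)); /* 1600000000 */
	return 0;
}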
942
943/* EPLL */
944static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
945 PLL_36XX_RATE(800000000, 200, 3, 1, 0),
946 PLL_36XX_RATE(288000000, 96, 2, 2, 0),
947 PLL_36XX_RATE(192000000, 128, 2, 3, 0),
948 PLL_36XX_RATE(144000000, 96, 2, 3, 0),
949 PLL_36XX_RATE(96000000, 128, 2, 4, 0),
950 PLL_36XX_RATE(84000000, 112, 2, 4, 0),
951 PLL_36XX_RATE(80750011, 107, 2, 4, 43691),
952 PLL_36XX_RATE(73728004, 98, 2, 4, 19923),
953 PLL_36XX_RATE(67987602, 271, 3, 5, 62285),
954 PLL_36XX_RATE(65911004, 175, 2, 5, 49982),
955 PLL_36XX_RATE(50000000, 200, 3, 5, 0),
956 PLL_36XX_RATE(49152003, 131, 2, 5, 4719),
957 PLL_36XX_RATE(48000000, 128, 2, 5, 0),
958 PLL_36XX_RATE(45250000, 181, 3, 5, 0),
959 { /* sentinel */ }
960};
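
The EPLL table uses the fractional pll_36xx variant: the fifth argument K adds K/65536 to the multiplier, i.e. FOUT = FIN * (m + k/65536) / (p << s), which is how the odd audio rates are reached. A sketch of the same arithmetic (again assuming a 24 MHz fin_pll; the listed rates are rounded):

/* Illustrative only: pll_36xx adds a 16-bit fractional multiplier k/65536. */
static unsigned long pll36xx_rate(unsigned long fin, unsigned int m,
				  unsigned int p, unsigned int s,
				  unsigned int k)
{
	unsigned long long fout = (unsigned long long)fin * ((m << 16) + k);

	return (unsigned long)(fout / (((unsigned long long)p << s) << 16));
}

/* PLL_36XX_RATE(49152003, 131, 2, 5, 4719):
 * 24000000 * (131 + 4719/65536) / (2 << 5) ~= 49152002 Hz */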
961
962static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
963 [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
964 APLL_LOCK, APLL_CON0, NULL),
965 [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
966 EPLL_LOCK, EPLL_CON0, NULL),
967 [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
968 "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
969 [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
970 ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
971 [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
972 "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
973};
974
975static void __init exynos4415_cmu_init(struct device_node *np)
976{
977 void __iomem *reg_base;
978
979 reg_base = of_iomap(np, 0);
980 if (!reg_base)
981 panic("%s: failed to map registers\n", __func__);
982
983 exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
984 if (!exynos4415_ctx)
985 panic("%s: unable to allocate context.\n", __func__);
986
987 exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
988 exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
989 exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
990 exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
991 exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
992
993 samsung_clk_register_fixed_factor(exynos4415_ctx,
994 exynos4415_fixed_factor_clks,
995 ARRAY_SIZE(exynos4415_fixed_factor_clks));
996 samsung_clk_register_fixed_rate(exynos4415_ctx,
997 exynos4415_fixed_rate_clks,
998 ARRAY_SIZE(exynos4415_fixed_rate_clks));
999
1000 samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
1001 ARRAY_SIZE(exynos4415_plls), reg_base);
1002 samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
1003 ARRAY_SIZE(exynos4415_mux_clks));
1004 samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
1005 ARRAY_SIZE(exynos4415_div_clks));
1006 samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
1007 ARRAY_SIZE(exynos4415_gate_clks));
1008
1009 exynos4415_clk_sleep_init();
1010
1011 samsung_clk_of_add_provider(np, exynos4415_ctx);
1012}
1013CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
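
Once the provider is registered through CLK_OF_DECLARE, peripheral drivers obtain the leaf clocks via the standard consumer API. A hypothetical consumer sketch (the "sclk_mmc0" connection name is illustrative; it depends on the clock-names in the consumer's DT node):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer -- not part of this patch. */
static int example_enable_mmc_clock(struct device *dev)
{
	struct clk *sclk;
	int ret;

	sclk = devm_clk_get(dev, "sclk_mmc0");	/* name from consumer DT */
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	ret = clk_prepare_enable(sclk);
	if (ret)
		return ret;

	/* CLK_SET_RATE_PARENT on the sclk gate and pre-dividers above lets
	 * this request propagate up the div_mmc0_pre/div_mmc0 chain. */
	return clk_set_rate(sclk, 50000000);
}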
1014
1015/*
1016 * CMU DMC
1017 */
1018
1019#define MPLL_LOCK 0x008
1020#define MPLL_CON0 0x108
1021#define MPLL_CON1 0x10c
1022#define MPLL_CON2 0x110
1023#define BPLL_LOCK 0x118
1024#define BPLL_CON0 0x218
1025#define BPLL_CON1 0x21c
1026#define BPLL_CON2 0x220
1027#define SRC_DMC 0x300
1028#define DIV_DMC1 0x504
1029
1030enum exynos4415_dmc_plls {
1031 mpll, bpll,
1032 nr_dmc_plls,
1033};
1034
1035static struct samsung_clk_provider *exynos4415_dmc_ctx;
1036
1037#ifdef CONFIG_PM_SLEEP
1038static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
1039
1040static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
1041 MPLL_LOCK,
1042 MPLL_CON0,
1043 MPLL_CON1,
1044 MPLL_CON2,
1045 BPLL_LOCK,
1046 BPLL_CON0,
1047 BPLL_CON1,
1048 BPLL_CON2,
1049 SRC_DMC,
1050 DIV_DMC1,
1051};
1052
1053static int exynos4415_dmc_clk_suspend(void)
1054{
1055 samsung_clk_save(exynos4415_dmc_ctx->reg_base,
1056 exynos4415_dmc_clk_regs,
1057 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1058 return 0;
1059}
1060
1061static void exynos4415_dmc_clk_resume(void)
1062{
1063 samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
1064 exynos4415_dmc_clk_regs,
1065 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1066}
1067
1068static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
1069 .suspend = exynos4415_dmc_clk_suspend,
1070 .resume = exynos4415_dmc_clk_resume,
1071};
1072
1073static void exynos4415_dmc_clk_sleep_init(void)
1074{
1075 exynos4415_dmc_clk_regs =
1076 samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
1077 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1078 if (!exynos4415_dmc_clk_regs) {
1079 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
1080 return;
1081 }
1082
1083 register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
1084}
1085#else
1086static inline void exynos4415_dmc_clk_sleep_init(void) { }
1087#endif /* CONFIG_PM_SLEEP */
1088
1089PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
1090PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
1091PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", };
1092
1093static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata = {
1094 MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1),
1095 MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
1096 MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1),
1097 MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1),
1098};
1099
1100static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
1101 DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
1102 DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
1103 DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus",
1104 DIV_DMC1, 19, 2),
1105 DIV(CLK_DMC_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3),
1106 DIV(CLK_DMC_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
1107 DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
1108};
1109
1110static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
1111 [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
1112 MPLL_LOCK, MPLL_CON0, NULL),
1113 [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
1114 BPLL_LOCK, BPLL_CON0, NULL),
1115};
1116
1117static void __init exynos4415_cmu_dmc_init(struct device_node *np)
1118{
1119 void __iomem *reg_base;
1120
1121 reg_base = of_iomap(np, 0);
1122 if (!reg_base)
1123 panic("%s: failed to map registers\n", __func__);
1124
1125 exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
1126 if (!exynos4415_dmc_ctx)
1127 panic("%s: unable to allocate context.\n", __func__);
1128
1129 exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
1130 exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
1131
1132 samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
1133 ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
1134 samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
1135 ARRAY_SIZE(exynos4415_dmc_mux_clks));
1136 samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
1137 ARRAY_SIZE(exynos4415_dmc_div_clks));
1138
1139 exynos4415_dmc_clk_sleep_init();
1140
1141 samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
1142}
1143CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
1144 exynos4415_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 2527e39aadcf..e2e5193d1049 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -11,10 +11,8 @@
 
 #include <linux/clk.h>
 #include <linux/clkdev.h>
-#include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/syscore_ops.h>
 
 #include "clk-exynos5260.h"
 #include "clk.h"
@@ -22,39 +20,6 @@
 
 #include <dt-bindings/clock/exynos5260-clk.h>
 
-static LIST_HEAD(clock_reg_cache_list);
-
-struct exynos5260_clock_reg_cache {
-	struct list_head node;
-	void __iomem *reg_base;
-	struct samsung_clk_reg_dump *rdump;
-	unsigned int rd_num;
-};
-
-struct exynos5260_cmu_info {
-	/* list of pll clocks and respective count */
-	struct samsung_pll_clock *pll_clks;
-	unsigned int nr_pll_clks;
-	/* list of mux clocks and respective count */
-	struct samsung_mux_clock *mux_clks;
-	unsigned int nr_mux_clks;
-	/* list of div clocks and respective count */
-	struct samsung_div_clock *div_clks;
-	unsigned int nr_div_clks;
-	/* list of gate clocks and respective count */
-	struct samsung_gate_clock *gate_clks;
-	unsigned int nr_gate_clks;
-	/* list of fixed clocks and respective count */
-	struct samsung_fixed_rate_clock *fixed_clks;
-	unsigned int nr_fixed_clks;
-	/* total number of clocks with IDs assigned*/
-	unsigned int nr_clk_ids;
-
-	/* list and number of clocks registers */
-	unsigned long *clk_regs;
-	unsigned int nr_clk_regs;
-};
-
 /*
  * Applicable for all 2550 Type PLLS for Exynos5260, listed below
  * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
@@ -113,104 +78,6 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
 	PLL_36XX_RATE(66000000, 176, 2, 5, 0),
 };
 
-#ifdef CONFIG_PM_SLEEP
-
-static int exynos5260_clk_suspend(void)
-{
-	struct exynos5260_clock_reg_cache *cache;
-
-	list_for_each_entry(cache, &clock_reg_cache_list, node)
-		samsung_clk_save(cache->reg_base, cache->rdump,
-				cache->rd_num);
-
-	return 0;
-}
-
-static void exynos5260_clk_resume(void)
-{
-	struct exynos5260_clock_reg_cache *cache;
-
-	list_for_each_entry(cache, &clock_reg_cache_list, node)
-		samsung_clk_restore(cache->reg_base, cache->rdump,
-				cache->rd_num);
-}
-
-static struct syscore_ops exynos5260_clk_syscore_ops = {
-	.suspend = exynos5260_clk_suspend,
-	.resume = exynos5260_clk_resume,
-};
-
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
-			unsigned long *rdump,
-			unsigned long nr_rdump)
-{
-	struct exynos5260_clock_reg_cache *reg_cache;
-
-	reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache),
-			GFP_KERNEL);
-	if (!reg_cache)
-		panic("could not allocate register cache.\n");
-
-	reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
-
-	if (!reg_cache->rdump)
-		panic("could not allocate register dump storage.\n");
-
-	if (list_empty(&clock_reg_cache_list))
-		register_syscore_ops(&exynos5260_clk_syscore_ops);
-
-	reg_cache->rd_num = nr_rdump;
-	reg_cache->reg_base = reg_base;
-	list_add_tail(&reg_cache->node, &clock_reg_cache_list);
-}
-
-#else
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
-			unsigned long *rdump,
-			unsigned long nr_rdump){}
-#endif
-
-/*
- * Common function which registers plls, muxes, dividers and gates
- * for each CMU. It also add CMU register list to register cache.
- */
-
-void __init exynos5260_cmu_register_one(struct device_node *np,
-		struct exynos5260_cmu_info *cmu)
-{
-	void __iomem *reg_base;
-	struct samsung_clk_provider *ctx;
-
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-	if (!ctx)
-		panic("%s: unable to alllocate ctx\n", __func__);
-
-	if (cmu->pll_clks)
-		samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
-			reg_base);
-	if (cmu->mux_clks)
-		samsung_clk_register_mux(ctx, cmu->mux_clks,
-			cmu->nr_mux_clks);
-	if (cmu->div_clks)
-		samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
-	if (cmu->gate_clks)
-		samsung_clk_register_gate(ctx, cmu->gate_clks,
-			cmu->nr_gate_clks);
-	if (cmu->fixed_clks)
-		samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
-			cmu->nr_fixed_clks);
-	if (cmu->clk_regs)
-		exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
-			cmu->nr_clk_regs);
-
-	samsung_clk_of_add_provider(np, ctx);
-}
-
-
 /* CMU_AUD */
 
 static unsigned long aud_clk_regs[] __initdata = {
@@ -268,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_aud_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = aud_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
@@ -280,7 +147,7 @@ static void __init exynos5260_clk_aud_init(struct device_node *np)
 	cmu.clk_regs = aud_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
@@ -458,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_disp_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = disp_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
@@ -470,7 +337,7 @@ static void __init exynos5260_clk_disp_init(struct device_node *np)
 	cmu.clk_regs = disp_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
@@ -522,7 +389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_egl_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.pll_clks = egl_pll_clks;
 	cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks);
@@ -534,7 +401,7 @@ static void __init exynos5260_clk_egl_init(struct device_node *np)
 	cmu.clk_regs = egl_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
@@ -624,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_fsys_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = fsys_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
@@ -634,7 +501,7 @@ static void __init exynos5260_clk_fsys_init(struct device_node *np)
 	cmu.clk_regs = fsys_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
@@ -713,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_g2d_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = g2d_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
@@ -725,7 +592,7 @@ static void __init exynos5260_clk_g2d_init(struct device_node *np)
 	cmu.clk_regs = g2d_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
@@ -774,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_g3d_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.pll_clks = g3d_pll_clks;
 	cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks);
@@ -788,7 +655,7 @@ static void __init exynos5260_clk_g3d_init(struct device_node *np)
 	cmu.clk_regs = g3d_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
@@ -909,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_gscl_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = gscl_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
@@ -921,7 +788,7 @@ static void __init exynos5260_clk_gscl_init(struct device_node *np)
 	cmu.clk_regs = gscl_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
@@ -1028,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_isp_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = isp_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
@@ -1040,7 +907,7 @@ static void __init exynos5260_clk_isp_init(struct device_node *np)
 	cmu.clk_regs = isp_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
@@ -1092,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_kfc_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.pll_clks = kfc_pll_clks;
 	cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks);
@@ -1104,7 +971,7 @@ static void __init exynos5260_clk_kfc_init(struct device_node *np)
 	cmu.clk_regs = kfc_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
@@ -1148,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_mfc_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = mfc_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
@@ -1160,7 +1027,7 @@ static void __init exynos5260_clk_mfc_init(struct device_node *np)
 	cmu.clk_regs = mfc_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
@@ -1295,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_mif_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.pll_clks = mif_pll_clks;
 	cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks);
@@ -1309,7 +1176,7 @@ static void __init exynos5260_clk_mif_init(struct device_node *np)
 	cmu.clk_regs = mif_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
@@ -1503,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_peri_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.mux_clks = peri_mux_clks;
 	cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
@@ -1515,7 +1382,7 @@ static void __init exynos5260_clk_peri_init(struct device_node *np)
 	cmu.clk_regs = peri_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
@@ -1959,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_top_init(struct device_node *np)
 {
-	struct exynos5260_cmu_info cmu = {0};
+	struct samsung_cmu_info cmu = {0};
 
 	cmu.pll_clks = top_pll_clks;
 	cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks);
@@ -1975,7 +1842,7 @@ static void __init exynos5260_clk_top_init(struct device_node *np)
 	cmu.clk_regs = top_clk_regs;
 	cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
 
-	exynos5260_cmu_register_one(np, &cmu);
+	samsung_cmu_register_one(np, &cmu);
}
 
 CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
new file mode 100644
index 000000000000..ea4483b8d62e
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -0,0 +1,743 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9*/
10
11#include <linux/clk.h>
12#include <linux/clkdev.h>
13#include <linux/clk-provider.h>
14#include <linux/of.h>
15
16#include "clk.h"
17#include <dt-bindings/clock/exynos7-clk.h>
18
19/* Register Offset definitions for CMU_TOPC (0x10570000) */
20#define CC_PLL_LOCK 0x0000
21#define BUS0_PLL_LOCK 0x0004
22#define BUS1_DPLL_LOCK 0x0008
23#define MFC_PLL_LOCK 0x000C
24#define AUD_PLL_LOCK 0x0010
25#define CC_PLL_CON0 0x0100
26#define BUS0_PLL_CON0 0x0110
27#define BUS1_DPLL_CON0 0x0120
28#define MFC_PLL_CON0 0x0130
29#define AUD_PLL_CON0 0x0140
30#define MUX_SEL_TOPC0 0x0200
31#define MUX_SEL_TOPC1 0x0204
32#define MUX_SEL_TOPC2 0x0208
33#define MUX_SEL_TOPC3 0x020C
34#define DIV_TOPC0 0x0600
35#define DIV_TOPC1 0x0604
36#define DIV_TOPC3 0x060C
37
38static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
39 FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
40 FFACTOR(0, "ffac_topc_bus0_pll_div4",
41 "ffac_topc_bus0_pll_div2", 1, 2, 0),
42 FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_bus1_pll_ctrl", 1, 2, 0),
43 FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_cc_pll_ctrl", 1, 2, 0),
44 FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_mfc_pll_ctrl", 1, 2, 0),
45};
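
The FFACTOR entries are fixed-ratio clocks, rate = parent * mult / div, so chaining two 1/2 factors, as done for bus0_pll here, yields a quarter-rate tap. A short sketch with a hypothetical PLL rate:

/* Illustrative only: fixed-factor clocks scale the parent by mult/div. */
static unsigned long ffactor_rate(unsigned long parent, unsigned int mult,
				  unsigned int div)
{
	return (unsigned long)((unsigned long long)parent * mult / div);
}

/* With a hypothetical 1600 MHz fout_bus0_pll:
 *   ffac_topc_bus0_pll_div2 = ffactor_rate(1600000000, 1, 2)  -> 800 MHz
 *   ffac_topc_bus0_pll_div4 = ffactor_rate(800000000, 1, 2)   -> 400 MHz */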
46
47/* List of parent clocks for Muxes in CMU_TOPC */
48PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" };
49PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" };
50PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" };
51PNAME(mout_mfc_pll_ctrl_p) = { "fin_pll", "fout_mfc_pll" };
52
53PNAME(mout_topc_group2) = { "mout_sclk_bus0_pll_cmuc",
54 "mout_sclk_bus1_pll_cmuc", "mout_sclk_cc_pll_cmuc",
55 "mout_sclk_mfc_pll_cmuc" };
56
57PNAME(mout_sclk_bus0_pll_cmuc_p) = { "mout_bus0_pll_ctrl",
58 "ffac_topc_bus0_pll_div2", "ffac_topc_bus0_pll_div4"};
59PNAME(mout_sclk_bus1_pll_cmuc_p) = { "mout_bus1_pll_ctrl",
60 "ffac_topc_bus1_pll_div2"};
61PNAME(mout_sclk_cc_pll_cmuc_p) = { "mout_cc_pll_ctrl",
62 "ffac_topc_cc_pll_div2"};
63PNAME(mout_sclk_mfc_pll_cmuc_p) = { "mout_mfc_pll_ctrl",
64 "ffac_topc_mfc_pll_div2"};
65
66
67PNAME(mout_sclk_bus0_pll_out_p) = {"mout_bus0_pll_ctrl",
68 "ffac_topc_bus0_pll_div2"};
69
70static unsigned long topc_clk_regs[] __initdata = {
71 CC_PLL_LOCK,
72 BUS0_PLL_LOCK,
73 BUS1_DPLL_LOCK,
74 MFC_PLL_LOCK,
75 AUD_PLL_LOCK,
76 CC_PLL_CON0,
77 BUS0_PLL_CON0,
78 BUS1_DPLL_CON0,
79 MFC_PLL_CON0,
80 AUD_PLL_CON0,
81 MUX_SEL_TOPC0,
82 MUX_SEL_TOPC1,
83 MUX_SEL_TOPC2,
84 MUX_SEL_TOPC3,
85 DIV_TOPC0,
86 DIV_TOPC1,
87 DIV_TOPC3,
88};
89
90static struct samsung_mux_clock topc_mux_clks[] __initdata = {
91 MUX(0, "mout_bus0_pll_ctrl", mout_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1),
92 MUX(0, "mout_bus1_pll_ctrl", mout_bus1_pll_ctrl_p, MUX_SEL_TOPC0, 4, 1),
93 MUX(0, "mout_cc_pll_ctrl", mout_cc_pll_ctrl_p, MUX_SEL_TOPC0, 8, 1),
94 MUX(0, "mout_mfc_pll_ctrl", mout_mfc_pll_ctrl_p, MUX_SEL_TOPC0, 12, 1),
95
96 MUX(0, "mout_sclk_bus0_pll_cmuc", mout_sclk_bus0_pll_cmuc_p,
97 MUX_SEL_TOPC0, 16, 2),
98 MUX(0, "mout_sclk_bus1_pll_cmuc", mout_sclk_bus1_pll_cmuc_p,
99 MUX_SEL_TOPC0, 20, 1),
100 MUX(0, "mout_sclk_cc_pll_cmuc", mout_sclk_cc_pll_cmuc_p,
101 MUX_SEL_TOPC0, 24, 1),
102 MUX(0, "mout_sclk_mfc_pll_cmuc", mout_sclk_mfc_pll_cmuc_p,
103 MUX_SEL_TOPC0, 28, 1),
104
105 MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
106 MUX_SEL_TOPC1, 16, 1),
107
108 MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
109
110 MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
111};
112
113static struct samsung_div_clock topc_div_clks[] __initdata = {
114 DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
115 DIV_TOPC0, 4, 4),
116
117 DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
118 DIV_TOPC1, 24, 4),
119
120 DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_sclk_bus0_pll_out",
121 DIV_TOPC3, 0, 3),
122 DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_bus1_pll_ctrl",
123 DIV_TOPC3, 8, 3),
124 DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_cc_pll_ctrl",
125 DIV_TOPC3, 12, 3),
126 DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
127 DIV_TOPC3, 16, 3),
128};
129
130static struct samsung_pll_clock topc_pll_clks[] __initdata = {
131 PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK,
132 BUS0_PLL_CON0, NULL),
133 PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK,
134 CC_PLL_CON0, NULL),
135 PLL(pll_1452x, 0, "fout_bus1_pll", "fin_pll", BUS1_DPLL_LOCK,
136 BUS1_DPLL_CON0, NULL),
137 PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
138 MFC_PLL_CON0, NULL),
139 PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
140 AUD_PLL_CON0, NULL),
141};
142
143static struct samsung_cmu_info topc_cmu_info __initdata = {
144 .pll_clks = topc_pll_clks,
145 .nr_pll_clks = ARRAY_SIZE(topc_pll_clks),
146 .mux_clks = topc_mux_clks,
147 .nr_mux_clks = ARRAY_SIZE(topc_mux_clks),
148 .div_clks = topc_div_clks,
149 .nr_div_clks = ARRAY_SIZE(topc_div_clks),
150 .fixed_factor_clks = topc_fixed_factor_clks,
151 .nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks),
152 .nr_clk_ids = TOPC_NR_CLK,
153 .clk_regs = topc_clk_regs,
154 .nr_clk_regs = ARRAY_SIZE(topc_clk_regs),
155};
156
157static void __init exynos7_clk_topc_init(struct device_node *np)
158{
159 samsung_cmu_register_one(np, &topc_cmu_info);
160}
161
162CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
163 exynos7_clk_topc_init);
164
165/* Register Offset definitions for CMU_TOP0 (0x105D0000) */
166#define MUX_SEL_TOP00 0x0200
167#define MUX_SEL_TOP01 0x0204
168#define MUX_SEL_TOP03 0x020C
169#define MUX_SEL_TOP0_PERIC3 0x023C
170#define DIV_TOP03 0x060C
171#define DIV_TOP0_PERIC3 0x063C
172#define ENABLE_SCLK_TOP0_PERIC3 0x0A3C
173
174/* List of parent clocks for Muxes in CMU_TOP0 */
175PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
176PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
177PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" };
178PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" };
179
180PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
181 "ffac_top0_bus0_pll_div2"};
182PNAME(mout_top0_half_bus1_pll_p) = {"mout_top0_bus1_pll",
183 "ffac_top0_bus1_pll_div2"};
184PNAME(mout_top0_half_cc_pll_p) = {"mout_top0_cc_pll",
185 "ffac_top0_cc_pll_div2"};
186PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
187 "ffac_top0_mfc_pll_div2"};
188
189PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
190 "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
191 "mout_top0_half_mfc_pll"};
192
193static unsigned long top0_clk_regs[] __initdata = {
194 MUX_SEL_TOP00,
195 MUX_SEL_TOP01,
196 MUX_SEL_TOP03,
197 MUX_SEL_TOP0_PERIC3,
198 DIV_TOP03,
199 DIV_TOP0_PERIC3,
200 ENABLE_SCLK_TOP0_PERIC3,
201};
202
203static struct samsung_mux_clock top0_mux_clks[] __initdata = {
204 MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
205 MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
206 MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
207 MUX(0, "mout_top0_bus0_pll", mout_bus0_pll_p, MUX_SEL_TOP00, 16, 1),
208
209 MUX(0, "mout_top0_half_mfc_pll", mout_top0_half_mfc_pll_p,
210 MUX_SEL_TOP01, 4, 1),
211 MUX(0, "mout_top0_half_cc_pll", mout_top0_half_cc_pll_p,
212 MUX_SEL_TOP01, 8, 1),
213 MUX(0, "mout_top0_half_bus1_pll", mout_top0_half_bus1_pll_p,
214 MUX_SEL_TOP01, 12, 1),
215 MUX(0, "mout_top0_half_bus0_pll", mout_top0_half_bus0_pll_p,
216 MUX_SEL_TOP01, 16, 1),
217
218 MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
219 MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
220
221 MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
222 MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
223 MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
224 MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
225};
226
227static struct samsung_div_clock top0_div_clks[] __initdata = {
228 DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66",
229 DIV_TOP03, 12, 6),
230 DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
231 DIV_TOP03, 20, 6),
232
233 DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
234 DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
235 DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
236 DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
237};
238
239static struct samsung_gate_clock top0_gate_clks[] __initdata = {
240 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
241 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
242 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
243 ENABLE_SCLK_TOP0_PERIC3, 8, 0, 0),
244 GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_sclk_uart1",
245 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
246 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
247 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
248};
249
250static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
251 FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll", 1, 2, 0),
252 FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll", 1, 2, 0),
253 FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll", 1, 2, 0),
254 FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll", 1, 2, 0),
255};
256
257static struct samsung_cmu_info top0_cmu_info __initdata = {
258 .mux_clks = top0_mux_clks,
259 .nr_mux_clks = ARRAY_SIZE(top0_mux_clks),
260 .div_clks = top0_div_clks,
261 .nr_div_clks = ARRAY_SIZE(top0_div_clks),
262 .gate_clks = top0_gate_clks,
263 .nr_gate_clks = ARRAY_SIZE(top0_gate_clks),
264 .fixed_factor_clks = top0_fixed_factor_clks,
265 .nr_fixed_factor_clks = ARRAY_SIZE(top0_fixed_factor_clks),
266 .nr_clk_ids = TOP0_NR_CLK,
267 .clk_regs = top0_clk_regs,
268 .nr_clk_regs = ARRAY_SIZE(top0_clk_regs),
269};
270
271static void __init exynos7_clk_top0_init(struct device_node *np)
272{
273 samsung_cmu_register_one(np, &top0_cmu_info);
274}
275
276CLK_OF_DECLARE(exynos7_clk_top0, "samsung,exynos7-clock-top0",
277 exynos7_clk_top0_init);
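
The sclk_uart* outputs above are three-stage paths: a 2-bit group1 mux feeds a
4-bit divider, which feeds a gate. A standalone sketch of the divider step,
assuming the usual Samsung encoding (rate = parent / (field + 1)) and a purely
hypothetical 800 MHz mux output:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t mout_sclk_uart0 = 800000000;   /* hypothetical mux output */
        uint32_t field = 7;                     /* DIV_TOP0_PERIC3 bits [19:16] */

        /* dout_sclk_uart0 = mout / (field + 1); the gate only passes it on */
        assert(mout_sclk_uart0 / (field + 1) == 100000000);
        return 0;
}
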
278
279/* Register Offset definitions for CMU_TOP1 (0x105E0000) */
280#define MUX_SEL_TOP10 0x0200
281#define MUX_SEL_TOP11 0x0204
282#define MUX_SEL_TOP13 0x020C
283#define MUX_SEL_TOP1_FSYS0 0x0224
284#define MUX_SEL_TOP1_FSYS1 0x0228
285#define DIV_TOP13 0x060C
286#define DIV_TOP1_FSYS0 0x0624
287#define DIV_TOP1_FSYS1 0x0628
288#define ENABLE_ACLK_TOP13 0x080C
289#define ENABLE_SCLK_TOP1_FSYS0 0x0A24
290#define ENABLE_SCLK_TOP1_FSYS1 0x0A28
291
292/* List of parent clocks for Muxes in CMU_TOP1 */
293PNAME(mout_top1_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
294PNAME(mout_top1_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll_b" };
295PNAME(mout_top1_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll_b" };
296PNAME(mout_top1_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll_b" };
297
298PNAME(mout_top1_half_bus0_pll_p) = {"mout_top1_bus0_pll",
299 "ffac_top1_bus0_pll_div2"};
300PNAME(mout_top1_half_bus1_pll_p) = {"mout_top1_bus1_pll",
301 "ffac_top1_bus1_pll_div2"};
302PNAME(mout_top1_half_cc_pll_p) = {"mout_top1_cc_pll",
303 "ffac_top1_cc_pll_div2"};
304PNAME(mout_top1_half_mfc_pll_p) = {"mout_top1_mfc_pll",
305 "ffac_top1_mfc_pll_div2"};
306
307PNAME(mout_top1_group1) = {"mout_top1_half_bus0_pll",
308 "mout_top1_half_bus1_pll", "mout_top1_half_cc_pll",
309 "mout_top1_half_mfc_pll"};
310
311static unsigned long top1_clk_regs[] __initdata = {
312 MUX_SEL_TOP10,
313 MUX_SEL_TOP11,
314 MUX_SEL_TOP13,
315 MUX_SEL_TOP1_FSYS0,
316 MUX_SEL_TOP1_FSYS1,
317 DIV_TOP13,
318 DIV_TOP1_FSYS0,
319 DIV_TOP1_FSYS1,
320 ENABLE_ACLK_TOP13,
321 ENABLE_SCLK_TOP1_FSYS0,
322 ENABLE_SCLK_TOP1_FSYS1,
323};
324
325static struct samsung_mux_clock top1_mux_clks[] __initdata = {
326 MUX(0, "mout_top1_mfc_pll", mout_top1_mfc_pll_p, MUX_SEL_TOP10, 4, 1),
327 MUX(0, "mout_top1_cc_pll", mout_top1_cc_pll_p, MUX_SEL_TOP10, 8, 1),
328 MUX(0, "mout_top1_bus1_pll", mout_top1_bus1_pll_p,
329 MUX_SEL_TOP10, 12, 1),
330 MUX(0, "mout_top1_bus0_pll", mout_top1_bus0_pll_p,
331 MUX_SEL_TOP10, 16, 1),
332
333 MUX(0, "mout_top1_half_mfc_pll", mout_top1_half_mfc_pll_p,
334 MUX_SEL_TOP11, 4, 1),
335 MUX(0, "mout_top1_half_cc_pll", mout_top1_half_cc_pll_p,
336 MUX_SEL_TOP11, 8, 1),
337 MUX(0, "mout_top1_half_bus1_pll", mout_top1_half_bus1_pll_p,
338 MUX_SEL_TOP11, 12, 1),
339 MUX(0, "mout_top1_half_bus0_pll", mout_top1_half_bus0_pll_p,
340 MUX_SEL_TOP11, 16, 1),
341
342 MUX(0, "mout_aclk_fsys1_200", mout_top1_group1, MUX_SEL_TOP13, 24, 2),
343 MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
344
345 MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
346
347 MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
348 MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
349};
350
351static struct samsung_div_clock top1_div_clks[] __initdata = {
352 DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200",
353 DIV_TOP13, 24, 4),
354 DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
355 DIV_TOP13, 28, 4),
356
357 DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
358 DIV_TOP1_FSYS0, 24, 4),
359
360 DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
361 DIV_TOP1_FSYS1, 24, 4),
362 DIV(DOUT_SCLK_MMC0, "dout_sclk_mmc0", "mout_sclk_mmc0",
363 DIV_TOP1_FSYS1, 28, 4),
364};
365
366static struct samsung_gate_clock top1_gate_clks[] __initdata = {
367 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
368 ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
369
370 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
371 ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
372 GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_sclk_mmc0",
373 ENABLE_SCLK_TOP1_FSYS1, 28, CLK_SET_RATE_PARENT, 0),
374};
375
376static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
377 FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll", 1, 2, 0),
378 FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll", 1, 2, 0),
379 FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll", 1, 2, 0),
380 FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll", 1, 2, 0),
381};
382
383static struct samsung_cmu_info top1_cmu_info __initdata = {
384 .mux_clks = top1_mux_clks,
385 .nr_mux_clks = ARRAY_SIZE(top1_mux_clks),
386 .div_clks = top1_div_clks,
387 .nr_div_clks = ARRAY_SIZE(top1_div_clks),
388 .gate_clks = top1_gate_clks,
389 .nr_gate_clks = ARRAY_SIZE(top1_gate_clks),
390 .fixed_factor_clks = top1_fixed_factor_clks,
391 .nr_fixed_factor_clks = ARRAY_SIZE(top1_fixed_factor_clks),
392 .nr_clk_ids = TOP1_NR_CLK,
393 .clk_regs = top1_clk_regs,
394 .nr_clk_regs = ARRAY_SIZE(top1_clk_regs),
395};
396
397static void __init exynos7_clk_top1_init(struct device_node *np)
398{
399 samsung_cmu_register_one(np, &top1_cmu_info);
400}
401
402CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
403 exynos7_clk_top1_init);
404
405/* Register Offset definitions for CMU_CCORE (0x105B0000) */
406#define MUX_SEL_CCORE 0x0200
407#define DIV_CCORE 0x0600
408#define ENABLE_ACLK_CCORE0 0x0800
409#define ENABLE_ACLK_CCORE1 0x0804
410#define ENABLE_PCLK_CCORE 0x0900
411
412/*
413 * List of parent clocks for Muxes in CMU_CCORE
414 */
415PNAME(mout_aclk_ccore_133_p) = { "fin_pll", "dout_aclk_ccore_133" };
416
417static unsigned long ccore_clk_regs[] __initdata = {
418 MUX_SEL_CCORE,
419 ENABLE_PCLK_CCORE,
420};
421
422static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
423 MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_p,
424 MUX_SEL_CCORE, 1, 1),
425};
426
427static struct samsung_gate_clock ccore_gate_clks[] __initdata = {
428 GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user",
429 ENABLE_PCLK_CCORE, 8, 0, 0),
430};
431
432static struct samsung_cmu_info ccore_cmu_info __initdata = {
433 .mux_clks = ccore_mux_clks,
434 .nr_mux_clks = ARRAY_SIZE(ccore_mux_clks),
435 .gate_clks = ccore_gate_clks,
436 .nr_gate_clks = ARRAY_SIZE(ccore_gate_clks),
437 .nr_clk_ids = CCORE_NR_CLK,
438 .clk_regs = ccore_clk_regs,
439 .nr_clk_regs = ARRAY_SIZE(ccore_clk_regs),
440};
441
442static void __init exynos7_clk_ccore_init(struct device_node *np)
443{
444 samsung_cmu_register_one(np, &ccore_cmu_info);
445}
446
447CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
448 exynos7_clk_ccore_init);
449
450/* Register Offset definitions for CMU_PERIC0 (0x13610000) */
451#define MUX_SEL_PERIC0 0x0200
452#define ENABLE_PCLK_PERIC0 0x0900
453#define ENABLE_SCLK_PERIC0 0x0A00
454
455/* List of parent clocks for Muxes in CMU_PERIC0 */
456PNAME(mout_aclk_peric0_66_p) = { "fin_pll", "dout_aclk_peric0_66" };
457PNAME(mout_sclk_uart0_p) = { "fin_pll", "sclk_uart0" };
458
459static unsigned long peric0_clk_regs[] __initdata = {
460 MUX_SEL_PERIC0,
461 ENABLE_PCLK_PERIC0,
462 ENABLE_SCLK_PERIC0,
463};
464
465static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
466 MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_p,
467 MUX_SEL_PERIC0, 0, 1),
468 MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_p,
469 MUX_SEL_PERIC0, 16, 1),
470};
471
472static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
473 GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user",
474 ENABLE_PCLK_PERIC0, 8, 0, 0),
475 GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user",
476 ENABLE_PCLK_PERIC0, 9, 0, 0),
477 GATE(PCLK_HSI2C4, "pclk_hsi2c4", "mout_aclk_peric0_66_user",
478 ENABLE_PCLK_PERIC0, 10, 0, 0),
479 GATE(PCLK_HSI2C5, "pclk_hsi2c5", "mout_aclk_peric0_66_user",
480 ENABLE_PCLK_PERIC0, 11, 0, 0),
481 GATE(PCLK_HSI2C9, "pclk_hsi2c9", "mout_aclk_peric0_66_user",
482 ENABLE_PCLK_PERIC0, 12, 0, 0),
483 GATE(PCLK_HSI2C10, "pclk_hsi2c10", "mout_aclk_peric0_66_user",
484 ENABLE_PCLK_PERIC0, 13, 0, 0),
485 GATE(PCLK_HSI2C11, "pclk_hsi2c11", "mout_aclk_peric0_66_user",
486 ENABLE_PCLK_PERIC0, 14, 0, 0),
487 GATE(PCLK_UART0, "pclk_uart0", "mout_aclk_peric0_66_user",
488 ENABLE_PCLK_PERIC0, 16, 0, 0),
489 GATE(PCLK_ADCIF, "pclk_adcif", "mout_aclk_peric0_66_user",
490 ENABLE_PCLK_PERIC0, 20, 0, 0),
491 GATE(PCLK_PWM, "pclk_pwm", "mout_aclk_peric0_66_user",
492 ENABLE_PCLK_PERIC0, 21, 0, 0),
493
494 GATE(SCLK_UART0, "sclk_uart0_user", "mout_sclk_uart0_user",
495 ENABLE_SCLK_PERIC0, 16, 0, 0),
496 GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0),
497};
498
499static struct samsung_cmu_info peric0_cmu_info __initdata = {
500 .mux_clks = peric0_mux_clks,
501 .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
502 .gate_clks = peric0_gate_clks,
503 .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
504 .nr_clk_ids = PERIC0_NR_CLK,
505 .clk_regs = peric0_clk_regs,
506 .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
507};
508
509static void __init exynos7_clk_peric0_init(struct device_node *np)
510{
511 samsung_cmu_register_one(np, &peric0_cmu_info);
512}
513
514CLK_OF_DECLARE(exynos7_clk_peric0, "samsung,exynos7-clock-peric0",
515 exynos7_clk_peric0_init);
516
517/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
518#define MUX_SEL_PERIC10 0x0200
519#define MUX_SEL_PERIC11 0x0204
520#define ENABLE_PCLK_PERIC1 0x0900
521#define ENABLE_SCLK_PERIC10 0x0A00
522
523/* List of parent clocks for Muxes in CMU_PERIC1 */
524PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" };
525PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" };
526PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" };
527PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" };
528
529static unsigned long peric1_clk_regs[] __initdata = {
530 MUX_SEL_PERIC10,
531 MUX_SEL_PERIC11,
532 ENABLE_PCLK_PERIC1,
533 ENABLE_SCLK_PERIC10,
534};
535
536static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
537 MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
538 MUX_SEL_PERIC10, 0, 1),
539
540 MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
541 MUX_SEL_PERIC11, 20, 1),
542 MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
543 MUX_SEL_PERIC11, 24, 1),
544 MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_p,
545 MUX_SEL_PERIC11, 28, 1),
546};
547
548static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
549 GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user",
550 ENABLE_PCLK_PERIC1, 4, 0, 0),
551 GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user",
552 ENABLE_PCLK_PERIC1, 5, 0, 0),
553 GATE(PCLK_HSI2C6, "pclk_hsi2c6", "mout_aclk_peric1_66_user",
554 ENABLE_PCLK_PERIC1, 6, 0, 0),
555 GATE(PCLK_HSI2C7, "pclk_hsi2c7", "mout_aclk_peric1_66_user",
556 ENABLE_PCLK_PERIC1, 7, 0, 0),
557 GATE(PCLK_HSI2C8, "pclk_hsi2c8", "mout_aclk_peric1_66_user",
558 ENABLE_PCLK_PERIC1, 8, 0, 0),
559 GATE(PCLK_UART1, "pclk_uart1", "mout_aclk_peric1_66_user",
560 ENABLE_PCLK_PERIC1, 9, 0, 0),
561 GATE(PCLK_UART2, "pclk_uart2", "mout_aclk_peric1_66_user",
562 ENABLE_PCLK_PERIC1, 10, 0, 0),
563 GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
564 ENABLE_PCLK_PERIC1, 11, 0, 0),
565
566 GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
567 ENABLE_SCLK_PERIC10, 9, 0, 0),
568 GATE(SCLK_UART2, "sclk_uart2_user", "mout_sclk_uart2_user",
569 ENABLE_SCLK_PERIC10, 10, 0, 0),
570 GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
571 ENABLE_SCLK_PERIC10, 11, 0, 0),
572};
573
574static struct samsung_cmu_info peric1_cmu_info __initdata = {
575 .mux_clks = peric1_mux_clks,
576 .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
577 .gate_clks = peric1_gate_clks,
578 .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
579 .nr_clk_ids = PERIC1_NR_CLK,
580 .clk_regs = peric1_clk_regs,
581 .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
582};
583
584static void __init exynos7_clk_peric1_init(struct device_node *np)
585{
586 samsung_cmu_register_one(np, &peric1_cmu_info);
587}
588
589CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
590 exynos7_clk_peric1_init);
591
592/* Register Offset definitions for CMU_PERIS (0x10040000) */
593#define MUX_SEL_PERIS 0x0200
594#define ENABLE_PCLK_PERIS 0x0900
595#define ENABLE_PCLK_PERIS_SECURE_CHIPID 0x0910
596#define ENABLE_SCLK_PERIS 0x0A00
597#define ENABLE_SCLK_PERIS_SECURE_CHIPID 0x0A10
598
599/* List of parent clocks for Muxes in CMU_PERIS */
600PNAME(mout_aclk_peris_66_p) = { "fin_pll", "dout_aclk_peris_66" };
601
602static unsigned long peris_clk_regs[] __initdata = {
603 MUX_SEL_PERIS,
604 ENABLE_PCLK_PERIS,
605 ENABLE_PCLK_PERIS_SECURE_CHIPID,
606 ENABLE_SCLK_PERIS,
607 ENABLE_SCLK_PERIS_SECURE_CHIPID,
608};
609
610static struct samsung_mux_clock peris_mux_clks[] __initdata = {
611 MUX(0, "mout_aclk_peris_66_user",
612 mout_aclk_peris_66_p, MUX_SEL_PERIS, 0, 1),
613};
614
615static struct samsung_gate_clock peris_gate_clks[] __initdata = {
616 GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user",
617 ENABLE_PCLK_PERIS, 6, 0, 0),
618 GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user",
619 ENABLE_PCLK_PERIS, 10, 0, 0),
620
621 GATE(PCLK_CHIPID, "pclk_chipid", "mout_aclk_peris_66_user",
622 ENABLE_PCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
623 GATE(SCLK_CHIPID, "sclk_chipid", "fin_pll",
624 ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
625
626 GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0),
627};
628
629static struct samsung_cmu_info peris_cmu_info __initdata = {
630 .mux_clks = peris_mux_clks,
631 .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
632 .gate_clks = peris_gate_clks,
633 .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
634 .nr_clk_ids = PERIS_NR_CLK,
635 .clk_regs = peris_clk_regs,
636 .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
637};
638
639static void __init exynos7_clk_peris_init(struct device_node *np)
640{
641 samsung_cmu_register_one(np, &peris_cmu_info);
642}
643
644CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
645 exynos7_clk_peris_init);
646
647/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
648#define MUX_SEL_FSYS00 0x0200
649#define MUX_SEL_FSYS01 0x0204
650#define ENABLE_ACLK_FSYS01 0x0804
651
652/*
653 * List of parent clocks for Muxes in CMU_FSYS0
654 */
655PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" };
656PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" };
657
658static unsigned long fsys0_clk_regs[] __initdata = {
659 MUX_SEL_FSYS00,
660 MUX_SEL_FSYS01,
661 ENABLE_ACLK_FSYS01,
662};
663
664static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
665 MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_p,
666 MUX_SEL_FSYS00, 24, 1),
667
668 MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
669};
670
671static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
672 GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
673 ENABLE_ACLK_FSYS01, 31, 0, 0),
674};
675
676static struct samsung_cmu_info fsys0_cmu_info __initdata = {
677 .mux_clks = fsys0_mux_clks,
678 .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
679 .gate_clks = fsys0_gate_clks,
680 .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
681 .nr_clk_ids = FSYS0_NR_CLK,
682 .clk_regs = fsys0_clk_regs,
683 .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
684};
685
686static void __init exynos7_clk_fsys0_init(struct device_node *np)
687{
688 samsung_cmu_register_one(np, &fsys0_cmu_info);
689}
690
691CLK_OF_DECLARE(exynos7_clk_fsys0, "samsung,exynos7-clock-fsys0",
692 exynos7_clk_fsys0_init);
693
694/* Register Offset definitions for CMU_FSYS1 (0x156E0000) */
695#define MUX_SEL_FSYS10 0x0200
696#define MUX_SEL_FSYS11 0x0204
697#define ENABLE_ACLK_FSYS1 0x0800
698
699/*
700 * List of parent clocks for Muxes in CMU_FSYS1
701 */
702PNAME(mout_aclk_fsys1_200_p) = { "fin_pll", "dout_aclk_fsys1_200" };
703PNAME(mout_sclk_mmc0_p) = { "fin_pll", "sclk_mmc0" };
704PNAME(mout_sclk_mmc1_p) = { "fin_pll", "sclk_mmc1" };
705
706static unsigned long fsys1_clk_regs[] __initdata = {
707 MUX_SEL_FSYS10,
708 MUX_SEL_FSYS11,
709 ENABLE_ACLK_FSYS1,
710};
711
712static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
713 MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_p,
714 MUX_SEL_FSYS10, 28, 1),
715
716 MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_p, MUX_SEL_FSYS11, 24, 1),
717 MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_p, MUX_SEL_FSYS11, 28, 1),
718};
719
720static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
721 GATE(ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys1_200_user",
722 ENABLE_ACLK_FSYS1, 29, 0, 0),
723 GATE(ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys1_200_user",
724 ENABLE_ACLK_FSYS1, 30, 0, 0),
725};
726
727static struct samsung_cmu_info fsys1_cmu_info __initdata = {
728 .mux_clks = fsys1_mux_clks,
729 .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
730 .gate_clks = fsys1_gate_clks,
731 .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
732 .nr_clk_ids = FSYS1_NR_CLK,
733 .clk_regs = fsys1_clk_regs,
734 .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
735};
736
737static void __init exynos7_clk_fsys1_init(struct device_node *np)
738{
739 samsung_cmu_register_one(np, &fsys1_cmu_info);
740}
741
742CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
743 exynos7_clk_fsys1_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index b07fad2a9167..9d70e5c03804 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -482,6 +482,8 @@ static const struct clk_ops samsung_pll45xx_clk_min_ops = {
482 482
483#define PLL46XX_VSEL_MASK (1) 483#define PLL46XX_VSEL_MASK (1)
484#define PLL46XX_MDIV_MASK (0x1FF) 484#define PLL46XX_MDIV_MASK (0x1FF)
485#define PLL1460X_MDIV_MASK (0x3FF)
486
485#define PLL46XX_PDIV_MASK (0x3F) 487#define PLL46XX_PDIV_MASK (0x3F)
486#define PLL46XX_SDIV_MASK (0x7) 488#define PLL46XX_SDIV_MASK (0x7)
487#define PLL46XX_VSEL_SHIFT (27) 489#define PLL46XX_VSEL_SHIFT (27)
@@ -511,13 +513,15 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
511 513
512 pll_con0 = __raw_readl(pll->con_reg); 514 pll_con0 = __raw_readl(pll->con_reg);
513 pll_con1 = __raw_readl(pll->con_reg + 4); 515 pll_con1 = __raw_readl(pll->con_reg + 4);
514 mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK; 516 mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ?
517 PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK);
515 pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK; 518 pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
516 sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK; 519 sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
517 kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK : 520 kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
518 pll_con1 & PLL46XX_KDIV_MASK; 521 pll_con1 & PLL46XX_KDIV_MASK;
519 522
520 shift = pll->type == pll_4600 ? 16 : 10; 523 shift = ((pll->type == pll_4600) || (pll->type == pll_1460x)) ? 16 : 10;
524
521 fvco *= (mdiv << shift) + kdiv; 525 fvco *= (mdiv << shift) + kdiv;
522 do_div(fvco, (pdiv << sdiv)); 526 do_div(fvco, (pdiv << sdiv));
523 fvco >>= shift; 527 fvco >>= shift;
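
To see what the pll_1460x special-casing buys, here is a standalone sketch of
the recalc math above with hypothetical factors (mdiv = 200, kdiv = 0,
pdiv = 3, sdiv = 1, 24 MHz fin_pll); the 16-bit shift cancels once kdiv is
folded in:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t fvco = 24000000;
        uint32_t mdiv = 200, kdiv = 0, pdiv = 3, sdiv = 1, shift = 16;

        fvco *= ((uint64_t)mdiv << shift) + kdiv;
        fvco /= pdiv << sdiv;           /* do_div() in the kernel code */
        fvco >>= shift;
        assert(fvco == 800000000);      /* 24 MHz * 200 / 6 */
        return 0;
}
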
@@ -573,14 +577,21 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
573 lock = 0xffff; 577 lock = 0xffff;
574 578
575 /* Set PLL PMS and VSEL values. */ 579 /* Set PLL PMS and VSEL values. */
576 con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) | 580 if (pll->type == pll_1460x) {
581 con0 &= ~((PLL1460X_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
582 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
583 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT));
584 } else {
585 con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
577 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) | 586 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
578 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) | 587 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
579 (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT)); 588 (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
589 con0 |= rate->vsel << PLL46XX_VSEL_SHIFT;
590 }
591
580 con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) | 592 con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
581 (rate->pdiv << PLL46XX_PDIV_SHIFT) | 593 (rate->pdiv << PLL46XX_PDIV_SHIFT) |
582 (rate->sdiv << PLL46XX_SDIV_SHIFT) | 594 (rate->sdiv << PLL46XX_SDIV_SHIFT);
583 (rate->vsel << PLL46XX_VSEL_SHIFT);
584 595
585 /* Set PLL K, MFR and MRR values. */ 596 /* Set PLL K, MFR and MRR values. */
586 con1 = __raw_readl(pll->con_reg + 0x4); 597 con1 = __raw_readl(pll->con_reg + 0x4);
@@ -1190,6 +1201,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
1190 /* clk_ops for 35xx and 2550 are similar */ 1201 /* clk_ops for 35xx and 2550 are similar */
1191 case pll_35xx: 1202 case pll_35xx:
1192 case pll_2550: 1203 case pll_2550:
1204 case pll_1450x:
1205 case pll_1451x:
1206 case pll_1452x:
1193 if (!pll->rate_table) 1207 if (!pll->rate_table)
1194 init.ops = &samsung_pll35xx_clk_min_ops; 1208 init.ops = &samsung_pll35xx_clk_min_ops;
1195 else 1209 else
@@ -1223,6 +1237,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
1223 case pll_4600: 1237 case pll_4600:
1224 case pll_4650: 1238 case pll_4650:
1225 case pll_4650c: 1239 case pll_4650c:
1240 case pll_1460x:
1226 if (!pll->rate_table) 1241 if (!pll->rate_table)
1227 init.ops = &samsung_pll46xx_clk_min_ops; 1242 init.ops = &samsung_pll46xx_clk_min_ops;
1228 else 1243 else
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index c0ed4d41fd90..213de9af8b4f 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -33,6 +33,10 @@ enum samsung_pll_type {
33 pll_s3c2440_mpll, 33 pll_s3c2440_mpll,
34 pll_2550xx, 34 pll_2550xx,
35 pll_2650xx, 35 pll_2650xx,
36 pll_1450x,
37 pll_1451x,
38 pll_1452x,
39 pll_1460x,
36}; 40};
37 41
38#define PLL_35XX_RATE(_rate, _m, _p, _s) \ 42#define PLL_35XX_RATE(_rate, _m, _p, _s) \
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index deab84d9f37d..4bda54095a16 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -11,9 +11,13 @@
11 * clock framework for Samsung platforms. 11 * clock framework for Samsung platforms.
12*/ 12*/
13 13
14#include <linux/of_address.h>
14#include <linux/syscore_ops.h> 15#include <linux/syscore_ops.h>
16
15#include "clk.h" 17#include "clk.h"
16 18
19static LIST_HEAD(clock_reg_cache_list);
20
17void samsung_clk_save(void __iomem *base, 21void samsung_clk_save(void __iomem *base,
18 struct samsung_clk_reg_dump *rd, 22 struct samsung_clk_reg_dump *rd,
19 unsigned int num_regs) 23 unsigned int num_regs)
@@ -281,7 +285,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
281 * obtain the clock speed of all external fixed clock sources from device 285 * obtain the clock speed of all external fixed clock sources from device
282 * tree and register it 286 * tree and register it
283 */ 287 */
284#ifdef CONFIG_OF
285void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx, 288void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
286 struct samsung_fixed_rate_clock *fixed_rate_clk, 289 struct samsung_fixed_rate_clock *fixed_rate_clk,
287 unsigned int nr_fixed_rate_clk, 290 unsigned int nr_fixed_rate_clk,
@@ -298,7 +301,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
298 } 301 }
299 samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk); 302 samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
300} 303}
301#endif
302 304
303/* utility function to get the rate of a specified clock */ 305/* utility function to get the rate of a specified clock */
304unsigned long _get_rate(const char *clk_name) 306unsigned long _get_rate(const char *clk_name)
@@ -313,3 +315,99 @@ unsigned long _get_rate(const char *clk_name)
313 315
314 return clk_get_rate(clk); 316 return clk_get_rate(clk);
315} 317}
318
319#ifdef CONFIG_PM_SLEEP
320static int samsung_clk_suspend(void)
321{
322 struct samsung_clock_reg_cache *reg_cache;
323
324 list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
325 samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
326 reg_cache->rd_num);
327 return 0;
328}
329
330static void samsung_clk_resume(void)
331{
332 struct samsung_clock_reg_cache *reg_cache;
333
334 list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
335 samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
336 reg_cache->rd_num);
337}
338
339static struct syscore_ops samsung_clk_syscore_ops = {
340 .suspend = samsung_clk_suspend,
341 .resume = samsung_clk_resume,
342};
343
344static void samsung_clk_sleep_init(void __iomem *reg_base,
345 const unsigned long *rdump,
346 unsigned long nr_rdump)
347{
348 struct samsung_clock_reg_cache *reg_cache;
349
350 reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
351 GFP_KERNEL);
352 if (!reg_cache)
353 panic("could not allocate register reg_cache.\n");
354 reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
355
356 if (!reg_cache->rdump)
357 panic("could not allocate register dump storage.\n");
358
359 if (list_empty(&clock_reg_cache_list))
360 register_syscore_ops(&samsung_clk_syscore_ops);
361
362 reg_cache->reg_base = reg_base;
363 reg_cache->rd_num = nr_rdump;
364 list_add_tail(&reg_cache->node, &clock_reg_cache_list);
365}
366
367#else
368static void samsung_clk_sleep_init(void __iomem *reg_base,
369 const unsigned long *rdump,
370 unsigned long nr_rdump) {}
371#endif
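
The syscore hooks above only replay a list of <offset, value> pairs. A hedged
standalone paraphrase of the save half (samsung_clk_save()'s body sits outside
this hunk; struct reg_dump here stands in for samsung_clk_reg_dump):

#include <assert.h>
#include <stdint.h>

struct reg_dump {
        unsigned long offset;           /* register offset from reg_base */
        uint32_t value;                 /* captured at suspend time */
};

static void save_regs(const uint32_t *base, struct reg_dump *rd,
                      unsigned int num_regs)
{
        for (; num_regs > 0; --num_regs, ++rd)
                rd->value = base[rd->offset / 4];       /* readl() in-kernel */
}

int main(void)
{
        uint32_t fake_cmu[2] = { 0x11, 0x22 };          /* pretend MMIO block */
        struct reg_dump rd[] = { { 0x0 }, { 0x4 } };

        save_regs(fake_cmu, rd, 2);
        assert(rd[0].value == 0x11 && rd[1].value == 0x22);
        return 0;
}
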
372
373/*
374 * Common function which registers plls, muxes, dividers and gates
375 * for each CMU. It also add CMU register list to register cache.
376 */
377void __init samsung_cmu_register_one(struct device_node *np,
378 struct samsung_cmu_info *cmu)
379{
380 void __iomem *reg_base;
381 struct samsung_clk_provider *ctx;
382
383 reg_base = of_iomap(np, 0);
384 if (!reg_base)
385 panic("%s: failed to map registers\n", __func__);
386
387 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
388 if (!ctx)
389 panic("%s: unable to alllocate ctx\n", __func__);
390
391 if (cmu->pll_clks)
392 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
393 reg_base);
394 if (cmu->mux_clks)
395 samsung_clk_register_mux(ctx, cmu->mux_clks,
396 cmu->nr_mux_clks);
397 if (cmu->div_clks)
398 samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
399 if (cmu->gate_clks)
400 samsung_clk_register_gate(ctx, cmu->gate_clks,
401 cmu->nr_gate_clks);
402 if (cmu->fixed_clks)
403 samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
404 cmu->nr_fixed_clks);
405 if (cmu->fixed_factor_clks)
406 samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
407 cmu->nr_fixed_factor_clks);
408 if (cmu->clk_regs)
409 samsung_clk_sleep_init(reg_base, cmu->clk_regs,
410 cmu->nr_clk_regs);
411
412 samsung_clk_of_add_provider(np, ctx);
413}
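
Pulling the pieces together, a minimal hypothetical CMU built on the new
helper could look like the sketch below; "foo", "vendor,foo-clock" and the
0x0900 offset are invented for illustration, not taken from the patch:

static struct samsung_gate_clock foo_gate_clks[] __initdata = {
        /* GATE(id, name, parent, offset, bit, clk flags, gate flags) */
        GATE(1, "pclk_foo", "fin_pll", 0x0900, 0, 0, 0),
};

static unsigned long foo_clk_regs[] __initdata = {
        0x0900,                         /* saved/restored over suspend */
};

static struct samsung_cmu_info foo_cmu_info __initdata = {
        .gate_clks      = foo_gate_clks,
        .nr_gate_clks   = ARRAY_SIZE(foo_gate_clks),
        .nr_clk_ids     = 2,            /* highest clock id + 1 */
        .clk_regs       = foo_clk_regs,
        .nr_clk_regs    = ARRAY_SIZE(foo_clk_regs),
};

static void __init foo_clk_init(struct device_node *np)
{
        samsung_cmu_register_one(np, &foo_cmu_info);
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
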
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 66ab36b5cef1..8acabe1f32c4 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -13,19 +13,15 @@
13#ifndef __SAMSUNG_CLK_H 13#ifndef __SAMSUNG_CLK_H
14#define __SAMSUNG_CLK_H 14#define __SAMSUNG_CLK_H
15 15
16#include <linux/clk.h>
17#include <linux/clkdev.h> 16#include <linux/clkdev.h>
18#include <linux/io.h>
19#include <linux/clk-provider.h> 17#include <linux/clk-provider.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include "clk-pll.h" 18#include "clk-pll.h"
23 19
24/** 20/**
25 * struct samsung_clk_provider: information about clock provider 21 * struct samsung_clk_provider: information about clock provider
26 * @reg_base: virtual address for the register base. 22 * @reg_base: virtual address for the register base.
27 * @clk_data: holds clock related data like clk* and number of clocks. 23 * @clk_data: holds clock related data like clk* and number of clocks.
28 * @lock: maintains exclusion bwtween callbacks for a given clock-provider. 24 * @lock: maintains exclusion between callbacks for a given clock-provider.
29 */ 25 */
30struct samsung_clk_provider { 26struct samsung_clk_provider {
31 void __iomem *reg_base; 27 void __iomem *reg_base;
@@ -324,6 +320,40 @@ struct samsung_pll_clock {
324 __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \ 320 __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
325 _lock, _con, _rtable, _alias) 321 _lock, _con, _rtable, _alias)
326 322
323struct samsung_clock_reg_cache {
324 struct list_head node;
325 void __iomem *reg_base;
326 struct samsung_clk_reg_dump *rdump;
327 unsigned int rd_num;
328};
329
330struct samsung_cmu_info {
331 /* list of pll clocks and respective count */
332 struct samsung_pll_clock *pll_clks;
333 unsigned int nr_pll_clks;
334 /* list of mux clocks and respective count */
335 struct samsung_mux_clock *mux_clks;
336 unsigned int nr_mux_clks;
337 /* list of div clocks and respective count */
338 struct samsung_div_clock *div_clks;
339 unsigned int nr_div_clks;
340 /* list of gate clocks and respective count */
341 struct samsung_gate_clock *gate_clks;
342 unsigned int nr_gate_clks;
343 /* list of fixed clocks and respective count */
344 struct samsung_fixed_rate_clock *fixed_clks;
345 unsigned int nr_fixed_clks;
346 /* list of fixed factor clocks and respective count */
347 struct samsung_fixed_factor_clock *fixed_factor_clks;
348 unsigned int nr_fixed_factor_clks;
349 /* total number of clocks with IDs assigned */
350 unsigned int nr_clk_ids;
351
352 /* list and number of clock registers */
353 unsigned long *clk_regs;
354 unsigned int nr_clk_regs;
355};
356
327extern struct samsung_clk_provider *__init samsung_clk_init( 357extern struct samsung_clk_provider *__init samsung_clk_init(
328 struct device_node *np, void __iomem *base, 358 struct device_node *np, void __iomem *base,
329 unsigned long nr_clks); 359 unsigned long nr_clks);
@@ -362,6 +392,9 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
362 struct samsung_pll_clock *pll_list, 392 struct samsung_pll_clock *pll_list,
363 unsigned int nr_clk, void __iomem *base); 393 unsigned int nr_clk, void __iomem *base);
364 394
395extern void __init samsung_cmu_register_one(struct device_node *,
396 struct samsung_cmu_info *);
397
365extern unsigned long _get_rate(const char *clk_name); 398extern unsigned long _get_rate(const char *clk_name);
366 399
367extern void samsung_clk_save(void __iomem *base, 400extern void samsung_clk_save(void __iomem *base,
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index f065f694cb65..639241e31e03 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -32,6 +32,9 @@ struct div6_clock {
32 struct clk_hw hw; 32 struct clk_hw hw;
33 void __iomem *reg; 33 void __iomem *reg;
34 unsigned int div; 34 unsigned int div;
35 u32 src_shift;
36 u32 src_width;
37 u8 *parents;
35}; 38};
36 39
37#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw) 40#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -39,8 +42,11 @@ struct div6_clock {
39static int cpg_div6_clock_enable(struct clk_hw *hw) 42static int cpg_div6_clock_enable(struct clk_hw *hw)
40{ 43{
41 struct div6_clock *clock = to_div6_clock(hw); 44 struct div6_clock *clock = to_div6_clock(hw);
45 u32 val;
42 46
43 clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg); 47 val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
48 | CPG_DIV6_DIV(clock->div - 1);
49 clk_writel(val, clock->reg);
44 50
45 return 0; 51 return 0;
46} 52}
@@ -52,7 +58,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
52 /* DIV6 clocks require the divisor field to be non-zero when stopping 58 /* DIV6 clocks require the divisor field to be non-zero when stopping
53 * the clock. 59 * the clock.
54 */ 60 */
55 clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK), 61 clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
56 clock->reg); 62 clock->reg);
57} 63}
58 64
@@ -94,12 +100,53 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
94{ 100{
95 struct div6_clock *clock = to_div6_clock(hw); 101 struct div6_clock *clock = to_div6_clock(hw);
96 unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate); 102 unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
103 u32 val;
97 104
98 clock->div = div; 105 clock->div = div;
99 106
107 val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
100 /* Only program the new divisor if the clock isn't stopped. */ 108 /* Only program the new divisor if the clock isn't stopped. */
101 if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP)) 109 if (!(val & CPG_DIV6_CKSTP))
102 clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg); 110 clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
111
112 return 0;
113}
114
115static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
116{
117 struct div6_clock *clock = to_div6_clock(hw);
118 unsigned int i;
119 u8 hw_index;
120
121 if (clock->src_width == 0)
122 return 0;
123
124 hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
125 (BIT(clock->src_width) - 1);
126 for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
127 if (clock->parents[i] == hw_index)
128 return i;
129 }
130
131 pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
132 __func__, __clk_get_name(hw->clk), hw_index);
133 return 0;
134}
135
136static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
137{
138 struct div6_clock *clock = to_div6_clock(hw);
139 u8 hw_index;
140 u32 mask;
141
142 if (index >= __clk_get_num_parents(hw->clk))
143 return -EINVAL;
144
145 mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
146 hw_index = clock->parents[index];
147
148 clk_writel((clk_readl(clock->reg) & mask) |
149 (hw_index << clock->src_shift), clock->reg);
103 150
104 return 0; 151 return 0;
105} 152}
@@ -108,6 +155,8 @@ static const struct clk_ops cpg_div6_clock_ops = {
108 .enable = cpg_div6_clock_enable, 155 .enable = cpg_div6_clock_enable,
109 .disable = cpg_div6_clock_disable, 156 .disable = cpg_div6_clock_disable,
110 .is_enabled = cpg_div6_clock_is_enabled, 157 .is_enabled = cpg_div6_clock_is_enabled,
158 .get_parent = cpg_div6_clock_get_parent,
159 .set_parent = cpg_div6_clock_set_parent,
111 .recalc_rate = cpg_div6_clock_recalc_rate, 160 .recalc_rate = cpg_div6_clock_recalc_rate,
112 .round_rate = cpg_div6_clock_round_rate, 161 .round_rate = cpg_div6_clock_round_rate,
113 .set_rate = cpg_div6_clock_set_rate, 162 .set_rate = cpg_div6_clock_set_rate,
@@ -115,20 +164,33 @@ static const struct clk_ops cpg_div6_clock_ops = {
115 164
116static void __init cpg_div6_clock_init(struct device_node *np) 165static void __init cpg_div6_clock_init(struct device_node *np)
117{ 166{
167 unsigned int num_parents, valid_parents;
168 const char **parent_names;
118 struct clk_init_data init; 169 struct clk_init_data init;
119 struct div6_clock *clock; 170 struct div6_clock *clock;
120 const char *parent_name;
121 const char *name; 171 const char *name;
122 struct clk *clk; 172 struct clk *clk;
173 unsigned int i;
123 int ret; 174 int ret;
124 175
125 clock = kzalloc(sizeof(*clock), GFP_KERNEL); 176 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
126 if (!clock) { 177 if (!clock)
127 pr_err("%s: failed to allocate %s DIV6 clock\n", 178 return;
179
180 num_parents = of_clk_get_parent_count(np);
181 if (num_parents < 1) {
182 pr_err("%s: no parent found for %s DIV6 clock\n",
128 __func__, np->name); 183 __func__, np->name);
129 return; 184 return;
130 } 185 }
131 186
187 clock->parents = kmalloc_array(num_parents, sizeof(*clock->parents),
188 GFP_KERNEL);
189 parent_names = kmalloc_array(num_parents, sizeof(*parent_names),
190 GFP_KERNEL);
191 if (!clock->parents || !parent_names)
192 return;
193
132 /* Remap the clock register and read the divisor. Disabling the 194 /* Remap the clock register and read the divisor. Disabling the
133 * clock overwrites the divisor, so we need to cache its value for the 195 * clock overwrites the divisor, so we need to cache its value for the
134 * enable operation. 196 * enable operation.
@@ -150,9 +212,34 @@ static void __init cpg_div6_clock_init(struct device_node *np)
150 goto error; 212 goto error;
151 } 213 }
152 214
153 parent_name = of_clk_get_parent_name(np, 0); 215
154 if (parent_name == NULL) { 216 for (i = 0, valid_parents = 0; i < num_parents; i++) {
155 pr_err("%s: failed to get %s DIV6 clock parent name\n", 217 const char *name = of_clk_get_parent_name(np, i);
218
219 if (name) {
220 parent_names[valid_parents] = name;
221 clock->parents[valid_parents] = i;
222 valid_parents++;
223 }
224 }
225
226 switch (num_parents) {
227 case 1:
228 /* fixed parent clock */
229 clock->src_shift = clock->src_width = 0;
230 break;
231 case 4:
232 /* clock with EXSRC bits 6-7 */
233 clock->src_shift = 6;
234 clock->src_width = 2;
235 break;
236 case 8:
237 /* VCLK with EXSRC bits 12-14 */
238 clock->src_shift = 12;
239 clock->src_width = 3;
240 break;
241 default:
242 pr_err("%s: invalid number of parents for DIV6 clock %s\n",
156 __func__, np->name); 243 __func__, np->name);
157 goto error; 244 goto error;
158 } 245 }
@@ -161,8 +248,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
161 init.name = name; 248 init.name = name;
162 init.ops = &cpg_div6_clock_ops; 249 init.ops = &cpg_div6_clock_ops;
163 init.flags = CLK_IS_BASIC; 250 init.flags = CLK_IS_BASIC;
164 init.parent_names = &parent_name; 251 init.parent_names = parent_names;
165 init.num_parents = 1; 252 init.num_parents = valid_parents;
166 253
167 clock->hw.init = &init; 254 clock->hw.init = &init;
168 255
@@ -175,11 +262,13 @@ static void __init cpg_div6_clock_init(struct device_node *np)
175 262
176 of_clk_add_provider(np, of_clk_src_simple_get, clk); 263 of_clk_add_provider(np, of_clk_src_simple_get, clk);
177 264
265 kfree(parent_names);
178 return; 266 return;
179 267
180error: 268error:
181 if (clock->reg) 269 if (clock->reg)
182 iounmap(clock->reg); 270 iounmap(clock->reg);
271 kfree(parent_names);
183 kfree(clock); 272 kfree(clock);
184} 273}
185CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init); 274CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
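
The parents[] table above maps clk framework indices to hardware EXSRC
values, skipping DT entries with no usable parent. A standalone sketch of the
.get_parent lookup, assuming (hypothetically) that entries 2 and 5 of eight
possible parents are absent:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t parents[] = { 0, 1, 3, 4, 6, 7 };   /* framework -> hw index */
        uint32_t reg = 4 << 12;         /* hypothetical register, EXSRC = 4 */
        uint8_t hw_index = (reg >> 12) & 7;
        unsigned int i;

        for (i = 0; i < sizeof(parents); i++)
                if (parents[i] == hw_index)
                        break;
        assert(i == 3);                 /* framework sees parent index 3 */
        return 0;
}
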
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 7ddc2b553846..a66953c0f430 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -7,6 +7,7 @@ obj-y += clk-a10-hosc.o
7obj-y += clk-a20-gmac.o 7obj-y += clk-a20-gmac.o
8obj-y += clk-mod0.o 8obj-y += clk-mod0.o
9obj-y += clk-sun8i-mbus.o 9obj-y += clk-sun8i-mbus.o
10obj-y += clk-sun9i-core.o
10 11
11obj-$(CONFIG_MFD_SUN6I_PRCM) += \ 12obj-$(CONFIG_MFD_SUN6I_PRCM) += \
12 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ 13 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 5296fd6dd7b3..0dcf4f205fb8 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -53,6 +53,11 @@ static DEFINE_SPINLOCK(gmac_lock);
53#define SUN7I_A20_GMAC_MASK 0x3 53#define SUN7I_A20_GMAC_MASK 0x3
54#define SUN7I_A20_GMAC_PARENTS 2 54#define SUN7I_A20_GMAC_PARENTS 2
55 55
56static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
57 0x00, /* Select mii_phy_tx_clk */
58 0x02, /* Select gmac_int_tx_clk */
59};
60
56static void __init sun7i_a20_gmac_clk_setup(struct device_node *node) 61static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
57{ 62{
58 struct clk *clk; 63 struct clk *clk;
@@ -90,7 +95,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
90 gate->lock = &gmac_lock; 95 gate->lock = &gmac_lock;
91 mux->reg = reg; 96 mux->reg = reg;
92 mux->mask = SUN7I_A20_GMAC_MASK; 97 mux->mask = SUN7I_A20_GMAC_MASK;
93 mux->flags = CLK_MUX_INDEX_BIT; 98 mux->table = sun7i_a20_gmac_mux_table;
94 mux->lock = &gmac_lock; 99 mux->lock = &gmac_lock;
95 100
96 clk = clk_register_composite(NULL, clk_name, 101 clk = clk_register_composite(NULL, clk_name,
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index f83ba097126c..62e08fb58554 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -81,7 +81,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
81 81
82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, 82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
83 unsigned long *best_parent_rate, 83 unsigned long *best_parent_rate,
84 struct clk **best_parent_p) 84 struct clk_hw **best_parent_p)
85{ 85{
86 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 86 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
87 int i, num_parents; 87 int i, num_parents;
@@ -108,7 +108,7 @@ static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
108 } 108 }
109 109
110 if (best_parent) 110 if (best_parent)
111 *best_parent_p = best_parent; 111 *best_parent_p = __clk_get_hw(best_parent);
112 *best_parent_rate = best; 112 *best_parent_rate = best;
113 113
114 return best_child_rate; 114 return best_child_rate;
@@ -224,7 +224,7 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
224 /* set up mux properties */ 224 /* set up mux properties */
225 mux->reg = reg; 225 mux->reg = reg;
226 mux->shift = data->mux; 226 mux->shift = data->mux;
227 mux->mask = SUNXI_FACTORS_MUX_MASK; 227 mux->mask = data->muxmask;
228 mux->lock = factors->lock; 228 mux->lock = factors->lock;
229 mux_hw = &mux->hw; 229 mux_hw = &mux->hw;
230 } 230 }
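
struct clk_mux applies the mask after shifting, so a muxmask of
BIT(1) | BIT(0) at shift 24 decodes as below (register value hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t reg = 0x02000000;              /* parent 2 in bits [25:24] */
        uint32_t shift = 24, mask = 0x3;        /* BIT(1) | BIT(0) */

        assert(((reg >> shift) & mask) == 2);
        return 0;
}
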
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 9913840018d3..912238fde132 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -7,8 +7,6 @@
7 7
8#define SUNXI_FACTORS_NOT_APPLICABLE (0) 8#define SUNXI_FACTORS_NOT_APPLICABLE (0)
9 9
10#define SUNXI_FACTORS_MUX_MASK 0x3
11
12struct clk_factors_config { 10struct clk_factors_config {
13 u8 nshift; 11 u8 nshift;
14 u8 nwidth; 12 u8 nwidth;
@@ -24,6 +22,7 @@ struct clk_factors_config {
24struct factors_data { 22struct factors_data {
25 int enable; 23 int enable;
26 int mux; 24 int mux;
25 int muxmask;
27 struct clk_factors_config *table; 26 struct clk_factors_config *table;
28 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p); 27 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
29 const char *name; 28 const char *name;
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 4a563850ee6e..da0524eaee94 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -70,6 +70,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
70static const struct factors_data sun4i_a10_mod0_data __initconst = { 70static const struct factors_data sun4i_a10_mod0_data __initconst = {
71 .enable = 31, 71 .enable = 31,
72 .mux = 24, 72 .mux = 24,
73 .muxmask = BIT(1) | BIT(0),
73 .table = &sun4i_a10_mod0_config, 74 .table = &sun4i_a10_mod0_config,
74 .getter = sun4i_a10_get_mod0_factors, 75 .getter = sun4i_a10_get_mod0_factors,
75}; 76};
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index acca53290be2..3d282fb8f85c 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -46,7 +46,7 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
46 46
47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, 47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
48 unsigned long *best_parent_rate, 48 unsigned long *best_parent_rate,
49 struct clk **best_parent_clk) 49 struct clk_hw **best_parent_clk)
50{ 50{
51 int nparents = __clk_get_num_parents(hw->clk); 51 int nparents = __clk_get_num_parents(hw->clk);
52 long best_rate = -EINVAL; 52 long best_rate = -EINVAL;
@@ -100,7 +100,7 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
100 100
101 tmp_rate = (parent_rate >> shift) / div; 101 tmp_rate = (parent_rate >> shift) / div;
102 if (!*best_parent_clk || tmp_rate > best_rate) { 102 if (!*best_parent_clk || tmp_rate > best_rate) {
103 *best_parent_clk = parent; 103 *best_parent_clk = __clk_get_hw(parent);
104 *best_parent_rate = parent_rate; 104 *best_parent_rate = parent_rate;
105 best_rate = tmp_rate; 105 best_rate = tmp_rate;
106 } 106 }
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 8e49b44cee41..ef49786eefd3 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -60,6 +60,7 @@ static struct clk_factors_config sun8i_a23_mbus_config = {
60static const struct factors_data sun8i_a23_mbus_data __initconst = { 60static const struct factors_data sun8i_a23_mbus_data __initconst = {
61 .enable = 31, 61 .enable = 31,
62 .mux = 24, 62 .mux = 24,
63 .muxmask = BIT(1) | BIT(0),
63 .table = &sun8i_a23_mbus_config, 64 .table = &sun8i_a23_mbus_config,
64 .getter = sun8i_a23_get_mbus_factors, 65 .getter = sun8i_a23_get_mbus_factors,
65}; 66};
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
new file mode 100644
index 000000000000..3cb9036d91bb
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2014 Chen-Yu Tsai
3 *
4 * Chen-Yu Tsai <wens@csie.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
21#include <linux/log2.h>
22
23#include "clk-factors.h"
24
25
26/**
27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
28 * PLL4 rate is calculated as follows
29 * rate = (parent_rate * n >> p) / (m + 1);
30 * parent_rate is always 24 MHz
31 *
32 * p and m are named div1 and div2 in Allwinner's SDK
33 */
34
35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
36 u8 *n, u8 *k, u8 *m, u8 *p)
37{
38 int div;
39
40 /* Normalize value to a 6M multiple */
41 div = DIV_ROUND_UP(*freq, 6000000);
42
43 /* divs above 256 cannot be odd */
44 if (div > 256)
45 div = round_up(div, 2);
46
47 /* divs above 512 must be a multiple of 4 */
48 if (div > 512)
49 div = round_up(div, 4);
50
51 *freq = 6000000 * div;
52
53 /* we were called to round the frequency, we can now return */
54 if (n == NULL)
55 return;
56
57 /* p will be 1 for divs under 512 */
58 if (div < 512)
59 *p = 1;
60 else
61 *p = 0;
62
63 /* m will be 1 if div is odd */
64 if (div & 1)
65 *m = 1;
66 else
67 *m = 0;
68
69 /* calculate a suitable n based on m and p */
70 *n = div / (*p + 1) / (*m + 1);
71}
72
73static struct clk_factors_config sun9i_a80_pll4_config = {
74 .mshift = 18,
75 .mwidth = 1,
76 .nshift = 8,
77 .nwidth = 8,
78 .pshift = 16,
79 .pwidth = 1,
80};
81
82static const struct factors_data sun9i_a80_pll4_data __initconst = {
83 .enable = 31,
84 .table = &sun9i_a80_pll4_config,
85 .getter = sun9i_a80_get_pll4_factors,
86};
87
88static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
89
90static void __init sun9i_a80_pll4_setup(struct device_node *node)
91{
92 sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
93}
94CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
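
A standalone check of the PLL4 factor math above for a hypothetical 960 MHz
request: div = 160, so p = 1 (div < 512), m = 0 (div even) and
n = 160 / 2 / 1 = 80; the documented formula then recovers the rate:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t parent = 24000000;
        uint32_t n = 80, p = 1, m = 0;

        assert(((parent * n) >> p) / (m + 1) == 960000000);
        return 0;
}
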
95
96
97/**
98 * sun9i_a80_get_gt_factors() - calculates m factor for GT
99 * GT rate is calculated as follows
100 * rate = parent_rate / (m + 1);
101 */
102
103static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
104 u8 *n, u8 *k, u8 *m, u8 *p)
105{
106 u32 div;
107
108 if (parent_rate < *freq)
109 *freq = parent_rate;
110
111 div = DIV_ROUND_UP(parent_rate, *freq);
112
113 /* maximum divider is 4 */
114 if (div > 4)
115 div = 4;
116
117 *freq = parent_rate / div;
118
119 /* we were called to round the frequency, we can now return */
120 if (!m)
121 return;
122
123 *m = div - 1;
124}
125
126static struct clk_factors_config sun9i_a80_gt_config = {
127 .mshift = 0,
128 .mwidth = 2,
129};
130
131static const struct factors_data sun9i_a80_gt_data __initconst = {
132 .mux = 24,
133 .muxmask = BIT(1) | BIT(0),
134 .table = &sun9i_a80_gt_config,
135 .getter = sun9i_a80_get_gt_factors,
136};
137
138static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
139
140static void __init sun9i_a80_gt_setup(struct device_node *node)
141{
142 struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
143 &sun9i_a80_gt_lock);
144
145 /* The GT bus clock needs to be always enabled */
146 __clk_get(gt);
147 clk_prepare_enable(gt);
148}
149CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
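
Asking the GT getter for half the parent rate should program m = 1 so that
parent_rate / (m + 1) lands on the target; this doubles as a standalone check
of the m = div - 1 correction above (rates hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t parent = 400000000, freq = 200000000;
        uint32_t div = (parent + freq - 1) / freq;      /* DIV_ROUND_UP */
        uint8_t m;

        if (div > 4)                    /* maximum divider is 4 */
                div = 4;
        m = div - 1;
        assert(parent / (m + 1) == freq);
        return 0;
}
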
150
151
152/**
153 * sun9i_a80_get_ahb_factors() - calculates p factor for AHB0/1/2
154 * AHB rate is calculated as follows
155 * rate = parent_rate >> p;
156 */
157
158static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
159 u8 *n, u8 *k, u8 *m, u8 *p)
160{
161 u32 _p;
162
163 if (parent_rate < *freq)
164 *freq = parent_rate;
165
166 _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
167
168 /* maximum p is 3 */
169 if (_p > 3)
170 _p = 3;
171
172 *freq = parent_rate >> _p;
173
174 /* we were called to round the frequency, we can now return */
175 if (!p)
176 return;
177
178 *p = _p;
179}
180
181static struct clk_factors_config sun9i_a80_ahb_config = {
182 .pshift = 0,
183 .pwidth = 2,
184};
185
186static const struct factors_data sun9i_a80_ahb_data __initconst = {
187 .mux = 24,
188 .muxmask = BIT(1) | BIT(0),
189 .table = &sun9i_a80_ahb_config,
190 .getter = sun9i_a80_get_ahb_factors,
191};
192
193static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
194
195static void __init sun9i_a80_ahb_setup(struct device_node *node)
196{
197 sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
198}
199CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
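
The AHB getter above is pure power-of-two division. A standalone check for a
hypothetical 100 MHz request from a 400 MHz parent, where p should come out
as order_base_2(4) = 2:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t parent = 400000000, freq = 100000000;
        uint32_t p = 0;

        while ((parent >> p) > freq && p < 3)   /* order_base_2(), capped */
                p++;

        assert(p == 2 && (parent >> p) == freq);
        return 0;
}
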
200
201
202static const struct factors_data sun9i_a80_apb0_data __initconst = {
203 .mux = 24,
204 .muxmask = BIT(0),
205 .table = &sun9i_a80_ahb_config,
206 .getter = sun9i_a80_get_ahb_factors,
207};
208
209static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
210
211static void __init sun9i_a80_apb0_setup(struct device_node *node)
212{
213 sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
214}
215CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
216
217
218/**
219 * sun9i_a80_get_apb1_factors() - calculates m, p factors for APB1
220 * APB1 rate is calculated as follows
221 * rate = (parent_rate >> p) / (m + 1);
222 */
223
224static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
225 u8 *n, u8 *k, u8 *m, u8 *p)
226{
227 u32 div;
228 u8 calcm, calcp;
229
230 if (parent_rate < *freq)
231 *freq = parent_rate;
232
233 div = DIV_ROUND_UP(parent_rate, *freq);
234
235 /* Highest possible divider is 256 (p = 3, m = 31) */
236 if (div > 256)
237 div = 256;
238
239 calcp = order_base_2(DIV_ROUND_UP(div, 32));
240 calcm = DIV_ROUND_UP(div, 1 << calcp) - 1;
241 *freq = (parent_rate >> calcp) / (calcm + 1);
242
243 /* we were called to round the frequency, we can now return */
244 if (n == NULL)
245 return;
246
247 *m = calcm;
248 *p = calcp;
249}
250
251static struct clk_factors_config sun9i_a80_apb1_config = {
252 .mshift = 0,
253 .mwidth = 5,
254 .pshift = 16,
255 .pwidth = 2,
256};
257
258static const struct factors_data sun9i_a80_apb1_data __initconst = {
259 .mux = 24,
260 .muxmask = BIT(0),
261 .table = &sun9i_a80_apb1_config,
262 .getter = sun9i_a80_get_apb1_factors,
263};
264
265static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
266
267static void __init sun9i_a80_apb1_setup(struct device_node *node)
268{
269 sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
270}
271CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
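
A standalone check of the corrected APB1 factor math above for a hypothetical
3 MHz request from a 24 MHz parent: div = 8,
p = order_base_2(DIV_ROUND_UP(8, 32)) = 0, m = 7, and
(24 MHz >> 0) / (7 + 1) = 3 MHz:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t parent = 24000000, freq = 3000000;
        uint32_t div = (parent + freq - 1) / freq;      /* DIV_ROUND_UP -> 8 */
        uint32_t p = 0;                 /* order_base_2(DIV_ROUND_UP(8, 32)) */
        uint32_t m = (div + (1u << p) - 1) / (1u << p) - 1;

        assert((parent >> p) / (m + 1) == freq);        /* m == 7 */
        return 0;
}
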
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d5dc951264ca..570202582dcf 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -245,9 +245,9 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
245} 245}
246 246
247/** 247/**
248 * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6 248 * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
249 * PLL6 rate is calculated as follows 249 * PLL6x2 rate is calculated as follows
250 * rate = parent_rate * n * (k + 1) / 2 250 * rate = parent_rate * (n + 1) * (k + 1)
251 * parent_rate is always 24 MHz 251 * parent_rate is always 24 MHz
252 */ 252 */
253 253
@@ -256,13 +256,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
256{ 256{
257 u8 div; 257 u8 div;
258 258
259 /* 259 /* Normalize value to a parent_rate multiple (24M) */
260 * We always have 24MHz / 2, so we can just say that our
261 * parent clock is 12MHz.
262 */
263 parent_rate = parent_rate / 2;
264
265 /* Normalize value to a parent_rate multiple (24M / 2) */
266 div = *freq / parent_rate; 260 div = *freq / parent_rate;
267 *freq = parent_rate * div; 261 *freq = parent_rate * div;
268 262
@@ -274,7 +268,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
274 if (*k > 3) 268 if (*k > 3)
275 *k = 3; 269 *k = 3;
276 270
277 *n = DIV_ROUND_UP(div, (*k+1)); 271 *n = DIV_ROUND_UP(div, (*k+1)) - 1;
278} 272}
279 273
280/** 274/**
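
A standalone check of the new x2 formula: a hypothetical 1.2 GHz pll6x2 from
a 24 MHz parent gives div = 50; assuming k = 1 has been picked,
n = DIV_ROUND_UP(50, 2) - 1 = 24 and 24 MHz * (24 + 1) * (1 + 1) = 1.2 GHz:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t parent = 24000000;
        uint32_t n = 24, k = 1;

        assert(parent * (n + 1) * (k + 1) == 1200000000);
        return 0;
}
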
@@ -445,6 +439,7 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
445 .nwidth = 5, 439 .nwidth = 5,
446 .kshift = 4, 440 .kshift = 4,
447 .kwidth = 2, 441 .kwidth = 2,
442 .n_start = 1,
448}; 443};
449 444
450static struct clk_factors_config sun4i_apb1_config = { 445static struct clk_factors_config sun4i_apb1_config = {
@@ -504,9 +499,12 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
504 .enable = 31, 499 .enable = 31,
505 .table = &sun6i_a31_pll6_config, 500 .table = &sun6i_a31_pll6_config,
506 .getter = sun6i_a31_get_pll6_factors, 501 .getter = sun6i_a31_get_pll6_factors,
502 .name = "pll6x2",
507}; 503};
508 504
509static const struct factors_data sun4i_apb1_data __initconst = { 505static const struct factors_data sun4i_apb1_data __initconst = {
506 .mux = 24,
507 .muxmask = BIT(1) | BIT(0),
510 .table = &sun4i_apb1_config, 508 .table = &sun4i_apb1_config,
511 .getter = sun4i_get_apb1_factors, 509 .getter = sun4i_get_apb1_factors,
512}; 510};
@@ -514,6 +512,7 @@ static const struct factors_data sun4i_apb1_data __initconst = {
514static const struct factors_data sun7i_a20_out_data __initconst = { 512static const struct factors_data sun7i_a20_out_data __initconst = {
515 .enable = 31, 513 .enable = 31,
516 .mux = 24, 514 .mux = 24,
515 .muxmask = BIT(1) | BIT(0),
517 .table = &sun7i_a20_out_config, 516 .table = &sun7i_a20_out_config,
518 .getter = sun7i_a20_get_out_factors, 517 .getter = sun7i_a20_get_out_factors,
519}; 518};
@@ -544,10 +543,6 @@ static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
544 .shift = 12, 543 .shift = 12,
545}; 544};
546 545
547static const struct mux_data sun4i_apb1_mux_data __initconst = {
548 .shift = 24,
549};
550
551static void __init sunxi_mux_clk_setup(struct device_node *node, 546static void __init sunxi_mux_clk_setup(struct device_node *node,
552 struct mux_data *data) 547 struct mux_data *data)
553{ 548{
@@ -633,12 +628,6 @@ static const struct div_data sun4i_apb0_data __initconst = {
633 .table = sun4i_apb0_table, 628 .table = sun4i_apb0_table,
634}; 629};
635 630
636static const struct div_data sun6i_a31_apb2_div_data __initconst = {
637 .shift = 0,
638 .pow = 0,
639 .width = 4,
640};
641
642static void __init sunxi_divider_clk_setup(struct device_node *node, 631static void __init sunxi_divider_clk_setup(struct device_node *node,
643 struct div_data *data) 632 struct div_data *data)
644{ 633{
@@ -757,6 +746,18 @@ static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
757 .mask = {0x25386742, 0x2505111}, 746 .mask = {0x25386742, 0x2505111},
758}; 747};
759 748
749static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
750 .mask = {0xF5F12B},
751};
752
753static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
754 .mask = {0x1E20003},
755};
756
757static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
758 .mask = {0x9B7},
759};
760
760static const struct gates_data sun4i_apb0_gates_data __initconst = { 761static const struct gates_data sun4i_apb0_gates_data __initconst = {
761 .mask = {0x4EF}, 762 .mask = {0x4EF},
762}; 763};
@@ -773,6 +774,10 @@ static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
773 .mask = { 0x4ff }, 774 .mask = { 0x4ff },
774}; 775};
775 776
777static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
778 .mask = {0xEB822},
779};
780
776static const struct gates_data sun4i_apb1_gates_data __initconst = { 781static const struct gates_data sun4i_apb1_gates_data __initconst = {
777 .mask = {0xFF00F7}, 782 .mask = {0xFF00F7},
778}; 783};
@@ -801,6 +806,10 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
801 .mask = { 0xff80ff }, 806 .mask = { 0xff80ff },
802}; 807};
803 808
809static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
810 .mask = {0x3F001F},
811};
812
804static const struct gates_data sun8i_a23_apb2_gates_data __initconst = { 813static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
805 .mask = {0x1F0007}, 814 .mask = {0x1F0007},
806}; 815};
@@ -893,6 +902,7 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
893 902
894struct divs_data { 903struct divs_data {
895 const struct factors_data *factors; /* data for the factor clock */ 904 const struct factors_data *factors; /* data for the factor clock */
905 int ndivs; /* number of children */
896 struct { 906 struct {
897 u8 fixed; /* is it a fixed divisor? if not... */ 907 u8 fixed; /* is it a fixed divisor? if not... */
898 struct clk_div_table *table; /* is it a table based divisor? */ 908 struct clk_div_table *table; /* is it a table based divisor? */
@@ -912,6 +922,7 @@ static struct clk_div_table pll6_sata_tbl[] = {
912 922
913static const struct divs_data pll5_divs_data __initconst = { 923static const struct divs_data pll5_divs_data __initconst = {
914 .factors = &sun4i_pll5_data, 924 .factors = &sun4i_pll5_data,
925 .ndivs = 2,
915 .div = { 926 .div = {
916 { .shift = 0, .pow = 0, }, /* M, DDR */ 927 { .shift = 0, .pow = 0, }, /* M, DDR */
917 { .shift = 16, .pow = 1, }, /* P, other */ 928 { .shift = 16, .pow = 1, }, /* P, other */
@@ -920,12 +931,21 @@ static const struct divs_data pll5_divs_data __initconst = {
920 931
921static const struct divs_data pll6_divs_data __initconst = { 932static const struct divs_data pll6_divs_data __initconst = {
922 .factors = &sun4i_pll6_data, 933 .factors = &sun4i_pll6_data,
934 .ndivs = 2,
923 .div = { 935 .div = {
924 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */ 936 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
925 { .fixed = 2 }, /* P, other */ 937 { .fixed = 2 }, /* P, other */
926 } 938 }
927}; 939};
928 940
941static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
942 .factors = &sun6i_a31_pll6_data,
943 .ndivs = 1,
944 .div = {
945 { .fixed = 2 }, /* normal output */
946 }
947};
948
929/** 949/**
930 * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks 950 * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
931 * 951 *
@@ -950,7 +970,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
950 struct clk_fixed_factor *fix_factor; 970 struct clk_fixed_factor *fix_factor;
951 struct clk_divider *divider; 971 struct clk_divider *divider;
952 void __iomem *reg; 972 void __iomem *reg;
953 int i = 0; 973 int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
954 int flags, clkflags; 974 int flags, clkflags;
955 975
956 /* Set up factor clock that we will be dividing */ 976 /* Set up factor clock that we will be dividing */
@@ -973,7 +993,11 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
973 * our RAM clock! */ 993 * our RAM clock! */
974 clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT; 994 clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
975 995
976 for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) { 996 /* if number of children known, use it */
997 if (data->ndivs)
998 ndivs = data->ndivs;
999
1000 for (i = 0; i < ndivs; i++) {
977 if (of_property_read_string_index(node, "clock-output-names", 1001 if (of_property_read_string_index(node, "clock-output-names",
978 i, &clk_name) != 0) 1002 i, &clk_name) != 0)
979 break; 1003 break;
@@ -1062,7 +1086,6 @@ static const struct of_device_id clk_factors_match[] __initconst = {
1062 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,}, 1086 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
1063 {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,}, 1087 {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
1064 {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,}, 1088 {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
1065 {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
1066 {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,}, 1089 {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
1067 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,}, 1090 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
1068 {} 1091 {}
@@ -1074,7 +1097,6 @@ static const struct of_device_id clk_div_match[] __initconst = {
1074 {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,}, 1097 {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
1075 {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,}, 1098 {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
1076 {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,}, 1099 {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
1077 {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
1078 {} 1100 {}
1079}; 1101};
1080 1102
@@ -1082,13 +1104,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
1082static const struct of_device_id clk_divs_match[] __initconst = { 1104static const struct of_device_id clk_divs_match[] __initconst = {
1083 {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,}, 1105 {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
1084 {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,}, 1106 {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
1107 {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
1085 {} 1108 {}
1086}; 1109};
1087 1110
1088/* Matches for mux clocks */ 1111/* Matches for mux clocks */
1089static const struct of_device_id clk_mux_match[] __initconst = { 1112static const struct of_device_id clk_mux_match[] __initconst = {
1090 {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,}, 1113 {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
1091 {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
1092 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,}, 1114 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
1093 {} 1115 {}
1094}; 1116};
@@ -1102,16 +1124,21 @@ static const struct of_device_id clk_gates_match[] __initconst = {
1102 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,}, 1124 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
1103 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,}, 1125 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
1104 {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,}, 1126 {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
1127 {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
1128 {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
1129 {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
1105 {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, 1130 {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
1106 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,}, 1131 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
1107 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,}, 1132 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
1108 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,}, 1133 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
1134 {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
1109 {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, 1135 {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
1110 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,}, 1136 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
1111 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,}, 1137 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
1112 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,}, 1138 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
1113 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,}, 1139 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
1114 {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,}, 1140 {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
1141 {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
1115 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, 1142 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
1116 {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,}, 1143 {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
1117 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,}, 1144 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
@@ -1200,3 +1227,9 @@ static void __init sun6i_init_clocks(struct device_node *node)
1200} 1227}
1201CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks); 1228CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
1202CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks); 1229CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
1230
1231static void __init sun9i_init_clocks(struct device_node *node)
1232{
1233 sunxi_init_clocks(NULL, 0);
1234}
1235CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 91a488c7cc44..31e8308ba899 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -753,6 +753,7 @@ config I2C_SH7760
753 753
754config I2C_SH_MOBILE 754config I2C_SH_MOBILE
755 tristate "SuperH Mobile I2C Controller" 755 tristate "SuperH Mobile I2C Controller"
756 depends on HAS_DMA
756 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST 757 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
757 help 758 help
758 If you say yes to this option, support will be included for the 759 If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 373f6d4e4080..30059c1df2a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -30,12 +30,12 @@
30#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7) 30#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7)
31#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3) 31#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3)
32 32
33#define MV64XXX_I2C_REG_CONTROL_ACK 0x00000004 33#define MV64XXX_I2C_REG_CONTROL_ACK BIT(2)
34#define MV64XXX_I2C_REG_CONTROL_IFLG 0x00000008 34#define MV64XXX_I2C_REG_CONTROL_IFLG BIT(3)
35#define MV64XXX_I2C_REG_CONTROL_STOP 0x00000010 35#define MV64XXX_I2C_REG_CONTROL_STOP BIT(4)
36#define MV64XXX_I2C_REG_CONTROL_START 0x00000020 36#define MV64XXX_I2C_REG_CONTROL_START BIT(5)
37#define MV64XXX_I2C_REG_CONTROL_TWSIEN 0x00000040 37#define MV64XXX_I2C_REG_CONTROL_TWSIEN BIT(6)
38#define MV64XXX_I2C_REG_CONTROL_INTEN 0x00000080 38#define MV64XXX_I2C_REG_CONTROL_INTEN BIT(7)
39 39
40/* Ctlr status values */ 40/* Ctlr status values */
41#define MV64XXX_I2C_STATUS_BUS_ERR 0x00 41#define MV64XXX_I2C_STATUS_BUS_ERR 0x00
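The switch to BIT() above is purely cosmetic; the kernel macro (from
linux/bitops.h at the time) expands to the same masks:

    #define BIT(nr)			(1UL << (nr))

    /* e.g. MV64XXX_I2C_REG_CONTROL_TWSIEN: BIT(6) == 0x00000040 */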
@@ -68,19 +68,17 @@
68#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0 68#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0
69 69
70/* Bridge Control values */ 70/* Bridge Control values */
71#define MV64XXX_I2C_BRIDGE_CONTROL_WR 0x00000001 71#define MV64XXX_I2C_BRIDGE_CONTROL_WR BIT(0)
72#define MV64XXX_I2C_BRIDGE_CONTROL_RD 0x00000002 72#define MV64XXX_I2C_BRIDGE_CONTROL_RD BIT(1)
73#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2 73#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2
74#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT 0x00001000 74#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT BIT(12)
75#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13 75#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13
76#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16 76#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16
77#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE 0x00080000 77#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE BIT(19)
78#define MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START BIT(20)
78 79
79/* Bridge Status values */ 80/* Bridge Status values */
80#define MV64XXX_I2C_BRIDGE_STATUS_ERROR 0x00000001 81#define MV64XXX_I2C_BRIDGE_STATUS_ERROR BIT(0)
81#define MV64XXX_I2C_STATUS_OFFLOAD_ERROR 0xf0000001
82#define MV64XXX_I2C_STATUS_OFFLOAD_OK 0xf0000000
83
84 82
85/* Driver states */ 83/* Driver states */
86enum { 84enum {
@@ -99,14 +97,12 @@ enum {
99 MV64XXX_I2C_ACTION_INVALID, 97 MV64XXX_I2C_ACTION_INVALID,
100 MV64XXX_I2C_ACTION_CONTINUE, 98 MV64XXX_I2C_ACTION_CONTINUE,
101 MV64XXX_I2C_ACTION_SEND_RESTART, 99 MV64XXX_I2C_ACTION_SEND_RESTART,
102 MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
103 MV64XXX_I2C_ACTION_SEND_ADDR_1, 100 MV64XXX_I2C_ACTION_SEND_ADDR_1,
104 MV64XXX_I2C_ACTION_SEND_ADDR_2, 101 MV64XXX_I2C_ACTION_SEND_ADDR_2,
105 MV64XXX_I2C_ACTION_SEND_DATA, 102 MV64XXX_I2C_ACTION_SEND_DATA,
106 MV64XXX_I2C_ACTION_RCV_DATA, 103 MV64XXX_I2C_ACTION_RCV_DATA,
107 MV64XXX_I2C_ACTION_RCV_DATA_STOP, 104 MV64XXX_I2C_ACTION_RCV_DATA_STOP,
108 MV64XXX_I2C_ACTION_SEND_STOP, 105 MV64XXX_I2C_ACTION_SEND_STOP,
109 MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
110}; 106};
111 107
112struct mv64xxx_i2c_regs { 108struct mv64xxx_i2c_regs {
@@ -193,75 +189,6 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
193 } 189 }
194} 190}
195 191
196static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
197{
198 unsigned long data_reg_hi = 0;
199 unsigned long data_reg_lo = 0;
200 unsigned long ctrl_reg;
201 struct i2c_msg *msg = drv_data->msgs;
202
203 if (!drv_data->offload_enabled)
204 return -EOPNOTSUPP;
205
206 /* Only regular transactions can be offloaded */
207 if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
208 return -EINVAL;
209
210 /* Only 1-8 byte transfers can be offloaded */
211 if (msg->len < 1 || msg->len > 8)
212 return -EINVAL;
213
214 /* Build transaction */
215 ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
216 (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
217
218 if ((msg->flags & I2C_M_TEN) != 0)
219 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
220
221 if ((msg->flags & I2C_M_RD) == 0) {
222 u8 local_buf[8] = { 0 };
223
224 memcpy(local_buf, msg->buf, msg->len);
225 data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
226 data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
227
228 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
229 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
230
231 writel(data_reg_lo,
232 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
233 writel(data_reg_hi,
234 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
235
236 } else {
237 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
238 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
239 }
240
241 /* Execute transaction */
242 writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
243
244 return 0;
245}
246
247static void
248mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
249{
250 struct i2c_msg *msg = drv_data->msg;
251
252 if (msg->flags & I2C_M_RD) {
253 u32 data_reg_lo = readl(drv_data->reg_base +
254 MV64XXX_I2C_REG_RX_DATA_LO);
255 u32 data_reg_hi = readl(drv_data->reg_base +
256 MV64XXX_I2C_REG_RX_DATA_HI);
257 u8 local_buf[8] = { 0 };
258
259 *((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
260 *((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
261 memcpy(msg->buf, local_buf, msg->len);
262 }
263
264}
265/* 192/*
266 ***************************************************************************** 193 *****************************************************************************
267 * 194 *
@@ -389,16 +316,6 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
389 drv_data->rc = -ENXIO; 316 drv_data->rc = -ENXIO;
390 break; 317 break;
391 318
392 case MV64XXX_I2C_STATUS_OFFLOAD_OK:
393 if (drv_data->send_stop || drv_data->aborting) {
394 drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
395 drv_data->state = MV64XXX_I2C_STATE_IDLE;
396 } else {
397 drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
398 drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
399 }
400 break;
401
402 default: 319 default:
403 dev_err(&drv_data->adapter.dev, 320 dev_err(&drv_data->adapter.dev,
404 "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, " 321 "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, "
@@ -419,25 +336,15 @@ static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data)
419 drv_data->aborting = 0; 336 drv_data->aborting = 0;
420 drv_data->rc = 0; 337 drv_data->rc = 0;
421 338
422 /* Can we offload this msg ? */ 339 mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
423 if (mv64xxx_i2c_offload_msg(drv_data) < 0) { 340 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
424 /* No, switch to standard path */ 341 drv_data->reg_base + drv_data->reg_offsets.control);
425 mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
426 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
427 drv_data->reg_base + drv_data->reg_offsets.control);
428 }
429} 342}
430 343
431static void 344static void
432mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) 345mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
433{ 346{
434 switch(drv_data->action) { 347 switch(drv_data->action) {
435 case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
436 mv64xxx_i2c_update_offload_data(drv_data);
437 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
438 writel(0, drv_data->reg_base +
439 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
440 /* FALLTHRU */
441 case MV64XXX_I2C_ACTION_SEND_RESTART: 348 case MV64XXX_I2C_ACTION_SEND_RESTART:
442 /* We should only get here if we have further messages */ 349 /* We should only get here if we have further messages */
443 BUG_ON(drv_data->num_msgs == 0); 350 BUG_ON(drv_data->num_msgs == 0);
@@ -518,16 +425,71 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
518 drv_data->block = 0; 425 drv_data->block = 0;
519 wake_up(&drv_data->waitq); 426 wake_up(&drv_data->waitq);
520 break; 427 break;
428 }
429}
521 430
522 case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP: 431static void
523 mv64xxx_i2c_update_offload_data(drv_data); 432mv64xxx_i2c_read_offload_rx_data(struct mv64xxx_i2c_data *drv_data,
524 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL); 433 struct i2c_msg *msg)
525 writel(0, drv_data->reg_base + 434{
526 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE); 435 u32 buf[2];
527 drv_data->block = 0; 436
528 wake_up(&drv_data->waitq); 437 buf[0] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_LO);
529 break; 438 buf[1] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_HI);
439
440 memcpy(msg->buf, buf, msg->len);
441}
442
443static int
444mv64xxx_i2c_intr_offload(struct mv64xxx_i2c_data *drv_data)
445{
446 u32 cause, status;
447
448 cause = readl(drv_data->reg_base +
449 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
450 if (!cause)
451 return IRQ_NONE;
452
453 status = readl(drv_data->reg_base +
454 MV64XXX_I2C_REG_BRIDGE_STATUS);
455
456 if (status & MV64XXX_I2C_BRIDGE_STATUS_ERROR) {
457 drv_data->rc = -EIO;
458 goto out;
459 }
460
461 drv_data->rc = 0;
462
463 /*
 464 * The transaction is a single read message, so read the data
 465 * for that message.
466 */
467 if (drv_data->num_msgs == 1 && drv_data->msgs[0].flags & I2C_M_RD) {
468 mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs);
469 drv_data->msgs++;
470 drv_data->num_msgs--;
471 }
472 /*
 473 * The transaction is a write message followed by a read message,
 474 * so read the data for the second (read) message.
475 */
476 else if (drv_data->num_msgs == 2 &&
477 !(drv_data->msgs[0].flags & I2C_M_RD) &&
478 drv_data->msgs[1].flags & I2C_M_RD) {
479 mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs + 1);
480 drv_data->msgs += 2;
481 drv_data->num_msgs -= 2;
530 } 482 }
483
484out:
485 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
486 writel(0, drv_data->reg_base +
487 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
488 drv_data->block = 0;
489
490 wake_up(&drv_data->waitq);
491
492 return IRQ_HANDLED;
531} 493}
532 494
533static irqreturn_t 495static irqreturn_t
@@ -540,20 +502,9 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
540 502
541 spin_lock_irqsave(&drv_data->lock, flags); 503 spin_lock_irqsave(&drv_data->lock, flags);
542 504
543 if (drv_data->offload_enabled) { 505 if (drv_data->offload_enabled)
544 while (readl(drv_data->reg_base + 506 rc = mv64xxx_i2c_intr_offload(drv_data);
545 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) { 507
546 int reg_status = readl(drv_data->reg_base +
547 MV64XXX_I2C_REG_BRIDGE_STATUS);
548 if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
549 status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
550 else
551 status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
552 mv64xxx_i2c_fsm(drv_data, status);
553 mv64xxx_i2c_do_action(drv_data);
554 rc = IRQ_HANDLED;
555 }
556 }
557 while (readl(drv_data->reg_base + drv_data->reg_offsets.control) & 508 while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
558 MV64XXX_I2C_REG_CONTROL_IFLG) { 509 MV64XXX_I2C_REG_CONTROL_IFLG) {
559 status = readl(drv_data->reg_base + drv_data->reg_offsets.status); 510 status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
@@ -635,6 +586,117 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
635 return drv_data->rc; 586 return drv_data->rc;
636} 587}
637 588
589static void
590mv64xxx_i2c_prepare_tx(struct mv64xxx_i2c_data *drv_data)
591{
592 struct i2c_msg *msg = drv_data->msgs;
593 u32 buf[2];
594
595 memcpy(buf, msg->buf, msg->len);
596
597 writel(buf[0], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
598 writel(buf[1], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
599}
600
601static int
602mv64xxx_i2c_offload_xfer(struct mv64xxx_i2c_data *drv_data)
603{
604 struct i2c_msg *msgs = drv_data->msgs;
605 int num = drv_data->num_msgs;
606 unsigned long ctrl_reg;
607 unsigned long flags;
608
609 spin_lock_irqsave(&drv_data->lock, flags);
610
611 /* Build transaction */
612 ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
613 (msgs[0].addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
614
615 if (msgs[0].flags & I2C_M_TEN)
616 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
617
618 /* Single write message transaction */
619 if (num == 1 && !(msgs[0].flags & I2C_M_RD)) {
620 size_t len = msgs[0].len - 1;
621
622 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
623 (len << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT);
624 mv64xxx_i2c_prepare_tx(drv_data);
625 }
626 /* Single read message transaction */
627 else if (num == 1 && msgs[0].flags & I2C_M_RD) {
628 size_t len = msgs[0].len - 1;
629
630 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
631 (len << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT);
632 }
633 /*
634 * Transaction with one write and one read message. This is
 635 * guaranteed by the mv64xxx_i2c_can_offload() checks.
636 */
637 else if (num == 2) {
638 size_t lentx = msgs[0].len - 1;
639 size_t lenrx = msgs[1].len - 1;
640
641 ctrl_reg |=
642 MV64XXX_I2C_BRIDGE_CONTROL_RD |
643 MV64XXX_I2C_BRIDGE_CONTROL_WR |
644 (lentx << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT) |
645 (lenrx << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT) |
646 MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START;
647 mv64xxx_i2c_prepare_tx(drv_data);
648 }
649
650 /* Execute transaction */
651 drv_data->block = 1;
652 writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
653 spin_unlock_irqrestore(&drv_data->lock, flags);
654
655 mv64xxx_i2c_wait_for_completion(drv_data);
656
657 return drv_data->rc;
658}
659
660static bool
661mv64xxx_i2c_valid_offload_sz(struct i2c_msg *msg)
662{
663 return msg->len <= 8 && msg->len >= 1;
664}
665
666static bool
667mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
668{
669 struct i2c_msg *msgs = drv_data->msgs;
670 int num = drv_data->num_msgs;
671
674 if (!drv_data->offload_enabled)
675 return false;
676
677 /*
678 * We can offload a transaction consisting of a single
679 * message, as long as the message has a length between 1 and
680 * 8 bytes.
681 */
682 if (num == 1 && mv64xxx_i2c_valid_offload_sz(msgs))
683 return true;
684
685 /*
686 * We can offload a transaction consisting of two messages, if
 687 * the first is a write and the second is a read, and both have
688 * a length between 1 and 8 bytes.
689 */
690 if (num == 2 &&
691 mv64xxx_i2c_valid_offload_sz(msgs) &&
692 mv64xxx_i2c_valid_offload_sz(msgs + 1) &&
693 !(msgs[0].flags & I2C_M_RD) &&
694 msgs[1].flags & I2C_M_RD)
695 return true;
696
697 return false;
698}
699
638/* 700/*
639 ***************************************************************************** 701 *****************************************************************************
640 * 702 *
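As a concrete illustration of a transfer that passes these checks, consider a
typical register read: a short write followed by a short read. This is a
hedged sketch, not driver code; the helper name, device address 0x50 and
register 0x10 are made up, while struct i2c_msg, I2C_M_RD and i2c_transfer()
come from <linux/i2c.h>:

    /* One write message (register pointer) plus one read message, both
     * between 1 and 8 bytes, so mv64xxx_i2c_can_offload() accepts the
     * pair and mv64xxx_i2c_offload_xfer() runs it with
     * MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START set.
     */
    static int example_read_reg(struct i2c_adapter *adap, u8 *val)
    {
            u8 reg = 0x10;
            struct i2c_msg msgs[2] = {
                    { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = val  },
            };

            return i2c_transfer(adap, msgs, 2);
    }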
@@ -658,7 +720,11 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
658 drv_data->msgs = msgs; 720 drv_data->msgs = msgs;
659 drv_data->num_msgs = num; 721 drv_data->num_msgs = num;
660 722
661 rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1); 723 if (mv64xxx_i2c_can_offload(drv_data))
724 rc = mv64xxx_i2c_offload_xfer(drv_data);
725 else
726 rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
727
662 if (rc < 0) 728 if (rc < 0)
663 ret = rc; 729 ret = rc;
664 730
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index d7efaf44868b..440d5dbc8b5f 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -140,6 +140,7 @@ struct sh_mobile_i2c_data {
140 int sr; 140 int sr;
141 bool send_stop; 141 bool send_stop;
142 142
143 struct resource *res;
143 struct dma_chan *dma_tx; 144 struct dma_chan *dma_tx;
144 struct dma_chan *dma_rx; 145 struct dma_chan *dma_rx;
145 struct scatterlist sg; 146 struct scatterlist sg;
@@ -539,6 +540,42 @@ static void sh_mobile_i2c_dma_callback(void *data)
539 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); 540 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
540} 541}
541 542
543static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
544 enum dma_transfer_direction dir, dma_addr_t port_addr)
545{
546 struct dma_chan *chan;
547 struct dma_slave_config cfg;
548 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
549 int ret;
550
551 chan = dma_request_slave_channel_reason(dev, chan_name);
552 if (IS_ERR(chan)) {
553 ret = PTR_ERR(chan);
554 dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
555 return chan;
556 }
557
558 memset(&cfg, 0, sizeof(cfg));
559 cfg.direction = dir;
560 if (dir == DMA_MEM_TO_DEV) {
561 cfg.dst_addr = port_addr;
562 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
563 } else {
564 cfg.src_addr = port_addr;
565 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
566 }
567
568 ret = dmaengine_slave_config(chan, &cfg);
569 if (ret) {
570 dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
571 dma_release_channel(chan);
572 return ERR_PTR(ret);
573 }
574
575 dev_dbg(dev, "got DMA channel for %s\n", chan_name);
576 return chan;
577}
578
542static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) 579static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
543{ 580{
544 bool read = pd->msg->flags & I2C_M_RD; 581 bool read = pd->msg->flags & I2C_M_RD;
@@ -548,7 +585,16 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
548 dma_addr_t dma_addr; 585 dma_addr_t dma_addr;
549 dma_cookie_t cookie; 586 dma_cookie_t cookie;
550 587
551 if (!chan) 588 if (PTR_ERR(chan) == -EPROBE_DEFER) {
589 if (read)
590 chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
591 pd->res->start + ICDR);
592 else
593 chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
594 pd->res->start + ICDR);
595 }
596
597 if (IS_ERR(chan))
552 return; 598 return;
553 599
554 dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); 600 dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
@@ -747,56 +793,16 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
747}; 793};
748MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids); 794MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
749 795
750static int sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir,
751 dma_addr_t port_addr, struct dma_chan **chan_ptr)
752{
753 struct dma_chan *chan;
754 struct dma_slave_config cfg;
755 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
756 int ret;
757
758 *chan_ptr = NULL;
759
760 chan = dma_request_slave_channel_reason(dev, chan_name);
761 if (IS_ERR(chan)) {
762 ret = PTR_ERR(chan);
763 dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
764 return ret;
765 }
766
767 memset(&cfg, 0, sizeof(cfg));
768 cfg.direction = dir;
769 if (dir == DMA_MEM_TO_DEV) {
770 cfg.dst_addr = port_addr;
771 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
772 } else {
773 cfg.src_addr = port_addr;
774 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
775 }
776
777 ret = dmaengine_slave_config(chan, &cfg);
778 if (ret) {
779 dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
780 dma_release_channel(chan);
781 return ret;
782 }
783
784 *chan_ptr = chan;
785
786 dev_dbg(dev, "got DMA channel for %s\n", chan_name);
787 return 0;
788}
789
790static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) 796static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
791{ 797{
792 if (pd->dma_tx) { 798 if (!IS_ERR(pd->dma_tx)) {
793 dma_release_channel(pd->dma_tx); 799 dma_release_channel(pd->dma_tx);
794 pd->dma_tx = NULL; 800 pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
795 } 801 }
796 802
797 if (pd->dma_rx) { 803 if (!IS_ERR(pd->dma_rx)) {
798 dma_release_channel(pd->dma_rx); 804 dma_release_channel(pd->dma_rx);
799 pd->dma_rx = NULL; 805 pd->dma_rx = ERR_PTR(-EPROBE_DEFER);
800 } 806 }
801} 807}
802 808
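The ERR_PTR(-EPROBE_DEFER) sentinel that replaces NULL here lets a failed or
not-yet-attempted channel request be retried at transfer time. A userspace
sketch of the err.h idiom involved (the constants and helpers mirror the
kernel's include/linux/err.h and errno.h; the prints are illustrative):

    #include <stdio.h>

    #define MAX_ERRNO	4095
    #define EPROBE_DEFER	517

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *chan = ERR_PTR(-EPROBE_DEFER); /* probe: nothing requested yet */

            if (PTR_ERR(chan) == -EPROBE_DEFER)
                    printf("transfer time: retry the DMA channel request\n");
            if (IS_ERR(chan))
                    printf("still no channel: fall back to PIO\n");
            return 0;
    }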
@@ -849,6 +855,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
849 855
850 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 856 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
851 857
858 pd->res = res;
852 pd->reg = devm_ioremap_resource(&dev->dev, res); 859 pd->reg = devm_ioremap_resource(&dev->dev, res);
853 if (IS_ERR(pd->reg)) 860 if (IS_ERR(pd->reg))
854 return PTR_ERR(pd->reg); 861 return PTR_ERR(pd->reg);
@@ -889,17 +896,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
889 /* Init DMA */ 896 /* Init DMA */
890 sg_init_table(&pd->sg, 1); 897 sg_init_table(&pd->sg, 1);
891 pd->dma_direction = DMA_NONE; 898 pd->dma_direction = DMA_NONE;
892 ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM, 899 pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
893 res->start + ICDR, &pd->dma_rx);
894 if (ret == -EPROBE_DEFER)
895 return ret;
896
897 ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
898 res->start + ICDR, &pd->dma_tx);
899 if (ret == -EPROBE_DEFER) {
900 sh_mobile_i2c_release_dma(pd);
901 return ret;
902 }
903 900
904 /* Enable Runtime PM for this device. 901 /* Enable Runtime PM for this device.
905 * 902 *
@@ -937,8 +934,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
937 return ret; 934 return ret;
938 } 935 }
939 936
940 dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz, DMA=%c\n", 937 dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed);
941 adap->nr, pd->bus_speed, (pd->dma_rx || pd->dma_tx) ? 'y' : 'n');
942 938
943 return 0; 939 return 0;
944} 940}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/in.h> 23#include <linux/in.h>
24#include <linux/in6.h> 24#include <linux/in6.h>
25#include <linux/llist.h>
26#include <rdma/ib_verbs.h> 25#include <rdma/ib_verbs.h>
27#include <rdma/rdma_cm.h> 26#include <rdma/rdma_cm.h>
28#include <target/target_core_base.h> 27#include <target/target_core_base.h>
@@ -36,11 +35,17 @@
36#define ISERT_MAX_CONN 8 35#define ISERT_MAX_CONN 8
37#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
38#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) 37#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN)
40
41int isert_debug_level = 0;
42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
39 44
40static DEFINE_MUTEX(device_list_mutex); 45static DEFINE_MUTEX(device_list_mutex);
41static LIST_HEAD(device_list); 46static LIST_HEAD(device_list);
42static struct workqueue_struct *isert_rx_wq;
43static struct workqueue_struct *isert_comp_wq; 47static struct workqueue_struct *isert_comp_wq;
48static struct workqueue_struct *isert_release_wq;
44 49
45static void 50static void
46isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 struct isert_rdma_wr *wr); 59 struct isert_rdma_wr *wr);
55static int 60static int
56isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 61isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
62static int
63isert_rdma_post_recvl(struct isert_conn *isert_conn);
64static int
65isert_rdma_accept(struct isert_conn *isert_conn);
66struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
67
68static inline bool
69isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
70{
71 return (conn->pi_support &&
72 cmd->prot_op != TARGET_PROT_NORMAL);
73}
74
57 75
58static void 76static void
59isert_qp_event_callback(struct ib_event *e, void *context) 77isert_qp_event_callback(struct ib_event *e, void *context)
60{ 78{
61 struct isert_conn *isert_conn = (struct isert_conn *)context; 79 struct isert_conn *isert_conn = (struct isert_conn *)context;
62 80
63 pr_err("isert_qp_event_callback event: %d\n", e->event); 81 isert_err("conn %p event: %d\n", isert_conn, e->event);
64 switch (e->event) { 82 switch (e->event) {
65 case IB_EVENT_COMM_EST: 83 case IB_EVENT_COMM_EST:
66 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); 84 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
67 break; 85 break;
68 case IB_EVENT_QP_LAST_WQE_REACHED: 86 case IB_EVENT_QP_LAST_WQE_REACHED:
69 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); 87 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
70 break; 88 break;
71 default: 89 default:
72 break; 90 break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
80 98
81 ret = ib_query_device(ib_dev, devattr); 99 ret = ib_query_device(ib_dev, devattr);
82 if (ret) { 100 if (ret) {
83 pr_err("ib_query_device() failed: %d\n", ret); 101 isert_err("ib_query_device() failed: %d\n", ret);
84 return ret; 102 return ret;
85 } 103 }
86 pr_debug("devattr->max_sge: %d\n", devattr->max_sge); 104 isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
87 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); 105 isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
88 106
89 return 0; 107 return 0;
90} 108}
91 109
92static int 110static int
93isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, 111isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
94 u8 protection)
95{ 112{
96 struct isert_device *device = isert_conn->conn_device; 113 struct isert_device *device = isert_conn->conn_device;
97 struct ib_qp_init_attr attr; 114 struct ib_qp_init_attr attr;
98 int ret, index, min_index = 0; 115 struct isert_comp *comp;
116 int ret, i, min = 0;
99 117
100 mutex_lock(&device_list_mutex); 118 mutex_lock(&device_list_mutex);
101 for (index = 0; index < device->cqs_used; index++) 119 for (i = 0; i < device->comps_used; i++)
102 if (device->cq_active_qps[index] < 120 if (device->comps[i].active_qps <
103 device->cq_active_qps[min_index]) 121 device->comps[min].active_qps)
104 min_index = index; 122 min = i;
105 device->cq_active_qps[min_index]++; 123 comp = &device->comps[min];
106 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index); 124 comp->active_qps++;
125 isert_info("conn %p, using comp %p min_index: %d\n",
126 isert_conn, comp, min);
107 mutex_unlock(&device_list_mutex); 127 mutex_unlock(&device_list_mutex);
108 128
109 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 129 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
110 attr.event_handler = isert_qp_event_callback; 130 attr.event_handler = isert_qp_event_callback;
111 attr.qp_context = isert_conn; 131 attr.qp_context = isert_conn;
112 attr.send_cq = device->dev_tx_cq[min_index]; 132 attr.send_cq = comp->cq;
113 attr.recv_cq = device->dev_rx_cq[min_index]; 133 attr.recv_cq = comp->cq;
114 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 134 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
115 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; 135 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
116 /* 136 /*
117 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 137 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
118 * work-around for RDMA_READs with ConnectX-2. 138 * work-around for RDMA_READs with ConnectX-2.
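The completion-context choice above is a plain least-loaded scan. Pulled out
of the driver, the policy is just this (standalone sketch; the active_qps
counts are invented):

    #include <stdio.h>

    struct isert_comp { int active_qps; };

    int main(void)
    {
            struct isert_comp comps[] = { { 3 }, { 1 }, { 2 } };
            int comps_used = 3, i, min = 0;

            /* pick the context currently serving the fewest QPs */
            for (i = 0; i < comps_used; i++)
                    if (comps[i].active_qps < comps[min].active_qps)
                            min = i;

            comps[min].active_qps++; /* the new QP lands on comps[1] */
            printf("using comp %d\n", min);
            return 0;
    }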
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
126 attr.cap.max_recv_sge = 1; 146 attr.cap.max_recv_sge = 1;
127 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 147 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
128 attr.qp_type = IB_QPT_RC; 148 attr.qp_type = IB_QPT_RC;
129 if (protection) 149 if (device->pi_capable)
130 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 150 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
131 151
132 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
133 cma_id->device);
134 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
135 isert_conn->conn_pd->device);
136
137 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); 152 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
138 if (ret) { 153 if (ret) {
139 pr_err("rdma_create_qp failed for cma_id %d\n", ret); 154 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
140 return ret; 155 goto err;
141 } 156 }
142 isert_conn->conn_qp = cma_id->qp; 157 isert_conn->conn_qp = cma_id->qp;
143 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
144 158
145 return 0; 159 return 0;
160err:
161 mutex_lock(&device_list_mutex);
162 comp->active_qps--;
163 mutex_unlock(&device_list_mutex);
164
165 return ret;
146} 166}
147 167
148static void 168static void
149isert_cq_event_callback(struct ib_event *e, void *context) 169isert_cq_event_callback(struct ib_event *e, void *context)
150{ 170{
151 pr_debug("isert_cq_event_callback event: %d\n", e->event); 171 isert_dbg("event: %d\n", e->event);
152} 172}
153 173
154static int 174static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
182 } 202 }
183 203
184 isert_conn->conn_rx_desc_head = 0; 204 isert_conn->conn_rx_desc_head = 0;
205
185 return 0; 206 return 0;
186 207
187dma_map_fail: 208dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
193 kfree(isert_conn->conn_rx_descs); 214 kfree(isert_conn->conn_rx_descs);
194 isert_conn->conn_rx_descs = NULL; 215 isert_conn->conn_rx_descs = NULL;
195fail: 216fail:
217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
218
196 return -ENOMEM; 219 return -ENOMEM;
197} 220}
198 221
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
216 isert_conn->conn_rx_descs = NULL; 239 isert_conn->conn_rx_descs = NULL;
217} 240}
218 241
219static void isert_cq_tx_work(struct work_struct *); 242static void isert_cq_work(struct work_struct *);
220static void isert_cq_tx_callback(struct ib_cq *, void *); 243static void isert_cq_callback(struct ib_cq *, void *);
221static void isert_cq_rx_work(struct work_struct *);
222static void isert_cq_rx_callback(struct ib_cq *, void *);
223 244
224static int 245static int
225isert_create_device_ib_res(struct isert_device *device) 246isert_create_device_ib_res(struct isert_device *device)
226{ 247{
227 struct ib_device *ib_dev = device->ib_device; 248 struct ib_device *ib_dev = device->ib_device;
228 struct isert_cq_desc *cq_desc;
229 struct ib_device_attr *dev_attr; 249 struct ib_device_attr *dev_attr;
230 int ret = 0, i, j; 250 int ret = 0, i;
231 int max_rx_cqe, max_tx_cqe; 251 int max_cqe;
232 252
233 dev_attr = &device->dev_attr; 253 dev_attr = &device->dev_attr;
234 ret = isert_query_device(ib_dev, dev_attr); 254 ret = isert_query_device(ib_dev, dev_attr);
235 if (ret) 255 if (ret)
236 return ret; 256 return ret;
237 257
238 max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); 258 max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
239 max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
240 259
 241 /* assign function handlers */ 260 /* assign function handlers */
242 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 261 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
254 device->pi_capable = dev_attr->device_cap_flags & 273 device->pi_capable = dev_attr->device_cap_flags &
255 IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 274 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
256 275
257 device->cqs_used = min_t(int, num_online_cpus(), 276 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
258 device->ib_device->num_comp_vectors); 277 device->ib_device->num_comp_vectors));
 259 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 278 isert_info("Using %d CQs, %s supports %d vectors, "
 260 pr_debug("Using %d CQs, device %s supports %d vectors support " 279 "fast registration %d, pi_capable %d\n",
261 "Fast registration %d pi_capable %d\n", 280 device->comps_used, device->ib_device->name,
262 device->cqs_used, device->ib_device->name, 281 device->ib_device->num_comp_vectors, device->use_fastreg,
263 device->ib_device->num_comp_vectors, device->use_fastreg, 282 device->pi_capable);
264 device->pi_capable); 283
265 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 284 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
266 device->cqs_used, GFP_KERNEL); 285 GFP_KERNEL);
267 if (!device->cq_desc) { 286 if (!device->comps) {
268 pr_err("Unable to allocate device->cq_desc\n"); 287 isert_err("Unable to allocate completion contexts\n");
269 return -ENOMEM; 288 return -ENOMEM;
270 } 289 }
271 cq_desc = device->cq_desc;
272
273 for (i = 0; i < device->cqs_used; i++) {
274 cq_desc[i].device = device;
275 cq_desc[i].cq_index = i;
276
277 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
278 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
279 isert_cq_rx_callback,
280 isert_cq_event_callback,
281 (void *)&cq_desc[i],
282 max_rx_cqe, i);
283 if (IS_ERR(device->dev_rx_cq[i])) {
284 ret = PTR_ERR(device->dev_rx_cq[i]);
285 device->dev_rx_cq[i] = NULL;
286 goto out_cq;
287 }
288 290
289 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); 291 for (i = 0; i < device->comps_used; i++) {
290 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 292 struct isert_comp *comp = &device->comps[i];
291 isert_cq_tx_callback,
292 isert_cq_event_callback,
293 (void *)&cq_desc[i],
294 max_tx_cqe, i);
295 if (IS_ERR(device->dev_tx_cq[i])) {
296 ret = PTR_ERR(device->dev_tx_cq[i]);
297 device->dev_tx_cq[i] = NULL;
298 goto out_cq;
299 }
300 293
301 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); 294 comp->device = device;
302 if (ret) 295 INIT_WORK(&comp->work, isert_cq_work);
296 comp->cq = ib_create_cq(device->ib_device,
297 isert_cq_callback,
298 isert_cq_event_callback,
299 (void *)comp,
300 max_cqe, i);
301 if (IS_ERR(comp->cq)) {
302 ret = PTR_ERR(comp->cq);
303 comp->cq = NULL;
303 goto out_cq; 304 goto out_cq;
305 }
304 306
305 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); 307 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
306 if (ret) 308 if (ret)
307 goto out_cq; 309 goto out_cq;
308 } 310 }
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
310 return 0; 312 return 0;
311 313
312out_cq: 314out_cq:
313 for (j = 0; j < i; j++) { 315 for (i = 0; i < device->comps_used; i++) {
314 cq_desc = &device->cq_desc[j]; 316 struct isert_comp *comp = &device->comps[i];
315 317
316 if (device->dev_rx_cq[j]) { 318 if (comp->cq) {
317 cancel_work_sync(&cq_desc->cq_rx_work); 319 cancel_work_sync(&comp->work);
318 ib_destroy_cq(device->dev_rx_cq[j]); 320 ib_destroy_cq(comp->cq);
319 }
320 if (device->dev_tx_cq[j]) {
321 cancel_work_sync(&cq_desc->cq_tx_work);
322 ib_destroy_cq(device->dev_tx_cq[j]);
323 } 321 }
324 } 322 }
325 kfree(device->cq_desc); 323 kfree(device->comps);
326 324
327 return ret; 325 return ret;
328} 326}
@@ -330,21 +328,18 @@ out_cq:
330static void 328static void
331isert_free_device_ib_res(struct isert_device *device) 329isert_free_device_ib_res(struct isert_device *device)
332{ 330{
333 struct isert_cq_desc *cq_desc;
334 int i; 331 int i;
335 332
336 for (i = 0; i < device->cqs_used; i++) { 333 isert_info("device %p\n", device);
337 cq_desc = &device->cq_desc[i];
338 334
339 cancel_work_sync(&cq_desc->cq_rx_work); 335 for (i = 0; i < device->comps_used; i++) {
340 cancel_work_sync(&cq_desc->cq_tx_work); 336 struct isert_comp *comp = &device->comps[i];
341 ib_destroy_cq(device->dev_rx_cq[i]);
342 ib_destroy_cq(device->dev_tx_cq[i]);
343 device->dev_rx_cq[i] = NULL;
344 device->dev_tx_cq[i] = NULL;
345 }
346 337
347 kfree(device->cq_desc); 338 cancel_work_sync(&comp->work);
339 ib_destroy_cq(comp->cq);
340 comp->cq = NULL;
341 }
342 kfree(device->comps);
348} 343}
349 344
350static void 345static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
352{ 347{
353 mutex_lock(&device_list_mutex); 348 mutex_lock(&device_list_mutex);
354 device->refcount--; 349 device->refcount--;
350 isert_info("device %p refcount %d\n", device, device->refcount);
355 if (!device->refcount) { 351 if (!device->refcount) {
356 isert_free_device_ib_res(device); 352 isert_free_device_ib_res(device);
357 list_del(&device->dev_node); 353 list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
370 list_for_each_entry(device, &device_list, dev_node) { 366 list_for_each_entry(device, &device_list, dev_node) {
371 if (device->ib_device->node_guid == cma_id->device->node_guid) { 367 if (device->ib_device->node_guid == cma_id->device->node_guid) {
372 device->refcount++; 368 device->refcount++;
369 isert_info("Found iser device %p refcount %d\n",
370 device, device->refcount);
373 mutex_unlock(&device_list_mutex); 371 mutex_unlock(&device_list_mutex);
374 return device; 372 return device;
375 } 373 }
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
393 391
394 device->refcount++; 392 device->refcount++;
395 list_add_tail(&device->dev_node, &device_list); 393 list_add_tail(&device->dev_node, &device_list);
394 isert_info("Created a new iser device %p refcount %d\n",
395 device, device->refcount);
396 mutex_unlock(&device_list_mutex); 396 mutex_unlock(&device_list_mutex);
397 397
398 return device; 398 return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
407 if (list_empty(&isert_conn->conn_fr_pool)) 407 if (list_empty(&isert_conn->conn_fr_pool))
408 return; 408 return;
409 409
410 pr_debug("Freeing conn %p fastreg pool", isert_conn); 410 isert_info("Freeing conn %p fastreg pool", isert_conn);
411 411
412 list_for_each_entry_safe(fr_desc, tmp, 412 list_for_each_entry_safe(fr_desc, tmp,
413 &isert_conn->conn_fr_pool, list) { 413 &isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
425 } 425 }
426 426
427 if (i < isert_conn->conn_fr_pool_size) 427 if (i < isert_conn->conn_fr_pool_size)
428 pr_warn("Pool still has %d regions registered\n", 428 isert_warn("Pool still has %d regions registered\n",
429 isert_conn->conn_fr_pool_size - i); 429 isert_conn->conn_fr_pool_size - i);
430} 430}
431 431
432static int 432static int
433isert_create_pi_ctx(struct fast_reg_descriptor *desc,
434 struct ib_device *device,
435 struct ib_pd *pd)
436{
437 struct ib_mr_init_attr mr_init_attr;
438 struct pi_context *pi_ctx;
439 int ret;
440
441 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
442 if (!pi_ctx) {
443 isert_err("Failed to allocate pi context\n");
444 return -ENOMEM;
445 }
446
447 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
448 ISCSI_ISER_SG_TABLESIZE);
449 if (IS_ERR(pi_ctx->prot_frpl)) {
450 isert_err("Failed to allocate prot frpl err=%ld\n",
451 PTR_ERR(pi_ctx->prot_frpl));
452 ret = PTR_ERR(pi_ctx->prot_frpl);
453 goto err_pi_ctx;
454 }
455
456 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
457 if (IS_ERR(pi_ctx->prot_mr)) {
458 isert_err("Failed to allocate prot frmr err=%ld\n",
459 PTR_ERR(pi_ctx->prot_mr));
460 ret = PTR_ERR(pi_ctx->prot_mr);
461 goto err_prot_frpl;
462 }
463 desc->ind |= ISERT_PROT_KEY_VALID;
464
465 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
466 mr_init_attr.max_reg_descriptors = 2;
467 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
468 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
469 if (IS_ERR(pi_ctx->sig_mr)) {
470 isert_err("Failed to allocate signature enabled mr err=%ld\n",
471 PTR_ERR(pi_ctx->sig_mr));
472 ret = PTR_ERR(pi_ctx->sig_mr);
473 goto err_prot_mr;
474 }
475
476 desc->pi_ctx = pi_ctx;
477 desc->ind |= ISERT_SIG_KEY_VALID;
478 desc->ind &= ~ISERT_PROTECTED;
479
480 return 0;
481
482err_prot_mr:
 483 ib_dereg_mr(pi_ctx->prot_mr);
 484err_prot_frpl:
 485 ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
 486err_pi_ctx:
 487 kfree(pi_ctx);
488
489 return ret;
490}
491
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc, u8 protection)
+		     struct fast_reg_descriptor *fr_desc)
 {
 	int ret;
 
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 						 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
-		pr_err("Failed to allocate data frpl err=%ld\n",
-		       PTR_ERR(fr_desc->data_frpl));
+		isert_err("Failed to allocate data frpl err=%ld\n",
+			  PTR_ERR(fr_desc->data_frpl));
 		return PTR_ERR(fr_desc->data_frpl);
 	}
 
 	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_mr)) {
-		pr_err("Failed to allocate data frmr err=%ld\n",
-		       PTR_ERR(fr_desc->data_mr));
+		isert_err("Failed to allocate data frmr err=%ld\n",
+			  PTR_ERR(fr_desc->data_mr));
 		ret = PTR_ERR(fr_desc->data_mr);
 		goto err_data_frpl;
 	}
-	pr_debug("Create fr_desc %p page_list %p\n",
-		 fr_desc, fr_desc->data_frpl->page_list);
 	fr_desc->ind |= ISERT_DATA_KEY_VALID;
 
-	if (protection) {
-		struct ib_mr_init_attr mr_init_attr = {0};
-		struct pi_context *pi_ctx;
-
-		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
-		if (!fr_desc->pi_ctx) {
-			pr_err("Failed to allocate pi context\n");
-			ret = -ENOMEM;
-			goto err_data_mr;
-		}
-		pi_ctx = fr_desc->pi_ctx;
-
-		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_frpl)) {
-			pr_err("Failed to allocate prot frpl err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_frpl));
-			ret = PTR_ERR(pi_ctx->prot_frpl);
-			goto err_pi_ctx;
-		}
-
-		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_mr)) {
-			pr_err("Failed to allocate prot frmr err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_mr));
-			ret = PTR_ERR(pi_ctx->prot_mr);
-			goto err_prot_frpl;
-		}
-		fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
-		mr_init_attr.max_reg_descriptors = 2;
-		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
-		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
-		if (IS_ERR(pi_ctx->sig_mr)) {
-			pr_err("Failed to allocate signature enabled mr err=%ld\n",
-			       PTR_ERR(pi_ctx->sig_mr));
-			ret = PTR_ERR(pi_ctx->sig_mr);
-			goto err_prot_mr;
-		}
-		fr_desc->ind |= ISERT_SIG_KEY_VALID;
-	}
-	fr_desc->ind &= ~ISERT_PROTECTED;
+	isert_dbg("Created fr_desc %p\n", fr_desc);
 
 	return 0;
-err_prot_mr:
-	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
-	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
-	kfree(fr_desc->pi_ctx);
-err_data_mr:
-	ib_dereg_mr(fr_desc->data_mr);
+
err_data_frpl:
 	ib_free_fast_reg_page_list(fr_desc->data_frpl);
 
@@ -513,7 +523,7 @@ err_data_frpl:
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
-			pr_err("Failed to allocate fast_reg descriptor\n");
+			isert_err("Failed to allocate fast_reg descriptor\n");
 			ret = -ENOMEM;
 			goto err;
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc,
-					   pi_support);
+					   isert_conn->conn_pd, fr_desc);
 		if (ret) {
-			pr_err("Failed to create fastreg descriptor err=%d\n",
-			       ret);
+			isert_err("Failed to create fastreg descriptor err=%d\n",
+				  ret);
 			kfree(fr_desc);
 			goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p fastreg pool size=%d",
-		 isert_conn, isert_conn->conn_fr_pool_size);
+	isert_dbg("Creating conn %p fastreg pool size=%d",
+		  isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
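
The pool loop above uses the classic allocate-in-a-loop, unwind-on-failure idiom: any descriptor that fails to set up jumps to an err label that releases everything built so far. A minimal userspace sketch of the same shape (every name here is illustrative, none of it is the driver's API):

/* Allocate-in-a-loop with unwind-on-failure; illustrative names only. */
#include <stdio.h>
#include <stdlib.h>

struct fr_desc {
	struct fr_desc *next;
	void *resource;
};

static struct fr_desc *pool;

static int pool_create(int n)
{
	for (int i = 0; i < n; i++) {
		struct fr_desc *d = calloc(1, sizeof(*d));
		if (!d)
			goto err;		/* unwind everything so far */
		d->resource = malloc(64);	/* stand-in for MR/page-list setup */
		if (!d->resource) {
			free(d);
			goto err;
		}
		d->next = pool;			/* link into the pool */
		pool = d;
	}
	return 0;
err:
	while (pool) {
		struct fr_desc *d = pool;

		pool = d->next;
		free(d->resource);
		free(d);
	}
	return -1;
}

int main(void)
{
	return pool_create(8) ? EXIT_FAILURE : EXIT_SUCCESS;
}
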
@@ -563,47 +572,45 @@ err:
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-	struct iscsi_np *np = cma_id->context;
-	struct isert_np *isert_np = np->np_context;
+	struct isert_np *isert_np = cma_id->context;
+	struct iscsi_np *np = isert_np->np;
 	struct isert_conn *isert_conn;
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
-	u8 pi_support;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (!np->enabled) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("iscsi_np is not enabled, reject connect request\n");
+		isert_dbg("iscsi_np is not enabled, reject connect request\n");
 		return rdma_reject(cma_id, NULL, 0);
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
-		 cma_id, cma_id->context);
+	isert_dbg("cma_id: %p, portal: %p\n",
+		  cma_id, cma_id->context);
 
 	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
 	if (!isert_conn) {
-		pr_err("Unable to allocate isert_conn\n");
+		isert_err("Unable to allocate isert_conn\n");
 		return -ENOMEM;
 	}
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
+	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->conn_wait);
-	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
-	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
 
 	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
 	if (!isert_conn->login_buf) {
-		pr_err("Unable to allocate isert_conn->login_buf\n");
+		isert_err("Unable to allocate isert_conn->login_buf\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
611 isert_conn->login_req_buf = isert_conn->login_buf; 618 isert_conn->login_req_buf = isert_conn->login_buf;
612 isert_conn->login_rsp_buf = isert_conn->login_buf + 619 isert_conn->login_rsp_buf = isert_conn->login_buf +
613 ISCSI_DEF_MAX_RECV_SEG_LEN; 620 ISCSI_DEF_MAX_RECV_SEG_LEN;
614 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 621 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
615 isert_conn->login_buf, isert_conn->login_req_buf, 622 isert_conn->login_buf, isert_conn->login_req_buf,
616 isert_conn->login_rsp_buf); 623 isert_conn->login_rsp_buf);
617 624
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
621 628
622 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 629 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
623 if (ret) { 630 if (ret) {
624 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n", 631 isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
625 ret); 632 ret);
626 isert_conn->login_req_dma = 0; 633 isert_conn->login_req_dma = 0;
627 goto out_login_buf; 634 goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
633 640
634 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 641 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
635 if (ret) { 642 if (ret) {
636 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", 643 isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
637 ret); 644 ret);
638 isert_conn->login_rsp_dma = 0; 645 isert_conn->login_rsp_dma = 0;
639 goto out_req_dma_map; 646 goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
649 isert_conn->initiator_depth = min_t(u8, 656 isert_conn->initiator_depth = min_t(u8,
650 event->param.conn.initiator_depth, 657 event->param.conn.initiator_depth,
651 device->dev_attr.max_qp_init_rd_atom); 658 device->dev_attr.max_qp_init_rd_atom);
652 pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth); 659 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
653 660
654 isert_conn->conn_device = device; 661 isert_conn->conn_device = device;
655 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); 662 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
656 if (IS_ERR(isert_conn->conn_pd)) { 663 if (IS_ERR(isert_conn->conn_pd)) {
657 ret = PTR_ERR(isert_conn->conn_pd); 664 ret = PTR_ERR(isert_conn->conn_pd);
658 pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", 665 isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
659 isert_conn, ret); 666 isert_conn, ret);
660 goto out_pd; 667 goto out_pd;
661 } 668 }
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 				IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(isert_conn->conn_mr)) {
 		ret = PTR_ERR(isert_conn->conn_mr);
-		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
-		       isert_conn, ret);
+		isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+			  isert_conn, ret);
 		goto out_mr;
 	}
 
-	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
-	if (pi_support && !device->pi_capable) {
-		pr_err("Protection information requested but not supported, "
-		       "rejecting connect request\n");
-		ret = rdma_reject(cma_id, NULL, 0);
-		goto out_mr;
-	}
+	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	if (ret)
+		goto out_conn_dev;
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		goto out_conn_dev;
+
+	ret = isert_rdma_accept(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
 	mutex_unlock(&isert_np->np_accept_mutex);
 
-	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+	isert_info("np %p: Allow accept_np to continue\n", np);
 	up(&isert_np->np_sem);
 	return 0;
 
@@ -705,6 +712,7 @@ out_login_buf:
 	kfree(isert_conn->login_buf);
 out:
 	kfree(isert_conn);
+	rdma_reject(cma_id, NULL, 0);
 	return ret;
 }
 
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_device *device = isert_conn->conn_device;
-	int cq_index;
 
-	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p\n", isert_conn);
 
 	if (device && device->use_fastreg)
 		isert_conn_free_fastreg_pool(isert_conn);
 
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
 	if (isert_conn->conn_qp) {
-		cq_index = ((struct isert_cq_desc *)
-			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
-		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
-		isert_conn->conn_device->cq_active_qps[cq_index]--;
+		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
 
-		rdma_destroy_qp(isert_conn->conn_cm_id);
-	}
+		isert_dbg("dec completion context %p active_qps\n", comp);
+		mutex_lock(&device_list_mutex);
+		comp->active_qps--;
+		mutex_unlock(&device_list_mutex);
 
-	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
+		ib_destroy_qp(isert_conn->conn_qp);
+	}
 
 	ib_dereg_mr(isert_conn->conn_mr);
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	if (device)
 		isert_device_try_release(device);
-
-	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
 }
 
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
-	kref_get(&isert_conn->conn_kref);
+	isert_info("conn %p\n", isert_conn);
+
+	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+		isert_warn("conn %p connect_release is running\n", isert_conn);
+		return;
+	}
+
+	mutex_lock(&isert_conn->conn_mutex);
+	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+		isert_conn->state = ISER_CONN_UP;
+	mutex_unlock(&isert_conn->conn_mutex);
 }
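
The reworked connected handler takes its reference with kref_get_unless_zero(), so a connection whose final reference has already been dropped is left alone instead of being resurrected. A userspace model of those semantics using C11 atomics (a sketch of the idea, not the kernel's kref implementation):

/* Model of kref_get_unless_zero(): take a reference only if the
 * object is not already on its way to destruction. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		/* CAS loop: bump only if the count is still non-zero */
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
	}
	return false;	/* release path already saw the count hit zero */
}

int main(void)
{
	atomic_int refs = 1;

	printf("got ref: %d\n", get_unless_zero(&refs));	/* 1 */
	refs = 0;
	printf("got ref: %d\n", get_unless_zero(&refs));	/* 0 */
	return 0;
}
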
761 778
762static void 779static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
 	struct isert_conn *isert_conn = container_of(kref,
 				struct isert_conn, conn_kref);
 
-	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
-		 current->comm, current->pid);
+	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+		   current->pid);
 
 	isert_connect_release(isert_conn);
 }
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TERMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held; the state
+ * check under the mutex is what makes it safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-	struct isert_conn *isert_conn = container_of(work,
-				struct isert_conn, conn_logout_work);
+	int err;
 
-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->state == ISER_CONN_UP)
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		isert_info("Terminating conn %p state %d\n",
+			   isert_conn, isert_conn->state);
 		isert_conn->state = ISER_CONN_TERMINATING;
-
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		goto wake_up;
-	}
-	if (!isert_conn->conn_cm_id) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
-		return;
+		err = rdma_disconnect(isert_conn->conn_cm_id);
+		if (err)
+			isert_warn("Failed rdma_disconnect isert_conn %p\n",
+				   isert_conn);
+		break;
+	default:
+		isert_warn("conn %p terminating in state %d\n",
+			   isert_conn, isert_conn->state);
 	}
+}
 
-	if (isert_conn->disconnect) {
-		/* Send DREQ/DREP towards our initiator */
-		rdma_disconnect(isert_conn->conn_cm_id);
-	}
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+		     enum rdma_cm_event_type event)
+{
+	isert_dbg("isert np %p, handling event %d\n", isert_np, event);
 
-	mutex_unlock(&isert_conn->conn_mutex);
+	switch (event) {
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		isert_np->np_cm_id = NULL;
+		break;
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		isert_np->np_cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->np_cm_id)) {
+			isert_err("isert np %p setup id failed: %ld\n",
+				  isert_np, PTR_ERR(isert_np->np_cm_id));
+			isert_np->np_cm_id = NULL;
+		}
+		break;
+	default:
+		isert_err("isert np %p Unexpected event %d\n",
+			  isert_np, event);
+	}
 
-wake_up:
-	complete(&isert_conn->conn_wait);
+	return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+			   enum rdma_cm_event_type event)
 {
+	struct isert_np *isert_np = cma_id->context;
 	struct isert_conn *isert_conn;
 
-	if (!cma_id->qp) {
-		struct isert_np *isert_np = cma_id->context;
+	if (isert_np->np_cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event);
 
-		isert_np->np_cm_id = NULL;
-		return -1;
-	}
+	isert_conn = cma_id->qp->qp_context;
 
-	isert_conn = (struct isert_conn *)cma_id->context;
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn_terminate(isert_conn);
+	mutex_unlock(&isert_conn->conn_mutex);
 
-	isert_conn->disconnect = disconnect;
-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-	schedule_work(&isert_conn->conn_logout_work);
+	isert_info("conn %p completing conn_wait\n", isert_conn);
+	complete(&isert_conn->conn_wait);
 
 	return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+	isert_put_conn(isert_conn);
+}
+
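
isert_conn_terminate() turns teardown into a small state machine: only UP or FULL_FEATURE connections issue the single rdma_disconnect(), and repeated calls while TERMINATING are no-ops. A compact userspace model of that terminate-once pattern (state names mirror the driver; everything else is illustrative):

/* Model of the terminate-once state machine: calling terminate()
 * any number of times issues exactly one disconnect. */
#include <stdio.h>

enum conn_state { CONN_UP, CONN_FULL_FEATURE, CONN_TERMINATING, CONN_DOWN };

static enum conn_state state = CONN_FULL_FEATURE;

static void terminate(void)
{
	switch (state) {
	case CONN_TERMINATING:
		break;			/* already done, nothing to do */
	case CONN_UP:
	case CONN_FULL_FEATURE:		/* FALLTHRU */
		state = CONN_TERMINATING;
		printf("disconnect sent\n");	/* rdma_disconnect() goes here */
		break;
	default:
		printf("terminating in unexpected state %d\n", state);
	}
}

int main(void)
{
	terminate();	/* sends the disconnect */
	terminate();	/* no-op: state is already TERMINATING */
	return 0;
}
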
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
-	bool disconnect = false;
 
-	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
-		 event->event, event->status, cma_id->context, cma_id);
+	isert_info("event %d status %d id %p np %p\n", event->event,
+		   event->status, cma_id, cma_id->context);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
 		if (ret)
-			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-			       event->event, ret);
+			isert_err("failed handle connect request %d\n", ret);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-		disconnect = true;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-		ret = isert_disconnected_handler(cma_id, disconnect);
+		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
+		isert_connect_error(cma_id);
+		break;
 	default:
-		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
 		break;
 	}
 
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 
 	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &isert_conn->conn_rx_descs[rx_head];
-		rx_wr->wr_id = (unsigned long)rx_desc;
+		rx_wr->wr_id = (uintptr_t)rx_desc;
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
890 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, 946 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
891 &rx_wr_failed); 947 &rx_wr_failed);
892 if (ret) { 948 if (ret) {
893 pr_err("ib_post_recv() failed with ret: %d\n", ret); 949 isert_err("ib_post_recv() failed with ret: %d\n", ret);
894 isert_conn->post_recv_buf_count -= count; 950 isert_conn->post_recv_buf_count -= count;
895 } else { 951 } else {
896 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); 952 isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
897 isert_conn->conn_rx_desc_head = rx_head; 953 isert_conn->conn_rx_desc_head = rx_head;
898 } 954 }
899 return ret; 955 return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 				   ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
 	send_wr.next = NULL;
-	send_wr.wr_id = (unsigned long)tx_desc;
+	send_wr.wr_id = (uintptr_t)tx_desc;
 	send_wr.sg_list = tx_desc->tx_sg;
 	send_wr.num_sge = tx_desc->num_sge;
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	atomic_inc(&isert_conn->post_send_buf_count);
-
 	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
-	if (ret) {
-		pr_err("ib_post_send() failed, ret: %d\n", ret);
-		atomic_dec(&isert_conn->post_send_buf_count);
-	}
+	if (ret)
+		isert_err("ib_post_send() failed, ret: %d\n", ret);
 
 	return ret;
 }
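
Several hunks above replace (unsigned long) casts with (uintptr_t) when a descriptor pointer is stashed in the 64-bit wr_id field. uintptr_t is the integer type defined to round-trip a pointer, which the old cast only achieved by accident of the ABI. A two-line demonstration of the round trip:

/* Why (uintptr_t): it is the integer type guaranteed to hold a pointer,
 * so pointer -> u64 wr_id -> pointer is lossless. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int desc;				/* stand-in for a tx descriptor */
	uint64_t wr_id = (uintptr_t)&desc;	/* stash the pointer in wr_id */
	int *back = (int *)(uintptr_t)wr_id;	/* recover it on completion */

	assert(back == &desc);
	return 0;
}
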
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
945 997
946 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { 998 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
947 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 999 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
948 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); 1000 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
949 } 1001 }
950} 1002}
951 1003
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
959 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 1011 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
960 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1012 ISER_HEADERS_LEN, DMA_TO_DEVICE);
961 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 1013 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
962 pr_err("ib_dma_mapping_error() failed\n"); 1014 isert_err("ib_dma_mapping_error() failed\n");
963 return -ENOMEM; 1015 return -ENOMEM;
964 } 1016 }
965 1017
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
 	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
 
-	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
-		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
-		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+		  tx_desc->tx_sg[0].lkey);
 
 	return 0;
 }
 
 static void
 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		   struct ib_send_wr *send_wr, bool coalesce)
+		   struct ib_send_wr *send_wr)
 {
 	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
 
 	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
-	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
 	send_wr->opcode = IB_WR_SEND;
 	send_wr->sg_list = &tx_desc->tx_sg[0];
 	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
-	/*
-	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
-	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
-	 */
-	mutex_lock(&isert_conn->conn_mutex);
-	if (coalesce && isert_conn->state == ISER_CONN_UP &&
-	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
-		tx_desc->llnode_active = true;
-		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
-		mutex_unlock(&isert_conn->conn_mutex);
-		return;
-	}
-	isert_conn->conn_comp_batch = 0;
-	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
-	mutex_unlock(&isert_conn->conn_mutex);
-
 	send_wr->send_flags = IB_SEND_SIGNALED;
 }
 
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
 	sge.length = ISER_RX_LOGIN_SIZE;
 	sge.lkey = isert_conn->conn_mr->lkey;
 
-	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
-		 sge.addr, sge.length, sge.lkey);
+	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
+		  sge.addr, sge.length, sge.lkey);
 
 	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
-	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
 	rx_wr.sg_list = &sge;
 	rx_wr.num_sge = 1;
 
 	isert_conn->post_recv_buf_count++;
 	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
 	if (ret) {
-		pr_err("ib_post_recv() failed: %d\n", ret);
+		isert_err("ib_post_recv() failed: %d\n", ret);
 		isert_conn->post_recv_buf_count--;
 	}
 
-	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
 	return ret;
 }
 
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 	if (login->login_complete) {
 		if (!conn->sess->sess_ops->SessionType &&
 		    isert_conn->conn_device->use_fastreg) {
-			/* Normal Session and fastreg is used */
-			u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
-
-			ret = isert_conn_create_fastreg_pool(isert_conn,
-							     pi_support);
+			ret = isert_conn_create_fastreg_pool(isert_conn);
 			if (ret) {
-				pr_err("Conn: %p failed to create"
-				       " fastreg pool\n", isert_conn);
+				isert_err("Conn: %p failed to create"
+					  " fastreg pool\n", isert_conn);
 				return ret;
 			}
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 		if (ret)
 			return ret;
 
-		isert_conn->state = ISER_CONN_UP;
+		/* Now we are in FULL_FEATURE phase */
+		mutex_lock(&isert_conn->conn_mutex);
+		isert_conn->state = ISER_CONN_FULL_FEATURE;
+		mutex_unlock(&isert_conn->conn_mutex);
 		goto post_send;
 	}
 
@@ -1109,18 +1143,17 @@ post_send:
 }
 
 static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
-		   struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
 {
+	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+	int rx_buflen = isert_conn->login_req_len;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
 	int size;
 
-	if (!login) {
-		pr_err("conn->conn_login is NULL\n");
-		dump_stack();
-		return;
-	}
+	isert_info("conn %p\n", isert_conn);
+
+	WARN_ON_ONCE(!login);
 
 	if (login->first_request) {
 		struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
1146 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1179 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1147 1180
1148 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1181 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1149 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", 1182 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1150 size, rx_buflen, MAX_KEY_VALUE_PAIRS); 1183 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1184 MAX_KEY_VALUE_PAIRS);
1151 memcpy(login->req_buf, &rx_desc->data[0], size); 1185 memcpy(login->req_buf, &rx_desc->data[0], size);
1152 1186
1153 if (login->first_request) { 1187 if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
1166 1200
1167 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1201 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1168 if (!cmd) { 1202 if (!cmd) {
1169 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1203 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1170 return NULL; 1204 return NULL;
1171 } 1205 }
1172 isert_cmd = iscsit_priv_cmd(cmd); 1206 isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1209 sg = &cmd->se_cmd.t_data_sg[0]; 1243 sg = &cmd->se_cmd.t_data_sg[0];
1210 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1244 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1211 1245
1212 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 1246 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1213 sg, sg_nents, &rx_desc->data[0], imm_data_len); 1247 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1214 1248
1215 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 1249 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1216 1250
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1254 * FIXME: Unexpected unsolicited_data out 1288 * FIXME: Unexpected unsolicited_data out
1255 */ 1289 */
1256 if (!cmd->unsolicited_data) { 1290 if (!cmd->unsolicited_data) {
1257 pr_err("Received unexpected solicited data payload\n"); 1291 isert_err("Received unexpected solicited data payload\n");
1258 dump_stack(); 1292 dump_stack();
1259 return -1; 1293 return -1;
1260 } 1294 }
1261 1295
1262 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", 1296 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1263 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); 1297 "write_data_done: %u, data_length: %u\n",
1298 unsol_data_len, cmd->write_data_done,
1299 cmd->se_cmd.data_length);
1264 1300
1265 sg_off = cmd->write_data_done / PAGE_SIZE; 1301 sg_off = cmd->write_data_done / PAGE_SIZE;
1266 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1302 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1270 * FIXME: Non page-aligned unsolicited_data out 1306 * FIXME: Non page-aligned unsolicited_data out
1271 */ 1307 */
1272 if (page_off) { 1308 if (page_off) {
1273 pr_err("Received unexpected non-page aligned data payload\n"); 1309 isert_err("unexpected non-page aligned data payload\n");
1274 dump_stack(); 1310 dump_stack();
1275 return -1; 1311 return -1;
1276 } 1312 }
1277 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", 1313 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1278 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); 1314 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1315 sg_nents, &rx_desc->data[0], unsol_data_len);
1279 1316
1280 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1317 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1281 unsol_data_len); 1318 unsol_data_len);
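
The DataOut path above splits the running write offset into a scatterlist entry index (sg_off) and an in-page remainder (page_off), and bails out when the payload is not page aligned. The arithmetic, assuming 4 KiB pages (the page size is a stand-in; the kernel uses PAGE_SIZE):

/* Model of the DataOut placement math: a byte offset becomes a
 * whole-page scatterlist index plus an in-page offset; only
 * page-aligned payloads (page_off == 0) are accepted. */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int write_data_done = 3 * PAGE_SIZE;	/* bytes already placed */

	unsigned int sg_off = write_data_done / PAGE_SIZE;	/* sg entry index */
	unsigned int page_off = write_data_done % PAGE_SIZE;	/* offset in page */

	if (page_off) {
		fprintf(stderr, "unexpected non-page aligned data payload\n");
		return 1;
	}
	printf("resume copy at sg entry %u\n", sg_off);	/* prints 3 */
	return 0;
}
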
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
1322 1359
1323 text_in = kzalloc(payload_length, GFP_KERNEL); 1360 text_in = kzalloc(payload_length, GFP_KERNEL);
1324 if (!text_in) { 1361 if (!text_in) {
1325 pr_err("Unable to allocate text_in of payload_length: %u\n", 1362 isert_err("Unable to allocate text_in of payload_length: %u\n",
1326 payload_length); 1363 payload_length);
1327 return -ENOMEM; 1364 return -ENOMEM;
1328 } 1365 }
1329 cmd->text_in_ptr = text_in; 1366 cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1348 1385
1349 if (sess->sess_ops->SessionType && 1386 if (sess->sess_ops->SessionType &&
1350 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1387 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1351 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1388 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1352 " ignoring\n", opcode); 1389 " ignoring\n", opcode);
1353 return 0; 1390 return 0;
1354 } 1391 }
1355 1392
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 			break;
 
 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
-		if (ret > 0)
-			wait_for_completion_timeout(&conn->conn_logout_comp,
-						    SECONDS_FOR_LOGOUT_COMP *
-						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
 		cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1410 rx_desc, (struct iscsi_text *)hdr); 1443 rx_desc, (struct iscsi_text *)hdr);
1411 break; 1444 break;
1412 default: 1445 default:
1413 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1446 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1414 dump_stack(); 1447 dump_stack();
1415 break; 1448 break;
1416 } 1449 }
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1431 if (iser_hdr->flags & ISER_RSV) { 1464 if (iser_hdr->flags & ISER_RSV) {
1432 read_stag = be32_to_cpu(iser_hdr->read_stag); 1465 read_stag = be32_to_cpu(iser_hdr->read_stag);
1433 read_va = be64_to_cpu(iser_hdr->read_va); 1466 read_va = be64_to_cpu(iser_hdr->read_va);
1434 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", 1467 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1435 read_stag, (unsigned long long)read_va); 1468 read_stag, (unsigned long long)read_va);
1436 } 1469 }
1437 if (iser_hdr->flags & ISER_WSV) { 1470 if (iser_hdr->flags & ISER_WSV) {
1438 write_stag = be32_to_cpu(iser_hdr->write_stag); 1471 write_stag = be32_to_cpu(iser_hdr->write_stag);
1439 write_va = be64_to_cpu(iser_hdr->write_va); 1472 write_va = be64_to_cpu(iser_hdr->write_va);
1440 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", 1473 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1441 write_stag, (unsigned long long)write_va); 1474 write_stag, (unsigned long long)write_va);
1442 } 1475 }
1443 1476
1444 pr_debug("ISER ISCSI_CTRL PDU\n"); 1477 isert_dbg("ISER ISCSI_CTRL PDU\n");
1445 break; 1478 break;
1446 case ISER_HELLO: 1479 case ISER_HELLO:
1447 pr_err("iSER Hello message\n"); 1480 isert_err("iSER Hello message\n");
1448 break; 1481 break;
1449 default: 1482 default:
1450 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); 1483 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1451 break; 1484 break;
1452 } 1485 }
1453 1486
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
 
 static void
 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
-		    unsigned long xfer_len)
+		    u32 xfer_len)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 	if ((char *)desc == isert_conn->login_req_buf) {
 		rx_dma = isert_conn->login_req_dma;
 		rx_buflen = ISER_RX_LOGIN_SIZE;
-		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
+		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			  rx_dma, rx_buflen);
 	} else {
 		rx_dma = desc->dma_addr;
 		rx_buflen = ISER_RX_PAYLOAD_SIZE;
-		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
+		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			  rx_dma, rx_buflen);
 	}
 
 	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
 
 	hdr = &desc->iscsi_header;
-	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
-		 hdr->opcode, hdr->itt, hdr->flags,
-		 (int)(xfer_len - ISER_HEADERS_LEN));
+	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+		  hdr->opcode, hdr->itt, hdr->flags,
+		  (int)(xfer_len - ISER_HEADERS_LEN));
 
-	if ((char *)desc == isert_conn->login_req_buf)
-		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
-				   isert_conn);
-	else
+	if ((char *)desc == isert_conn->login_req_buf) {
+		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+		if (isert_conn->conn) {
+			struct iscsi_login *login = isert_conn->conn->conn_login;
+
+			if (login && !login->first_request)
+				isert_rx_login_req(isert_conn);
+		}
+		mutex_lock(&isert_conn->conn_mutex);
+		complete(&isert_conn->login_req_comp);
+		mutex_unlock(&isert_conn->conn_mutex);
+	} else {
 		isert_rx_do_work(desc, isert_conn);
+	}
 
 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
 				      DMA_FROM_DEVICE);
 
 	isert_conn->post_recv_buf_count--;
-	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
-		 isert_conn->post_recv_buf_count);
+	isert_dbg("Decremented post_recv_buf_count: %d\n",
+		  isert_conn->post_recv_buf_count);
 
 	if ((char *)desc == isert_conn->login_req_buf)
 		return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1505 ISERT_MIN_POSTED_RX); 1547 ISERT_MIN_POSTED_RX);
1506 err = isert_post_recv(isert_conn, count); 1548 err = isert_post_recv(isert_conn, count);
1507 if (err) { 1549 if (err) {
1508 pr_err("isert_post_recv() count: %d failed, %d\n", 1550 isert_err("isert_post_recv() count: %d failed, %d\n",
1509 count, err); 1551 count, err);
1510 } 1552 }
1511 } 1553 }
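
The reworked RX path above no longer assumes conn->conn_login exists when the first login PDU arrives: it records the payload length, processes the request inline only for later PDUs, and completes login_req_comp so the waiting login thread (assumed here to be isert_get_login_rx() on the kernel side) can pick the request up. A sketch of just that decision, with the completion modeled as a printf:

/* Model of the deferred-login decision: the very first login PDU may
 * arrive before the iSCSI login context is bound, so it is only
 * recorded and the waiter is signalled; later PDUs are handled inline. */
#include <stdbool.h>
#include <stdio.h>

struct login { bool first_request; };
struct conn { struct login *conn_login; };

static void rx_login(struct conn *conn)
{
	struct login *login = conn ? conn->conn_login : NULL;

	if (login && !login->first_request)
		printf("process login payload now\n");

	printf("complete(&login_req_comp)\n");	/* wake the login thread */
}

int main(void)
{
	struct login login = { .first_request = true };
	struct conn conn = { .conn_login = &login };

	rx_login(NULL);		/* first PDU: conn not bound yet, record only */
	login.first_request = false;
	rx_login(&conn);	/* subsequent PDUs: handled inline */
	return 0;
}
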
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1534 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, 1576 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1535 data->dma_dir); 1577 data->dma_dir);
1536 if (unlikely(!data->dma_nents)) { 1578 if (unlikely(!data->dma_nents)) {
1537 pr_err("Cmd: unable to dma map SGs %p\n", sg); 1579 isert_err("Cmd: unable to dma map SGs %p\n", sg);
1538 return -EINVAL; 1580 return -EINVAL;
1539 } 1581 }
1540 1582
1541 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1583 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1542 isert_cmd, data->dma_nents, data->sg, data->nents, data->len); 1584 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1543 1585
1544 return 0; 1586 return 0;
1545} 1587}
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1560{ 1602{
1561 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1603 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1562 1604
1563 pr_debug("isert_unmap_cmd: %p\n", isert_cmd); 1605 isert_dbg("Cmd %p\n", isert_cmd);
1564 1606
1565 if (wr->data.sg) { 1607 if (wr->data.sg) {
1566 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); 1608 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1567 isert_unmap_data_buf(isert_conn, &wr->data); 1609 isert_unmap_data_buf(isert_conn, &wr->data);
1568 } 1610 }
1569 1611
1570 if (wr->send_wr) { 1612 if (wr->send_wr) {
1571 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); 1613 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1572 kfree(wr->send_wr); 1614 kfree(wr->send_wr);
1573 wr->send_wr = NULL; 1615 wr->send_wr = NULL;
1574 } 1616 }
1575 1617
1576 if (wr->ib_sge) { 1618 if (wr->ib_sge) {
1577 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); 1619 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
1578 kfree(wr->ib_sge); 1620 kfree(wr->ib_sge);
1579 wr->ib_sge = NULL; 1621 wr->ib_sge = NULL;
1580 } 1622 }
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1586 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1628 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1587 LIST_HEAD(unmap_list); 1629 LIST_HEAD(unmap_list);
1588 1630
1589 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); 1631 isert_dbg("Cmd %p\n", isert_cmd);
1590 1632
1591 if (wr->fr_desc) { 1633 if (wr->fr_desc) {
1592 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", 1634 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
1593 isert_cmd, wr->fr_desc);
1594 if (wr->fr_desc->ind & ISERT_PROTECTED) { 1635 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1595 isert_unmap_data_buf(isert_conn, &wr->prot); 1636 isert_unmap_data_buf(isert_conn, &wr->prot);
1596 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1637 wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1602 } 1643 }
1603 1644
1604 if (wr->data.sg) { 1645 if (wr->data.sg) {
1605 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); 1646 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1606 isert_unmap_data_buf(isert_conn, &wr->data); 1647 isert_unmap_data_buf(isert_conn, &wr->data);
1607 } 1648 }
1608 1649
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1618 struct iscsi_conn *conn = isert_conn->conn; 1659 struct iscsi_conn *conn = isert_conn->conn;
1619 struct isert_device *device = isert_conn->conn_device; 1660 struct isert_device *device = isert_conn->conn_device;
1620 1661
1621 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); 1662 isert_dbg("Cmd %p\n", isert_cmd);
1622 1663
1623 switch (cmd->iscsi_opcode) { 1664 switch (cmd->iscsi_opcode) {
1624 case ISCSI_OP_SCSI_CMD: 1665 case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1668 * associated cmd->se_cmd needs to be released. 1709 * associated cmd->se_cmd needs to be released.
1669 */ 1710 */
1670 if (cmd->se_cmd.se_tfo != NULL) { 1711 if (cmd->se_cmd.se_tfo != NULL) {
1671 pr_debug("Calling transport_generic_free_cmd from" 1712 isert_dbg("Calling transport_generic_free_cmd from"
1672 " isert_put_cmd for 0x%02x\n", 1713 " isert_put_cmd for 0x%02x\n",
1673 cmd->iscsi_opcode); 1714 cmd->iscsi_opcode);
1674 transport_generic_free_cmd(&cmd->se_cmd, 0); 1715 transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
1687isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1728isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1688{ 1729{
1689 if (tx_desc->dma_addr != 0) { 1730 if (tx_desc->dma_addr != 0) {
1690 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); 1731 isert_dbg("unmap single for tx_desc->dma_addr\n");
1691 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1732 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1692 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1733 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1693 tx_desc->dma_addr = 0; 1734 tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1699 struct ib_device *ib_dev, bool comp_err) 1740 struct ib_device *ib_dev, bool comp_err)
1700{ 1741{
1701 if (isert_cmd->pdu_buf_dma != 0) { 1742 if (isert_cmd->pdu_buf_dma != 0) {
1702 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); 1743 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1703 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1744 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1704 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1745 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1705 isert_cmd->pdu_buf_dma = 0; 1746 isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1717 1758
1718 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1759 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1719 if (ret) { 1760 if (ret) {
1720 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1761 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1721 goto fail_mr_status; 1762 goto fail_mr_status;
1722 } 1763 }
1723 1764
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1740 do_div(sec_offset_err, block_size); 1781 do_div(sec_offset_err, block_size);
1741 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1782 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1742 1783
1743 pr_err("isert: PI error found type %d at sector 0x%llx " 1784 isert_err("PI error found type %d at sector 0x%llx "
1744 "expected 0x%x vs actual 0x%x\n", 1785 "expected 0x%x vs actual 0x%x\n",
1745 mr_status.sig_err.err_type, 1786 mr_status.sig_err.err_type,
1746 (unsigned long long)se_cmd->bad_sector, 1787 (unsigned long long)se_cmd->bad_sector,
1747 mr_status.sig_err.expected, 1788 mr_status.sig_err.expected,
1748 mr_status.sig_err.actual); 1789 mr_status.sig_err.actual);
1749 ret = 1; 1790 ret = 1;
1750 } 1791 }
1751 1792
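
isert_check_pi_status() above converts the byte offset reported by the HCA into a failing sector: divide by the block size (do_div() in the kernel, since this is a 64-bit by 32-bit divide) and add the command's starting LBA. A plain C model of the arithmetic; the 512+8 block size assumes 512-byte sectors with an 8-byte DIF tuple appended, which can differ per device:

/* Model of the bad-sector math: HCA error offset (bytes) divided by
 * the per-block size, plus the command's starting LBA. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sig_err_offset = 12288;	/* byte offset from the HCA */
	uint32_t block_size = 512 + 8;		/* assumed: 512B data + 8B PI */
	uint64_t t_task_lba = 1000;		/* command start LBA */

	uint64_t bad_sector = sig_err_offset / block_size + t_task_lba;

	printf("bad sector: %llu\n", (unsigned long long)bad_sector);
	return 0;
}
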
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1801 cmd->write_data_done = wr->data.len; 1842 cmd->write_data_done = wr->data.len;
1802 wr->send_wr_num = 0; 1843 wr->send_wr_num = 0;
1803 1844
1804 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1845 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1805 spin_lock_bh(&cmd->istate_lock); 1846 spin_lock_bh(&cmd->istate_lock);
1806 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1847 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1807 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1848 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 
+	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
 	switch (cmd->i_state) {
 	case ISTATE_SEND_TASKMGTRSP:
-		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
-
-		atomic_dec(&isert_conn->post_send_buf_count);
 		iscsit_tmr_post_handler(cmd, cmd->conn);
-
-		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
-		break;
-	case ISTATE_SEND_REJECT:
-		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
-
+	case ISTATE_SEND_REJECT:   /* FALLTHRU */
+	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+				     ib_dev, false);
 		break;
 	case ISTATE_SEND_LOGOUTRSP:
-		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-
-		atomic_dec(&isert_conn->post_send_buf_count);
 		iscsit_logout_post_handler(cmd, cmd->conn);
 		break;
-	case ISTATE_SEND_TEXTRSP:
-		atomic_dec(&isert_conn->post_send_buf_count);
-		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
-		break;
 	default:
-		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+		isert_err("Unknown i_state %d\n", cmd->i_state);
 		dump_stack();
 		break;
 	}
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 			  struct ib_device *ib_dev)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		return;
 	}
 
-	/**
-	 * If send_wr_num is 0 this means that we got
-	 * RDMA completion and we cleared it and we should
-	 * simply decrement the response post. else the
-	 * response is incorporated in send_wr_num, just
-	 * sub it.
-	 **/
-	if (wr->send_wr_num)
-		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-	else
-		atomic_dec(&isert_conn->post_send_buf_count);
-
 	cmd->i_state = ISTATE_SENT_STATUS;
 	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
-			struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+		      struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
 	struct isert_rdma_wr *wr;
 
 	if (!isert_cmd) {
-		atomic_dec(&isert_conn->post_send_buf_count);
 		isert_unmap_tx_desc(tx_desc, ib_dev);
 		return;
 	}
 	wr = &isert_cmd->rdma_wr;
 
+	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+
 	switch (wr->iser_ib_op) {
 	case ISER_IB_RECV:
-		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+		isert_err("Got ISER_IB_RECV\n");
 		dump_stack();
 		break;
 	case ISER_IB_SEND:
-		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
 		isert_response_completion(tx_desc, isert_cmd,
 					  isert_conn, ib_dev);
 		break;
 	case ISER_IB_RDMA_WRITE:
-		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 		isert_completion_rdma_write(tx_desc, isert_cmd);
 		break;
 	case ISER_IB_RDMA_READ:
-		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
-
-		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 		isert_completion_rdma_read(tx_desc, isert_cmd);
 		break;
 	default:
-		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
 		dump_stack();
 		break;
 	}
 }
 
-static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
-		      struct isert_conn *isert_conn)
-{
-	struct llist_node *llnode = tx_desc->comp_llnode_batch;
-	struct iser_tx_desc *t;
-	/*
-	 * Drain coalesced completion llist starting from comp_llnode_batch
-	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
-	 */
-	while (llnode) {
-		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
-		llnode = llist_next(llnode);
-		__isert_send_completion(t, isert_conn);
-	}
-	__isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
-isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ * is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on wc opcode in FLUSH errors
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so
+ * it is an RX descriptor, otherwise it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
 {
-	struct llist_node *llnode;
-	struct isert_rdma_wr *wr;
-	struct iser_tx_desc *t;
+	void *start = isert_conn->conn_rx_descs;
+	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
 
-	mutex_lock(&isert_conn->conn_mutex);
-	llnode = llist_del_all(&isert_conn->conn_comp_llist);
-	isert_conn->conn_comp_batch = 0;
-	mutex_unlock(&isert_conn->conn_mutex);
-
-	while (llnode) {
-		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
-		llnode = llist_next(llnode);
-		wr = &t->isert_cmd->rdma_wr;
-
-		/**
-		 * If send_wr_num is 0 this means that we got
-		 * RDMA completion and we cleared it and we should
-		 * simply decrement the response post. else the
-		 * response is incorporated in send_wr_num, just
-		 * sub it.
-		 **/
-		if (wr->send_wr_num)
-			atomic_sub(wr->send_wr_num,
-				   &isert_conn->post_send_buf_count);
+	if (wr_id >= start && wr_id < start + len)
+		return false;
 
-		else
-			atomic_dec(&isert_conn->post_send_buf_count);
-
-		isert_completion_put(t, t->isert_cmd, ib_dev, true);
-	}
+	return true;
 }
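
is_isert_tx_desc() classifies a completion purely by address: a wr_id pointing inside the contiguous rx_descs array is an RX descriptor, anything else is TX. A userspace model of the same range check (array size and types are illustrative):

/* Model of is_isert_tx_desc(): wr_id inside the contiguous rx_descs
 * array means RX completion, anything else is a TX descriptor. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RECV_DTOS 8

struct rx_desc { char payload[128]; };

static struct rx_desc rx_descs[MAX_RECV_DTOS];

static bool is_tx_desc(void *wr_id)
{
	char *start = (char *)rx_descs;
	char *end = start + sizeof(rx_descs);
	char *p = wr_id;

	return !(p >= start && p < end);	/* outside the array => TX */
}

int main(void)
{
	struct rx_desc tx_standin;

	printf("rx_descs[3] is TX? %d\n", is_tx_desc(&rx_descs[3]));	/* 0 */
	printf("other desc is TX? %d\n", is_tx_desc(&tx_standin));	/* 1 */
	return 0;
}
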
1991 1971
1992static void 1972static void
1993isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1973isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
1994{ 1974{
1995 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1975 if (wc->wr_id == ISER_BEACON_WRID) {
1996 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1976 isert_info("conn %p completing conn_wait_comp_err\n",
1997 struct llist_node *llnode = tx_desc->comp_llnode_batch; 1977 isert_conn);
1998 struct isert_rdma_wr *wr; 1978 complete(&isert_conn->conn_wait_comp_err);
1999 struct iser_tx_desc *t; 1979 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
2000 1980 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2001 while (llnode) { 1981 struct isert_cmd *isert_cmd;
2002 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1982 struct iser_tx_desc *desc;
2003 llnode = llist_next(llnode);
2004 wr = &t->isert_cmd->rdma_wr;
2005 1983
2006 /** 1984 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2007 * If send_wr_num is 0 this means that we got 1985 isert_cmd = desc->isert_cmd;
2008 * RDMA completion and we cleared it and we should 1986 if (!isert_cmd)
2009 * simply decrement the response post. else the 1987 isert_unmap_tx_desc(desc, ib_dev);
2010 * response is incorporated in send_wr_num, just
2011 * sub it.
2012 **/
2013 if (wr->send_wr_num)
2014 atomic_sub(wr->send_wr_num,
2015 &isert_conn->post_send_buf_count);
2016 else 1988 else
2017 atomic_dec(&isert_conn->post_send_buf_count); 1989 isert_completion_put(desc, isert_cmd, ib_dev, true);
2018 1990 } else {
2019 isert_completion_put(t, t->isert_cmd, ib_dev, true); 1991 isert_conn->post_recv_buf_count--;
2020 } 1992 if (!isert_conn->post_recv_buf_count)
2021 tx_desc->comp_llnode_batch = NULL; 1993 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2022
2023 if (!isert_cmd)
2024 isert_unmap_tx_desc(tx_desc, ib_dev);
2025 else
2026 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
2027}
2028
2029static void
2030isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2031{
2032 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2033 struct iscsi_conn *conn = isert_conn->conn;
2034
2035 if (isert_conn->post_recv_buf_count)
2036 return;
2037
2038 isert_cq_drain_comp_llist(isert_conn, ib_dev);
2039
2040 if (conn->sess) {
2041 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2042 target_wait_for_sess_cmds(conn->sess->se_sess);
2043 } 1994 }
2044
2045 while (atomic_read(&isert_conn->post_send_buf_count))
2046 msleep(3000);
2047
2048 mutex_lock(&isert_conn->conn_mutex);
2049 isert_conn->state = ISER_CONN_DOWN;
2050 mutex_unlock(&isert_conn->conn_mutex);
2051
2052 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2053
2054 complete(&isert_conn->conn_wait_comp_err);
2055} 1995}
2056 1996
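Note: the kernel-doc above explains the key trick in this hunk. Once a QP enters the error state, flushed work completions no longer carry a usable opcode, so the only reliable way to tell an RX completion from a TX completion is the wr_id cookie itself: RX cookies always point into the contiguous conn_rx_descs array. A minimal standalone sketch of the same pointer-range classification (the names and sizes here are illustrative, not taken from this patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative: classify an opaque completion cookie by testing
     * whether it points into a known, contiguous RX descriptor array. */
    struct rx_desc { char pdu[128]; };

    static bool cookie_is_rx(const struct rx_desc *descs, size_t ndescs,
                             uintptr_t cookie)
    {
            uintptr_t start = (uintptr_t)descs;
            uintptr_t end = start + ndescs * sizeof(*descs);

            return cookie >= start && cookie < end;
    }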
-static void
-isert_cq_tx_work(struct work_struct *work)
-{
-	struct isert_cq_desc *cq_desc = container_of(work,
-				struct isert_cq_desc, cq_tx_work);
-	struct isert_device *device = cq_desc->device;
-	int cq_index = cq_desc->cq_index;
-	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
-	struct isert_conn *isert_conn;
-	struct iser_tx_desc *tx_desc;
-	struct ib_wc wc;
-
-	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
-		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
-		isert_conn = wc.qp->qp_context;
-
-		if (wc.status == IB_WC_SUCCESS) {
-			isert_send_completion(tx_desc, isert_conn);
-		} else {
-			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			pr_debug("TX wc.status: 0x%08x\n", wc.status);
-			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
-			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
-				if (tx_desc->llnode_active)
-					continue;
-
-				atomic_dec(&isert_conn->post_send_buf_count);
-				isert_cq_tx_comp_err(tx_desc, isert_conn);
-			}
-		}
-	}
-
-	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
-	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
-
-	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
-}
-
-static void
-isert_cq_rx_work(struct work_struct *work)
-{
-	struct isert_cq_desc *cq_desc = container_of(work,
-			struct isert_cq_desc, cq_rx_work);
-	struct isert_device *device = cq_desc->device;
-	int cq_index = cq_desc->cq_index;
-	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
-	struct isert_conn *isert_conn;
-	struct iser_rx_desc *rx_desc;
-	struct ib_wc wc;
-	unsigned long xfer_len;
-
-	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
-		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
-		isert_conn = wc.qp->qp_context;
-
-		if (wc.status == IB_WC_SUCCESS) {
-			xfer_len = (unsigned long)wc.byte_len;
-			isert_rx_completion(rx_desc, isert_conn, xfer_len);
-		} else {
-			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			if (wc.status != IB_WC_WR_FLUSH_ERR) {
-				pr_debug("RX wc.status: 0x%08x\n", wc.status);
-				pr_debug("RX wc.vendor_err: 0x%08x\n",
-					 wc.vendor_err);
-			}
-			isert_conn->post_recv_buf_count--;
-			isert_cq_rx_comp_err(isert_conn);
-		}
-	}
-
-	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
-}
+static void
+isert_handle_wc(struct ib_wc *wc)
+{
+	struct isert_conn *isert_conn;
+	struct iser_tx_desc *tx_desc;
+	struct iser_rx_desc *rx_desc;
+
+	isert_conn = wc->qp->qp_context;
+	if (likely(wc->status == IB_WC_SUCCESS)) {
+		if (wc->opcode == IB_WC_RECV) {
+			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+			isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
+		} else {
+			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+			isert_send_completion(tx_desc, isert_conn);
+		}
+	} else {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			isert_err("wr id %llx status %d vend_err %x\n",
+				  wc->wr_id, wc->status, wc->vendor_err);
+		else
+			isert_dbg("flush error: wr id %llx\n", wc->wr_id);
+
+		if (wc->wr_id != ISER_FASTREG_LI_WRID)
+			isert_cq_comp_err(isert_conn, wc);
+	}
+}
+
+static void
+isert_cq_work(struct work_struct *work)
+{
+	enum { isert_poll_budget = 65536 };
+	struct isert_comp *comp = container_of(work, struct isert_comp,
+					       work);
+	struct ib_wc *const wcs = comp->wcs;
+	int i, n, completed = 0;
+
+	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
+		for (i = 0; i < n; i++)
+			isert_handle_wc(&wcs[i]);
+
+		completed += n;
+		if (completed >= isert_poll_budget)
+			break;
+	}
+
+	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
+}
 
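Note: isert_cq_work() replaces the separate TX and RX work handlers with a single bounded drain: completions are polled in batches of up to 16 (the size of comp->wcs in the header change below) and one work invocation handles at most 65536 of them before yielding and re-arming the CQ, so a busy queue cannot monopolize the workqueue. A sketch of the same bounded-drain pattern in isolation (the batch size and budget are this patch's choices, not verbs requirements):

    /* Sketch: drain a CQ in bounded batches, then re-arm it. */
    static void bounded_cq_drain(struct ib_cq *cq, struct ib_wc *wcs,
                                 int batch, int budget,
                                 void (*handle)(struct ib_wc *wc))
    {
            int i, n, completed = 0;

            while ((n = ib_poll_cq(cq, batch, wcs)) > 0) {
                    for (i = 0; i < n; i++)
                            handle(&wcs[i]);

                    completed += n;
                    if (completed >= budget)
                            break;          /* bound this invocation */
            }

            ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
    }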
 static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
 {
-	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+	struct isert_comp *comp = context;
 
-	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+	queue_work(isert_comp_wq, &comp->work);
 }
 
 static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
 	struct ib_send_wr *wr_failed;
 	int ret;
 
-	atomic_inc(&isert_conn->post_send_buf_count);
-
 	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
 			   &wr_failed);
 	if (ret) {
-		pr_err("ib_post_send failed with %d\n", ret);
-		atomic_dec(&isert_conn->post_send_buf_count);
+		isert_err("ib_post_send failed with %d\n", ret);
 		return ret;
 	}
 	return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_cmd->tx_desc.num_sge = 2;
 	}
 
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting SCSI Response\n");
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_device *device = isert_conn->conn_device;
 
-	if (device->pi_capable)
-		return TARGET_PROT_ALL;
+	if (conn->tpg->tpg_attrib.t10_pi) {
+		if (device->pi_capable) {
+			isert_info("conn %p PI offload enabled\n", isert_conn);
+			isert_conn->pi_support = true;
+			return TARGET_PROT_ALL;
+		}
+	}
+
+	isert_info("conn %p PI offload disabled\n", isert_conn);
+	isert_conn->pi_support = false;
 
 	return TARGET_PROT_NORMAL;
 }
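Note: protection offload is now negotiated from both ends: the target portal group must request T10-PI (tpg_attrib.t10_pi) and the device must be pi_capable, with the result cached in isert_conn->pi_support for the later isert_prot_cmd() checks. The outcome, illustratively:

    tpg_attrib.t10_pi    device->pi_capable    returned protection ops
    -----------------    ------------------    -----------------------
    off                  (any)                 TARGET_PROT_NORMAL
    on                   no                    TARGET_PROT_NORMAL
    on                   yes                   TARGET_PROT_ALL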
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 				     &isert_cmd->tx_desc.iscsi_header,
 				     nopout_response);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
 				&isert_cmd->tx_desc.iscsi_header);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Posting Logout Response\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
 				  &isert_cmd->tx_desc.iscsi_header);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
 	isert_cmd->tx_desc.num_sge = 2;
 
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Posting Reject\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
 		isert_cmd->tx_desc.num_sge = 2;
 	}
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p Text Reject\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 
 	send_wr->sg_list = ib_sge;
 	send_wr->num_sge = sg_nents;
-	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
 	/*
 	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
 	 */
 	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
-		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
-			 (unsigned long long)tmp_sg->dma_address,
-			 tmp_sg->length, page_off);
+		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
+			  "page_off: %u\n",
+			  (unsigned long long)tmp_sg->dma_address,
+			  tmp_sg->length, page_off);
 
 		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
 		ib_sge->length = min_t(u32, data_left,
 				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
 		ib_sge->lkey = isert_conn->conn_mr->lkey;
 
-		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
 			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
 		page_off = 0;
 		data_left -= ib_sge->length;
 		ib_sge++;
-		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
 	}
 
-	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
 		  send_wr->sg_list, send_wr->num_sge);
 
 	return sg_nents;
 }
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
-		pr_warn("Unable to allocate ib_sge\n");
+		isert_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
 		goto unmap_cmd;
 	}
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 				GFP_KERNEL);
 	if (!wr->send_wr) {
-		pr_debug("Unable to allocate wr->send_wr\n");
+		isert_dbg("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
 		goto unmap_cmd;
 	}
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 			chunk_start = start_addr;
 		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
 
-		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
+		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
 			  i, (unsigned long long)tmp_sg->dma_address,
 			  tmp_sg->length);
 
 		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
 			new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 			page = chunk_start & PAGE_MASK;
 			do {
 				fr_pl[n_pages++] = page;
-				pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
+				isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
 					  n_pages - 1, page);
 				page += PAGE_SIZE;
 			} while (page < end_addr);
 		}
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 	return n_pages;
 }
 
+static inline void
+isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+	u32 rkey;
+
+	memset(inv_wr, 0, sizeof(*inv_wr));
+	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+	inv_wr->opcode = IB_WR_LOCAL_INV;
+	inv_wr->ex.invalidate_rkey = mr->rkey;
+
+	/* Bump the key */
+	rkey = ib_inc_rkey(mr->rkey);
+	ib_update_fast_reg_key(mr, rkey);
+}
+
 static int
 isert_fast_reg_mr(struct isert_conn *isert_conn,
 		  struct fast_reg_descriptor *fr_desc,
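Note: isert_inv_rkey() consolidates two previously open-coded local-invalidate sequences and replaces the old u8 key arithmetic with ib_inc_rkey(). That helper bumps only the low eight "key" bits of the rkey and preserves the index bits; assuming the usual <rdma/ib_verbs.h> definition, its semantics are equivalent to:

    /* Assumed semantics of ib_inc_rkey(): advance the 8-bit key portion
     * of an rkey while leaving the memory-region index bits untouched. */
    static inline u32 sketch_inc_rkey(u32 rkey)
    {
            const u32 mask = 0x000000ff;

            return ((rkey + 1) & mask) | (rkey & ~mask);
    }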
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, pagelist_len;
 	u32 page_off;
-	u8 key;
 
 	if (mem->dma_nents == 1) {
 		sge->lkey = isert_conn->conn_mr->lkey;
 		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
 		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
-		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
-			 __func__, __LINE__, sge->addr, sge->length,
-			 sge->lkey);
+		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+			  sge->addr, sge->length, sge->lkey);
 		return 0;
 	}
 
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
 	page_off = mem->offset % PAGE_SIZE;
 
-	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
 		  fr_desc, mem->nents, mem->offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
 					     &frpl->page_list[0]);
 
-	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
-		memset(&inv_wr, 0, sizeof(inv_wr));
-		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
-		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.ex.invalidate_rkey = mr->rkey;
+	if (!(fr_desc->ind & ind)) {
+		isert_inv_rkey(&inv_wr, mr);
 		wr = &inv_wr;
-		/* Bump the key */
-		key = (u8)(mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(mr, ++key);
 	}
 
 	/* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
 	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
 	if (ret) {
-		pr_err("fast registration failed, ret:%d\n", ret);
+		isert_err("fast registration failed, ret:%d\n", ret);
 		return ret;
 	}
 	fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 	sge->addr = frpl->page_list[0] + page_off;
 	sge->length = mem->len;
 
-	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
-		 __func__, __LINE__, sge->addr, sge->length,
-		 sge->lkey);
+	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+		  sge->addr, sge->length, sge->lkey);
 
 	return ret;
 }
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
 		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
 		break;
 	default:
-		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
 		return -EINVAL;
 	}
 
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
 }
 
 static int
-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
-		 struct fast_reg_descriptor *fr_desc,
-		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
-		 struct ib_sge *sig_sge)
+isert_reg_sig_mr(struct isert_conn *isert_conn,
+		 struct se_cmd *se_cmd,
+		 struct isert_rdma_wr *rdma_wr,
+		 struct fast_reg_descriptor *fr_desc)
 {
 	struct ib_send_wr sig_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	struct pi_context *pi_ctx = fr_desc->pi_ctx;
 	struct ib_sig_attrs sig_attrs;
 	int ret;
-	u32 key;
 
 	memset(&sig_attrs, 0, sizeof(sig_attrs));
 	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
 	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
 
 	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
-		memset(&inv_wr, 0, sizeof(inv_wr));
-		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
-		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
 		wr = &inv_wr;
-		/* Bump the key */
-		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
 	}
 
 	memset(&sig_wr, 0, sizeof(sig_wr));
 	sig_wr.opcode = IB_WR_REG_SIG_MR;
 	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
-	sig_wr.sg_list = data_sge;
+	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
 	sig_wr.num_sge = 1;
 	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
 	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
 	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
 	if (se_cmd->t_prot_sg)
-		sig_wr.wr.sig_handover.prot = prot_sge;
+		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
 
 	if (!wr)
 		wr = &sig_wr;
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
 
 	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
 	if (ret) {
-		pr_err("fast registration failed, ret:%d\n", ret);
+		isert_err("fast registration failed, ret:%d\n", ret);
 		goto err;
 	}
 	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
 
-	sig_sge->lkey = pi_ctx->sig_mr->lkey;
-	sig_sge->addr = 0;
-	sig_sge->length = se_cmd->data_length;
+	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+	rdma_wr->ib_sg[SIG].addr = 0;
+	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
 	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
 	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
 		/*
 		 * We have protection guards on the wire
 		 * so we need to set a larget transfer
 		 */
-		sig_sge->length += se_cmd->prot_length;
+		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
 
-	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
-		 sig_sge->addr, sig_sge->length,
-		 sig_sge->lkey);
+	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
+		  rdma_wr->ib_sg[SIG].lkey);
 err:
 	return ret;
 }
 
 static int
+isert_handle_prot_cmd(struct isert_conn *isert_conn,
+		      struct isert_cmd *isert_cmd,
+		      struct isert_rdma_wr *wr)
+{
+	struct isert_device *device = isert_conn->conn_device;
+	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+	int ret;
+
+	if (!wr->fr_desc->pi_ctx) {
+		ret = isert_create_pi_ctx(wr->fr_desc,
+					  device->ib_device,
+					  isert_conn->conn_pd);
+		if (ret) {
+			isert_err("conn %p failed to allocate pi_ctx\n",
+				  isert_conn);
+			return ret;
+		}
+	}
+
+	if (se_cmd->t_prot_sg) {
+		ret = isert_map_data_buf(isert_conn, isert_cmd,
+					 se_cmd->t_prot_sg,
+					 se_cmd->t_prot_nents,
+					 se_cmd->prot_length,
+					 0, wr->iser_ib_op, &wr->prot);
+		if (ret) {
+			isert_err("conn %p failed to map protection buffer\n",
+				  isert_conn);
+			return ret;
+		}
+
+		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
+		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
+					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+		if (ret) {
+			isert_err("conn %p failed to fast reg mr\n",
+				  isert_conn);
+			goto unmap_prot_cmd;
+		}
+	}
+
+	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+	if (ret) {
+		isert_err("conn %p failed to fast reg mr\n",
+			  isert_conn);
+		goto unmap_prot_cmd;
+	}
+	wr->fr_desc->ind |= ISERT_PROTECTED;
+
+	return 0;
+
+unmap_prot_cmd:
+	if (se_cmd->t_prot_sg)
+		isert_unmap_data_buf(isert_conn, &wr->prot);
+
+	return ret;
+}
+
+static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
-	struct ib_sge data_sge;
-	struct ib_send_wr *send_wr;
 	struct fast_reg_descriptor *fr_desc = NULL;
+	struct ib_send_wr *send_wr;
+	struct ib_sge *ib_sg;
 	u32 offset;
 	int ret = 0;
 	unsigned long flags;
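Note: the three scatter entries now live in wr->ib_sg[], indexed by the DATA/PROT/SIG enum added in ib_isert.h below, and the SIG entry's length covers the protection bytes whenever guards actually travel on the wire (every prot_op except DIN_STRIP and DOUT_INSERT). Worked example for a read of eight 512-byte sectors carrying 8-byte DIF tuples:

    data_length       = 8 sectors * 512 B   = 4096 B
    prot_length       = 8 sectors *   8 B   =   64 B
    ib_sg[SIG].length = 4096 + 64           = 4160 B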
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (ret)
 		return ret;
 
-	if (wr->data.dma_nents != 1 ||
-	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
+	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
 		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
-				ISERT_DATA_KEY_VALID, &data_sge);
+				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
 	if (ret)
 		goto unmap_cmd;
 
-	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
-		struct ib_sge prot_sge, sig_sge;
-
-		if (se_cmd->t_prot_sg) {
-			ret = isert_map_data_buf(isert_conn, isert_cmd,
-						 se_cmd->t_prot_sg,
-						 se_cmd->t_prot_nents,
-						 se_cmd->prot_length,
-						 0, wr->iser_ib_op, &wr->prot);
-			if (ret)
-				goto unmap_cmd;
-
-			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
-						ISERT_PROT_KEY_VALID, &prot_sge);
-			if (ret)
-				goto unmap_prot_cmd;
-		}
-
-		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
-				       &data_sge, &prot_sge, &sig_sge);
+	if (isert_prot_cmd(isert_conn, se_cmd)) {
+		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
 		if (ret)
-			goto unmap_prot_cmd;
+			goto unmap_cmd;
 
-		fr_desc->ind |= ISERT_PROTECTED;
-		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
-	} else
-		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+		ib_sg = &wr->ib_sg[SIG];
+	} else {
+		ib_sg = &wr->ib_sg[DATA];
+	}
 
+	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
 	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
 	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
-	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
 				      0 : IB_SEND_SIGNALED;
 	} else {
 		send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
-unmap_prot_cmd:
-	if (se_cmd->t_prot_sg)
-		isert_unmap_data_buf(isert_conn, &wr->prot);
+
 unmap_cmd:
 	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	struct ib_send_wr *wr_failed;
 	int rc;
 
-	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
 		  isert_cmd, se_cmd->data_length);
+
 	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
 	rc = device->reg_rdma_mem(conn, cmd, wr);
 	if (rc) {
-		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		return rc;
 	}
 
-	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+	if (!isert_prot_cmd(isert_conn, se_cmd)) {
 		/*
 		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
 		 */
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 					     &isert_cmd->tx_desc.iscsi_header);
 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
 		isert_init_send_wr(isert_conn, isert_cmd,
-				   &isert_cmd->tx_desc.send_wr, false);
+				   &isert_cmd->tx_desc.send_wr);
 		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
 		wr->send_wr_num += 1;
 	}
 
-	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
-	if (rc) {
-		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-	}
+	if (rc)
+		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 
-	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
-		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+	if (!isert_prot_cmd(isert_conn, se_cmd))
+		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
 			  "READ\n", isert_cmd);
 	else
-		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
 			  isert_cmd);
 
 	return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 	struct ib_send_wr *wr_failed;
 	int rc;
 
-	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
 	wr->iser_ib_op = ISER_IB_RDMA_READ;
 	rc = device->reg_rdma_mem(conn, cmd, wr);
 	if (rc) {
-		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		return rc;
 	}
 
-	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
-	if (rc) {
-		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-	}
-	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+	if (rc)
+		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+
+	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
 		  isert_cmd);
 
 	return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 		ret = isert_put_nopin(cmd, conn, false);
 		break;
 	default:
-		pr_err("Unknown immediate state: 0x%02x\n", state);
+		isert_err("Unknown immediate state: 0x%02x\n", state);
 		ret = -EINVAL;
 		break;
 	}
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 static int
 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
+	struct isert_conn *isert_conn = conn->context;
 	int ret;
 
 	switch (state) {
 	case ISTATE_SEND_LOGOUTRSP:
 		ret = isert_put_logout_rsp(cmd, conn);
-		if (!ret) {
-			pr_debug("Returning iSER Logout -EAGAIN\n");
-			ret = -EAGAIN;
-		}
+		if (!ret)
+			isert_conn->logout_posted = true;
 		break;
 	case ISTATE_SEND_NOPIN:
 		ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 		ret = isert_put_response(conn, cmd);
 		break;
 	default:
-		pr_err("Unknown response state: 0x%02x\n", state);
+		isert_err("Unknown response state: 0x%02x\n", state);
 		ret = -EINVAL;
 		break;
 	}
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 	return ret;
 }
 
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+	struct iscsi_np *np = isert_np->np;
+	struct rdma_cm_id *id;
+	struct sockaddr *sa;
+	int ret;
+
+	sa = (struct sockaddr *)&np->np_sockaddr;
+	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+	id = rdma_create_id(isert_cma_handler, isert_np,
+			    RDMA_PS_TCP, IB_QPT_RC);
+	if (IS_ERR(id)) {
+		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+		ret = PTR_ERR(id);
+		goto out;
+	}
+	isert_dbg("id %p context %p\n", id, id->context);
+
+	ret = rdma_bind_addr(id, sa);
+	if (ret) {
+		isert_err("rdma_bind_addr() failed: %d\n", ret);
+		goto out_id;
+	}
+
+	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+	if (ret) {
+		isert_err("rdma_listen() failed: %d\n", ret);
+		goto out_id;
+	}
+
+	return id;
+out_id:
+	rdma_destroy_id(id);
+out:
+	return ERR_PTR(ret);
+}
+
 static int
 isert_setup_np(struct iscsi_np *np,
 	       struct __kernel_sockaddr_storage *ksockaddr)
 {
 	struct isert_np *isert_np;
 	struct rdma_cm_id *isert_lid;
-	struct sockaddr *sa;
 	int ret;
 
 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
 	if (!isert_np) {
-		pr_err("Unable to allocate struct isert_np\n");
+		isert_err("Unable to allocate struct isert_np\n");
 		return -ENOMEM;
 	}
 	sema_init(&isert_np->np_sem, 0);
 	mutex_init(&isert_np->np_accept_mutex);
 	INIT_LIST_HEAD(&isert_np->np_accept_list);
 	init_completion(&isert_np->np_login_comp);
+	isert_np->np = np;
 
-	sa = (struct sockaddr *)ksockaddr;
-	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
 	/*
 	 * Setup the np->np_sockaddr from the passed sockaddr setup
 	 * in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
 	memcpy(&np->np_sockaddr, ksockaddr,
 	       sizeof(struct __kernel_sockaddr_storage));
 
-	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
-				   IB_QPT_RC);
+	isert_lid = isert_setup_id(isert_np);
 	if (IS_ERR(isert_lid)) {
-		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
-		       PTR_ERR(isert_lid));
 		ret = PTR_ERR(isert_lid);
 		goto out;
 	}
 
-	ret = rdma_bind_addr(isert_lid, sa);
-	if (ret) {
-		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
-		goto out_lid;
-	}
-
-	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
-	if (ret) {
-		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
-		goto out_lid;
-	}
-
 	isert_np->np_cm_id = isert_lid;
 	np->np_context = isert_np;
-	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
 
 	return 0;
 
-out_lid:
-	rdma_destroy_id(isert_lid);
 out:
 	kfree(isert_np);
+
 	return ret;
 }
 
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
 	cp.retry_count = 7;
 	cp.rnr_retry_count = 7;
 
-	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
-
 	ret = rdma_accept(cm_id, &cp);
 	if (ret) {
-		pr_err("rdma_accept() failed with: %d\n", ret);
+		isert_err("rdma_accept() failed with: %d\n", ret);
 		return ret;
 	}
 
-	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
-
 	return 0;
 }
 
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	int ret;
 
-	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+	isert_info("before login_req comp conn: %p\n", isert_conn);
+	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+	if (ret) {
+		isert_err("isert_conn %p interrupted before got login req\n",
+			  isert_conn);
+		return ret;
+	}
+	reinit_completion(&isert_conn->login_req_comp);
+
 	/*
 	 * For login requests after the first PDU, isert_rx_login_req() will
 	 * kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 	if (!login->first_request)
 		return 0;
 
+	isert_rx_login_req(isert_conn);
+
+	isert_info("before conn_login_comp conn: %p\n", conn);
 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
 	if (ret)
 		return ret;
 
-	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+	isert_info("processing login->req: %p\n", login->req);
+
 	return 0;
 }
 
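Note: login reception is now a two-stage rendezvous. The receive path signals login_req_comp once a login request has actually landed; the login thread waits for it, re-arms the completion, copies the PDU via isert_rx_login_req(), and only then blocks on conn_login_comp for the rest of login processing. Roughly (a sketch of the sequencing, not code from the patch):

    /*
     *   rx path                         login thread (isert_get_login_rx)
     *   -------                         ---------------------------------
     *   login request arrives    ->     wait_for_completion(login_req_comp)
     *   complete(login_req_comp)        reinit_completion(login_req_comp)
     *                                   isert_rx_login_req(isert_conn)
     *                                   wait_for_completion(conn_login_comp)
     */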
@@ -3161,7 +3136,7 @@ accept_wait:
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("np_thread_state %d for isert_accept_np\n",
+		isert_dbg("np_thread_state %d for isert_accept_np\n",
 			  np->np_thread_state);
 		/**
 		 * No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
 	isert_conn->conn = conn;
 	max_accept = 0;
 
-	ret = isert_rdma_post_recvl(isert_conn);
-	if (ret)
-		return ret;
-
-	ret = isert_rdma_accept(isert_conn);
-	if (ret)
-		return ret;
-
 	isert_set_conn_info(np, conn, isert_conn);
 
-	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+	isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
 	return 0;
 }
 
@@ -3204,25 +3172,103 @@ static void
 isert_free_np(struct iscsi_np *np)
 {
 	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+	struct isert_conn *isert_conn, *n;
 
 	if (isert_np->np_cm_id)
 		rdma_destroy_id(isert_np->np_cm_id);
 
+	/*
+	 * FIXME: At this point we don't have a good way to insure
+	 * that at this point we don't have hanging connections that
+	 * completed RDMA establishment but didn't start iscsi login
+	 * process. So work-around this by cleaning up what ever piled
+	 * up in np_accept_list.
+	 */
+	mutex_lock(&isert_np->np_accept_mutex);
+	if (!list_empty(&isert_np->np_accept_list)) {
+		isert_info("Still have isert connections, cleaning up...\n");
+		list_for_each_entry_safe(isert_conn, n,
+					 &isert_np->np_accept_list,
+					 conn_accept_node) {
+			isert_info("cleaning isert_conn %p state (%d)\n",
+				   isert_conn, isert_conn->state);
+			isert_connect_release(isert_conn);
+		}
+	}
+	mutex_unlock(&isert_np->np_accept_mutex);
+
 	np->np_context = NULL;
 	kfree(isert_np);
 }
 
+static void isert_release_work(struct work_struct *work)
+{
+	struct isert_conn *isert_conn = container_of(work,
+						     struct isert_conn,
+						     release_work);
+
+	isert_info("Starting release conn %p\n", isert_conn);
+
+	wait_for_completion(&isert_conn->conn_wait);
+
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	isert_info("Destroying conn %p\n", isert_conn);
+	isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+	struct iscsi_conn *conn = isert_conn->conn;
+
+	isert_info("conn %p\n", isert_conn);
+
+	if (isert_conn->logout_posted) {
+		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+		wait_for_completion_timeout(&conn->conn_logout_comp,
+					    SECONDS_FOR_LOGOUT_COMP * HZ);
+	}
+}
+
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+	isert_info("iscsi_conn %p\n", conn);
+
+	if (conn->sess) {
+		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+		target_wait_for_sess_cmds(conn->sess->se_sess);
+	}
+}
+
+static void
+isert_wait4flush(struct isert_conn *isert_conn)
+{
+	struct ib_recv_wr *bad_wr;
+
+	isert_info("conn %p\n", isert_conn);
+
+	init_completion(&isert_conn->conn_wait_comp_err);
+	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+	/* post an indication that all flush errors were consumed */
+	if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+		isert_err("conn %p failed to post beacon", isert_conn);
+		return;
+	}
+
+	wait_for_completion(&isert_conn->conn_wait_comp_err);
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 
-	pr_debug("isert_wait_conn: Starting \n");
+	isert_info("Starting conn %p\n", isert_conn);
 
 	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
-		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
-		rdma_disconnect(isert_conn->conn_cm_id);
-	}
 	/*
 	 * Only wait for conn_wait_comp_err if the isert_conn made it
 	 * into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 		mutex_unlock(&isert_conn->conn_mutex);
 		return;
 	}
-	if (isert_conn->state == ISER_CONN_UP)
-		isert_conn->state = ISER_CONN_TERMINATING;
+	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->conn_mutex);
 
-	wait_for_completion(&isert_conn->conn_wait_comp_err);
+	isert_wait4cmds(conn);
+	isert_wait4flush(isert_conn);
+	isert_wait4logout(isert_conn);
 
-	wait_for_completion(&isert_conn->conn_wait);
-	isert_put_conn(isert_conn);
+	INIT_WORK(&isert_conn->release_work, isert_release_work);
+	queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
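Note: isert_wait4flush() is the "beacon" half of the new teardown scheme. With the QP already driven to the error state by isert_conn_terminate(), every posted WR completes in order with a flush status; posting one final recv WR tagged ISER_BEACON_WRID and waiting for its completion (signalled from isert_cq_comp_err() earlier in this patch) therefore proves all earlier WRs have drained. A minimal sketch of the pattern, assuming the QP is already in error:

    /* Sketch: detect "all prior WRs flushed" by posting a sentinel recv
     * WR on an error-state QP and waiting for its flush completion. */
    static int wait_for_flush(struct ib_qp *qp, struct ib_recv_wr *beacon,
                              u64 sentinel_wrid, struct completion *done)
    {
            struct ib_recv_wr *bad_wr;
            int ret;

            init_completion(done);
            beacon->wr_id = sentinel_wrid;  /* never a real descriptor */

            ret = ib_post_recv(qp, beacon, &bad_wr);
            if (ret)
                    return ret;             /* caller must not wait */

            wait_for_completion(done);      /* CQ handler completes us */
            return 0;
    }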
 static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
 {
 	int ret;
 
-	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
-	if (!isert_rx_wq) {
-		pr_err("Unable to allocate isert_rx_wq\n");
+	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+	if (!isert_comp_wq) {
+		isert_err("Unable to allocate isert_comp_wq\n");
+		ret = -ENOMEM;
 		return -ENOMEM;
 	}
 
-	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
-	if (!isert_comp_wq) {
-		pr_err("Unable to allocate isert_comp_wq\n");
+	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+					   WQ_UNBOUND_MAX_ACTIVE);
+	if (!isert_release_wq) {
+		isert_err("Unable to allocate isert_release_wq\n");
 		ret = -ENOMEM;
-		goto destroy_rx_wq;
+		goto destroy_comp_wq;
 	}
 
 	iscsit_register_transport(&iser_target_transport);
-	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
 	return 0;
 
-destroy_rx_wq:
-	destroy_workqueue(isert_rx_wq);
+destroy_comp_wq:
+	destroy_workqueue(isert_comp_wq);
+
 	return ret;
 }
 
 static void __exit isert_exit(void)
 {
 	flush_scheduled_work();
+	destroy_workqueue(isert_release_wq);
 	destroy_workqueue(isert_comp_wq);
-	destroy_workqueue(isert_rx_wq);
 	iscsit_unregister_transport(&iser_target_transport);
-	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
 }
 
 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 04f51f7bf614..8dc8415d152d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -4,9 +4,37 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 
+#define DRV_NAME	"isert"
+#define PFX		DRV_NAME ": "
+
+#define isert_dbg(fmt, arg...)				 \
+	do {						 \
+		if (unlikely(isert_debug_level > 2))	 \
+			printk(KERN_DEBUG PFX "%s: " fmt,\
+				__func__ , ## arg);	 \
+	} while (0)
+
+#define isert_warn(fmt, arg...)				\
+	do {						\
+		if (unlikely(isert_debug_level > 0))	\
+			pr_warn(PFX "%s: " fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define isert_info(fmt, arg...)				\
+	do {						\
+		if (unlikely(isert_debug_level > 1))	\
+			pr_info(PFX "%s: " fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define isert_err(fmt, arg...) \
+	pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
 #define ISERT_RDMA_LISTEN_BACKLOG	10
 #define ISCSI_ISER_SG_TABLESIZE		256
 #define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
+#define ISER_BEACON_WRID		0xfffffffffffffffeULL
 
 enum isert_desc_type {
 	ISCSI_TX_CONTROL,
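Note: all four wrappers gate on a single isert_debug_level (errors always print; warnings need a level above 0, info above 1, debug above 2). The header only references the symbol; the companion ib_isert.c change, which falls outside this excerpt, is expected to define it, presumably as a module parameter along these lines (a sketch, not quoted from the patch):

    static int isert_debug_level;
    module_param_named(debug_level, isert_debug_level, int, 0644);
    MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default: 0)");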
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
 	ISER_CONN_INIT,
 	ISER_CONN_UP,
+	ISER_CONN_FULL_FEATURE,
 	ISER_CONN_TERMINATING,
 	ISER_CONN_DOWN,
 };
@@ -44,9 +73,6 @@ struct iser_tx_desc {
 	struct ib_sge	tx_sg[2];
 	int		num_sge;
 	struct isert_cmd *isert_cmd;
-	struct llist_node *comp_llnode_batch;
-	struct llist_node comp_llnode;
-	bool		llnode_active;
 	struct ib_send_wr send_wr;
 } __packed;
 
@@ -81,6 +107,12 @@ struct isert_data_buf {
 	enum dma_data_direction dma_dir;
 };
 
+enum {
+	DATA = 0,
+	PROT = 1,
+	SIG = 2,
+};
+
 struct isert_rdma_wr {
 	struct list_head	wr_list;
 	struct isert_cmd	*isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
 	int			send_wr_num;
 	struct ib_send_wr	*send_wr;
 	struct ib_send_wr	s_send_wr;
+	struct ib_sge		ib_sg[3];
 	struct isert_data_buf	data;
 	struct isert_data_buf	prot;
 	struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
 struct isert_conn {
 	enum iser_conn_state	state;
 	int			post_recv_buf_count;
-	atomic_t		post_send_buf_count;
 	u32			responder_resources;
 	u32			initiator_depth;
+	bool			pi_support;
 	u32			max_sge;
 	char			*login_buf;
 	char			*login_req_buf;
 	char			*login_rsp_buf;
 	u64			login_req_dma;
+	int			login_req_len;
 	u64			login_rsp_dma;
 	unsigned int		conn_rx_desc_head;
 	struct iser_rx_desc	*conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
 	struct iscsi_conn	*conn;
 	struct list_head	conn_accept_node;
 	struct completion	conn_login_comp;
+	struct completion	login_req_comp;
 	struct iser_tx_desc	conn_login_tx_desc;
 	struct rdma_cm_id	*conn_cm_id;
 	struct ib_pd		*conn_pd;
 	struct ib_mr		*conn_mr;
 	struct ib_qp		*conn_qp;
 	struct isert_device	*conn_device;
-	struct work_struct	conn_logout_work;
 	struct mutex		conn_mutex;
 	struct completion	conn_wait;
 	struct completion	conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
147 int conn_fr_pool_size; 181 int conn_fr_pool_size;
148 /* lock to protect fastreg pool */ 182 /* lock to protect fastreg pool */
149 spinlock_t conn_lock; 183 spinlock_t conn_lock;
150#define ISERT_COMP_BATCH_COUNT 8 184 struct work_struct release_work;
151 int conn_comp_batch; 185 struct ib_recv_wr beacon;
152 struct llist_head conn_comp_llist; 186 bool logout_posted;
153 bool disconnect;
154}; 187};
155 188
156#define ISERT_MAX_CQ 64 189#define ISERT_MAX_CQ 64
157 190
158struct isert_cq_desc { 191/**
159 struct isert_device *device; 192 * struct isert_comp - iSER completion context
160 int cq_index; 193 *
161 struct work_struct cq_rx_work; 194 * @device: pointer to device handle
162 struct work_struct cq_tx_work; 195 * @cq: completion queue
196 * @wcs: work completion array
197 * @active_qps: Number of active QPs attached
198 * to completion context
199 * @work: completion work handle
200 */
201struct isert_comp {
202 struct isert_device *device;
203 struct ib_cq *cq;
204 struct ib_wc wcs[16];
205 int active_qps;
206 struct work_struct work;
163}; 207};
164 208
165struct isert_device { 209struct isert_device {
166 int use_fastreg; 210 int use_fastreg;
167 bool pi_capable; 211 bool pi_capable;
168 int cqs_used;
169 int refcount; 212 int refcount;
170 int cq_active_qps[ISERT_MAX_CQ];
171 struct ib_device *ib_device; 213 struct ib_device *ib_device;
172 struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; 214 struct isert_comp *comps;
173 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 215 int comps_used;
174 struct isert_cq_desc *cq_desc;
175 struct list_head dev_node; 216 struct list_head dev_node;
176 struct ib_device_attr dev_attr; 217 struct ib_device_attr dev_attr;
177 int (*reg_rdma_mem)(struct iscsi_conn *conn, 218 int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
182}; 223};
183 224
184struct isert_np { 225struct isert_np {
226 struct iscsi_np *np;
185 struct semaphore np_sem; 227 struct semaphore np_sem;
186 struct rdma_cm_id *np_cm_id; 228 struct rdma_cm_id *np_cm_id;
187 struct mutex np_accept_mutex; 229 struct mutex np_accept_mutex;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index db3c8c851af1..0747c0595a9d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
2740 .info = srp_target_info, 2740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand, 2741 .queuecommand = srp_queuecommand,
2742 .change_queue_depth = srp_change_queue_depth, 2742 .change_queue_depth = srp_change_queue_depth,
2743 .change_queue_type = scsi_change_queue_type,
2744 .eh_abort_handler = srp_abort, 2743 .eh_abort_handler = srp_abort,
2745 .eh_device_reset_handler = srp_reset_device, 2744 .eh_device_reset_handler = srp_reset_device,
2746 .eh_host_reset_handler = srp_reset_host, 2745 .eh_host_reset_handler = srp_reset_host,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dc829682701a..eb694ddad79f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1708 1708
1709 switch (srp_cmd->task_attr) { 1709 switch (srp_cmd->task_attr) {
1710 case SRP_CMD_SIMPLE_Q: 1710 case SRP_CMD_SIMPLE_Q:
1711 cmd->sam_task_attr = MSG_SIMPLE_TAG; 1711 cmd->sam_task_attr = TCM_SIMPLE_TAG;
1712 break; 1712 break;
1713 case SRP_CMD_ORDERED_Q: 1713 case SRP_CMD_ORDERED_Q:
1714 default: 1714 default:
1715 cmd->sam_task_attr = MSG_ORDERED_TAG; 1715 cmd->sam_task_attr = TCM_ORDERED_TAG;
1716 break; 1716 break;
1717 case SRP_CMD_HEAD_OF_Q: 1717 case SRP_CMD_HEAD_OF_Q:
1718 cmd->sam_task_attr = MSG_HEAD_TAG; 1718 cmd->sam_task_attr = TCM_HEAD_TAG;
1719 break; 1719 break;
1720 case SRP_CMD_ACA: 1720 case SRP_CMD_ACA:
1721 cmd->sam_task_attr = MSG_ACA_TAG; 1721 cmd->sam_task_attr = TCM_ACA_TAG;
1722 break; 1722 break;
1723 } 1723 }
1724 1724
@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1733 sizeof(srp_cmd->lun)); 1733 sizeof(srp_cmd->lun));
1734 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb, 1734 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
1735 &send_ioctx->sense_data[0], unpacked_lun, data_len, 1735 &send_ioctx->sense_data[0], unpacked_lun, data_len,
1736 MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF); 1736 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
1737 if (rc != 0) { 1737 if (rc != 0) {
1738 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1738 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1739 goto send_sense; 1739 goto send_sense;
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 3067d56b11a6..5844b80bd90e 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -204,16 +204,6 @@ config THERM_ADT746X
204 iBook G4, and the ATI based aluminium PowerBooks, allowing slightly 204 iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
205 better fan behaviour by default, and some manual control. 205 better fan behaviour by default, and some manual control.
206 206
207config THERM_PM72
208 tristate "Support for thermal management on PowerMac G5 (AGP)"
209 depends on I2C && I2C_POWERMAC && PPC_PMAC64
210 default n
211 help
212 This driver provides thermostat and fan control for the desktop
213 G5 machines.
214
215 This is deprecated, use windfarm instead.
216
217config WINDFARM 207config WINDFARM
218 tristate "New PowerMac thermal control infrastructure" 208 tristate "New PowerMac thermal control infrastructure"
219 depends on PPC 209 depends on PPC
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index d2f0120bc878..383ba920085b 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_ADB_IOP) += adb-iop.o
25obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o 25obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o
26obj-$(CONFIG_ADB_MACIO) += macio-adb.o 26obj-$(CONFIG_ADB_MACIO) += macio-adb.o
27 27
28obj-$(CONFIG_THERM_PM72) += therm_pm72.o
29obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o 28obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
30obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o 29obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o
31obj-$(CONFIG_WINDFARM) += windfarm_core.o 30obj-$(CONFIG_WINDFARM) += windfarm_core.o
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
deleted file mode 100644
index 7ed92582d2cf..000000000000
--- a/drivers/macintosh/therm_pm72.c
+++ /dev/null
@@ -1,2278 +0,0 @@
1/*
2 * Device driver for the thermostats & fan controller of the
3 * Apple G5 "PowerMac7,2" desktop machines.
4 *
5 * (c) Copyright IBM Corp. 2003-2004
6 *
7 * Maintained by: Benjamin Herrenschmidt
8 * <benh@kernel.crashing.org>
9 *
10 *
 11 * The algorithm used is PID control, applied the same way the
 12 * published Darwin code does, using the same values that
13 * are present in the Darwin 7.0 snapshot property lists.
14 *
 15 * As far as the CPU control loops are concerned, I use the
 16 * calibration & PID constants provided by the EEPROM;
17 * I do _not_ embed any value from the property lists, as the ones
 18 * provided by Darwin 7.0 seem to always have an older version than
 19 * what I've seen on the actual computers.
 20 * It would be interesting to verify that though. Darwin has a
 21 * version code of 1.0.0d11 for all control loops it seems, while
 22 * so far, the machines' EEPROMs contain a dataset versioned 1.0.0f
23 *
 24 * Darwin doesn't provide source to all parts; some missing
 25 * bits, like the AppleFCU driver or the actual scale of some
 26 * of the values returned by sensors, had to be "guessed" some
27 * way... or based on what Open Firmware does.
28 *
29 * I didn't yet figure out how to get the slots power consumption
30 * out of the FCU, so that part has not been implemented yet and
31 * the slots fan is set to a fixed 50% PWM, hoping this value is
32 * safe enough ...
33 *
34 * Note: I have observed strange oscillations of the CPU control
 35 * loop on a dual G5 here. When idle, the CPU exhaust fan tends to
 36 * oscillate slowly (over several minutes) between the minimum
 37 * of 300 RPM and approx. 1000 RPM. I don't know what is causing
 38 * this, it could be some incorrect constant or an error in the
 39 * way I ported the algorithm, or it could be just normal. I
 40 * don't have a full understanding of the way Apple tweaked the PID
41 * algorithm for the CPU control, it is definitely not a standard
42 * implementation...
43 *
44 * TODO: - Check MPU structure version/signature
45 * - Add things like /sbin/overtemp for non-critical
46 * overtemp conditions so userland can take some policy
47 * decisions, like slowing down CPUs
48 * - Deal with fan and i2c failures in a better way
49 * - Maybe do a generic PID based on params used for
50 * U3 and Drives ? Definitely need to factor code a bit
51 * better... also make sensor detection more robust using
52 * the device-tree to probe for them
53 * - Figure out how to get the slots consumption and set the
54 * slots fan accordingly
55 *
56 * History:
57 *
58 * Nov. 13, 2003 : 0.5
59 * - First release
60 *
61 * Nov. 14, 2003 : 0.6
62 * - Read fan speed from FCU, low level fan routines now deal
63 * with errors & check fan status, though higher level don't
64 * do much.
65 * - Move a bunch of definitions to .h file
66 *
67 * Nov. 18, 2003 : 0.7
68 * - Fix build on ppc64 kernel
69 * - Move back statics definitions to .c file
70 * - Avoid calling schedule_timeout with a negative number
71 *
72 * Dec. 18, 2003 : 0.8
73 * - Fix typo when reading back fan speed on 2 CPU machines
74 *
75 * Mar. 11, 2004 : 0.9
76 * - Rework code accessing the ADC chips, make it more robust and
77 * closer to the chip spec. Also make sure it is configured properly,
78 * I've seen yet unexplained cases where on startup, I would have stale
79 * values in the configuration register
80 * - Switch back to use of target fan speed for PID, thus lowering
81 * pressure on i2c
82 *
83 * Oct. 20, 2004 : 1.1
84 * - Add device-tree lookup for fan IDs, should detect liquid cooling
85 * pumps when present
86 * - Enable driver for PowerMac7,3 machines
87 * - Split the U3/Backside cooling on U3 & U3H versions as Darwin does
88 * - Add new CPU cooling algorithm for machines with liquid cooling
89 * - Workaround for some PowerMac7,3 with empty "fan" node in the devtree
90 * - Fix a signed/unsigned compare issue in some PID loops
91 *
92 * Mar. 10, 2005 : 1.2
93 * - Add basic support for Xserve G5
94 * - Retrieve pumps min/max from EEPROM image in device-tree (broken)
95 * - Use min/max macros here or there
96 * - Latest darwin updated U3H min fan speed to 20% PWM
97 *
98 * July. 06, 2006 : 1.3
99 * - Fix setting of RPM fans on Xserve G5 (they were going too fast)
100 * - Add missing slots fan control loop for Xserve G5
101 * - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We
102 * still can't properly implement the control loop for these, so let's
 103 * reduce the noise a little bit; it appears that 40% still gives us
104 * a pretty good air flow
 105 * - Add code to "tickle" the FCU regularly so it doesn't think that
106 * we are gone while in fact, the machine just didn't need any fan
107 * speed change lately
108 *
109 */
110
111#include <linux/types.h>
112#include <linux/module.h>
113#include <linux/errno.h>
114#include <linux/kernel.h>
115#include <linux/delay.h>
116#include <linux/sched.h>
117#include <linux/init.h>
118#include <linux/spinlock.h>
119#include <linux/wait.h>
120#include <linux/reboot.h>
121#include <linux/kmod.h>
122#include <linux/i2c.h>
123#include <linux/kthread.h>
124#include <linux/mutex.h>
125#include <linux/of_device.h>
126#include <linux/of_platform.h>
127#include <asm/prom.h>
128#include <asm/machdep.h>
129#include <asm/io.h>
130#include <asm/sections.h>
131#include <asm/macio.h>
132
133#include "therm_pm72.h"
134
135#define VERSION "1.3"
136
137#undef DEBUG
138
139#ifdef DEBUG
140#define DBG(args...) printk(args)
141#else
142#define DBG(args...) do { } while(0)
143#endif
144
145
146/*
147 * Driver statics
148 */
149
150static struct platform_device * of_dev;
151static struct i2c_adapter * u3_0;
152static struct i2c_adapter * u3_1;
153static struct i2c_adapter * k2;
154static struct i2c_client * fcu;
155static struct cpu_pid_state processor_state[2];
156static struct basckside_pid_params backside_params;
157static struct backside_pid_state backside_state;
158static struct drives_pid_state drives_state;
159static struct dimm_pid_state dimms_state;
160static struct slots_pid_state slots_state;
161static int state;
162static int cpu_count;
163static int cpu_pid_type;
164static struct task_struct *ctrl_task;
165static struct completion ctrl_complete;
166static int critical_state;
167static int rackmac;
168static s32 dimm_output_clamp;
169static int fcu_rpm_shift;
170static int fcu_tickle_ticks;
171static DEFINE_MUTEX(driver_lock);
172
173/*
174 * We have 3 types of CPU PID control. One is "split" old style control
175 * for intake & exhaust fans, the other is "combined" control for both
176 * CPUs that also deals with the pumps when present. To be "compatible"
177 * with OS X at this point, we only use "COMBINED" on the machines that
178 * are identified as having the pumps (though that identification is at
179 * least dodgy). Ultimately, we could probably switch completely to this
180 * algorithm provided we hack it to deal with the UP case
181 */
182#define CPU_PID_TYPE_SPLIT 0
183#define CPU_PID_TYPE_COMBINED 1
184#define CPU_PID_TYPE_RACKMAC 2
185
186/*
187 * This table describes all fans in the FCU. The "id" and "type" values
188 * are defaults valid for all earlier machines. Newer machines will
189 * eventually override the table content based on the device-tree
190 */
191struct fcu_fan_table
192{
193 char* loc; /* location code */
194 int type; /* 0 = rpm, 1 = pwm, 2 = pump */
195 int id; /* id or -1 */
196};
197
198#define FCU_FAN_RPM 0
199#define FCU_FAN_PWM 1
200
201#define FCU_FAN_ABSENT_ID -1
202
203#define FCU_FAN_COUNT ARRAY_SIZE(fcu_fans)
204
205struct fcu_fan_table fcu_fans[] = {
206 [BACKSIDE_FAN_PWM_INDEX] = {
207 .loc = "BACKSIDE,SYS CTRLR FAN",
208 .type = FCU_FAN_PWM,
209 .id = BACKSIDE_FAN_PWM_DEFAULT_ID,
210 },
211 [DRIVES_FAN_RPM_INDEX] = {
212 .loc = "DRIVE BAY",
213 .type = FCU_FAN_RPM,
214 .id = DRIVES_FAN_RPM_DEFAULT_ID,
215 },
216 [SLOTS_FAN_PWM_INDEX] = {
217 .loc = "SLOT,PCI FAN",
218 .type = FCU_FAN_PWM,
219 .id = SLOTS_FAN_PWM_DEFAULT_ID,
220 },
221 [CPUA_INTAKE_FAN_RPM_INDEX] = {
222 .loc = "CPU A INTAKE",
223 .type = FCU_FAN_RPM,
224 .id = CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
225 },
226 [CPUA_EXHAUST_FAN_RPM_INDEX] = {
227 .loc = "CPU A EXHAUST",
228 .type = FCU_FAN_RPM,
229 .id = CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
230 },
231 [CPUB_INTAKE_FAN_RPM_INDEX] = {
232 .loc = "CPU B INTAKE",
233 .type = FCU_FAN_RPM,
234 .id = CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
235 },
236 [CPUB_EXHAUST_FAN_RPM_INDEX] = {
237 .loc = "CPU B EXHAUST",
238 .type = FCU_FAN_RPM,
239 .id = CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
240 },
241 /* pumps aren't present by default, have to be looked up in the
242 * device-tree
243 */
244 [CPUA_PUMP_RPM_INDEX] = {
245 .loc = "CPU A PUMP",
246 .type = FCU_FAN_RPM,
247 .id = FCU_FAN_ABSENT_ID,
248 },
249 [CPUB_PUMP_RPM_INDEX] = {
250 .loc = "CPU B PUMP",
251 .type = FCU_FAN_RPM,
252 .id = FCU_FAN_ABSENT_ID,
253 },
254 /* Xserve fans */
255 [CPU_A1_FAN_RPM_INDEX] = {
256 .loc = "CPU A 1",
257 .type = FCU_FAN_RPM,
258 .id = FCU_FAN_ABSENT_ID,
259 },
260 [CPU_A2_FAN_RPM_INDEX] = {
261 .loc = "CPU A 2",
262 .type = FCU_FAN_RPM,
263 .id = FCU_FAN_ABSENT_ID,
264 },
265 [CPU_A3_FAN_RPM_INDEX] = {
266 .loc = "CPU A 3",
267 .type = FCU_FAN_RPM,
268 .id = FCU_FAN_ABSENT_ID,
269 },
270 [CPU_B1_FAN_RPM_INDEX] = {
271 .loc = "CPU B 1",
272 .type = FCU_FAN_RPM,
273 .id = FCU_FAN_ABSENT_ID,
274 },
275 [CPU_B2_FAN_RPM_INDEX] = {
276 .loc = "CPU B 2",
277 .type = FCU_FAN_RPM,
278 .id = FCU_FAN_ABSENT_ID,
279 },
280 [CPU_B3_FAN_RPM_INDEX] = {
281 .loc = "CPU B 3",
282 .type = FCU_FAN_RPM,
283 .id = FCU_FAN_ABSENT_ID,
284 },
285};
286
287static struct i2c_driver therm_pm72_driver;
288
289/*
290 * Utility function to create an i2c_client structure and
291 * attach it to one of u3 adapters
292 */
293static struct i2c_client *attach_i2c_chip(int id, const char *name)
294{
295 struct i2c_client *clt;
296 struct i2c_adapter *adap;
297 struct i2c_board_info info;
298
299 if (id & 0x200)
300 adap = k2;
301 else if (id & 0x100)
302 adap = u3_1;
303 else
304 adap = u3_0;
305 if (adap == NULL)
306 return NULL;
307
308 memset(&info, 0, sizeof(struct i2c_board_info));
309 info.addr = (id >> 1) & 0x7f;
310 strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
311 clt = i2c_new_device(adap, &info);
312 if (!clt) {
313 printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
314 return NULL;
315 }
316
317 /*
318 * Let i2c-core delete that device on driver removal.
319 * This is safe because i2c-core holds the core_lock mutex for us.
320 */
321 list_add_tail(&clt->detected, &therm_pm72_driver.clients);
322 return clt;
323}
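
As an aside, the bus/address encoding that attach_i2c_chip() decodes is easy to exercise on its own. Below is a minimal, self-contained sketch; the sample id values are hypothetical, not the SUPPLY_MONITOR_ID-style constants from therm_pm72.h:

#include <stdio.h>

/* Decode a therm_pm72-style i2c chip id: bit 9 selects the "k2"
 * adapter, bit 8 the second U3 adapter, anything else the first
 * U3 adapter; bits 7..1 carry the 7-bit i2c address. */
static void decode_chip_id(unsigned int id)
{
	const char *bus = (id & 0x200) ? "k2"
			: (id & 0x100) ? "u3_1"
			: "u3_0";

	printf("id 0x%03x -> adapter %s, addr 0x%02x\n",
	       id, bus, (id >> 1) & 0x7f);
}

int main(void)
{
	decode_chip_id(0x058);	/* hypothetical id: u3_0, addr 0x2c */
	decode_chip_id(0x25c);	/* hypothetical id: k2,   addr 0x2e */
	return 0;
}
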
324
325/*
326 * Here are the i2c chip access wrappers
327 */
328
329static void initialize_adc(struct cpu_pid_state *state)
330{
331 int rc;
332 u8 buf[2];
333
 334 /* Read the ADC configuration register and cache it. We
 335 * also make sure Config2 contains proper values; I've seen
 336 * cases where we got stale garbage in there, thus preventing
 337 * proper reading of conversion values
338 */
339
340 /* Clear Config2 */
341 buf[0] = 5;
342 buf[1] = 0;
343 i2c_master_send(state->monitor, buf, 2);
344
345 /* Read & cache Config1 */
346 buf[0] = 1;
347 rc = i2c_master_send(state->monitor, buf, 1);
348 if (rc > 0) {
349 rc = i2c_master_recv(state->monitor, buf, 1);
350 if (rc > 0) {
351 state->adc_config = buf[0];
352 DBG("ADC config reg: %02x\n", state->adc_config);
353 /* Disable shutdown mode */
354 state->adc_config &= 0xfe;
355 buf[0] = 1;
356 buf[1] = state->adc_config;
357 rc = i2c_master_send(state->monitor, buf, 2);
358 }
359 }
360 if (rc <= 0)
361 printk(KERN_ERR "therm_pm72: Error reading ADC config"
362 " register !\n");
363}
364
365static int read_smon_adc(struct cpu_pid_state *state, int chan)
366{
367 int rc, data, tries = 0;
368 u8 buf[2];
369
370 for (;;) {
371 /* Set channel */
372 buf[0] = 1;
373 buf[1] = (state->adc_config & 0x1f) | (chan << 5);
374 rc = i2c_master_send(state->monitor, buf, 2);
375 if (rc <= 0)
376 goto error;
377 /* Wait for conversion */
378 msleep(1);
379 /* Switch to data register */
380 buf[0] = 4;
381 rc = i2c_master_send(state->monitor, buf, 1);
382 if (rc <= 0)
383 goto error;
384 /* Read result */
385 rc = i2c_master_recv(state->monitor, buf, 2);
386 if (rc < 0)
387 goto error;
388 data = ((u16)buf[0]) << 8 | (u16)buf[1];
389 return data >> 6;
390 error:
391 DBG("Error reading ADC, retrying...\n");
392 if (++tries > 10) {
393 printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
394 return -1;
395 }
396 msleep(10);
397 }
398}
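
The `data >> 6` in read_smon_adc() implies the monitor left-justifies a 10-bit conversion in the 16-bit data register. A small standalone sketch of the unpacking, with a made-up raw sample:

#include <stdio.h>

/* The data register is read as two bytes; the 10-bit conversion
 * sits in the top bits, so the bottom 6 bits are discarded. */
int main(void)
{
	unsigned char buf[2] = { 0x8f, 0x40 };	/* sample raw bytes */
	unsigned int data = ((unsigned int)buf[0] << 8) | buf[1];

	printf("raw 0x%04x -> channel value %u\n", data, data >> 6);
	/* raw 0x8f40 -> channel value 573 */
	return 0;
}
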
399
400static int read_lm87_reg(struct i2c_client * chip, int reg)
401{
402 int rc, tries = 0;
403 u8 buf;
404
405 for (;;) {
406 /* Set address */
407 buf = (u8)reg;
408 rc = i2c_master_send(chip, &buf, 1);
409 if (rc <= 0)
410 goto error;
411 rc = i2c_master_recv(chip, &buf, 1);
412 if (rc <= 0)
413 goto error;
414 return (int)buf;
415 error:
416 DBG("Error reading LM87, retrying...\n");
417 if (++tries > 10) {
418 printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
419 return -1;
420 }
421 msleep(10);
422 }
423}
424
425static int fan_read_reg(int reg, unsigned char *buf, int nb)
426{
427 int tries, nr, nw;
428
429 buf[0] = reg;
430 tries = 0;
431 for (;;) {
432 nw = i2c_master_send(fcu, buf, 1);
433 if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
434 break;
435 msleep(10);
436 ++tries;
437 }
438 if (nw <= 0) {
 439 printk(KERN_ERR "Failure writing address to FCU: %d\n", nw);
440 return -EIO;
441 }
442 tries = 0;
443 for (;;) {
444 nr = i2c_master_recv(fcu, buf, nb);
445 if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
446 break;
447 msleep(10);
448 ++tries;
449 }
450 if (nr <= 0)
 451 printk(KERN_ERR "Failure reading data from FCU: %d\n", nr);
452 return nr;
453}
454
455static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
456{
457 int tries, nw;
458 unsigned char buf[16];
459
460 buf[0] = reg;
461 memcpy(buf+1, ptr, nb);
462 ++nb;
463 tries = 0;
464 for (;;) {
465 nw = i2c_master_send(fcu, buf, nb);
466 if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
467 break;
468 msleep(10);
469 ++tries;
470 }
471 if (nw < 0)
 472 printk(KERN_ERR "Failure writing to FCU: %d\n", nw);
473 return nw;
474}
475
476static int start_fcu(void)
477{
478 unsigned char buf = 0xff;
479 int rc;
480
481 rc = fan_write_reg(0xe, &buf, 1);
482 if (rc < 0)
483 return -EIO;
484 rc = fan_write_reg(0x2e, &buf, 1);
485 if (rc < 0)
486 return -EIO;
487 rc = fan_read_reg(0, &buf, 1);
488 if (rc < 0)
489 return -EIO;
490 fcu_rpm_shift = (buf == 1) ? 2 : 3;
491 printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n",
492 fcu_rpm_shift);
493
494 return 0;
495}
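
For orientation, here is the FCU register map as this driver uses it, inferred purely from the accessors above and the callers below (no datasheet was consulted):

/*
 * FCU register map as used by this driver (inferred from the code,
 * not from a datasheet):
 *
 *   0x00              revision (1 -> rpm shift of 2, else 3)
 *   0x0e, 0x2e        written 0xff by start_fcu() to enable fans
 *   0x0b / 0x2b       RPM / PWM fan failure bitmasks
 *   0x0d / 0x2d       RPM / PWM fan active bitmasks
 *   0x10 + 2 * id     RPM fan target (0x11 + 2 * id: actual speed)
 *   0x30 + 2 * id     PWM fan duty cycle
 */
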
496
497static int set_rpm_fan(int fan_index, int rpm)
498{
499 unsigned char buf[2];
500 int rc, id, min, max;
501
502 if (fcu_fans[fan_index].type != FCU_FAN_RPM)
503 return -EINVAL;
504 id = fcu_fans[fan_index].id;
505 if (id == FCU_FAN_ABSENT_ID)
506 return -EINVAL;
507
508 min = 2400 >> fcu_rpm_shift;
509 max = 56000 >> fcu_rpm_shift;
510
511 if (rpm < min)
512 rpm = min;
513 else if (rpm > max)
514 rpm = max;
515 buf[0] = rpm >> (8 - fcu_rpm_shift);
516 buf[1] = rpm << fcu_rpm_shift;
517 rc = fan_write_reg(0x10 + (id * 2), buf, 2);
518 if (rc < 0)
519 return -EIO;
520 return 0;
521}
522
523static int get_rpm_fan(int fan_index, int programmed)
524{
525 unsigned char failure;
526 unsigned char active;
527 unsigned char buf[2];
528 int rc, id, reg_base;
529
530 if (fcu_fans[fan_index].type != FCU_FAN_RPM)
531 return -EINVAL;
532 id = fcu_fans[fan_index].id;
533 if (id == FCU_FAN_ABSENT_ID)
534 return -EINVAL;
535
536 rc = fan_read_reg(0xb, &failure, 1);
537 if (rc != 1)
538 return -EIO;
539 if ((failure & (1 << id)) != 0)
540 return -EFAULT;
541 rc = fan_read_reg(0xd, &active, 1);
542 if (rc != 1)
543 return -EIO;
544 if ((active & (1 << id)) == 0)
545 return -ENXIO;
546
547 /* Programmed value or real current speed */
548 reg_base = programmed ? 0x10 : 0x11;
549 rc = fan_read_reg(reg_base + (id * 2), buf, 2);
550 if (rc != 2)
551 return -EIO;
552
553 return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
554}
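
The two-byte packing in set_rpm_fan()/get_rpm_fan() amounts to storing the RPM pre-multiplied by 2^fcu_rpm_shift as a big-endian 16-bit value; within the 2400..56000 clamp the round trip is lossless. A standalone sketch:

#include <stdio.h>

static int fcu_rpm_shift = 3;	/* value for the common FCU revision */

int main(void)
{
	int rpm = 3000;
	unsigned char buf[2];
	int back;

	/* encode, as set_rpm_fan() does */
	buf[0] = rpm >> (8 - fcu_rpm_shift);
	buf[1] = rpm << fcu_rpm_shift;

	/* decode, as get_rpm_fan() does */
	back = (buf[0] << (8 - fcu_rpm_shift)) | (buf[1] >> fcu_rpm_shift);

	printf("rpm %d -> reg 0x%02x%02x -> rpm %d\n",
	       rpm, buf[0], buf[1], back);
	/* rpm 3000 -> reg 0x5dc0 -> rpm 3000 */
	return 0;
}
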
555
556static int set_pwm_fan(int fan_index, int pwm)
557{
558 unsigned char buf[2];
559 int rc, id;
560
561 if (fcu_fans[fan_index].type != FCU_FAN_PWM)
562 return -EINVAL;
563 id = fcu_fans[fan_index].id;
564 if (id == FCU_FAN_ABSENT_ID)
565 return -EINVAL;
566
567 if (pwm < 10)
568 pwm = 10;
569 else if (pwm > 100)
570 pwm = 100;
571 pwm = (pwm * 2559) / 1000;
572 buf[0] = pwm;
573 rc = fan_write_reg(0x30 + (id * 2), buf, 1);
574 if (rc < 0)
575 return rc;
576 return 0;
577}
578
579static int get_pwm_fan(int fan_index)
580{
581 unsigned char failure;
582 unsigned char active;
583 unsigned char buf[2];
584 int rc, id;
585
586 if (fcu_fans[fan_index].type != FCU_FAN_PWM)
587 return -EINVAL;
588 id = fcu_fans[fan_index].id;
589 if (id == FCU_FAN_ABSENT_ID)
590 return -EINVAL;
591
592 rc = fan_read_reg(0x2b, &failure, 1);
593 if (rc != 1)
594 return -EIO;
595 if ((failure & (1 << id)) != 0)
596 return -EFAULT;
597 rc = fan_read_reg(0x2d, &active, 1);
598 if (rc != 1)
599 return -EIO;
600 if ((active & (1 << id)) == 0)
601 return -ENXIO;
602
603 /* Programmed value or real current speed */
604 rc = fan_read_reg(0x30 + (id * 2), buf, 1);
605 if (rc != 1)
606 return -EIO;
607
608 return (buf[0] * 1000) / 2559;
609}
610
611static void tickle_fcu(void)
612{
613 int pwm;
614
615 pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
616
617 DBG("FCU Tickle, slots fan is: %d\n", pwm);
618 if (pwm < 0)
619 pwm = 100;
620
621 if (!rackmac) {
622 pwm = SLOTS_FAN_DEFAULT_PWM;
623 } else if (pwm < SLOTS_PID_OUTPUT_MIN)
624 pwm = SLOTS_PID_OUTPUT_MIN;
625
626 /* That is hopefully enough to make the FCU happy */
627 set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
628}
629
630
631/*
632 * Utility routine to read the CPU calibration EEPROM data
633 * from the device-tree
634 */
635static int read_eeprom(int cpu, struct mpu_data *out)
636{
637 struct device_node *np;
638 char nodename[64];
639 const u8 *data;
640 int len;
641
642 /* prom.c routine for finding a node by path is a bit brain dead
643 * and requires exact @xxx unit numbers. This is a bit ugly but
644 * will work for these machines
645 */
646 sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
647 np = of_find_node_by_path(nodename);
648 if (np == NULL) {
649 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
650 return -ENODEV;
651 }
652 data = of_get_property(np, "cpuid", &len);
653 if (data == NULL) {
654 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
655 of_node_put(np);
656 return -ENODEV;
657 }
658 memcpy(out, data, sizeof(struct mpu_data));
659 of_node_put(np);
660
661 return 0;
662}
663
664static void fetch_cpu_pumps_minmax(void)
665{
666 struct cpu_pid_state *state0 = &processor_state[0];
667 struct cpu_pid_state *state1 = &processor_state[1];
668 u16 pump_min = 0, pump_max = 0xffff;
669 u16 tmp[4];
670
671 /* Try to fetch pumps min/max infos from eeprom */
672
673 memcpy(&tmp, &state0->mpu.processor_part_num, 8);
674 if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
675 pump_min = max(pump_min, tmp[0]);
676 pump_max = min(pump_max, tmp[1]);
677 }
678 if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
679 pump_min = max(pump_min, tmp[2]);
680 pump_max = min(pump_max, tmp[3]);
681 }
682
683 /* Double check the values, this _IS_ needed as the EEPROM on
 684 * some dual 2.5GHz G5s seem, at least, to have both min & max
 685 * set to the same value ... (grrrr)
686 */
687 if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
688 pump_min = CPU_PUMP_OUTPUT_MIN;
689 pump_max = CPU_PUMP_OUTPUT_MAX;
690 }
691
692 state0->pump_min = state1->pump_min = pump_min;
693 state0->pump_max = state1->pump_max = pump_max;
694}
695
696/*
697 * Now, unfortunately, sysfs doesn't give us a nice void * we could
 698 * pass around to the attribute functions, so we don't really have
 699 * a choice but to implement a bunch of them...
700 *
701 * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
702 * the input twice... I accept patches :)
703 */
704#define BUILD_SHOW_FUNC_FIX(name, data) \
705static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
706{ \
707 ssize_t r; \
708 mutex_lock(&driver_lock); \
709 r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \
710 mutex_unlock(&driver_lock); \
711 return r; \
712}
713#define BUILD_SHOW_FUNC_INT(name, data) \
714static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
715{ \
716 return sprintf(buf, "%d", data); \
717}
718
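
The comment above about taking the lock refers to FIX32TOPRINT expanding its argument twice. The macro itself lives in therm_pm72.h, which this diff does not show; assuming it splits a 16.16 fixed-point value into an integer part and a milli part for the "%d.%03d" format, it would look something like this hypothetical reconstruction:

/* Hypothetical reconstruction of the 16.16 -> "%d.%03d" helper from
 * therm_pm72.h: 'f' is expanded twice, so the value must not change
 * between the two reads, hence the mutex in the _FIX show functions.
 * E.g. f = 0x001e8000 (30.5 in 16.16) prints as "30.500". */
#define FIX32TOPRINT(f)	((f) >> 16), ((((f) & 0xffff) * 1000) >> 16)
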
719BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
720BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
721BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
722BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
723BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
724
725BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
726BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
727BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
728BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
729BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
730
731BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
732BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)
733
734BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
735BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)
736
737BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
738BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)
739
740BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)
741
742static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
743static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
744static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
745static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
746static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);
747
748static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
749static DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
750static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
751static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
752static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);
753
754static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
755static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);
756
757static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
758static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);
759
760static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
761static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);
762
763static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);
764
765/*
766 * CPUs fans control loop
767 */
768
769static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
770{
771 s32 ltemp, volts, amps;
772 int index, rc = 0;
773
774 /* Default (in case of error) */
775 *temp = state->cur_temp;
776 *power = state->cur_power;
777
778 if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
779 index = (state->index == 0) ?
780 CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
781 else
782 index = (state->index == 0) ?
783 CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;
784
785 /* Read current fan status */
786 rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
787 if (rc < 0) {
788 /* XXX What do we do now ? Nothing for now, keep old value, but
789 * return error upstream
790 */
791 DBG(" cpu %d, fan reading error !\n", state->index);
792 } else {
793 state->rpm = rc;
794 DBG(" cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
795 }
796
797 /* Get some sensor readings and scale it */
798 ltemp = read_smon_adc(state, 1);
799 if (ltemp == -1) {
800 /* XXX What do we do now ? */
801 state->overtemp++;
802 if (rc == 0)
803 rc = -EIO;
804 DBG(" cpu %d, temp reading error !\n", state->index);
805 } else {
806 /* Fixup temperature according to diode calibration
807 */
808 DBG(" cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
809 state->index,
810 ltemp, state->mpu.mdiode, state->mpu.bdiode);
811 *temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
812 state->last_temp = *temp;
813 DBG(" temp: %d.%03d\n", FIX32TOPRINT((*temp)));
814 }
815
816 /*
817 * Read voltage & current and calculate power
818 */
819 volts = read_smon_adc(state, 3);
820 amps = read_smon_adc(state, 4);
821
822 /* Scale voltage and current raw sensor values according to fixed scales
823 * obtained in Darwin and calculate power from I and V
824 */
825 volts *= ADC_CPU_VOLTAGE_SCALE;
826 amps *= ADC_CPU_CURRENT_SCALE;
827 *power = (((u64)volts) * ((u64)amps)) >> 16;
828 state->voltage = volts;
829 state->current_a = amps;
830 state->last_power = *power;
831
832 DBG(" cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
833 state->index, FIX32TOPRINT(state->current_a),
834 FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));
835
836 return 0;
837}
838
839static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
840{
841 s32 power_target, integral, derivative, proportional, adj_in_target, sval;
842 s64 integ_p, deriv_p, prop_p, sum;
843 int i;
844
845 /* Calculate power target value (could be done once for all)
846 * and convert to a 16.16 fp number
847 */
848 power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;
849 DBG(" power target: %d.%03d, error: %d.%03d\n",
850 FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));
851
852 /* Store temperature and power in history array */
853 state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
854 state->temp_history[state->cur_temp] = temp;
855 state->cur_power = (state->cur_power + 1) % state->count_power;
856 state->power_history[state->cur_power] = power;
857 state->error_history[state->cur_power] = power_target - power;
858
859 /* If first loop, fill the history table */
860 if (state->first) {
861 for (i = 0; i < (state->count_power - 1); i++) {
862 state->cur_power = (state->cur_power + 1) % state->count_power;
863 state->power_history[state->cur_power] = power;
864 state->error_history[state->cur_power] = power_target - power;
865 }
866 for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
867 state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
868 state->temp_history[state->cur_temp] = temp;
869 }
870 state->first = 0;
871 }
872
873 /* Calculate the integral term normally based on the "power" values */
874 sum = 0;
875 integral = 0;
876 for (i = 0; i < state->count_power; i++)
877 integral += state->error_history[i];
878 integral *= CPU_PID_INTERVAL;
879 DBG(" integral: %08x\n", integral);
880
881 /* Calculate the adjusted input (sense value).
882 * G_r is 12.20
883 * integ is 16.16
884 * so the result is 28.36
885 *
886 * input target is mpu.ttarget, input max is mpu.tmax
887 */
888 integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
889 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
890 sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
891 adj_in_target = (state->mpu.ttarget << 16);
892 if (adj_in_target > sval)
893 adj_in_target = sval;
894 DBG(" adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
895 state->mpu.ttarget);
896
897 /* Calculate the derivative term */
898 derivative = state->temp_history[state->cur_temp] -
899 state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
900 % CPU_TEMP_HISTORY_SIZE];
901 derivative /= CPU_PID_INTERVAL;
902 deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
903 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
904 sum += deriv_p;
905
906 /* Calculate the proportional term */
907 proportional = temp - adj_in_target;
908 prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
909 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
910 sum += prop_p;
911
912 /* Scale sum */
913 sum >>= 36;
914
915 DBG(" sum: %d\n", (int)sum);
916 state->rpm += (s32)sum;
917}
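
The `sum >>= 36` at the end of do_cpu_pid() is plain fixed-point bookkeeping: per the comment above, the gains are 12.20 and the integral is 16.16, so each product is 28.36, and dropping 36 fraction bits leaves an integer RPM adjustment. A standalone illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* gain 1.5 as 12.20 fixed point, error sum 200.0 as 16.16 */
	int64_t gain  = (int64_t)(1.5 * (1 << 20));
	int64_t integ = (int64_t)200 << 16;

	/* 12.20 * 16.16 = 28.36; shifting out 36 fraction bits
	 * leaves the integer contribution to the fan RPM */
	int64_t prod = gain * integ;

	printf("adjustment: %d RPM\n", (int)(prod >> 36));
	/* adjustment: 300 RPM */
	return 0;
}
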
918
919static void do_monitor_cpu_combined(void)
920{
921 struct cpu_pid_state *state0 = &processor_state[0];
922 struct cpu_pid_state *state1 = &processor_state[1];
923 s32 temp0, power0, temp1, power1;
924 s32 temp_combi, power_combi;
925 int rc, intake, pump;
926
927 rc = do_read_one_cpu_values(state0, &temp0, &power0);
928 if (rc < 0) {
929 /* XXX What do we do now ? */
930 }
931 state1->overtemp = 0;
932 rc = do_read_one_cpu_values(state1, &temp1, &power1);
933 if (rc < 0) {
934 /* XXX What do we do now ? */
935 }
936 if (state1->overtemp)
937 state0->overtemp++;
938
939 temp_combi = max(temp0, temp1);
940 power_combi = max(power0, power1);
941
942 /* Check tmax, increment overtemp if we are there. At tmax+8, we go
943 * full blown immediately and try to trigger a shutdown
944 */
945 if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
946 printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
947 temp_combi >> 16);
948 state0->overtemp += CPU_MAX_OVERTEMP / 4;
949 } else if (temp_combi > (state0->mpu.tmax << 16)) {
950 state0->overtemp++;
951 printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
952 temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
953 } else {
954 if (state0->overtemp)
955 printk(KERN_WARNING "Temperature back down to %d\n",
956 temp_combi >> 16);
957 state0->overtemp = 0;
958 }
959 if (state0->overtemp >= CPU_MAX_OVERTEMP)
960 critical_state = 1;
961 if (state0->overtemp > 0) {
962 state0->rpm = state0->mpu.rmaxn_exhaust_fan;
963 state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
964 pump = state0->pump_max;
965 goto do_set_fans;
966 }
967
968 /* Do the PID */
969 do_cpu_pid(state0, temp_combi, power_combi);
970
971 /* Range check */
972 state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
973 state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);
974
975 /* Calculate intake fan speed */
976 intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
977 intake = max(intake, (int)state0->mpu.rminn_intake_fan);
978 intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
979 state0->intake_rpm = intake;
980
981 /* Calculate pump speed */
982 pump = (state0->rpm * state0->pump_max) /
983 state0->mpu.rmaxn_exhaust_fan;
984 pump = min(pump, state0->pump_max);
985 pump = max(pump, state0->pump_min);
986
987 do_set_fans:
988 /* We copy values from state 0 to state 1 for /sysfs */
989 state1->rpm = state0->rpm;
990 state1->intake_rpm = state0->intake_rpm;
991
 992 DBG("** CPU %d RPM: %d Ex, %d In, Pump: %d, overtemp: %d\n",
993 state1->index, (int)state1->rpm, intake, pump, state1->overtemp);
994
995 /* We should check for errors, shouldn't we ? But then, what
996 * do we do once the error occurs ? For FCU notified fan
997 * failures (-EFAULT) we probably want to notify userland
998 * some way...
999 */
1000 set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
1001 set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
1002 set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
1003 set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);
1004
1005 if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
1006 set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
1007 if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
1008 set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
1009}
1010
1011static void do_monitor_cpu_split(struct cpu_pid_state *state)
1012{
1013 s32 temp, power;
1014 int rc, intake;
1015
1016 /* Read current fan status */
1017 rc = do_read_one_cpu_values(state, &temp, &power);
1018 if (rc < 0) {
1019 /* XXX What do we do now ? */
1020 }
1021
1022 /* Check tmax, increment overtemp if we are there. At tmax+8, we go
1023 * full blown immediately and try to trigger a shutdown
1024 */
1025 if (temp >= ((state->mpu.tmax + 8) << 16)) {
1026 printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
1027 " (%d) !\n",
1028 state->index, temp >> 16);
1029 state->overtemp += CPU_MAX_OVERTEMP / 4;
1030 } else if (temp > (state->mpu.tmax << 16)) {
1031 state->overtemp++;
1032 printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
1033 state->index, temp >> 16, state->mpu.tmax, state->overtemp);
1034 } else {
1035 if (state->overtemp)
1036 printk(KERN_WARNING "CPU %d temperature back down to %d\n",
1037 state->index, temp >> 16);
1038 state->overtemp = 0;
1039 }
1040 if (state->overtemp >= CPU_MAX_OVERTEMP)
1041 critical_state = 1;
1042 if (state->overtemp > 0) {
1043 state->rpm = state->mpu.rmaxn_exhaust_fan;
1044 state->intake_rpm = intake = state->mpu.rmaxn_intake_fan;
1045 goto do_set_fans;
1046 }
1047
1048 /* Do the PID */
1049 do_cpu_pid(state, temp, power);
1050
1051 /* Range check */
1052 state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan);
1053 state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan);
1054
1055 /* Calculate intake fan */
1056 intake = (state->rpm * CPU_INTAKE_SCALE) >> 16;
1057 intake = max(intake, (int)state->mpu.rminn_intake_fan);
1058 intake = min(intake, (int)state->mpu.rmaxn_intake_fan);
1059 state->intake_rpm = intake;
1060
1061 do_set_fans:
1062 DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n",
1063 state->index, (int)state->rpm, intake, state->overtemp);
1064
1065 /* We should check for errors, shouldn't we ? But then, what
1066 * do we do once the error occurs ? For FCU notified fan
1067 * failures (-EFAULT) we probably want to notify userland
1068 * some way...
1069 */
1070 if (state->index == 0) {
1071 set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
1072 set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm);
1073 } else {
1074 set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
1075 set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm);
1076 }
1077}
1078
1079static void do_monitor_cpu_rack(struct cpu_pid_state *state)
1080{
1081 s32 temp, power, fan_min;
1082 int rc;
1083
1084 /* Read current fan status */
1085 rc = do_read_one_cpu_values(state, &temp, &power);
1086 if (rc < 0) {
1087 /* XXX What do we do now ? */
1088 }
1089
1090 /* Check tmax, increment overtemp if we are there. At tmax+8, we go
1091 * full blown immediately and try to trigger a shutdown
1092 */
1093 if (temp >= ((state->mpu.tmax + 8) << 16)) {
1094 printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
1095 " (%d) !\n",
1096 state->index, temp >> 16);
1097 state->overtemp = CPU_MAX_OVERTEMP / 4;
1098 } else if (temp > (state->mpu.tmax << 16)) {
1099 state->overtemp++;
1100 printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
1101 state->index, temp >> 16, state->mpu.tmax, state->overtemp);
1102 } else {
1103 if (state->overtemp)
1104 printk(KERN_WARNING "CPU %d temperature back down to %d\n",
1105 state->index, temp >> 16);
1106 state->overtemp = 0;
1107 }
1108 if (state->overtemp >= CPU_MAX_OVERTEMP)
1109 critical_state = 1;
1110 if (state->overtemp > 0) {
1111 state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan;
1112 goto do_set_fans;
1113 }
1114
1115 /* Do the PID */
1116 do_cpu_pid(state, temp, power);
1117
1118 /* Check clamp from dimms */
1119 fan_min = dimm_output_clamp;
1120 fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan);
1121
1122 DBG(" CPU min mpu = %d, min dimm = %d\n",
1123 state->mpu.rminn_intake_fan, dimm_output_clamp);
1124
1125 state->rpm = max(state->rpm, (int)fan_min);
1126 state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan);
1127 state->intake_rpm = state->rpm;
1128
1129 do_set_fans:
1130 DBG("** CPU %d RPM: %d overtemp: %d\n",
1131 state->index, (int)state->rpm, state->overtemp);
1132
1133 /* We should check for errors, shouldn't we ? But then, what
1134 * do we do once the error occurs ? For FCU notified fan
1135 * failures (-EFAULT) we probably want to notify userland
1136 * some way...
1137 */
1138 if (state->index == 0) {
1139 set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm);
1140 set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm);
1141 set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm);
1142 } else {
1143 set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm);
1144 set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm);
1145 set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm);
1146 }
1147}
1148
1149/*
1150 * Initialize the state structure for one CPU control loop
1151 */
1152static int init_processor_state(struct cpu_pid_state *state, int index)
1153{
1154 int err;
1155
1156 state->index = index;
1157 state->first = 1;
1158 state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 4000 : 1000;
1159 state->overtemp = 0;
1160 state->adc_config = 0x00;
1161
1162
1163 if (index == 0)
1164 state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor");
1165 else if (index == 1)
1166 state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor");
1167 if (state->monitor == NULL)
1168 goto fail;
1169
1170 if (read_eeprom(index, &state->mpu))
1171 goto fail;
1172
1173 state->count_power = state->mpu.tguardband;
1174 if (state->count_power > CPU_POWER_HISTORY_SIZE) {
1175 printk(KERN_WARNING "Warning ! too many power history slots\n");
1176 state->count_power = CPU_POWER_HISTORY_SIZE;
1177 }
1178 DBG("CPU %d Using %d power history entries\n", index, state->count_power);
1179
1180 if (index == 0) {
1181 err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
1182 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
1183 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
1184 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
1185 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
1186 } else {
1187 err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
1188 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
1189 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
1190 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
1191 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
1192 }
1193 if (err)
1194 printk(KERN_WARNING "Failed to create some of the attribute"
1195 "files for CPU %d\n", index);
1196
1197 return 0;
1198 fail:
1199 state->monitor = NULL;
1200
1201 return -ENODEV;
1202}
1203
1204/*
1205 * Dispose of the state data for one CPU control loop
1206 */
1207static void dispose_processor_state(struct cpu_pid_state *state)
1208{
1209 if (state->monitor == NULL)
1210 return;
1211
1212 if (state->index == 0) {
1213 device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature);
1214 device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage);
1215 device_remove_file(&of_dev->dev, &dev_attr_cpu0_current);
1216 device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
1217 device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
1218 } else {
1219 device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature);
1220 device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage);
1221 device_remove_file(&of_dev->dev, &dev_attr_cpu1_current);
1222 device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
1223 device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
1224 }
1225
1226 state->monitor = NULL;
1227}
1228
1229/*
1230 * Motherboard backside & U3 heatsink fan control loop
1231 */
1232static void do_monitor_backside(struct backside_pid_state *state)
1233{
1234 s32 temp, integral, derivative, fan_min;
1235 s64 integ_p, deriv_p, prop_p, sum;
1236 int i, rc;
1237
1238 if (--state->ticks != 0)
1239 return;
1240 state->ticks = backside_params.interval;
1241
1242 DBG("backside:\n");
1243
1244 /* Check fan status */
1245 rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX);
1246 if (rc < 0) {
1247 printk(KERN_WARNING "Error %d reading backside fan !\n", rc);
1248 /* XXX What do we do now ? */
1249 } else
1250 state->pwm = rc;
1251 DBG(" current pwm: %d\n", state->pwm);
1252
1253 /* Get some sensor readings */
1254 temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16;
1255 state->last_temp = temp;
1256 DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
1257 FIX32TOPRINT(backside_params.input_target));
1258
1259 /* Store temperature and error in history array */
1260 state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE;
1261 state->sample_history[state->cur_sample] = temp;
1262 state->error_history[state->cur_sample] = temp - backside_params.input_target;
1263
1264 /* If first loop, fill the history table */
1265 if (state->first) {
1266 for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) {
1267 state->cur_sample = (state->cur_sample + 1) %
1268 BACKSIDE_PID_HISTORY_SIZE;
1269 state->sample_history[state->cur_sample] = temp;
1270 state->error_history[state->cur_sample] =
1271 temp - backside_params.input_target;
1272 }
1273 state->first = 0;
1274 }
1275
1276 /* Calculate the integral term */
1277 sum = 0;
1278 integral = 0;
1279 for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++)
1280 integral += state->error_history[i];
1281 integral *= backside_params.interval;
1282 DBG(" integral: %08x\n", integral);
1283 integ_p = ((s64)backside_params.G_r) * (s64)integral;
1284 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
1285 sum += integ_p;
1286
1287 /* Calculate the derivative term */
1288 derivative = state->error_history[state->cur_sample] -
1289 state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1)
1290 % BACKSIDE_PID_HISTORY_SIZE];
1291 derivative /= backside_params.interval;
1292 deriv_p = ((s64)backside_params.G_d) * (s64)derivative;
1293 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
1294 sum += deriv_p;
1295
1296 /* Calculate the proportional term */
1297 prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]);
1298 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
1299 sum += prop_p;
1300
1301 /* Scale sum */
1302 sum >>= 36;
1303
1304 DBG(" sum: %d\n", (int)sum);
1305 if (backside_params.additive)
1306 state->pwm += (s32)sum;
1307 else
1308 state->pwm = sum;
1309
1310 /* Check for clamp */
1311 fan_min = (dimm_output_clamp * 100) / 14000;
1312 fan_min = max(fan_min, backside_params.output_min);
1313
1314 state->pwm = max(state->pwm, fan_min);
1315 state->pwm = min(state->pwm, backside_params.output_max);
1316
1317 DBG("** BACKSIDE PWM: %d\n", (int)state->pwm);
1318 set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm);
1319}
1320
1321/*
1322 * Initialize the state structure for the backside fan control loop
1323 */
1324static int init_backside_state(struct backside_pid_state *state)
1325{
1326 struct device_node *u3;
1327 int u3h = 1; /* conservative by default */
1328 int err;
1329
1330 /*
1331 * There are different PID params for machines with U3 and machines
1332 * with U3H, pick the right ones now
1333 */
1334 u3 = of_find_node_by_path("/u3@0,f8000000");
1335 if (u3 != NULL) {
1336 const u32 *vers = of_get_property(u3, "device-rev", NULL);
1337 if (vers)
1338 if (((*vers) & 0x3f) < 0x34)
1339 u3h = 0;
1340 of_node_put(u3);
1341 }
1342
1343 if (rackmac) {
1344 backside_params.G_d = BACKSIDE_PID_RACK_G_d;
1345 backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET;
1346 backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
1347 backside_params.interval = BACKSIDE_PID_RACK_INTERVAL;
1348 backside_params.G_p = BACKSIDE_PID_RACK_G_p;
1349 backside_params.G_r = BACKSIDE_PID_G_r;
1350 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1351 backside_params.additive = 0;
1352 } else if (u3h) {
1353 backside_params.G_d = BACKSIDE_PID_U3H_G_d;
1354 backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET;
1355 backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
1356 backside_params.interval = BACKSIDE_PID_INTERVAL;
1357 backside_params.G_p = BACKSIDE_PID_G_p;
1358 backside_params.G_r = BACKSIDE_PID_G_r;
1359 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1360 backside_params.additive = 1;
1361 } else {
1362 backside_params.G_d = BACKSIDE_PID_U3_G_d;
1363 backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET;
1364 backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN;
1365 backside_params.interval = BACKSIDE_PID_INTERVAL;
1366 backside_params.G_p = BACKSIDE_PID_G_p;
1367 backside_params.G_r = BACKSIDE_PID_G_r;
1368 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1369 backside_params.additive = 1;
1370 }
1371
1372 state->ticks = 1;
1373 state->first = 1;
1374 state->pwm = 50;
1375
1376 state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp");
1377 if (state->monitor == NULL)
1378 return -ENODEV;
1379
1380 err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
1381 err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
1382 if (err)
1383 printk(KERN_WARNING "Failed to create attribute file(s)"
1384 " for backside fan\n");
1385
1386 return 0;
1387}
1388
1389/*
1390 * Dispose of the state data for the backside control loop
1391 */
1392static void dispose_backside_state(struct backside_pid_state *state)
1393{
1394 if (state->monitor == NULL)
1395 return;
1396
1397 device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
1398 device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
1399
1400 state->monitor = NULL;
1401}
1402
1403/*
1404 * Drives bay fan control loop
1405 */
1406static void do_monitor_drives(struct drives_pid_state *state)
1407{
1408 s32 temp, integral, derivative;
1409 s64 integ_p, deriv_p, prop_p, sum;
1410 int i, rc;
1411
1412 if (--state->ticks != 0)
1413 return;
1414 state->ticks = DRIVES_PID_INTERVAL;
1415
1416 DBG("drives:\n");
1417
1418 /* Check fan status */
1419 rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED);
1420 if (rc < 0) {
1421 printk(KERN_WARNING "Error %d reading drives fan !\n", rc);
1422 /* XXX What do we do now ? */
1423 } else
1424 state->rpm = rc;
1425 DBG(" current rpm: %d\n", state->rpm);
1426
1427 /* Get some sensor readings */
1428 temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
1429 DS1775_TEMP)) << 8;
1430 state->last_temp = temp;
1431 DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
1432 FIX32TOPRINT(DRIVES_PID_INPUT_TARGET));
1433
1434 /* Store temperature and error in history array */
1435 state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE;
1436 state->sample_history[state->cur_sample] = temp;
1437 state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET;
1438
1439 /* If first loop, fill the history table */
1440 if (state->first) {
1441 for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) {
1442 state->cur_sample = (state->cur_sample + 1) %
1443 DRIVES_PID_HISTORY_SIZE;
1444 state->sample_history[state->cur_sample] = temp;
1445 state->error_history[state->cur_sample] =
1446 temp - DRIVES_PID_INPUT_TARGET;
1447 }
1448 state->first = 0;
1449 }
1450
1451 /* Calculate the integral term */
1452 sum = 0;
1453 integral = 0;
1454 for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++)
1455 integral += state->error_history[i];
1456 integral *= DRIVES_PID_INTERVAL;
1457 DBG(" integral: %08x\n", integral);
1458 integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral;
1459 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
1460 sum += integ_p;
1461
1462 /* Calculate the derivative term */
1463 derivative = state->error_history[state->cur_sample] -
1464 state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1)
1465 % DRIVES_PID_HISTORY_SIZE];
1466 derivative /= DRIVES_PID_INTERVAL;
1467 deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative;
1468 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
1469 sum += deriv_p;
1470
1471 /* Calculate the proportional term */
1472 prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
1473 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
1474 sum += prop_p;
1475
1476 /* Scale sum */
1477 sum >>= 36;
1478
1479 DBG(" sum: %d\n", (int)sum);
1480 state->rpm += (s32)sum;
1481
1482 state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN);
1483 state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX);
1484
1485 DBG("** DRIVES RPM: %d\n", (int)state->rpm);
1486 set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm);
1487}
1488
1489/*
1490 * Initialize the state structure for the drives bay fan control loop
1491 */
1492static int init_drives_state(struct drives_pid_state *state)
1493{
1494 int err;
1495
1496 state->ticks = 1;
1497 state->first = 1;
1498 state->rpm = 1000;
1499
1500 state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp");
1501 if (state->monitor == NULL)
1502 return -ENODEV;
1503
1504 err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
1505 err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
1506 if (err)
1507 printk(KERN_WARNING "Failed to create attribute file(s)"
1508 " for drives bay fan\n");
1509
1510 return 0;
1511}
1512
1513/*
1514 * Dispose of the state data for the drives control loop
1515 */
1516static void dispose_drives_state(struct drives_pid_state *state)
1517{
1518 if (state->monitor == NULL)
1519 return;
1520
1521 device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
1522 device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
1523
1524 state->monitor = NULL;
1525}
1526
1527/*
1528 * DIMMs temp control loop
1529 */
1530static void do_monitor_dimms(struct dimm_pid_state *state)
1531{
1532 s32 temp, integral, derivative, fan_min;
1533 s64 integ_p, deriv_p, prop_p, sum;
1534 int i;
1535
1536 if (--state->ticks != 0)
1537 return;
1538 state->ticks = DIMM_PID_INTERVAL;
1539
1540 DBG("DIMM:\n");
1541
1542 DBG(" current value: %d\n", state->output);
1543
1544 temp = read_lm87_reg(state->monitor, LM87_INT_TEMP);
1545 if (temp < 0)
1546 return;
1547 temp <<= 16;
1548 state->last_temp = temp;
1549 DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
1550 FIX32TOPRINT(DIMM_PID_INPUT_TARGET));
1551
1552 /* Store temperature and error in history array */
1553 state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE;
1554 state->sample_history[state->cur_sample] = temp;
1555 state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET;
1556
1557 /* If first loop, fill the history table */
1558 if (state->first) {
1559 for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) {
1560 state->cur_sample = (state->cur_sample + 1) %
1561 DIMM_PID_HISTORY_SIZE;
1562 state->sample_history[state->cur_sample] = temp;
1563 state->error_history[state->cur_sample] =
1564 temp - DIMM_PID_INPUT_TARGET;
1565 }
1566 state->first = 0;
1567 }
1568
1569 /* Calculate the integral term */
1570 sum = 0;
1571 integral = 0;
1572 for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++)
1573 integral += state->error_history[i];
1574 integral *= DIMM_PID_INTERVAL;
1575 DBG(" integral: %08x\n", integral);
1576 integ_p = ((s64)DIMM_PID_G_r) * (s64)integral;
1577 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
1578 sum += integ_p;
1579
1580 /* Calculate the derivative term */
1581 derivative = state->error_history[state->cur_sample] -
1582 state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1)
1583 % DIMM_PID_HISTORY_SIZE];
1584 derivative /= DIMM_PID_INTERVAL;
1585 deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative;
1586 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
1587 sum += deriv_p;
1588
1589 /* Calculate the proportional term */
1590 prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
1591 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
1592 sum += prop_p;
1593
1594 /* Scale sum */
1595 sum >>= 36;
1596
1597 DBG(" sum: %d\n", (int)sum);
1598 state->output = (s32)sum;
1599 state->output = max(state->output, DIMM_PID_OUTPUT_MIN);
1600 state->output = min(state->output, DIMM_PID_OUTPUT_MAX);
1601 dimm_output_clamp = state->output;
1602
1603 DBG("** DIMM clamp value: %d\n", (int)state->output);
1604
1605	/* Backside PID runs only every 5 seconds; force backside fan clamping now */
1606 fan_min = (dimm_output_clamp * 100) / 14000;
1607 fan_min = max(fan_min, backside_params.output_min);
1608 if (backside_state.pwm < fan_min) {
1609 backside_state.pwm = fan_min;
1610 DBG(" -> applying clamp to backside fan now: %d !\n", fan_min);
1611 set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min);
1612 }
1613}
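The clamp forwarded to the backside fan at the end of this loop is a straight linear rescale of the DIMM output range onto the backside PWM range; a worked sketch (hypothetical helper name):

/* Map the DIMM loop output (DIMM_PID_OUTPUT_MIN..MAX, i.e. 4000..14000)
 * onto a backside PWM percentage: 14000 -> 100, 4000 -> 28 (integer
 * division). The caller then raises it to backside_params.output_min. */
static int dimm_clamp_to_backside_pwm(int dimm_output)
{
	return (dimm_output * 100) / 14000;
}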
1614
1615/*
1616 * Initialize the state structure for the DIMM temp control loop
1617 */
1618static int init_dimms_state(struct dimm_pid_state *state)
1619{
1620 state->ticks = 1;
1621 state->first = 1;
1622 state->output = 4000;
1623
1624 state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp");
1625 if (state->monitor == NULL)
1626 return -ENODEV;
1627
1628 if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature))
1629 printk(KERN_WARNING "Failed to create attribute file"
1630 " for DIMM temperature\n");
1631
1632 return 0;
1633}
1634
1635/*
1636 * Dispose of the state data for the DIMM control loop
1637 */
1638static void dispose_dimms_state(struct dimm_pid_state *state)
1639{
1640 if (state->monitor == NULL)
1641 return;
1642
1643 device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
1644
1645 state->monitor = NULL;
1646}
1647
1648/*
1649 * Slots fan control loop
1650 */
1651static void do_monitor_slots(struct slots_pid_state *state)
1652{
1653 s32 temp, integral, derivative;
1654 s64 integ_p, deriv_p, prop_p, sum;
1655 int i, rc;
1656
1657 if (--state->ticks != 0)
1658 return;
1659 state->ticks = SLOTS_PID_INTERVAL;
1660
1661 DBG("slots:\n");
1662
1663 /* Check fan status */
1664 rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
1665 if (rc < 0) {
1666		printk(KERN_WARNING "Error %d reading slots fan!\n", rc);
1667		/* XXX What do we do now? */
1668 } else
1669 state->pwm = rc;
1670 DBG(" current pwm: %d\n", state->pwm);
1671
1672 /* Get some sensor readings */
1673 temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
1674 DS1775_TEMP)) << 8;
1675 state->last_temp = temp;
1676 DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
1677 FIX32TOPRINT(SLOTS_PID_INPUT_TARGET));
1678
1679 /* Store temperature and error in history array */
1680 state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE;
1681 state->sample_history[state->cur_sample] = temp;
1682 state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET;
1683
1684 /* If first loop, fill the history table */
1685 if (state->first) {
1686 for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) {
1687 state->cur_sample = (state->cur_sample + 1) %
1688 SLOTS_PID_HISTORY_SIZE;
1689 state->sample_history[state->cur_sample] = temp;
1690 state->error_history[state->cur_sample] =
1691 temp - SLOTS_PID_INPUT_TARGET;
1692 }
1693 state->first = 0;
1694 }
1695
1696 /* Calculate the integral term */
1697 sum = 0;
1698 integral = 0;
1699 for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++)
1700 integral += state->error_history[i];
1701 integral *= SLOTS_PID_INTERVAL;
1702 DBG(" integral: %08x\n", integral);
1703 integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral;
1704 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
1705 sum += integ_p;
1706
1707 /* Calculate the derivative term */
1708 derivative = state->error_history[state->cur_sample] -
1709 state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1)
1710 % SLOTS_PID_HISTORY_SIZE];
1711 derivative /= SLOTS_PID_INTERVAL;
1712 deriv_p = ((s64)SLOTS_PID_G_d) * (s64)derivative;
1713 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
1714 sum += deriv_p;
1715
1716 /* Calculate the proportional term */
1717 prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
1718 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
1719 sum += prop_p;
1720
1721 /* Scale sum */
1722 sum >>= 36;
1723
1724 DBG(" sum: %d\n", (int)sum);
1725 state->pwm = (s32)sum;
1726
1727 state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN);
1728 state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX);
1729
1730	DBG("** SLOTS PWM: %d\n", (int)state->pwm);
1731 set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm);
1732}
1733
1734/*
1735 * Initialize the state structure for the slots bay fan control loop
1736 */
1737static int init_slots_state(struct slots_pid_state *state)
1738{
1739 int err;
1740
1741 state->ticks = 1;
1742 state->first = 1;
1743 state->pwm = 50;
1744
1745 state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp");
1746 if (state->monitor == NULL)
1747 return -ENODEV;
1748
1749 err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
1750 err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
1751 if (err)
1752 printk(KERN_WARNING "Failed to create attribute file(s)"
1753 " for slots bay fan\n");
1754
1755 return 0;
1756}
1757
1758/*
1759 * Dispose of the state data for the slots control loop
1760 */
1761static void dispose_slots_state(struct slots_pid_state *state)
1762{
1763 if (state->monitor == NULL)
1764 return;
1765
1766 device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
1767 device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
1768
1769 state->monitor = NULL;
1770}
1771
1772
1773static int call_critical_overtemp(void)
1774{
1775 char *argv[] = { critical_overtemp_path, NULL };
1776 static char *envp[] = { "HOME=/",
1777 "TERM=linux",
1778 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
1779 NULL };
1780
1781 return call_usermodehelper(critical_overtemp_path,
1782 argv, envp, UMH_WAIT_EXEC);
1783}
1784
1785
1786/*
1787 * Here's the kernel thread that calls the various control loops
1788 */
1789static int main_control_loop(void *x)
1790{
1791 DBG("main_control_loop started\n");
1792
1793 mutex_lock(&driver_lock);
1794
1795 if (start_fcu() < 0) {
1796 printk(KERN_ERR "kfand: failed to start FCU\n");
1797 mutex_unlock(&driver_lock);
1798 goto out;
1799 }
1800
1801 /* Set the PCI fan once for now on non-RackMac */
1802 if (!rackmac)
1803 set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
1804
1805 /* Initialize ADCs */
1806 initialize_adc(&processor_state[0]);
1807 if (processor_state[1].monitor != NULL)
1808 initialize_adc(&processor_state[1]);
1809
1810 fcu_tickle_ticks = FCU_TICKLE_TICKS;
1811
1812 mutex_unlock(&driver_lock);
1813
1814 while (state == state_attached) {
1815 unsigned long elapsed, start;
1816
1817 start = jiffies;
1818
1819 mutex_lock(&driver_lock);
1820
1821 /* Tickle the FCU just in case */
1822 if (--fcu_tickle_ticks < 0) {
1823 fcu_tickle_ticks = FCU_TICKLE_TICKS;
1824 tickle_fcu();
1825 }
1826
1827		/* First, we always calculate the new DIMM state on an Xserve */
1828 if (rackmac)
1829 do_monitor_dimms(&dimms_state);
1830
1831 /* Then, the CPUs */
1832 if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
1833 do_monitor_cpu_combined();
1834 else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
1835 do_monitor_cpu_rack(&processor_state[0]);
1836 if (processor_state[1].monitor != NULL)
1837 do_monitor_cpu_rack(&processor_state[1]);
1838			// TODO: deal better with uniprocessor (UP) machines
1839 } else {
1840 do_monitor_cpu_split(&processor_state[0]);
1841 if (processor_state[1].monitor != NULL)
1842 do_monitor_cpu_split(&processor_state[1]);
1843			// TODO: deal better with uniprocessor (UP) machines
1844 }
1845 /* Then, the rest */
1846 do_monitor_backside(&backside_state);
1847 if (rackmac)
1848 do_monitor_slots(&slots_state);
1849 else
1850 do_monitor_drives(&drives_state);
1851 mutex_unlock(&driver_lock);
1852
1853 if (critical_state == 1) {
1854 printk(KERN_WARNING "Temperature control detected a critical condition\n");
1855 printk(KERN_WARNING "Attempting to shut down...\n");
1856 if (call_critical_overtemp()) {
1857 printk(KERN_WARNING "Can't call %s, power off now!\n",
1858 critical_overtemp_path);
1859 machine_power_off();
1860 }
1861 }
1862 if (critical_state > 0)
1863 critical_state++;
1864 if (critical_state > MAX_CRITICAL_STATE) {
1865			printk(KERN_WARNING "Shutdown timed out, power off now!\n");
1866 machine_power_off();
1867 }
1868
1869 // FIXME: Deal with signals
1870 elapsed = jiffies - start;
1871 if (elapsed < HZ)
1872 schedule_timeout_interruptible(HZ - elapsed);
1873 }
1874
1875 out:
1876 DBG("main_control_loop ended\n");
1877
1878 ctrl_task = 0;
1879 complete_and_exit(&ctrl_complete, 0);
1880}
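The pacing at the bottom of the loop targets one iteration per second: it sleeps only for whatever is left of the current second, and because jiffies arithmetic is unsigned, the subtraction stays correct across counter wraparound. A standalone sketch of the same pattern (hypothetical name, assuming <linux/jiffies.h> and <linux/sched.h>):

/* Sleep out the remainder of a 1-second period that began at `start`. */
static void pace_one_second(unsigned long start)
{
	unsigned long elapsed = jiffies - start;	/* wrap-safe */

	if (elapsed < HZ)
		schedule_timeout_interruptible(HZ - elapsed);
}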
1881
1882/*
1883 * Dispose the control loops when tearing down
1884 */
1885static void dispose_control_loops(void)
1886{
1887 dispose_processor_state(&processor_state[0]);
1888 dispose_processor_state(&processor_state[1]);
1889 dispose_backside_state(&backside_state);
1890 dispose_drives_state(&drives_state);
1891 dispose_slots_state(&slots_state);
1892 dispose_dimms_state(&dimms_state);
1893}
1894
1895/*
1896 * Create the control loops. U3-0 i2c bus is up, so we can now
1897 * get to the various sensors
1898 */
1899static int create_control_loops(void)
1900{
1901 struct device_node *np;
1902
1903	/* Count CPUs from the device-tree; we don't care how many are
1904 * actually used by Linux
1905 */
1906 cpu_count = 0;
1907 for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));)
1908 cpu_count++;
1909
1910 DBG("counted %d CPUs in the device-tree\n", cpu_count);
1911
1912 /* Decide the type of PID algorithm to use based on the presence of
1913	 * the pumps. That may not be the best heuristic, but it is good
1914	 * enough for now
1915 */
1916 if (rackmac)
1917 cpu_pid_type = CPU_PID_TYPE_RACKMAC;
1918 else if (of_machine_is_compatible("PowerMac7,3")
1919 && (cpu_count > 1)
1920 && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID
1921 && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) {
1922		printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm!\n");
1923 cpu_pid_type = CPU_PID_TYPE_COMBINED;
1924 } else
1925 cpu_pid_type = CPU_PID_TYPE_SPLIT;
1926
1927 /* Create control loops for everything. If any fail, everything
1928 * fails
1929 */
1930 if (init_processor_state(&processor_state[0], 0))
1931 goto fail;
1932 if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
1933 fetch_cpu_pumps_minmax();
1934
1935 if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
1936 goto fail;
1937 if (init_backside_state(&backside_state))
1938 goto fail;
1939 if (rackmac && init_dimms_state(&dimms_state))
1940 goto fail;
1941 if (rackmac && init_slots_state(&slots_state))
1942 goto fail;
1943 if (!rackmac && init_drives_state(&drives_state))
1944 goto fail;
1945
1946	DBG("all control loops up!\n");
1947
1948 return 0;
1949
1950 fail:
1951 DBG("failure creating control loops, disposing\n");
1952
1953 dispose_control_loops();
1954
1955 return -ENODEV;
1956}
1957
1958/*
1959 * Start the control loops after everything is up; that is, create
1960 * the thread that will make them run
1961 */
1962static void start_control_loops(void)
1963{
1964 init_completion(&ctrl_complete);
1965
1966 ctrl_task = kthread_run(main_control_loop, NULL, "kfand");
1967}
1968
1969/*
1970 * Stop the control loops when tearing down
1971 */
1972static void stop_control_loops(void)
1973{
1974 if (ctrl_task)
1975 wait_for_completion(&ctrl_complete);
1976}
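The start/stop pair above uses a completion as the join point: the thread signals ctrl_complete as it exits, and stop_control_loops() blocks until that happens. A minimal sketch of the pattern (hypothetical names; in the driver the exit condition is `state != state_attached`):

static DECLARE_COMPLETION(done);
static int stop;

static int worker(void *unused)
{
	while (!stop)
		do_work();			/* hypothetical work item */
	complete_and_exit(&done, 0);		/* wake waiters, then exit */
}

static void teardown(void)
{
	stop = 1;
	wait_for_completion(&done);		/* returns once worker is gone */
}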
1977
1978/*
1979 * Attach to the i2c FCU after detecting U3-1 bus
1980 */
1981static int attach_fcu(void)
1982{
1983 fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu");
1984 if (fcu == NULL)
1985 return -ENODEV;
1986
1987 DBG("FCU attached\n");
1988
1989 return 0;
1990}
1991
1992/*
1993 * Detach from the i2c FCU when tearing down
1994 */
1995static void detach_fcu(void)
1996{
1997 fcu = NULL;
1998}
1999
2000/*
2001 * Attach to the i2c controller. We probe the various chips based
2002 * on the device-tree nodes and build everything for the driver to
2003 * run, we then kick the driver monitoring thread
2004 */
2005static int therm_pm72_attach(struct i2c_adapter *adapter)
2006{
2007 mutex_lock(&driver_lock);
2008
2009 /* Check state */
2010 if (state == state_detached)
2011 state = state_attaching;
2012 if (state != state_attaching) {
2013 mutex_unlock(&driver_lock);
2014 return 0;
2015 }
2016
2017 /* Check if we are looking for one of these */
2018 if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) {
2019 u3_0 = adapter;
2020 DBG("found U3-0\n");
2021 if (k2 || !rackmac)
2022 if (create_control_loops())
2023 u3_0 = NULL;
2024 } else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) {
2025 u3_1 = adapter;
2026 DBG("found U3-1, attaching FCU\n");
2027 if (attach_fcu())
2028 u3_1 = NULL;
2029 } else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) {
2030 k2 = adapter;
2031 DBG("Found K2\n");
2032 if (u3_0 && rackmac)
2033 if (create_control_loops())
2034 k2 = NULL;
2035 }
2036 /* We got all we need, start control loops */
2037 if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) {
2038 DBG("everything up, starting control loops\n");
2039 state = state_attached;
2040 start_control_loops();
2041 }
2042 mutex_unlock(&driver_lock);
2043
2044 return 0;
2045}
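The readiness test at the end of the attach path reduces to a single predicate; spelled out for clarity (illustrative fragment only):

/* Desktops (PowerMac7,2 / 7,3) need both U3 buses before the loops can
 * start; an Xserve (rackmac) additionally needs the K2 "mac-io 0" bus. */
bool ready = u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac);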
2046
2047static int therm_pm72_probe(struct i2c_client *client,
2048 const struct i2c_device_id *id)
2049{
2050 /* Always succeed, the real work was done in therm_pm72_attach() */
2051 return 0;
2052}
2053
2054/*
2055 * Called when any of the devices which participate in thermal management
2056 * is going away.
2057 */
2058static int therm_pm72_remove(struct i2c_client *client)
2059{
2060 struct i2c_adapter *adapter = client->adapter;
2061
2062 mutex_lock(&driver_lock);
2063
2064 if (state != state_detached)
2065 state = state_detaching;
2066
2067 /* Stop control loops if any */
2068 DBG("stopping control loops\n");
2069 mutex_unlock(&driver_lock);
2070 stop_control_loops();
2071 mutex_lock(&driver_lock);
2072
2073 if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
2074 DBG("lost U3-0, disposing control loops\n");
2075 dispose_control_loops();
2076 u3_0 = NULL;
2077 }
2078
2079 if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) {
2080 DBG("lost U3-1, detaching FCU\n");
2081 detach_fcu();
2082 u3_1 = NULL;
2083 }
2084 if (u3_0 == NULL && u3_1 == NULL)
2085 state = state_detached;
2086
2087 mutex_unlock(&driver_lock);
2088
2089 return 0;
2090}
2091
2092/*
2093 * i2c_driver structure to attach to the host i2c controller
2094 */
2095
2096static const struct i2c_device_id therm_pm72_id[] = {
2097 /*
2098	 * Fake device name; thermal management is done by several
2099 * chips but we don't need to differentiate between them at
2100 * this point.
2101 */
2102 { "therm_pm72", 0 },
2103 { }
2104};
2105
2106static struct i2c_driver therm_pm72_driver = {
2107 .driver = {
2108 .name = "therm_pm72",
2109 },
2110 .attach_adapter = therm_pm72_attach,
2111 .probe = therm_pm72_probe,
2112 .remove = therm_pm72_remove,
2113 .id_table = therm_pm72_id,
2114};
2115
2116static int fan_check_loc_match(const char *loc, int fan)
2117{
2118 char tmp[64];
2119 char *c, *e;
2120
2121 strlcpy(tmp, fcu_fans[fan].loc, 64);
2122
2123 c = tmp;
2124 for (;;) {
2125 e = strchr(c, ',');
2126 if (e)
2127 *e = 0;
2128 if (strcmp(loc, c) == 0)
2129 return 1;
2130 if (e == NULL)
2131 break;
2132 c = e + 1;
2133 }
2134 return 0;
2135}
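The loc strings in the fan table are comma-separated lists of device-tree locations, so a single table slot can match several names; for example (values illustrative, not from a real machine):

/* With fcu_fans[i].loc = "CPU A INTAKE,CPU A PUMP":
 *	fan_check_loc_match("CPU A INTAKE", i)	returns 1
 *	fan_check_loc_match("CPU A PUMP", i)	returns 1
 *	fan_check_loc_match("CPU B PUMP", i)	returns 0
 */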
2136
2137static void fcu_lookup_fans(struct device_node *fcu_node)
2138{
2139 struct device_node *np = NULL;
2140 int i;
2141
2142 /* The table is filled by default with values that are suitable
2143	 * for the old machines without device-tree information. We scan
2144 * the device-tree and override those values with whatever is
2145 * there
2146 */
2147
2148 DBG("Looking up FCU controls in device-tree...\n");
2149
2150 while ((np = of_get_next_child(fcu_node, np)) != NULL) {
2151 int type = -1;
2152 const char *loc;
2153 const u32 *reg;
2154
2155 DBG(" control: %s, type: %s\n", np->name, np->type);
2156
2157 /* Detect control type */
2158 if (!strcmp(np->type, "fan-rpm-control") ||
2159 !strcmp(np->type, "fan-rpm"))
2160 type = FCU_FAN_RPM;
2161 if (!strcmp(np->type, "fan-pwm-control") ||
2162 !strcmp(np->type, "fan-pwm"))
2163 type = FCU_FAN_PWM;
2164 /* Only care about fans for now */
2165 if (type == -1)
2166 continue;
2167
2168 /* Lookup for a matching location */
2169 loc = of_get_property(np, "location", NULL);
2170 reg = of_get_property(np, "reg", NULL);
2171 if (loc == NULL || reg == NULL)
2172 continue;
2173 DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
2174
2175 for (i = 0; i < FCU_FAN_COUNT; i++) {
2176 int fan_id;
2177
2178 if (!fan_check_loc_match(loc, i))
2179 continue;
2180 DBG(" location match, index: %d\n", i);
2181 fcu_fans[i].id = FCU_FAN_ABSENT_ID;
2182 if (type != fcu_fans[i].type) {
2183 printk(KERN_WARNING "therm_pm72: Fan type mismatch "
2184 "in device-tree for %s\n", np->full_name);
2185 break;
2186 }
2187 if (type == FCU_FAN_RPM)
2188 fan_id = ((*reg) - 0x10) / 2;
2189 else
2190 fan_id = ((*reg) - 0x30) / 2;
2191 if (fan_id > 7) {
2192 printk(KERN_WARNING "therm_pm72: Can't parse "
2193 "fan ID in device-tree for %s\n", np->full_name);
2194 break;
2195 }
2196 DBG(" fan id -> %d, type -> %d\n", fan_id, type);
2197 fcu_fans[i].id = fan_id;
2198 }
2199 }
2200
2201 /* Now dump the array */
2202 printk(KERN_INFO "Detected fan controls:\n");
2203 for (i = 0; i < FCU_FAN_COUNT; i++) {
2204 if (fcu_fans[i].id == FCU_FAN_ABSENT_ID)
2205 continue;
2206 printk(KERN_INFO " %d: %s fan, id %d, location: %s\n", i,
2207 fcu_fans[i].type == FCU_FAN_RPM ? "RPM" : "PWM",
2208 fcu_fans[i].id, fcu_fans[i].loc);
2209 }
2210}
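The register-to-fan-ID arithmetic above assumes RPM fan registers start at 0x10 and PWM fan registers at 0x30, with each fan taking two bytes; a worked decoding (illustrative values):

/* fan-rpm node, reg = 0x16: fan_id = (0x16 - 0x10) / 2 = 3
 * fan-pwm node, reg = 0x32: fan_id = (0x32 - 0x30) / 2 = 1
 * Any id decoding above 7 is rejected as unparseable. */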
2211
2212static int fcu_of_probe(struct platform_device* dev)
2213{
2214 state = state_detached;
2215 of_dev = dev;
2216
2217 dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
2218
2219 /* Lookup the fans in the device tree */
2220 fcu_lookup_fans(dev->dev.of_node);
2221
2222 /* Add the driver */
2223 return i2c_add_driver(&therm_pm72_driver);
2224}
2225
2226static int fcu_of_remove(struct platform_device* dev)
2227{
2228 i2c_del_driver(&therm_pm72_driver);
2229
2230 return 0;
2231}
2232
2233static const struct of_device_id fcu_match[] =
2234{
2235 {
2236 .type = "fcu",
2237 },
2238 {},
2239};
2240MODULE_DEVICE_TABLE(of, fcu_match);
2241
2242static struct platform_driver fcu_of_platform_driver =
2243{
2244 .driver = {
2245 .name = "temperature",
2246 .of_match_table = fcu_match,
2247 },
2248 .probe = fcu_of_probe,
2249 .remove = fcu_of_remove
2250};
2251
2252/*
2253 * Check machine type, attach to i2c controller
2254 */
2255static int __init therm_pm72_init(void)
2256{
2257 rackmac = of_machine_is_compatible("RackMac3,1");
2258
2259 if (!of_machine_is_compatible("PowerMac7,2") &&
2260 !of_machine_is_compatible("PowerMac7,3") &&
2261 !rackmac)
2262 return -ENODEV;
2263
2264 return platform_driver_register(&fcu_of_platform_driver);
2265}
2266
2267static void __exit therm_pm72_exit(void)
2268{
2269 platform_driver_unregister(&fcu_of_platform_driver);
2270}
2271
2272module_init(therm_pm72_init);
2273module_exit(therm_pm72_exit);
2274
2275MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
2276MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control");
2277MODULE_LICENSE("GPL");
2278
diff --git a/drivers/macintosh/therm_pm72.h b/drivers/macintosh/therm_pm72.h
deleted file mode 100644
index df3680e2a22f..000000000000
--- a/drivers/macintosh/therm_pm72.h
+++ /dev/null
@@ -1,326 +0,0 @@
1#ifndef __THERM_PMAC_7_2_H__
2#define __THERM_PMAC_7_2_H__
3
4typedef unsigned short fu16;
5typedef int fs32;
6typedef short fs16;
7
8struct mpu_data
9{
10 u8 signature; /* 0x00 - EEPROM sig. */
11 u8 bytes_used; /* 0x01 - Bytes used in eeprom (160 ?) */
12 u8 size; /* 0x02 - EEPROM size (256 ?) */
13 u8 version; /* 0x03 - EEPROM version */
14 u32 data_revision; /* 0x04 - Dataset revision */
15 u8 processor_bin_code[3]; /* 0x08 - Processor BIN code */
16 u8 bin_code_expansion; /* 0x0b - ??? (padding ?) */
17 u8 processor_num; /* 0x0c - Number of CPUs on this MPU */
18 u8 input_mul_bus_div; /* 0x0d - Clock input multiplier/bus divider */
19 u8 reserved1[2]; /* 0x0e - */
20 u32 input_clk_freq_high; /* 0x10 - Input clock frequency high */
21 u8 cpu_nb_target_cycles; /* 0x14 - ??? */
22 u8 cpu_statlat; /* 0x15 - ??? */
23 u8 cpu_snooplat; /* 0x16 - ??? */
24 u8 cpu_snoopacc; /* 0x17 - ??? */
25 u8 nb_paamwin; /* 0x18 - ??? */
26 u8 nb_statlat; /* 0x19 - ??? */
27 u8 nb_snooplat; /* 0x1a - ??? */
28 u8 nb_snoopwin; /* 0x1b - ??? */
29 u8 api_bus_mode; /* 0x1c - ??? */
30 u8 reserved2[3]; /* 0x1d - */
31 u32 input_clk_freq_low; /* 0x20 - Input clock frequency low */
32 u8 processor_card_slot; /* 0x24 - Processor card slot number */
33 u8 reserved3[2]; /* 0x25 - */
34 u8 padjmax; /* 0x27 - Max power adjustment (Not in OF!) */
35 u8 ttarget; /* 0x28 - Target temperature */
36 u8 tmax; /* 0x29 - Max temperature */
37 u8 pmaxh; /* 0x2a - Max power */
38 u8 tguardband; /* 0x2b - Guardband temp ??? Hist. len in OSX */
39 fs32 pid_gp; /* 0x2c - PID proportional gain */
40 fs32 pid_gr; /* 0x30 - PID reset gain */
41 fs32 pid_gd; /* 0x34 - PID derivative gain */
42 fu16 voph; /* 0x38 - Vop High */
43 fu16 vopl; /* 0x3a - Vop Low */
44 fs16 nactual_die; /* 0x3c - nActual Die */
45 fs16 nactual_heatsink; /* 0x3e - nActual Heatsink */
46 fs16 nactual_system; /* 0x40 - nActual System */
47 u16 calibration_flags; /* 0x42 - Calibration flags */
48 fu16 mdiode; /* 0x44 - Diode M value (scaling factor) */
49 fs16 bdiode; /* 0x46 - Diode B value (offset) */
50 fs32 theta_heat_sink; /* 0x48 - Theta heat sink */
51 u16 rminn_intake_fan; /* 0x4c - Intake fan min RPM */
52 u16 rmaxn_intake_fan; /* 0x4e - Intake fan max RPM */
53 u16 rminn_exhaust_fan; /* 0x50 - Exhaust fan min RPM */
54 u16 rmaxn_exhaust_fan; /* 0x52 - Exhaust fan max RPM */
55 u8 processor_part_num[8]; /* 0x54 - Processor part number XX pumps min/max */
56 u32 processor_lot_num; /* 0x5c - Processor lot number */
57 u8 orig_card_sernum[0x10]; /* 0x60 - Card original serial number */
58 u8 curr_card_sernum[0x10]; /* 0x70 - Card current serial number */
59 u8 mlb_sernum[0x18]; /* 0x80 - MLB serial number */
60 u32 checksum1; /* 0x98 - */
61 u32 checksum2; /* 0x9c - */
62}; /* Total size = 0xa0 */
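A quick consistency check on the layout above: the offsets pack with natural alignment to exactly 0xa0 = 160 bytes, matching the "bytes_used (160 ?)" hint at offset 0x01. Code parsing this EEPROM image could assert that at compile time (a sketch; BUILD_BUG_ON() must sit inside a function, e.g. the driver's init path):

BUILD_BUG_ON(sizeof(struct mpu_data) != 0xa0);	/* 160-byte EEPROM image */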
63
64/* Display a 16.16 fixed point value */
65#define FIX32TOPRINT(f) ((f) >> 16),((((f) & 0xffff) * 1000) >> 16)
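A worked example of the macro:

/* FIX32TOPRINT(0x003c8000): integer part 0x3c = 60, fractional part
 * ((0x8000 * 1000) >> 16) = 500, so a "%d.%03d" format prints "60.500". */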
66
67/*
68 * Maximum number of seconds to be in critical state (after a
69 * normal shutdown attempt). If the machine isn't down after
70 * this counter elapses, we force an immediate machine power
71 * off.
72 */
73#define MAX_CRITICAL_STATE 30
74static char * critical_overtemp_path = "/sbin/critical_overtemp";
75
76/*
 77 * This option is "weird" :) Basically, if you define this to 1,
 78 * the control loop for the RPM fans (not PWMs) will apply the
 79 * correction factor obtained from the PID to the _actual_ RPM
 80 * speed read from the FCU.
 81 * If you define the constant below to 0, it will instead be
 82 * applied to the setpoint RPM speed, that is basically the
 83 * speed we previously "asked" for.
84 *
85 * I'm not sure which of these Apple's algorithm is supposed
86 * to use
87 */
88#define RPM_PID_USE_ACTUAL_SPEED 0
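Schematically, the constant selects which base value the PID delta is added to (a sketch with hypothetical names, not the driver's literal code):

#if RPM_PID_USE_ACTUAL_SPEED
	base_rpm = read_fan_rpm(fan);	/* what the fan actually does now */
#else
	base_rpm = state->rpm;		/* the setpoint we last asked for */
#endif
	state->rpm = base_rpm + pid_delta;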
89
90/*
91 * i2c IDs. Currently, we hard code those and assume that
92 * the FCU is on U3 bus 1 while all sensors are on U3 bus
 93 * 0. This appears to be safe enough for this first version
 94 * of the driver, though I would accept any clean patch
 95 * making better use of the device-tree without turning the
 96 * whole i2c registration mechanism into a racy mess
97 *
98 * Note: Xserve changed this. We have some bits on the K2 bus,
99 * which I arbitrarily set to 0x200. Ultimately, we really want
 100 * to look these up in the device-tree though
101 */
102#define FAN_CTRLER_ID 0x15e
103#define SUPPLY_MONITOR_ID 0x58
104#define SUPPLY_MONITORB_ID 0x5a
105#define DRIVES_DALLAS_ID 0x94
106#define BACKSIDE_MAX_ID 0x98
107#define XSERVE_DIMMS_LM87 0x25a
108#define XSERVE_SLOTS_LM75 0x290
109
110/*
111 * Some MAX6690, DS1775, LM87 register definitions
112 */
113#define MAX6690_INT_TEMP 0
114#define MAX6690_EXT_TEMP 1
115#define DS1775_TEMP 0
116#define LM87_INT_TEMP 0x27
117
118/*
119 * Scaling factors for the AD7417 ADC converters (except
120 * for the CPU diode which is obtained from the EEPROM).
121 * Those values are obtained from the property list of
122 * the darwin driver
123 */
124#define ADC_12V_CURRENT_SCALE 0x0320 /* _AD2 */
125#define ADC_CPU_VOLTAGE_SCALE 0x00a0 /* _AD3 */
126#define ADC_CPU_CURRENT_SCALE 0x1f40 /* _AD4 */
127
128/*
129 * PID factors for the U3/Backside fan control loop. We have 2 sets
130 * of values here, one set for U3 and one set for U3H
131 */
132#define BACKSIDE_FAN_PWM_DEFAULT_ID 1
133#define BACKSIDE_FAN_PWM_INDEX 0
134#define BACKSIDE_PID_U3_G_d 0x02800000
135#define BACKSIDE_PID_U3H_G_d 0x01400000
136#define BACKSIDE_PID_RACK_G_d 0x00500000
137#define BACKSIDE_PID_G_p 0x00500000
138#define BACKSIDE_PID_RACK_G_p 0x0004cccc
139#define BACKSIDE_PID_G_r 0x00000000
140#define BACKSIDE_PID_U3_INPUT_TARGET 0x00410000
141#define BACKSIDE_PID_U3H_INPUT_TARGET 0x004b0000
142#define BACKSIDE_PID_RACK_INPUT_TARGET 0x00460000
143#define BACKSIDE_PID_INTERVAL 5
144#define BACKSIDE_PID_RACK_INTERVAL 1
145#define BACKSIDE_PID_OUTPUT_MAX 100
146#define BACKSIDE_PID_U3_OUTPUT_MIN 20
147#define BACKSIDE_PID_U3H_OUTPUT_MIN 20
148#define BACKSIDE_PID_HISTORY_SIZE 2
149
150struct basckside_pid_params
151{
152 s32 G_d;
153 s32 G_p;
154 s32 G_r;
155 s32 input_target;
156 s32 output_min;
157 s32 output_max;
158 s32 interval;
159 int additive;
160};
161
162struct backside_pid_state
163{
164 int ticks;
165 struct i2c_client * monitor;
166 s32 sample_history[BACKSIDE_PID_HISTORY_SIZE];
167 s32 error_history[BACKSIDE_PID_HISTORY_SIZE];
168 int cur_sample;
169 s32 last_temp;
170 int pwm;
171 int first;
172};
173
174/*
175 * PID factors for the Drive Bay fan control loop
176 */
177#define DRIVES_FAN_RPM_DEFAULT_ID 2
178#define DRIVES_FAN_RPM_INDEX 1
179#define DRIVES_PID_G_d 0x01e00000
180#define DRIVES_PID_G_p 0x00500000
181#define DRIVES_PID_G_r 0x00000000
182#define DRIVES_PID_INPUT_TARGET 0x00280000
183#define DRIVES_PID_INTERVAL 5
184#define DRIVES_PID_OUTPUT_MAX 4000
185#define DRIVES_PID_OUTPUT_MIN 300
186#define DRIVES_PID_HISTORY_SIZE 2
187
188struct drives_pid_state
189{
190 int ticks;
191 struct i2c_client * monitor;
 192	s32 sample_history[DRIVES_PID_HISTORY_SIZE];
 193	s32 error_history[DRIVES_PID_HISTORY_SIZE];
194 int cur_sample;
195 s32 last_temp;
196 int rpm;
197 int first;
198};
199
200#define SLOTS_FAN_PWM_DEFAULT_ID 2
201#define SLOTS_FAN_PWM_INDEX 2
 202#define SLOTS_FAN_DEFAULT_PWM 40 /* Do better here! */
203
204
205/*
206 * PID factors for the Xserve DIMM control loop
207 */
208#define DIMM_PID_G_d 0
209#define DIMM_PID_G_p 0
210#define DIMM_PID_G_r 0x06553600
211#define DIMM_PID_INPUT_TARGET 3276800
212#define DIMM_PID_INTERVAL 1
213#define DIMM_PID_OUTPUT_MAX 14000
214#define DIMM_PID_OUTPUT_MIN 4000
215#define DIMM_PID_HISTORY_SIZE 20
216
217struct dimm_pid_state
218{
219 int ticks;
220 struct i2c_client * monitor;
221 s32 sample_history[DIMM_PID_HISTORY_SIZE];
222 s32 error_history[DIMM_PID_HISTORY_SIZE];
223 int cur_sample;
224 s32 last_temp;
225 int first;
226 int output;
227};
228
229
230/*
231 * PID factors for the Xserve Slots control loop
232 */
233#define SLOTS_PID_G_d 0
234#define SLOTS_PID_G_p 0
235#define SLOTS_PID_G_r 0x00100000
236#define SLOTS_PID_INPUT_TARGET 3200000
237#define SLOTS_PID_INTERVAL 1
238#define SLOTS_PID_OUTPUT_MAX 100
239#define SLOTS_PID_OUTPUT_MIN 20
240#define SLOTS_PID_HISTORY_SIZE 20
241
242struct slots_pid_state
243{
244 int ticks;
245 struct i2c_client * monitor;
246 s32 sample_history[SLOTS_PID_HISTORY_SIZE];
247 s32 error_history[SLOTS_PID_HISTORY_SIZE];
248 int cur_sample;
249 s32 last_temp;
250 int first;
251 int pwm;
252};
253
254
255
256/* Desktops */
257
258#define CPUA_INTAKE_FAN_RPM_DEFAULT_ID 3
259#define CPUA_EXHAUST_FAN_RPM_DEFAULT_ID 4
260#define CPUB_INTAKE_FAN_RPM_DEFAULT_ID 5
261#define CPUB_EXHAUST_FAN_RPM_DEFAULT_ID 6
262
263#define CPUA_INTAKE_FAN_RPM_INDEX 3
264#define CPUA_EXHAUST_FAN_RPM_INDEX 4
265#define CPUB_INTAKE_FAN_RPM_INDEX 5
266#define CPUB_EXHAUST_FAN_RPM_INDEX 6
267
268#define CPU_INTAKE_SCALE 0x0000f852
269#define CPU_TEMP_HISTORY_SIZE 2
270#define CPU_POWER_HISTORY_SIZE 10
271#define CPU_PID_INTERVAL 1
272#define CPU_MAX_OVERTEMP 90
273
274#define CPUA_PUMP_RPM_INDEX 7
275#define CPUB_PUMP_RPM_INDEX 8
276#define CPU_PUMP_OUTPUT_MAX 3200
277#define CPU_PUMP_OUTPUT_MIN 1250
278
279/* Xserve */
280#define CPU_A1_FAN_RPM_INDEX 9
281#define CPU_A2_FAN_RPM_INDEX 10
282#define CPU_A3_FAN_RPM_INDEX 11
283#define CPU_B1_FAN_RPM_INDEX 12
284#define CPU_B2_FAN_RPM_INDEX 13
285#define CPU_B3_FAN_RPM_INDEX 14
286
287
288struct cpu_pid_state
289{
290 int index;
291 struct i2c_client * monitor;
292 struct mpu_data mpu;
293 int overtemp;
294 s32 temp_history[CPU_TEMP_HISTORY_SIZE];
295 int cur_temp;
296 s32 power_history[CPU_POWER_HISTORY_SIZE];
297 s32 error_history[CPU_POWER_HISTORY_SIZE];
298 int cur_power;
299 int count_power;
300 int rpm;
301 int intake_rpm;
302 s32 voltage;
303 s32 current_a;
304 s32 last_temp;
305 s32 last_power;
306 int first;
307 u8 adc_config;
308 s32 pump_min;
309 s32 pump_max;
310};
311
312/* Tickle FCU every 10 seconds */
313#define FCU_TICKLE_TICKS 10
314
315/*
316 * Driver state
317 */
318enum {
319 state_detached,
320 state_attaching,
321 state_attached,
322 state_detaching,
323};
324
325
326#endif /* __THERM_PMAC_7_2_H__ */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index aa915da2a5e5..82abfce1cb42 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -176,7 +176,6 @@ STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); 176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); 178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
180 179
181STATIC struct device_attribute *NCR_700_dev_attrs[]; 180STATIC struct device_attribute *NCR_700_dev_attrs[];
182 181
@@ -326,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
326 tpnt->slave_destroy = NCR_700_slave_destroy; 325 tpnt->slave_destroy = NCR_700_slave_destroy;
327 tpnt->slave_alloc = NCR_700_slave_alloc; 326 tpnt->slave_alloc = NCR_700_slave_alloc;
328 tpnt->change_queue_depth = NCR_700_change_queue_depth; 327 tpnt->change_queue_depth = NCR_700_change_queue_depth;
329 tpnt->change_queue_type = NCR_700_change_queue_type;
330 tpnt->use_blk_tags = 1; 328 tpnt->use_blk_tags = 1;
331 329
332 if(tpnt->name == NULL) 330 if(tpnt->name == NULL)
@@ -904,8 +902,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
904 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 902 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
905 903
906 SCp->device->tagged_supported = 0; 904 SCp->device->tagged_supported = 0;
905 SCp->device->simple_tags = 0;
907 scsi_change_queue_depth(SCp->device, host->cmd_per_lun); 906 scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
908 scsi_set_tag_type(SCp->device, 0);
909 } else { 907 } else {
910 shost_printk(KERN_WARNING, host, 908 shost_printk(KERN_WARNING, host,
911 "(%d:%d) Unexpected REJECT Message %s\n", 909 "(%d:%d) Unexpected REJECT Message %s\n",
@@ -1818,8 +1816,8 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1818 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 1816 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1819 } 1817 }
1820 1818
1821 if((hostdata->tag_negotiated &(1<<scmd_id(SCp))) 1819 if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1822 && scsi_get_tag_type(SCp->device)) { 1820 SCp->device->simple_tags) {
1823 slot->tag = SCp->request->tag; 1821 slot->tag = SCp->request->tag;
1824 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", 1822 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1825 slot->tag, slot); 1823 slot->tag, slot);
@@ -2082,39 +2080,6 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2082 return scsi_change_queue_depth(SDp, depth); 2080 return scsi_change_queue_depth(SDp, depth);
2083} 2081}
2084 2082
2085static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2086{
2087 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2088 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2089 struct NCR_700_Host_Parameters *hostdata =
2090 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2091
2092 /* We have a global (per target) flag to track whether TCQ is
2093 * enabled, so we'll be turning it off for the entire target here.
2094 * our tag algorithm will fail if we mix tagged and untagged commands,
2095 * so quiesce the device before doing this */
2096 if (change_tag)
2097 scsi_target_quiesce(SDp->sdev_target);
2098
2099 scsi_set_tag_type(SDp, tag_type);
2100 if (!tag_type) {
2101 /* shift back to the default unqueued number of commands
2102 * (the user can still raise this) */
2103 scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
2104 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2105 } else {
2106 /* Here, we cleared the negotiation flag above, so this
2107 * will force the driver to renegotiate */
2108 scsi_change_queue_depth(SDp, SDp->queue_depth);
2109 if (change_tag)
2110 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2111 }
2112 if (change_tag)
2113 scsi_target_resume(SDp->sdev_target);
2114
2115 return tag_type;
2116}
2117
2118static ssize_t 2083static ssize_t
2119NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf) 2084NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2120{ 2085{
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 86cf3d671eb9..9c92f415229f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1462,18 +1462,17 @@ config SCSI_WD719X
1462 SCSI controllers (based on WD33C296A chip). 1462 SCSI controllers (based on WD33C296A chip).
1463 1463
1464config SCSI_DEBUG 1464config SCSI_DEBUG
1465 tristate "SCSI debugging host simulator" 1465 tristate "SCSI debugging host and device simulator"
1466 depends on SCSI 1466 depends on SCSI
1467 select CRC_T10DIF 1467 select CRC_T10DIF
1468 help 1468 help
1469 This is a host adapter simulator that can simulate multiple hosts 1469 This pseudo driver simulates one or more hosts (SCSI initiators),
1470 each with multiple dummy SCSI devices (disks). It defaults to one 1470 each with one or more targets, each with one or more logical units.
1471 host adapter with one dummy SCSI disk. Each dummy disk uses kernel 1471 Defaults to one of each, creating a small RAM disk device. Many
1472 RAM as storage (i.e. it is a ramdisk). To save space when multiple 1472 parameters found in the /sys/bus/pseudo/drivers/scsi_debug
1473 dummy disks are simulated, they share the same kernel RAM for 1473 directory can be tweaked at run time.
1474 their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more 1474 See <http://sg.danny.cz/sg/sdebug26.html> for more information.
1475 information. This driver is primarily of use to those testing the 1475 Mainly used for testing and best as a module. If unsure, say N.
1476 SCSI and block subsystems. If unsure, say N.
1477 1476
1478config SCSI_MESH 1477config SCSI_MESH
1479 tristate "MESH (Power Mac internal SCSI) support" 1478 tristate "MESH (Power Mac internal SCSI) support"
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 6719a3390ebd..2c5ce48c8f95 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7921,9 +7921,9 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7921 */ 7921 */
7922 if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && 7922 if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
7923 (boardp->reqcnt[scp->device->id] % 255) == 0) { 7923 (boardp->reqcnt[scp->device->id] % 255) == 0) {
7924 asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG; 7924 asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
7925 } else { 7925 } else {
7926 asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG; 7926 asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
7927 } 7927 }
7928 7928
7929 /* Build ASC_SCSI_Q */ 7929 /* Build ASC_SCSI_Q */
@@ -8351,7 +8351,7 @@ static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
8351 } 8351 }
8352 q_addr = ASC_QNO_TO_QADDR(q_no); 8352 q_addr = ASC_QNO_TO_QADDR(q_no);
8353 if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) { 8353 if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
8354 scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8354 scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
8355 } 8355 }
8356 scsiq->q1.status = QS_FREE; 8356 scsiq->q1.status = QS_FREE;
8357 AscMemWordCopyPtrToLram(iop_base, 8357 AscMemWordCopyPtrToLram(iop_base,
@@ -8669,7 +8669,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
8669 } 8669 }
8670 } 8670 }
8671 if (disable_syn_offset_one_fix) { 8671 if (disable_syn_offset_one_fix) {
8672 scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8672 scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
8673 scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | 8673 scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
8674 ASC_TAG_FLAG_DISABLE_DISCONNECT); 8674 ASC_TAG_FLAG_DISABLE_DISCONNECT);
8675 } else { 8675 } else {
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 14fc018436c2..02a2512b76a8 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -63,7 +63,6 @@ static struct scsi_host_template aic94xx_sht = {
63 .scan_finished = asd_scan_finished, 63 .scan_finished = asd_scan_finished,
64 .scan_start = asd_scan_start, 64 .scan_start = asd_scan_start,
65 .change_queue_depth = sas_change_queue_depth, 65 .change_queue_depth = sas_change_queue_depth,
66 .change_queue_type = sas_change_queue_type,
67 .bios_param = sas_bios_param, 66 .bios_param = sas_bios_param,
68 .can_queue = 1, 67 .can_queue = 1,
69 .cmd_per_lun = 1, 68 .cmd_per_lun = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e861f286b42e..98d06d151958 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2792,7 +2792,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
2792 .eh_host_reset_handler = fc_eh_host_reset, 2792 .eh_host_reset_handler = fc_eh_host_reset,
2793 .slave_alloc = fc_slave_alloc, 2793 .slave_alloc = fc_slave_alloc,
2794 .change_queue_depth = scsi_change_queue_depth, 2794 .change_queue_depth = scsi_change_queue_depth,
2795 .change_queue_type = scsi_change_queue_type,
2796 .this_id = -1, 2795 .this_id = -1,
2797 .cmd_per_lun = 3, 2796 .cmd_per_lun = 3,
2798 .use_clustering = ENABLE_CLUSTERING, 2797 .use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4b56858c1df2..9ecca8504f60 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1737,11 +1737,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1737 fcp_cmnd->fc_pri_ta = 0; 1737 fcp_cmnd->fc_pri_ta = 0;
1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1739 fcp_cmnd->fc_flags = io_req->io_req_flags; 1739 fcp_cmnd->fc_flags = io_req->io_req_flags;
1740 1740 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1741 if (sc_cmd->flags & SCMD_TAGGED)
1742 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1743 else
1744 fcp_cmnd->fc_pri_ta = 0;
1745} 1741}
1746 1742
1747static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1743static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 51ea5dc5f084..3987284e0d2a 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -172,10 +172,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
172 fcp_cmnd->fc_cmdref = 0; 172 fcp_cmnd->fc_cmdref = 0;
173 173
174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); 174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
175 if (scmnd->flags & SCMD_TAGGED) 175 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
176 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
177 else
178 fcp_cmnd->fc_pri_ta = 0;
179 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); 176 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
180 177
181 if (req->nsge) 178 if (req->nsge)
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index b7dc59fca7a6..7bd376d95ed5 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -684,9 +684,9 @@ static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
684 * 1) verify the fi_version is correct 684 * 1) verify the fi_version is correct
685 * 2) verify the checksum of the entire image. 685 * 2) verify the checksum of the entire image.
686 * 3) validate the adap_typ, action and length fields. 686 * 3) validate the adap_typ, action and length fields.
687 * 4) valdiate each component header. check the img_type and 687 * 4) validate each component header. check the img_type and
688 * length fields 688 * length fields
689 * 5) valdiate each component image. validate signatures and 689 * 5) validate each component image. validate signatures and
690 * local checksums 690 * local checksums
691 */ 691 */
692static bool verify_fi(struct esas2r_adapter *a, 692static bool verify_fi(struct esas2r_adapter *a,
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 593ff8a63c70..7e1c21e6736b 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -255,7 +255,6 @@ static struct scsi_host_template driver_template = {
255 .emulated = 0, 255 .emulated = 0,
256 .proc_name = ESAS2R_DRVR_NAME, 256 .proc_name = ESAS2R_DRVR_NAME,
257 .change_queue_depth = scsi_change_queue_depth, 257 .change_queue_depth = scsi_change_queue_depth,
258 .change_queue_type = scsi_change_queue_type,
259 .max_sectors = 0xFFFF, 258 .max_sectors = 0xFFFF,
260 .use_blk_tags = 1, 259 .use_blk_tags = 1,
261}; 260};
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cd00a6cdf55b..ec193a8357d7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -281,7 +281,6 @@ static struct scsi_host_template fcoe_shost_template = {
281 .eh_host_reset_handler = fc_eh_host_reset, 281 .eh_host_reset_handler = fc_eh_host_reset,
282 .slave_alloc = fc_slave_alloc, 282 .slave_alloc = fc_slave_alloc,
283 .change_queue_depth = scsi_change_queue_depth, 283 .change_queue_depth = scsi_change_queue_depth,
284 .change_queue_type = scsi_change_queue_type,
285 .this_id = -1, 284 .this_id = -1,
286 .cmd_per_lun = 3, 285 .cmd_per_lun = 3,
287 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 286 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0c1f8177b5b7..8a0d4d7b3254 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -111,7 +111,6 @@ static struct scsi_host_template fnic_host_template = {
111 .eh_host_reset_handler = fnic_host_reset, 111 .eh_host_reset_handler = fnic_host_reset,
112 .slave_alloc = fnic_slave_alloc, 112 .slave_alloc = fnic_slave_alloc,
113 .change_queue_depth = scsi_change_queue_depth, 113 .change_queue_depth = scsi_change_queue_depth,
114 .change_queue_type = scsi_change_queue_type,
115 .this_id = -1, 114 .this_id = -1,
116 .cmd_per_lun = 3, 115 .cmd_per_lun = 3,
117 .can_queue = FNIC_DFLT_IO_REQ, 116 .can_queue = FNIC_DFLT_IO_REQ,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f58c6d8e0264..057d27721d5b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1615,7 +1615,6 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
1615 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1615 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1616 struct ibmvfc_cmd *vfc_cmd; 1616 struct ibmvfc_cmd *vfc_cmd;
1617 struct ibmvfc_event *evt; 1617 struct ibmvfc_event *evt;
1618 u8 tag[2];
1619 int rc; 1618 int rc;
1620 1619
1621 if (unlikely((rc = fc_remote_port_chkready(rport))) || 1620 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -3089,7 +3088,6 @@ static struct scsi_host_template driver_template = {
3089 .target_alloc = ibmvfc_target_alloc, 3088 .target_alloc = ibmvfc_target_alloc,
3090 .scan_finished = ibmvfc_scan_finished, 3089 .scan_finished = ibmvfc_scan_finished,
3091 .change_queue_depth = ibmvfc_change_queue_depth, 3090 .change_queue_depth = ibmvfc_change_queue_depth,
3092 .change_queue_type = scsi_change_queue_type,
3093 .cmd_per_lun = 16, 3091 .cmd_per_lun = 16,
3094 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, 3092 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3095 .this_id = -1, 3093 .this_id = -1,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 540294389355..df4e27cd996a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1426,16 +1426,14 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1426 if (res->sdev) { 1426 if (res->sdev) {
1427 res->del_from_ml = 1; 1427 res->del_from_ml = 1;
1428 res->res_handle = IPR_INVALID_RES_HANDLE; 1428 res->res_handle = IPR_INVALID_RES_HANDLE;
1429 if (ioa_cfg->allow_ml_add_del) 1429 schedule_work(&ioa_cfg->work_q);
1430 schedule_work(&ioa_cfg->work_q);
1431 } else { 1430 } else {
1432 ipr_clear_res_target(res); 1431 ipr_clear_res_target(res);
1433 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1432 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 } 1433 }
1435 } else if (!res->sdev || res->del_from_ml) { 1434 } else if (!res->sdev || res->del_from_ml) {
1436 res->add_to_ml = 1; 1435 res->add_to_ml = 1;
1437 if (ioa_cfg->allow_ml_add_del) 1436 schedule_work(&ioa_cfg->work_q);
1438 schedule_work(&ioa_cfg->work_q);
1439 } 1437 }
1440 1438
1441 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
@@ -3273,8 +3271,7 @@ static void ipr_worker_thread(struct work_struct *work)
3273restart: 3271restart:
3274 do { 3272 do {
3275 did_work = 0; 3273 did_work = 0;
3276 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 3274 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3277 !ioa_cfg->allow_ml_add_del) {
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 return; 3276 return;
3280 } 3277 }
@@ -3311,6 +3308,7 @@ restart:
3311 } 3308 }
3312 } 3309 }
3313 3310
3311 ioa_cfg->scan_done = 1;
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3313 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316 LEAVE; 3314 LEAVE;
@@ -4346,30 +4344,6 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4346} 4344}
4347 4345
4348/** 4346/**
4349 * ipr_change_queue_type - Change the device's queue type
4350 * @dsev: scsi device struct
4351 * @tag_type: type of tags to use
4352 *
4353 * Return value:
4354 * actual queue type set
4355 **/
4356static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4357{
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361
4362 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4363 res = (struct ipr_resource_entry *)sdev->hostdata;
4364 if (res && ipr_is_gscsi(res))
4365 tag_type = scsi_change_queue_type(sdev, tag_type);
4366 else
4367 tag_type = 0;
4368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369 return tag_type;
4370}
4371
4372/**
4373 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4347 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4374 * @dev: device struct 4348 * @dev: device struct
4375 * @attr: device attribute structure 4349 * @attr: device attribute structure
@@ -4739,6 +4713,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4739 sdev->no_uld_attach = 1; 4713 sdev->no_uld_attach = 1;
4740 } 4714 }
4741 if (ipr_is_vset_device(res)) { 4715 if (ipr_is_vset_device(res)) {
4716 sdev->scsi_level = SCSI_SPC_3;
4742 blk_queue_rq_timeout(sdev->request_queue, 4717 blk_queue_rq_timeout(sdev->request_queue,
4743 IPR_VSET_RW_TIMEOUT); 4718 IPR_VSET_RW_TIMEOUT);
4744 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4719 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -5231,6 +5206,28 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5231 * @scsi_cmd: scsi command struct 5206 * @scsi_cmd: scsi command struct
5232 * 5207 *
5233 * Return value: 5208 * Return value:
5209 * 0 if scan in progress / 1 if scan is complete
5210 **/
5211static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5212{
5213 unsigned long lock_flags;
5214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5215 int rc = 0;
5216
5217 spin_lock_irqsave(shost->host_lock, lock_flags);
5218 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5219 rc = 1;
5220 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5221 rc = 1;
5222 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5223 return rc;
5224}
5225
5226/**
5227 * ipr_eh_host_reset - Reset the host adapter
5228 * @scsi_cmd: scsi command struct
5229 *
5230 * Return value:
5234 * SUCCESS / FAILED 5231 * SUCCESS / FAILED
5235 **/ 5232 **/
5236static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) 5233static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
@@ -5779,7 +5776,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5779 5776
5780 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5777 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5781 5778
5782 if (!scsi_get_tag_type(scsi_cmd->device)) { 5779 if (!scsi_cmd->device->simple_tags) {
5783 ipr_erp_request_sense(ipr_cmd); 5780 ipr_erp_request_sense(ipr_cmd);
5784 return; 5781 return;
5785 } 5782 }
@@ -6299,10 +6296,10 @@ static struct scsi_host_template driver_template = {
6299 .slave_alloc = ipr_slave_alloc, 6296 .slave_alloc = ipr_slave_alloc,
6300 .slave_configure = ipr_slave_configure, 6297 .slave_configure = ipr_slave_configure,
6301 .slave_destroy = ipr_slave_destroy, 6298 .slave_destroy = ipr_slave_destroy,
6299 .scan_finished = ipr_scan_finished,
6302 .target_alloc = ipr_target_alloc, 6300 .target_alloc = ipr_target_alloc,
6303 .target_destroy = ipr_target_destroy, 6301 .target_destroy = ipr_target_destroy,
6304 .change_queue_depth = ipr_change_queue_depth, 6302 .change_queue_depth = ipr_change_queue_depth,
6305 .change_queue_type = ipr_change_queue_type,
6306 .bios_param = ipr_biosparam, 6303 .bios_param = ipr_biosparam,
6307 .can_queue = IPR_MAX_COMMANDS, 6304 .can_queue = IPR_MAX_COMMANDS,
6308 .this_id = -1, 6305 .this_id = -1,
@@ -6841,7 +6838,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6841 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 6838 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6842 6839
6843 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 6840 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6844 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { 6841 if (res->add_to_ml || res->del_from_ml) {
6845 ipr_trace; 6842 ipr_trace;
6846 break; 6843 break;
6847 } 6844 }
@@ -6870,6 +6867,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6870 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6867 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
 	scsi_block_requests(ioa_cfg->host);
 
+	schedule_work(&ioa_cfg->work_q);
 	LEAVE;
 	return IPR_RC_JOB_RETURN;
 }
@@ -7610,6 +7608,19 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
 	type[4] = '\0';
 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
 
+	if (ipr_invalid_adapter(ioa_cfg)) {
+		dev_err(&ioa_cfg->pdev->dev,
+			"Adapter not supported in this hardware configuration.\n");
+
+		if (!ipr_testmode) {
+			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
+			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+			list_add_tail(&ipr_cmd->queue,
+					&ioa_cfg->hrrq->hrrq_free_q);
+			return IPR_RC_JOB_RETURN;
+		}
+	}
+
 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
 
 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
@@ -8797,20 +8808,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 				IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
-
-	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
-		rc = -EIO;
-	} else if (ipr_invalid_adapter(ioa_cfg)) {
-		if (!ipr_testmode)
-			rc = -EIO;
-
-		dev_err(&ioa_cfg->pdev->dev,
-			"Adapter not supported in this hardware configuration.\n");
-	}
-
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 
 	LEAVE;
 	return rc;
@@ -9264,7 +9261,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 				* ioa_cfg->max_devs_supported)));
 	}
 
-	host->max_channel = IPR_MAX_BUS_TO_SCAN;
+	host->max_channel = IPR_VSET_BUS;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = IPR_MAX_CDB_LEN;
 	host->can_queue = ioa_cfg->max_cmds;
@@ -9764,25 +9761,6 @@ out_scsi_host_put:
 }
 
 /**
- * ipr_scan_vsets - Scans for VSET devices
- * @ioa_cfg: ioa config struct
- *
- * Description: Since the VSET resources do not follow SAM in that we can have
- * sparse LUNs with no LUN 0, we have to scan for these ourselves.
- *
- * Return value:
- * none
- **/
-static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
-{
-	int target, lun;
-
-	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
-		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
-			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
-}
-
-/**
  * ipr_initiate_ioa_bringdown - Bring down an adapter
  * @ioa_cfg: ioa config struct
  * @shutdown_type: shutdown type
@@ -9937,10 +9915,6 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	}
 
 	scsi_scan_host(ioa_cfg->host);
-	ipr_scan_vsets(ioa_cfg);
-	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
-	ioa_cfg->allow_ml_add_del = 1;
-	ioa_cfg->host->max_channel = IPR_VSET_BUS;
 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ebdebd944e7..b4f3eec51bc9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -157,13 +157,11 @@
 
 #define IPR_MAX_NUM_TARGETS_PER_BUS 256
 #define IPR_MAX_NUM_LUNS_PER_TARGET 256
-#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
 #define IPR_VSET_BUS 0xff
 #define IPR_IOA_BUS 0xff
 #define IPR_IOA_TARGET 0xff
 #define IPR_IOA_LUN 0xff
 #define IPR_MAX_NUM_BUSES 16
-#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
 
 #define IPR_NUM_RESET_RELOAD_RETRIES 3
 
@@ -1453,7 +1451,7 @@ struct ipr_ioa_cfg {
 	u8 in_ioa_bringdown:1;
 	u8 ioa_unit_checked:1;
 	u8 dump_taken:1;
-	u8 allow_ml_add_del:1;
+	u8 scan_done:1;
 	u8 needs_hard_reset:1;
 	u8 dual_raid:1;
 	u8 needs_warm_reset:1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 724c6265b667..cd41b63a2f10 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -158,7 +158,6 @@ static struct scsi_host_template isci_sht = {
 	.scan_finished = isci_host_scan_finished,
 	.scan_start = isci_host_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = ISCI_CAN_QUEUE_VAL,
 	.cmd_per_lun = 1,
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 72918d227ead..519dac4e341e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -906,13 +906,6 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
 	return scsi_change_queue_depth(sdev, depth);
 }
 
-int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
-{
-	if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
-		return -EINVAL;
-	return scsi_change_queue_type(scsi_dev, type);
-}
-
 int sas_bios_param(struct scsi_device *scsi_dev,
 		struct block_device *bdev,
 		sector_t capacity, int *hsc)
@@ -1011,7 +1004,6 @@ EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fd85952b621d..4f9222eb2266 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5879,7 +5879,6 @@ struct scsi_host_template lpfc_template = {
 	.max_sectors = 0xFFFF,
 	.vendor_id = LPFC_NL_VENDOR_ID,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.use_blk_tags = 1,
 	.track_queue_depth = 1,
 };
@@ -5904,7 +5903,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.shost_attrs = lpfc_vport_attrs,
 	.max_sectors = 0xFFFF,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.use_blk_tags = 1,
 	.track_queue_depth = 1,
 };
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8431eb10bbb1..6a1c036a6f3f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -7592,7 +7592,6 @@ static struct scsi_host_template scsih_driver_template = {
 	.scan_finished = _scsih_scan_finished,
 	.scan_start = _scsih_scan_start,
 	.change_queue_depth = _scsih_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = _scsih_abort,
 	.eh_device_reset_handler = _scsih_dev_reset,
 	.eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 0d1d06488a28..e689bf20a3ea 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
 			&mpt2sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
-	} else {
+	} else
 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
-		    mpt2sas_phy);
-	}
 
 	if (mpt2sas_phy->phy)
 		mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a2b60991efd4..94261ee9e72d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7229,7 +7229,6 @@ static struct scsi_host_template scsih_driver_template = {
 	.scan_finished = _scsih_scan_finished,
 	.scan_start = _scsih_scan_start,
 	.change_queue_depth = _scsih_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = _scsih_abort,
 	.eh_device_reset_handler = _scsih_dev_reset,
 	.eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d4bafaaebea9..3637ae6c0171 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
 			&mpt3sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
-	} else {
+	} else
 		memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
-		    mpt3sas_phy);
-	}
 
 	if (mpt3sas_phy->phy)
 		mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index f15df3de6790..53030b0e8015 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -54,7 +54,6 @@ static struct scsi_host_template mvs_sht = {
 	.scan_finished = mvs_scan_finished,
 	.scan_start = mvs_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = 1,
 	.cmd_per_lun = 1,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 329aba0083ab..65555916d3b8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -76,7 +76,6 @@ static struct scsi_host_template pm8001_sht = {
 	.scan_finished = pm8001_scan_finished,
 	.scan_start = pm8001_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = 1,
 	.cmd_per_lun = 1,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b1b1f66b1ab7..8c27b6a77ec4 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4251,7 +4251,6 @@ static struct scsi_host_template pmcraid_host_template = {
 	.slave_configure = pmcraid_slave_configure,
 	.slave_destroy = pmcraid_slave_destroy,
 	.change_queue_depth = pmcraid_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.can_queue = PMCRAID_MAX_IO_CMD,
 	.this_id = -1,
 	.sg_tablesize = PMCRAID_MAX_IOADLS,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a4dde7e80dbd..e59f25bff7ab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3237,8 +3237,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport *rport;
 	unsigned long flags;
 
-	qla2x00_rport_del(fcport);
-
 	rport_ids.node_name = wwn_to_u64(fcport->node_name);
 	rport_ids.port_name = wwn_to_u64(fcport->port_name);
 	rport_ids.port_id = fcport->d_id.b.domain << 16 |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6b4d9235368a..12ca291c1380 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -258,7 +258,6 @@ struct scsi_host_template qla2xxx_driver_template = {
 	.scan_finished = qla2xxx_scan_finished,
 	.scan_start = qla2xxx_scan_start,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a902fa1db7af..57418258c101 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3218,25 +3218,25 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
 
 	switch (task_codes) {
 	case ATIO_SIMPLE_QUEUE:
-		fcp_task_attr = MSG_SIMPLE_TAG;
+		fcp_task_attr = TCM_SIMPLE_TAG;
 		break;
 	case ATIO_HEAD_OF_QUEUE:
-		fcp_task_attr = MSG_HEAD_TAG;
+		fcp_task_attr = TCM_HEAD_TAG;
 		break;
 	case ATIO_ORDERED_QUEUE:
-		fcp_task_attr = MSG_ORDERED_TAG;
+		fcp_task_attr = TCM_ORDERED_TAG;
 		break;
 	case ATIO_ACA_QUEUE:
-		fcp_task_attr = MSG_ACA_TAG;
+		fcp_task_attr = TCM_ACA_TAG;
 		break;
 	case ATIO_UNTAGGED:
-		fcp_task_attr = MSG_SIMPLE_TAG;
+		fcp_task_attr = TCM_SIMPLE_TAG;
 		break;
 	default:
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
 		    "qla_target: unknown task code %x, use ORDERED instead\n",
 		    task_codes);
-		fcp_task_attr = MSG_ORDERED_TAG;
+		fcp_task_attr = TCM_ORDERED_TAG;
 		break;
 	}
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1ad0c36375b8..e02885451425 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -739,34 +739,12 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 
 	if (sdev->last_queue_full_count <= 10)
 		return 0;
-	if (sdev->last_queue_full_depth < 8) {
-		/* Drop back to untagged */
-		scsi_set_tag_type(sdev, 0);
-		scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
-		return -1;
-	}
 
 	return scsi_change_queue_depth(sdev, depth);
 }
 EXPORT_SYMBOL(scsi_track_queue_full);
 
 /**
- * scsi_change_queue_type() - Change a device's queue type
- * @sdev: The SCSI device whose queue depth is to change
- * @tag_type: Identifier for queue type
- */
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
-	if (!sdev->tagged_supported)
-		return 0;
-
-	scsi_set_tag_type(sdev, tag_type);
-	return tag_type;
-
-}
-EXPORT_SYMBOL(scsi_change_queue_type);
-
-/**
  * scsi_vpd_inquiry - Request a device provide us with a VPD page
  * @sdev: The device to ask
  * @buffer: Where to put the result
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index aa4b6b80aade..7b8b51bc29b4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -128,7 +128,6 @@ static const char *scsi_debug_version_date = "20141022";
 #define DEF_REMOVABLE false
 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
 #define DEF_SECTOR_SIZE 512
-#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 #define DEF_UNMAP_ALIGNMENT 0
 #define DEF_UNMAP_GRANULARITY 1
 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
@@ -817,6 +816,7 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
 				UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
 		if (debug)
 			cp = "capacity data changed";
+		break;
 	default:
 		pr_warn("%s: unexpected unit attention code=%d\n",
 			__func__, k);
@@ -3045,18 +3045,12 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	u8 num;
 	unsigned long iflags;
 	int ret;
+	int retval = 0;
 
-	lba = get_unaligned_be32(cmd + 2);
+	lba = get_unaligned_be64(cmd + 2);
 	num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
 	if (0 == num)
 		return 0; /* degenerate case, not an error */
-	dnum = 2 * num;
-	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
-	if (NULL == arr) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
-				INSUFF_RES_ASCQ);
-		return check_condition_result;
-	}
 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    (cmd[1] & 0xe0)) {
 		mk_sense_invalid_opcode(scp);
@@ -3079,6 +3073,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return check_condition_result;
 	}
+	dnum = 2 * num;
+	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
+	if (NULL == arr) {
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+				INSUFF_RES_ASCQ);
+		return check_condition_result;
+	}
 
 	write_lock_irqsave(&atomic_rw, iflags);
 
@@ -3089,24 +3090,24 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	ret = do_device_access(scp, 0, dnum, true);
 	fake_storep = fake_storep_hold;
 	if (ret == -1) {
-		write_unlock_irqrestore(&atomic_rw, iflags);
-		kfree(arr);
-		return DID_ERROR << 16;
+		retval = DID_ERROR << 16;
+		goto cleanup;
 	} else if ((ret < (dnum * lb_size)) &&
 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
 			    "indicated=%u, IO sent=%d bytes\n", my_name,
 			    dnum * lb_size, ret);
 	if (!comp_write_worker(lba, num, arr)) {
-		write_unlock_irqrestore(&atomic_rw, iflags);
-		kfree(arr);
 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
-		return check_condition_result;
+		retval = check_condition_result;
+		goto cleanup;
 	}
 	if (scsi_debug_lbp())
 		map_region(lba, num);
+cleanup:
 	write_unlock_irqrestore(&atomic_rw, iflags);
-	return 0;
+	kfree(arr);
+	return retval;
 }
 
 struct unmap_block_desc {
@@ -4438,6 +4439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
 		struct sdebug_host_info *sdhp;
 		struct sdebug_dev_info *dp;
 
+		spin_lock(&sdebug_host_list_lock);
 		list_for_each_entry(sdhp, &sdebug_host_list,
 				    host_list) {
 			list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -4446,6 +4448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
 					dp->uas_bm);
 			}
 		}
+		spin_unlock(&sdebug_host_list_lock);
 	}
 	return count;
 }
@@ -4988,32 +4991,6 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 }
 
 static int
-sdebug_change_qtype(struct scsi_device *sdev, int qtype)
-{
-	qtype = scsi_change_queue_type(sdev, qtype);
-	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
-		const char *cp;
-
-		switch (qtype) {
-		case 0:
-			cp = "untagged";
-			break;
-		case MSG_SIMPLE_TAG:
-			cp = "simple tags";
-			break;
-		case MSG_ORDERED_TAG:
-			cp = "ordered tags";
-			break;
-		default:
-			cp = "unknown";
-			break;
-		}
-		sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
-	}
-	return qtype;
-}
-
-static int
 check_inject(struct scsi_cmnd *scp)
 {
 	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
@@ -5212,7 +5189,6 @@ static struct scsi_host_template sdebug_driver_template = {
 	.ioctl = scsi_debug_ioctl,
 	.queuecommand = sdebug_queuecommand_lock_or_not,
 	.change_queue_depth = sdebug_change_qdepth,
-	.change_queue_type = sdebug_change_qtype,
 	.eh_abort_handler = scsi_debug_abort,
 	.eh_device_reset_handler = scsi_debug_device_reset,
 	.eh_target_reset_handler = scsi_debug_target_reset,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c1d04d4d3c6c..262ab837a704 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -211,6 +211,7 @@ static struct {
 	{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
 	{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
 	{"MICROP", "4110", NULL, BLIST_NOTQ},
+	{"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
 	{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
 	{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
 	{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 43318d556cbc..9ea95dd3e260 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1918,7 +1918,9 @@ static int scsi_mq_prep_fn(struct request *req)
 
 	if (scsi_host_get_prot(shost)) {
 		cmd->prot_sdb = (void *)sg +
-			shost->sg_tablesize * sizeof(struct scatterlist);
+			min_t(unsigned int,
+			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+			sizeof(struct scatterlist);
 		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
 
 		cmd->prot_sdb->table.sgl =
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1cb64a8e18c9..1ac38e73df7e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -738,30 +738,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
 		       const char *buf, size_t count)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
-	struct scsi_host_template *sht = sdev->host->hostt;
-	int tag_type = 0, retval;
-	int prev_tag_type = scsi_get_tag_type(sdev);
-
-	if (!sdev->tagged_supported || !sht->change_queue_type)
-		return -EINVAL;
 
-	/*
-	 * We're never issueing order tags these days, but allow the value
-	 * for backwards compatibility.
-	 */
-	if (strncmp(buf, "ordered", 7) == 0 ||
-	    strncmp(buf, "simple", 6) == 0)
-		tag_type = MSG_SIMPLE_TAG;
-	else if (strncmp(buf, "none", 4) != 0)
+	if (!sdev->tagged_supported)
 		return -EINVAL;
 
-	if (tag_type == prev_tag_type)
-		return count;
-
-	retval = sht->change_queue_type(sdev, tag_type);
-	if (retval < 0)
-		return retval;
-
+	sdev_printk(KERN_INFO, sdev,
+		    "ignoring write to deprecated queue_type attribute");
 	return count;
 }
 
@@ -938,10 +920,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
 	    !sdev->host->hostt->change_queue_depth)
 		return 0;
 
-	if (attr == &dev_attr_queue_type.attr &&
-	    !sdev->host->hostt->change_queue_type)
-		return S_IRUGO;
-
 	return attr->mode;
 }
 
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index fa2aece76cc2..31bbb0da3397 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
 {
 	if (cmd->flags & SCMD_TAGGED) {
-		*msg++ = MSG_SIMPLE_TAG;
+		*msg++ = SIMPLE_QUEUE_TAG;
 		*msg++ = cmd->request->tag;
 		return 2;
 	}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index e3ba251fb6e7..4cff0ddc2c25 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	if (ret == -EAGAIN) {
 		/* no more space */
 
-		if (cmd_request->bounce_sgl_count) {
+		if (cmd_request->bounce_sgl_count)
 			destroy_bounce_buffer(cmd_request->bounce_sgl,
 				cmd_request->bounce_sgl_count);
 
 			ret = SCSI_MLQUEUE_DEVICE_BUSY;
 			goto queue_error;
-		}
 	}
 
 	return 0;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 43781c9fe521..b410499cddca 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -663,7 +663,7 @@ static int img_spfi_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int img_spfi_runtime_suspend(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
@@ -692,7 +692,7 @@ static int img_spfi_runtime_resume(struct device *dev)
 
 	return 0;
 }
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
 static int img_spfi_suspend(struct device *dev)
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 0e48f8c2037d..1bbac0378bf7 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -413,7 +413,7 @@ static int meson_spifc_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int meson_spifc_runtime_suspend(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
@@ -431,7 +431,7 @@ static int meson_spifc_runtime_resume(struct device *dev)
 
 	return clk_prepare_enable(spifc->clk);
 }
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops meson_spifc_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 8156b4c0f568..3925db160650 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -42,28 +42,6 @@
 
 #include "lustre_patchless_compat.h"
 
-# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
-
-static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
-				 struct dentry *dentry)
-{
-	struct path path;
-	struct path old_pwd;
-
-	path.mnt = mnt;
-	path.dentry = dentry;
-	LOCK_FS_STRUCT(fs);
-	old_pwd = fs->pwd;
-	path_get(&path);
-	fs->pwd = path;
-	UNLOCK_FS_STRUCT(fs);
-
-	if (old_pwd.dentry)
-		path_put(&old_pwd);
-}
-
-
 /*
  * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
  * ATTR_* attributes (see bug 13828)
@@ -110,8 +88,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
 #define cfs_bio_io_error(a, b) bio_io_error((a))
 #define cfs_bio_endio(a, b, c) bio_endio((a), (c))
 
-#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
-#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
 #define cfs_path_put(nd) path_put(&(nd)->path)
 
 
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 407718a0026f..1ac7a702ce26 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -661,7 +661,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
 	int mode;
 	int err;
 
-	mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+	mode = (0755 & ~current_umask()) | S_IFDIR;
 	op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
 				     strlen(filename), mode, LUSTRE_OPC_MKDIR,
 				     lump);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6e423aa6a6e4..a3367bfb1456 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -2372,21 +2372,6 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
 	return buf;
 }
 
-static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
-{
-	char *path = NULL;
-
-	struct path p;
-
-	p.dentry = dentry;
-	p.mnt = current->fs->root.mnt;
-	path_get(&p);
-	path = d_path(&p, buf, bufsize);
-	path_put(&p);
-
-	return path;
-}
-
 void ll_dirty_page_discard_warn(struct page *page, int ioret)
 {
 	char *buf, *path = NULL;
@@ -2398,7 +2383,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
 	if (buf != NULL) {
 		dentry = d_find_alias(page->mapping->host);
 		if (dentry != NULL)
-			path = ll_d_path(dentry, buf, PAGE_SIZE);
+			path = dentry_path_raw(dentry, buf, PAGE_SIZE);
 	}
 
 	CDEBUG(D_WARNING,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 73e58d22e325..55f6774f706f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
 
 	return ret;
 r2t_out:
+	iscsit_unregister_transport(&iscsi_target_transport);
 	kmem_cache_destroy(lio_r2t_cache);
 ooo_out:
 	kmem_cache_destroy(lio_ooo_cache);
@@ -943,17 +944,17 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 */
 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
-		sam_task_attr = MSG_SIMPLE_TAG;
+		sam_task_attr = TCM_SIMPLE_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
-		sam_task_attr = MSG_ORDERED_TAG;
+		sam_task_attr = TCM_ORDERED_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
-		sam_task_attr = MSG_HEAD_TAG;
+		sam_task_attr = TCM_HEAD_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
-		sam_task_attr = MSG_ACA_TAG;
+		sam_task_attr = TCM_ACA_TAG;
 	else {
 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
-			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
-		sam_task_attr = MSG_SIMPLE_TAG;
+			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = TCM_SIMPLE_TAG;
 	}
 
 	cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
@@ -1811,7 +1812,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	transport_init_se_cmd(&cmd->se_cmd,
 			&lio_target_fabric_configfs->tf_ops,
 			conn->sess->se_sess, 0, DMA_NONE,
-			MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
+			TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
 
 	target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
 	sess_ref = true;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 302eb3b78715..09a522bae222 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -790,7 +790,6 @@ struct iscsi_np {
 	void *np_context;
 	struct iscsit_transport *np_transport;
 	struct list_head np_list;
-	struct iscsi_tpg_np *tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 480f2e0ecc11..713c0c1877ab 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
 {
 	struct iscsi_session *sess = NULL;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
-	enum target_prot_op sup_pro_ops;
 	int ret;
 
 	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
 		kfree(sess);
 		return -ENOMEM;
 	}
-	sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-	sess->se_sess = transport_init_session(sup_pro_ops);
+	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
 	}
 	kfree(conn->sess->sess_ops);
 	kfree(conn->sess);
+	conn->sess = NULL;
 
 old_sess_out:
 	iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ old_sess_out:
 		conn->sock = NULL;
 	}
 
+	if (conn->conn_transport->iscsit_wait_conn)
+		conn->conn_transport->iscsit_wait_conn(conn);
+
 	if (conn->conn_transport->iscsit_free_conn)
 		conn->conn_transport->iscsit_free_conn(conn);
 
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	}
 	login->zero_tsih = zero_tsih;
 
+	conn->sess->se_sess->sup_prot_ops =
+		conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+
 	tpg = conn->tpg;
 	if (!tpg) {
 		pr_err("Unable to locate struct iscsi_conn->tpg\n");
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index c3cb5c15efda..9053a3c0c6e5 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	init_completion(&tpg_np->tpg_np_comp);
 	kref_init(&tpg_np->tpg_np_kref);
 	tpg_np->tpg_np = np;
-	np->tpg_np = tpg_np;
 	tpg_np->tpg = tpg;
 
 	spin_lock(&tpg->tpg_np_lock);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 882728fac30c..08217d62fb0d 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
 
 void iscsit_put_transport(struct iscsit_transport *t)
 {
-	if (t->owner)
-		module_put(t->owner);
+	module_put(t->owner);
 }
 
 int iscsit_register_transport(struct iscsit_transport *t)
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7c6a95bcb35e..bcd88ec99793 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
 	struct iscsi_conn *conn,
 	struct iscsi_data_count *count)
 {
-	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+	int ret, iov_len;
 	struct kvec *iov_p;
 	struct msghdr msg;
 
 	if (!conn || !conn->sock || !conn->conn_ops)
 		return -1;
 
-	if (data <= 0) {
-		pr_err("Data length is: %d\n", data);
+	if (count->data_length <= 0) {
+		pr_err("Data length is: %d\n", count->data_length);
 		return -1;
 	}
 
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
 	iov_p = count->iov;
 	iov_len = count->iov_count;
 
-	while (total_tx < data) {
-		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
-					(data - total_tx));
-		if (tx_loop <= 0) {
-			pr_debug("tx_loop: %d total_tx %d\n",
-				tx_loop, total_tx);
-			return tx_loop;
-		}
-		total_tx += tx_loop;
-		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
-				tx_loop, total_tx, data);
+	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+			     count->data_length);
+	if (ret != count->data_length) {
+		pr_err("Unexpected ret: %d send data %d\n",
+		       ret, count->data_length);
+		return -EPIPE;
 	}
+	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
 
-	return total_tx;
+	return ret;
 }
 
 int rx_data(
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4d1b7224a7f2..6b3c32954689 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
 		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 		goto out_done;
 	}
-	tl_nexus = tl_hba->tl_nexus;
+	tl_nexus = tl_tpg->tl_nexus;
 	if (!tl_nexus) {
 		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 				" does not exist\n");
@@ -168,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
 
 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-			transfer_length, MSG_SIMPLE_TAG,
+			transfer_length, TCM_SIMPLE_TAG,
 			sc->sc_data_direction, 0,
 			scsi_sglist(sc), scsi_sg_count(sc),
 			sgl_bidi, sgl_bidi_count,
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
  * to struct scsi_device
  */
 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
-			      struct tcm_loop_nexus *tl_nexus,
 			      int lun, int task, enum tcm_tmreq_table tmr)
 {
 	struct se_cmd *se_cmd = NULL;
 	struct se_session *se_sess;
 	struct se_portal_group *se_tpg;
+	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_cmd *tl_cmd = NULL;
 	struct tcm_loop_tmr *tl_tmr = NULL;
 	int ret = TMR_FUNCTION_FAILED, rc;
 
+	/*
+	 * Locate the tl_nexus and se_sess pointers
+	 */
+	tl_nexus = tl_tpg->tl_nexus;
+	if (!tl_nexus) {
+		pr_err("Unable to perform device reset without"
+		       " active I_T Nexus\n");
+		return ret;
+	}
+
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
 		pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,12 +253,12 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 
 	se_cmd = &tl_cmd->tl_se_cmd;
 	se_tpg = &tl_tpg->tl_se_tpg;
-	se_sess = tl_nexus->se_sess;
+	se_sess = tl_tpg->tl_nexus->se_sess;
 	/*
 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
-				DMA_NONE, MSG_SIMPLE_TAG,
+				DMA_NONE, TCM_SIMPLE_TAG,
 				&tl_cmd->tl_sense_buf[0]);
 
 	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
@@ -288,7 +298,6 @@ release:
 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 {
 	struct tcm_loop_hba *tl_hba;
-	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_tpg *tl_tpg;
 	int ret = FAILED;
 
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 	 * Locate the tcm_loop_hba_t pointer
 	 */
 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-	/*
-	 * Locate the tl_nexus and se_sess pointers
-	 */
-	tl_nexus = tl_hba->tl_nexus;
-	if (!tl_nexus) {
-		pr_err("Unable to perform device reset without"
-		       " active I_T Nexus\n");
-		return FAILED;
-	}
-
-	/*
-	 * Locate the tl_tpg pointer from TargetID in sc->device->id
-	 */
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 				 sc->request->tag, TMR_ABORT_TASK);
 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 {
 	struct tcm_loop_hba *tl_hba;
-	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_tpg *tl_tpg;
 	int ret = FAILED;
 
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 * Locate the tcm_loop_hba_t pointer
 	 */
 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-	/*
-	 * Locate the tl_nexus and se_sess pointers
-	 */
-	tl_nexus = tl_hba->tl_nexus;
-	if (!tl_nexus) {
-		pr_err("Unable to perform device reset without"
-		       " active I_T Nexus\n");
-		return FAILED;
-	}
-	/*
-	 * Locate the tl_tpg pointer from TargetID in sc->device->id
-	 */
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+
+	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 				 0, TMR_LUN_RESET);
 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
@@ -385,7 +369,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
 	.name = "TCM_Loopback",
 	.queuecommand = tcm_loop_queuecommand,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = tcm_loop_abort_task,
 	.eh_device_reset_handler = tcm_loop_device_reset,
 	.eh_target_reset_handler = tcm_loop_target_reset,
@@ -940,8 +923,8 @@ static int tcm_loop_make_nexus(
 	struct tcm_loop_nexus *tl_nexus;
 	int ret = -ENOMEM;
 
-	if (tl_tpg->tl_hba->tl_nexus) {
-		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+	if (tl_tpg->tl_nexus) {
+		pr_debug("tl_tpg->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
@@ -976,7 +959,7 @@ static int tcm_loop_make_nexus(
 	 */
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 			tl_nexus->se_sess, tl_nexus);
-	tl_tpg->tl_hba->tl_nexus = tl_nexus;
+	tl_tpg->tl_nexus = tl_nexus;
 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
@@ -992,12 +975,8 @@ static int tcm_loop_drop_nexus(
 {
 	struct se_session *se_sess;
 	struct tcm_loop_nexus *tl_nexus;
-	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
 
-	if (!tl_hba)
-		return -ENODEV;
-
-	tl_nexus = tl_hba->tl_nexus;
+	tl_nexus = tpg->tl_nexus;
 	if (!tl_nexus)
 		return -ENODEV;
 
@@ -1013,13 +992,13 @@ static int tcm_loop_drop_nexus(
 	}
 
 	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
-		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
 	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
 	 */
 	transport_deregister_session(tl_nexus->se_sess);
-	tpg->tl_hba->tl_nexus = NULL;
+	tpg->tl_nexus = NULL;
 	kfree(tl_nexus);
 	return 0;
 }
@@ -1035,7 +1014,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
 	struct tcm_loop_nexus *tl_nexus;
 	ssize_t ret;
 
-	tl_nexus = tl_tpg->tl_hba->tl_nexus;
+	tl_nexus = tl_tpg->tl_nexus;
 	if (!tl_nexus)
 		return -ENODEV;
 
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 54c59d0b6608..6ae49f272ba6 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
 };
 
 struct tcm_loop_nexus {
-	int it_nexus_active;
-	/*
-	 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
-	 */
-	struct scsi_host *sh;
 	/*
 	 * Pointer to TCM session for I_T Nexus
 	 */
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
 	atomic_t tl_tpg_port_count;
 	struct se_portal_group tl_se_tpg;
 	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
 };
 
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
 	struct se_hba_s *se_hba;
 	struct se_lun *tl_hba_lun;
 	struct se_port *tl_hba_lun_sep;
-	struct tcm_loop_nexus *tl_nexus;
 	struct device dev;
 	struct Scsi_Host *sh;
 	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e7e93727553c..9512af6a8114 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1237,7 +1237,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
 
 	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
 			      req->sense_buf, unpacked_lun, data_length,
-			      MSG_SIMPLE_TAG, data_dir, 0))
+			      TCM_SIMPLE_TAG, data_dir, 0))
 		goto err;
 
 	return;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 79f9296a08ae..75d89adfccc0 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -50,6 +50,19 @@
50#include "target_core_rd.h" 50#include "target_core_rd.h"
51#include "target_core_xcopy.h" 51#include "target_core_xcopy.h"
52 52
53#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
54static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
55{ \
56 struct target_backend_cits *tbc = &sa->tb_cits; \
57 struct config_item_type *cit = &tbc->tb_##_name##_cit; \
58 \
59 cit->ct_item_ops = _item_ops; \
60 cit->ct_group_ops = _group_ops; \
61 cit->ct_attrs = _attrs; \
62 cit->ct_owner = sa->owner; \
63 pr_debug("Setup generic %s\n", __stringify(_name)); \
64}
65
53extern struct t10_alua_lu_gp *default_lu_gp; 66extern struct t10_alua_lu_gp *default_lu_gp;
54 67
55static LIST_HEAD(g_tf_list); 68static LIST_HEAD(g_tf_list);
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
126 139
127 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" 140 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
128 " %s\n", group, name); 141 " %s\n", group, name);
129 /* 142
130 * Below are some hardcoded request_module() calls to automatically 143 tf = target_core_get_fabric(name);
131 * local fabric modules when the following is called: 144 if (!tf) {
132 * 145 pr_err("target_core_register_fabric() trying autoload for %s\n",
133 * mkdir -p /sys/kernel/config/target/$MODULE_NAME 146 name);
134 * 147
135 * Note that this does not limit which TCM fabric module can be
136 * registered, but simply provids auto loading logic for modules with
137 * mkdir(2) system calls with known TCM fabric modules.
138 */
139 if (!strncmp(name, "iscsi", 5)) {
140 /* 148 /*
141 * Automatically load the LIO Target fabric module when the 149 * Below are some hardcoded request_module() calls to automatically
142 * following is called: 150 * local fabric modules when the following is called:
143 * 151 *
144 * mkdir -p $CONFIGFS/target/iscsi 152 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
145 */
146 ret = request_module("iscsi_target_mod");
147 if (ret < 0) {
148 pr_err("request_module() failed for"
149 " iscsi_target_mod.ko: %d\n", ret);
150 return ERR_PTR(-EINVAL);
151 }
152 } else if (!strncmp(name, "loopback", 8)) {
153 /*
154 * Automatically load the tcm_loop fabric module when the
155 * following is called:
156 * 153 *
157 * mkdir -p $CONFIGFS/target/loopback 154 * Note that this does not limit which TCM fabric module can be
155 * registered, but simply provides auto loading logic for modules with
156 * mkdir(2) system calls with known TCM fabric modules.
158 */ 157 */
159 ret = request_module("tcm_loop"); 158
160 if (ret < 0) { 159 if (!strncmp(name, "iscsi", 5)) {
161 pr_err("request_module() failed for" 160 /*
162 " tcm_loop.ko: %d\n", ret); 161 * Automatically load the LIO Target fabric module when the
163 return ERR_PTR(-EINVAL); 162 * following is called:
163 *
164 * mkdir -p $CONFIGFS/target/iscsi
165 */
166 ret = request_module("iscsi_target_mod");
167 if (ret < 0) {
168 pr_err("request_module() failed for"
169 " iscsi_target_mod.ko: %d\n", ret);
170 return ERR_PTR(-EINVAL);
171 }
172 } else if (!strncmp(name, "loopback", 8)) {
173 /*
174 * Automatically load the tcm_loop fabric module when the
175 * following is called:
176 *
177 * mkdir -p $CONFIGFS/target/loopback
178 */
179 ret = request_module("tcm_loop");
180 if (ret < 0) {
181 pr_err("request_module() failed for"
182 " tcm_loop.ko: %d\n", ret);
183 return ERR_PTR(-EINVAL);
184 }
164 } 185 }
186
187 tf = target_core_get_fabric(name);
165 } 188 }
166 189
167 tf = target_core_get_fabric(name);
168 if (!tf) { 190 if (!tf) {
169 pr_err("target_core_get_fabric() failed for %s\n", 191 pr_err("target_core_get_fabric() failed for %s\n",
170 name); 192 name);
171 return ERR_PTR(-EINVAL); 193 return ERR_PTR(-EINVAL);
172 } 194 }
173 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" 195 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
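The hunk above inverts the lookup order: target_core_get_fabric() runs first, and the hardcoded request_module() calls only fire when the fabric is not yet registered, followed by a second lookup. In outline (a condensed sketch of the new flow, error paths collapsed):

	tf = target_core_get_fabric(name);
	if (!tf) {
		/* try autoloading a known module for this fabric name */
		if (!strncmp(name, "iscsi", 5))
			ret = request_module("iscsi_target_mod");
		else if (!strncmp(name, "loopback", 8))
			ret = request_module("tcm_loop");

		tf = target_core_get_fabric(name);	/* retry after load */
	}
	if (!tf)
		return ERR_PTR(-EINVAL);

This avoids a pointless request_module() round trip when the fabric module is already loaded.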
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
562// Stop functions called by external Target Fabrics Modules 584// Stop functions called by external Target Fabrics Modules
563//############################################################################*/ 585//############################################################################*/
564 586
565/* Start functions for struct config_item_type target_core_dev_attrib_cit */ 587/* Start functions for struct config_item_type tb_dev_attrib_cit */
566
567#define DEF_DEV_ATTRIB_SHOW(_name) \
568static ssize_t target_core_dev_show_attr_##_name( \
569 struct se_dev_attrib *da, \
570 char *page) \
571{ \
572 return snprintf(page, PAGE_SIZE, "%u\n", \
573 (u32)da->da_dev->dev_attrib._name); \
574}
575
576#define DEF_DEV_ATTRIB_STORE(_name) \
577static ssize_t target_core_dev_store_attr_##_name( \
578 struct se_dev_attrib *da, \
579 const char *page, \
580 size_t count) \
581{ \
582 unsigned long val; \
583 int ret; \
584 \
585 ret = kstrtoul(page, 0, &val); \
586 if (ret < 0) { \
587 pr_err("kstrtoul() failed with" \
588 " ret: %d\n", ret); \
589 return -EINVAL; \
590 } \
591 ret = se_dev_set_##_name(da->da_dev, (u32)val); \
592 \
593 return (!ret) ? count : -EINVAL; \
594}
595
596#define DEF_DEV_ATTRIB(_name) \
597DEF_DEV_ATTRIB_SHOW(_name); \
598DEF_DEV_ATTRIB_STORE(_name);
599
600#define DEF_DEV_ATTRIB_RO(_name) \
601DEF_DEV_ATTRIB_SHOW(_name);
602 588
603CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); 589CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
604#define SE_DEV_ATTR(_name, _mode) \
605static struct target_core_dev_attrib_attribute \
606 target_core_dev_attrib_##_name = \
607 __CONFIGFS_EATTR(_name, _mode, \
608 target_core_dev_show_attr_##_name, \
609 target_core_dev_store_attr_##_name);
610
611#define SE_DEV_ATTR_RO(_name); \
612static struct target_core_dev_attrib_attribute \
613 target_core_dev_attrib_##_name = \
614 __CONFIGFS_EATTR_RO(_name, \
615 target_core_dev_show_attr_##_name);
616
617DEF_DEV_ATTRIB(emulate_model_alias);
618SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
619
620DEF_DEV_ATTRIB(emulate_dpo);
621SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
622
623DEF_DEV_ATTRIB(emulate_fua_write);
624SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
625
626DEF_DEV_ATTRIB(emulate_fua_read);
627SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
628
629DEF_DEV_ATTRIB(emulate_write_cache);
630SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
631
632DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
633SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
634
635DEF_DEV_ATTRIB(emulate_tas);
636SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
637
638DEF_DEV_ATTRIB(emulate_tpu);
639SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
640
641DEF_DEV_ATTRIB(emulate_tpws);
642SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
643
644DEF_DEV_ATTRIB(emulate_caw);
645SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
646
647DEF_DEV_ATTRIB(emulate_3pc);
648SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
649
650DEF_DEV_ATTRIB(pi_prot_type);
651SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
652
653DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
654SE_DEV_ATTR_RO(hw_pi_prot_type);
655
656DEF_DEV_ATTRIB(pi_prot_format);
657SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
658
659DEF_DEV_ATTRIB(enforce_pr_isids);
660SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
661
662DEF_DEV_ATTRIB(is_nonrot);
663SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
664
665DEF_DEV_ATTRIB(emulate_rest_reord);
666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
667
668DEF_DEV_ATTRIB(force_pr_aptpl);
669SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
670
671DEF_DEV_ATTRIB_RO(hw_block_size);
672SE_DEV_ATTR_RO(hw_block_size);
673
674DEF_DEV_ATTRIB(block_size);
675SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
676
677DEF_DEV_ATTRIB_RO(hw_max_sectors);
678SE_DEV_ATTR_RO(hw_max_sectors);
679
680DEF_DEV_ATTRIB(fabric_max_sectors);
681SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
682
683DEF_DEV_ATTRIB(optimal_sectors);
684SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
685
686DEF_DEV_ATTRIB_RO(hw_queue_depth);
687SE_DEV_ATTR_RO(hw_queue_depth);
688
689DEF_DEV_ATTRIB(queue_depth);
690SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
691
692DEF_DEV_ATTRIB(max_unmap_lba_count);
693SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
694
695DEF_DEV_ATTRIB(max_unmap_block_desc_count);
696SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
697
698DEF_DEV_ATTRIB(unmap_granularity);
699SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
700
701DEF_DEV_ATTRIB(unmap_granularity_alignment);
702SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
703
704DEF_DEV_ATTRIB(max_write_same_len);
705SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
706
707CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); 590CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
708 591
709static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
710 &target_core_dev_attrib_emulate_model_alias.attr,
711 &target_core_dev_attrib_emulate_dpo.attr,
712 &target_core_dev_attrib_emulate_fua_write.attr,
713 &target_core_dev_attrib_emulate_fua_read.attr,
714 &target_core_dev_attrib_emulate_write_cache.attr,
715 &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
716 &target_core_dev_attrib_emulate_tas.attr,
717 &target_core_dev_attrib_emulate_tpu.attr,
718 &target_core_dev_attrib_emulate_tpws.attr,
719 &target_core_dev_attrib_emulate_caw.attr,
720 &target_core_dev_attrib_emulate_3pc.attr,
721 &target_core_dev_attrib_pi_prot_type.attr,
722 &target_core_dev_attrib_hw_pi_prot_type.attr,
723 &target_core_dev_attrib_pi_prot_format.attr,
724 &target_core_dev_attrib_enforce_pr_isids.attr,
725 &target_core_dev_attrib_force_pr_aptpl.attr,
726 &target_core_dev_attrib_is_nonrot.attr,
727 &target_core_dev_attrib_emulate_rest_reord.attr,
728 &target_core_dev_attrib_hw_block_size.attr,
729 &target_core_dev_attrib_block_size.attr,
730 &target_core_dev_attrib_hw_max_sectors.attr,
731 &target_core_dev_attrib_fabric_max_sectors.attr,
732 &target_core_dev_attrib_optimal_sectors.attr,
733 &target_core_dev_attrib_hw_queue_depth.attr,
734 &target_core_dev_attrib_queue_depth.attr,
735 &target_core_dev_attrib_max_unmap_lba_count.attr,
736 &target_core_dev_attrib_max_unmap_block_desc_count.attr,
737 &target_core_dev_attrib_unmap_granularity.attr,
738 &target_core_dev_attrib_unmap_granularity_alignment.attr,
739 &target_core_dev_attrib_max_write_same_len.attr,
740 NULL,
741};
742
743static struct configfs_item_operations target_core_dev_attrib_ops = { 592static struct configfs_item_operations target_core_dev_attrib_ops = {
744 .show_attribute = target_core_dev_attrib_attr_show, 593 .show_attribute = target_core_dev_attrib_attr_show,
745 .store_attribute = target_core_dev_attrib_attr_store, 594 .store_attribute = target_core_dev_attrib_attr_store,
746}; 595};
747 596
748static struct config_item_type target_core_dev_attrib_cit = { 597TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
749 .ct_item_ops = &target_core_dev_attrib_ops,
750 .ct_attrs = target_core_dev_attrib_attrs,
751 .ct_owner = THIS_MODULE,
752};
753 598
754/* End functions for struct config_item_type target_core_dev_attrib_cit */ 599/* End functions for struct config_item_type tb_dev_attrib_cit */
755 600
756/* Start functions for struct config_item_type target_core_dev_wwn_cit */ 601/* Start functions for struct config_item_type tb_dev_wwn_cit */
757 602
758CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); 603CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
759#define SE_DEV_WWN_ATTR(_name, _mode) \ 604#define SE_DEV_WWN_ATTR(_name, _mode) \
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
984 .store_attribute = target_core_dev_wwn_attr_store, 829 .store_attribute = target_core_dev_wwn_attr_store,
985}; 830};
986 831
987static struct config_item_type target_core_dev_wwn_cit = { 832TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
988 .ct_item_ops = &target_core_dev_wwn_ops,
989 .ct_attrs = target_core_dev_wwn_attrs,
990 .ct_owner = THIS_MODULE,
991};
992 833
993/* End functions for struct config_item_type target_core_dev_wwn_cit */ 834/* End functions for struct config_item_type tb_dev_wwn_cit */
994 835
995/* Start functions for struct config_item_type target_core_dev_pr_cit */ 836/* Start functions for struct config_item_type tb_dev_pr_cit */
996 837
997CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); 838CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
998#define SE_DEV_PR_ATTR(_name, _mode) \ 839#define SE_DEV_PR_ATTR(_name, _mode) \
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
1453 .store_attribute = target_core_dev_pr_attr_store, 1294 .store_attribute = target_core_dev_pr_attr_store,
1454}; 1295};
1455 1296
1456static struct config_item_type target_core_dev_pr_cit = { 1297TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
1457 .ct_item_ops = &target_core_dev_pr_ops,
1458 .ct_attrs = target_core_dev_pr_attrs,
1459 .ct_owner = THIS_MODULE,
1460};
1461 1298
1462/* End functions for struct config_item_type target_core_dev_pr_cit */ 1299/* End functions for struct config_item_type tb_dev_pr_cit */
1463 1300
1464/* Start functions for struct config_item_type target_core_dev_cit */ 1301/* Start functions for struct config_item_type tb_dev_cit */
1465 1302
1466static ssize_t target_core_show_dev_info(void *p, char *page) 1303static ssize_t target_core_show_dev_info(void *p, char *page)
1467{ 1304{
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
1925 .store = target_core_store_dev_lba_map, 1762 .store = target_core_store_dev_lba_map,
1926}; 1763};
1927 1764
1928static struct configfs_attribute *lio_core_dev_attrs[] = { 1765static struct configfs_attribute *target_core_dev_attrs[] = {
1929 &target_core_attr_dev_info.attr, 1766 &target_core_attr_dev_info.attr,
1930 &target_core_attr_dev_control.attr, 1767 &target_core_attr_dev_control.attr,
1931 &target_core_attr_dev_alias.attr, 1768 &target_core_attr_dev_alias.attr,
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
1984 .store_attribute = target_core_dev_store, 1821 .store_attribute = target_core_dev_store,
1985}; 1822};
1986 1823
1987static struct config_item_type target_core_dev_cit = { 1824TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
1988 .ct_item_ops = &target_core_dev_item_ops,
1989 .ct_attrs = lio_core_dev_attrs,
1990 .ct_owner = THIS_MODULE,
1991};
1992 1825
1993/* End functions for struct config_item_type target_core_dev_cit */ 1826/* End functions for struct config_item_type tb_dev_cit */
1994 1827
1995/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ 1828/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
1996 1829
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2670 2503
2671/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2504/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2672 2505
2673/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2506/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2674 2507
2675static struct config_group *target_core_alua_create_tg_pt_gp( 2508static struct config_group *target_core_alua_create_tg_pt_gp(
2676 struct config_group *group, 2509 struct config_group *group,
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2721 .drop_item = &target_core_alua_drop_tg_pt_gp, 2554 .drop_item = &target_core_alua_drop_tg_pt_gp,
2722}; 2555};
2723 2556
2724static struct config_item_type target_core_alua_tg_pt_gps_cit = { 2557TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
2725 .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
2726 .ct_owner = THIS_MODULE,
2727};
2728 2558
2729/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2559/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2730 2560
2731/* Start functions for struct config_item_type target_core_alua_cit */ 2561/* Start functions for struct config_item_type target_core_alua_cit */
2732 2562
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
2744 2574
2745/* End functions for struct config_item_type target_core_alua_cit */ 2575/* End functions for struct config_item_type target_core_alua_cit */
2746 2576
2747/* Start functions for struct config_item_type target_core_stat_cit */ 2577/* Start functions for struct config_item_type tb_dev_stat_cit */
2748 2578
2749static struct config_group *target_core_stat_mkdir( 2579static struct config_group *target_core_stat_mkdir(
2750 struct config_group *group, 2580 struct config_group *group,
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
2765 .drop_item = &target_core_stat_rmdir, 2595 .drop_item = &target_core_stat_rmdir,
2766}; 2596};
2767 2597
2768static struct config_item_type target_core_stat_cit = { 2598TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
2769 .ct_group_ops = &target_core_stat_group_ops,
2770 .ct_owner = THIS_MODULE,
2771};
2772 2599
2773/* End functions for struct config_item_type target_core_stat_cit */ 2600/* End functions for struct config_item_type tb_dev_stat_cit */
2774 2601
2775/* Start functions for struct config_item_type target_core_hba_cit */ 2602/* Start functions for struct config_item_type target_core_hba_cit */
2776 2603
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
2806 if (!dev_cg->default_groups) 2633 if (!dev_cg->default_groups)
2807 goto out_free_device; 2634 goto out_free_device;
2808 2635
2809 config_group_init_type_name(dev_cg, name, &target_core_dev_cit); 2636 config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
2810 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 2637 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
2811 &target_core_dev_attrib_cit); 2638 &t->tb_cits.tb_dev_attrib_cit);
2812 config_group_init_type_name(&dev->dev_pr_group, "pr", 2639 config_group_init_type_name(&dev->dev_pr_group, "pr",
2813 &target_core_dev_pr_cit); 2640 &t->tb_cits.tb_dev_pr_cit);
2814 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", 2641 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
2815 &target_core_dev_wwn_cit); 2642 &t->tb_cits.tb_dev_wwn_cit);
2816 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, 2643 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
2817 "alua", &target_core_alua_tg_pt_gps_cit); 2644 "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
2818 config_group_init_type_name(&dev->dev_stat_grps.stat_group, 2645 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
2819 "statistics", &target_core_stat_cit); 2646 "statistics", &t->tb_cits.tb_dev_stat_cit);
2820 2647
2821 dev_cg->default_groups[0] = &dev->dev_attrib.da_group; 2648 dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
2822 dev_cg->default_groups[1] = &dev->dev_pr_group; 2649 dev_cg->default_groups[1] = &dev->dev_pr_group;
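target_core_make_subdev() now pulls every per-device config_item_type from the backend's tb_cits table instead of the file-local statics, so an iblock device and a fileio device can expose different attribute sets under the same configfs layout. The sketch below assumes t is the HBA's backend, taken from hba->transport earlier in the function (that line is outside this hunk):

	struct se_subsystem_api *t = hba->transport;	/* assumed source of t */

	config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);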
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
3110 2937
3111/* Stop functions for struct config_item_type target_core_hba_cit */ 2938/* Stop functions for struct config_item_type target_core_hba_cit */
3112 2939
2940void target_core_setup_sub_cits(struct se_subsystem_api *sa)
2941{
2942 target_core_setup_dev_cit(sa);
2943 target_core_setup_dev_attrib_cit(sa);
2944 target_core_setup_dev_pr_cit(sa);
2945 target_core_setup_dev_wwn_cit(sa);
2946 target_core_setup_dev_alua_tg_pt_gps_cit(sa);
2947 target_core_setup_dev_stat_cit(sa);
2948}
2949EXPORT_SYMBOL(target_core_setup_sub_cits);
2950
3113static int __init target_core_init_configfs(void) 2951static int __init target_core_init_configfs(void)
3114{ 2952{
3115 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 2953 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
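target_core_setup_sub_cits() is the one call a backend makes to populate all six generic cits; note that dev_attrib is deliberately set up with ct_attrs == NULL so each backend can install its own attribute array afterwards. A minimal sketch of the resulting registration pattern in a backend module, using hypothetical my_backend_* names (the fileio and iblock hunks later in this diff follow exactly this shape):

	static int __init my_backend_module_init(void)
	{
		struct target_backend_cits *tbc = &my_backend_template.tb_cits;

		target_core_setup_sub_cits(&my_backend_template);
		/* dev_attrib was set up with ct_attrs == NULL; install ours */
		tbc->tb_dev_attrib_cit.ct_attrs = my_backend_dev_attrs;

		return transport_subsystem_register(&my_backend_template);
	}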
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c45f9e907e44..7653cfb027a2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
659 dev, dev->dev_attrib.max_unmap_lba_count); 659 dev, dev->dev_attrib.max_unmap_lba_count);
660 return 0; 660 return 0;
661} 661}
662EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
662 663
663int se_dev_set_max_unmap_block_desc_count( 664int se_dev_set_max_unmap_block_desc_count(
664 struct se_device *dev, 665 struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
670 dev, dev->dev_attrib.max_unmap_block_desc_count); 671 dev, dev->dev_attrib.max_unmap_block_desc_count);
671 return 0; 672 return 0;
672} 673}
674EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
673 675
674int se_dev_set_unmap_granularity( 676int se_dev_set_unmap_granularity(
675 struct se_device *dev, 677 struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
680 dev, dev->dev_attrib.unmap_granularity); 682 dev, dev->dev_attrib.unmap_granularity);
681 return 0; 683 return 0;
682} 684}
685EXPORT_SYMBOL(se_dev_set_unmap_granularity);
683 686
684int se_dev_set_unmap_granularity_alignment( 687int se_dev_set_unmap_granularity_alignment(
685 struct se_device *dev, 688 struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
690 dev, dev->dev_attrib.unmap_granularity_alignment); 693 dev, dev->dev_attrib.unmap_granularity_alignment);
691 return 0; 694 return 0;
692} 695}
696EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
693 697
694int se_dev_set_max_write_same_len( 698int se_dev_set_max_write_same_len(
695 struct se_device *dev, 699 struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
700 dev, dev->dev_attrib.max_write_same_len); 704 dev, dev->dev_attrib.max_write_same_len);
701 return 0; 705 return 0;
702} 706}
707EXPORT_SYMBOL(se_dev_set_max_write_same_len);
703 708
704static void dev_set_t10_wwn_model_alias(struct se_device *dev) 709static void dev_set_t10_wwn_model_alias(struct se_device *dev)
705{ 710{
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
738 743
739 return 0; 744 return 0;
740} 745}
746EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
741 747
742int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 748int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
743{ 749{
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
753 759
754 return 0; 760 return 0;
755} 761}
762EXPORT_SYMBOL(se_dev_set_emulate_dpo);
756 763
757int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 764int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
758{ 765{
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
760 pr_err("Illegal value %d\n", flag); 767 pr_err("Illegal value %d\n", flag);
761 return -EINVAL; 768 return -EINVAL;
762 } 769 }
763
764 if (flag &&
765 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
766 pr_err("emulate_fua_write not supported for pSCSI\n");
767 return -EINVAL;
768 }
769 dev->dev_attrib.emulate_fua_write = flag; 770 dev->dev_attrib.emulate_fua_write = flag;
770 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 771 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
771 dev, dev->dev_attrib.emulate_fua_write); 772 dev, dev->dev_attrib.emulate_fua_write);
772 return 0; 773 return 0;
773} 774}
775EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
774 776
775int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 777int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
776{ 778{
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
786 788
787 return 0; 789 return 0;
788} 790}
791EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
789 792
790int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 793int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
791{ 794{
@@ -794,11 +797,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
794 return -EINVAL; 797 return -EINVAL;
795 } 798 }
796 if (flag && 799 if (flag &&
797 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
798 pr_err("emulate_write_cache not supported for pSCSI\n");
799 return -EINVAL;
800 }
801 if (flag &&
802 dev->transport->get_write_cache) { 800 dev->transport->get_write_cache) {
803 pr_err("emulate_write_cache not supported for this device\n"); 801 pr_err("emulate_write_cache not supported for this device\n");
804 return -EINVAL; 802 return -EINVAL;
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
809 dev, dev->dev_attrib.emulate_write_cache); 807 dev, dev->dev_attrib.emulate_write_cache);
810 return 0; 808 return 0;
811} 809}
810EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
812 811
813int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 812int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
814{ 813{
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
829 828
830 return 0; 829 return 0;
831} 830}
831EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
832 832
833int se_dev_set_emulate_tas(struct se_device *dev, int flag) 833int se_dev_set_emulate_tas(struct se_device *dev, int flag)
834{ 834{
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
849 849
850 return 0; 850 return 0;
851} 851}
852EXPORT_SYMBOL(se_dev_set_emulate_tas);
852 853
853int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 854int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
854{ 855{
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
870 dev, flag); 871 dev, flag);
871 return 0; 872 return 0;
872} 873}
874EXPORT_SYMBOL(se_dev_set_emulate_tpu);
873 875
874int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 876int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
875{ 877{
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
891 dev, flag); 893 dev, flag);
892 return 0; 894 return 0;
893} 895}
896EXPORT_SYMBOL(se_dev_set_emulate_tpws);
894 897
895int se_dev_set_emulate_caw(struct se_device *dev, int flag) 898int se_dev_set_emulate_caw(struct se_device *dev, int flag)
896{ 899{
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
904 907
905 return 0; 908 return 0;
906} 909}
910EXPORT_SYMBOL(se_dev_set_emulate_caw);
907 911
908int se_dev_set_emulate_3pc(struct se_device *dev, int flag) 912int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
909{ 913{
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
917 921
918 return 0; 922 return 0;
919} 923}
924EXPORT_SYMBOL(se_dev_set_emulate_3pc);
920 925
921int se_dev_set_pi_prot_type(struct se_device *dev, int flag) 926int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
922{ 927{
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
970 975
971 return 0; 976 return 0;
972} 977}
978EXPORT_SYMBOL(se_dev_set_pi_prot_type);
973 979
974int se_dev_set_pi_prot_format(struct se_device *dev, int flag) 980int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
975{ 981{
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
1005 1011
1006 return 0; 1012 return 0;
1007} 1013}
1014EXPORT_SYMBOL(se_dev_set_pi_prot_format);
1008 1015
1009int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1016int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1010{ 1017{
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1017 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1024 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1018 return 0; 1025 return 0;
1019} 1026}
1027EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
1020 1028
1021int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) 1029int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1022{ 1030{
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); 1042 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1035 return 0; 1043 return 0;
1036} 1044}
1045EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
1037 1046
1038int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1047int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1039{ 1048{
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1046 dev, flag); 1055 dev, flag);
1047 return 0; 1056 return 0;
1048} 1057}
1058EXPORT_SYMBOL(se_dev_set_is_nonrot);
1049 1059
1050int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1060int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1051{ 1061{
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1058 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1068 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1059 return 0; 1069 return 0;
1060} 1070}
1071EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
1061 1072
1062/* 1073/*
1063 * Note, this can only be called on unexported SE Device Object. 1074 * Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1076 return -EINVAL; 1087 return -EINVAL;
1077 } 1088 }
1078 1089
1079 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1090 if (queue_depth > dev->dev_attrib.queue_depth) {
1080 if (queue_depth > dev->dev_attrib.hw_queue_depth) { 1091 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1081 pr_err("dev[%p]: Passed queue_depth: %u" 1092 pr_err("dev[%p]: Passed queue_depth:"
1082 " exceeds TCM/SE_Device TCQ: %u\n", 1093 " %u exceeds TCM/SE_Device MAX"
1083 dev, queue_depth, 1094 " TCQ: %u\n", dev, queue_depth,
1084 dev->dev_attrib.hw_queue_depth); 1095 dev->dev_attrib.hw_queue_depth);
1085 return -EINVAL; 1096 return -EINVAL;
1086 } 1097 }
1087 } else {
1088 if (queue_depth > dev->dev_attrib.queue_depth) {
1089 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1090 pr_err("dev[%p]: Passed queue_depth:"
1091 " %u exceeds TCM/SE_Device MAX"
1092 " TCQ: %u\n", dev, queue_depth,
1093 dev->dev_attrib.hw_queue_depth);
1094 return -EINVAL;
1095 }
1096 }
1097 } 1098 }
1098
1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1101 dev, queue_depth); 1101 dev, queue_depth);
1102 return 0; 1102 return 0;
1103} 1103}
1104EXPORT_SYMBOL(se_dev_set_queue_depth);
1104 1105
1105int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1106int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1106{ 1107{
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1123 DA_STATUS_MAX_SECTORS_MIN); 1124 DA_STATUS_MAX_SECTORS_MIN);
1124 return -EINVAL; 1125 return -EINVAL;
1125 } 1126 }
1126 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1127 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1127 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { 1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1129 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1129 " greater than TCM/SE_Device max_sectors:" 1130 " %u\n", dev, fabric_max_sectors,
1130 " %u\n", dev, fabric_max_sectors, 1131 DA_STATUS_MAX_SECTORS_MAX);
1131 dev->dev_attrib.hw_max_sectors); 1132 return -EINVAL;
1132 return -EINVAL;
1133 }
1134 } else {
1135 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1136 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1137 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1138 " %u\n", dev, fabric_max_sectors,
1139 DA_STATUS_MAX_SECTORS_MAX);
1140 return -EINVAL;
1141 }
1142 } 1133 }
1143 /* 1134 /*
1144 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1135 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1155 dev, fabric_max_sectors); 1146 dev, fabric_max_sectors);
1156 return 0; 1147 return 0;
1157} 1148}
1149EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
1158 1150
1159int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1151int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1160{ 1152{
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1164 dev, dev->export_count); 1156 dev, dev->export_count);
1165 return -EINVAL; 1157 return -EINVAL;
1166 } 1158 }
1167 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1168 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1169 " changed for TCM/pSCSI\n", dev);
1170 return -EINVAL;
1171 }
1172 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1159 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1173 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1160 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1174 " greater than fabric_max_sectors: %u\n", dev, 1161 " greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1181 dev, optimal_sectors); 1168 dev, optimal_sectors);
1182 return 0; 1169 return 0;
1183} 1170}
1171EXPORT_SYMBOL(se_dev_set_optimal_sectors);
1184 1172
1185int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1173int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1186{ 1174{
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1201 return -EINVAL; 1189 return -EINVAL;
1202 } 1190 }
1203 1191
1204 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1205 pr_err("dev[%p]: Not allowed to change block_size for"
1206 " Physical Device, use for Linux/SCSI to change"
1207 " block_size for underlying hardware\n", dev);
1208 return -EINVAL;
1209 }
1210
1211 dev->dev_attrib.block_size = block_size; 1192 dev->dev_attrib.block_size = block_size;
1212 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1193 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1213 dev, block_size); 1194 dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1218 1199
1219 return 0; 1200 return 0;
1220} 1201}
1202EXPORT_SYMBOL(se_dev_set_block_size);
1221 1203
1222struct se_lun *core_dev_add_lun( 1204struct se_lun *core_dev_add_lun(
1223 struct se_portal_group *tpg, 1205 struct se_portal_group *tpg,
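Every se_dev_set_*() setter above gains an EXPORT_SYMBOL because the configfs show/store wrappers that call them now live in the backend modules, generated via DEF_TB_DEFAULT_ATTRIBS from the new target_core_backend_configfs.h header. The pSCSI-only rejection branches also move out of the generic setters: a backend that cannot support an attribute simply omits it from its ct_attrs list instead of being special-cased here. A sketch of the kind of store wrapper the header presumably generates; the name and exact macro shape are assumptions, only the reliance on the exported setter is established by this diff:

	/* Hypothetical expansion sketch, not a quote of the header: */
	static ssize_t my_backend_dev_store_attr_block_size(
		struct se_dev_attrib *da, const char *page, size_t count)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(page, 0, &val);
		if (ret < 0)
			return ret;
		/* calls the setter exported above */
		ret = se_dev_set_block_size(da->da_dev, (u32)val);
		return ret ? ret : count;
	}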
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 72c83d98662b..c2aea099ea4a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,6 +37,7 @@
37 37
38#include <target/target_core_base.h> 38#include <target/target_core_base.h>
39#include <target/target_core_backend.h> 39#include <target/target_core_backend.h>
40#include <target/target_core_backend_configfs.h>
40 41
41#include "target_core_file.h" 42#include "target_core_file.h"
42 43
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
934 return sbc_parse_cdb(cmd, &fd_sbc_ops); 935 return sbc_parse_cdb(cmd, &fd_sbc_ops);
935} 936}
936 937
938DEF_TB_DEFAULT_ATTRIBS(fileio);
939
940static struct configfs_attribute *fileio_backend_dev_attrs[] = {
941 &fileio_dev_attrib_emulate_model_alias.attr,
942 &fileio_dev_attrib_emulate_dpo.attr,
943 &fileio_dev_attrib_emulate_fua_write.attr,
944 &fileio_dev_attrib_emulate_fua_read.attr,
945 &fileio_dev_attrib_emulate_write_cache.attr,
946 &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
947 &fileio_dev_attrib_emulate_tas.attr,
948 &fileio_dev_attrib_emulate_tpu.attr,
949 &fileio_dev_attrib_emulate_tpws.attr,
950 &fileio_dev_attrib_emulate_caw.attr,
951 &fileio_dev_attrib_emulate_3pc.attr,
952 &fileio_dev_attrib_pi_prot_type.attr,
953 &fileio_dev_attrib_hw_pi_prot_type.attr,
954 &fileio_dev_attrib_pi_prot_format.attr,
955 &fileio_dev_attrib_enforce_pr_isids.attr,
956 &fileio_dev_attrib_is_nonrot.attr,
957 &fileio_dev_attrib_emulate_rest_reord.attr,
958 &fileio_dev_attrib_force_pr_aptpl.attr,
959 &fileio_dev_attrib_hw_block_size.attr,
960 &fileio_dev_attrib_block_size.attr,
961 &fileio_dev_attrib_hw_max_sectors.attr,
962 &fileio_dev_attrib_fabric_max_sectors.attr,
963 &fileio_dev_attrib_optimal_sectors.attr,
964 &fileio_dev_attrib_hw_queue_depth.attr,
965 &fileio_dev_attrib_queue_depth.attr,
966 &fileio_dev_attrib_max_unmap_lba_count.attr,
967 &fileio_dev_attrib_max_unmap_block_desc_count.attr,
968 &fileio_dev_attrib_unmap_granularity.attr,
969 &fileio_dev_attrib_unmap_granularity_alignment.attr,
970 &fileio_dev_attrib_max_write_same_len.attr,
971 NULL,
972};
973
937static struct se_subsystem_api fileio_template = { 974static struct se_subsystem_api fileio_template = {
938 .name = "fileio", 975 .name = "fileio",
939 .inquiry_prod = "FILEIO", 976 .inquiry_prod = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
957 994
958static int __init fileio_module_init(void) 995static int __init fileio_module_init(void)
959{ 996{
997 struct target_backend_cits *tbc = &fileio_template.tb_cits;
998
999 target_core_setup_sub_cits(&fileio_template);
1000 tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
1001
960 return transport_subsystem_register(&fileio_template); 1002 return transport_subsystem_register(&fileio_template);
961} 1003}
962 1004
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index a25051a37dd7..ff95f95dcd13 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -36,6 +36,7 @@
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_backend.h> 37#include <target/target_core_backend.h>
38#include <target/target_core_fabric.h> 38#include <target/target_core_fabric.h>
39#include <target/target_core_configfs.h>
39 40
40#include "target_core_internal.h" 41#include "target_core_internal.h"
41 42
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
137 return hba; 138 return hba;
138 139
139out_module_put: 140out_module_put:
140 if (hba->transport->owner) 141 module_put(hba->transport->owner);
141 module_put(hba->transport->owner);
142 hba->transport = NULL; 142 hba->transport = NULL;
143out_free_hba: 143out_free_hba:
144 kfree(hba); 144 kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
159 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" 159 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
160 " Core\n", hba->hba_id); 160 " Core\n", hba->hba_id);
161 161
162 if (hba->transport->owner) 162 module_put(hba->transport->owner);
163 module_put(hba->transport->owner);
164 163
165 hba->transport = NULL; 164 hba->transport = NULL;
166 kfree(hba); 165 kfree(hba);
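The NULL checks around module_put() are dropped because module_put() already tolerates a NULL module pointer, so the guard added nothing. In outline:

	/* kernel/module.c behaviour relied on here (body elided): */
	void module_put(struct module *module)
	{
		if (module) {
			/* ... drop the module reference ... */
		}
	}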
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e6b857c6b3f..3efff94fbd97 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -41,6 +41,7 @@
41 41
42#include <target/target_core_base.h> 42#include <target/target_core_base.h>
43#include <target/target_core_backend.h> 43#include <target/target_core_backend.h>
44#include <target/target_core_backend_configfs.h>
44 45
45#include "target_core_iblock.h" 46#include "target_core_iblock.h"
46 47
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
858 return q->flush_flags & REQ_FLUSH; 859 return q->flush_flags & REQ_FLUSH;
859} 860}
860 861
862DEF_TB_DEFAULT_ATTRIBS(iblock);
863
864static struct configfs_attribute *iblock_backend_dev_attrs[] = {
865 &iblock_dev_attrib_emulate_model_alias.attr,
866 &iblock_dev_attrib_emulate_dpo.attr,
867 &iblock_dev_attrib_emulate_fua_write.attr,
868 &iblock_dev_attrib_emulate_fua_read.attr,
869 &iblock_dev_attrib_emulate_write_cache.attr,
870 &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
871 &iblock_dev_attrib_emulate_tas.attr,
872 &iblock_dev_attrib_emulate_tpu.attr,
873 &iblock_dev_attrib_emulate_tpws.attr,
874 &iblock_dev_attrib_emulate_caw.attr,
875 &iblock_dev_attrib_emulate_3pc.attr,
876 &iblock_dev_attrib_pi_prot_type.attr,
877 &iblock_dev_attrib_hw_pi_prot_type.attr,
878 &iblock_dev_attrib_pi_prot_format.attr,
879 &iblock_dev_attrib_enforce_pr_isids.attr,
880 &iblock_dev_attrib_is_nonrot.attr,
881 &iblock_dev_attrib_emulate_rest_reord.attr,
882 &iblock_dev_attrib_force_pr_aptpl.attr,
883 &iblock_dev_attrib_hw_block_size.attr,
884 &iblock_dev_attrib_block_size.attr,
885 &iblock_dev_attrib_hw_max_sectors.attr,
886 &iblock_dev_attrib_fabric_max_sectors.attr,
887 &iblock_dev_attrib_optimal_sectors.attr,
888 &iblock_dev_attrib_hw_queue_depth.attr,
889 &iblock_dev_attrib_queue_depth.attr,
890 &iblock_dev_attrib_max_unmap_lba_count.attr,
891 &iblock_dev_attrib_max_unmap_block_desc_count.attr,
892 &iblock_dev_attrib_unmap_granularity.attr,
893 &iblock_dev_attrib_unmap_granularity_alignment.attr,
894 &iblock_dev_attrib_max_write_same_len.attr,
895 NULL,
896};
897
861static struct se_subsystem_api iblock_template = { 898static struct se_subsystem_api iblock_template = {
862 .name = "iblock", 899 .name = "iblock",
863 .inquiry_prod = "IBLOCK", 900 .inquiry_prod = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
883 920
884static int __init iblock_module_init(void) 921static int __init iblock_module_init(void)
885{ 922{
923 struct target_backend_cits *tbc = &iblock_template.tb_cits;
924
925 target_core_setup_sub_cits(&iblock_template);
926 tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
927
886 return transport_subsystem_register(&iblock_template); 928 return transport_subsystem_register(&iblock_template);
887} 929}
888 930
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e31f42f369ff..60381db90026 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
18 struct se_lun *); 18 struct se_lun *);
19void core_dev_unexport(struct se_device *, struct se_portal_group *, 19void core_dev_unexport(struct se_device *, struct se_portal_group *,
20 struct se_lun *); 20 struct se_lun *);
21int se_dev_set_task_timeout(struct se_device *, u32);
22int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
23int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
24int se_dev_set_unmap_granularity(struct se_device *, u32);
25int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
26int se_dev_set_max_write_same_len(struct se_device *, u32);
27int se_dev_set_emulate_model_alias(struct se_device *, int);
28int se_dev_set_emulate_dpo(struct se_device *, int);
29int se_dev_set_emulate_fua_write(struct se_device *, int);
30int se_dev_set_emulate_fua_read(struct se_device *, int);
31int se_dev_set_emulate_write_cache(struct se_device *, int);
32int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
33int se_dev_set_emulate_tas(struct se_device *, int);
34int se_dev_set_emulate_tpu(struct se_device *, int);
35int se_dev_set_emulate_tpws(struct se_device *, int);
36int se_dev_set_emulate_caw(struct se_device *, int);
37int se_dev_set_emulate_3pc(struct se_device *, int);
38int se_dev_set_pi_prot_type(struct se_device *, int);
39int se_dev_set_pi_prot_format(struct se_device *, int);
40int se_dev_set_enforce_pr_isids(struct se_device *, int);
41int se_dev_set_force_pr_aptpl(struct se_device *, int);
42int se_dev_set_is_nonrot(struct se_device *, int);
43int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
44int se_dev_set_queue_depth(struct se_device *, u32);
45int se_dev_set_max_sectors(struct se_device *, u32);
46int se_dev_set_fabric_max_sectors(struct se_device *, u32);
47int se_dev_set_optimal_sectors(struct se_device *, u32);
48int se_dev_set_block_size(struct se_device *, u32);
49struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); 21struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
50void core_dev_del_lun(struct se_portal_group *, struct se_lun *); 22void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
51struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); 23struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
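The se_dev_set_*() prototypes leave the core-internal header because backend modules now need to call them; they presumably move to a public target header, target_core_backend.h being the natural home, though that file is not shown in this diff. E.g.:

	/* Assumed new location of the declarations (not shown here): */
	int se_dev_set_block_size(struct se_device *, u32);
	int se_dev_set_queue_depth(struct se_device *, u32);
	/* ... and the rest of the exported setters ... */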
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4c261c33cf55..d56f2aaba9af 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -76,7 +76,7 @@ enum preempt_type {
76}; 76};
77 77
78static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, 78static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
79 struct t10_pr_registration *, int); 79 struct t10_pr_registration *, int, int);
80 80
81static sense_reason_t 81static sense_reason_t
82target_scsi2_reservation_check(struct se_cmd *cmd) 82target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
1177 * service action with the SERVICE ACTION RESERVATION KEY 1177 * service action with the SERVICE ACTION RESERVATION KEY
1178 * field set to zero (see 5.7.11.3). 1178 * field set to zero (see 5.7.11.3).
1179 */ 1179 */
1180 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); 1180 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
1181 ret = 1; 1181 ret = 1;
1182 /* 1182 /*
1183 * For 'All Registrants' reservation types, all existing 1183 * For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(
1219 1219
1220 pr_reg->pr_reg_deve->def_pr_registered = 0; 1220 pr_reg->pr_reg_deve->def_pr_registered = 0;
1221 pr_reg->pr_reg_deve->pr_res_key = 0; 1221 pr_reg->pr_reg_deve->pr_res_key = 0;
1222 list_del(&pr_reg->pr_reg_list); 1222 if (!list_empty(&pr_reg->pr_reg_list))
1223 list_del(&pr_reg->pr_reg_list);
1223 /* 1224 /*
1224 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), 1225 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
1225 * so call core_scsi3_put_pr_reg() to decrement our reference. 1226 * so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
1271{ 1272{
1272 struct t10_reservation *pr_tmpl = &dev->t10_pr; 1273 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1273 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1274 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1275 bool free_reg = false;
1274 /* 1276 /*
1275 * If the passed se_node_acl matches the reservation holder, 1277 * If the passed se_node_acl matches the reservation holder,
1276 * release the reservation. 1278 * release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
1278 spin_lock(&dev->dev_reservation_lock); 1280 spin_lock(&dev->dev_reservation_lock);
1279 pr_res_holder = dev->dev_pr_res_holder; 1281 pr_res_holder = dev->dev_pr_res_holder;
1280 if ((pr_res_holder != NULL) && 1282 if ((pr_res_holder != NULL) &&
1281 (pr_res_holder->pr_reg_nacl == nacl)) 1283 (pr_res_holder->pr_reg_nacl == nacl)) {
1282 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); 1284 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
1285 free_reg = true;
1286 }
1283 spin_unlock(&dev->dev_reservation_lock); 1287 spin_unlock(&dev->dev_reservation_lock);
1284 /* 1288 /*
1285 * Release any registration associated with the struct se_node_acl. 1289 * Release any registration associated with the struct se_node_acl.
1286 */ 1290 */
1287 spin_lock(&pr_tmpl->registration_lock); 1291 spin_lock(&pr_tmpl->registration_lock);
1292 if (pr_res_holder && free_reg)
1293 __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
1294
1288 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 1295 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1289 &pr_tmpl->registration_list, pr_reg_list) { 1296 &pr_tmpl->registration_list, pr_reg_list) {
1290 1297
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
1307 if (pr_res_holder != NULL) { 1314 if (pr_res_holder != NULL) {
1308 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 1315 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
1309 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 1316 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
1310 pr_res_holder, 0); 1317 pr_res_holder, 0, 0);
1311 } 1318 }
1312 spin_unlock(&dev->dev_reservation_lock); 1319 spin_unlock(&dev->dev_reservation_lock);
1313 1320
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
1429 struct target_core_fabric_ops *tmp_tf_ops; 1436 struct target_core_fabric_ops *tmp_tf_ops;
1430 unsigned char *buf; 1437 unsigned char *buf;
1431 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; 1438 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
1432 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 1439 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1433 sense_reason_t ret; 1440 sense_reason_t ret;
1434 u32 tpdl, tid_len = 0; 1441 u32 tpdl, tid_len = 0;
1435 int dest_local_nexus; 1442 int dest_local_nexus;
1436 u32 dest_rtpi = 0; 1443 u32 dest_rtpi = 0;
1437 1444
1438 memset(dest_iport, 0, 64);
1439
1440 local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 1445 local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
1441 /* 1446 /*
1442 * Allocate a struct pr_transport_id_holder and setup the 1447 * Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2105 /* 2110 /*
2106 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. 2111 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
2107 */ 2112 */
2108 pr_holder = core_scsi3_check_implicit_release( 2113 type = pr_reg->pr_res_type;
2109 cmd->se_dev, pr_reg); 2114 pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
2115 pr_reg);
2110 if (pr_holder < 0) { 2116 if (pr_holder < 0) {
2111 ret = TCM_RESERVATION_CONFLICT; 2117 ret = TCM_RESERVATION_CONFLICT;
2112 goto out; 2118 goto out;
2113 } 2119 }
2114 type = pr_reg->pr_res_type;
2115 2120
2116 spin_lock(&pr_tmpl->registration_lock); 2121 spin_lock(&pr_tmpl->registration_lock);
2117 /* 2122 /*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2269 spin_lock(&dev->dev_reservation_lock); 2274 spin_lock(&dev->dev_reservation_lock);
2270 pr_res_holder = dev->dev_pr_res_holder; 2275 pr_res_holder = dev->dev_pr_res_holder;
2271 if (pr_res_holder) { 2276 if (pr_res_holder) {
2277 int pr_res_type = pr_res_holder->pr_res_type;
2272 /* 2278 /*
2273 * From spc4r17 Section 5.7.9: Reserving: 2279 * From spc4r17 Section 5.7.9: Reserving:
2274 * 2280 *
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2279 * the logical unit, then the command shall be completed with 2285 * the logical unit, then the command shall be completed with
2280 * RESERVATION CONFLICT status. 2286 * RESERVATION CONFLICT status.
2281 */ 2287 */
2282 if (pr_res_holder != pr_reg) { 2288 if ((pr_res_holder != pr_reg) &&
2289 (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
2290 (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
2283 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2291 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2284 pr_err("SPC-3 PR: Attempted RESERVE from" 2292 pr_err("SPC-3 PR: Attempted RESERVE from"
2285 " [%s]: %s while reservation already held by" 2293 " [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
2385 struct se_device *dev, 2393 struct se_device *dev,
2386 struct se_node_acl *se_nacl, 2394 struct se_node_acl *se_nacl,
2387 struct t10_pr_registration *pr_reg, 2395 struct t10_pr_registration *pr_reg,
2388 int explicit) 2396 int explicit,
2397 int unreg)
2389{ 2398{
2390 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2399 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2391 char i_buf[PR_REG_ISID_ID_LEN]; 2400 char i_buf[PR_REG_ISID_ID_LEN];
2401 int pr_res_type = 0, pr_res_scope = 0;
2392 2402
2393 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2403 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2394 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 2404 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
2395 /* 2405 /*
2396 * Go ahead and release the current PR reservation holder. 2406 * Go ahead and release the current PR reservation holder.
2407 * If an All Registrants reservation is currently active and
2408 * a unregister operation is requested, replace the current
2409 * dev_pr_res_holder with another active registration.
2397 */ 2410 */
2398 dev->dev_pr_res_holder = NULL; 2411 if (dev->dev_pr_res_holder) {
2412 pr_res_type = dev->dev_pr_res_holder->pr_res_type;
2413 pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
2414 dev->dev_pr_res_holder->pr_res_type = 0;
2415 dev->dev_pr_res_holder->pr_res_scope = 0;
2416 dev->dev_pr_res_holder->pr_res_holder = 0;
2417 dev->dev_pr_res_holder = NULL;
2418 }
2419 if (!unreg)
2420 goto out;
2399 2421
2400 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2422 spin_lock(&dev->t10_pr.registration_lock);
2401 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2423 list_del_init(&pr_reg->pr_reg_list);
2402 tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", 2424 /*
2403 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2425 * If the I_T nexus is a reservation holder, the persistent reservation
2404 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2426 * is of an all registrants type, and the I_T nexus is the last remaining
2427 * registered I_T nexus, then the device server shall also release the
2428 * persistent reservation.
2429 */
2430 if (!list_empty(&dev->t10_pr.registration_list) &&
2431 ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2432 (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
2433 dev->dev_pr_res_holder =
2434 list_entry(dev->t10_pr.registration_list.next,
2435 struct t10_pr_registration, pr_reg_list);
2436 dev->dev_pr_res_holder->pr_res_type = pr_res_type;
2437 dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
2438 dev->dev_pr_res_holder->pr_res_holder = 1;
2439 }
2440 spin_unlock(&dev->t10_pr.registration_lock);
2441out:
2442 if (!dev->dev_pr_res_holder) {
2443 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2444 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2445 tfo->get_fabric_name(), (explicit) ? "explicit" :
2446 "implicit", core_scsi3_pr_dump_type(pr_res_type),
2447 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2448 }
2405 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2449 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2406 tfo->get_fabric_name(), se_nacl->initiatorname, 2450 tfo->get_fabric_name(), se_nacl->initiatorname,
2407 i_buf); 2451 i_buf);
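The new unreg parameter distinguishes release-by-UNREGISTER from an ordinary RELEASE. For an All Registrants reservation, SPC-4 requires that unregistering the holder not drop the reservation while other I_T nexuses remain registered, so the function re-elects the next entry on the registration list as holder; only when the list empties does the reservation actually go away. The list_del_init() here is also what makes the list_empty() guard added earlier in __core_scsi3_free_registration() safe on an already-unlinked entry. A condensed sketch of the unreg path, with placeholder names for the type test and the list_entry() lookup:

	list_del_init(&pr_reg->pr_reg_list);
	if (!list_empty(&dev->t10_pr.registration_list) &&
	    all_registrants_type) {	/* WRITE_EXCLUSIVE_ALLREG or EXCLUSIVE_ACCESS_ALLREG */
		/* hand the reservation to the next remaining registrant */
		dev->dev_pr_res_holder = first_remaining_registration;
		dev->dev_pr_res_holder->pr_res_holder = 1;
	}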
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
2532 * server shall not establish a unit attention condition. 2576 * server shall not establish a unit attention condition.
2533 */ 2577 */
2534 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, 2578 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
2535 pr_reg, 1); 2579 pr_reg, 1, 0);
2536 2580
2537 spin_unlock(&dev->dev_reservation_lock); 2581 spin_unlock(&dev->dev_reservation_lock);
2538 2582
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
2620 if (pr_res_holder) { 2664 if (pr_res_holder) {
2621 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2665 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2622 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 2666 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
2623 pr_res_holder, 0); 2667 pr_res_holder, 0, 0);
2624 } 2668 }
2625 spin_unlock(&dev->dev_reservation_lock); 2669 spin_unlock(&dev->dev_reservation_lock);
2626 /* 2670 /*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
2679 */ 2723 */
2680 if (dev->dev_pr_res_holder) 2724 if (dev->dev_pr_res_holder)
2681 __core_scsi3_complete_pro_release(dev, nacl, 2725 __core_scsi3_complete_pro_release(dev, nacl,
2682 dev->dev_pr_res_holder, 0); 2726 dev->dev_pr_res_holder, 0, 0);
2683 2727
2684 dev->dev_pr_res_holder = pr_reg; 2728 dev->dev_pr_res_holder = pr_reg;
2685 pr_reg->pr_res_holder = 1; 2729 pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2924 */ 2968 */
2925 if (pr_reg_n != pr_res_holder) 2969 if (pr_reg_n != pr_res_holder)
2926 __core_scsi3_complete_pro_release(dev, 2970 __core_scsi3_complete_pro_release(dev,
2927 pr_res_holder->pr_reg_nacl, 2971 pr_res_holder->pr_reg_nacl,
2928 dev->dev_pr_res_holder, 0); 2972 dev->dev_pr_res_holder, 0, 0);
2929 /* 2973 /*
2930 * b) Remove the registrations for all I_T nexuses identified 2974 * b) Remove the registrations for all I_T nexuses identified
2931 * by the SERVICE ACTION RESERVATION KEY field, except the 2975 * by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3059 struct t10_reservation *pr_tmpl = &dev->t10_pr; 3103 struct t10_reservation *pr_tmpl = &dev->t10_pr;
3060 unsigned char *buf; 3104 unsigned char *buf;
3061 unsigned char *initiator_str; 3105 unsigned char *initiator_str;
3062 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 3106 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
3063 u32 tid_len, tmp_tid_len; 3107 u32 tid_len, tmp_tid_len;
3064 int new_reg = 0, type, scope, matching_iname; 3108 int new_reg = 0, type, scope, matching_iname;
3065 sense_reason_t ret; 3109 sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3071 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3115 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3072 } 3116 }
3073 3117
3074 memset(dest_iport, 0, 64);
3075 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3118 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
3076 se_tpg = se_sess->se_tpg; 3119 se_tpg = se_sess->se_tpg;
3077 tf_ops = se_tpg->se_tpg_tfo; 3120 tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ after_iport_check:
 	 * holder (i.e., the I_T nexus on which the
 	 */
 	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
-			dev->dev_pr_res_holder, 0);
+			dev->dev_pr_res_holder, 0, 0);
 	/*
 	 * g) Move the persistent reservation to the specified I_T nexus using
 	 * the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	unsigned char *buf;
 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
 	u32 off = 8; /* off into first Full Status descriptor */
-	int format_code = 0;
+	int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+	bool all_reg = false;
 
 	if (cmd->data_length < 8) {
 		pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
 	buf[3] = (dev->t10_pr.pr_generation & 0xff);
 
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_pr_res_holder) {
+		struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+		if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+		    pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+			all_reg = true;
+			pr_res_type = pr_holder->pr_res_type;
+			pr_res_scope = pr_holder->pr_res_scope;
+		}
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
 	spin_lock(&pr_tmpl->registration_lock);
 	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
 			&pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 		 * reservation holder for PR_HOLDER bit.
 		 *
 		 * Also, if this registration is the reservation
-		 * holder, fill in SCOPE and TYPE in the next byte.
+		 * holder or there is an All Registrants reservation
+		 * active, fill in SCOPE and TYPE in the next byte.
 		 */
 		if (pr_reg->pr_res_holder) {
 			buf[off++] |= 0x01;
 			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
 				     (pr_reg->pr_res_type & 0x0f);
-		} else
+		} else if (all_reg) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_res_scope & 0xf0) |
+				     (pr_res_type & 0x0f);
+		} else {
 			off += 2;
+		}
 
 		off += 4; /* Skip over reserved area */
 		/*
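
The READ_FULL_STATUS hunks above make every registrant report SCOPE and TYPE
while an All Registrants reservation is active, not just the explicit holder.
A minimal sketch of the descriptor byte packing involved, assuming the SPC-4
codes LU_SCOPE = 0x00, WRITE EXCLUSIVE ALL REGISTRANTS = 0x07 and EXCLUSIVE
ACCESS ALL REGISTRANTS = 0x08 (values recalled from the spec, not taken from
this patch):

	/* Sketch: pack the PR_HOLDER bit plus the SCOPE/TYPE byte the
	 * way the hunk above does. */
	static void pack_full_status(unsigned char *desc, int holder,
				     unsigned char scope, unsigned char type)
	{
		if (holder) {
			desc[0] |= 0x01;	/* PR_HOLDER */
			desc[1] = (scope & 0xf0) | (type & 0x0f);
		}
		/* the next four bytes stay reserved either way */
	}
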
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c8291f0bbbc..1045dcd7bf65 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,6 +44,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_alua.h"
 #include "target_core_pscsi.h"
@@ -1094,7 +1095,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 	req->retries = PS_RETRY;
 
 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
-			(cmd->sam_task_attr == MSG_HEAD_TAG),
+			(cmd->sam_task_attr == TCM_HEAD_TAG),
 			pscsi_req_done);
 
 	return 0;
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
 	kfree(pt);
 }
 
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
+TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
+TB_DEV_ATTR_RO(pscsi, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
+TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
+TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
+
+static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
+	&pscsi_dev_attrib_hw_pi_prot_type.attr,
+	&pscsi_dev_attrib_hw_block_size.attr,
+	&pscsi_dev_attrib_hw_max_sectors.attr,
+	&pscsi_dev_attrib_hw_queue_depth.attr,
+	NULL,
+};
+
 static struct se_subsystem_api pscsi_template = {
 	.name = "pscsi",
 	.owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
 
 static int __init pscsi_module_init(void)
 {
+	struct target_backend_cits *tbc = &pscsi_template.tb_cits;
+
+	target_core_setup_sub_cits(&pscsi_template);
+	tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
+
 	return transport_subsystem_register(&pscsi_template);
 }
 
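
Each backend converted in this series follows the same wiring: define the
backend's configfs device attributes, call target_core_setup_sub_cits() to
generate the default config_item_type layout, then point tb_dev_attrib_cit
at the backend's attribute array before registering. A minimal sketch with
a hypothetical backend "foo" (the foo_* names are illustrative and not part
of this patch):

	/* Hypothetical backend "foo", mirroring the pattern above. */
	DEF_TB_DEV_ATTRIB_RO(foo, hw_block_size);
	TB_DEV_ATTR_RO(foo, hw_block_size);

	static struct configfs_attribute *foo_backend_dev_attrs[] = {
		&foo_dev_attrib_hw_block_size.attr,
		NULL,
	};

	static struct se_subsystem_api foo_template = {
		.name = "foo",
	};

	static int __init foo_module_init(void)
	{
		/* Generate default cits, then override the dev_attrib set. */
		target_core_setup_sub_cits(&foo_template);
		foo_template.tb_cits.tb_dev_attrib_cit.ct_attrs =
						foo_backend_dev_attrs;

		return transport_subsystem_register(&foo_template);
	}
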
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b920db3388cd..60ebd170a561 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -34,6 +34,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_rd.h"
 
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
 	return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 
+DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
+
+static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
+	&rd_mcp_dev_attrib_emulate_model_alias.attr,
+	&rd_mcp_dev_attrib_emulate_dpo.attr,
+	&rd_mcp_dev_attrib_emulate_fua_write.attr,
+	&rd_mcp_dev_attrib_emulate_fua_read.attr,
+	&rd_mcp_dev_attrib_emulate_write_cache.attr,
+	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&rd_mcp_dev_attrib_emulate_tas.attr,
+	&rd_mcp_dev_attrib_emulate_tpu.attr,
+	&rd_mcp_dev_attrib_emulate_tpws.attr,
+	&rd_mcp_dev_attrib_emulate_caw.attr,
+	&rd_mcp_dev_attrib_emulate_3pc.attr,
+	&rd_mcp_dev_attrib_pi_prot_type.attr,
+	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
+	&rd_mcp_dev_attrib_pi_prot_format.attr,
+	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
+	&rd_mcp_dev_attrib_is_nonrot.attr,
+	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
+	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
+	&rd_mcp_dev_attrib_hw_block_size.attr,
+	&rd_mcp_dev_attrib_block_size.attr,
+	&rd_mcp_dev_attrib_hw_max_sectors.attr,
+	&rd_mcp_dev_attrib_fabric_max_sectors.attr,
+	&rd_mcp_dev_attrib_optimal_sectors.attr,
+	&rd_mcp_dev_attrib_hw_queue_depth.attr,
+	&rd_mcp_dev_attrib_queue_depth.attr,
+	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
+	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
+	&rd_mcp_dev_attrib_unmap_granularity.attr,
+	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
+	&rd_mcp_dev_attrib_max_write_same_len.attr,
+	NULL,
+};
+
 static struct se_subsystem_api rd_mcp_template = {
 	.name = "rd_mcp",
 	.inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
 
 int __init rd_module_init(void)
 {
+	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
 	int ret;
 
+	target_core_setup_sub_cits(&rd_mcp_template);
+	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
+
 	ret = transport_subsystem_register(&rd_mcp_template);
 	if (ret < 0) {
 		return ret;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8d171ff77e75..11bea1952435 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -485,7 +485,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 	cmd->t_data_nents_orig = cmd->t_data_nents;
 	cmd->t_data_nents = 1;
 
-	cmd->sam_task_attr = MSG_HEAD_TAG;
+	cmd->sam_task_attr = TCM_HEAD_TAG;
 	cmd->transport_complete_callback = compare_and_write_post;
 	/*
 	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index bc286a67af7c..1307600fe726 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1357,7 +1357,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
 		 * See spc4r17 section 5.3
 		 */
-		cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = TCM_HEAD_TAG;
 		cmd->execute_cmd = spc_emulate_inquiry;
 		break;
 	case SECURITY_PROTOCOL_IN:
@@ -1391,7 +1391,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
 		 * See spc4r17 section 5.3
 		 */
-		cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = TCM_HEAD_TAG;
 		break;
 	case TEST_UNIT_READY:
 		cmd->execute_cmd = spc_emulate_testunitready;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index be877bf6f730..0adc0f650213 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1159,7 +1159,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
 		return 0;
 
-	if (cmd->sam_task_attr == MSG_ACA_TAG) {
+	if (cmd->sam_task_attr == TCM_ACA_TAG) {
 		pr_debug("SAM Task Attribute ACA"
 			" emulation is not supported\n");
 		return TCM_INVALID_CDB_FIELD;
@@ -1531,7 +1531,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 	BUG_ON(!se_tpg);
 
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+			0, DMA_NONE, TCM_SIMPLE_TAG, sense);
 	/*
 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
 	 * allocation failure.
@@ -1718,12 +1718,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
 	switch (cmd->sam_task_attr) {
-	case MSG_HEAD_TAG:
+	case TCM_HEAD_TAG:
 		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
 			"se_ordered_id: %u\n",
 			cmd->t_task_cdb[0], cmd->se_ordered_id);
 		return false;
-	case MSG_ORDERED_TAG:
+	case TCM_ORDERED_TAG:
 		atomic_inc_mb(&dev->dev_ordered_sync);
 
 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
@@ -1828,7 +1828,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
 
 		__target_execute_cmd(cmd);
 
-		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
 			break;
 	}
 }
@@ -1844,18 +1844,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
 		return;
 
-	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
+	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
 		atomic_dec_mb(&dev->simple_cmds);
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
 		atomic_dec_mb(&dev->dev_ordered_sync);
 
 		dev->dev_cur_ordered_id++;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9a1b314f6482..8bfa61c9693d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
+
 #include <linux/target_core_user.h>
 
 /*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 	return ret;
 }
 
+DEF_TB_DEFAULT_ATTRIBS(tcmu);
+
+static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
+	&tcmu_dev_attrib_emulate_model_alias.attr,
+	&tcmu_dev_attrib_emulate_dpo.attr,
+	&tcmu_dev_attrib_emulate_fua_write.attr,
+	&tcmu_dev_attrib_emulate_fua_read.attr,
+	&tcmu_dev_attrib_emulate_write_cache.attr,
+	&tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&tcmu_dev_attrib_emulate_tas.attr,
+	&tcmu_dev_attrib_emulate_tpu.attr,
+	&tcmu_dev_attrib_emulate_tpws.attr,
+	&tcmu_dev_attrib_emulate_caw.attr,
+	&tcmu_dev_attrib_emulate_3pc.attr,
+	&tcmu_dev_attrib_pi_prot_type.attr,
+	&tcmu_dev_attrib_hw_pi_prot_type.attr,
+	&tcmu_dev_attrib_pi_prot_format.attr,
+	&tcmu_dev_attrib_enforce_pr_isids.attr,
+	&tcmu_dev_attrib_is_nonrot.attr,
+	&tcmu_dev_attrib_emulate_rest_reord.attr,
+	&tcmu_dev_attrib_force_pr_aptpl.attr,
+	&tcmu_dev_attrib_hw_block_size.attr,
+	&tcmu_dev_attrib_block_size.attr,
+	&tcmu_dev_attrib_hw_max_sectors.attr,
+	&tcmu_dev_attrib_fabric_max_sectors.attr,
+	&tcmu_dev_attrib_optimal_sectors.attr,
+	&tcmu_dev_attrib_hw_queue_depth.attr,
+	&tcmu_dev_attrib_queue_depth.attr,
+	&tcmu_dev_attrib_max_unmap_lba_count.attr,
+	&tcmu_dev_attrib_max_unmap_block_desc_count.attr,
+	&tcmu_dev_attrib_unmap_granularity.attr,
+	&tcmu_dev_attrib_unmap_granularity_alignment.attr,
+	&tcmu_dev_attrib_max_write_same_len.attr,
+	NULL,
+};
+
 static struct se_subsystem_api tcmu_template = {
 	.name = "user",
 	.inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
 
 static int __init tcmu_module_init(void)
 {
+	struct target_backend_cits *tbc = &tcmu_template.tb_cits;
 	int ret;
 
 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
 		goto out_unreg_device;
 	}
 
+	target_core_setup_sub_cits(&tcmu_template);
+	tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
+
 	ret = transport_subsystem_register(&tcmu_template);
 	if (ret)
 		goto out_unreg_genl;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index be0c0d08c56a..edcafa4490c0 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -554,17 +554,17 @@ static void ft_send_work(struct work_struct *work)
 	 */
 	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
 	case FCP_PTA_HEADQ:
-		task_attr = MSG_HEAD_TAG;
+		task_attr = TCM_HEAD_TAG;
 		break;
 	case FCP_PTA_ORDERED:
-		task_attr = MSG_ORDERED_TAG;
+		task_attr = TCM_ORDERED_TAG;
 		break;
 	case FCP_PTA_ACA:
-		task_attr = MSG_ACA_TAG;
+		task_attr = TCM_ACA_TAG;
 		break;
 	case FCP_PTA_SIMPLE: /* Fallthrough */
 	default:
-		task_attr = MSG_SIMPLE_TAG;
+		task_attr = TCM_SIMPLE_TAG;
 	}
 
 	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
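
The rename running through these files replaces the scsi.h MSG_*_TAG message
codes with target core's own TCM_*_TAG task-attribute constants; fabric
drivers translate their wire-level priority fields onto them exactly as in
the switch above. A sketch of that mapping for a hypothetical fabric (the
wire values here are assumptions for illustration only):

	/* Hypothetical fabric: wire priority -> target-core task attribute. */
	static int foo_fabric_task_attr(unsigned int wire_prio)
	{
		switch (wire_prio) {
		case 1:		/* assumed head-of-queue encoding */
			return TCM_HEAD_TAG;
		case 2:		/* assumed ordered encoding */
			return TCM_ORDERED_TAG;
		case 4:		/* assumed ACA encoding */
			return TCM_ACA_TAG;
		default:
			return TCM_SIMPLE_TAG;
		}
	}
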
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 336602eb453e..96b69bfd773f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -561,7 +561,7 @@ static int omap_8250_startup(struct uart_port *port)
 	if (ret)
 		goto err;
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 	up->capabilities |= UART_CAP_RPM;
 #endif
 
@@ -997,12 +997,12 @@ static int omap8250_probe(struct platform_device *pdev)
 	up.port.fifosize = 64;
 	up.tx_loadsz = 64;
 	up.capabilities = UART_CAP_FIFO;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 	/*
-	 * PM_RUNTIME is mostly transparent. However to do it right we need to a
+	 * Runtime PM is mostly transparent. However to do it right we need to a
 	 * TX empty interrupt before we can put the device to auto idle. So if
-	 * PM_RUNTIME is not enabled we don't add that flag and can spare that
-	 * one extra interrupt in the TX path.
+	 * PM is not enabled we don't add that flag and can spare that one extra
+	 * interrupt in the TX path.
 	 */
 	up.capabilities |= UART_CAP_RPM;
 #endif
@@ -1105,7 +1105,7 @@ static int omap8250_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
 
 static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
 					   bool enable)
@@ -1179,7 +1179,7 @@ static int omap8250_resume(struct device *dev)
 #define omap8250_complete NULL
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int omap8250_lost_context(struct uart_8250_port *up)
 {
 	u32 val;
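
These hunks track the Kconfig change that folded CONFIG_PM_RUNTIME into
CONFIG_PM, so a single guard now covers both system sleep and runtime PM
paths. A sketch of the resulting driver-side pattern, using a hypothetical
driver "foo" (names illustrative; SET_RUNTIME_PM_OPS is the stock helper
from linux/pm.h):

	#ifdef CONFIG_PM	/* one guard for sleep + runtime PM */
	static int foo_runtime_suspend(struct device *dev)
	{
		/* quiesce the hardware, gate clocks, etc. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		return 0;
	}
	#endif

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};
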
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 024f58475a94..3a494168661e 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1131,19 +1131,19 @@ static int usbg_submit_command(struct f_uas *fu,
 
 	switch (cmd_iu->prio_attr & 0x7) {
 	case UAS_HEAD_TAG:
-		cmd->prio_attr = MSG_HEAD_TAG;
+		cmd->prio_attr = TCM_HEAD_TAG;
 		break;
 	case UAS_ORDERED_TAG:
-		cmd->prio_attr = MSG_ORDERED_TAG;
+		cmd->prio_attr = TCM_ORDERED_TAG;
 		break;
 	case UAS_ACA:
-		cmd->prio_attr = MSG_ACA_TAG;
+		cmd->prio_attr = TCM_ACA_TAG;
 		break;
 	default:
 		pr_debug_once("Unsupported prio_attr: %02x.\n",
 				cmd_iu->prio_attr);
 	case UAS_SIMPLE_TAG:
-		cmd->prio_attr = MSG_SIMPLE_TAG;
+		cmd->prio_attr = TCM_SIMPLE_TAG;
 		break;
 	}
 
@@ -1240,7 +1240,7 @@ static int bot_submit_command(struct f_uas *fu,
 		goto err;
 	}
 
-	cmd->prio_attr = MSG_SIMPLE_TAG;
+	cmd->prio_attr = TCM_SIMPLE_TAG;
 	se_cmd = &cmd->se_cmd;
 	cmd->unpacked_lun = cbw->Lun;
 	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index e752c3098f38..395649f357aa 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
 	int retval = 1;
 	unsigned long flags;
 
-	/* if !PM_RUNTIME, root hub timers won't get shut down ... */
+	/* if !PM, root hub timers won't get shut down ... */
 	if (!HC_IS_RUNNING(hcd->state))
 		return 0;
 
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 75811dd5a9d7..036924e640f5 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3087,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
 	int ports, i, retval = 1;
 	unsigned long flags;
 
-	/* if !PM_RUNTIME, root hub timers won't get shut down ... */
+	/* if !PM, root hub timers won't get shut down ... */
 	if (!HC_IS_RUNNING(hcd->state))
 		return 0;
 
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 50610a6acf3d..e999496eda3e 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -606,7 +606,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
 	init_waitqueue_head(&tmr->tmr_wait);
 
 	transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
-		tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+		tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
 		&pending_req->sense_buffer[0]);
 
 	rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);