author    | Daniel Lezcano <daniel.lezcano@linaro.org> | 2014-07-23 06:00:00 -0400
committer | Daniel Lezcano <daniel.lezcano@linaro.org> | 2014-07-23 06:00:00 -0400
commit    | de2ea58155413f8914169b7183107f4ea5c6521f (patch)
tree      | cd33b60fc32e0c156d1f41e2a1498bd88e0d66c5 /drivers/clocksource
parent    | afdb094380889222583df9ef803587f6b8a82c8d (diff)
parent    | cca8d0596c4c7acb371ea1bc5eee9b404b30516a (diff)
Merge branch 'clockevents/renesas-timers-dt' into clockevents/3.17
Diffstat (limited to 'drivers/clocksource')
-rw-r--r-- | drivers/clocksource/sh_cmt.c  | 233
-rw-r--r-- | drivers/clocksource/sh_mtu2.c | 146
-rw-r--r-- | drivers/clocksource/sh_tmu.c  | 127
3 files changed, 184 insertions, 322 deletions
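
The merge below drops the legacy one-device-per-channel probing from the three Renesas timer drivers and switches them to multi-channel devices with DT support. For the platform-data path that survives, a board file now describes a whole CMT instance in one device. The sketch below shows the shape of such a registration; the base address, window size and IRQ numbers are placeholders, not values from any real SoC.

```c
/*
 * Hypothetical board-file registration of a multi-channel CMT after this
 * merge: one platform device covers several channels selected by
 * channels_mask, instead of one "sh_cmt" device per channel.
 */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/sh_timer.h>

static struct sh_timer_config cmt_platform_data = {
	.channels_mask	= 0x60,			/* use hardware channels 5 and 6 */
};

static struct resource cmt_resources[] = {
	DEFINE_RES_MEM(0xffca0000, 0x1004),	/* placeholder register window */
	DEFINE_RES_IRQ(120),			/* placeholder IRQ for channel 5 */
	DEFINE_RES_IRQ(121),			/* placeholder IRQ for channel 6 */
};

static struct platform_device cmt_device = {
	.name		= "sh-cmt-48-gen2",	/* matches sh_cmt_id_table below */
	.id		= 0,
	.dev		= {
		.platform_data	= &cmt_platform_data,
	},
	.resource	= cmt_resources,
	.num_resources	= ARRAY_SIZE(cmt_resources),
};
```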
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dfa780396b91..2bd13b53b727 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
28 | #include <linux/pm_domain.h> | 29 | #include <linux/pm_domain.h> |
29 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
@@ -114,14 +115,15 @@ struct sh_cmt_device { | |||
114 | struct platform_device *pdev; | 115 | struct platform_device *pdev; |
115 | 116 | ||
116 | const struct sh_cmt_info *info; | 117 | const struct sh_cmt_info *info; |
117 | bool legacy; | ||
118 | 118 | ||
119 | void __iomem *mapbase_ch; | ||
120 | void __iomem *mapbase; | 119 | void __iomem *mapbase; |
121 | struct clk *clk; | 120 | struct clk *clk; |
122 | 121 | ||
122 | raw_spinlock_t lock; /* Protect the shared start/stop register */ | ||
123 | |||
123 | struct sh_cmt_channel *channels; | 124 | struct sh_cmt_channel *channels; |
124 | unsigned int num_channels; | 125 | unsigned int num_channels; |
126 | unsigned int hw_channels; | ||
125 | 127 | ||
126 | bool has_clockevent; | 128 | bool has_clockevent; |
127 | bool has_clocksource; | 129 | bool has_clocksource; |
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, | |||
301 | return v2; | 303 | return v2; |
302 | } | 304 | } |
303 | 305 | ||
304 | static DEFINE_RAW_SPINLOCK(sh_cmt_lock); | ||
305 | |||
306 | static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) | 306 | static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) |
307 | { | 307 | { |
308 | unsigned long flags, value; | 308 | unsigned long flags, value; |
309 | 309 | ||
310 | /* start stop register shared by multiple timer channels */ | 310 | /* start stop register shared by multiple timer channels */ |
311 | raw_spin_lock_irqsave(&sh_cmt_lock, flags); | 311 | raw_spin_lock_irqsave(&ch->cmt->lock, flags); |
312 | value = sh_cmt_read_cmstr(ch); | 312 | value = sh_cmt_read_cmstr(ch); |
313 | 313 | ||
314 | if (start) | 314 | if (start) |
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) | |||
317 | value &= ~(1 << ch->timer_bit); | 317 | value &= ~(1 << ch->timer_bit); |
318 | 318 | ||
319 | sh_cmt_write_cmstr(ch, value); | 319 | sh_cmt_write_cmstr(ch, value); |
320 | raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); | 320 | raw_spin_unlock_irqrestore(&ch->cmt->lock, flags); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) | 323 | static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) |
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, | |||
792 | int irq; | 792 | int irq; |
793 | int ret; | 793 | int ret; |
794 | 794 | ||
795 | irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index); | 795 | irq = platform_get_irq(ch->cmt->pdev, ch->index); |
796 | if (irq < 0) { | 796 | if (irq < 0) { |
797 | dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", | 797 | dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", |
798 | ch->index); | 798 | ch->index); |
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, | |||
863 | * Compute the address of the channel control register block. For the | 863 | * Compute the address of the channel control register block. For the |
864 | * timers with a per-channel start/stop register, compute its address | 864 | * timers with a per-channel start/stop register, compute its address |
865 | * as well. | 865 | * as well. |
866 | * | ||
867 | * For legacy configuration the address has been mapped explicitly. | ||
868 | */ | 866 | */ |
869 | if (cmt->legacy) { | 867 | switch (cmt->info->model) { |
870 | ch->ioctrl = cmt->mapbase_ch; | 868 | case SH_CMT_16BIT: |
871 | } else { | 869 | ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; |
872 | switch (cmt->info->model) { | 870 | break; |
873 | case SH_CMT_16BIT: | 871 | case SH_CMT_32BIT: |
874 | ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; | 872 | case SH_CMT_48BIT: |
875 | break; | 873 | ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; |
876 | case SH_CMT_32BIT: | 874 | break; |
877 | case SH_CMT_48BIT: | 875 | case SH_CMT_32BIT_FAST: |
878 | ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; | 876 | /* |
879 | break; | 877 | * The 32-bit "fast" timer has a single channel at hwidx 5 but |
880 | case SH_CMT_32BIT_FAST: | 878 | * is located at offset 0x40 instead of 0x60 for some reason. |
881 | /* | 879 | */ |
882 | * The 32-bit "fast" timer has a single channel at hwidx | 880 | ch->ioctrl = cmt->mapbase + 0x40; |
883 | * 5 but is located at offset 0x40 instead of 0x60 for | 881 | break; |
884 | * some reason. | 882 | case SH_CMT_48BIT_GEN2: |
885 | */ | 883 | ch->iostart = cmt->mapbase + ch->hwidx * 0x100; |
886 | ch->ioctrl = cmt->mapbase + 0x40; | 884 | ch->ioctrl = ch->iostart + 0x10; |
887 | break; | 885 | break; |
888 | case SH_CMT_48BIT_GEN2: | ||
889 | ch->iostart = cmt->mapbase + ch->hwidx * 0x100; | ||
890 | ch->ioctrl = ch->iostart + 0x10; | ||
891 | break; | ||
892 | } | ||
893 | } | 886 | } |
894 | 887 | ||
895 | if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) | 888 | if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) |
@@ -900,12 +893,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, | |||
900 | ch->match_value = ch->max_match_value; | 893 | ch->match_value = ch->max_match_value; |
901 | raw_spin_lock_init(&ch->lock); | 894 | raw_spin_lock_init(&ch->lock); |
902 | 895 | ||
903 | if (cmt->legacy) { | 896 | ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx; |
904 | ch->timer_bit = ch->hwidx; | ||
905 | } else { | ||
906 | ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 | ||
907 | ? 0 : ch->hwidx; | ||
908 | } | ||
909 | 897 | ||
910 | ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), | 898 | ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), |
911 | clockevent, clocksource); | 899 | clockevent, clocksource); |
@@ -938,75 +926,65 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt) | |||
938 | return 0; | 926 | return 0; |
939 | } | 927 | } |
940 | 928 | ||
941 | static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) | 929 | static const struct platform_device_id sh_cmt_id_table[] = { |
942 | { | 930 | { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, |
943 | struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; | 931 | { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, |
944 | struct resource *res, *res2; | 932 | { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, |
945 | 933 | { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, | |
946 | /* map memory, let mapbase_ch point to our channel */ | 934 | { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, |
947 | res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); | 935 | { } |
948 | if (!res) { | 936 | }; |
949 | dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); | 937 | MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); |
950 | return -ENXIO; | ||
951 | } | ||
952 | |||
953 | cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); | ||
954 | if (cmt->mapbase_ch == NULL) { | ||
955 | dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); | ||
956 | return -ENXIO; | ||
957 | } | ||
958 | |||
959 | /* optional resource for the shared timer start/stop register */ | ||
960 | res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); | ||
961 | |||
962 | /* map second resource for CMSTR */ | ||
963 | cmt->mapbase = ioremap_nocache(res2 ? res2->start : | ||
964 | res->start - cfg->channel_offset, | ||
965 | res2 ? resource_size(res2) : 2); | ||
966 | if (cmt->mapbase == NULL) { | ||
967 | dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); | ||
968 | iounmap(cmt->mapbase_ch); | ||
969 | return -ENXIO; | ||
970 | } | ||
971 | |||
972 | /* identify the model based on the resources */ | ||
973 | if (resource_size(res) == 6) | ||
974 | cmt->info = &sh_cmt_info[SH_CMT_16BIT]; | ||
975 | else if (res2 && (resource_size(res2) == 4)) | ||
976 | cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; | ||
977 | else | ||
978 | cmt->info = &sh_cmt_info[SH_CMT_32BIT]; | ||
979 | 938 | ||
980 | return 0; | 939 | static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { |
981 | } | 940 | { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] }, |
941 | { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] }, | ||
942 | { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] }, | ||
943 | { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] }, | ||
944 | { } | ||
945 | }; | ||
946 | MODULE_DEVICE_TABLE(of, sh_cmt_of_table); | ||
982 | 947 | ||
983 | static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) | 948 | static int sh_cmt_parse_dt(struct sh_cmt_device *cmt) |
984 | { | 949 | { |
985 | iounmap(cmt->mapbase); | 950 | struct device_node *np = cmt->pdev->dev.of_node; |
986 | if (cmt->mapbase_ch) | 951 | |
987 | iounmap(cmt->mapbase_ch); | 952 | return of_property_read_u32(np, "renesas,channels-mask", |
953 | &cmt->hw_channels); | ||
988 | } | 954 | } |
989 | 955 | ||
990 | static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | 956 | static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) |
991 | { | 957 | { |
992 | struct sh_timer_config *cfg = pdev->dev.platform_data; | 958 | unsigned int mask; |
993 | const struct platform_device_id *id = pdev->id_entry; | 959 | unsigned int i; |
994 | unsigned int hw_channels; | ||
995 | int ret; | 960 | int ret; |
996 | 961 | ||
997 | memset(cmt, 0, sizeof(*cmt)); | 962 | memset(cmt, 0, sizeof(*cmt)); |
998 | cmt->pdev = pdev; | 963 | cmt->pdev = pdev; |
964 | raw_spin_lock_init(&cmt->lock); | ||
965 | |||
966 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { | ||
967 | const struct of_device_id *id; | ||
968 | |||
969 | id = of_match_node(sh_cmt_of_table, pdev->dev.of_node); | ||
970 | cmt->info = id->data; | ||
999 | 971 | ||
1000 | if (!cfg) { | 972 | ret = sh_cmt_parse_dt(cmt); |
973 | if (ret < 0) | ||
974 | return ret; | ||
975 | } else if (pdev->dev.platform_data) { | ||
976 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
977 | const struct platform_device_id *id = pdev->id_entry; | ||
978 | |||
979 | cmt->info = (const struct sh_cmt_info *)id->driver_data; | ||
980 | cmt->hw_channels = cfg->channels_mask; | ||
981 | } else { | ||
1001 | dev_err(&cmt->pdev->dev, "missing platform data\n"); | 982 | dev_err(&cmt->pdev->dev, "missing platform data\n"); |
1002 | return -ENXIO; | 983 | return -ENXIO; |
1003 | } | 984 | } |
1004 | 985 | ||
1005 | cmt->info = (const struct sh_cmt_info *)id->driver_data; | ||
1006 | cmt->legacy = cmt->info ? false : true; | ||
1007 | |||
1008 | /* Get hold of clock. */ | 986 | /* Get hold of clock. */ |
1009 | cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); | 987 | cmt->clk = clk_get(&cmt->pdev->dev, "fck"); |
1010 | if (IS_ERR(cmt->clk)) { | 988 | if (IS_ERR(cmt->clk)) { |
1011 | dev_err(&cmt->pdev->dev, "cannot get clock\n"); | 989 | dev_err(&cmt->pdev->dev, "cannot get clock\n"); |
1012 | return PTR_ERR(cmt->clk); | 990 | return PTR_ERR(cmt->clk); |
@@ -1016,28 +994,13 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1016 | if (ret < 0) | 994 | if (ret < 0) |
1017 | goto err_clk_put; | 995 | goto err_clk_put; |
1018 | 996 | ||
1019 | /* | 997 | /* Map the memory resource(s). */ |
1020 | * Map the memory resource(s). We need to support both the legacy | 998 | ret = sh_cmt_map_memory(cmt); |
1021 | * platform device configuration (with one device per channel) and the | ||
1022 | * new version (with multiple channels per device). | ||
1023 | */ | ||
1024 | if (cmt->legacy) | ||
1025 | ret = sh_cmt_map_memory_legacy(cmt); | ||
1026 | else | ||
1027 | ret = sh_cmt_map_memory(cmt); | ||
1028 | |||
1029 | if (ret < 0) | 999 | if (ret < 0) |
1030 | goto err_clk_unprepare; | 1000 | goto err_clk_unprepare; |
1031 | 1001 | ||
1032 | /* Allocate and setup the channels. */ | 1002 | /* Allocate and setup the channels. */ |
1033 | if (cmt->legacy) { | 1003 | cmt->num_channels = hweight8(cmt->hw_channels); |
1034 | cmt->num_channels = 1; | ||
1035 | hw_channels = 0; | ||
1036 | } else { | ||
1037 | cmt->num_channels = hweight8(cfg->channels_mask); | ||
1038 | hw_channels = cfg->channels_mask; | ||
1039 | } | ||
1040 | |||
1041 | cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), | 1004 | cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), |
1042 | GFP_KERNEL); | 1005 | GFP_KERNEL); |
1043 | if (cmt->channels == NULL) { | 1006 | if (cmt->channels == NULL) { |
@@ -1045,35 +1008,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1045 | goto err_unmap; | 1008 | goto err_unmap; |
1046 | } | 1009 | } |
1047 | 1010 | ||
1048 | if (cmt->legacy) { | 1011 | /* |
1049 | ret = sh_cmt_setup_channel(&cmt->channels[0], | 1012 | * Use the first channel as a clock event device and the second channel |
1050 | cfg->timer_bit, cfg->timer_bit, | 1013 | * as a clock source. If only one channel is available use it for both. |
1051 | cfg->clockevent_rating != 0, | 1014 | */ |
1052 | cfg->clocksource_rating != 0, cmt); | 1015 | for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) { |
1016 | unsigned int hwidx = ffs(mask) - 1; | ||
1017 | bool clocksource = i == 1 || cmt->num_channels == 1; | ||
1018 | bool clockevent = i == 0; | ||
1019 | |||
1020 | ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, | ||
1021 | clockevent, clocksource, cmt); | ||
1053 | if (ret < 0) | 1022 | if (ret < 0) |
1054 | goto err_unmap; | 1023 | goto err_unmap; |
1055 | } else { | ||
1056 | unsigned int mask = hw_channels; | ||
1057 | unsigned int i; | ||
1058 | 1024 | ||
1059 | /* | 1025 | mask &= ~(1 << hwidx); |
1060 | * Use the first channel as a clock event device and the second | ||
1061 | * channel as a clock source. If only one channel is available | ||
1062 | * use it for both. | ||
1063 | */ | ||
1064 | for (i = 0; i < cmt->num_channels; ++i) { | ||
1065 | unsigned int hwidx = ffs(mask) - 1; | ||
1066 | bool clocksource = i == 1 || cmt->num_channels == 1; | ||
1067 | bool clockevent = i == 0; | ||
1068 | |||
1069 | ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, | ||
1070 | clockevent, clocksource, | ||
1071 | cmt); | ||
1072 | if (ret < 0) | ||
1073 | goto err_unmap; | ||
1074 | |||
1075 | mask &= ~(1 << hwidx); | ||
1076 | } | ||
1077 | } | 1026 | } |
1078 | 1027 | ||
1079 | platform_set_drvdata(pdev, cmt); | 1028 | platform_set_drvdata(pdev, cmt); |
@@ -1082,7 +1031,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1082 | 1031 | ||
1083 | err_unmap: | 1032 | err_unmap: |
1084 | kfree(cmt->channels); | 1033 | kfree(cmt->channels); |
1085 | sh_cmt_unmap_memory(cmt); | 1034 | iounmap(cmt->mapbase); |
1086 | err_clk_unprepare: | 1035 | err_clk_unprepare: |
1087 | clk_unprepare(cmt->clk); | 1036 | clk_unprepare(cmt->clk); |
1088 | err_clk_put: | 1037 | err_clk_put: |
@@ -1132,22 +1081,12 @@ static int sh_cmt_remove(struct platform_device *pdev) | |||
1132 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | 1081 | return -EBUSY; /* cannot unregister clockevent and clocksource */ |
1133 | } | 1082 | } |
1134 | 1083 | ||
1135 | static const struct platform_device_id sh_cmt_id_table[] = { | ||
1136 | { "sh_cmt", 0 }, | ||
1137 | { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, | ||
1138 | { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, | ||
1139 | { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, | ||
1140 | { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, | ||
1141 | { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, | ||
1142 | { } | ||
1143 | }; | ||
1144 | MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); | ||
1145 | |||
1146 | static struct platform_driver sh_cmt_device_driver = { | 1084 | static struct platform_driver sh_cmt_device_driver = { |
1147 | .probe = sh_cmt_probe, | 1085 | .probe = sh_cmt_probe, |
1148 | .remove = sh_cmt_remove, | 1086 | .remove = sh_cmt_remove, |
1149 | .driver = { | 1087 | .driver = { |
1150 | .name = "sh_cmt", | 1088 | .name = "sh_cmt", |
1089 | .of_match_table = of_match_ptr(sh_cmt_of_table), | ||
1151 | }, | 1090 | }, |
1152 | .id_table = sh_cmt_id_table, | 1091 | .id_table = sh_cmt_id_table, |
1153 | }; | 1092 | }; |
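
A note on the channel allocation loop that replaces the legacy single-channel path in sh_cmt_setup() above: the channel mask (from platform data or from the "renesas,channels-mask" DT property) is walked with ffs(), the first populated channel becomes the clock event device and the second the clock source (or the same channel serves both when only one is present). The stand-alone sketch below models that iteration in user space; the mask value 0x60 is an arbitrary example, not taken from any board or DT.

```c
/* Model of the sh_cmt channel-mask walk; compile with any C compiler. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int hw_channels = 0x60;	/* e.g. "renesas,channels-mask" */
	unsigned int num_channels = __builtin_popcount(hw_channels); /* hweight8() */
	unsigned int mask = hw_channels;
	unsigned int i;

	for (i = 0; i < num_channels; ++i) {
		unsigned int hwidx = ffs(mask) - 1;
		int clocksource = (i == 1) || (num_channels == 1);
		int clockevent = (i == 0);

		printf("channel %u -> hwidx %u, clockevent=%d, clocksource=%d\n",
		       i, hwidx, clockevent, clocksource);
		mask &= ~(1u << hwidx);
	}
	return 0;
}
```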
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 188d4e092efc..3d88698cf2b8 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of.h> | ||
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/pm_domain.h> | 28 | #include <linux/pm_domain.h> |
28 | #include <linux/pm_runtime.h> | 29 | #include <linux/pm_runtime.h> |
@@ -37,7 +38,6 @@ struct sh_mtu2_channel { | |||
37 | unsigned int index; | 38 | unsigned int index; |
38 | 39 | ||
39 | void __iomem *base; | 40 | void __iomem *base; |
40 | int irq; | ||
41 | 41 | ||
42 | struct clock_event_device ced; | 42 | struct clock_event_device ced; |
43 | }; | 43 | }; |
@@ -48,15 +48,14 @@ struct sh_mtu2_device { | |||
48 | void __iomem *mapbase; | 48 | void __iomem *mapbase; |
49 | struct clk *clk; | 49 | struct clk *clk; |
50 | 50 | ||
51 | raw_spinlock_t lock; /* Protect the shared registers */ | ||
52 | |||
51 | struct sh_mtu2_channel *channels; | 53 | struct sh_mtu2_channel *channels; |
52 | unsigned int num_channels; | 54 | unsigned int num_channels; |
53 | 55 | ||
54 | bool legacy; | ||
55 | bool has_clockevent; | 56 | bool has_clockevent; |
56 | }; | 57 | }; |
57 | 58 | ||
58 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); | ||
59 | |||
60 | #define TSTR -1 /* shared register */ | 59 | #define TSTR -1 /* shared register */ |
61 | #define TCR 0 /* channel register */ | 60 | #define TCR 0 /* channel register */ |
62 | #define TMDR 1 /* channel register */ | 61 | #define TMDR 1 /* channel register */ |
@@ -162,12 +161,8 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr) | |||
162 | { | 161 | { |
163 | unsigned long offs; | 162 | unsigned long offs; |
164 | 163 | ||
165 | if (reg_nr == TSTR) { | 164 | if (reg_nr == TSTR) |
166 | if (ch->mtu->legacy) | 165 | return ioread8(ch->mtu->mapbase + 0x280); |
167 | return ioread8(ch->mtu->mapbase); | ||
168 | else | ||
169 | return ioread8(ch->mtu->mapbase + 0x280); | ||
170 | } | ||
171 | 166 | ||
172 | offs = mtu2_reg_offs[reg_nr]; | 167 | offs = mtu2_reg_offs[reg_nr]; |
173 | 168 | ||
@@ -182,12 +177,8 @@ static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr, | |||
182 | { | 177 | { |
183 | unsigned long offs; | 178 | unsigned long offs; |
184 | 179 | ||
185 | if (reg_nr == TSTR) { | 180 | if (reg_nr == TSTR) |
186 | if (ch->mtu->legacy) | 181 | return iowrite8(value, ch->mtu->mapbase + 0x280); |
187 | return iowrite8(value, ch->mtu->mapbase); | ||
188 | else | ||
189 | return iowrite8(value, ch->mtu->mapbase + 0x280); | ||
190 | } | ||
191 | 182 | ||
192 | offs = mtu2_reg_offs[reg_nr]; | 183 | offs = mtu2_reg_offs[reg_nr]; |
193 | 184 | ||
@@ -202,7 +193,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) | |||
202 | unsigned long flags, value; | 193 | unsigned long flags, value; |
203 | 194 | ||
204 | /* start stop register shared by multiple timer channels */ | 195 | /* start stop register shared by multiple timer channels */ |
205 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); | 196 | raw_spin_lock_irqsave(&ch->mtu->lock, flags); |
206 | value = sh_mtu2_read(ch, TSTR); | 197 | value = sh_mtu2_read(ch, TSTR); |
207 | 198 | ||
208 | if (start) | 199 | if (start) |
@@ -211,7 +202,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) | |||
211 | value &= ~(1 << ch->index); | 202 | value &= ~(1 << ch->index); |
212 | 203 | ||
213 | sh_mtu2_write(ch, TSTR, value); | 204 | sh_mtu2_write(ch, TSTR, value); |
214 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 205 | raw_spin_unlock_irqrestore(&ch->mtu->lock, flags); |
215 | } | 206 | } |
216 | 207 | ||
217 | static int sh_mtu2_enable(struct sh_mtu2_channel *ch) | 208 | static int sh_mtu2_enable(struct sh_mtu2_channel *ch) |
@@ -331,7 +322,6 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, | |||
331 | const char *name) | 322 | const char *name) |
332 | { | 323 | { |
333 | struct clock_event_device *ced = &ch->ced; | 324 | struct clock_event_device *ced = &ch->ced; |
334 | int ret; | ||
335 | 325 | ||
336 | ced->name = name; | 326 | ced->name = name; |
337 | ced->features = CLOCK_EVT_FEAT_PERIODIC; | 327 | ced->features = CLOCK_EVT_FEAT_PERIODIC; |
@@ -344,24 +334,12 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, | |||
344 | dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", | 334 | dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", |
345 | ch->index); | 335 | ch->index); |
346 | clockevents_register_device(ced); | 336 | clockevents_register_device(ced); |
347 | |||
348 | ret = request_irq(ch->irq, sh_mtu2_interrupt, | ||
349 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | ||
350 | dev_name(&ch->mtu->pdev->dev), ch); | ||
351 | if (ret) { | ||
352 | dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", | ||
353 | ch->index, ch->irq); | ||
354 | return; | ||
355 | } | ||
356 | } | 337 | } |
357 | 338 | ||
358 | static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, | 339 | static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name) |
359 | bool clockevent) | ||
360 | { | 340 | { |
361 | if (clockevent) { | 341 | ch->mtu->has_clockevent = true; |
362 | ch->mtu->has_clockevent = true; | 342 | sh_mtu2_register_clockevent(ch, name); |
363 | sh_mtu2_register_clockevent(ch, name); | ||
364 | } | ||
365 | 343 | ||
366 | return 0; | 344 | return 0; |
367 | } | 345 | } |
@@ -372,40 +350,32 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, | |||
372 | static const unsigned int channel_offsets[] = { | 350 | static const unsigned int channel_offsets[] = { |
373 | 0x300, 0x380, 0x000, | 351 | 0x300, 0x380, 0x000, |
374 | }; | 352 | }; |
375 | bool clockevent; | 353 | char name[6]; |
354 | int irq; | ||
355 | int ret; | ||
376 | 356 | ||
377 | ch->mtu = mtu; | 357 | ch->mtu = mtu; |
378 | 358 | ||
379 | if (mtu->legacy) { | 359 | sprintf(name, "tgi%ua", index); |
380 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | 360 | irq = platform_get_irq_byname(mtu->pdev, name); |
381 | 361 | if (irq < 0) { | |
382 | clockevent = cfg->clockevent_rating != 0; | ||
383 | |||
384 | ch->irq = platform_get_irq(mtu->pdev, 0); | ||
385 | ch->base = mtu->mapbase - cfg->channel_offset; | ||
386 | ch->index = cfg->timer_bit; | ||
387 | } else { | ||
388 | char name[6]; | ||
389 | |||
390 | clockevent = true; | ||
391 | |||
392 | sprintf(name, "tgi%ua", index); | ||
393 | ch->irq = platform_get_irq_byname(mtu->pdev, name); | ||
394 | ch->base = mtu->mapbase + channel_offsets[index]; | ||
395 | ch->index = index; | ||
396 | } | ||
397 | |||
398 | if (ch->irq < 0) { | ||
399 | /* Skip channels with no declared interrupt. */ | 362 | /* Skip channels with no declared interrupt. */ |
400 | if (!mtu->legacy) | 363 | return 0; |
401 | return 0; | 364 | } |
402 | 365 | ||
403 | dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", | 366 | ret = request_irq(irq, sh_mtu2_interrupt, |
404 | ch->index); | 367 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
405 | return ch->irq; | 368 | dev_name(&ch->mtu->pdev->dev), ch); |
369 | if (ret) { | ||
370 | dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", | ||
371 | index, irq); | ||
372 | return ret; | ||
406 | } | 373 | } |
407 | 374 | ||
408 | return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); | 375 | ch->base = mtu->mapbase + channel_offsets[index]; |
376 | ch->index = index; | ||
377 | |||
378 | return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev)); | ||
409 | } | 379 | } |
410 | 380 | ||
411 | static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) | 381 | static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) |
@@ -422,46 +392,21 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) | |||
422 | if (mtu->mapbase == NULL) | 392 | if (mtu->mapbase == NULL) |
423 | return -ENXIO; | 393 | return -ENXIO; |
424 | 394 | ||
425 | /* | ||
426 | * In legacy platform device configuration (with one device per channel) | ||
427 | * the resource points to the channel base address. | ||
428 | */ | ||
429 | if (mtu->legacy) { | ||
430 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | ||
431 | mtu->mapbase += cfg->channel_offset; | ||
432 | } | ||
433 | |||
434 | return 0; | 395 | return 0; |
435 | } | 396 | } |
436 | 397 | ||
437 | static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu) | ||
438 | { | ||
439 | if (mtu->legacy) { | ||
440 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | ||
441 | mtu->mapbase -= cfg->channel_offset; | ||
442 | } | ||
443 | |||
444 | iounmap(mtu->mapbase); | ||
445 | } | ||
446 | |||
447 | static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | 398 | static int sh_mtu2_setup(struct sh_mtu2_device *mtu, |
448 | struct platform_device *pdev) | 399 | struct platform_device *pdev) |
449 | { | 400 | { |
450 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
451 | const struct platform_device_id *id = pdev->id_entry; | ||
452 | unsigned int i; | 401 | unsigned int i; |
453 | int ret; | 402 | int ret; |
454 | 403 | ||
455 | mtu->pdev = pdev; | 404 | mtu->pdev = pdev; |
456 | mtu->legacy = id->driver_data; | ||
457 | 405 | ||
458 | if (mtu->legacy && !cfg) { | 406 | raw_spin_lock_init(&mtu->lock); |
459 | dev_err(&mtu->pdev->dev, "missing platform data\n"); | ||
460 | return -ENXIO; | ||
461 | } | ||
462 | 407 | ||
463 | /* Get hold of clock. */ | 408 | /* Get hold of clock. */ |
464 | mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); | 409 | mtu->clk = clk_get(&mtu->pdev->dev, "fck"); |
465 | if (IS_ERR(mtu->clk)) { | 410 | if (IS_ERR(mtu->clk)) { |
466 | dev_err(&mtu->pdev->dev, "cannot get clock\n"); | 411 | dev_err(&mtu->pdev->dev, "cannot get clock\n"); |
467 | return PTR_ERR(mtu->clk); | 412 | return PTR_ERR(mtu->clk); |
@@ -479,10 +424,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
479 | } | 424 | } |
480 | 425 | ||
481 | /* Allocate and setup the channels. */ | 426 | /* Allocate and setup the channels. */ |
482 | if (mtu->legacy) | 427 | mtu->num_channels = 3; |
483 | mtu->num_channels = 1; | ||
484 | else | ||
485 | mtu->num_channels = 3; | ||
486 | 428 | ||
487 | mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, | 429 | mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, |
488 | GFP_KERNEL); | 430 | GFP_KERNEL); |
@@ -491,16 +433,10 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
491 | goto err_unmap; | 433 | goto err_unmap; |
492 | } | 434 | } |
493 | 435 | ||
494 | if (mtu->legacy) { | 436 | for (i = 0; i < mtu->num_channels; ++i) { |
495 | ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); | 437 | ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); |
496 | if (ret < 0) | 438 | if (ret < 0) |
497 | goto err_unmap; | 439 | goto err_unmap; |
498 | } else { | ||
499 | for (i = 0; i < mtu->num_channels; ++i) { | ||
500 | ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); | ||
501 | if (ret < 0) | ||
502 | goto err_unmap; | ||
503 | } | ||
504 | } | 440 | } |
505 | 441 | ||
506 | platform_set_drvdata(pdev, mtu); | 442 | platform_set_drvdata(pdev, mtu); |
@@ -509,7 +445,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
509 | 445 | ||
510 | err_unmap: | 446 | err_unmap: |
511 | kfree(mtu->channels); | 447 | kfree(mtu->channels); |
512 | sh_mtu2_unmap_memory(mtu); | 448 | iounmap(mtu->mapbase); |
513 | err_clk_unprepare: | 449 | err_clk_unprepare: |
514 | clk_unprepare(mtu->clk); | 450 | clk_unprepare(mtu->clk); |
515 | err_clk_put: | 451 | err_clk_put: |
@@ -560,17 +496,23 @@ static int sh_mtu2_remove(struct platform_device *pdev) | |||
560 | } | 496 | } |
561 | 497 | ||
562 | static const struct platform_device_id sh_mtu2_id_table[] = { | 498 | static const struct platform_device_id sh_mtu2_id_table[] = { |
563 | { "sh_mtu2", 1 }, | ||
564 | { "sh-mtu2", 0 }, | 499 | { "sh-mtu2", 0 }, |
565 | { }, | 500 | { }, |
566 | }; | 501 | }; |
567 | MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); | 502 | MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); |
568 | 503 | ||
504 | static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = { | ||
505 | { .compatible = "renesas,mtu2" }, | ||
506 | { } | ||
507 | }; | ||
508 | MODULE_DEVICE_TABLE(of, sh_mtu2_of_table); | ||
509 | |||
569 | static struct platform_driver sh_mtu2_device_driver = { | 510 | static struct platform_driver sh_mtu2_device_driver = { |
570 | .probe = sh_mtu2_probe, | 511 | .probe = sh_mtu2_probe, |
571 | .remove = sh_mtu2_remove, | 512 | .remove = sh_mtu2_remove, |
572 | .driver = { | 513 | .driver = { |
573 | .name = "sh_mtu2", | 514 | .name = "sh_mtu2", |
515 | .of_match_table = of_match_ptr(sh_mtu2_of_table), | ||
574 | }, | 516 | }, |
575 | .id_table = sh_mtu2_id_table, | 517 | .id_table = sh_mtu2_id_table, |
576 | }; | 518 | }; |
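
The sh_mtu2 changes above replace the global sh_mtu2_lock with a lock embedded in the device, so two MTU2 instances no longer serialize against each other while toggling the shared TSTR start/stop register. A minimal user-space model of that read-modify-write, with a pthread mutex standing in for the kernel raw spinlock and the register simulated in memory:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct mtu2_device {
	pthread_mutex_t lock;	/* was: one global sh_mtu2_lock for all devices */
	uint8_t tstr;		/* shared start/stop register */
};

static void mtu2_start_stop_ch(struct mtu2_device *mtu, unsigned int index, int start)
{
	pthread_mutex_lock(&mtu->lock);
	if (start)
		mtu->tstr |= 1u << index;
	else
		mtu->tstr &= ~(1u << index);
	pthread_mutex_unlock(&mtu->lock);
}

int main(void)
{
	struct mtu2_device mtu = { .lock = PTHREAD_MUTEX_INITIALIZER, .tstr = 0 };

	mtu2_start_stop_ch(&mtu, 0, 1);		/* start channel 0 */
	mtu2_start_stop_ch(&mtu, 2, 1);		/* start channel 2 */
	mtu2_start_stop_ch(&mtu, 0, 0);		/* stop channel 0 */
	printf("TSTR = 0x%02x\n", mtu.tstr);	/* prints 0x04 */
	return 0;
}
```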
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 6bd17a8f3dd4..0f665b8f2461 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
28 | #include <linux/pm_domain.h> | 29 | #include <linux/pm_domain.h> |
29 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
@@ -32,7 +33,6 @@ | |||
32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
33 | 34 | ||
34 | enum sh_tmu_model { | 35 | enum sh_tmu_model { |
35 | SH_TMU_LEGACY, | ||
36 | SH_TMU, | 36 | SH_TMU, |
37 | SH_TMU_SH3, | 37 | SH_TMU_SH3, |
38 | }; | 38 | }; |
@@ -62,6 +62,8 @@ struct sh_tmu_device { | |||
62 | 62 | ||
63 | enum sh_tmu_model model; | 63 | enum sh_tmu_model model; |
64 | 64 | ||
65 | raw_spinlock_t lock; /* Protect the shared start/stop register */ | ||
66 | |||
65 | struct sh_tmu_channel *channels; | 67 | struct sh_tmu_channel *channels; |
66 | unsigned int num_channels; | 68 | unsigned int num_channels; |
67 | 69 | ||
@@ -69,8 +71,6 @@ struct sh_tmu_device { | |||
69 | bool has_clocksource; | 71 | bool has_clocksource; |
70 | }; | 72 | }; |
71 | 73 | ||
72 | static DEFINE_RAW_SPINLOCK(sh_tmu_lock); | ||
73 | |||
74 | #define TSTR -1 /* shared register */ | 74 | #define TSTR -1 /* shared register */ |
75 | #define TCOR 0 /* channel register */ | 75 | #define TCOR 0 /* channel register */ |
76 | #define TCNT 1 /* channel register */ | 76 | #define TCNT 1 /* channel register */ |
@@ -91,8 +91,6 @@ static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) | |||
91 | 91 | ||
92 | if (reg_nr == TSTR) { | 92 | if (reg_nr == TSTR) { |
93 | switch (ch->tmu->model) { | 93 | switch (ch->tmu->model) { |
94 | case SH_TMU_LEGACY: | ||
95 | return ioread8(ch->tmu->mapbase); | ||
96 | case SH_TMU_SH3: | 94 | case SH_TMU_SH3: |
97 | return ioread8(ch->tmu->mapbase + 2); | 95 | return ioread8(ch->tmu->mapbase + 2); |
98 | case SH_TMU: | 96 | case SH_TMU: |
@@ -115,8 +113,6 @@ static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, | |||
115 | 113 | ||
116 | if (reg_nr == TSTR) { | 114 | if (reg_nr == TSTR) { |
117 | switch (ch->tmu->model) { | 115 | switch (ch->tmu->model) { |
118 | case SH_TMU_LEGACY: | ||
119 | return iowrite8(value, ch->tmu->mapbase); | ||
120 | case SH_TMU_SH3: | 116 | case SH_TMU_SH3: |
121 | return iowrite8(value, ch->tmu->mapbase + 2); | 117 | return iowrite8(value, ch->tmu->mapbase + 2); |
122 | case SH_TMU: | 118 | case SH_TMU: |
@@ -137,7 +133,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) | |||
137 | unsigned long flags, value; | 133 | unsigned long flags, value; |
138 | 134 | ||
139 | /* start stop register shared by multiple timer channels */ | 135 | /* start stop register shared by multiple timer channels */ |
140 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); | 136 | raw_spin_lock_irqsave(&ch->tmu->lock, flags); |
141 | value = sh_tmu_read(ch, TSTR); | 137 | value = sh_tmu_read(ch, TSTR); |
142 | 138 | ||
143 | if (start) | 139 | if (start) |
@@ -146,7 +142,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) | |||
146 | value &= ~(1 << ch->index); | 142 | value &= ~(1 << ch->index); |
147 | 143 | ||
148 | sh_tmu_write(ch, TSTR, value); | 144 | sh_tmu_write(ch, TSTR, value); |
149 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); | 145 | raw_spin_unlock_irqrestore(&ch->tmu->lock, flags); |
150 | } | 146 | } |
151 | 147 | ||
152 | static int __sh_tmu_enable(struct sh_tmu_channel *ch) | 148 | static int __sh_tmu_enable(struct sh_tmu_channel *ch) |
@@ -476,27 +472,12 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, | |||
476 | return 0; | 472 | return 0; |
477 | 473 | ||
478 | ch->tmu = tmu; | 474 | ch->tmu = tmu; |
475 | ch->index = index; | ||
479 | 476 | ||
480 | if (tmu->model == SH_TMU_LEGACY) { | 477 | if (tmu->model == SH_TMU_SH3) |
481 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | 478 | ch->base = tmu->mapbase + 4 + ch->index * 12; |
482 | 479 | else | |
483 | /* | 480 | ch->base = tmu->mapbase + 8 + ch->index * 12; |
484 | * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps | ||
485 | * channel registers blocks at base + 2 + 12 * index, while all | ||
486 | * other variants map them at base + 4 + 12 * index. We can | ||
487 | * compute the index by just dividing by 12, the 2 bytes or 4 | ||
488 | * bytes offset being hidden by the integer division. | ||
489 | */ | ||
490 | ch->index = cfg->channel_offset / 12; | ||
491 | ch->base = tmu->mapbase + cfg->channel_offset; | ||
492 | } else { | ||
493 | ch->index = index; | ||
494 | |||
495 | if (tmu->model == SH_TMU_SH3) | ||
496 | ch->base = tmu->mapbase + 4 + ch->index * 12; | ||
497 | else | ||
498 | ch->base = tmu->mapbase + 8 + ch->index * 12; | ||
499 | } | ||
500 | 481 | ||
501 | ch->irq = platform_get_irq(tmu->pdev, index); | 482 | ch->irq = platform_get_irq(tmu->pdev, index); |
502 | if (ch->irq < 0) { | 483 | if (ch->irq < 0) { |
@@ -526,46 +507,53 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu) | |||
526 | if (tmu->mapbase == NULL) | 507 | if (tmu->mapbase == NULL) |
527 | return -ENXIO; | 508 | return -ENXIO; |
528 | 509 | ||
529 | /* | ||
530 | * In legacy platform device configuration (with one device per channel) | ||
531 | * the resource points to the channel base address. | ||
532 | */ | ||
533 | if (tmu->model == SH_TMU_LEGACY) { | ||
534 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | ||
535 | tmu->mapbase -= cfg->channel_offset; | ||
536 | } | ||
537 | |||
538 | return 0; | 510 | return 0; |
539 | } | 511 | } |
540 | 512 | ||
541 | static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) | 513 | static int sh_tmu_parse_dt(struct sh_tmu_device *tmu) |
542 | { | 514 | { |
543 | if (tmu->model == SH_TMU_LEGACY) { | 515 | struct device_node *np = tmu->pdev->dev.of_node; |
544 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | 516 | |
545 | tmu->mapbase += cfg->channel_offset; | 517 | tmu->model = SH_TMU; |
518 | tmu->num_channels = 3; | ||
519 | |||
520 | of_property_read_u32(np, "#renesas,channels", &tmu->num_channels); | ||
521 | |||
522 | if (tmu->num_channels != 2 && tmu->num_channels != 3) { | ||
523 | dev_err(&tmu->pdev->dev, "invalid number of channels %u\n", | ||
524 | tmu->num_channels); | ||
525 | return -EINVAL; | ||
546 | } | 526 | } |
547 | 527 | ||
548 | iounmap(tmu->mapbase); | 528 | return 0; |
549 | } | 529 | } |
550 | 530 | ||
551 | static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | 531 | static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) |
552 | { | 532 | { |
553 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
554 | const struct platform_device_id *id = pdev->id_entry; | ||
555 | unsigned int i; | 533 | unsigned int i; |
556 | int ret; | 534 | int ret; |
557 | 535 | ||
558 | if (!cfg) { | 536 | tmu->pdev = pdev; |
537 | |||
538 | raw_spin_lock_init(&tmu->lock); | ||
539 | |||
540 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { | ||
541 | ret = sh_tmu_parse_dt(tmu); | ||
542 | if (ret < 0) | ||
543 | return ret; | ||
544 | } else if (pdev->dev.platform_data) { | ||
545 | const struct platform_device_id *id = pdev->id_entry; | ||
546 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
547 | |||
548 | tmu->model = id->driver_data; | ||
549 | tmu->num_channels = hweight8(cfg->channels_mask); | ||
550 | } else { | ||
559 | dev_err(&tmu->pdev->dev, "missing platform data\n"); | 551 | dev_err(&tmu->pdev->dev, "missing platform data\n"); |
560 | return -ENXIO; | 552 | return -ENXIO; |
561 | } | 553 | } |
562 | 554 | ||
563 | tmu->pdev = pdev; | ||
564 | tmu->model = id->driver_data; | ||
565 | |||
566 | /* Get hold of clock. */ | 555 | /* Get hold of clock. */ |
567 | tmu->clk = clk_get(&tmu->pdev->dev, | 556 | tmu->clk = clk_get(&tmu->pdev->dev, "fck"); |
568 | tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); | ||
569 | if (IS_ERR(tmu->clk)) { | 557 | if (IS_ERR(tmu->clk)) { |
570 | dev_err(&tmu->pdev->dev, "cannot get clock\n"); | 558 | dev_err(&tmu->pdev->dev, "cannot get clock\n"); |
571 | return PTR_ERR(tmu->clk); | 559 | return PTR_ERR(tmu->clk); |
@@ -583,11 +571,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
583 | } | 571 | } |
584 | 572 | ||
585 | /* Allocate and setup the channels. */ | 573 | /* Allocate and setup the channels. */ |
586 | if (tmu->model == SH_TMU_LEGACY) | ||
587 | tmu->num_channels = 1; | ||
588 | else | ||
589 | tmu->num_channels = hweight8(cfg->channels_mask); | ||
590 | |||
591 | tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, | 574 | tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, |
592 | GFP_KERNEL); | 575 | GFP_KERNEL); |
593 | if (tmu->channels == NULL) { | 576 | if (tmu->channels == NULL) { |
@@ -595,23 +578,15 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
595 | goto err_unmap; | 578 | goto err_unmap; |
596 | } | 579 | } |
597 | 580 | ||
598 | if (tmu->model == SH_TMU_LEGACY) { | 581 | /* |
599 | ret = sh_tmu_channel_setup(&tmu->channels[0], 0, | 582 | * Use the first channel as a clock event device and the second channel |
600 | cfg->clockevent_rating != 0, | 583 | * as a clock source. |
601 | cfg->clocksource_rating != 0, tmu); | 584 | */ |
585 | for (i = 0; i < tmu->num_channels; ++i) { | ||
586 | ret = sh_tmu_channel_setup(&tmu->channels[i], i, | ||
587 | i == 0, i == 1, tmu); | ||
602 | if (ret < 0) | 588 | if (ret < 0) |
603 | goto err_unmap; | 589 | goto err_unmap; |
604 | } else { | ||
605 | /* | ||
606 | * Use the first channel as a clock event device and the second | ||
607 | * channel as a clock source. | ||
608 | */ | ||
609 | for (i = 0; i < tmu->num_channels; ++i) { | ||
610 | ret = sh_tmu_channel_setup(&tmu->channels[i], i, | ||
611 | i == 0, i == 1, tmu); | ||
612 | if (ret < 0) | ||
613 | goto err_unmap; | ||
614 | } | ||
615 | } | 590 | } |
616 | 591 | ||
617 | platform_set_drvdata(pdev, tmu); | 592 | platform_set_drvdata(pdev, tmu); |
@@ -620,7 +595,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
620 | 595 | ||
621 | err_unmap: | 596 | err_unmap: |
622 | kfree(tmu->channels); | 597 | kfree(tmu->channels); |
623 | sh_tmu_unmap_memory(tmu); | 598 | iounmap(tmu->mapbase); |
624 | err_clk_unprepare: | 599 | err_clk_unprepare: |
625 | clk_unprepare(tmu->clk); | 600 | clk_unprepare(tmu->clk); |
626 | err_clk_put: | 601 | err_clk_put: |
@@ -671,18 +646,24 @@ static int sh_tmu_remove(struct platform_device *pdev) | |||
671 | } | 646 | } |
672 | 647 | ||
673 | static const struct platform_device_id sh_tmu_id_table[] = { | 648 | static const struct platform_device_id sh_tmu_id_table[] = { |
674 | { "sh_tmu", SH_TMU_LEGACY }, | ||
675 | { "sh-tmu", SH_TMU }, | 649 | { "sh-tmu", SH_TMU }, |
676 | { "sh-tmu-sh3", SH_TMU_SH3 }, | 650 | { "sh-tmu-sh3", SH_TMU_SH3 }, |
677 | { } | 651 | { } |
678 | }; | 652 | }; |
679 | MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); | 653 | MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); |
680 | 654 | ||
655 | static const struct of_device_id sh_tmu_of_table[] __maybe_unused = { | ||
656 | { .compatible = "renesas,tmu" }, | ||
657 | { } | ||
658 | }; | ||
659 | MODULE_DEVICE_TABLE(of, sh_tmu_of_table); | ||
660 | |||
681 | static struct platform_driver sh_tmu_device_driver = { | 661 | static struct platform_driver sh_tmu_device_driver = { |
682 | .probe = sh_tmu_probe, | 662 | .probe = sh_tmu_probe, |
683 | .remove = sh_tmu_remove, | 663 | .remove = sh_tmu_remove, |
684 | .driver = { | 664 | .driver = { |
685 | .name = "sh_tmu", | 665 | .name = "sh_tmu", |
666 | .of_match_table = of_match_ptr(sh_tmu_of_table), | ||
686 | }, | 667 | }, |
687 | .id_table = sh_tmu_id_table, | 668 | .id_table = sh_tmu_id_table, |
688 | }; | 669 | }; |
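
Finally, a small sketch of the per-channel register layout that sh_tmu_channel_setup() above now computes directly instead of deriving the index from the legacy channel_offset: SH3-class parts place the channel block at base + 4 + 12 * index, all other variants at base + 8 + 12 * index. The base address here is arbitrary and only serves to print the resulting offsets.

```c
#include <stdio.h>

enum sh_tmu_model { SH_TMU, SH_TMU_SH3 };

static unsigned long tmu_channel_base(enum sh_tmu_model model,
				      unsigned long mapbase, unsigned int index)
{
	if (model == SH_TMU_SH3)
		return mapbase + 4 + index * 12;
	return mapbase + 8 + index * 12;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 3; ++i)
		printf("ch%u: SH_TMU offset %#lx, SH_TMU_SH3 offset %#lx\n", i,
		       tmu_channel_base(SH_TMU, 0x0, i),
		       tmu_channel_base(SH_TMU_SH3, 0x0, i));
	return 0;
}
```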