author	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:21:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:21:08 -0400
commit	167569343fac74ec6825a3ab982f795b5880e63e (patch)
tree	965adb59fbe10d9f45a7fb90cb1ec1bc18d4613c /drivers
parent	b240b419db5d624ce7a5a397d6f62a1a686009ec (diff)
parent	cd903711fd9dce808b5cc07e509135886d962b0c (diff)
Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC platform updates from Arnd Bergmann:
 "This release brings up a new platform based on the old ARM9 core: the
  Nuvoton NPCM is used as a baseboard management controller, competing
  with the better known ASpeed AST2xx series.

  Another important change is the addition of ARMv7-A based chips in
  mach-stm32. The older parts in this platform are ARMv7-M based
  microcontrollers, now they are expanding to general-purpose workloads.

  The other changes are the usual defconfig updates to enable additional
  drivers, lesser bugfixes. The largest updates as often are the ongoing
  OMAP cleanups, but we also have a number of changes for the older PXA
  and davinci platforms this time.

  For the Renesas shmobile/r-car platform, some new infrastructure is
  needed to make the watchdog work correctly.

  Supporting Multiprocessing on Allwinner A80 required a significant
  amount of new code, but is not doing anything unexpected"

* tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (179 commits)
  arm: npcm: modify configuration for the NPCM7xx BMC.
  MAINTAINERS: update entry for ARM/berlin
  ARM: omap2: fix am43xx build without L2X0
  ARM: davinci: da8xx: simplify CFGCHIP regmap_config
  ARM: davinci: da8xx: fix oops in USB PHY driver due to stack allocated platform_data
  ARM: multi_v7_defconfig: add NXP FlexCAN IP support
  ARM: multi_v7_defconfig: enable thermal driver for i.MX devices
  ARM: multi_v7_defconfig: add RN5T618 PMIC family support
  ARM: multi_v7_defconfig: add NXP graphics drivers
  ARM: multi_v7_defconfig: add GPMI NAND controller support
  ARM: multi_v7_defconfig: add OCOTP driver for NXP SoCs
  ARM: multi_v7_defconfig: configure I2C driver built-in
  arm64: defconfig: add CONFIG_UNIPHIER_THERMAL and CONFIG_SNI_AVE
  ARM: imx: fix imx6sll-only build
  ARM: imx: select ARM_CPU_SUSPEND for CPU_IDLE as well
  ARM: mxs_defconfig: Re-sync defconfig
  ARM: imx_v4_v5_defconfig: Use the generic fsl-asoc-card driver
  ARM: imx_v4_v5_defconfig: Re-sync defconfig
  arm64: defconfig: enable stmmac ethernet to defconfig
  ARM: EXYNOS: Simplify code in coupled CPU idle hot path
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/bus/ti-sysc.c               |  526
-rw-r--r--	drivers/clocksource/Kconfig         |    3
-rw-r--r--	drivers/clocksource/Makefile        |    1
-rw-r--r--	drivers/clocksource/timer-ti-dm.c   | 1000
-rw-r--r--	drivers/mtd/nand/Kconfig            |   11
-rw-r--r--	drivers/mtd/nand/Makefile           |    1
-rw-r--r--	drivers/mtd/nand/marvell_nand.c     |    3
-rw-r--r--	drivers/mtd/nand/pxa3xx_nand.c      | 2105
-rw-r--r--	drivers/phy/ti/phy-da8xx-usb.c      |   16
-rw-r--r--	drivers/power/avs/smartreflex.c     |   41
-rw-r--r--	drivers/pwm/pwm-omap-dmtimer.c      |   68
-rw-r--r--	drivers/soc/renesas/Kconfig         |   14
-rw-r--r--	drivers/soc/renesas/Makefile        |    2
-rw-r--r--	drivers/soc/renesas/r8a77965-sysc.c |   37
-rw-r--r--	drivers/soc/renesas/r8a77970-sysc.c |   12
-rw-r--r--	drivers/soc/renesas/r8a77980-sysc.c |   52
-rw-r--r--	drivers/soc/renesas/rcar-rst.c      |   37
-rw-r--r--	drivers/soc/renesas/rcar-sysc.c     |    8
-rw-r--r--	drivers/soc/renesas/rcar-sysc.h     |    2
-rw-r--r--	drivers/soc/renesas/renesas-soc.c   |   16
-rw-r--r--	drivers/soc/ti/Kconfig              |    9
-rw-r--r--	drivers/soc/ti/Makefile             |    1
-rw-r--r--	drivers/soc/ti/pm33xx.c             |  349
23 files changed, 2107 insertions, 2207 deletions
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index cdaeeea7999c..7cd2fd04b212 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -13,22 +13,20 @@
 
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
+#include <linux/slab.h>
+
 #include <linux/platform_data/ti-sysc.h>
 
 #include <dt-bindings/bus/ti-sysc.h>
 
-enum sysc_registers {
-	SYSC_REVISION,
-	SYSC_SYSCONFIG,
-	SYSC_SYSSTATUS,
-	SYSC_MAX_REGS,
-};
-
 static const char * const reg_names[] = { "rev", "sysc", "syss", };
 
 enum sysc_clocks {
@@ -55,6 +53,7 @@ static const char * const clock_names[] = { "fck", "ick", };
  * @cfg: interconnect target module configuration
  * @name: name if available
  * @revision: interconnect target module revision
+ * @needs_resume: runtime resume needed on resume from suspend
  */
 struct sysc {
 	struct device *dev;
@@ -66,8 +65,13 @@ struct sysc {
 	const char *legacy_mode;
 	const struct sysc_capabilities *cap;
 	struct sysc_config cfg;
+	struct ti_sysc_cookie cookie;
 	const char *name;
 	u32 revision;
+	bool enabled;
+	bool needs_resume;
+	bool child_needs_resume;
+	struct delayed_work idle_work;
 };
 
 static u32 sysc_read(struct sysc *ddata, int offset)
@@ -136,9 +140,6 @@ static int sysc_get_clocks(struct sysc *ddata)
 {
 	int i, error;
 
-	if (ddata->legacy_mode)
-		return 0;
-
 	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
 		error = sysc_get_one_clock(ddata, i);
 		if (error && error != -ENOENT)
@@ -197,12 +198,53 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
 	ddata->module_pa = of_translate_address(np, ranges++);
 	ddata->module_size = be32_to_cpup(ranges);
 
-	dev_dbg(ddata->dev, "interconnect target 0x%llx size 0x%x for %pOF\n",
-		ddata->module_pa, ddata->module_size, np);
-
 	return 0;
 }
 
+static struct device_node *stdout_path;
+
+static void sysc_init_stdout_path(struct sysc *ddata)
+{
+	struct device_node *np = NULL;
+	const char *uart;
+
+	if (IS_ERR(stdout_path))
+		return;
+
+	if (stdout_path)
+		return;
+
+	np = of_find_node_by_path("/chosen");
+	if (!np)
+		goto err;
+
+	uart = of_get_property(np, "stdout-path", NULL);
+	if (!uart)
+		goto err;
+
+	np = of_find_node_by_path(uart);
+	if (!np)
+		goto err;
+
+	stdout_path = np;
+
+	return;
+
+err:
+	stdout_path = ERR_PTR(-ENODEV);
+}
+
+static void sysc_check_quirk_stdout(struct sysc *ddata,
+				    struct device_node *np)
+{
+	sysc_init_stdout_path(ddata);
+	if (np != stdout_path)
+		return;
+
+	ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
+				SYSC_QUIRK_NO_RESET_ON_INIT;
+}
+
 /**
  * sysc_check_one_child - check child configuration
  * @ddata: device driver data
@@ -221,6 +263,8 @@ static int sysc_check_one_child(struct sysc *ddata,
 	if (name)
 		dev_warn(ddata->dev, "really a child ti,hwmods property?");
 
+	sysc_check_quirk_stdout(ddata, np);
+
 	return 0;
 }
 
@@ -246,11 +290,8 @@ static int sysc_check_children(struct sysc *ddata)
  */
 static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
 {
-	if (resource_size(res) == 8) {
-		dev_dbg(ddata->dev,
-			"enabling 16-bit and clockactivity quirks\n");
+	if (resource_size(res) == 8)
 		ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
-	}
 }
 
 /**
@@ -276,7 +317,6 @@ static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
 	res = platform_get_resource_byname(to_platform_device(ddata->dev),
 					   IORESOURCE_MEM, name);
 	if (!res) {
-		dev_dbg(ddata->dev, "has no %s register\n", name);
 		ddata->offsets[reg] = -ENODEV;
 
 		return 0;
@@ -437,6 +477,14 @@ static int sysc_show_reg(struct sysc *ddata,
 	return sprintf(bufp, ":%x", ddata->offsets[reg]);
 }
 
+static int sysc_show_name(char *bufp, struct sysc *ddata)
+{
+	if (!ddata->name)
+		return 0;
+
+	return sprintf(bufp, ":%s", ddata->name);
+}
+
 /**
  * sysc_show_registers - show information about interconnect target module
  * @ddata: device driver data
@@ -451,6 +499,7 @@ static void sysc_show_registers(struct sysc *ddata)
 		bufp += sysc_show_reg(ddata, bufp, i);
 
 	bufp += sysc_show_rev(bufp, ddata);
+	bufp += sysc_show_name(bufp, ddata);
 
 	dev_dbg(ddata->dev, "%llx:%x%s\n",
 		ddata->module_pa, ddata->module_size,
@@ -459,33 +508,70 @@ static void sysc_show_registers(struct sysc *ddata)
 
 static int __maybe_unused sysc_runtime_suspend(struct device *dev)
 {
+	struct ti_sysc_platform_data *pdata;
 	struct sysc *ddata;
-	int i;
+	int error = 0, i;
 
 	ddata = dev_get_drvdata(dev);
 
-	if (ddata->legacy_mode)
+	if (!ddata->enabled)
 		return 0;
 
+	if (ddata->legacy_mode) {
+		pdata = dev_get_platdata(ddata->dev);
+		if (!pdata)
+			return 0;
+
+		if (!pdata->idle_module)
+			return -ENODEV;
+
+		error = pdata->idle_module(dev, &ddata->cookie);
+		if (error)
+			dev_err(dev, "%s: could not idle: %i\n",
+				__func__, error);
+
+		goto idled;
+	}
+
 	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
 		if (IS_ERR_OR_NULL(ddata->clocks[i]))
 			continue;
 		clk_disable(ddata->clocks[i]);
 	}
 
-	return 0;
+idled:
+	ddata->enabled = false;
+
+	return error;
 }
 
 static int __maybe_unused sysc_runtime_resume(struct device *dev)
 {
+	struct ti_sysc_platform_data *pdata;
 	struct sysc *ddata;
-	int i, error;
+	int error = 0, i;
 
 	ddata = dev_get_drvdata(dev);
 
-	if (ddata->legacy_mode)
+	if (ddata->enabled)
 		return 0;
 
+	if (ddata->legacy_mode) {
+		pdata = dev_get_platdata(ddata->dev);
+		if (!pdata)
+			return 0;
+
+		if (!pdata->enable_module)
+			return -ENODEV;
+
+		error = pdata->enable_module(dev, &ddata->cookie);
+		if (error)
+			dev_err(dev, "%s: could not enable: %i\n",
+				__func__, error);
+
+		goto awake;
+	}
+
 	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
 		if (IS_ERR_OR_NULL(ddata->clocks[i]))
 			continue;
@@ -494,20 +580,136 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
 			return error;
 	}
 
+awake:
+	ddata->enabled = true;
+
+	return error;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_suspend(struct device *dev)
+{
+	struct sysc *ddata;
+
+	ddata = dev_get_drvdata(dev);
+
+	if (!ddata->enabled)
+		return 0;
+
+	ddata->needs_resume = true;
+
+	return sysc_runtime_suspend(dev);
+}
+
+static int sysc_resume(struct device *dev)
+{
+	struct sysc *ddata;
+
+	ddata = dev_get_drvdata(dev);
+	if (ddata->needs_resume) {
+		ddata->needs_resume = false;
+
+		return sysc_runtime_resume(dev);
+	}
+
 	return 0;
 }
+#endif
 
 static const struct dev_pm_ops sysc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(sysc_suspend, sysc_resume)
 	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
 			   sysc_runtime_resume,
 			   NULL)
 };
 
+/* Module revision register based quirks */
+struct sysc_revision_quirk {
+	const char *name;
+	u32 base;
+	int rev_offset;
+	int sysc_offset;
+	int syss_offset;
+	u32 revision;
+	u32 revision_mask;
+	u32 quirks;
+};
+
+#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
+		   optrev_val, optrevmask, optquirkmask)		\
+	{								\
+		.name = (optname),					\
+		.base = (optbase),					\
+		.rev_offset = (optrev),					\
+		.sysc_offset = (optsysc),				\
+		.syss_offset = (optsyss),				\
+		.revision = (optrev_val),				\
+		.revision_mask = (optrevmask),				\
+		.quirks = (optquirkmask),				\
+	}
+
+static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000030, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+		   SYSC_QUIRK_LEGACY_IDLE),
+};
+
+static void sysc_init_revision_quirks(struct sysc *ddata)
+{
+	const struct sysc_revision_quirk *q;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+		q = &sysc_revision_quirks[i];
+
+		if (q->base && q->base != ddata->module_pa)
+			continue;
+
+		if (q->rev_offset >= 0 &&
+		    q->rev_offset != ddata->offsets[SYSC_REVISION])
+			continue;
+
+		if (q->sysc_offset >= 0 &&
+		    q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+			continue;
+
+		if (q->syss_offset >= 0 &&
+		    q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+			continue;
+
+		if (q->revision == ddata->revision ||
+		    (q->revision & q->revision_mask) ==
+		    (ddata->revision & q->revision_mask)) {
+			ddata->name = q->name;
+			ddata->cfg.quirks |= q->quirks;
+		}
+	}
+}
+
 /* At this point the module is configured enough to read the revision */
 static int sysc_init_module(struct sysc *ddata)
 {
 	int error;
 
+	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
+		ddata->revision = sysc_read_revision(ddata);
+		goto rev_quirks;
+	}
+
 	error = pm_runtime_get_sync(ddata->dev);
 	if (error < 0) {
 		pm_runtime_put_noidle(ddata->dev);
@@ -517,6 +719,9 @@ static int sysc_init_module(struct sysc *ddata)
 	ddata->revision = sysc_read_revision(ddata);
 	pm_runtime_put_sync(ddata->dev);
 
+rev_quirks:
+	sysc_init_revision_quirks(ddata);
+
 	return 0;
 }
 
@@ -605,6 +810,196 @@ static int sysc_init_syss_mask(struct sysc *ddata)
 	return 0;
 }
 
+/*
+ * Many child device drivers need to have fck available to get the clock
+ * rate for device internal configuration.
+ */
+static int sysc_child_add_fck(struct sysc *ddata,
+			      struct device *child)
+{
+	struct clk *fck;
+	struct clk_lookup *l;
+	const char *name = clock_names[SYSC_FCK];
+
+	if (IS_ERR_OR_NULL(ddata->clocks[SYSC_FCK]))
+		return 0;
+
+	fck = clk_get(child, name);
+	if (!IS_ERR(fck)) {
+		clk_put(fck);
+
+		return -EEXIST;
+	}
+
+	l = clkdev_create(ddata->clocks[SYSC_FCK], name, dev_name(child));
+
+	return l ? 0 : -ENODEV;
+}
+
+static struct device_type sysc_device_type = {
+};
+
+static struct sysc *sysc_child_to_parent(struct device *dev)
+{
+	struct device *parent = dev->parent;
+
+	if (!parent || parent->type != &sysc_device_type)
+		return NULL;
+
+	return dev_get_drvdata(parent);
+}
+
+static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
+{
+	struct sysc *ddata;
+	int error;
+
+	ddata = sysc_child_to_parent(dev);
+
+	error = pm_generic_runtime_suspend(dev);
+	if (error)
+		return error;
+
+	if (!ddata->enabled)
+		return 0;
+
+	return sysc_runtime_suspend(ddata->dev);
+}
+
+static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
+{
+	struct sysc *ddata;
+	int error;
+
+	ddata = sysc_child_to_parent(dev);
+
+	if (!ddata->enabled) {
+		error = sysc_runtime_resume(ddata->dev);
+		if (error < 0)
+			dev_err(ddata->dev,
+				"%s error: %i\n", __func__, error);
+	}
+
+	return pm_generic_runtime_resume(dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_child_suspend_noirq(struct device *dev)
+{
+	struct sysc *ddata;
+	int error;
+
+	ddata = sysc_child_to_parent(dev);
+
+	error = pm_generic_suspend_noirq(dev);
+	if (error)
+		return error;
+
+	if (!pm_runtime_status_suspended(dev)) {
+		error = pm_generic_runtime_suspend(dev);
+		if (error)
+			return error;
+
+		error = sysc_runtime_suspend(ddata->dev);
+		if (error)
+			return error;
+
+		ddata->child_needs_resume = true;
+	}
+
+	return 0;
+}
+
+static int sysc_child_resume_noirq(struct device *dev)
+{
+	struct sysc *ddata;
+	int error;
+
+	ddata = sysc_child_to_parent(dev);
+
+	if (ddata->child_needs_resume) {
+		ddata->child_needs_resume = false;
+
+		error = sysc_runtime_resume(ddata->dev);
+		if (error)
+			dev_err(ddata->dev,
+				"%s runtime resume error: %i\n",
+				__func__, error);
+
+		error = pm_generic_runtime_resume(dev);
+		if (error)
+			dev_err(ddata->dev,
+				"%s generic runtime resume: %i\n",
+				__func__, error);
+	}
+
+	return pm_generic_resume_noirq(dev);
+}
+#endif
+
+struct dev_pm_domain sysc_child_pm_domain = {
+	.ops = {
+		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
+				   sysc_child_runtime_resume,
+				   NULL)
+		USE_PLATFORM_PM_SLEEP_OPS
+		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
+					      sysc_child_resume_noirq)
+	}
+};
+
+/**
+ * sysc_legacy_idle_quirk - handle children in omap_device compatible way
+ * @ddata: device driver data
+ * @child: child device driver
+ *
+ * Allow idle for child devices as done with _od_runtime_suspend().
+ * Otherwise many child devices will not idle because of the permanent
+ * parent usecount set in pm_runtime_irq_safe().
+ *
+ * Note that the long term solution is to just modify the child device
+ * drivers to not set pm_runtime_irq_safe() and then this can be just
+ * dropped.
+ */
+static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
+{
+	if (!ddata->legacy_mode)
+		return;
+
+	if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+		dev_pm_domain_set(child, &sysc_child_pm_domain);
+}
+
+static int sysc_notifier_call(struct notifier_block *nb,
+			      unsigned long event, void *device)
+{
+	struct device *dev = device;
+	struct sysc *ddata;
+	int error;
+
+	ddata = sysc_child_to_parent(dev);
+	if (!ddata)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		error = sysc_child_add_fck(ddata, dev);
+		if (error && error != -EEXIST)
+			dev_warn(ddata->dev, "could not add %s fck: %i\n",
+				 dev_name(dev), error);
+		sysc_legacy_idle_quirk(ddata, dev);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sysc_nb = {
+	.notifier_call = sysc_notifier_call,
+};
+
 /* Device tree configured quirks */
 struct sysc_dts_quirk {
 	const char *name;
@@ -797,7 +1192,8 @@ static const struct sysc_capabilities sysc_34xx_sr = {
 	.type = TI_SYSC_OMAP34XX_SR,
 	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
 	.regbits = &sysc_regbits_omap34xx_sr,
-	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
+	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
+		      SYSC_QUIRK_LEGACY_IDLE,
 };
 
 /*
@@ -818,12 +1214,13 @@ static const struct sysc_capabilities sysc_36xx_sr = {
 	.type = TI_SYSC_OMAP36XX_SR,
 	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
 	.regbits = &sysc_regbits_omap36xx_sr,
-	.mod_quirks = SYSC_QUIRK_UNCACHED,
+	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
 };
 
 static const struct sysc_capabilities sysc_omap4_sr = {
 	.type = TI_SYSC_OMAP4_SR,
 	.regbits = &sysc_regbits_omap36xx_sr,
+	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
 };
 
 /*
@@ -865,6 +1262,33 @@ static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
 	.regbits = &sysc_regbits_omap4_usb_host_fs,
 };
 
+static int sysc_init_pdata(struct sysc *ddata)
+{
+	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+	struct ti_sysc_module_data mdata;
+	int error = 0;
+
+	if (!pdata || !ddata->legacy_mode)
+		return 0;
+
+	mdata.name = ddata->legacy_mode;
+	mdata.module_pa = ddata->module_pa;
+	mdata.module_size = ddata->module_size;
+	mdata.offsets = ddata->offsets;
+	mdata.nr_offsets = SYSC_MAX_REGS;
+	mdata.cap = ddata->cap;
+	mdata.cfg = &ddata->cfg;
+
+	if (!pdata->init_module)
+		return -ENODEV;
+
+	error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
+	if (error == -EEXIST)
+		error = 0;
+
+	return error;
+}
+
 static int sysc_init_match(struct sysc *ddata)
 {
 	const struct sysc_capabilities *cap;
@@ -880,8 +1304,19 @@ static int sysc_init_match(struct sysc *ddata)
 	return 0;
 }
 
+static void ti_sysc_idle(struct work_struct *work)
+{
+	struct sysc *ddata;
+
+	ddata = container_of(work, struct sysc, idle_work.work);
+
+	if (pm_runtime_active(ddata->dev))
+		pm_runtime_put_sync(ddata->dev);
+}
+
 static int sysc_probe(struct platform_device *pdev)
 {
+	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct sysc *ddata;
 	int error;
 
@@ -920,6 +1355,10 @@ static int sysc_probe(struct platform_device *pdev)
 	if (error)
 		goto unprepare;
 
+	error = sysc_init_pdata(ddata);
+	if (error)
+		goto unprepare;
+
 	pm_runtime_enable(ddata->dev);
 
 	error = sysc_init_module(ddata);
@@ -933,22 +1372,28 @@ static int sysc_probe(struct platform_device *pdev)
 		goto unprepare;
 	}
 
-	pm_runtime_use_autosuspend(ddata->dev);
-
 	sysc_show_registers(ddata);
 
+	ddata->dev->type = &sysc_device_type;
 	error = of_platform_populate(ddata->dev->of_node,
-				     NULL, NULL, ddata->dev);
+				     NULL, pdata ? pdata->auxdata : NULL,
+				     ddata->dev);
 	if (error)
 		goto err;
 
-	pm_runtime_mark_last_busy(ddata->dev);
-	pm_runtime_put_autosuspend(ddata->dev);
+	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
+
+	/* At least earlycon won't survive without deferred idle */
+	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE_ON_INIT |
+				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
+		schedule_delayed_work(&ddata->idle_work, 3000);
+	} else {
+		pm_runtime_put(&pdev->dev);
+	}
 
 	return 0;
 
 err:
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 unprepare:
@@ -962,6 +1407,8 @@ static int sysc_remove(struct platform_device *pdev)
 	struct sysc *ddata = platform_get_drvdata(pdev);
 	int error;
 
+	cancel_delayed_work_sync(&ddata->idle_work);
+
 	error = pm_runtime_get_sync(ddata->dev);
 	if (error < 0) {
 		pm_runtime_put_noidle(ddata->dev);
@@ -971,7 +1418,6 @@ static int sysc_remove(struct platform_device *pdev)
 
 	of_platform_depopulate(&pdev->dev);
 
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1008,7 +1454,21 @@ static struct platform_driver sysc_driver = {
 		.pm = &sysc_pm_ops,
 	},
 };
-module_platform_driver(sysc_driver);
+
+static int __init sysc_init(void)
+{
+	bus_register_notifier(&platform_bus_type, &sysc_nb);
+
+	return platform_driver_register(&sysc_driver);
+}
+module_init(sysc_init);
+
+static void __exit sysc_exit(void)
+{
+	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
+	platform_driver_unregister(&sysc_driver);
+}
+module_exit(sysc_exit);
 
 MODULE_DESCRIPTION("TI sysc interconnect target driver");
 MODULE_LICENSE("GPL v2");
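
The sysc_revision_quirks[] table added above keys quirks off the module revision register: an entry matches when the revision is equal outright, or equal once both values are reduced by the entry's mask, optionally gated by register offsets and module base address. A minimal standalone sketch of just the masked-revision test follows; the table entries and flag value here are hypothetical, not the driver's data, and the offset/base checks are omitted.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Trimmed-down model of struct sysc_revision_quirk from the driver above */
struct quirk {
	const char *name;
	uint32_t revision;	/* expected revision value */
	uint32_t revision_mask;	/* bits of the revision that must match */
	uint32_t quirks;	/* flags to apply on a match */
};

#define QUIRK_LEGACY_IDLE (1 << 0)	/* hypothetical flag value */

static const struct quirk quirks[] = {
	{ "uart", 0x00000052, 0xffffffff, QUIRK_LEGACY_IDLE },
	{ "sham", 0x40000c03, 0xffffffff, QUIRK_LEGACY_IDLE },
};

/* Mirror of the driver's test: exact match, or match under the mask */
static const struct quirk *match_revision(uint32_t revision)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->revision == revision ||
		    (q->revision & q->revision_mask) ==
		    (revision & q->revision_mask))
			return q;
	}
	return NULL;
}

int main(void)
{
	const struct quirk *q = match_revision(0x00000052);

	printf("matched: %s\n", q ? q->name : "(none)");
	return 0;
}

Here match_revision() returns the first entry whose masked revision agrees, mirroring the final comparison in sysc_init_revision_quirks().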
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 6021a5af21da..9ee2888275c1 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -21,6 +21,9 @@ config CLKEVT_I8253
 config I8253_LOCK
 	bool
 
+config OMAP_DM_TIMER
+	bool
+
 config CLKBLD_I8253
 	def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
 
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f0cb07637a65..e8e76dfef00b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)	+= i8253.o
 obj-$(CONFIG_CLKSRC_MMIO)	+= mmio.o
 obj-$(CONFIG_DIGICOLOR_TIMER)	+= timer-digicolor.o
+obj-$(CONFIG_OMAP_DM_TIMER)	+= timer-ti-dm.o
 obj-$(CONFIG_DW_APB_TIMER)	+= dw_apb_timer.o
 obj-$(CONFIG_DW_APB_TIMER_OF)	+= dw_apb_timer_of.o
 obj-$(CONFIG_FTTMR010_TIMER)	+= timer-fttmr010.o
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
new file mode 100644
index 000000000000..4cce6b224b87
--- /dev/null
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -0,0 +1,1000 @@
+/*
+ * linux/arch/arm/plat-omap/dmtimer.c
+ *
+ * OMAP Dual-Mode Timers
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * dmtimer adaptation to platform_driver.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * OMAP2 support by Juha Yrjola
+ * API improvements and OMAP2 clock framework support by Timo Teras
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dmtimer-omap.h>
+
+#include <clocksource/timer-ti-dm.h>
+
+static u32 omap_reserved_systimers;
+static LIST_HEAD(omap_timer_list);
+static DEFINE_SPINLOCK(dm_timer_lock);
+
+enum {
+	REQUEST_ANY = 0,
+	REQUEST_BY_ID,
+	REQUEST_BY_CAP,
+	REQUEST_BY_NODE,
+};
+
+/**
+ * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
+ * @timer:	timer pointer over which read operation to perform
+ * @reg:	lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the write
+ * pending bit must be checked. Otherwise a read of a non-completed write
+ * will produce an error.
+ */
+static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+{
+	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
+	return __omap_dm_timer_read(timer, reg, timer->posted);
+}
+
+/**
+ * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
+ * @timer:	timer pointer over which write operation is to perform
+ * @reg:	lowest byte holds the register offset
+ * @value:	data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the write
+ * pending bit must be checked. Otherwise a write on a register which has a
+ * pending write will be lost.
+ */
+static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
+				    u32 value)
+{
+	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
+	__omap_dm_timer_write(timer, reg, value, timer->posted);
+}
+
+static void omap_timer_restore_context(struct omap_dm_timer *timer)
+{
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
+				timer->context.twer);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
+				timer->context.tcrr);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
+				timer->context.tldr);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
+				timer->context.tmar);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
+				timer->context.tsicr);
+	writel_relaxed(timer->context.tier, timer->irq_ena);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
+				timer->context.tclr);
+}
+
+static int omap_dm_timer_reset(struct omap_dm_timer *timer)
+{
+	u32 l, timeout = 100000;
+
+	if (timer->revision != 1)
+		return -EINVAL;
+
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+
+	do {
+		l = __omap_dm_timer_read(timer,
+					 OMAP_TIMER_V1_SYS_STAT_OFFSET, 0);
+	} while (!l && timeout--);
+
+	if (!timeout) {
+		dev_err(&timer->pdev->dev, "Timer failed to reset\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Configure timer for smart-idle mode */
+	l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+	l |= 0x2 << 0x3;
+	__omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0);
+
+	timer->posted = 0;
+
+	return 0;
+}
+
+static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
+{
+	int ret;
+	struct clk *parent;
+
+	/*
+	 * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
+	 * do not call clk_get() for these devices.
+	 */
+	if (!timer->fclk)
+		return -ENODEV;
+
+	parent = clk_get(&timer->pdev->dev, NULL);
+	if (IS_ERR(parent))
+		return -ENODEV;
+
+	ret = clk_set_parent(timer->fclk, parent);
+	if (ret < 0)
+		pr_err("%s: failed to set parent\n", __func__);
+
+	clk_put(parent);
+
+	return ret;
+}
+
+static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+{
+	int ret;
+	const char *parent_name;
+	struct clk *parent;
+	struct dmtimer_platform_data *pdata;
+
+	if (unlikely(!timer) || IS_ERR(timer->fclk))
+		return -EINVAL;
+
+	switch (source) {
+	case OMAP_TIMER_SRC_SYS_CLK:
+		parent_name = "timer_sys_ck";
+		break;
+	case OMAP_TIMER_SRC_32_KHZ:
+		parent_name = "timer_32k_ck";
+		break;
+	case OMAP_TIMER_SRC_EXT_CLK:
+		parent_name = "timer_ext_ck";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pdata = timer->pdev->dev.platform_data;
+
+	/*
+	 * FIXME: Used for OMAP1 devices only because they do not currently
+	 * use the clock framework to set the parent clock. To be removed
+	 * once OMAP1 migrated to using clock framework for dmtimers
+	 */
+	if (pdata && pdata->set_timer_src)
+		return pdata->set_timer_src(timer->pdev, source);
+
+#if defined(CONFIG_COMMON_CLK)
+	/* Check if the clock has configurable parents */
+	if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
+		return 0;
+#endif
+
+	parent = clk_get(&timer->pdev->dev, parent_name);
+	if (IS_ERR(parent)) {
+		pr_err("%s: %s not found\n", __func__, parent_name);
+		return -EINVAL;
+	}
+
+	ret = clk_set_parent(timer->fclk, parent);
+	if (ret < 0)
+		pr_err("%s: failed to set %s as parent\n", __func__,
+		       parent_name);
+
+	clk_put(parent);
+
+	return ret;
+}
+
+static void omap_dm_timer_enable(struct omap_dm_timer *timer)
+{
+	int c;
+
+	pm_runtime_get_sync(&timer->pdev->dev);
+
+	if (!(timer->capability & OMAP_TIMER_ALWON)) {
+		if (timer->get_context_loss_count) {
+			c = timer->get_context_loss_count(&timer->pdev->dev);
+			if (c != timer->ctx_loss_count) {
+				omap_timer_restore_context(timer);
+				timer->ctx_loss_count = c;
+			}
+		} else {
+			omap_timer_restore_context(timer);
+		}
+	}
+}
+
+static void omap_dm_timer_disable(struct omap_dm_timer *timer)
+{
+	pm_runtime_put_sync(&timer->pdev->dev);
+}
+
+static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+	int rc;
+
+	/*
+	 * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
+	 * do not call clk_get() for these devices.
+	 */
+	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
+		timer->fclk = clk_get(&timer->pdev->dev, "fck");
+		if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
+			dev_err(&timer->pdev->dev, ": No fclk handle.\n");
+			return -EINVAL;
+		}
+	}
+
+	omap_dm_timer_enable(timer);
+
+	if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
+		rc = omap_dm_timer_reset(timer);
+		if (rc) {
+			omap_dm_timer_disable(timer);
+			return rc;
+		}
+	}
+
+	__omap_dm_timer_enable_posted(timer);
+	omap_dm_timer_disable(timer);
+
+	rc = omap_dm_timer_of_set_source(timer);
+	if (rc == -ENODEV)
+		return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+
+	return rc;
+}
+
+static inline u32 omap_dm_timer_reserved_systimer(int id)
+{
+	return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
+}
+
+int omap_dm_timer_reserve_systimer(int id)
+{
+	if (omap_dm_timer_reserved_systimer(id))
+		return -ENODEV;
+
+	omap_reserved_systimers |= (1 << (id - 1));
+
+	return 0;
+}
+
+static struct omap_dm_timer *_omap_dm_timer_request(int req_type, void *data)
+{
+	struct omap_dm_timer *timer = NULL, *t;
+	struct device_node *np = NULL;
+	unsigned long flags;
+	u32 cap = 0;
+	int id = 0;
+
+	switch (req_type) {
+	case REQUEST_BY_ID:
+		id = *(int *)data;
+		break;
+	case REQUEST_BY_CAP:
+		cap = *(u32 *)data;
+		break;
+	case REQUEST_BY_NODE:
+		np = (struct device_node *)data;
+		break;
+	default:
+		/* REQUEST_ANY */
+		break;
+	}
+
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	list_for_each_entry(t, &omap_timer_list, node) {
+		if (t->reserved)
+			continue;
+
+		switch (req_type) {
+		case REQUEST_BY_ID:
+			if (id == t->pdev->id) {
+				timer = t;
+				timer->reserved = 1;
+				goto found;
+			}
+			break;
+		case REQUEST_BY_CAP:
+			if (cap == (t->capability & cap)) {
+				/*
+				 * If timer is not NULL, we have already found
+				 * one timer. But it was not an exact match
+				 * because it had more capabilities than what
+				 * was required. Therefore, unreserve the last
+				 * timer found and see if this one is a better
+				 * match.
+				 */
+				if (timer)
+					timer->reserved = 0;
+				timer = t;
+				timer->reserved = 1;
+
+				/* Exit loop early if we find an exact match */
+				if (t->capability == cap)
+					goto found;
+			}
+			break;
+		case REQUEST_BY_NODE:
+			if (np == t->pdev->dev.of_node) {
+				timer = t;
+				timer->reserved = 1;
+				goto found;
+			}
+			break;
+		default:
+			/* REQUEST_ANY */
+			timer = t;
+			timer->reserved = 1;
+			goto found;
+		}
+	}
+found:
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	if (timer && omap_dm_timer_prepare(timer)) {
+		timer->reserved = 0;
+		timer = NULL;
+	}
+
+	if (!timer)
+		pr_debug("%s: timer request failed!\n", __func__);
+
+	return timer;
+}
+
+static struct omap_dm_timer *omap_dm_timer_request(void)
+{
+	return _omap_dm_timer_request(REQUEST_ANY, NULL);
+}
+
+static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
+{
+	/* Requesting timer by ID is not supported when device tree is used */
+	if (of_have_populated_dt()) {
+		pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
+			__func__);
+		return NULL;
+	}
+
+	return _omap_dm_timer_request(REQUEST_BY_ID, &id);
+}
+
+/**
+ * omap_dm_timer_request_by_cap - Request a timer by capability
+ * @cap:	Bit mask of capabilities to match
+ *
+ * Find a timer based upon capabilities bit mask. Callers of this function
+ * should use the definitions found in the plat/dmtimer.h file under the
+ * comment "timer capabilities used in hwmod database". Returns pointer to
+ * timer handle on success and a NULL pointer on failure.
+ */
+struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
+{
+	return _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
+}
+
+/**
+ * omap_dm_timer_request_by_node - Request a timer by device-tree node
+ * @np:	Pointer to device-tree timer node
+ *
+ * Request a timer based upon a device node pointer. Returns pointer to
+ * timer handle on success and a NULL pointer on failure.
+ */
+static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
+{
+	if (!np)
+		return NULL;
+
+	return _omap_dm_timer_request(REQUEST_BY_NODE, np);
+}
+
+static int omap_dm_timer_free(struct omap_dm_timer *timer)
+{
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	clk_put(timer->fclk);
+
+	WARN_ON(!timer->reserved);
+	timer->reserved = 0;
+	return 0;
+}
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+{
+	if (timer)
+		return timer->irq;
+	return -EINVAL;
+}
+
+#if defined(CONFIG_ARCH_OMAP1)
+#include <mach/hardware.h>
+
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+	return NULL;
+}
+
+/**
+ * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
+ * @inputmask: current value of idlect mask
+ */
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+	int i = 0;
+	struct omap_dm_timer *timer = NULL;
+	unsigned long flags;
+
+	/* If ARMXOR cannot be idled this function call is unnecessary */
+	if (!(inputmask & (1 << 1)))
+		return inputmask;
+
+	/* If any active timer is using ARMXOR return modified mask */
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	list_for_each_entry(timer, &omap_timer_list, node) {
+		u32 l;
+
+		l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+		if (l & OMAP_TIMER_CTRL_ST) {
+			if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
+				inputmask &= ~(1 << 1);
+			else
+				inputmask &= ~(1 << 2);
+		}
+		i++;
+	}
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	return inputmask;
+}
+
+#else
+
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+	if (timer && !IS_ERR(timer->fclk))
+		return timer->fclk;
+	return NULL;
+}
+
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+	BUG();
+
+	return 0;
+}
+
+#endif
+
+int omap_dm_timer_trigger(struct omap_dm_timer *timer)
+{
+	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+		pr_err("%s: timer not available or enabled.\n", __func__);
+		return -EINVAL;
+	}
+
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+	return 0;
+}
+
+static int omap_dm_timer_start(struct omap_dm_timer *timer)
+{
+	u32 l;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (!(l & OMAP_TIMER_CTRL_ST)) {
+		l |= OMAP_TIMER_CTRL_ST;
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	}
+
+	/* Save the context */
+	timer->context.tclr = l;
+	return 0;
+}
+
+static int omap_dm_timer_stop(struct omap_dm_timer *timer)
+{
+	unsigned long rate = 0;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
+		rate = clk_get_rate(timer->fclk);
+
+	__omap_dm_timer_stop(timer, timer->posted, rate);
+
+	/*
+	 * Since the register values are computed and written within
+	 * __omap_dm_timer_stop, we need to use read to retrieve the
+	 * context.
+	 */
+	timer->context.tclr =
+			omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+				  unsigned int load)
+{
+	u32 l;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (autoreload)
+		l |= OMAP_TIMER_CTRL_AR;
+	else
+		l &= ~OMAP_TIMER_CTRL_AR;
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+	/* Save the context */
+	timer->context.tclr = l;
+	timer->context.tldr = load;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+/* Optimized set_load which removes costly spin wait in timer_start */
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
+				 unsigned int load)
+{
+	u32 l;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (autoreload) {
+		l |= OMAP_TIMER_CTRL_AR;
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+	} else {
+		l &= ~OMAP_TIMER_CTRL_AR;
+	}
+	l |= OMAP_TIMER_CTRL_ST;
+
+	__omap_dm_timer_load_start(timer, l, load, timer->posted);
+
+	/* Save the context */
+	timer->context.tclr = l;
+	timer->context.tldr = load;
+	timer->context.tcrr = load;
+	return 0;
+}
+static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+				   unsigned int match)
+{
+	u32 l;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (enable)
+		l |= OMAP_TIMER_CTRL_CE;
+	else
+		l &= ~OMAP_TIMER_CTRL_CE;
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+	/* Save the context */
+	timer->context.tclr = l;
+	timer->context.tmar = match;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+				 int toggle, int trigger)
+{
+	u32 l;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
+	       OMAP_TIMER_CTRL_PT | (0x03 << 10));
+	if (def_on)
+		l |= OMAP_TIMER_CTRL_SCPWM;
+	if (toggle)
+		l |= OMAP_TIMER_CTRL_PT;
+	l |= trigger << 10;
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+	/* Save the context */
+	timer->context.tclr = l;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
+				       int prescaler)
+{
+	u32 l;
+
+	if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
+	if (prescaler >= 0) {
+		l |= OMAP_TIMER_CTRL_PRE;
+		l |= prescaler << 2;
+	}
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+	/* Save the context */
+	timer->context.tclr = l;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+					unsigned int value)
+{
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+	__omap_dm_timer_int_enable(timer, value);
+
+	/* Save the context */
+	timer->context.tier = value;
+	timer->context.twer = value;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+/**
+ * omap_dm_timer_set_int_disable - disable timer interrupts
+ * @timer:	pointer to timer handle
+ * @mask:	bit mask of interrupts to be disabled
+ *
+ * Disables the specified timer interrupts for a timer.
+ */
+static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
+{
+	u32 l = mask;
+
+	if (unlikely(!timer))
+		return -EINVAL;
+
+	omap_dm_timer_enable(timer);
+
+	if (timer->revision == 1)
+		l = readl_relaxed(timer->irq_ena) & ~mask;
+
+	writel_relaxed(l, timer->irq_dis);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
+
+	/* Save the context */
+	timer->context.tier &= ~mask;
+	timer->context.twer &= ~mask;
+	omap_dm_timer_disable(timer);
+	return 0;
+}
+
+static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+{
+	unsigned int l;
+
+	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+		pr_err("%s: timer not available or enabled.\n", __func__);
+		return 0;
+	}
+
+	l = readl_relaxed(timer->irq_stat);
+
+	return l;
+}
+
+static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+{
+	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
+		return -EINVAL;
+
+	__omap_dm_timer_write_status(timer, value);
+
+	return 0;
+}
+
+static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+{
+	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+		pr_err("%s: timer not available or enabled.\n", __func__);
+		return 0;
+	}
+
+	return __omap_dm_timer_read_counter(timer, timer->posted);
+}
+
+static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+{
+	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+		pr_err("%s: timer not available or enabled.\n", __func__);
+		return -EINVAL;
+	}
+
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+
+	/* Save the context */
+	timer->context.tcrr = value;
+	return 0;
+}
+
+int omap_dm_timers_active(void)
+{
+	struct omap_dm_timer *timer;
+
+	list_for_each_entry(timer, &omap_timer_list, node) {
+		if (!timer->reserved)
+			continue;
+
+		if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
+		    OMAP_TIMER_CTRL_ST) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id omap_timer_match[];
+
+/**
+ * omap_dm_timer_probe - probe function called for every registered device
+ * @pdev:	pointer to current timer platform device
+ *
+ * Called by driver framework at the end of device registration for all
+ * timer devices.
+ */
+static int omap_dm_timer_probe(struct platform_device *pdev)
+{
+	unsigned long flags;
+	struct omap_dm_timer *timer;
+	struct resource *mem, *irq;
+	struct device *dev = &pdev->dev;
+	const struct dmtimer_platform_data *pdata;
+	int ret;
+
+	pdata = of_device_get_match_data(dev);
+	if (!pdata)
+		pdata = dev_get_platdata(dev);
+	else
+		dev->platform_data = (void *)pdata;
+
+	if (!pdata) {
+		dev_err(dev, "%s: no platform data.\n", __func__);
+		return -ENODEV;
+	}
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (unlikely(!irq)) {
+		dev_err(dev, "%s: no IRQ resource.\n", __func__);
+		return -ENODEV;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!mem)) {
+		dev_err(dev, "%s: no memory resource.\n", __func__);
+		return -ENODEV;
+	}
+
+	timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
+	if (!timer)
+		return -ENOMEM;
+
+	timer->fclk = ERR_PTR(-ENODEV);
+	timer->io_base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(timer->io_base))
+		return PTR_ERR(timer->io_base);
+
+	if (dev->of_node) {
+		if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
+			timer->capability |= OMAP_TIMER_ALWON;
+		if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
+			timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
+		if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
+			timer->capability |= OMAP_TIMER_HAS_PWM;
+		if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
+			timer->capability |= OMAP_TIMER_SECURE;
+	} else {
+		timer->id = pdev->id;
+		timer->capability = pdata->timer_capability;
+		timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
+		timer->get_context_loss_count = pdata->get_context_loss_count;
+	}
+
+	if (pdata)
+		timer->errata = pdata->timer_errata;
+
+	timer->irq = irq->start;
+	timer->pdev = pdev;
+
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
+
+	if (!timer->reserved) {
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+				__func__);
+			goto err_get_sync;
+		}
+		__omap_dm_timer_init_regs(timer);
+		pm_runtime_put(dev);
+	}
+
+	/* add the timer element to the list */
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	list_add_tail(&timer->node, &omap_timer_list);
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	dev_dbg(dev, "Device Probed.\n");
+
+	return 0;
+
+err_get_sync:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+/**
+ * omap_dm_timer_remove - cleanup a registered timer device
+ * @pdev:	pointer to current timer platform device
+ *
+ * Called by driver framework whenever a timer device is unregistered.
+ * In addition to freeing platform resources it also deletes the timer
+ * entry from the local list.
+ */
+static int omap_dm_timer_remove(struct platform_device *pdev)
+{
+	struct omap_dm_timer *timer;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	list_for_each_entry(timer, &omap_timer_list, node)
+		if (!strcmp(dev_name(&timer->pdev->dev),
+			    dev_name(&pdev->dev))) {
+			list_del(&timer->node);
+			ret = 0;
+			break;
+		}
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return ret;
+}
+
+static const struct omap_dm_timer_ops dmtimer_ops = {
+	.request_by_node = omap_dm_timer_request_by_node,
+	.request_specific = omap_dm_timer_request_specific,
+	.request = omap_dm_timer_request,
+	.set_source = omap_dm_timer_set_source,
+	.get_irq = omap_dm_timer_get_irq,
+	.set_int_enable = omap_dm_timer_set_int_enable,
+	.set_int_disable = omap_dm_timer_set_int_disable,
+	.free = omap_dm_timer_free,
+	.enable = omap_dm_timer_enable,
+	.disable = omap_dm_timer_disable,
+	.get_fclk = omap_dm_timer_get_fclk,
+	.start = omap_dm_timer_start,
+	.stop = omap_dm_timer_stop,
+	.set_load = omap_dm_timer_set_load,
+	.set_match = omap_dm_timer_set_match,
+	.set_pwm = omap_dm_timer_set_pwm,
+	.set_prescaler = omap_dm_timer_set_prescaler,
+	.read_counter = omap_dm_timer_read_counter,
+	.write_counter = omap_dm_timer_write_counter,
+	.read_status = omap_dm_timer_read_status,
+	.write_status = omap_dm_timer_write_status,
+};
+
+static const struct dmtimer_platform_data omap3plus_pdata = {
+	.timer_errata = OMAP_TIMER_ERRATA_I103_I767,
+	.timer_ops = &dmtimer_ops,
+};
+
+static const struct of_device_id omap_timer_match[] = {
+	{
+		.compatible = "ti,omap2420-timer",
+	},
+	{
+		.compatible = "ti,omap3430-timer",
+		.data = &omap3plus_pdata,
+	},
+	{
+		.compatible = "ti,omap4430-timer",
+		.data = &omap3plus_pdata,
+	},
+	{
+		.compatible = "ti,omap5430-timer",
+		.data = &omap3plus_pdata,
+	},
+	{
+		.compatible = "ti,am335x-timer",
+		.data = &omap3plus_pdata,
+	},
+	{
+		.compatible = "ti,am335x-timer-1ms",
+		.data = &omap3plus_pdata,
+	},
+	{
+		.compatible = "ti,dm816-timer",
+		.data = &omap3plus_pdata,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_timer_match);
+
+static struct platform_driver omap_dm_timer_driver = {
+	.probe = omap_dm_timer_probe,
+	.remove = omap_dm_timer_remove,
+	.driver = {
+		.name	= "omap_timer",
+		.of_match_table = of_match_ptr(omap_timer_match),
+	},
+};
+
+early_platform_init("earlytimer", &omap_dm_timer_driver);
+module_platform_driver(omap_dm_timer_driver);
+
+MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 736ac887303c..605ec8cce67b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -313,17 +313,6 @@ config MTD_NAND_ATMEL
313	  Enables support for NAND Flash / Smart Media Card interface
314	  on Atmel AT91 processors.
315
316config MTD_NAND_PXA3xx
317 tristate "NAND support on PXA3xx and Armada 370/XP"
318 depends on !MTD_NAND_MARVELL
319 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
320 help
321
322 This enables the driver for the NAND flash device found on
323 PXA3xx processors (NFCv1) and also on 32-bit Armada
324 platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
325 platforms (7K, 8K) (NFCv2).
326
327config MTD_NAND_MARVELL
328	tristate "NAND controller support on Marvell boards"
329	depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 921634ba400c..c882d5ef192a 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -31,7 +31,6 @@ omap2_nand-objs := omap2.o
31obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
32obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
33obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
34obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
35obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
36obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
37obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
index 2196f2a233d6..03805f9669da 100644
--- a/drivers/mtd/nand/marvell_nand.c
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -2520,8 +2520,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2520
2521	if (pdata)
2522		/* Legacy bindings support only one chip */
2523		ret = mtd_device_register(mtd, pdata->parts[0],
2524					  pdata->nr_parts[0]);
2523		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
2525	else
2526		ret = mtd_device_register(mtd, NULL, 0);
2527	if (ret) {
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
deleted file mode 100644
index d1979c7dbe7e..000000000000
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ /dev/null
@@ -1,2105 +0,0 @@
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dmaengine.h>
19#include <linux/dma-mapping.h>
20#include <linux/dma/pxa-dma.h>
21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/rawnand.h>
25#include <linux/mtd/partitions.h>
26#include <linux/io.h>
27#include <linux/iopoll.h>
28#include <linux/irq.h>
29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/of_device.h>
32#include <linux/platform_data/mtd-nand-pxa3xx.h>
33#include <linux/mfd/syscon.h>
34#include <linux/regmap.h>
35
36#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
37#define NAND_STOP_DELAY msecs_to_jiffies(40)
38#define PAGE_CHUNK_SIZE (2048)
39
40/*
41 * Define a buffer size for the initial command that detects the flash device:
42 * STATUS, READID and PARAM.
43 * ONFI param page is 256 bytes, and there are three redundant copies
44 * to be read. JEDEC param page is 512 bytes, and there are also three
45 * redundant copies to be read.
46 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
47 */
48#define INIT_BUFFER_SIZE 2048
49
50/* System control register and bit to enable NAND on some SoCs */
51#define GENCONF_SOC_DEVICE_MUX 0x208
52#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
53
54/* registers and bit definitions */
55#define NDCR (0x00) /* Control register */
56#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58#define NDSR (0x14) /* Status Register */
59#define NDPCR (0x18) /* Page Count Register */
60#define NDBDR0 (0x1C) /* Bad Block Register 0 */
61#define NDBDR1 (0x20) /* Bad Block Register 1 */
62#define NDECCCTRL (0x28) /* ECC control */
63#define NDDB (0x40) /* Data Buffer */
64#define NDCB0 (0x48) /* Command Buffer0 */
65#define NDCB1 (0x4C) /* Command Buffer1 */
66#define NDCB2 (0x50) /* Command Buffer2 */
67
68#define NDCR_SPARE_EN (0x1 << 31)
69#define NDCR_ECC_EN (0x1 << 30)
70#define NDCR_DMA_EN (0x1 << 29)
71#define NDCR_ND_RUN (0x1 << 28)
72#define NDCR_DWIDTH_C (0x1 << 27)
73#define NDCR_DWIDTH_M (0x1 << 26)
74#define NDCR_PAGE_SZ (0x1 << 24)
75#define NDCR_NCSX (0x1 << 23)
76#define NDCR_ND_MODE (0x3 << 21)
77#define NDCR_NAND_MODE (0x0)
78#define NDCR_CLR_PG_CNT (0x1 << 20)
79#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
81#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84#define NDCR_RA_START (0x1 << 15)
85#define NDCR_PG_PER_BLK (0x1 << 14)
86#define NDCR_ND_ARB_EN (0x1 << 12)
87#define NDCR_INT_MASK (0xFFF)
88
89#define NDSR_MASK (0xfff)
90#define NDSR_ERR_CNT_OFF (16)
91#define NDSR_ERR_CNT_MASK (0x1f)
92#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93#define NDSR_RDY (0x1 << 12)
94#define NDSR_FLASH_RDY (0x1 << 11)
95#define NDSR_CS0_PAGED (0x1 << 10)
96#define NDSR_CS1_PAGED (0x1 << 9)
97#define NDSR_CS0_CMDD (0x1 << 8)
98#define NDSR_CS1_CMDD (0x1 << 7)
99#define NDSR_CS0_BBD (0x1 << 6)
100#define NDSR_CS1_BBD (0x1 << 5)
101#define NDSR_UNCORERR (0x1 << 4)
102#define NDSR_CORERR (0x1 << 3)
103#define NDSR_WRDREQ (0x1 << 2)
104#define NDSR_RDDREQ (0x1 << 1)
105#define NDSR_WRCMDREQ (0x1)
106
107#define NDCB0_LEN_OVRD (0x1 << 28)
108#define NDCB0_ST_ROW_EN (0x1 << 26)
109#define NDCB0_AUTO_RS (0x1 << 25)
110#define NDCB0_CSEL (0x1 << 24)
111#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115#define NDCB0_NC (0x1 << 20)
116#define NDCB0_DBC (0x1 << 19)
117#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119#define NDCB0_CMD2_MASK (0xff << 8)
120#define NDCB0_CMD1_MASK (0xff)
121#define NDCB0_ADDR_CYC_SHIFT (16)
122
123#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125#define EXT_CMD_TYPE_READ 4 /* Read */
126#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127#define EXT_CMD_TYPE_FINAL 3 /* Final command */
128#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
130
131/*
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
135 */
136#define READ_ID_BYTES 7
137
138/* macros for registers read/write */
139#define nand_writel(info, off, val) \
140 do { \
141 dev_vdbg(&info->pdev->dev, \
142 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
143 __func__, __LINE__, (val), (off)); \
144 writel_relaxed((val), (info)->mmio_base + (off)); \
145 } while (0)
146
147#define nand_readl(info, off) \
148 ({ \
149 unsigned int _v; \
150 _v = readl_relaxed((info)->mmio_base + (off)); \
151 dev_vdbg(&info->pdev->dev, \
152 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
153 __func__, __LINE__, (off), _v); \
154 _v; \
155 })
156
157/* error code and state */
158enum {
159 ERR_NONE = 0,
160 ERR_DMABUSERR = -1,
161 ERR_SENDCMD = -2,
162 ERR_UNCORERR = -3,
163 ERR_BBERR = -4,
164 ERR_CORERR = -5,
165};
166
167enum {
168 STATE_IDLE = 0,
169 STATE_PREPARED,
170 STATE_CMD_HANDLE,
171 STATE_DMA_READING,
172 STATE_DMA_WRITING,
173 STATE_DMA_DONE,
174 STATE_PIO_READING,
175 STATE_PIO_WRITING,
176 STATE_CMD_DONE,
177 STATE_READY,
178};
179
180enum pxa3xx_nand_variant {
181 PXA3XX_NAND_VARIANT_PXA,
182 PXA3XX_NAND_VARIANT_ARMADA370,
183 PXA3XX_NAND_VARIANT_ARMADA_8K,
184};
185
186struct pxa3xx_nand_host {
187 struct nand_chip chip;
188 void *info_data;
189
190 /* page size of attached chip */
191 int use_ecc;
192 int cs;
193
194 /* calculated from pxa3xx_nand_flash data */
195 unsigned int col_addr_cycles;
196 unsigned int row_addr_cycles;
197};
198
199struct pxa3xx_nand_info {
200 struct nand_hw_control controller;
201 struct platform_device *pdev;
202
203 struct clk *clk;
204 void __iomem *mmio_base;
205 unsigned long mmio_phys;
206 struct completion cmd_complete, dev_ready;
207
208 unsigned int buf_start;
209 unsigned int buf_count;
210 unsigned int buf_size;
211 unsigned int data_buff_pos;
212 unsigned int oob_buff_pos;
213
214 /* DMA information */
215 struct scatterlist sg;
216 enum dma_data_direction dma_dir;
217 struct dma_chan *dma_chan;
218 dma_cookie_t dma_cookie;
219 int drcmr_dat;
220
221 unsigned char *data_buff;
222 unsigned char *oob_buff;
223 dma_addr_t data_buff_phys;
224 int data_dma_ch;
225
226 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
227 unsigned int state;
228
229 /*
230 * This driver supports NFCv1 (as found in PXA SoC)
231 * and NFCv2 (as found in Armada 370/XP SoC).
232 */
233 enum pxa3xx_nand_variant variant;
234
235 int cs;
236 int use_ecc; /* use HW ECC ? */
237 int ecc_bch; /* using BCH ECC? */
238 int use_dma; /* use DMA ? */
239 int use_spare; /* use spare ? */
240 int need_wait;
241
242 /* Amount of real data per full chunk */
243 unsigned int chunk_size;
244
245 /* Amount of spare data per full chunk */
246 unsigned int spare_size;
247
249 /* Number of full chunks (i.e., chunks of size chunk_size + spare_size) */
249 unsigned int nfullchunks;
250
251 /*
252 * Total number of chunks. If equal to nfullchunks, then there
253 * are only full chunks. Otherwise, there is one last chunk of
254 * size (last_chunk_size + last_spare_size)
255 */
256 unsigned int ntotalchunks;
257
258 /* Amount of real data in the last chunk */
259 unsigned int last_chunk_size;
260
261 /* Amount of spare data in the last chunk */
262 unsigned int last_spare_size;
263
264 unsigned int ecc_size;
265 unsigned int ecc_err_cnt;
266 unsigned int max_bitflips;
267 int retcode;
268
269 /*
270 * Variables only valid during command
271 * execution. step_chunk_size and step_spare_size is the
272 * amount of real data and spare data in the current
273 * chunk. cur_chunk is the current chunk being
274 * read/programmed.
275 */
276 unsigned int step_chunk_size;
277 unsigned int step_spare_size;
278 unsigned int cur_chunk;
279
280 /* cached register value */
281 uint32_t reg_ndcr;
282 uint32_t ndtr0cs0;
283 uint32_t ndtr1cs0;
284
285 /* generated NDCBx register values */
286 uint32_t ndcb0;
287 uint32_t ndcb1;
288 uint32_t ndcb2;
289 uint32_t ndcb3;
290};
291
292static bool use_dma = true;
293module_param(use_dma, bool, 0444);
294MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
295
296struct pxa3xx_nand_timing {
297 unsigned int tCH; /* Enable signal hold time */
298 unsigned int tCS; /* Enable signal setup time */
299 unsigned int tWH; /* ND_nWE high duration */
300 unsigned int tWP; /* ND_nWE pulse time */
301 unsigned int tRH; /* ND_nRE high duration */
302 unsigned int tRP; /* ND_nRE pulse width */
303 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
304 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
305 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
306};
307
308struct pxa3xx_nand_flash {
309 uint32_t chip_id;
310 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
311 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
312 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
313};
314
315static struct pxa3xx_nand_timing timing[] = {
316 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
317 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
318 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
319 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
320};
321
322static struct pxa3xx_nand_flash builtin_flash_types[] = {
323 { 0x46ec, 16, 16, &timing[1] },
324 { 0xdaec, 8, 8, &timing[1] },
325 { 0xd7ec, 8, 8, &timing[1] },
326 { 0xa12c, 8, 8, &timing[2] },
327 { 0xb12c, 16, 16, &timing[2] },
328 { 0xdc2c, 8, 8, &timing[2] },
329 { 0xcc2c, 16, 16, &timing[2] },
330 { 0xba20, 16, 16, &timing[3] },
331};
332
333static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
334 struct mtd_oob_region *oobregion)
335{
336 struct nand_chip *chip = mtd_to_nand(mtd);
337 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
338 struct pxa3xx_nand_info *info = host->info_data;
339 int nchunks = mtd->writesize / info->chunk_size;
340
341 if (section >= nchunks)
342 return -ERANGE;
343
344 oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
345 info->spare_size;
346 oobregion->length = info->ecc_size;
347
348 return 0;
349}
350
351static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
352 struct mtd_oob_region *oobregion)
353{
354 struct nand_chip *chip = mtd_to_nand(mtd);
355 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
356 struct pxa3xx_nand_info *info = host->info_data;
357 int nchunks = mtd->writesize / info->chunk_size;
358
359 if (section >= nchunks)
360 return -ERANGE;
361
362 if (!info->spare_size)
363 return 0;
364
365 oobregion->offset = section * (info->ecc_size + info->spare_size);
366 oobregion->length = info->spare_size;
367 if (!section) {
368 /*
369 * Bootrom looks in bytes 0 & 5 for bad blocks for the
370 * 4KB page / 4bit BCH combination.
371 */
372 if (mtd->writesize == 4096 && info->chunk_size == 2048) {
373 oobregion->offset += 6;
374 oobregion->length -= 6;
375 } else {
376 oobregion->offset += 2;
377 oobregion->length -= 2;
378 }
379 }
380
381 return 0;
382}
383
384static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
385 .ecc = pxa3xx_ooblayout_ecc,
386 .free = pxa3xx_ooblayout_free,
387};
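
/*
 * Editor's note: a worked example of the two callbacks above, assuming
 * a hypothetical 4 KiB page split into two 2048-byte chunks with
 * spare_size = 32 and ecc_size = 32 (so nchunks = 2):
 *   ECC  section 0: offset = (32 + 32) * 0 + 32 = 32, length = 32
 *   ECC  section 1: offset = (32 + 32) * 1 + 32 = 96, length = 32
 *   free section 0: offset = 0 + 6 = 6, length = 32 - 6 = 26
 *                   (bytes 0 and 5 are reserved for the bootrom's bad
 *                   block markers in the 4 KiB page / 2048 chunk case)
 *   free section 1: offset = 64, length = 32
 */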
388
389static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
390static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
391
392static struct nand_bbt_descr bbt_main_descr = {
393 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
394 | NAND_BBT_2BIT | NAND_BBT_VERSION,
395 .offs = 8,
396 .len = 6,
397 .veroffs = 14,
398 .maxblocks = 8, /* Last 8 blocks in each chip */
399 .pattern = bbt_pattern
400};
401
402static struct nand_bbt_descr bbt_mirror_descr = {
403 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
404 | NAND_BBT_2BIT | NAND_BBT_VERSION,
405 .offs = 8,
406 .len = 6,
407 .veroffs = 14,
408 .maxblocks = 8, /* Last 8 blocks in each chip */
409 .pattern = bbt_mirror_pattern
410};
411
412#define NDTR0_tCH(c) (min((c), 7) << 19)
413#define NDTR0_tCS(c) (min((c), 7) << 16)
414#define NDTR0_tWH(c) (min((c), 7) << 11)
415#define NDTR0_tWP(c) (min((c), 7) << 8)
416#define NDTR0_tRH(c) (min((c), 7) << 3)
417#define NDTR0_tRP(c) (min((c), 7) << 0)
418
419#define NDTR1_tR(c) (min((c), 65535) << 16)
420#define NDTR1_tWHR(c) (min((c), 15) << 4)
421#define NDTR1_tAR(c) (min((c), 15) << 0)
422
423/* convert nano-seconds to nand flash controller clock cycles */
424#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
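
/*
 * Editor's note: a standalone sanity check of the macro above, with an
 * illustrative 104 MHz controller clock. Both divisions truncate, so
 * the result rounds down and can slightly undershoot the requested
 * delay.
 */
#include <stdio.h>

#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

int main(void)
{
	/* 40 ns at 104 MHz: (40 * 104) / 1000 = 4 cycles */
	printf("%d\n", ns2cycle(40, 104000000UL));
	return 0;
}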
425
426static const struct of_device_id pxa3xx_nand_dt_ids[] = {
427 {
428 .compatible = "marvell,pxa3xx-nand",
429 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
430 },
431 {
432 .compatible = "marvell,armada370-nand",
433 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
434 },
435 {
436 .compatible = "marvell,armada-8k-nand",
437 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
438 },
439 {}
440};
441MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
442
443static enum pxa3xx_nand_variant
444pxa3xx_nand_get_variant(struct platform_device *pdev)
445{
446 const struct of_device_id *of_id =
447 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
448 if (!of_id)
449 return PXA3XX_NAND_VARIANT_PXA;
450 return (enum pxa3xx_nand_variant)of_id->data;
451}
452
453static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
454 const struct pxa3xx_nand_timing *t)
455{
456 struct pxa3xx_nand_info *info = host->info_data;
457 unsigned long nand_clk = clk_get_rate(info->clk);
458 uint32_t ndtr0, ndtr1;
459
460 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
461 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
462 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
463 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
464 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
465 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
466
467 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
468 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
469 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
470
471 info->ndtr0cs0 = ndtr0;
472 info->ndtr1cs0 = ndtr1;
473 nand_writel(info, NDTR0CS0, ndtr0);
474 nand_writel(info, NDTR1CS0, ndtr1);
475}
476
477static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
478 const struct nand_sdr_timings *t)
479{
480 struct pxa3xx_nand_info *info = host->info_data;
481 struct nand_chip *chip = &host->chip;
482 unsigned long nand_clk = clk_get_rate(info->clk);
483 uint32_t ndtr0, ndtr1;
484
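	/*
	 * Editor's note: nand_sdr_timings values are expressed in
	 * picoseconds, hence the DIV_ROUND_UP(..., 1000) conversions
	 * to nanoseconds below.
	 */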
485 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
486 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
487 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
488 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
489 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
490 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
491 u32 tR = chip->chip_delay * 1000;
492 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
493 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
494
495 /* fall back to a default value if tR = 0 */
496 if (!tR)
497 tR = 20000;
498
499 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
500 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
501 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
502 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
503 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
504 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
505
506 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
507 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
508 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
509
510 info->ndtr0cs0 = ndtr0;
511 info->ndtr1cs0 = ndtr1;
512 nand_writel(info, NDTR0CS0, ndtr0);
513 nand_writel(info, NDTR1CS0, ndtr1);
514}
515
516static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
517 unsigned int *flash_width,
518 unsigned int *dfc_width)
519{
520 struct nand_chip *chip = &host->chip;
521 struct pxa3xx_nand_info *info = host->info_data;
522 const struct pxa3xx_nand_flash *f = NULL;
523 int i, id, ntypes;
524 u8 idbuf[2];
525
526 ntypes = ARRAY_SIZE(builtin_flash_types);
527
528 nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
529 id = idbuf[0] | (idbuf[1] << 8);
530
531 for (i = 0; i < ntypes; i++) {
532 f = &builtin_flash_types[i];
533
534 if (f->chip_id == id)
535 break;
536 }
537
538 if (i == ntypes) {
539 dev_err(&info->pdev->dev, "Error: timings not found\n");
540 return -EINVAL;
541 }
542
543 pxa3xx_nand_set_timing(host, f->timing);
544
545 *flash_width = f->flash_width;
546 *dfc_width = f->dfc_width;
547
548 return 0;
549}
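
/*
 * Editor's note: a worked example of the ID assembly above. For a
 * hypothetical chip whose READID bytes are idbuf[0] = 0xec (maker) and
 * idbuf[1] = 0xda (device), id = 0xec | (0xda << 8) = 0xdaec, which
 * selects the { 0xdaec, 8, 8, &timing[1] } entry of
 * builtin_flash_types.
 */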
550
551static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
552 int mode)
553{
554 const struct nand_sdr_timings *timings;
555
556 mode = fls(mode) - 1;
557 if (mode < 0)
558 mode = 0;
559
560 timings = onfi_async_timing_mode_to_sdr_timings(mode);
561 if (IS_ERR(timings))
562 return PTR_ERR(timings);
563
564 pxa3xx_nand_set_sdr_timing(host, timings);
565
566 return 0;
567}
568
569static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
570{
571 struct nand_chip *chip = &host->chip;
572 struct pxa3xx_nand_info *info = host->info_data;
573 unsigned int flash_width = 0, dfc_width = 0;
574 int mode, err;
575
576 mode = onfi_get_async_timing_mode(chip);
577 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
578 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
579 &dfc_width);
580 if (err)
581 return err;
582
583 if (flash_width == 16) {
584 info->reg_ndcr |= NDCR_DWIDTH_M;
585 chip->options |= NAND_BUSWIDTH_16;
586 }
587
588 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
589 } else {
590 err = pxa3xx_nand_init_timings_onfi(host, mode);
591 if (err)
592 return err;
593 }
594
595 return 0;
596}
597
598/*
599 * NOTE: ND_RUN must be set first and the command buffer written
600 * afterwards, otherwise the controller does not work.
601 * We enable all the interrupts at the same time and let
602 * pxa3xx_nand_irq() handle the logic.
603 */
604static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
605{
606 uint32_t ndcr;
607
608 ndcr = info->reg_ndcr;
609
610 if (info->use_ecc) {
611 ndcr |= NDCR_ECC_EN;
612 if (info->ecc_bch)
613 nand_writel(info, NDECCCTRL, 0x1);
614 } else {
615 ndcr &= ~NDCR_ECC_EN;
616 if (info->ecc_bch)
617 nand_writel(info, NDECCCTRL, 0x0);
618 }
619
620 if (info->use_dma)
621 ndcr |= NDCR_DMA_EN;
622 else
623 ndcr &= ~NDCR_DMA_EN;
624
625 if (info->use_spare)
626 ndcr |= NDCR_SPARE_EN;
627 else
628 ndcr &= ~NDCR_SPARE_EN;
629
630 ndcr |= NDCR_ND_RUN;
631
632 /* clear status bits and run */
633 nand_writel(info, NDSR, NDSR_MASK);
634 nand_writel(info, NDCR, 0);
635 nand_writel(info, NDCR, ndcr);
636}
637
638static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
639{
640 uint32_t ndcr;
641 int timeout = NAND_STOP_DELAY;
642
643 /* wait for the RUN bit in NDCR to become 0 */
644 ndcr = nand_readl(info, NDCR);
645 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
646 ndcr = nand_readl(info, NDCR);
647 udelay(1);
648 }
649
650 if (timeout <= 0) {
651 ndcr &= ~NDCR_ND_RUN;
652 nand_writel(info, NDCR, ndcr);
653 }
654 if (info->dma_chan)
655 dmaengine_terminate_all(info->dma_chan);
656
657 /* clear status bits */
658 nand_writel(info, NDSR, NDSR_MASK);
659}
660
661static void __maybe_unused
662enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
663{
664 uint32_t ndcr;
665
666 ndcr = nand_readl(info, NDCR);
667 nand_writel(info, NDCR, ndcr & ~int_mask);
668}
669
670static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
671{
672 uint32_t ndcr;
673
674 ndcr = nand_readl(info, NDCR);
675 nand_writel(info, NDCR, ndcr | int_mask);
676}
677
678static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
679{
680 if (info->ecc_bch) {
681 u32 val;
682 int ret;
683
684 /*
685 * According to the datasheet, when reading from NDDB
686 * with BCH enabled, after every 32 bytes read we
687 * have to make sure that the NDSR.RDDREQ bit is set.
688 *
689 * Drain the FIFO eight 32-bit reads at a time, and skip
690 * the polling on the last read.
691 */
692 while (len > 8) {
693 ioread32_rep(info->mmio_base + NDDB, data, 8);
694
695 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
696 val & NDSR_RDDREQ, 1000, 5000);
697 if (ret) {
698 dev_err(&info->pdev->dev,
699 "Timeout on RDDREQ while draining the FIFO\n");
700 return;
701 }
702
703 data += 32;
704 len -= 8;
705 }
706 }
707
708 ioread32_rep(info->mmio_base + NDDB, data, len);
709}
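
/*
 * Editor's note: in drain_fifo() above, len is counted in 32-bit words,
 * so each iteration of the polling loop moves 8 words (32 bytes) and
 * advances the byte pointer by 32, matching the 32-byte RDDREQ
 * granularity described in the comment.
 */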
710
711static void handle_data_pio(struct pxa3xx_nand_info *info)
712{
713 switch (info->state) {
714 case STATE_PIO_WRITING:
715 if (info->step_chunk_size)
716 writesl(info->mmio_base + NDDB,
717 info->data_buff + info->data_buff_pos,
718 DIV_ROUND_UP(info->step_chunk_size, 4));
719
720 if (info->step_spare_size)
721 writesl(info->mmio_base + NDDB,
722 info->oob_buff + info->oob_buff_pos,
723 DIV_ROUND_UP(info->step_spare_size, 4));
724 break;
725 case STATE_PIO_READING:
726 if (info->step_chunk_size)
727 drain_fifo(info,
728 info->data_buff + info->data_buff_pos,
729 DIV_ROUND_UP(info->step_chunk_size, 4));
730
731 if (info->step_spare_size)
732 drain_fifo(info,
733 info->oob_buff + info->oob_buff_pos,
734 DIV_ROUND_UP(info->step_spare_size, 4));
735 break;
736 default:
737 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
738 info->state);
739 BUG();
740 }
741
742 /* Update buffer pointers for multi-page read/write */
743 info->data_buff_pos += info->step_chunk_size;
744 info->oob_buff_pos += info->step_spare_size;
745}
746
747static void pxa3xx_nand_data_dma_irq(void *data)
748{
749 struct pxa3xx_nand_info *info = data;
750 struct dma_tx_state state;
751 enum dma_status status;
752
753 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
754 if (likely(status == DMA_COMPLETE)) {
755 info->state = STATE_DMA_DONE;
756 } else {
757 dev_err(&info->pdev->dev, "DMA error on data channel\n");
758 info->retcode = ERR_DMABUSERR;
759 }
760 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
761
762 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
763 enable_int(info, NDCR_INT_MASK);
764}
765
766static void start_data_dma(struct pxa3xx_nand_info *info)
767{
768 enum dma_transfer_direction direction;
769 struct dma_async_tx_descriptor *tx;
770
771 switch (info->state) {
772 case STATE_DMA_WRITING:
773 info->dma_dir = DMA_TO_DEVICE;
774 direction = DMA_MEM_TO_DEV;
775 break;
776 case STATE_DMA_READING:
777 info->dma_dir = DMA_FROM_DEVICE;
778 direction = DMA_DEV_TO_MEM;
779 break;
780 default:
781 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
782 info->state);
783 BUG();
784 }
785 info->sg.length = info->chunk_size;
786 if (info->use_spare)
787 info->sg.length += info->spare_size + info->ecc_size;
788 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
789
790 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
791 DMA_PREP_INTERRUPT);
792 if (!tx) {
793 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
794 return;
795 }
796 tx->callback = pxa3xx_nand_data_dma_irq;
797 tx->callback_param = info;
798 info->dma_cookie = dmaengine_submit(tx);
799 dma_async_issue_pending(info->dma_chan);
800 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
801 __func__, direction, info->dma_cookie, info->sg.length);
802}
803
804static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
805{
806 struct pxa3xx_nand_info *info = data;
807
808 handle_data_pio(info);
809
810 info->state = STATE_CMD_DONE;
811 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
812
813 return IRQ_HANDLED;
814}
815
816static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
817{
818 struct pxa3xx_nand_info *info = devid;
819 unsigned int status, is_completed = 0, is_ready = 0;
820 unsigned int ready, cmd_done;
821 irqreturn_t ret = IRQ_HANDLED;
822
823 if (info->cs == 0) {
824 ready = NDSR_FLASH_RDY;
825 cmd_done = NDSR_CS0_CMDD;
826 } else {
827 ready = NDSR_RDY;
828 cmd_done = NDSR_CS1_CMDD;
829 }
830
831 status = nand_readl(info, NDSR);
832
833 if (status & NDSR_UNCORERR)
834 info->retcode = ERR_UNCORERR;
835 if (status & NDSR_CORERR) {
836 info->retcode = ERR_CORERR;
837 if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
838 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
839 info->ecc_bch)
840 info->ecc_err_cnt = NDSR_ERR_CNT(status);
841 else
842 info->ecc_err_cnt = 1;
843
844 /*
845 * Each chunk composing a page is corrected independently,
846 * and we need to store the maximum number of corrected bitflips
847 * to return it to the MTD layer in ecc.read_page().
848 */
849 info->max_bitflips = max_t(unsigned int,
850 info->max_bitflips,
851 info->ecc_err_cnt);
852 }
853 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
854 /* whether to use DMA for the data transfer */
855 if (info->use_dma) {
856 disable_int(info, NDCR_INT_MASK);
857 info->state = (status & NDSR_RDDREQ) ?
858 STATE_DMA_READING : STATE_DMA_WRITING;
859 start_data_dma(info);
860 goto NORMAL_IRQ_EXIT;
861 } else {
862 info->state = (status & NDSR_RDDREQ) ?
863 STATE_PIO_READING : STATE_PIO_WRITING;
864 ret = IRQ_WAKE_THREAD;
865 goto NORMAL_IRQ_EXIT;
866 }
867 }
868 if (status & cmd_done) {
869 info->state = STATE_CMD_DONE;
870 is_completed = 1;
871 }
872 if (status & ready) {
873 info->state = STATE_READY;
874 is_ready = 1;
875 }
876
877 /*
878 * Clear all status bits before issuing the next command, which
879 * can and will alter the status bits and deserves a new
880 * interrupt of its own. This lets the controller exit the IRQ.
881 */
882 nand_writel(info, NDSR, status);
883
884 if (status & NDSR_WRCMDREQ) {
885 status &= ~NDSR_WRCMDREQ;
886 info->state = STATE_CMD_HANDLE;
887
888 /*
889 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
890 * must be loaded by writing either 12 or 16 bytes
891 * directly to NDCB0, four bytes at a time.
892 *
893 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
894 * but each NDCBx register can be read.
895 */
896 nand_writel(info, NDCB0, info->ndcb0);
897 nand_writel(info, NDCB0, info->ndcb1);
898 nand_writel(info, NDCB0, info->ndcb2);
899
900 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
901 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
902 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
903 nand_writel(info, NDCB0, info->ndcb3);
904 }
905
906 if (is_completed)
907 complete(&info->cmd_complete);
908 if (is_ready)
909 complete(&info->dev_ready);
910NORMAL_IRQ_EXIT:
911 return ret;
912}
913
914static inline int is_buf_blank(uint8_t *buf, size_t len)
915{
916 for (; len > 0; len--)
917 if (*buf++ != 0xff)
918 return 0;
919 return 1;
920}
921
922static void set_command_address(struct pxa3xx_nand_info *info,
923 unsigned int page_size, uint16_t column, int page_addr)
924{
925 /* small page addr setting */
926 if (page_size < PAGE_CHUNK_SIZE) {
927 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
928 | (column & 0xFF);
929
930 info->ndcb2 = 0;
931 } else {
932 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
933 | (column & 0xFFFF);
934
935 if (page_addr & 0xFF0000)
936 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
937 else
938 info->ndcb2 = 0;
939 }
940}
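
/*
 * Editor's note: a worked example of the large-page branch above, for
 * illustrative values column = 0 and page_addr = 0x012345:
 *   ndcb1 = ((0x012345 & 0xFFFF) << 16) | (0 & 0xFFFF) = 0x23450000
 *   ndcb2 = (0x012345 & 0xFF0000) >> 16                = 0x01
 */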
941
942static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
943{
944 struct pxa3xx_nand_host *host = info->host[info->cs];
945 struct mtd_info *mtd = nand_to_mtd(&host->chip);
946
947 /* reset the data and OOB column pointers */
948 info->buf_start = 0;
949 info->buf_count = 0;
950 info->data_buff_pos = 0;
951 info->oob_buff_pos = 0;
952 info->step_chunk_size = 0;
953 info->step_spare_size = 0;
954 info->cur_chunk = 0;
955 info->use_ecc = 0;
956 info->use_spare = 1;
957 info->retcode = ERR_NONE;
958 info->ecc_err_cnt = 0;
959 info->ndcb3 = 0;
960 info->need_wait = 0;
961
962 switch (command) {
963 case NAND_CMD_READ0:
964 case NAND_CMD_READOOB:
965 case NAND_CMD_PAGEPROG:
966 info->use_ecc = 1;
967 break;
968 case NAND_CMD_PARAM:
969 info->use_spare = 0;
970 break;
971 default:
972 info->ndcb1 = 0;
973 info->ndcb2 = 0;
974 break;
975 }
976
977 /*
978 * If we are about to issue a read command, or about to set
979 * the write address, then clean the data buffer.
980 */
981 if (command == NAND_CMD_READ0 ||
982 command == NAND_CMD_READOOB ||
983 command == NAND_CMD_SEQIN) {
984
985 info->buf_count = mtd->writesize + mtd->oobsize;
986 memset(info->data_buff, 0xFF, info->buf_count);
987 }
988
989}
990
991static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
992 int ext_cmd_type, uint16_t column, int page_addr)
993{
994 int addr_cycle, exec_cmd;
995 struct pxa3xx_nand_host *host;
996 struct mtd_info *mtd;
997
998 host = info->host[info->cs];
999 mtd = nand_to_mtd(&host->chip);
1000 addr_cycle = 0;
1001 exec_cmd = 1;
1002
1003 if (info->cs != 0)
1004 info->ndcb0 = NDCB0_CSEL;
1005 else
1006 info->ndcb0 = 0;
1007
1008 if (command == NAND_CMD_SEQIN)
1009 exec_cmd = 0;
1010
1011 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
1012 + host->col_addr_cycles);
1013
1014 switch (command) {
1015 case NAND_CMD_READOOB:
1016 case NAND_CMD_READ0:
1017 info->buf_start = column;
1018 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1019 | addr_cycle
1020 | NAND_CMD_READ0;
1021
1022 if (command == NAND_CMD_READOOB)
1023 info->buf_start += mtd->writesize;
1024
1025 if (info->cur_chunk < info->nfullchunks) {
1026 info->step_chunk_size = info->chunk_size;
1027 info->step_spare_size = info->spare_size;
1028 } else {
1029 info->step_chunk_size = info->last_chunk_size;
1030 info->step_spare_size = info->last_spare_size;
1031 }
1032
1033 /*
1034 * Multiple page read needs an 'extended command type' field,
1035 * which is either naked-read or last-read according to the
1036 * state.
1037 */
1038 if (mtd->writesize == PAGE_CHUNK_SIZE) {
1039 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
1040 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
1041 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
1042 | NDCB0_LEN_OVRD
1043 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1044 info->ndcb3 = info->step_chunk_size +
1045 info->step_spare_size;
1046 }
1047
1048 set_command_address(info, mtd->writesize, column, page_addr);
1049 break;
1050
1051 case NAND_CMD_SEQIN:
1052
1053 info->buf_start = column;
1054 set_command_address(info, mtd->writesize, 0, page_addr);
1055
1056 /*
1057 * Multiple page programming needs to execute the initial
1058 * SEQIN command that sets the page address.
1059 */
1060 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1061 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1062 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1063 | addr_cycle
1064 | command;
1065 exec_cmd = 1;
1066 }
1067 break;
1068
1069 case NAND_CMD_PAGEPROG:
1070 if (is_buf_blank(info->data_buff,
1071 (mtd->writesize + mtd->oobsize))) {
1072 exec_cmd = 0;
1073 break;
1074 }
1075
1076 if (info->cur_chunk < info->nfullchunks) {
1077 info->step_chunk_size = info->chunk_size;
1078 info->step_spare_size = info->spare_size;
1079 } else {
1080 info->step_chunk_size = info->last_chunk_size;
1081 info->step_spare_size = info->last_spare_size;
1082 }
1083
1084 /* Second command setting for large pages */
1085 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1086 /*
1087 * Multiple page write uses the 'extended command'
1088 * field. This can be used to issue a command dispatch
1089 * or a naked-write depending on the current stage.
1090 */
1091 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1092 | NDCB0_LEN_OVRD
1093 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1094 info->ndcb3 = info->step_chunk_size +
1095 info->step_spare_size;
1096
1097 /*
1098 * This is the command dispatch that completes a chunked
1099 * page program operation.
1100 */
1101 if (info->cur_chunk == info->ntotalchunks) {
1102 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1103 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1104 | command;
1105 info->ndcb1 = 0;
1106 info->ndcb2 = 0;
1107 info->ndcb3 = 0;
1108 }
1109 } else {
1110 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1111 | NDCB0_AUTO_RS
1112 | NDCB0_ST_ROW_EN
1113 | NDCB0_DBC
1114 | (NAND_CMD_PAGEPROG << 8)
1115 | NAND_CMD_SEQIN
1116 | addr_cycle;
1117 }
1118 break;
1119
1120 case NAND_CMD_PARAM:
1121 info->buf_count = INIT_BUFFER_SIZE;
1122 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1123 | NDCB0_ADDR_CYC(1)
1124 | NDCB0_LEN_OVRD
1125 | command;
1126 info->ndcb1 = (column & 0xFF);
1127 info->ndcb3 = INIT_BUFFER_SIZE;
1128 info->step_chunk_size = INIT_BUFFER_SIZE;
1129 break;
1130
1131 case NAND_CMD_READID:
1132 info->buf_count = READ_ID_BYTES;
1133 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1134 | NDCB0_ADDR_CYC(1)
1135 | command;
1136 info->ndcb1 = (column & 0xFF);
1137
1138 info->step_chunk_size = 8;
1139 break;
1140 case NAND_CMD_STATUS:
1141 info->buf_count = 1;
1142 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1143 | NDCB0_ADDR_CYC(1)
1144 | command;
1145
1146 info->step_chunk_size = 8;
1147 break;
1148
1149 case NAND_CMD_ERASE1:
1150 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1151 | NDCB0_AUTO_RS
1152 | NDCB0_ADDR_CYC(3)
1153 | NDCB0_DBC
1154 | (NAND_CMD_ERASE2 << 8)
1155 | NAND_CMD_ERASE1;
1156 info->ndcb1 = page_addr;
1157 info->ndcb2 = 0;
1158
1159 break;
1160 case NAND_CMD_RESET:
1161 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1162 | command;
1163
1164 break;
1165
1166 case NAND_CMD_ERASE2:
1167 exec_cmd = 0;
1168 break;
1169
1170 default:
1171 exec_cmd = 0;
1172 dev_err(&info->pdev->dev, "non-supported command %x\n",
1173 command);
1174 break;
1175 }
1176
1177 return exec_cmd;
1178}
1179
1180static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1181 int column, int page_addr)
1182{
1183 struct nand_chip *chip = mtd_to_nand(mtd);
1184 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1185 struct pxa3xx_nand_info *info = host->info_data;
1186 int exec_cmd;
1187
1188 /*
1189 * If this is a x16 device, then convert the input
1190 * "byte" address into a "word" address appropriate
1191 * for indexing a word-oriented device.
1192 */
1193 if (info->reg_ndcr & NDCR_DWIDTH_M)
1194 column /= 2;
1195
1196 /*
1197 * Different NAND chips may be hooked to different chip
1198 * selects, so check whether the chip select has changed;
1199 * if so, reload the timings.
1200 */
1201 if (info->cs != host->cs) {
1202 info->cs = host->cs;
1203 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1204 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1205 }
1206
1207 prepare_start_command(info, command);
1208
1209 info->state = STATE_PREPARED;
1210 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1211
1212 if (exec_cmd) {
1213 init_completion(&info->cmd_complete);
1214 init_completion(&info->dev_ready);
1215 info->need_wait = 1;
1216 pxa3xx_nand_start(info);
1217
1218 if (!wait_for_completion_timeout(&info->cmd_complete,
1219 CHIP_DELAY_TIMEOUT)) {
1220 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1221 /* Stop State Machine for next command cycle */
1222 pxa3xx_nand_stop(info);
1223 }
1224 }
1225 info->state = STATE_IDLE;
1226}
1227
1228static void nand_cmdfunc_extended(struct mtd_info *mtd,
1229 const unsigned command,
1230 int column, int page_addr)
1231{
1232 struct nand_chip *chip = mtd_to_nand(mtd);
1233 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1234 struct pxa3xx_nand_info *info = host->info_data;
1235 int exec_cmd, ext_cmd_type;
1236
1237 /*
1238 * If this is a x16 device, then convert the input
1239 * "byte" address into a "word" address appropriate
1240 * for indexing a word-oriented device.
1241 */
1242 if (info->reg_ndcr & NDCR_DWIDTH_M)
1243 column /= 2;
1244
1245 /*
1246 * Different NAND chips may be hooked to different chip
1247 * selects, so check whether the chip select has changed;
1248 * if so, reload the timings.
1249 */
1250 if (info->cs != host->cs) {
1251 info->cs = host->cs;
1252 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1253 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1254 }
1255
1256 /* Select the extended command for the first command */
1257 switch (command) {
1258 case NAND_CMD_READ0:
1259 case NAND_CMD_READOOB:
1260 ext_cmd_type = EXT_CMD_TYPE_MONO;
1261 break;
1262 case NAND_CMD_SEQIN:
1263 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1264 break;
1265 case NAND_CMD_PAGEPROG:
1266 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1267 break;
1268 default:
1269 ext_cmd_type = 0;
1270 break;
1271 }
1272
1273 prepare_start_command(info, command);
1274
1275 /*
1276 * Prepare the "is ready" completion before starting the command
1277 * transaction sequence. If the command turns out not to be
1278 * executed, the completion is signalled immediately, see below.
1279 *
1280 * Signalling it inside the loop is safe because the command
1281 * variable is invariant and thus so is exec_cmd.
1282 */
1283 info->need_wait = 1;
1284 init_completion(&info->dev_ready);
1285 do {
1286 info->state = STATE_PREPARED;
1287
1288 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1289 column, page_addr);
1290 if (!exec_cmd) {
1291 info->need_wait = 0;
1292 complete(&info->dev_ready);
1293 break;
1294 }
1295
1296 init_completion(&info->cmd_complete);
1297 pxa3xx_nand_start(info);
1298
1299 if (!wait_for_completion_timeout(&info->cmd_complete,
1300 CHIP_DELAY_TIMEOUT)) {
1301 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1302 /* Stop State Machine for next command cycle */
1303 pxa3xx_nand_stop(info);
1304 break;
1305 }
1306
1307 /* Only a few commands need several steps */
1308 if (command != NAND_CMD_PAGEPROG &&
1309 command != NAND_CMD_READ0 &&
1310 command != NAND_CMD_READOOB)
1311 break;
1312
1313 info->cur_chunk++;
1314
1315 /* Check if the sequence is complete */
1316 if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
1317 break;
1318
1319 /*
1320 * After a split program command sequence has issued
1321 * the command dispatch, the command sequence is complete.
1322 */
1323 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1324 command == NAND_CMD_PAGEPROG &&
1325 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1326 break;
1327
1328 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1329 /* Last read: issue a 'last naked read' */
1330 if (info->cur_chunk == info->ntotalchunks - 1)
1331 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1332 else
1333 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1334
1335 /*
1336 * If a split program command has no more data to transfer,
1337 * the command dispatch must be issued to complete it.
1338 */
1339 } else if (command == NAND_CMD_PAGEPROG &&
1340 info->cur_chunk == info->ntotalchunks) {
1341 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1342 }
1343 } while (1);
1344
1345 info->state = STATE_IDLE;
1346}
1347
1348static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1349 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1350 int page)
1351{
1352 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
1353 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1354
1355 return nand_prog_page_end_op(chip);
1356}
1357
1358static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1359 struct nand_chip *chip, uint8_t *buf, int oob_required,
1360 int page)
1361{
1362 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1363 struct pxa3xx_nand_info *info = host->info_data;
1364
1365 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
1366 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1367
1368 if (info->retcode == ERR_CORERR && info->use_ecc) {
1369 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1370
1371 } else if (info->retcode == ERR_UNCORERR) {
1372 /*
1373 * For a blank page (all 0xff) the HW calculates its ECC as
1374 * 0, which differs from the ECC information within the
1375 * OOB area; ignore such uncorrectable errors.
1376 */
1377 if (is_buf_blank(buf, mtd->writesize))
1378 info->retcode = ERR_NONE;
1379 else
1380 mtd->ecc_stats.failed++;
1381 }
1382
1383 return info->max_bitflips;
1384}
1385
1386static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1387{
1388 struct nand_chip *chip = mtd_to_nand(mtd);
1389 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1390 struct pxa3xx_nand_info *info = host->info_data;
1391 char retval = 0xFF;
1392
1393 if (info->buf_start < info->buf_count)
1394 /* Have we just sent a new command? */
1395 retval = info->data_buff[info->buf_start++];
1396
1397 return retval;
1398}
1399
1400static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1401{
1402 struct nand_chip *chip = mtd_to_nand(mtd);
1403 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1404 struct pxa3xx_nand_info *info = host->info_data;
1405 u16 retval = 0xFFFF;
1406
1407 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1408 retval = *((u16 *)(info->data_buff+info->buf_start));
1409 info->buf_start += 2;
1410 }
1411 return retval;
1412}
1413
1414static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1415{
1416 struct nand_chip *chip = mtd_to_nand(mtd);
1417 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1418 struct pxa3xx_nand_info *info = host->info_data;
1419 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1420
1421 memcpy(buf, info->data_buff + info->buf_start, real_len);
1422 info->buf_start += real_len;
1423}
1424
1425static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1426 const uint8_t *buf, int len)
1427{
1428 struct nand_chip *chip = mtd_to_nand(mtd);
1429 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1430 struct pxa3xx_nand_info *info = host->info_data;
1431 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1432
1433 memcpy(info->data_buff + info->buf_start, buf, real_len);
1434 info->buf_start += real_len;
1435}
1436
1437static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1438{
1439 return;
1440}
1441
1442static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1443{
1444 struct nand_chip *chip = mtd_to_nand(mtd);
1445 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1446 struct pxa3xx_nand_info *info = host->info_data;
1447
1448 if (info->need_wait) {
1449 info->need_wait = 0;
1450 if (!wait_for_completion_timeout(&info->dev_ready,
1451 CHIP_DELAY_TIMEOUT)) {
1452 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1453 return NAND_STATUS_FAIL;
1454 }
1455 }
1456
1457 /* pxa3xx_nand_send_command() has waited for the command to complete */
1458 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1459 if (info->retcode == ERR_NONE)
1460 return 0;
1461 else
1462 return NAND_STATUS_FAIL;
1463 }
1464
1465 return NAND_STATUS_READY;
1466}
1467
1468static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1469{
1470 struct pxa3xx_nand_host *host = info->host[info->cs];
1471 struct platform_device *pdev = info->pdev;
1472 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1473 const struct nand_sdr_timings *timings;
1474
1475 /* Configure default flash values */
1476 info->chunk_size = PAGE_CHUNK_SIZE;
1477 info->reg_ndcr = 0x0; /* enable all interrupts */
1478 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1479 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1480 info->reg_ndcr |= NDCR_SPARE_EN;
1481
1482 /* use the safe common timings (ONFI mode 0) for a first try */
1483 timings = onfi_async_timing_mode_to_sdr_timings(0);
1484 if (IS_ERR(timings))
1485 return PTR_ERR(timings);
1486
1487 pxa3xx_nand_set_sdr_timing(host, timings);
1488 return 0;
1489}
1490
1491static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1492{
1493 struct pxa3xx_nand_host *host = info->host[info->cs];
1494 struct nand_chip *chip = &host->chip;
1495 struct mtd_info *mtd = nand_to_mtd(chip);
1496
1497 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1498 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1499 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1500}
1501
1502static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1503{
1504 struct platform_device *pdev = info->pdev;
1505 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1506 uint32_t ndcr = nand_readl(info, NDCR);
1507
1508 /* Set an initial chunk size */
1509 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1510 info->reg_ndcr = ndcr &
1511 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1512 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1513 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1514 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1515}
1516
1517static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1518{
1519 struct platform_device *pdev = info->pdev;
1520 struct dma_slave_config config;
1521 dma_cap_mask_t mask;
1522 struct pxad_param param;
1523 int ret;
1524
1525 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1526 if (info->data_buff == NULL)
1527 return -ENOMEM;
1528 if (use_dma == 0)
1529 return 0;
1530
1531 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1532 if (ret)
1533 return ret;
1534
1535 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1536 dma_cap_zero(mask);
1537 dma_cap_set(DMA_SLAVE, mask);
1538 param.prio = PXAD_PRIO_LOWEST;
1539 param.drcmr = info->drcmr_dat;
1540 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1541 &param, &pdev->dev,
1542 "data");
1543 if (!info->dma_chan) {
1544 dev_err(&pdev->dev, "unable to request data dma channel\n");
1545 return -ENODEV;
1546 }
1547
1548 memset(&config, 0, sizeof(config));
1549 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1550 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1551 config.src_addr = info->mmio_phys + NDDB;
1552 config.dst_addr = info->mmio_phys + NDDB;
1553 config.src_maxburst = 32;
1554 config.dst_maxburst = 32;
1555 ret = dmaengine_slave_config(info->dma_chan, &config);
1556 if (ret < 0) {
1557 dev_err(&info->pdev->dev,
1558 "dma channel configuration failed: %d\n",
1559 ret);
1560 return ret;
1561 }
1562
1563 /*
1564 * Now that DMA buffers are allocated we turn on
1565 * DMA proper for I/O operations.
1566 */
1567 info->use_dma = 1;
1568 return 0;
1569}
1570
1571static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1572{
1573 if (info->use_dma) {
1574 dmaengine_terminate_all(info->dma_chan);
1575 dma_release_channel(info->dma_chan);
1576 }
1577 kfree(info->data_buff);
1578}
1579
1580static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1581 struct mtd_info *mtd,
1582 int strength, int ecc_stepsize, int page_size)
1583{
1584 struct nand_chip *chip = mtd_to_nand(mtd);
1585 struct nand_ecc_ctrl *ecc = &chip->ecc;
1586
1587 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1588 info->nfullchunks = 1;
1589 info->ntotalchunks = 1;
1590 info->chunk_size = 2048;
1591 info->spare_size = 40;
1592 info->ecc_size = 24;
1593 ecc->mode = NAND_ECC_HW;
1594 ecc->size = 512;
1595 ecc->strength = 1;
1596
1597 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1598 info->nfullchunks = 1;
1599 info->ntotalchunks = 1;
1600 info->chunk_size = 512;
1601 info->spare_size = 8;
1602 info->ecc_size = 8;
1603 ecc->mode = NAND_ECC_HW;
1604 ecc->size = 512;
1605 ecc->strength = 1;
1606
1607 /*
1608 * Required ECC: 4-bit correction per 512 bytes
1609 * Select: 16-bit correction per 2048 bytes
1610 */
1611 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1612 info->ecc_bch = 1;
1613 info->nfullchunks = 1;
1614 info->ntotalchunks = 1;
1615 info->chunk_size = 2048;
1616 info->spare_size = 32;
1617 info->ecc_size = 32;
1618 ecc->mode = NAND_ECC_HW;
1619 ecc->size = info->chunk_size;
1620 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1621 ecc->strength = 16;
1622
1623 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1624 info->ecc_bch = 1;
1625 info->nfullchunks = 2;
1626 info->ntotalchunks = 2;
1627 info->chunk_size = 2048;
1628 info->spare_size = 32;
1629 info->ecc_size = 32;
1630 ecc->mode = NAND_ECC_HW;
1631 ecc->size = info->chunk_size;
1632 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1633 ecc->strength = 16;
1634
1635 /*
1636 * Required ECC: 8-bit correction per 512 bytes
1637 * Select: 16-bit correction per 1024 bytes
1638 */
1639 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1640 info->ecc_bch = 1;
1641 info->nfullchunks = 4;
1642 info->ntotalchunks = 5;
1643 info->chunk_size = 1024;
1644 info->spare_size = 0;
1645 info->last_chunk_size = 0;
1646 info->last_spare_size = 64;
1647 info->ecc_size = 32;
1648 ecc->mode = NAND_ECC_HW;
1649 ecc->size = info->chunk_size;
1650 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1651 ecc->strength = 16;
1652 } else {
1653 dev_err(&info->pdev->dev,
1654 "ECC strength %d at page size %d is not supported\n",
1655 strength, page_size);
1656 return -ENODEV;
1657 }
1658
1659 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1660 ecc->strength, ecc->size);
1661 return 0;
1662}
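
/*
 * Editor's note: chunk accounting for the 8-bit / 4096-byte branch
 * above, as a worked example: data covered = nfullchunks * chunk_size +
 * last_chunk_size = 4 * 1024 + 0 = 4096 bytes (the whole page), while
 * the 64 bytes of spare data are carried entirely by the last chunk
 * (4 * 0 + 64).
 */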
1663
1664static int pxa3xx_nand_scan(struct mtd_info *mtd)
1665{
1666 struct nand_chip *chip = mtd_to_nand(mtd);
1667 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1668 struct pxa3xx_nand_info *info = host->info_data;
1669 struct platform_device *pdev = info->pdev;
1670 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1671 int ret;
1672 uint16_t ecc_strength, ecc_step;
1673
1674 if (pdata->keep_config) {
1675 pxa3xx_nand_detect_config(info);
1676 } else {
1677 ret = pxa3xx_nand_config_ident(info);
1678 if (ret)
1679 return ret;
1680 }
1681
1682 if (info->reg_ndcr & NDCR_DWIDTH_M)
1683 chip->options |= NAND_BUSWIDTH_16;
1684
1685 /* Device detection must be done with ECC disabled */
1686 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1687 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
1688 nand_writel(info, NDECCCTRL, 0x0);
1689
1690 if (pdata->flash_bbt)
1691 chip->bbt_options |= NAND_BBT_USE_FLASH;
1692
1693 chip->ecc.strength = pdata->ecc_strength;
1694 chip->ecc.size = pdata->ecc_step_size;
1695
1696 ret = nand_scan_ident(mtd, 1, NULL);
1697 if (ret)
1698 return ret;
1699
1700 if (!pdata->keep_config) {
1701 ret = pxa3xx_nand_init(host);
1702 if (ret) {
1703 dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1704 ret);
1705 return ret;
1706 }
1707 }
1708
1709 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
1710 /*
1711 * We'll use a bad block table stored in-flash and won't
1712 * allow writing the bad block marker to the flash.
1713 */
1714 chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
1715 chip->bbt_td = &bbt_main_descr;
1716 chip->bbt_md = &bbt_mirror_descr;
1717 }
1718
1719 /*
1720 * If the page size is bigger than the FIFO size, let's check
1721 * we are given the right variant and then switch to the extended
1722 * (aka split) command handling.
1723 */
1724 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1725 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1726 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1727 chip->cmdfunc = nand_cmdfunc_extended;
1728 } else {
1729 dev_err(&info->pdev->dev,
1730 "unsupported page size on this variant\n");
1731 return -ENODEV;
1732 }
1733 }
1734
1735 ecc_strength = chip->ecc.strength;
1736 ecc_step = chip->ecc.size;
1737 if (!ecc_strength || !ecc_step) {
1738 ecc_strength = chip->ecc_strength_ds;
1739 ecc_step = chip->ecc_step_ds;
1740 }
1741
1742 /* Set default ECC strength requirements on non-ONFI devices */
1743 if (ecc_strength < 1 && ecc_step < 1) {
1744 ecc_strength = 1;
1745 ecc_step = 512;
1746 }
1747
1748 ret = pxa_ecc_init(info, mtd, ecc_strength,
1749 ecc_step, mtd->writesize);
1750 if (ret)
1751 return ret;
1752
1753 /* calculate addressing information */
1754 if (mtd->writesize >= 2048)
1755 host->col_addr_cycles = 2;
1756 else
1757 host->col_addr_cycles = 1;
1758
1759 /* release the initial buffer */
1760 kfree(info->data_buff);
1761
1762 /* allocate the real data + oob buffer */
1763 info->buf_size = mtd->writesize + mtd->oobsize;
1764 ret = pxa3xx_nand_init_buff(info);
1765 if (ret)
1766 return ret;
1767 info->oob_buff = info->data_buff + mtd->writesize;
1768
1769 if ((mtd->size >> chip->page_shift) > 65536)
1770 host->row_addr_cycles = 3;
1771 else
1772 host->row_addr_cycles = 2;
1773
1774 if (!pdata->keep_config)
1775 pxa3xx_nand_config_tail(info);
1776
1777 return nand_scan_tail(mtd);
1778}
1779
1780static int alloc_nand_resource(struct platform_device *pdev)
1781{
1782 struct device_node *np = pdev->dev.of_node;
1783 struct pxa3xx_nand_platform_data *pdata;
1784 struct pxa3xx_nand_info *info;
1785 struct pxa3xx_nand_host *host;
1786 struct nand_chip *chip = NULL;
1787 struct mtd_info *mtd;
1788 struct resource *r;
1789 int ret, irq, cs;
1790
1791 pdata = dev_get_platdata(&pdev->dev);
1792 if (pdata->num_cs <= 0) {
1793 dev_err(&pdev->dev, "invalid number of chip selects\n");
1794 return -ENODEV;
1795 }
1796
1797 info = devm_kzalloc(&pdev->dev,
1798 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1799 GFP_KERNEL);
1800 if (!info)
1801 return -ENOMEM;
1802
1803 info->pdev = pdev;
1804 info->variant = pxa3xx_nand_get_variant(pdev);
1805 for (cs = 0; cs < pdata->num_cs; cs++) {
1806 host = (void *)&info[1] + sizeof(*host) * cs;
1807 chip = &host->chip;
1808 nand_set_controller_data(chip, host);
1809 mtd = nand_to_mtd(chip);
1810 info->host[cs] = host;
1811 host->cs = cs;
1812 host->info_data = info;
1813 mtd->dev.parent = &pdev->dev;
1814 /* FIXME: all chips use the same device tree partitions */
1815 nand_set_flash_node(chip, np);
1816
1817 nand_set_controller_data(chip, host);
1818 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1819 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1820 chip->controller = &info->controller;
1821 chip->waitfunc = pxa3xx_nand_waitfunc;
1822 chip->select_chip = pxa3xx_nand_select_chip;
1823 chip->read_word = pxa3xx_nand_read_word;
1824 chip->read_byte = pxa3xx_nand_read_byte;
1825 chip->read_buf = pxa3xx_nand_read_buf;
1826 chip->write_buf = pxa3xx_nand_write_buf;
1827 chip->options |= NAND_NO_SUBPAGE_WRITE;
1828 chip->cmdfunc = nand_cmdfunc;
1829 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
1830 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
1831 }
1832
1833 nand_hw_control_init(chip->controller);
1834 info->clk = devm_clk_get(&pdev->dev, NULL);
1835 if (IS_ERR(info->clk)) {
1836 ret = PTR_ERR(info->clk);
1837 dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
1838 return ret;
1839 }
1840 ret = clk_prepare_enable(info->clk);
1841 if (ret < 0)
1842 return ret;
1843
1844 if (!np && use_dma) {
1845 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1846 if (r == NULL) {
1847 dev_err(&pdev->dev,
1848 "no resource defined for data DMA\n");
1849 ret = -ENXIO;
1850 goto fail_disable_clk;
1851 }
1852 info->drcmr_dat = r->start;
1853 }
1854
1855 irq = platform_get_irq(pdev, 0);
1856 if (irq < 0) {
1857 dev_err(&pdev->dev, "no IRQ resource defined\n");
1858 ret = -ENXIO;
1859 goto fail_disable_clk;
1860 }
1861
1862 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1863 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1864 if (IS_ERR(info->mmio_base)) {
1865 ret = PTR_ERR(info->mmio_base);
1866 dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
1867 goto fail_disable_clk;
1868 }
1869 info->mmio_phys = r->start;
1870
1871 /* Allocate a buffer to allow flash detection */
1872 info->buf_size = INIT_BUFFER_SIZE;
1873 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1874 if (info->data_buff == NULL) {
1875 ret = -ENOMEM;
1876 goto fail_disable_clk;
1877 }
1878
1879	 /* start with all interrupts disabled */
1880 disable_int(info, NDSR_MASK);
1881
1882 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1883 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1884 pdev->name, info);
1885 if (ret < 0) {
1886 dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
1887 goto fail_free_buf;
1888 }
1889
1890 platform_set_drvdata(pdev, info);
1891
1892 return 0;
1893
1894fail_free_buf:
1895 free_irq(irq, info);
1896 kfree(info->data_buff);
1897fail_disable_clk:
1898 clk_disable_unprepare(info->clk);
1899 return ret;
1900}
1901
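alloc_nand_resource() above carves the per-chip-select host structures out of the tail of a single allocation: `(void *)&info[1]` is the first byte past the info struct. A standalone sketch of that layout, using plain calloc() and made-up struct members (capped at 8 hosts for brevity):

	#include <stdio.h>
	#include <stdlib.h>

	struct host { int cs; };
	struct info { int num_cs; struct host *hosts[8]; };

	/* one allocation: info immediately followed by num_cs hosts */
	static struct info *alloc_info(int num_cs)
	{
		struct info *info = calloc(1, sizeof(*info) +
					      sizeof(struct host) * num_cs);
		int cs;

		if (!info)
			return NULL;
		info->num_cs = num_cs;
		for (cs = 0; cs < num_cs; cs++) {
			info->hosts[cs] = (struct host *)&info[1] + cs;
			info->hosts[cs]->cs = cs;
		}
		return info;
	}

	int main(void)
	{
		struct info *info = alloc_info(2);

		if (info)
			printf("host 1 is cs %d\n", info->hosts[1]->cs);
		free(info);
		return 0;
	}
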
1902static int pxa3xx_nand_remove(struct platform_device *pdev)
1903{
1904 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1905 struct pxa3xx_nand_platform_data *pdata;
1906 int irq, cs;
1907
1908 if (!info)
1909 return 0;
1910
1911 pdata = dev_get_platdata(&pdev->dev);
1912
1913 irq = platform_get_irq(pdev, 0);
1914 if (irq >= 0)
1915 free_irq(irq, info);
1916 pxa3xx_nand_free_buff(info);
1917
1918 /*
1919 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1920 * In order to prevent a lockup of the system bus, the DFI bus
1921	 * arbitration is granted to the SMC upon driver removal. This is
1922	 * done by setting the x_ARB_CNTL bit, which also prevents the NAND
1923	 * from accessing the bus.
1924 */
1925 nand_writel(info, NDCR,
1926 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1927 NFCV1_NDCR_ARB_CNTL);
1928 clk_disable_unprepare(info->clk);
1929
1930 for (cs = 0; cs < pdata->num_cs; cs++)
1931 nand_release(nand_to_mtd(&info->host[cs]->chip));
1932 return 0;
1933}
1934
1935static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1936{
1937 struct pxa3xx_nand_platform_data *pdata;
1938 struct device_node *np = pdev->dev.of_node;
1939 const struct of_device_id *of_id =
1940 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1941
1942 if (!of_id)
1943 return 0;
1944
1945 /*
1946	 * On some SoCs, such as A7k/A8k, the NAND controller must be
1947	 * enabled manually to avoid depending on the bootloader. This is
1948	 * done through a single bit in the System Functions registers.
1949 */
1950 if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1951 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1952 pdev->dev.of_node, "marvell,system-controller");
1953 u32 reg;
1954
1955 if (IS_ERR(sysctrl_base))
1956 return PTR_ERR(sysctrl_base);
1957
1958 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
1959 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1960 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1961 }
1962
1963 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1964 if (!pdata)
1965 return -ENOMEM;
1966
1967 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1968 pdata->enable_arbiter = 1;
1969 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1970 pdata->keep_config = 1;
1971 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1972
1973 pdev->dev.platform_data = pdata;
1974
1975 return 0;
1976}
1977
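An illustrative device tree fragment exercising the properties parsed by pxa3xx_nand_probe_dt() above (the node name and compatible string are placeholders, not taken from the binding document):

	nand-controller {
		compatible = "marvell,armada-8k-nand";
		marvell,system-controller = <&sysctrl>;
		marvell,nand-keep-config;
		num-cs = <1>;
	};
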
1978static int pxa3xx_nand_probe(struct platform_device *pdev)
1979{
1980 struct pxa3xx_nand_platform_data *pdata;
1981 struct pxa3xx_nand_info *info;
1982 int ret, cs, probe_success, dma_available;
1983
1984 dma_available = IS_ENABLED(CONFIG_ARM) &&
1985 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1986 if (use_dma && !dma_available) {
1987 use_dma = 0;
1988 dev_warn(&pdev->dev,
1989 "This platform can't do DMA on this device\n");
1990 }
1991
1992 ret = pxa3xx_nand_probe_dt(pdev);
1993 if (ret)
1994 return ret;
1995
1996 pdata = dev_get_platdata(&pdev->dev);
1997 if (!pdata) {
1998 dev_err(&pdev->dev, "no platform data defined\n");
1999 return -ENODEV;
2000 }
2001
2002 ret = alloc_nand_resource(pdev);
2003 if (ret)
2004 return ret;
2005
2006 info = platform_get_drvdata(pdev);
2007 probe_success = 0;
2008 for (cs = 0; cs < pdata->num_cs; cs++) {
2009 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
2010
2011 /*
2012	 * The mtd name matches the one used in the 'mtdparts' kernel
2013	 * parameter. This name cannot change, otherwise the user's mtd
2014	 * partition configuration would break.
2015 */
2016 mtd->name = "pxa3xx_nand-0";
2017 info->cs = cs;
2018 ret = pxa3xx_nand_scan(mtd);
2019 if (ret) {
2020 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
2021 cs);
2022 continue;
2023 }
2024
2025 ret = mtd_device_register(mtd, pdata->parts[cs],
2026 pdata->nr_parts[cs]);
2027 if (!ret)
2028 probe_success = 1;
2029 }
2030
2031 if (!probe_success) {
2032 pxa3xx_nand_remove(pdev);
2033 return -ENODEV;
2034 }
2035
2036 return 0;
2037}
2038
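Since the mtd name is fixed to "pxa3xx_nand-0", command-line partitioning keys off that string. An illustrative kernel command line (partition sizes and names are made up; "-" takes the remaining space):

	mtdparts=pxa3xx_nand-0:1m(bootloader),4m(kernel),-(rootfs)
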
2039#ifdef CONFIG_PM
2040static int pxa3xx_nand_suspend(struct device *dev)
2041{
2042 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
2043
2044 if (info->state) {
2045 dev_err(dev, "driver busy, state = %d\n", info->state);
2046 return -EAGAIN;
2047 }
2048
2049 clk_disable(info->clk);
2050 return 0;
2051}
2052
2053static int pxa3xx_nand_resume(struct device *dev)
2054{
2055 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
2056 int ret;
2057
2058 ret = clk_enable(info->clk);
2059 if (ret < 0)
2060 return ret;
2061
2062	 /* We don't want to service an interrupt without an mtd call in progress */
2063 disable_int(info, NDCR_INT_MASK);
2064
2065 /*
2066	 * Directly set the chip select to an invalid value; the driver
2067	 * will then reset the timings for the current chip select at the
2068	 * beginning of cmdfunc.
2069 */
2070 info->cs = 0xff;
2071
2072 /*
2073	 * As the spec says, NDSR is updated to 0x1800 when the
2074	 * nand_clk is disabled and re-enabled.
2075	 * To prevent this from corrupting the driver's state machine,
2076	 * clear all status bits before resuming.
2077 */
2078 nand_writel(info, NDSR, NDSR_MASK);
2079
2080 return 0;
2081}
2082#else
2083#define pxa3xx_nand_suspend NULL
2084#define pxa3xx_nand_resume NULL
2085#endif
2086
2087static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
2088 .suspend = pxa3xx_nand_suspend,
2089 .resume = pxa3xx_nand_resume,
2090};
2091
2092static struct platform_driver pxa3xx_nand_driver = {
2093 .driver = {
2094 .name = "pxa3xx-nand",
2095 .of_match_table = pxa3xx_nand_dt_ids,
2096 .pm = &pxa3xx_nand_pm_ops,
2097 },
2098 .probe = pxa3xx_nand_probe,
2099 .remove = pxa3xx_nand_remove,
2100};
2101
2102module_platform_driver(pxa3xx_nand_driver);
2103
2104MODULE_LICENSE("GPL");
2105MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c
index 1b82bff6330f..befb886ff121 100644
--- a/drivers/phy/ti/phy-da8xx-usb.c
+++ b/drivers/phy/ti/phy-da8xx-usb.c
@@ -20,6 +20,7 @@
20#include <linux/mfd/syscon.h> 20#include <linux/mfd/syscon.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/phy/phy.h> 22#include <linux/phy/phy.h>
23#include <linux/platform_data/phy-da8xx-usb.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/regmap.h> 25#include <linux/regmap.h>
25 26
@@ -145,6 +146,7 @@ static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
145static int da8xx_usb_phy_probe(struct platform_device *pdev) 146static int da8xx_usb_phy_probe(struct platform_device *pdev)
146{ 147{
147 struct device *dev = &pdev->dev; 148 struct device *dev = &pdev->dev;
149 struct da8xx_usb_phy_platform_data *pdata = dev->platform_data;
148 struct device_node *node = dev->of_node; 150 struct device_node *node = dev->of_node;
149 struct da8xx_usb_phy *d_phy; 151 struct da8xx_usb_phy *d_phy;
150 152
@@ -152,25 +154,25 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
152 if (!d_phy) 154 if (!d_phy)
153 return -ENOMEM; 155 return -ENOMEM;
154 156
155 if (node) 157 if (pdata)
158 d_phy->regmap = pdata->cfgchip;
159 else
156 d_phy->regmap = syscon_regmap_lookup_by_compatible( 160 d_phy->regmap = syscon_regmap_lookup_by_compatible(
157 "ti,da830-cfgchip"); 161 "ti,da830-cfgchip");
158 else
159 d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon");
160 if (IS_ERR(d_phy->regmap)) { 162 if (IS_ERR(d_phy->regmap)) {
161 dev_err(dev, "Failed to get syscon\n"); 163 dev_err(dev, "Failed to get syscon\n");
162 return PTR_ERR(d_phy->regmap); 164 return PTR_ERR(d_phy->regmap);
163 } 165 }
164 166
165 d_phy->usb11_clk = devm_clk_get(dev, "usb11_phy"); 167 d_phy->usb11_clk = devm_clk_get(dev, "usb1_clk48");
166 if (IS_ERR(d_phy->usb11_clk)) { 168 if (IS_ERR(d_phy->usb11_clk)) {
167 dev_err(dev, "Failed to get usb11_phy clock\n"); 169 dev_err(dev, "Failed to get usb1_clk48\n");
168 return PTR_ERR(d_phy->usb11_clk); 170 return PTR_ERR(d_phy->usb11_clk);
169 } 171 }
170 172
171 d_phy->usb20_clk = devm_clk_get(dev, "usb20_phy"); 173 d_phy->usb20_clk = devm_clk_get(dev, "usb0_clk48");
172 if (IS_ERR(d_phy->usb20_clk)) { 174 if (IS_ERR(d_phy->usb20_clk)) {
173 dev_err(dev, "Failed to get usb20_phy clock\n"); 175 dev_err(dev, "Failed to get usb0_clk48\n");
174 return PTR_ERR(d_phy->usb20_clk); 176 return PTR_ERR(d_phy->usb20_clk);
175 } 177 }
176 178
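With this change, legacy (non-DT) da8xx board code is expected to hand the CFGCHIP regmap to the PHY through platform data rather than the old "syscon" pdevname lookup. A hedged sketch of such a registration; the device name and the shape of the pdata struct beyond the cfgchip member seen above are assumptions:

	#include <linux/err.h>
	#include <linux/platform_data/phy-da8xx-usb.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	/* hypothetical board-file helper */
	static int __init da8xx_register_usb_phy(struct regmap *cfgchip)
	{
		struct da8xx_usb_phy_platform_data pdata = {
			.cfgchip = cfgchip,	/* member name from the diff */
		};
		struct platform_device *pdev;

		/* platform_device_register_data() copies pdata */
		pdev = platform_device_register_data(NULL, "da8xx-usb-phy",
						     PLATFORM_DEVID_NONE,
						     &pdata, sizeof(pdata));
		return PTR_ERR_OR_ZERO(pdev);
	}
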
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 89bf4d6cb486..cb0237143dbe 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -132,12 +132,16 @@ static void sr_set_clk_length(struct omap_sr *sr)
132 struct clk *fck; 132 struct clk *fck;
133 u32 fclk_speed; 133 u32 fclk_speed;
134 134
135 fck = clk_get(&sr->pdev->dev, "fck"); 135 /* Try interconnect target module fck first if it already exists */
136 136 fck = clk_get(sr->pdev->dev.parent, "fck");
137 if (IS_ERR(fck)) { 137 if (IS_ERR(fck)) {
138 dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n", 138 fck = clk_get(&sr->pdev->dev, "fck");
139 __func__, dev_name(&sr->pdev->dev)); 139 if (IS_ERR(fck)) {
140 return; 140 dev_err(&sr->pdev->dev,
141 "%s: unable to get fck for device %s\n",
142 __func__, dev_name(&sr->pdev->dev));
143 return;
144 }
141 } 145 }
142 146
143 fclk_speed = clk_get_rate(fck); 147 fclk_speed = clk_get_rate(fck);
@@ -838,7 +842,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
838DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show, 842DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
839 omap_sr_autocomp_store, "%llu\n"); 843 omap_sr_autocomp_store, "%llu\n");
840 844
841static int __init omap_sr_probe(struct platform_device *pdev) 845static int omap_sr_probe(struct platform_device *pdev)
842{ 846{
843 struct omap_sr *sr_info; 847 struct omap_sr *sr_info;
844 struct omap_sr_data *pdata = pdev->dev.platform_data; 848 struct omap_sr_data *pdata = pdev->dev.platform_data;
@@ -898,6 +902,12 @@ static int __init omap_sr_probe(struct platform_device *pdev)
898 902
899 list_add(&sr_info->node, &sr_list); 903 list_add(&sr_info->node, &sr_list);
900 904
905 ret = pm_runtime_get_sync(&pdev->dev);
906 if (ret < 0) {
907 pm_runtime_put_noidle(&pdev->dev);
908 goto err_list_del;
909 }
910
901 /* 911 /*
902 * Call into late init to do initializations that require 912 * Call into late init to do initializations that require
903	 * both sr driver and sr class driver to be initialized. 913
@@ -966,12 +976,17 @@ static int __init omap_sr_probe(struct platform_device *pdev)
966 976
967 } 977 }
968 978
979 pm_runtime_put_sync(&pdev->dev);
980
969 return ret; 981 return ret;
970 982
971err_debugfs: 983err_debugfs:
972 debugfs_remove_recursive(sr_info->dbg_dir); 984 debugfs_remove_recursive(sr_info->dbg_dir);
973err_list_del: 985err_list_del:
974 list_del(&sr_info->node); 986 list_del(&sr_info->node);
987
988 pm_runtime_put_sync(&pdev->dev);
989
975 return ret; 990 return ret;
976} 991}
977 992
@@ -1025,11 +1040,23 @@ static void omap_sr_shutdown(struct platform_device *pdev)
1025 return; 1040 return;
1026} 1041}
1027 1042
1043static const struct of_device_id omap_sr_match[] = {
1044 { .compatible = "ti,omap3-smartreflex-core", },
1045 { .compatible = "ti,omap3-smartreflex-mpu-iva", },
1046 { .compatible = "ti,omap4-smartreflex-core", },
1047 { .compatible = "ti,omap4-smartreflex-mpu", },
1048 { .compatible = "ti,omap4-smartreflex-iva", },
1049 { },
1050};
1051MODULE_DEVICE_TABLE(of, omap_sr_match);
1052
1028static struct platform_driver smartreflex_driver = { 1053static struct platform_driver smartreflex_driver = {
1054 .probe = omap_sr_probe,
1029 .remove = omap_sr_remove, 1055 .remove = omap_sr_remove,
1030 .shutdown = omap_sr_shutdown, 1056 .shutdown = omap_sr_shutdown,
1031 .driver = { 1057 .driver = {
1032 .name = DRIVER_NAME, 1058 .name = DRIVER_NAME,
1059 .of_match_table = omap_sr_match,
1033 }, 1060 },
1034}; 1061};
1035 1062
@@ -1048,7 +1075,7 @@ static int __init sr_init(void)
1048 else 1075 else
1049 pr_warn("%s: No PMIC hook to init smartreflex\n", __func__); 1076 pr_warn("%s: No PMIC hook to init smartreflex\n", __func__);
1050 1077
1051 ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe); 1078 ret = platform_driver_register(&smartreflex_driver);
1052 if (ret) { 1079 if (ret) {
1053 pr_err("%s: platform driver register failed for SR\n", 1080 pr_err("%s: platform driver register failed for SR\n",
1054 __func__); 1081 __func__);
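The sr_set_clk_length() hunk above introduces a two-step functional-clock lookup: try the parent (the interconnect target module) first, then fall back to the SmartReflex device itself. Reduced to its shape, assuming only the clk API already used in the hunk:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* prefer the parent's fck, fall back to our own */
	static struct clk *sr_get_fck(struct device *dev)
	{
		struct clk *fck = clk_get(dev->parent, "fck");

		if (IS_ERR(fck))
			fck = clk_get(dev, "fck");
		return fck;
	}
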
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 5ad42f33e70c..665da3c8fbce 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -23,6 +23,7 @@
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_platform.h> 25#include <linux/of_platform.h>
26#include <linux/platform_data/dmtimer-omap.h>
26#include <linux/platform_data/pwm_omap_dmtimer.h> 27#include <linux/platform_data/pwm_omap_dmtimer.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
@@ -37,7 +38,7 @@ struct pwm_omap_dmtimer_chip {
37 struct pwm_chip chip; 38 struct pwm_chip chip;
38 struct mutex mutex; 39 struct mutex mutex;
39 pwm_omap_dmtimer *dm_timer; 40 pwm_omap_dmtimer *dm_timer;
40 struct pwm_omap_dmtimer_pdata *pdata; 41 const struct omap_dm_timer_ops *pdata;
41 struct platform_device *dm_timer_pdev; 42 struct platform_device *dm_timer_pdev;
42}; 43};
43 44
@@ -242,19 +243,35 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
242{ 243{
243 struct device_node *np = pdev->dev.of_node; 244 struct device_node *np = pdev->dev.of_node;
244 struct device_node *timer; 245 struct device_node *timer;
246 struct platform_device *timer_pdev;
245 struct pwm_omap_dmtimer_chip *omap; 247 struct pwm_omap_dmtimer_chip *omap;
246 struct pwm_omap_dmtimer_pdata *pdata; 248 struct dmtimer_platform_data *timer_pdata;
249 const struct omap_dm_timer_ops *pdata;
247 pwm_omap_dmtimer *dm_timer; 250 pwm_omap_dmtimer *dm_timer;
248 u32 v; 251 u32 v;
249 int status; 252 int ret = 0;
250 253
251 pdata = dev_get_platdata(&pdev->dev); 254 timer = of_parse_phandle(np, "ti,timers", 0);
252 if (!pdata) { 255 if (!timer)
253 dev_err(&pdev->dev, "Missing dmtimer platform data\n"); 256 return -ENODEV;
254 return -EINVAL; 257
258 timer_pdev = of_find_device_by_node(timer);
259 if (!timer_pdev) {
260 dev_err(&pdev->dev, "Unable to find Timer pdev\n");
261 ret = -ENODEV;
262 goto put;
255 } 263 }
256 264
257 if (!pdata->request_by_node || 265 timer_pdata = dev_get_platdata(&timer_pdev->dev);
266 if (!timer_pdata) {
267 dev_err(&pdev->dev, "dmtimer pdata structure NULL\n");
268 ret = -EINVAL;
269 goto put;
270 }
271
272 pdata = timer_pdata->timer_ops;
273
274 if (!pdata || !pdata->request_by_node ||
258 !pdata->free || 275 !pdata->free ||
259 !pdata->enable || 276 !pdata->enable ||
260 !pdata->disable || 277 !pdata->disable ||
@@ -267,21 +284,26 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
267 !pdata->set_prescaler || 284 !pdata->set_prescaler ||
268 !pdata->write_counter) { 285 !pdata->write_counter) {
269 dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n"); 286 dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
270 return -EINVAL; 287 ret = -EINVAL;
288 goto put;
271 } 289 }
272 290
273 timer = of_parse_phandle(np, "ti,timers", 0);
274 if (!timer)
275 return -ENODEV;
276
277 if (!of_get_property(timer, "ti,timer-pwm", NULL)) { 291 if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
278 dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n"); 292 dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
279 return -ENODEV; 293 ret = -ENODEV;
294 goto put;
280 } 295 }
281 296
282 dm_timer = pdata->request_by_node(timer); 297 dm_timer = pdata->request_by_node(timer);
283 if (!dm_timer) 298 if (!dm_timer) {
284 return -EPROBE_DEFER; 299 ret = -EPROBE_DEFER;
300 goto put;
301 }
302
303put:
304 of_node_put(timer);
305 if (ret < 0)
306 return ret;
285 307
286 omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL); 308 omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
287 if (!omap) { 309 if (!omap) {
@@ -291,13 +313,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
291 313
292 omap->pdata = pdata; 314 omap->pdata = pdata;
293 omap->dm_timer = dm_timer; 315 omap->dm_timer = dm_timer;
294 316 omap->dm_timer_pdev = timer_pdev;
295 omap->dm_timer_pdev = of_find_device_by_node(timer);
296 if (!omap->dm_timer_pdev) {
297 dev_err(&pdev->dev, "Unable to find timer pdev\n");
298 omap->pdata->free(dm_timer);
299 return -EINVAL;
300 }
301 317
302 /* 318 /*
303 * Ensure that the timer is stopped before we allow PWM core to call 319 * Ensure that the timer is stopped before we allow PWM core to call
@@ -322,11 +338,11 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
322 338
323 mutex_init(&omap->mutex); 339 mutex_init(&omap->mutex);
324 340
325 status = pwmchip_add(&omap->chip); 341 ret = pwmchip_add(&omap->chip);
326 if (status < 0) { 342 if (ret < 0) {
327 dev_err(&pdev->dev, "failed to register PWM\n"); 343 dev_err(&pdev->dev, "failed to register PWM\n");
328 omap->pdata->free(omap->dm_timer); 344 omap->pdata->free(omap->dm_timer);
329 return status; 345 return ret;
330 } 346 }
331 347
332 platform_set_drvdata(pdev, omap); 348 platform_set_drvdata(pdev, omap);
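The reworked probe resolves the timer through a "ti,timers" phandle and insists that the referenced timer node advertises PWM capability. An illustrative DT fragment matching that flow (labels and node names are placeholders):

	pwm9: dmtimer-pwm {
		compatible = "ti,omap-dmtimer-pwm";
		ti,timers = <&timer9>;	/* timer9 must carry ti,timer-pwm */
		#pwm-cells = <3>;
	};
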
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 09550b1da56d..3bbe6114a420 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -3,8 +3,8 @@ config SOC_RENESAS
3 default y if ARCH_RENESAS 3 default y if ARCH_RENESAS
4 select SOC_BUS 4 select SOC_BUS
5 select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \ 5 select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
6 ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77970 || \ 6 ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77965 || \
7 ARCH_R8A77995 7 ARCH_R8A77970 || ARCH_R8A77980 || ARCH_R8A77995
8 select SYSC_R8A7743 if ARCH_R8A7743 8 select SYSC_R8A7743 if ARCH_R8A7743
9 select SYSC_R8A7745 if ARCH_R8A7745 9 select SYSC_R8A7745 if ARCH_R8A7745
10 select SYSC_R8A7779 if ARCH_R8A7779 10 select SYSC_R8A7779 if ARCH_R8A7779
@@ -14,7 +14,9 @@ config SOC_RENESAS
14 select SYSC_R8A7794 if ARCH_R8A7794 14 select SYSC_R8A7794 if ARCH_R8A7794
15 select SYSC_R8A7795 if ARCH_R8A7795 15 select SYSC_R8A7795 if ARCH_R8A7795
16 select SYSC_R8A7796 if ARCH_R8A7796 16 select SYSC_R8A7796 if ARCH_R8A7796
17 select SYSC_R8A77965 if ARCH_R8A77965
17 select SYSC_R8A77970 if ARCH_R8A77970 18 select SYSC_R8A77970 if ARCH_R8A77970
19 select SYSC_R8A77980 if ARCH_R8A77980
18 select SYSC_R8A77995 if ARCH_R8A77995 20 select SYSC_R8A77995 if ARCH_R8A77995
19 21
20if SOC_RENESAS 22if SOC_RENESAS
@@ -56,10 +58,18 @@ config SYSC_R8A7796
56 bool "R-Car M3-W System Controller support" if COMPILE_TEST 58 bool "R-Car M3-W System Controller support" if COMPILE_TEST
57 select SYSC_RCAR 59 select SYSC_RCAR
58 60
61config SYSC_R8A77965
62 bool "R-Car M3-N System Controller support" if COMPILE_TEST
63 select SYSC_RCAR
64
59config SYSC_R8A77970 65config SYSC_R8A77970
60 bool "R-Car V3M System Controller support" if COMPILE_TEST 66 bool "R-Car V3M System Controller support" if COMPILE_TEST
61 select SYSC_RCAR 67 select SYSC_RCAR
62 68
69config SYSC_R8A77980
70 bool "R-Car V3H System Controller support" if COMPILE_TEST
71 select SYSC_RCAR
72
63config SYSC_R8A77995 73config SYSC_R8A77995
64 bool "R-Car D3 System Controller support" if COMPILE_TEST 74 bool "R-Car D3 System Controller support" if COMPILE_TEST
65 select SYSC_RCAR 75 select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 845d62a08ce1..ccb5ec57a262 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -12,7 +12,9 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
12obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o 12obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
13obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o 13obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
14obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o 14obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
15obj-$(CONFIG_SYSC_R8A77965) += r8a77965-sysc.o
15obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o 16obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
17obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
16obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o 18obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
17 19
18# Family 20# Family
diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c
new file mode 100644
index 000000000000..d7f7928e3c07
--- /dev/null
+++ b/drivers/soc/renesas/r8a77965-sysc.c
@@ -0,0 +1,37 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Renesas R-Car M3-N System Controller
4 * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
5 *
6 * Based on Renesas R-Car M3-W System Controller
7 * Copyright (C) 2016 Glider bvba
8 */
9
10#include <linux/bug.h>
11#include <linux/kernel.h>
12
13#include <dt-bindings/power/r8a77965-sysc.h>
14
15#include "rcar-sysc.h"
16
17static const struct rcar_sysc_area r8a77965_areas[] __initconst = {
18 { "always-on", 0, 0, R8A77965_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
19 { "ca57-scu", 0x1c0, 0, R8A77965_PD_CA57_SCU, R8A77965_PD_ALWAYS_ON,
20 PD_SCU },
21 { "ca57-cpu0", 0x80, 0, R8A77965_PD_CA57_CPU0, R8A77965_PD_CA57_SCU,
22 PD_CPU_NOCR },
23 { "ca57-cpu1", 0x80, 1, R8A77965_PD_CA57_CPU1, R8A77965_PD_CA57_SCU,
24 PD_CPU_NOCR },
25 { "cr7", 0x240, 0, R8A77965_PD_CR7, R8A77965_PD_ALWAYS_ON },
26 { "a3vc", 0x380, 0, R8A77965_PD_A3VC, R8A77965_PD_ALWAYS_ON },
27 { "a3vp", 0x340, 0, R8A77965_PD_A3VP, R8A77965_PD_ALWAYS_ON },
28 { "a2vc1", 0x3c0, 1, R8A77965_PD_A2VC1, R8A77965_PD_A3VC },
29 { "3dg-a", 0x100, 0, R8A77965_PD_3DG_A, R8A77965_PD_ALWAYS_ON },
30 { "3dg-b", 0x100, 1, R8A77965_PD_3DG_B, R8A77965_PD_3DG_A },
31 { "a3ir", 0x180, 0, R8A77965_PD_A3IR, R8A77965_PD_ALWAYS_ON },
32};
33
34const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
35 .areas = r8a77965_areas,
36 .num_areas = ARRAY_SIZE(r8a77965_areas),
37};
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
index 8c614164718e..caf894f193ed 100644
--- a/drivers/soc/renesas/r8a77970-sysc.c
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -25,12 +25,12 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
25 PD_CPU_NOCR }, 25 PD_CPU_NOCR },
26 { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON }, 26 { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
27 { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON }, 27 { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
28 { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON }, 28 { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR },
29 { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 }, 29 { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR },
30 { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 }, 30 { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A3IR },
31 { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 }, 31 { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A3IR },
32 { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON }, 32 { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR },
33 { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 }, 33 { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR },
34}; 34};
35 35
36const struct rcar_sysc_info r8a77970_sysc_info __initconst = { 36const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
new file mode 100644
index 000000000000..9265fb525ef3
--- /dev/null
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -0,0 +1,52 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Renesas R-Car V3H System Controller
4 *
5 * Copyright (C) 2018 Renesas Electronics Corp.
6 * Copyright (C) 2018 Cogent Embedded, Inc.
7 */
8
9#include <linux/bug.h>
10#include <linux/kernel.h>
11
12#include <dt-bindings/power/r8a77980-sysc.h>
13
14#include "rcar-sysc.h"
15
16static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
17 { "always-on", 0, 0, R8A77980_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
18 { "ca53-scu", 0x140, 0, R8A77980_PD_CA53_SCU, R8A77980_PD_ALWAYS_ON,
19 PD_SCU },
20 { "ca53-cpu0", 0x200, 0, R8A77980_PD_CA53_CPU0, R8A77980_PD_CA53_SCU,
21 PD_CPU_NOCR },
22 { "ca53-cpu1", 0x200, 1, R8A77980_PD_CA53_CPU1, R8A77980_PD_CA53_SCU,
23 PD_CPU_NOCR },
24 { "ca53-cpu2", 0x200, 2, R8A77980_PD_CA53_CPU2, R8A77980_PD_CA53_SCU,
25 PD_CPU_NOCR },
26 { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
27 PD_CPU_NOCR },
28 { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
29 { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
30 { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
31 { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
32 { "a2ir2", 0x400, 2, R8A77980_PD_A2IR2, R8A77980_PD_A3IR },
33 { "a2ir3", 0x400, 3, R8A77980_PD_A2IR3, R8A77980_PD_A3IR },
34 { "a2ir4", 0x400, 4, R8A77980_PD_A2IR4, R8A77980_PD_A3IR },
35 { "a2ir5", 0x400, 5, R8A77980_PD_A2IR5, R8A77980_PD_A3IR },
36 { "a2sc0", 0x400, 6, R8A77980_PD_A2SC0, R8A77980_PD_A3IR },
37 { "a2sc1", 0x400, 7, R8A77980_PD_A2SC1, R8A77980_PD_A3IR },
38 { "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR },
39 { "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR },
40 { "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR },
41 { "a2pd0", 0x400, 11, R8A77980_PD_A2PD0, R8A77980_PD_A3IR },
42 { "a2pd1", 0x400, 12, R8A77980_PD_A2PD1, R8A77980_PD_A3IR },
43 { "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR },
44 { "a3vip", 0x2c0, 0, R8A77980_PD_A3VIP, R8A77980_PD_ALWAYS_ON },
45 { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_A3VIP },
46 { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_A3VIP },
47};
48
49const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
50 .areas = r8a77980_areas,
51 .num_areas = ARRAY_SIZE(r8a77980_areas),
52};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 3316b028f231..8e9cb7996ab0 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -13,8 +13,18 @@
13#include <linux/of_address.h> 13#include <linux/of_address.h>
14#include <linux/soc/renesas/rcar-rst.h> 14#include <linux/soc/renesas/rcar-rst.h>
15 15
16#define WDTRSTCR_RESET 0xA55A0002
17#define WDTRSTCR 0x0054
18
19static int rcar_rst_enable_wdt_reset(void __iomem *base)
20{
21 iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
22 return 0;
23}
24
16struct rst_config { 25struct rst_config {
17 unsigned int modemr; /* Mode Monitoring Register Offset */ 26 unsigned int modemr; /* Mode Monitoring Register Offset */
27 int (*configure)(void *base); /* Platform specific configuration */
18}; 28};
19 29
20static const struct rst_config rcar_rst_gen1 __initconst = { 30static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -23,6 +33,11 @@ static const struct rst_config rcar_rst_gen1 __initconst = {
23 33
24static const struct rst_config rcar_rst_gen2 __initconst = { 34static const struct rst_config rcar_rst_gen2 __initconst = {
25 .modemr = 0x60, 35 .modemr = 0x60,
36 .configure = rcar_rst_enable_wdt_reset,
37};
38
39static const struct rst_config rcar_rst_gen3 __initconst = {
40 .modemr = 0x60,
26}; 41};
27 42
28static const struct of_device_id rcar_rst_matches[] __initconst = { 43static const struct of_device_id rcar_rst_matches[] __initconst = {
@@ -38,11 +53,13 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
38 { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 }, 53 { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 },
39 { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 }, 54 { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 },
40 { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 }, 55 { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 },
41 /* R-Car Gen3 is handled like R-Car Gen2 */ 56 /* R-Car Gen3 */
42 { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 }, 57 { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
43 { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 }, 58 { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
44 { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen2 }, 59 { .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
45 { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 }, 60 { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
61 { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
62 { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
46 { /* sentinel */ } 63 { /* sentinel */ }
47}; 64};
48 65
@@ -71,6 +88,14 @@ static int __init rcar_rst_init(void)
71 rcar_rst_base = base; 88 rcar_rst_base = base;
72 cfg = match->data; 89 cfg = match->data;
73 saved_mode = ioread32(base + cfg->modemr); 90 saved_mode = ioread32(base + cfg->modemr);
91 if (cfg->configure) {
92 error = cfg->configure(base);
93 if (error) {
94 pr_warn("%pOF: Cannot run SoC specific configuration\n",
95 np);
96 goto out_put;
97 }
98 }
74 99
75 pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode); 100 pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode);
76 101
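The magic value written to WDTRSTCR packs two fields. A hedged decode of 0xA55A0002, based on the R-Car hardware manuals rather than anything stated in this diff:

	/* assumed layout of WDTRSTCR_RESET (0xA55A0002) */
	#define WDTRSTCR_CODE	0xA55A0000	/* write-protect key required
						 * on every WDTRSTCR write */
	#define WDTRSTCR_RWDT	0x00000002	/* bit 1: RWDT (watchdog)
						 * overflow triggers reset */
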
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 52c25a5e2646..faf20e719361 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -254,7 +254,7 @@ finalize:
254 pm_genpd_init(genpd, gov, false); 254 pm_genpd_init(genpd, gov, false);
255} 255}
256 256
257static const struct of_device_id rcar_sysc_matches[] = { 257static const struct of_device_id rcar_sysc_matches[] __initconst = {
258#ifdef CONFIG_SYSC_R8A7743 258#ifdef CONFIG_SYSC_R8A7743
259 { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info }, 259 { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
260#endif 260#endif
@@ -284,9 +284,15 @@ static const struct of_device_id rcar_sysc_matches[] = {
284#ifdef CONFIG_SYSC_R8A7796 284#ifdef CONFIG_SYSC_R8A7796
285 { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info }, 285 { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
286#endif 286#endif
287#ifdef CONFIG_SYSC_R8A77965
288 { .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
289#endif
287#ifdef CONFIG_SYSC_R8A77970 290#ifdef CONFIG_SYSC_R8A77970
288 { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info }, 291 { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
289#endif 292#endif
293#ifdef CONFIG_SYSC_R8A77980
294 { .compatible = "renesas,r8a77980-sysc", .data = &r8a77980_sysc_info },
295#endif
290#ifdef CONFIG_SYSC_R8A77995 296#ifdef CONFIG_SYSC_R8A77995
291 { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info }, 297 { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
292#endif 298#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 9d9daf9eb91b..dcdc9ec8eba7 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -58,7 +58,9 @@ extern const struct rcar_sysc_info r8a7792_sysc_info;
58extern const struct rcar_sysc_info r8a7794_sysc_info; 58extern const struct rcar_sysc_info r8a7794_sysc_info;
59extern const struct rcar_sysc_info r8a7795_sysc_info; 59extern const struct rcar_sysc_info r8a7795_sysc_info;
60extern const struct rcar_sysc_info r8a7796_sysc_info; 60extern const struct rcar_sysc_info r8a7796_sysc_info;
61extern const struct rcar_sysc_info r8a77965_sysc_info;
61extern const struct rcar_sysc_info r8a77970_sysc_info; 62extern const struct rcar_sysc_info r8a77970_sysc_info;
63extern const struct rcar_sysc_info r8a77980_sysc_info;
62extern const struct rcar_sysc_info r8a77995_sysc_info; 64extern const struct rcar_sysc_info r8a77995_sysc_info;
63 65
64 66
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 926b7fd6db2d..ea71c413c926 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -144,11 +144,21 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
144 .id = 0x52, 144 .id = 0x52,
145}; 145};
146 146
147static const struct renesas_soc soc_rcar_m3_n __initconst __maybe_unused = {
148 .family = &fam_rcar_gen3,
149 .id = 0x55,
150};
151
147static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = { 152static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
148 .family = &fam_rcar_gen3, 153 .family = &fam_rcar_gen3,
149 .id = 0x54, 154 .id = 0x54,
150}; 155};
151 156
157static const struct renesas_soc soc_rcar_v3h __initconst __maybe_unused = {
158 .family = &fam_rcar_gen3,
159 .id = 0x56,
160};
161
152static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = { 162static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
153 .family = &fam_rcar_gen3, 163 .family = &fam_rcar_gen3,
154 .id = 0x58, 164 .id = 0x58,
@@ -209,9 +219,15 @@ static const struct of_device_id renesas_socs[] __initconst = {
209#ifdef CONFIG_ARCH_R8A7796 219#ifdef CONFIG_ARCH_R8A7796
210 { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, 220 { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
211#endif 221#endif
222#ifdef CONFIG_ARCH_R8A77965
223 { .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
224#endif
212#ifdef CONFIG_ARCH_R8A77970 225#ifdef CONFIG_ARCH_R8A77970
213 { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m }, 226 { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
214#endif 227#endif
228#ifdef CONFIG_ARCH_R8A77980
229 { .compatible = "renesas,r8a77980", .data = &soc_rcar_v3h },
230#endif
215#ifdef CONFIG_ARCH_R8A77995 231#ifdef CONFIG_ARCH_R8A77995
216 { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 }, 232 { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
217#endif 233#endif
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 39e152abe6b9..92770d84a288 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -28,6 +28,15 @@ config KEYSTONE_NAVIGATOR_DMA
28 28
29 If unsure, say N. 29 If unsure, say N.
30 30
31config AMX3_PM
32 tristate "AMx3 Power Management"
33 depends on SOC_AM33XX || SOC_AM43XX
34 depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
35 help
36	  Enable power management on AM335x and AM437x. This is required
37	  for the suspend-to-mem and standby states on both platforms, and
38	  for deeper cpuidle C-states on AM335x.
39
31config WKUP_M3_IPC 40config WKUP_M3_IPC
32 tristate "TI AMx3 Wkup-M3 IPC Driver" 41 tristate "TI AMx3 Wkup-M3 IPC Driver"
33 depends on WKUP_M3_RPROC 42 depends on WKUP_M3_RPROC
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 8e205287f120..a22edc0b258a 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -5,5 +5,6 @@
5obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o 5obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
6knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o 6knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
7obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o 7obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
8obj-$(CONFIG_AMX3_PM) += pm33xx.o
8obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o 9obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
9obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o 10obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
new file mode 100644
index 000000000000..652739c7f718
--- /dev/null
+++ b/drivers/soc/ti/pm33xx.c
@@ -0,0 +1,349 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * AM33XX Power Management Routines
4 *
5 * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Vaibhav Bedia, Dave Gerlach
7 */
8
9#include <linux/cpu.h>
10#include <linux/err.h>
11#include <linux/genalloc.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/io.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_data/pm33xx.h>
18#include <linux/platform_device.h>
19#include <linux/sizes.h>
20#include <linux/sram.h>
21#include <linux/suspend.h>
22#include <linux/ti-emif-sram.h>
23#include <linux/wkup_m3_ipc.h>
24
25#include <asm/proc-fns.h>
26#include <asm/suspend.h>
27#include <asm/system_misc.h>
28
29#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
30 (unsigned long)pm_sram->do_wfi)
31
32static int (*am33xx_do_wfi_sram)(unsigned long unused);
33static phys_addr_t am33xx_do_wfi_sram_phys;
34
35static struct gen_pool *sram_pool, *sram_pool_data;
36static unsigned long ocmcram_location, ocmcram_location_data;
37
38static struct am33xx_pm_platform_data *pm_ops;
39static struct am33xx_pm_sram_addr *pm_sram;
40
41static struct device *pm33xx_dev;
42static struct wkup_m3_ipc *m3_ipc;
43
44static u32 sram_suspend_address(unsigned long addr)
45{
46 return ((unsigned long)am33xx_do_wfi_sram +
47 AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
48}
49
50#ifdef CONFIG_SUSPEND
51static int am33xx_pm_suspend(suspend_state_t suspend_state)
52{
53 int i, ret = 0;
54
55 ret = pm_ops->soc_suspend((unsigned long)suspend_state,
56 am33xx_do_wfi_sram);
57
58 if (ret) {
59 dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
60 } else {
61 i = m3_ipc->ops->request_pm_status(m3_ipc);
62
63 switch (i) {
64 case 0:
65 dev_info(pm33xx_dev,
66 "PM: Successfully put all powerdomains to target state\n");
67 break;
68 case 1:
69 dev_err(pm33xx_dev,
70 "PM: Could not transition all powerdomains to target state\n");
71 ret = -1;
72 break;
73 default:
74 dev_err(pm33xx_dev,
75 "PM: CM3 returned unknown result = %d\n", i);
76 ret = -1;
77 }
78 }
79
80 return ret;
81}
82
83static int am33xx_pm_enter(suspend_state_t suspend_state)
84{
85 int ret = 0;
86
87 switch (suspend_state) {
88 case PM_SUSPEND_MEM:
89 case PM_SUSPEND_STANDBY:
90 ret = am33xx_pm_suspend(suspend_state);
91 break;
92 default:
93 ret = -EINVAL;
94 }
95
96 return ret;
97}
98
99static int am33xx_pm_begin(suspend_state_t state)
100{
101 int ret = -EINVAL;
102
103 switch (state) {
104 case PM_SUSPEND_MEM:
105 ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
106 break;
107 case PM_SUSPEND_STANDBY:
108 ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
109 break;
110 }
111
112 return ret;
113}
114
115static void am33xx_pm_end(void)
116{
117 m3_ipc->ops->finish_low_power(m3_ipc);
118}
119
120static int am33xx_pm_valid(suspend_state_t state)
121{
122 switch (state) {
123 case PM_SUSPEND_STANDBY:
124 case PM_SUSPEND_MEM:
125 return 1;
126 default:
127 return 0;
128 }
129}
130
131static const struct platform_suspend_ops am33xx_pm_ops = {
132 .begin = am33xx_pm_begin,
133 .end = am33xx_pm_end,
134 .enter = am33xx_pm_enter,
135 .valid = am33xx_pm_valid,
136};
137#endif /* CONFIG_SUSPEND */
138
139static void am33xx_pm_set_ipc_ops(void)
140{
141 u32 resume_address;
142 int temp;
143
144 temp = ti_emif_get_mem_type();
145 if (temp < 0) {
146 dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
147 return;
148 }
149 m3_ipc->ops->set_mem_type(m3_ipc, temp);
150
151 /* Physical resume address to be used by ROM code */
152 resume_address = am33xx_do_wfi_sram_phys +
153 *pm_sram->resume_offset + 0x4;
154
155 m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
156}
157
158static void am33xx_pm_free_sram(void)
159{
160 gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
161 gen_pool_free(sram_pool_data, ocmcram_location_data,
162 sizeof(struct am33xx_pm_ro_sram_data));
163}
164
165/*
166 * Push the minimal suspend-resume code to SRAM
167 */
168static int am33xx_pm_alloc_sram(void)
169{
170 struct device_node *np;
171 int ret = 0;
172
173 np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
174 if (!np) {
175 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
176 if (!np) {
177 dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
178 __func__);
179 return -ENODEV;
180 }
181 }
182
183 sram_pool = of_gen_pool_get(np, "pm-sram", 0);
184 if (!sram_pool) {
185 dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
186 __func__);
187 ret = -ENODEV;
188 goto mpu_put_node;
189 }
190
191 sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
192 if (!sram_pool_data) {
193 dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
194 __func__);
195 ret = -ENODEV;
196 goto mpu_put_node;
197 }
198
199 ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
200 if (!ocmcram_location) {
201 dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
202 __func__);
203 ret = -ENOMEM;
204 goto mpu_put_node;
205 }
206
207 ocmcram_location_data = gen_pool_alloc(sram_pool_data,
208 sizeof(struct emif_regs_amx3));
209 if (!ocmcram_location_data) {
210 dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
211 gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
212 ret = -ENOMEM;
213 }
214
215mpu_put_node:
216 of_node_put(np);
217 return ret;
218}
219
220static int am33xx_push_sram_idle(void)
221{
222 struct am33xx_pm_ro_sram_data ro_sram_data;
223 int ret;
224 u32 table_addr, ro_data_addr;
225 void *copy_addr;
226
227 ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
228 ro_sram_data.amx3_pm_sram_data_phys =
229 gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
230
231 /* Save physical address to calculate resume offset during pm init */
232 am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
233 ocmcram_location);
234
235 am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
236 pm_sram->do_wfi,
237 *pm_sram->do_wfi_sz);
238 if (!am33xx_do_wfi_sram) {
239 dev_err(pm33xx_dev,
240 "PM: %s: am33xx_do_wfi copy to sram failed\n",
241 __func__);
242 return -ENODEV;
243 }
244
245 table_addr =
246 sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
247 ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
248 if (ret) {
249 dev_dbg(pm33xx_dev,
250 "PM: %s: EMIF function copy failed\n", __func__);
251 return -EPROBE_DEFER;
252 }
253
254 ro_data_addr =
255 sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
256 copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
257 &ro_sram_data,
258 sizeof(ro_sram_data));
259 if (!copy_addr) {
260 dev_err(pm33xx_dev,
261 "PM: %s: ro_sram_data copy to sram failed\n",
262 __func__);
263 return -ENODEV;
264 }
265
266 return 0;
267}
268
269static int am33xx_pm_probe(struct platform_device *pdev)
270{
271 struct device *dev = &pdev->dev;
272 int ret;
273
274 if (!of_machine_is_compatible("ti,am33xx") &&
275 !of_machine_is_compatible("ti,am43"))
276 return -ENODEV;
277
278 pm_ops = dev->platform_data;
279 if (!pm_ops) {
280 dev_err(dev, "PM: Cannot get core PM ops!\n");
281 return -ENODEV;
282 }
283
284 pm_sram = pm_ops->get_sram_addrs();
285 if (!pm_sram) {
286 dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
287 return -ENODEV;
288 }
289
290 pm33xx_dev = dev;
291
292 ret = am33xx_pm_alloc_sram();
293 if (ret)
294 return ret;
295
296 ret = am33xx_push_sram_idle();
297 if (ret)
298 goto err_free_sram;
299
300 m3_ipc = wkup_m3_ipc_get();
301 if (!m3_ipc) {
302 dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
303 ret = -EPROBE_DEFER;
304 goto err_free_sram;
305 }
306
307 am33xx_pm_set_ipc_ops();
308
309#ifdef CONFIG_SUSPEND
310 suspend_set_ops(&am33xx_pm_ops);
311#endif /* CONFIG_SUSPEND */
312
313 ret = pm_ops->init();
314 if (ret) {
315 dev_err(dev, "Unable to call core pm init!\n");
316 ret = -ENODEV;
317 goto err_put_wkup_m3_ipc;
318 }
319
320 return 0;
321
322err_put_wkup_m3_ipc:
323 wkup_m3_ipc_put(m3_ipc);
324err_free_sram:
325 am33xx_pm_free_sram();
326 pm33xx_dev = NULL;
327 return ret;
328}
329
330static int am33xx_pm_remove(struct platform_device *pdev)
331{
332 suspend_set_ops(NULL);
333 wkup_m3_ipc_put(m3_ipc);
334 am33xx_pm_free_sram();
335 return 0;
336}
337
338static struct platform_driver am33xx_pm_driver = {
339 .driver = {
340 .name = "pm33xx",
341 },
342 .probe = am33xx_pm_probe,
343 .remove = am33xx_pm_remove,
344};
345module_platform_driver(am33xx_pm_driver);
346
347MODULE_ALIAS("platform:pm33xx");
348MODULE_LICENSE("GPL v2");
349MODULE_DESCRIPTION("am33xx power management driver");
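
sram_suspend_address() above rebases a symbol from the do_wfi blob's link-time address onto the blob's copy in OCMC SRAM; once the driver is bound, suspend is entered through the usual interface (writing "mem" or "standby" to /sys/power/state). A standalone sketch of the translation with made-up addresses:

	#include <stdio.h>

	/* rebase a symbol from the blob's link address onto its SRAM copy */
	static unsigned long sram_addr(unsigned long sym, unsigned long do_wfi,
				       unsigned long sram_copy)
	{
		return sram_copy + (sym - do_wfi);
	}

	int main(void)
	{
		/* hypothetical: blob linked at 0xc0100000, copied to SRAM at
		 * 0x40300000, symbol 0x84 bytes into the blob */
		printf("0x%lx\n",
		       sram_addr(0xc0100084, 0xc0100000, 0x40300000));
		return 0;
	}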