author    Linus Torvalds <torvalds@linux-foundation.org>	2017-11-23 01:46:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2017-11-23 01:46:06 -0500
commit    14b661ebb6cfa386afa5a5247eb09e24d420af3a
tree      3917a344c6fc46b2365077e99a27d84b9e2f2675
parent    3f3211e755f329c56acf55faa0dbf91befd7b5ca
parent    1530578abdac4edce9244c7a1962ded3ffdb58ce
Merge tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd
Pull MTD updates from Richard Weinberger:
"General changes:
- Unconfuse get_unmapped_area and point/unpoint driver methods
- New partition parser: sharpslpart
- Kill GENERIC_IO
- Various fixes
NAND changes:
- Add a flag to mark NANDs that require 3 address cycles to encode a
page address
- Set a default ECC/free layout when NAND_ECC_NONE is requested
- Fix a bug in panic_nand_write()
- Another batch of cleanups for the denali driver
- Fix PM support in the atmel driver
- Remove support for platform data in the omap driver
- Fix subpage write in the omap driver
- Fix irq handling in the mtk driver
- Change link order of mtk_ecc and mtk_nand drivers to speed up boot
time
- Change log level of ECC error messages in the mxc driver
- Patch the pxa3xx driver to support Armada 8k platforms
- Add BAM DMA support to the qcom driver
- Convert gpio-nand to the GPIO desc API
- Fix ECC handling in the mt29f driver
SPI-NOR changes:
- Introduce system power management support
- New mechanism to select the proper .quad_enable() hook by JEDEC
ID, when needed, instead of only by manufacturer ID
- Add support to new memory parts from Gigadevice, Winbond, Macronix
and Everspin
- Maintenance for Cadence, Intel, Mediatek and STM32 drivers"
* tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd: (85 commits)
mtd: Avoid probe failures when mtd->dbg.dfs_dir is invalid
mtd: sharpslpart: Add sharpslpart partition parser
mtd: Add sanity checks in mtd_write/read_oob()
mtd: remove the get_unmapped_area method
mtd: implement mtd_get_unmapped_area() using the point method
mtd: chips/map_rom.c: implement point and unpoint methods
mtd: chips/map_ram.c: implement point and unpoint methods
mtd: mtdram: properly handle the phys argument in the point method
mtd: mtdswap: fix spelling mistake: 'TRESHOLD' -> 'THRESHOLD'
mtd: slram: use memremap() instead of ioremap()
kconfig: kill off GENERIC_IO option
mtd: Fix C++ comment in include/linux/mtd/mtd.h
mtd: constify mtd_partition
mtd: plat-ram: Replace manual resource management by devm
mtd: nand: Fix writing mtdoops to nand flash.
mtd: intel-spi: Add Intel Lewisburg PCH SPI super SKU PCI ID
mtd: nand: mtk: fix infinite ECC decode IRQ issue
mtd: spi-nor: Add support for mr25h128
mtd: nand: mtk: change the compile sequence of mtk_nand.o and mtk_ecc.o
mtd: spi-nor: enable 4B opcodes for mx66l51235l
...
67 files changed, 1648 insertions, 754 deletions
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt index f248056da24c..bb2075df9b38 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt | |||
@@ -1,7 +1,9 @@ | |||
1 | * Cadence Quad SPI controller | 1 | * Cadence Quad SPI controller |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible : Should be "cdns,qspi-nor". | 4 | - compatible : should be one of the following: |
5 | Generic default - "cdns,qspi-nor". | ||
6 | For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor". | ||
5 | - reg : Contains two entries, each of which is a tuple consisting of a | 7 | - reg : Contains two entries, each of which is a tuple consisting of a |
6 | physical address and length. The first entry is the address and | 8 | physical address and length. The first entry is the address and |
7 | length of the controller register set. The second entry is the | 9 | length of the controller register set. The second entry is the |
@@ -14,6 +16,9 @@ Required properties: | |||
14 | 16 | ||
15 | Optional properties: | 17 | Optional properties: |
16 | - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not. | 18 | - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not. |
19 | - cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch | ||
20 | the read data rather than the QSPI clock. Make sure that QSPI return | ||
21 | clock is populated on the board before using this property. | ||
17 | 22 | ||
18 | Optional subnodes: | 23 | Optional subnodes: |
19 | Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional | 24 | Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional |
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt index 504291d2e5c2..0ee8edb60efc 100644 --- a/Documentation/devicetree/bindings/mtd/denali-nand.txt +++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt | |||
@@ -29,7 +29,7 @@ nand: nand@ff900000 { | |||
29 | #address-cells = <1>; | 29 | #address-cells = <1>; |
30 | #size-cells = <1>; | 30 | #size-cells = <1>; |
31 | compatible = "altr,socfpga-denali-nand"; | 31 | compatible = "altr,socfpga-denali-nand"; |
32 | reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; | 32 | reg = <0xff900000 0x20>, <0xffb80000 0x1000>; |
33 | reg-names = "nand_data", "denali_reg"; | 33 | reg-names = "nand_data", "denali_reg"; |
34 | interrupts = <0 144 4>; | 34 | interrupts = <0 144 4>; |
35 | }; | 35 | }; |
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt index 4cab5d85cf6f..376fa2f50e6b 100644 --- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt +++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt | |||
@@ -14,6 +14,7 @@ Required properties: | |||
14 | at25df641 | 14 | at25df641 |
15 | at26df081a | 15 | at26df081a |
16 | en25s64 | 16 | en25s64 |
17 | mr25h128 | ||
17 | mr25h256 | 18 | mr25h256 |
18 | mr25h10 | 19 | mr25h10 |
19 | mr25h40 | 20 | mr25h40 |
diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt index 840f9405dcf0..56d3668e2c50 100644 --- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt | |||
@@ -1,13 +1,16 @@ | |||
1 | * Serial NOR flash controller for MTK MT81xx (and similar) | 1 | * Serial NOR flash controller for MTK MT81xx (and similar) |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: The possible values are: | 4 | - compatible: For mt8173, compatible should be "mediatek,mt8173-nor", |
5 | "mediatek,mt2701-nor" | 5 | and it's the fallback compatible for other Soc. |
6 | "mediatek,mt7623-nor" | 6 | For every other SoC, should contain both the SoC-specific compatible |
7 | string and "mediatek,mt8173-nor". | ||
8 | The possible values are: | ||
9 | "mediatek,mt2701-nor", "mediatek,mt8173-nor" | ||
10 | "mediatek,mt2712-nor", "mediatek,mt8173-nor" | ||
11 | "mediatek,mt7622-nor", "mediatek,mt8173-nor" | ||
12 | "mediatek,mt7623-nor", "mediatek,mt8173-nor" | ||
7 | "mediatek,mt8173-nor" | 13 | "mediatek,mt8173-nor" |
8 | For mt8173, compatible should be "mediatek,mt8173-nor". | ||
9 | For every other SoC, should contain both the SoC-specific compatible string | ||
10 | and "mediatek,mt8173-nor". | ||
11 | - reg: physical base address and length of the controller's register | 14 | - reg: physical base address and length of the controller's register |
12 | - clocks: the phandle of the clocks needed by the nor controller | 15 | - clocks: the phandle of the clocks needed by the nor controller |
13 | - clock-names: the names of the clocks | 16 | - clock-names: the names of the clocks |
diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt index d9b655f11048..d4ee4da58463 100644 --- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt +++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt | |||
@@ -5,9 +5,13 @@ Required properties: | |||
5 | - compatible: Should be set to one of the following: | 5 | - compatible: Should be set to one of the following: |
6 | marvell,pxa3xx-nand | 6 | marvell,pxa3xx-nand |
7 | marvell,armada370-nand | 7 | marvell,armada370-nand |
8 | marvell,armada-8k-nand | ||
8 | - reg: The register base for the controller | 9 | - reg: The register base for the controller |
9 | - interrupts: The interrupt to map | 10 | - interrupts: The interrupt to map |
10 | - #address-cells: Set to <1> if the node includes partitions | 11 | - #address-cells: Set to <1> if the node includes partitions |
12 | - marvell,system-controller: Set to retrieve the syscon node that handles | ||
13 | NAND controller related registers (only required | ||
14 | with marvell,armada-8k-nand compatible). | ||
11 | 15 | ||
12 | Optional properties: | 16 | Optional properties: |
13 | 17 | ||
diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c index b592f79a1742..fa8e7dd4d898 100644 --- a/arch/arm/mach-pxa/cm-x255.c +++ b/arch/arm/mach-pxa/cm-x255.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/mtd/partitions.h> | 14 | #include <linux/mtd/partitions.h> |
15 | #include <linux/mtd/physmap.h> | 15 | #include <linux/mtd/physmap.h> |
16 | #include <linux/mtd/nand-gpio.h> | 16 | #include <linux/mtd/nand-gpio.h> |
17 | 17 | #include <linux/gpio/machine.h> | |
18 | #include <linux/spi/spi.h> | 18 | #include <linux/spi/spi.h> |
19 | #include <linux/spi/pxa2xx_spi.h> | 19 | #include <linux/spi/pxa2xx_spi.h> |
20 | 20 | ||
@@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {} | |||
176 | #endif | 176 | #endif |
177 | 177 | ||
178 | #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) | 178 | #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) |
179 | |||
180 | static struct gpiod_lookup_table cmx255_nand_gpiod_table = { | ||
181 | .dev_id = "gpio-nand", | ||
182 | .table = { | ||
183 | GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH), | ||
184 | GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH), | ||
185 | GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH), | ||
186 | GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH), | ||
187 | }, | ||
188 | }; | ||
189 | |||
179 | static struct resource cmx255_nand_resource[] = { | 190 | static struct resource cmx255_nand_resource[] = { |
180 | [0] = { | 191 | [0] = { |
181 | .start = PXA_CS1_PHYS, | 192 | .start = PXA_CS1_PHYS, |
@@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = { | |||
198 | }; | 209 | }; |
199 | 210 | ||
200 | static struct gpio_nand_platdata cmx255_nand_platdata = { | 211 | static struct gpio_nand_platdata cmx255_nand_platdata = { |
201 | .gpio_nce = GPIO_NAND_CS, | ||
202 | .gpio_cle = GPIO_NAND_CLE, | ||
203 | .gpio_ale = GPIO_NAND_ALE, | ||
204 | .gpio_rdy = GPIO_NAND_RB, | ||
205 | .gpio_nwp = -1, | ||
206 | .parts = cmx255_nand_parts, | 212 | .parts = cmx255_nand_parts, |
207 | .num_parts = ARRAY_SIZE(cmx255_nand_parts), | 213 | .num_parts = ARRAY_SIZE(cmx255_nand_parts), |
208 | .chip_delay = 25, | 214 | .chip_delay = 25, |
@@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = { | |||
220 | 226 | ||
221 | static void __init cmx255_init_nand(void) | 227 | static void __init cmx255_init_nand(void) |
222 | { | 228 | { |
229 | gpiod_add_lookup_table(&cmx255_nand_gpiod_table); | ||
223 | platform_device_register(&cmx255_nand); | 230 | platform_device_register(&cmx255_nand); |
224 | } | 231 | } |
225 | #else | 232 | #else |
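The board file above now hands the NAND control lines to the driver through a gpiod lookup table instead of raw GPIO numbers in platform data. Below is a hedged consumer-side sketch of how a driver bound as "gpio-nand" could fetch those descriptors by function name; the structure and helper names are hypothetical and this is not the actual drivers/mtd/nand/gpio.c conversion.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

struct gpio_nand_lines {
	struct gpio_desc *nce;
	struct gpio_desc *cle;
	struct gpio_desc *ale;
	struct gpio_desc *rdy;
};

static int gpio_nand_get_lines(struct platform_device *pdev,
			       struct gpio_nand_lines *l)
{
	struct device *dev = &pdev->dev;

	/* descriptors come from the gpiod_add_lookup_table() call above */
	l->nce = devm_gpiod_get(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(l->nce))
		return PTR_ERR(l->nce);

	l->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
	if (IS_ERR(l->cle))
		return PTR_ERR(l->cle);

	l->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
	if (IS_ERR(l->ale))
		return PTR_ERR(l->ale);

	l->rdy = devm_gpiod_get(dev, "rdy", GPIOD_IN);
	return PTR_ERR_OR_ZERO(l->rdy);
}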
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index d9280482a2f8..c68add8df3ae 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common | |||
@@ -10,7 +10,6 @@ config UML | |||
10 | select HAVE_DEBUG_KMEMLEAK | 10 | select HAVE_DEBUG_KMEMLEAK |
11 | select GENERIC_IRQ_SHOW | 11 | select GENERIC_IRQ_SHOW |
12 | select GENERIC_CPU_DEVICES | 12 | select GENERIC_CPU_DEVICES |
13 | select GENERIC_IO | ||
14 | select GENERIC_CLOCKEVENTS | 13 | select GENERIC_CLOCKEVENTS |
15 | select HAVE_GCC_PLUGINS | 14 | select HAVE_GCC_PLUGINS |
16 | select TTY # Needed for line.c | 15 | select TTY # Needed for line.c |
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 5a2d71729b9a..2a8ac6829d42 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -1,6 +1,5 @@ | |||
1 | menuconfig MTD | 1 | menuconfig MTD |
2 | tristate "Memory Technology Device (MTD) support" | 2 | tristate "Memory Technology Device (MTD) support" |
3 | depends on GENERIC_IO | ||
4 | help | 3 | help |
5 | Memory Technology Devices are flash, RAM and similar chips, often | 4 | Memory Technology Devices are flash, RAM and similar chips, often |
6 | used for solid state file systems on embedded devices. This option | 5 | used for solid state file systems on embedded devices. This option |
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index afb43d5e1782..1cd0fff0e940 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c | |||
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch | |||
20 | static int mapram_erase (struct mtd_info *, struct erase_info *); | 20 | static int mapram_erase (struct mtd_info *, struct erase_info *); |
21 | static void mapram_nop (struct mtd_info *); | 21 | static void mapram_nop (struct mtd_info *); |
22 | static struct mtd_info *map_ram_probe(struct map_info *map); | 22 | static struct mtd_info *map_ram_probe(struct map_info *map); |
23 | static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long, | 23 | static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len, |
24 | unsigned long, unsigned long); | 24 | size_t *retlen, void **virt, resource_size_t *phys); |
25 | static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len); | ||
25 | 26 | ||
26 | 27 | ||
27 | static struct mtd_chip_driver mapram_chipdrv = { | 28 | static struct mtd_chip_driver mapram_chipdrv = { |
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map) | |||
65 | mtd->type = MTD_RAM; | 66 | mtd->type = MTD_RAM; |
66 | mtd->size = map->size; | 67 | mtd->size = map->size; |
67 | mtd->_erase = mapram_erase; | 68 | mtd->_erase = mapram_erase; |
68 | mtd->_get_unmapped_area = mapram_unmapped_area; | ||
69 | mtd->_read = mapram_read; | 69 | mtd->_read = mapram_read; |
70 | mtd->_write = mapram_write; | 70 | mtd->_write = mapram_write; |
71 | mtd->_panic_write = mapram_write; | 71 | mtd->_panic_write = mapram_write; |
72 | mtd->_point = mapram_point; | ||
72 | mtd->_sync = mapram_nop; | 73 | mtd->_sync = mapram_nop; |
74 | mtd->_unpoint = mapram_unpoint; | ||
73 | mtd->flags = MTD_CAP_RAM; | 75 | mtd->flags = MTD_CAP_RAM; |
74 | mtd->writesize = 1; | 76 | mtd->writesize = 1; |
75 | 77 | ||
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map) | |||
81 | return mtd; | 83 | return mtd; |
82 | } | 84 | } |
83 | 85 | ||
84 | 86 | static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len, | |
85 | /* | 87 | size_t *retlen, void **virt, resource_size_t *phys) |
86 | * Allow NOMMU mmap() to directly map the device (if not NULL) | ||
87 | * - return the address to which the offset maps | ||
88 | * - return -ENOSYS to indicate refusal to do the mapping | ||
89 | */ | ||
90 | static unsigned long mapram_unmapped_area(struct mtd_info *mtd, | ||
91 | unsigned long len, | ||
92 | unsigned long offset, | ||
93 | unsigned long flags) | ||
94 | { | 88 | { |
95 | struct map_info *map = mtd->priv; | 89 | struct map_info *map = mtd->priv; |
96 | return (unsigned long) map->virt + offset; | 90 | |
91 | if (!map->virt) | ||
92 | return -EINVAL; | ||
93 | *virt = map->virt + from; | ||
94 | if (phys) | ||
95 | *phys = map->phys + from; | ||
96 | *retlen = len; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | ||
101 | { | ||
102 | return 0; | ||
97 | } | 103 | } |
98 | 104 | ||
99 | static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) | 105 | static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) |
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index e67f73ab44c9..20e3604b4d71 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c | |||
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch | |||
20 | static void maprom_nop (struct mtd_info *); | 20 | static void maprom_nop (struct mtd_info *); |
21 | static struct mtd_info *map_rom_probe(struct map_info *map); | 21 | static struct mtd_info *map_rom_probe(struct map_info *map); |
22 | static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); | 22 | static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); |
23 | static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long, | 23 | static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len, |
24 | unsigned long, unsigned long); | 24 | size_t *retlen, void **virt, resource_size_t *phys); |
25 | static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len); | ||
26 | |||
25 | 27 | ||
26 | static struct mtd_chip_driver maprom_chipdrv = { | 28 | static struct mtd_chip_driver maprom_chipdrv = { |
27 | .probe = map_rom_probe, | 29 | .probe = map_rom_probe, |
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map) | |||
51 | mtd->name = map->name; | 53 | mtd->name = map->name; |
52 | mtd->type = MTD_ROM; | 54 | mtd->type = MTD_ROM; |
53 | mtd->size = map->size; | 55 | mtd->size = map->size; |
54 | mtd->_get_unmapped_area = maprom_unmapped_area; | 56 | mtd->_point = maprom_point; |
57 | mtd->_unpoint = maprom_unpoint; | ||
55 | mtd->_read = maprom_read; | 58 | mtd->_read = maprom_read; |
56 | mtd->_write = maprom_write; | 59 | mtd->_write = maprom_write; |
57 | mtd->_sync = maprom_nop; | 60 | mtd->_sync = maprom_nop; |
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map) | |||
66 | } | 69 | } |
67 | 70 | ||
68 | 71 | ||
69 | /* | 72 | static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len, |
70 | * Allow NOMMU mmap() to directly map the device (if not NULL) | 73 | size_t *retlen, void **virt, resource_size_t *phys) |
71 | * - return the address to which the offset maps | ||
72 | * - return -ENOSYS to indicate refusal to do the mapping | ||
73 | */ | ||
74 | static unsigned long maprom_unmapped_area(struct mtd_info *mtd, | ||
75 | unsigned long len, | ||
76 | unsigned long offset, | ||
77 | unsigned long flags) | ||
78 | { | 74 | { |
79 | struct map_info *map = mtd->priv; | 75 | struct map_info *map = mtd->priv; |
80 | return (unsigned long) map->virt + offset; | 76 | |
77 | if (!map->virt) | ||
78 | return -EINVAL; | ||
79 | *virt = map->virt + from; | ||
80 | if (phys) | ||
81 | *phys = map->phys + from; | ||
82 | *retlen = len; | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | ||
87 | { | ||
88 | return 0; | ||
81 | } | 89 | } |
82 | 90 | ||
83 | static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) | 91 | static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) |
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 84b16133554b..0806f72102c0 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c | |||
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor) | |||
1814 | struct dentry *root = floor->dbg.dfs_dir; | 1814 | struct dentry *root = floor->dbg.dfs_dir; |
1815 | struct docg3 *docg3 = floor->priv; | 1815 | struct docg3 *docg3 = floor->priv; |
1816 | 1816 | ||
1817 | if (IS_ERR_OR_NULL(root)) | 1817 | if (IS_ERR_OR_NULL(root)) { |
1818 | if (IS_ENABLED(CONFIG_DEBUG_FS) && | ||
1819 | !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) | ||
1820 | dev_warn(floor->dev.parent, | ||
1821 | "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); | ||
1818 | return; | 1822 | return; |
1823 | } | ||
1819 | 1824 | ||
1820 | debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, | 1825 | debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, |
1821 | &flashcontrol_fops); | 1826 | &flashcontrol_fops); |
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 268aae45b514..555b94406e0b 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c | |||
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = { | |||
583 | } | 583 | } |
584 | }; | 584 | }; |
585 | 585 | ||
586 | static struct mtd_partition lart_partitions[] = { | 586 | static const struct mtd_partition lart_partitions[] = { |
587 | /* blob */ | 587 | /* blob */ |
588 | { | 588 | { |
589 | .name = "blob", | 589 | .name = "blob", |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 00eea6fd379c..dbe6a1de2bb8 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
359 | {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, | 359 | {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, |
360 | 360 | ||
361 | /* Everspin MRAMs (non-JEDEC) */ | 361 | /* Everspin MRAMs (non-JEDEC) */ |
362 | { "mr25h128" }, /* 128 Kib, 40 MHz */ | ||
362 | { "mr25h256" }, /* 256 Kib, 40 MHz */ | 363 | { "mr25h256" }, /* 256 Kib, 40 MHz */ |
363 | { "mr25h10" }, /* 1 Mib, 40 MHz */ | 364 | { "mr25h10" }, /* 1 Mib, 40 MHz */ |
364 | { "mr25h40" }, /* 4 Mib, 40 MHz */ | 365 | { "mr25h40" }, /* 4 Mib, 40 MHz */ |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index cbd8547d7aad..0bf4aeaf0cb8 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
16 | #include <linux/mm.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
18 | #include <linux/mtd/mtdram.h> | 19 | #include <linux/mtd/mtdram.h> |
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
69 | { | 70 | { |
70 | *virt = mtd->priv + from; | 71 | *virt = mtd->priv + from; |
71 | *retlen = len; | 72 | *retlen = len; |
73 | |||
74 | if (phys) { | ||
75 | /* limit retlen to the number of contiguous physical pages */ | ||
76 | unsigned long page_ofs = offset_in_page(*virt); | ||
77 | void *addr = *virt - page_ofs; | ||
78 | unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr); | ||
79 | |||
80 | *phys = __pfn_to_phys(pfn0) + page_ofs; | ||
81 | len += page_ofs; | ||
82 | while (len > PAGE_SIZE) { | ||
83 | len -= PAGE_SIZE; | ||
84 | addr += PAGE_SIZE; | ||
85 | pfn0++; | ||
86 | pfn1 = vmalloc_to_pfn(addr); | ||
87 | if (pfn1 != pfn0) { | ||
88 | *retlen = addr - *virt; | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | } | ||
93 | |||
72 | return 0; | 94 | return 0; |
73 | } | 95 | } |
74 | 96 | ||
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
77 | return 0; | 99 | return 0; |
78 | } | 100 | } |
79 | 101 | ||
80 | /* | ||
81 | * Allow NOMMU mmap() to directly map the device (if not NULL) | ||
82 | * - return the address to which the offset maps | ||
83 | * - return -ENOSYS to indicate refusal to do the mapping | ||
84 | */ | ||
85 | static unsigned long ram_get_unmapped_area(struct mtd_info *mtd, | ||
86 | unsigned long len, | ||
87 | unsigned long offset, | ||
88 | unsigned long flags) | ||
89 | { | ||
90 | return (unsigned long) mtd->priv + offset; | ||
91 | } | ||
92 | |||
93 | static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, | 102 | static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, |
94 | size_t *retlen, u_char *buf) | 103 | size_t *retlen, u_char *buf) |
95 | { | 104 | { |
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, | |||
134 | mtd->_erase = ram_erase; | 143 | mtd->_erase = ram_erase; |
135 | mtd->_point = ram_point; | 144 | mtd->_point = ram_point; |
136 | mtd->_unpoint = ram_unpoint; | 145 | mtd->_unpoint = ram_unpoint; |
137 | mtd->_get_unmapped_area = ram_get_unmapped_area; | ||
138 | mtd->_read = ram_read; | 146 | mtd->_read = ram_read; |
139 | mtd->_write = ram_write; | 147 | mtd->_write = ram_write; |
140 | 148 | ||
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8087c36dc693..0ec85f316d24 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c | |||
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length | |||
163 | } | 163 | } |
164 | 164 | ||
165 | if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start = | 165 | if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start = |
166 | ioremap(start, length))) { | 166 | memremap(start, length, |
167 | E("slram: ioremap failed\n"); | 167 | MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) { |
168 | E("slram: memremap failed\n"); | ||
168 | return -EIO; | 169 | return -EIO; |
169 | } | 170 | } |
170 | ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end = | 171 | ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end = |
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length | |||
186 | 187 | ||
187 | if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) { | 188 | if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) { |
188 | E("slram: Failed to register new device\n"); | 189 | E("slram: Failed to register new device\n"); |
189 | iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); | 190 | memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); |
190 | kfree((*curmtd)->mtdinfo->priv); | 191 | kfree((*curmtd)->mtdinfo->priv); |
191 | kfree((*curmtd)->mtdinfo); | 192 | kfree((*curmtd)->mtdinfo); |
192 | return(-EAGAIN); | 193 | return(-EAGAIN); |
@@ -206,7 +207,7 @@ static void unregister_devices(void) | |||
206 | while (slram_mtdlist) { | 207 | while (slram_mtdlist) { |
207 | nextitem = slram_mtdlist->next; | 208 | nextitem = slram_mtdlist->next; |
208 | mtd_device_unregister(slram_mtdlist->mtdinfo); | 209 | mtd_device_unregister(slram_mtdlist->mtdinfo); |
209 | iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); | 210 | memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); |
210 | kfree(slram_mtdlist->mtdinfo->priv); | 211 | kfree(slram_mtdlist->mtdinfo->priv); |
211 | kfree(slram_mtdlist->mtdinfo); | 212 | kfree(slram_mtdlist->mtdinfo); |
212 | kfree(slram_mtdlist); | 213 | kfree(slram_mtdlist); |
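slram's backing window is ordinary RAM rather than MMIO, which is why the hunk above replaces ioremap()/iounmap() with memremap()/memunmap(). A minimal sketch of the same pattern, assuming the region really is system RAM (names are placeholders):

#include <linux/io.h>

static void *map_ram_window(phys_addr_t start, size_t length)
{
	/* accept whichever cacheable mapping type the platform provides */
	return memremap(start, length, MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);
}

static void unmap_ram_window(void *virt)
{
	memunmap(virt);
}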
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c index d504b3d1791d..70f488628464 100644 --- a/drivers/mtd/maps/cfi_flagadm.c +++ b/drivers/mtd/maps/cfi_flagadm.c | |||
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = { | |||
61 | .bankwidth = 2, | 61 | .bankwidth = 2, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static struct mtd_partition flagadm_parts[] = { | 64 | static const struct mtd_partition flagadm_parts[] = { |
65 | { | 65 | { |
66 | .name = "Bootloader", | 66 | .name = "Bootloader", |
67 | .offset = FLASH_PARTITION0_ADDR, | 67 | .offset = FLASH_PARTITION0_ADDR, |
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index 15bbda03be65..a0b8fa7849a9 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c | |||
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = { | |||
47 | /* | 47 | /* |
48 | * MTD partitioning stuff | 48 | * MTD partitioning stuff |
49 | */ | 49 | */ |
50 | static struct mtd_partition partitions[] = | 50 | static const struct mtd_partition partitions[] = |
51 | { | 51 | { |
52 | { | 52 | { |
53 | .name = "FileSystem", | 53 | .name = "FileSystem", |
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c index 81dc2598bc0a..3528497f96c7 100644 --- a/drivers/mtd/maps/netsc520.c +++ b/drivers/mtd/maps/netsc520.c | |||
@@ -52,7 +52,7 @@ | |||
52 | /* partition_info gives details on the logical partitions that the split the | 52 | /* partition_info gives details on the logical partitions that the split the |
53 | * single flash device into. If the size if zero we use up to the end of the | 53 | * single flash device into. If the size if zero we use up to the end of the |
54 | * device. */ | 54 | * device. */ |
55 | static struct mtd_partition partition_info[]={ | 55 | static const struct mtd_partition partition_info[] = { |
56 | { | 56 | { |
57 | .name = "NetSc520 boot kernel", | 57 | .name = "NetSc520 boot kernel", |
58 | .offset = 0, | 58 | .offset = 0, |
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index a577ef8553d0..729579fb654f 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c | |||
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = { | |||
107 | .bankwidth = AMD_BUSWIDTH, | 107 | .bankwidth = AMD_BUSWIDTH, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static struct mtd_partition nettel_amd_partitions[] = { | 110 | static const struct mtd_partition nettel_amd_partitions[] = { |
111 | { | 111 | { |
112 | .name = "SnapGear BIOS config", | 112 | .name = "SnapGear BIOS config", |
113 | .offset = 0x000e0000, | 113 | .offset = 0x000e0000, |
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 51572895c02c..6d9a4d6f9839 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c | |||
@@ -43,7 +43,6 @@ struct platram_info { | |||
43 | struct device *dev; | 43 | struct device *dev; |
44 | struct mtd_info *mtd; | 44 | struct mtd_info *mtd; |
45 | struct map_info map; | 45 | struct map_info map; |
46 | struct resource *area; | ||
47 | struct platdata_mtd_ram *pdata; | 46 | struct platdata_mtd_ram *pdata; |
48 | }; | 47 | }; |
49 | 48 | ||
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev) | |||
97 | 96 | ||
98 | platram_setrw(info, PLATRAM_RO); | 97 | platram_setrw(info, PLATRAM_RO); |
99 | 98 | ||
100 | /* release resources */ | ||
101 | |||
102 | if (info->area) { | ||
103 | release_resource(info->area); | ||
104 | kfree(info->area); | ||
105 | } | ||
106 | |||
107 | if (info->map.virt != NULL) | ||
108 | iounmap(info->map.virt); | ||
109 | |||
110 | kfree(info); | 99 | kfree(info); |
111 | 100 | ||
112 | return 0; | 101 | return 0; |
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev) | |||
147 | info->pdata = pdata; | 136 | info->pdata = pdata; |
148 | 137 | ||
149 | /* get the resource for the memory mapping */ | 138 | /* get the resource for the memory mapping */ |
150 | |||
151 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 139 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
152 | 140 | info->map.virt = devm_ioremap_resource(&pdev->dev, res); | |
153 | if (res == NULL) { | 141 | if (IS_ERR(info->map.virt)) { |
154 | dev_err(&pdev->dev, "no memory resource specified\n"); | 142 | err = PTR_ERR(info->map.virt); |
155 | err = -ENOENT; | 143 | dev_err(&pdev->dev, "failed to ioremap() region\n"); |
156 | goto exit_free; | 144 | goto exit_free; |
157 | } | 145 | } |
158 | 146 | ||
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev) | |||
167 | (char *)pdata->mapname : (char *)pdev->name; | 155 | (char *)pdata->mapname : (char *)pdev->name; |
168 | info->map.bankwidth = pdata->bankwidth; | 156 | info->map.bankwidth = pdata->bankwidth; |
169 | 157 | ||
170 | /* register our usage of the memory area */ | ||
171 | |||
172 | info->area = request_mem_region(res->start, info->map.size, pdev->name); | ||
173 | if (info->area == NULL) { | ||
174 | dev_err(&pdev->dev, "failed to request memory region\n"); | ||
175 | err = -EIO; | ||
176 | goto exit_free; | ||
177 | } | ||
178 | |||
179 | /* remap the memory area */ | ||
180 | |||
181 | info->map.virt = ioremap(res->start, info->map.size); | ||
182 | dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); | 158 | dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); |
183 | 159 | ||
184 | if (info->map.virt == NULL) { | ||
185 | dev_err(&pdev->dev, "failed to ioremap() region\n"); | ||
186 | err = -EIO; | ||
187 | goto exit_free; | ||
188 | } | ||
189 | |||
190 | simple_map_init(&info->map); | 160 | simple_map_init(&info->map); |
191 | 161 | ||
192 | dev_dbg(&pdev->dev, "initialised map, probing for mtd\n"); | 162 | dev_dbg(&pdev->dev, "initialised map, probing for mtd\n"); |
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c index 556a2dfe94c5..4337d279ad83 100644 --- a/drivers/mtd/maps/sbc_gxx.c +++ b/drivers/mtd/maps/sbc_gxx.c | |||
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin); | |||
87 | /* partition_info gives details on the logical partitions that the split the | 87 | /* partition_info gives details on the logical partitions that the split the |
88 | * single flash device into. If the size if zero we use up to the end of the | 88 | * single flash device into. If the size if zero we use up to the end of the |
89 | * device. */ | 89 | * device. */ |
90 | static struct mtd_partition partition_info[]={ | 90 | static const struct mtd_partition partition_info[] = { |
91 | { .name = "SBC-GXx flash boot partition", | 91 | { .name = "SBC-GXx flash boot partition", |
92 | .offset = 0, | 92 | .offset = 0, |
93 | .size = BOOT_PARTITION_SIZE_KiB*1024 }, | 93 | .size = BOOT_PARTITION_SIZE_KiB*1024 }, |
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index 9969fedb1f13..8f177e0acb8c 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c | |||
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = { | |||
43 | .phys = WINDOW_ADDR | 43 | .phys = WINDOW_ADDR |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct mtd_partition ts5500_partitions[] = { | 46 | static const struct mtd_partition ts5500_partitions[] = { |
47 | { | 47 | { |
48 | .name = "Drive A", | 48 | .name = "Drive A", |
49 | .offset = 0, | 49 | .offset = 0, |
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index 00a8190797ec..aef030ca8601 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c | |||
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo; | |||
49 | 49 | ||
50 | /****************************************************************************/ | 50 | /****************************************************************************/ |
51 | 51 | ||
52 | static struct mtd_partition uclinux_romfs[] = { | 52 | static const struct mtd_partition uclinux_romfs[] = { |
53 | { .name = "ROMfs" } | 53 | { .name = "ROMfs" } |
54 | }; | 54 | }; |
55 | 55 | ||
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index d573606b91c2..60bf53df5454 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
644 | } | 644 | } |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * try to support NOMMU mmaps on concatenated devices | ||
648 | * - we don't support subdev spanning as we can't guarantee it'll work | ||
649 | */ | ||
650 | static unsigned long concat_get_unmapped_area(struct mtd_info *mtd, | ||
651 | unsigned long len, | ||
652 | unsigned long offset, | ||
653 | unsigned long flags) | ||
654 | { | ||
655 | struct mtd_concat *concat = CONCAT(mtd); | ||
656 | int i; | ||
657 | |||
658 | for (i = 0; i < concat->num_subdev; i++) { | ||
659 | struct mtd_info *subdev = concat->subdev[i]; | ||
660 | |||
661 | if (offset >= subdev->size) { | ||
662 | offset -= subdev->size; | ||
663 | continue; | ||
664 | } | ||
665 | |||
666 | return mtd_get_unmapped_area(subdev, len, offset, flags); | ||
667 | } | ||
668 | |||
669 | return (unsigned long) -ENOSYS; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * This function constructs a virtual MTD device by concatenating | 647 | * This function constructs a virtual MTD device by concatenating |
674 | * num_devs MTD devices. A pointer to the new device object is | 648 | * num_devs MTD devices. A pointer to the new device object is |
675 | * stored to *new_dev upon success. This function does _not_ | 649 | * stored to *new_dev upon success. This function does _not_ |
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
790 | concat->mtd._unlock = concat_unlock; | 764 | concat->mtd._unlock = concat_unlock; |
791 | concat->mtd._suspend = concat_suspend; | 765 | concat->mtd._suspend = concat_suspend; |
792 | concat->mtd._resume = concat_resume; | 766 | concat->mtd._resume = concat_resume; |
793 | concat->mtd._get_unmapped_area = concat_get_unmapped_area; | ||
794 | 767 | ||
795 | /* | 768 | /* |
796 | * Combine the erase block size info of the subdevices: | 769 | * Combine the erase block size info of the subdevices: |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index e7ea842ba3db..f80e911b8843 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint); | |||
1022 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, | 1022 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, |
1023 | unsigned long offset, unsigned long flags) | 1023 | unsigned long offset, unsigned long flags) |
1024 | { | 1024 | { |
1025 | if (!mtd->_get_unmapped_area) | 1025 | size_t retlen; |
1026 | return -EOPNOTSUPP; | 1026 | void *virt; |
1027 | if (offset >= mtd->size || len > mtd->size - offset) | 1027 | int ret; |
1028 | return -EINVAL; | 1028 | |
1029 | return mtd->_get_unmapped_area(mtd, len, offset, flags); | 1029 | ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL); |
1030 | if (ret) | ||
1031 | return ret; | ||
1032 | if (retlen != len) { | ||
1033 | mtd_unpoint(mtd, offset, retlen); | ||
1034 | return -ENOSYS; | ||
1035 | } | ||
1036 | return (unsigned long)virt; | ||
1030 | } | 1037 | } |
1031 | EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); | 1038 | EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); |
1032 | 1039 | ||
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | |||
1093 | } | 1100 | } |
1094 | EXPORT_SYMBOL_GPL(mtd_panic_write); | 1101 | EXPORT_SYMBOL_GPL(mtd_panic_write); |
1095 | 1102 | ||
1103 | static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, | ||
1104 | struct mtd_oob_ops *ops) | ||
1105 | { | ||
1106 | /* | ||
1107 | * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving | ||
1108 | * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in | ||
1109 | * this case. | ||
1110 | */ | ||
1111 | if (!ops->datbuf) | ||
1112 | ops->len = 0; | ||
1113 | |||
1114 | if (!ops->oobbuf) | ||
1115 | ops->ooblen = 0; | ||
1116 | |||
1117 | if (offs < 0 || offs + ops->len >= mtd->size) | ||
1118 | return -EINVAL; | ||
1119 | |||
1120 | if (ops->ooblen) { | ||
1121 | u64 maxooblen; | ||
1122 | |||
1123 | if (ops->ooboffs >= mtd_oobavail(mtd, ops)) | ||
1124 | return -EINVAL; | ||
1125 | |||
1126 | maxooblen = ((mtd_div_by_ws(mtd->size, mtd) - | ||
1127 | mtd_div_by_ws(offs, mtd)) * | ||
1128 | mtd_oobavail(mtd, ops)) - ops->ooboffs; | ||
1129 | if (ops->ooblen > maxooblen) | ||
1130 | return -EINVAL; | ||
1131 | } | ||
1132 | |||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1096 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | 1136 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) |
1097 | { | 1137 | { |
1098 | int ret_code; | 1138 | int ret_code; |
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | |||
1100 | if (!mtd->_read_oob) | 1140 | if (!mtd->_read_oob) |
1101 | return -EOPNOTSUPP; | 1141 | return -EOPNOTSUPP; |
1102 | 1142 | ||
1143 | ret_code = mtd_check_oob_ops(mtd, from, ops); | ||
1144 | if (ret_code) | ||
1145 | return ret_code; | ||
1146 | |||
1103 | ledtrig_mtd_activity(); | 1147 | ledtrig_mtd_activity(); |
1104 | /* | 1148 | /* |
1105 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics | 1149 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics |
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob); | |||
1119 | int mtd_write_oob(struct mtd_info *mtd, loff_t to, | 1163 | int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
1120 | struct mtd_oob_ops *ops) | 1164 | struct mtd_oob_ops *ops) |
1121 | { | 1165 | { |
1166 | int ret; | ||
1167 | |||
1122 | ops->retlen = ops->oobretlen = 0; | 1168 | ops->retlen = ops->oobretlen = 0; |
1123 | if (!mtd->_write_oob) | 1169 | if (!mtd->_write_oob) |
1124 | return -EOPNOTSUPP; | 1170 | return -EOPNOTSUPP; |
1125 | if (!(mtd->flags & MTD_WRITEABLE)) | 1171 | if (!(mtd->flags & MTD_WRITEABLE)) |
1126 | return -EROFS; | 1172 | return -EROFS; |
1173 | |||
1174 | ret = mtd_check_oob_ops(mtd, to, ops); | ||
1175 | if (ret) | ||
1176 | return ret; | ||
1177 | |||
1127 | ledtrig_mtd_activity(); | 1178 | ledtrig_mtd_activity(); |
1128 | return mtd->_write_oob(mtd, to, ops); | 1179 | return mtd->_write_oob(mtd, to, ops); |
1129 | } | 1180 | } |
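The new mtd_check_oob_ops() helper rejects requests whose data or OOB range falls outside the device before the driver ever sees them. A hedged example of a request that passes those checks (buffer handling and the helper name are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/string.h>

static int read_page_with_oob(struct mtd_info *mtd, loff_t page_addr,
			      u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_AUTO_OOB;	/* use the free bytes of the OOB layout */
	ops.datbuf = data;
	ops.len = mtd->writesize;	/* exactly one page of main data */
	ops.oobbuf = oob;
	ops.ooblen = mtd->oobavail;	/* all free OOB bytes of that page */
	ops.ooboffs = 0;

	return mtd_read_oob(mtd, page_addr, &ops);
}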
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index a308e707392d..be088bccd593 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
101 | return part->parent->_unpoint(part->parent, from + part->offset, len); | 101 | return part->parent->_unpoint(part->parent, from + part->offset, len); |
102 | } | 102 | } |
103 | 103 | ||
104 | static unsigned long part_get_unmapped_area(struct mtd_info *mtd, | ||
105 | unsigned long len, | ||
106 | unsigned long offset, | ||
107 | unsigned long flags) | ||
108 | { | ||
109 | struct mtd_part *part = mtd_to_part(mtd); | ||
110 | |||
111 | offset += part->offset; | ||
112 | return part->parent->_get_unmapped_area(part->parent, len, offset, | ||
113 | flags); | ||
114 | } | ||
115 | |||
116 | static int part_read_oob(struct mtd_info *mtd, loff_t from, | 104 | static int part_read_oob(struct mtd_info *mtd, loff_t from, |
117 | struct mtd_oob_ops *ops) | 105 | struct mtd_oob_ops *ops) |
118 | { | 106 | { |
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, | |||
458 | slave->mtd._unpoint = part_unpoint; | 446 | slave->mtd._unpoint = part_unpoint; |
459 | } | 447 | } |
460 | 448 | ||
461 | if (parent->_get_unmapped_area) | ||
462 | slave->mtd._get_unmapped_area = part_get_unmapped_area; | ||
463 | if (parent->_read_oob) | 449 | if (parent->_read_oob) |
464 | slave->mtd._read_oob = part_read_oob; | 450 | slave->mtd._read_oob = part_read_oob; |
465 | if (parent->_write_oob) | 451 | if (parent->_write_oob) |
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index 7d9080e33865..f07492c6f4b2 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c | |||
@@ -50,7 +50,7 @@ | |||
50 | * Number of free eraseblocks below which GC can also collect low frag | 50 | * Number of free eraseblocks below which GC can also collect low frag |
51 | * blocks. | 51 | * blocks. |
52 | */ | 52 | */ |
53 | #define LOW_FRAG_GC_TRESHOLD 5 | 53 | #define LOW_FRAG_GC_THRESHOLD 5 |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Wear level cost amortization. We want to do wear leveling on the background | 56 | * Wear level cost amortization. We want to do wear leveling on the background |
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) | |||
805 | { | 805 | { |
806 | int idx, stopat; | 806 | int idx, stopat; |
807 | 807 | ||
808 | if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) | 808 | if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD) |
809 | stopat = MTDSWAP_LOWFRAG; | 809 | stopat = MTDSWAP_LOWFRAG; |
810 | else | 810 | else |
811 | stopat = MTDSWAP_HIFRAG; | 811 | stopat = MTDSWAP_HIFRAG; |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 3f2036f31da4..bb48aafed9a2 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx | |||
317 | tristate "NAND support on PXA3xx and Armada 370/XP" | 317 | tristate "NAND support on PXA3xx and Armada 370/XP" |
318 | depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU | 318 | depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU |
319 | help | 319 | help |
320 | |||
320 | This enables the driver for the NAND flash device found on | 321 | This enables the driver for the NAND flash device found on |
321 | PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). | 322 | PXA3xx processors (NFCv1) and also on 32-bit Armada |
323 | platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada | ||
324 | platforms (7K, 8K) (NFCv2). | ||
322 | 325 | ||
323 | config MTD_NAND_SLC_LPC32XX | 326 | config MTD_NAND_SLC_LPC32XX |
324 | tristate "NXP LPC32xx SLC Controller" | 327 | tristate "NXP LPC32xx SLC Controller" |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 6e2db700d923..118a1349aad3 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o | |||
59 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o | 59 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o |
60 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ | 60 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ |
61 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o | 61 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o |
62 | obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o | 62 | obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o |
63 | 63 | ||
64 | nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o | 64 | nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o |
65 | nand-objs += nand_amd.o | 65 | nand-objs += nand_amd.o |
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index dcec9cf4983f..d60ada45c549 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c | |||
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL; | |||
41 | * Define partitions for flash devices | 41 | * Define partitions for flash devices |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static struct mtd_partition partition_info[] = { | 44 | static const struct mtd_partition partition_info[] = { |
45 | { .name = "Kernel", | 45 | { .name = "Kernel", |
46 | .offset = 0, | 46 | .offset = 0, |
47 | .size = 3 * SZ_1M + SZ_512K }, | 47 | .size = 3 * SZ_1M + SZ_512K }, |
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index f25eca79f4e5..90a71a56bc23 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c | |||
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column) | |||
718 | nc->op.addrs[nc->op.naddrs++] = page; | 718 | nc->op.addrs[nc->op.naddrs++] = page; |
719 | nc->op.addrs[nc->op.naddrs++] = page >> 8; | 719 | nc->op.addrs[nc->op.naddrs++] = page >> 8; |
720 | 720 | ||
721 | if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || | 721 | if (chip->options & NAND_ROW_ADDR_3) |
722 | (mtd->writesize <= 512 && chip->chipsize > SZ_32M)) | ||
723 | nc->op.addrs[nc->op.naddrs++] = page >> 16; | 722 | nc->op.addrs[nc->op.naddrs++] = page >> 16; |
724 | } | 723 | } |
725 | } | 724 | } |
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev) | |||
2530 | struct atmel_nand_controller *nc = dev_get_drvdata(dev); | 2529 | struct atmel_nand_controller *nc = dev_get_drvdata(dev); |
2531 | struct atmel_nand *nand; | 2530 | struct atmel_nand *nand; |
2532 | 2531 | ||
2532 | if (nc->pmecc) | ||
2533 | atmel_pmecc_reset(nc->pmecc); | ||
2534 | |||
2533 | list_for_each_entry(nand, &nc->chips, node) { | 2535 | list_for_each_entry(nand, &nc->chips, node) { |
2534 | int i; | 2536 | int i; |
2535 | 2537 | ||
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = { | |||
2547 | .driver = { | 2549 | .driver = { |
2548 | .name = "atmel-nand-controller", | 2550 | .name = "atmel-nand-controller", |
2549 | .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), | 2551 | .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), |
2552 | .pm = &atmel_nand_controller_pm_ops, | ||
2550 | }, | 2553 | }, |
2551 | .probe = atmel_nand_controller_probe, | 2554 | .probe = atmel_nand_controller_probe, |
2552 | .remove = atmel_nand_controller_remove, | 2555 | .remove = atmel_nand_controller_remove, |
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 8268636675ef..fcbe4fd6e684 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c | |||
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user, | |||
765 | } | 765 | } |
766 | EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); | 766 | EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); |
767 | 767 | ||
768 | void atmel_pmecc_reset(struct atmel_pmecc *pmecc) | ||
769 | { | ||
770 | writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
771 | writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
772 | } | ||
773 | EXPORT_SYMBOL_GPL(atmel_pmecc_reset); | ||
774 | |||
768 | int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) | 775 | int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) |
769 | { | 776 | { |
770 | struct atmel_pmecc *pmecc = user->pmecc; | 777 | struct atmel_pmecc *pmecc = user->pmecc; |
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable); | |||
797 | 804 | ||
798 | void atmel_pmecc_disable(struct atmel_pmecc_user *user) | 805 | void atmel_pmecc_disable(struct atmel_pmecc_user *user) |
799 | { | 806 | { |
800 | struct atmel_pmecc *pmecc = user->pmecc; | 807 | atmel_pmecc_reset(user->pmecc); |
801 | |||
802 | writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
803 | writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
804 | mutex_unlock(&user->pmecc->lock); | 808 | mutex_unlock(&user->pmecc->lock); |
805 | } | 809 | } |
806 | EXPORT_SYMBOL_GPL(atmel_pmecc_disable); | 810 | EXPORT_SYMBOL_GPL(atmel_pmecc_disable); |
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev, | |||
855 | 859 | ||
856 | /* Disable all interrupts before registering the PMECC handler. */ | 860 | /* Disable all interrupts before registering the PMECC handler. */ |
857 | writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); | 861 | writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); |
858 | 862 | atmel_pmecc_reset(pmecc); | |
859 | /* Reset the ECC engine */ | ||
860 | writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
861 | writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); | ||
862 | 863 | ||
863 | return pmecc; | 864 | return pmecc; |
864 | } | 865 | } |
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h index a8ddbfca2ea5..817e0dd9fd15 100644 --- a/drivers/mtd/nand/atmel/pmecc.h +++ b/drivers/mtd/nand/atmel/pmecc.h | |||
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, | |||
61 | struct atmel_pmecc_user_req *req); | 61 | struct atmel_pmecc_user_req *req); |
62 | void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); | 62 | void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); |
63 | 63 | ||
64 | void atmel_pmecc_reset(struct atmel_pmecc *pmecc); | ||
64 | int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); | 65 | int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); |
65 | void atmel_pmecc_disable(struct atmel_pmecc_user *user); | 66 | void atmel_pmecc_disable(struct atmel_pmecc_user *user); |
66 | int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user); | 67 | int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user); |
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 9d4a28fa6b73..8ab827edf94e 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i | |||
331 | 331 | ||
332 | ctx->write_byte(mtd, (u8)(page_addr >> 8)); | 332 | ctx->write_byte(mtd, (u8)(page_addr >> 8)); |
333 | 333 | ||
334 | /* One more address cycle for devices > 32MiB */ | 334 | if (this->options & NAND_ROW_ADDR_3) |
335 | if (this->chipsize > (32 << 20)) | ||
336 | ctx->write_byte(mtd, | 335 | ctx->write_byte(mtd, |
337 | ((page_addr >> 16) & 0x0f)); | 336 | ((page_addr >> 16) & 0x0f)); |
338 | } | 337 | } |
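Both the atmel and au1550 hunks replace per-driver chip-size heuristics with a check of the new NAND_ROW_ADDR_3 option. The flag itself is set in nand_base.c, which this excerpt does not show; the sketch below merely restates the removed driver-side condition as the kind of core-side decision the flag encodes (an assumption, not the actual nand_base.c hunk):

#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>

static void set_row_addr_cycles(struct nand_chip *chip, struct mtd_info *mtd)
{
	/*
	 * large-page chips above 128MiB and small-page chips above 32MiB
	 * need a third row-address cycle
	 */
	if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
	    (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
		chip->options |= NAND_ROW_ADDR_3;
}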
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 1fc435f994e1..b01c9804590e 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c | |||
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io; | |||
42 | /* | 42 | /* |
43 | * Define static partitions for flash device | 43 | * Define static partitions for flash device |
44 | */ | 44 | */ |
45 | static struct mtd_partition partition_info[] = { | 45 | static const struct mtd_partition partition_info[] = { |
46 | [0] = { | 46 | [0] = { |
47 | .name = "cmx270-0", | 47 | .name = "cmx270-0", |
48 | .offset = 0, | 48 | .offset = 0, |
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 3087b0ba7b7f..5124f8ae8c04 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c | |||
@@ -10,20 +10,18 @@ | |||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | 13 | */ |
19 | #include <linux/interrupt.h> | 14 | |
20 | #include <linux/delay.h> | 15 | #include <linux/bitfield.h> |
16 | #include <linux/completion.h> | ||
21 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
22 | #include <linux/wait.h> | 18 | #include <linux/interrupt.h> |
23 | #include <linux/mutex.h> | 19 | #include <linux/io.h> |
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/mtd/mtd.h> | ||
22 | #include <linux/mtd/rawnand.h> | ||
26 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> | ||
27 | 25 | ||
28 | #include "denali.h" | 26 | #include "denali.h" |
29 | 27 | ||
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL"); | |||
31 | 29 | ||
32 | #define DENALI_NAND_NAME "denali-nand" | 30 | #define DENALI_NAND_NAME "denali-nand" |
33 | 31 | ||
34 | /* Host Data/Command Interface */ | 32 | /* for Indexed Addressing */ |
35 | #define DENALI_HOST_ADDR 0x00 | 33 | #define DENALI_INDEXED_CTRL 0x00 |
36 | #define DENALI_HOST_DATA 0x10 | 34 | #define DENALI_INDEXED_DATA 0x10 |
37 | 35 | ||
38 | #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ | 36 | #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ |
39 | #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ | 37 | #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ |
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL"); | |||
61 | */ | 59 | */ |
62 | #define DENALI_CLK_X_MULT 6 | 60 | #define DENALI_CLK_X_MULT 6 |
63 | 61 | ||
64 | /* | ||
65 | * this macro allows us to convert from an MTD structure to our own | ||
66 | * device context (denali) structure. | ||
67 | */ | ||
68 | static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) | 62 | static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) |
69 | { | 63 | { |
70 | return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); | 64 | return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); |
71 | } | 65 | } |
72 | 66 | ||
73 | static void denali_host_write(struct denali_nand_info *denali, | 67 | /* |
74 | uint32_t addr, uint32_t data) | 68 | * Direct Addressing - the slave address forms the control information (command |
69 | * type, bank, block, and page address). The slave data is the actual data to | ||
70 | * be transferred. This mode requires 28 bits of address region allocated. | ||
71 | */ | ||
72 | static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) | ||
73 | { | ||
74 | return ioread32(denali->host + addr); | ||
75 | } | ||
76 | |||
77 | static void denali_direct_write(struct denali_nand_info *denali, u32 addr, | ||
78 | u32 data) | ||
75 | { | 79 | { |
76 | iowrite32(addr, denali->host + DENALI_HOST_ADDR); | 80 | iowrite32(data, denali->host + addr); |
77 | iowrite32(data, denali->host + DENALI_HOST_DATA); | 81 | } |
82 | |||
83 | /* | ||
84 | * Indexed Addressing - address translation module intervenes in passing the | ||
85 | * control information. This mode reduces the required address range. The | ||
86 | * control information and transferred data are latched by the registers in | ||
87 | * the translation module. | ||
88 | */ | ||
89 | static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) | ||
90 | { | ||
91 | iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); | ||
92 | return ioread32(denali->host + DENALI_INDEXED_DATA); | ||
93 | } | ||
94 | |||
95 | static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, | ||
96 | u32 data) | ||
97 | { | ||
98 | iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); | ||
99 | iowrite32(data, denali->host + DENALI_INDEXED_DATA); | ||
78 | } | 100 | } |
79 | 101 | ||
80 | /* | 102 | /* |
81 | * Use the configuration feature register to determine the maximum number of | 103 | * Use the configuration feature register to determine the maximum number of |
82 | * banks that the hardware supports. | 104 | * banks that the hardware supports. |
83 | */ | 105 | */ |
84 | static void detect_max_banks(struct denali_nand_info *denali) | 106 | static void denali_detect_max_banks(struct denali_nand_info *denali) |
85 | { | 107 | { |
86 | uint32_t features = ioread32(denali->reg + FEATURES); | 108 | uint32_t features = ioread32(denali->reg + FEATURES); |
87 | 109 | ||
88 | denali->max_banks = 1 << (features & FEATURES__N_BANKS); | 110 | denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); |
89 | 111 | ||
90 | /* the encoding changed from rev 5.0 to 5.1 */ | 112 | /* the encoding changed from rev 5.0 to 5.1 */ |
91 | if (denali->revision < 0x0501) | 113 | if (denali->revision < 0x0501) |
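denali_detect_max_banks() above trades the open-coded mask for FIELD_GET(), which derives the shift from the mask itself. A small illustration of the equivalence, using a stand-in field definition rather than the driver's real FEATURES layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FEATURES__N_BANKS	GENMASK(1, 0)	/* stand-in field, bits [1:0] */

static unsigned int example_max_banks(u32 features)
{
	/* FIELD_GET() masks and shifts in one go; the old form spelled both out */
	return 1 << FIELD_GET(EXAMPLE_FEATURES__N_BANKS, features);
}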
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, | |||
189 | msecs_to_jiffies(1000)); | 211 | msecs_to_jiffies(1000)); |
190 | if (!time_left) { | 212 | if (!time_left) { |
191 | dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", | 213 | dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", |
192 | denali->irq_mask); | 214 | irq_mask); |
193 | return 0; | 215 | return 0; |
194 | } | 216 | } |
195 | 217 | ||
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali) | |||
208 | return irq_status; | 230 | return irq_status; |
209 | } | 231 | } |
210 | 232 | ||
211 | /* | ||
212 | * This helper function setups the registers for ECC and whether or not | ||
213 | * the spare area will be transferred. | ||
214 | */ | ||
215 | static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, | ||
216 | bool transfer_spare) | ||
217 | { | ||
218 | int ecc_en_flag, transfer_spare_flag; | ||
219 | |||
220 | /* set ECC, transfer spare bits if needed */ | ||
221 | ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; | ||
222 | transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; | ||
223 | |||
224 | /* Enable spare area/ECC per user's request. */ | ||
225 | iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE); | ||
226 | iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG); | ||
227 | } | ||
228 | |||
229 | static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | 233 | static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
230 | { | 234 | { |
231 | struct denali_nand_info *denali = mtd_to_denali(mtd); | 235 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
236 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); | ||
232 | int i; | 237 | int i; |
233 | 238 | ||
234 | iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), | ||
235 | denali->host + DENALI_HOST_ADDR); | ||
236 | |||
237 | for (i = 0; i < len; i++) | 239 | for (i = 0; i < len; i++) |
238 | buf[i] = ioread32(denali->host + DENALI_HOST_DATA); | 240 | buf[i] = denali->host_read(denali, addr); |
239 | } | 241 | } |
240 | 242 | ||
241 | static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 243 | static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
242 | { | 244 | { |
243 | struct denali_nand_info *denali = mtd_to_denali(mtd); | 245 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
246 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); | ||
244 | int i; | 247 | int i; |
245 | 248 | ||
246 | iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), | ||
247 | denali->host + DENALI_HOST_ADDR); | ||
248 | |||
249 | for (i = 0; i < len; i++) | 249 | for (i = 0; i < len; i++) |
250 | iowrite32(buf[i], denali->host + DENALI_HOST_DATA); | 250 | denali->host_write(denali, addr, buf[i]); |
251 | } | 251 | } |
252 | 252 | ||
253 | static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) | 253 | static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) |
254 | { | 254 | { |
255 | struct denali_nand_info *denali = mtd_to_denali(mtd); | 255 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
256 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); | ||
256 | uint16_t *buf16 = (uint16_t *)buf; | 257 | uint16_t *buf16 = (uint16_t *)buf; |
257 | int i; | 258 | int i; |
258 | 259 | ||
259 | iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), | ||
260 | denali->host + DENALI_HOST_ADDR); | ||
261 | |||
262 | for (i = 0; i < len / 2; i++) | 260 | for (i = 0; i < len / 2; i++) |
263 | buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); | 261 | buf16[i] = denali->host_read(denali, addr); |
264 | } | 262 | } |
265 | 263 | ||
266 | static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, | 264 | static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, |
267 | int len) | 265 | int len) |
268 | { | 266 | { |
269 | struct denali_nand_info *denali = mtd_to_denali(mtd); | 267 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
268 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); | ||
270 | const uint16_t *buf16 = (const uint16_t *)buf; | 269 | const uint16_t *buf16 = (const uint16_t *)buf; |
271 | int i; | 270 | int i; |
272 | 271 | ||
273 | iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), | ||
274 | denali->host + DENALI_HOST_ADDR); | ||
275 | |||
276 | for (i = 0; i < len / 2; i++) | 272 | for (i = 0; i < len / 2; i++) |
277 | iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); | 273 | denali->host_write(denali, addr, buf16[i]); |
278 | } | 274 | } |
279 | 275 | ||
280 | static uint8_t denali_read_byte(struct mtd_info *mtd) | 276 | static uint8_t denali_read_byte(struct mtd_info *mtd) |
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) | |||
319 | if (ctrl & NAND_CTRL_CHANGE) | 315 | if (ctrl & NAND_CTRL_CHANGE) |
320 | denali_reset_irq(denali); | 316 | denali_reset_irq(denali); |
321 | 317 | ||
322 | denali_host_write(denali, DENALI_BANK(denali) | type, dat); | 318 | denali->host_write(denali, DENALI_BANK(denali) | type, dat); |
323 | } | 319 | } |
324 | 320 | ||
325 | static int denali_dev_ready(struct mtd_info *mtd) | 321 | static int denali_dev_ready(struct mtd_info *mtd) |
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, | |||
389 | return 0; | 385 | return 0; |
390 | } | 386 | } |
391 | 387 | ||
392 | max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; | 388 | max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); |
393 | 389 | ||
394 | /* | 390 | /* |
395 | * The register holds the maximum of per-sector corrected bitflips. | 391 | * The register holds the maximum of per-sector corrected bitflips. |
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, | |||
402 | return max_bitflips; | 398 | return max_bitflips; |
403 | } | 399 | } |
404 | 400 | ||
405 | #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) | ||
406 | #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) | ||
407 | #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) | ||
408 | #define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE) | ||
409 | #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) | ||
410 | #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) | ||
411 | |||
412 | static int denali_sw_ecc_fixup(struct mtd_info *mtd, | 401 | static int denali_sw_ecc_fixup(struct mtd_info *mtd, |
413 | struct denali_nand_info *denali, | 402 | struct denali_nand_info *denali, |
414 | unsigned long *uncor_ecc_flags, uint8_t *buf) | 403 | unsigned long *uncor_ecc_flags, uint8_t *buf) |
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, | |||
426 | 415 | ||
427 | do { | 416 | do { |
428 | err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); | 417 | err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); |
429 | err_sector = ECC_SECTOR(err_addr); | 418 | err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); |
430 | err_byte = ECC_BYTE(err_addr); | 419 | err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); |
431 | 420 | ||
432 | err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); | 421 | err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); |
433 | err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); | 422 | err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, |
434 | err_device = ECC_ERR_DEVICE(err_cor_info); | 423 | err_cor_info); |
424 | err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, | ||
425 | err_cor_info); | ||
435 | 426 | ||
436 | /* reset the bitflip counter when crossing ECC sector */ | 427 | /* reset the bitflip counter when crossing ECC sector */ |
437 | if (err_sector != prev_sector) | 428 | if (err_sector != prev_sector) |
438 | bitflips = 0; | 429 | bitflips = 0; |
439 | 430 | ||
440 | if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { | 431 | if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { |
441 | /* | 432 | /* |
442 | * Check later if this is a real ECC error, or | 433 | * Check later if this is a real ECC error, or |
443 | * an erased sector. | 434 | * an erased sector. |
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, | |||
467 | } | 458 | } |
468 | 459 | ||
469 | prev_sector = err_sector; | 460 | prev_sector = err_sector; |
470 | } while (!ECC_LAST_ERR(err_cor_info)); | 461 | } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); |
471 | 462 | ||
472 | /* | 463 | /* |
473 | * Once handle all ecc errors, controller will trigger a | 464 | * Once handle all ECC errors, controller will trigger an |
474 | * ECC_TRANSACTION_DONE interrupt, so here just wait for | 465 | * ECC_TRANSACTION_DONE interrupt. |
475 | * a while for this interrupt | ||
476 | */ | 466 | */ |
477 | irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); | 467 | irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); |
478 | if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) | 468 | if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) |
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, | |||
481 | return max_bitflips; | 471 | return max_bitflips; |
482 | } | 472 | } |
483 | 473 | ||
484 | /* programs the controller to either enable/disable DMA transfers */ | ||
485 | static void denali_enable_dma(struct denali_nand_info *denali, bool en) | ||
486 | { | ||
487 | iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE); | ||
488 | ioread32(denali->reg + DMA_ENABLE); | ||
489 | } | ||
490 | |||
491 | static void denali_setup_dma64(struct denali_nand_info *denali, | 474 | static void denali_setup_dma64(struct denali_nand_info *denali, |
492 | dma_addr_t dma_addr, int page, int write) | 475 | dma_addr_t dma_addr, int page, int write) |
493 | { | 476 | { |
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali, | |||
502 | * 1. setup transfer type, interrupt when complete, | 485 | * 1. setup transfer type, interrupt when complete, |
503 | * burst len = 64 bytes, the number of pages | 486 | * burst len = 64 bytes, the number of pages |
504 | */ | 487 | */ |
505 | denali_host_write(denali, mode, | 488 | denali->host_write(denali, mode, |
506 | 0x01002000 | (64 << 16) | (write << 8) | page_count); | 489 | 0x01002000 | (64 << 16) | (write << 8) | page_count); |
507 | 490 | ||
508 | /* 2. set memory low address */ | 491 | /* 2. set memory low address */ |
509 | denali_host_write(denali, mode, dma_addr); | 492 | denali->host_write(denali, mode, lower_32_bits(dma_addr)); |
510 | 493 | ||
511 | /* 3. set memory high address */ | 494 | /* 3. set memory high address */ |
512 | denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); | 495 | denali->host_write(denali, mode, upper_32_bits(dma_addr)); |
513 | } | 496 | } |
514 | 497 | ||
515 | static void denali_setup_dma32(struct denali_nand_info *denali, | 498 | static void denali_setup_dma32(struct denali_nand_info *denali, |
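denali_setup_dma64() now splits the DMA address with lower_32_bits()/upper_32_bits() instead of a cast-and-shift. A minimal sketch of that split, kept separate from the register programming so the helpers' behaviour is clear even when dma_addr_t is only 32 bits wide:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_split_dma_addr(dma_addr_t dma_addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(dma_addr);	/* bits 31:0 */
	*hi = upper_32_bits(dma_addr);	/* bits 63:32, or 0 when dma_addr_t is 32-bit */
}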
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali, | |||
523 | /* DMA is a four step process */ | 506 | /* DMA is a four step process */ |
524 | 507 | ||
525 | /* 1. setup transfer type and # of pages */ | 508 | /* 1. setup transfer type and # of pages */ |
526 | denali_host_write(denali, mode | page, | 509 | denali->host_write(denali, mode | page, |
527 | 0x2000 | (write << 8) | page_count); | 510 | 0x2000 | (write << 8) | page_count); |
528 | 511 | ||
529 | /* 2. set memory high address bits 23:8 */ | 512 | /* 2. set memory high address bits 23:8 */ |
530 | denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); | 513 | denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); |
531 | 514 | ||
532 | /* 3. set memory low address bits 23:8 */ | 515 | /* 3. set memory low address bits 23:8 */ |
533 | denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); | 516 | denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); |
534 | 517 | ||
535 | /* 4. interrupt when complete, burst len = 64 bytes */ | 518 | /* 4. interrupt when complete, burst len = 64 bytes */ |
536 | denali_host_write(denali, mode | 0x14000, 0x2400); | 519 | denali->host_write(denali, mode | 0x14000, 0x2400); |
537 | } | ||
538 | |||
539 | static void denali_setup_dma(struct denali_nand_info *denali, | ||
540 | dma_addr_t dma_addr, int page, int write) | ||
541 | { | ||
542 | if (denali->caps & DENALI_CAP_DMA_64BIT) | ||
543 | denali_setup_dma64(denali, dma_addr, page, write); | ||
544 | else | ||
545 | denali_setup_dma32(denali, dma_addr, page, write); | ||
546 | } | 520 | } |
547 | 521 | ||
548 | static int denali_pio_read(struct denali_nand_info *denali, void *buf, | 522 | static int denali_pio_read(struct denali_nand_info *denali, void *buf, |
549 | size_t size, int page, int raw) | 523 | size_t size, int page, int raw) |
550 | { | 524 | { |
551 | uint32_t addr = DENALI_BANK(denali) | page; | 525 | u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; |
552 | uint32_t *buf32 = (uint32_t *)buf; | 526 | uint32_t *buf32 = (uint32_t *)buf; |
553 | uint32_t irq_status, ecc_err_mask; | 527 | uint32_t irq_status, ecc_err_mask; |
554 | int i; | 528 | int i; |
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, | |||
560 | 534 | ||
561 | denali_reset_irq(denali); | 535 | denali_reset_irq(denali); |
562 | 536 | ||
563 | iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); | ||
564 | for (i = 0; i < size / 4; i++) | 537 | for (i = 0; i < size / 4; i++) |
565 | *buf32++ = ioread32(denali->host + DENALI_HOST_DATA); | 538 | *buf32++ = denali->host_read(denali, addr); |
566 | 539 | ||
567 | irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); | 540 | irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); |
568 | if (!(irq_status & INTR__PAGE_XFER_INC)) | 541 | if (!(irq_status & INTR__PAGE_XFER_INC)) |
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, | |||
577 | static int denali_pio_write(struct denali_nand_info *denali, | 550 | static int denali_pio_write(struct denali_nand_info *denali, |
578 | const void *buf, size_t size, int page, int raw) | 551 | const void *buf, size_t size, int page, int raw) |
579 | { | 552 | { |
580 | uint32_t addr = DENALI_BANK(denali) | page; | 553 | u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; |
581 | const uint32_t *buf32 = (uint32_t *)buf; | 554 | const uint32_t *buf32 = (uint32_t *)buf; |
582 | uint32_t irq_status; | 555 | uint32_t irq_status; |
583 | int i; | 556 | int i; |
584 | 557 | ||
585 | denali_reset_irq(denali); | 558 | denali_reset_irq(denali); |
586 | 559 | ||
587 | iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); | ||
588 | for (i = 0; i < size / 4; i++) | 560 | for (i = 0; i < size / 4; i++) |
589 | iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); | 561 | denali->host_write(denali, addr, *buf32++); |
590 | 562 | ||
591 | irq_status = denali_wait_for_irq(denali, | 563 | irq_status = denali_wait_for_irq(denali, |
592 | INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); | 564 | INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); |
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, | |||
635 | ecc_err_mask = INTR__ECC_ERR; | 607 | ecc_err_mask = INTR__ECC_ERR; |
636 | } | 608 | } |
637 | 609 | ||
638 | denali_enable_dma(denali, true); | 610 | iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); |
639 | 611 | ||
640 | denali_reset_irq(denali); | 612 | denali_reset_irq(denali); |
641 | denali_setup_dma(denali, dma_addr, page, write); | 613 | denali->setup_dma(denali, dma_addr, page, write); |
642 | 614 | ||
643 | /* wait for operation to complete */ | ||
644 | irq_status = denali_wait_for_irq(denali, irq_mask); | 615 | irq_status = denali_wait_for_irq(denali, irq_mask); |
645 | if (!(irq_status & INTR__DMA_CMD_COMP)) | 616 | if (!(irq_status & INTR__DMA_CMD_COMP)) |
646 | ret = -EIO; | 617 | ret = -EIO; |
647 | else if (irq_status & ecc_err_mask) | 618 | else if (irq_status & ecc_err_mask) |
648 | ret = -EBADMSG; | 619 | ret = -EBADMSG; |
649 | 620 | ||
650 | denali_enable_dma(denali, false); | 621 | iowrite32(0, denali->reg + DMA_ENABLE); |
622 | |||
651 | dma_unmap_single(denali->dev, dma_addr, size, dir); | 623 | dma_unmap_single(denali->dev, dma_addr, size, dir); |
652 | 624 | ||
653 | if (irq_status & INTR__ERASED_PAGE) | 625 | if (irq_status & INTR__ERASED_PAGE) |
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, | |||
659 | static int denali_data_xfer(struct denali_nand_info *denali, void *buf, | 631 | static int denali_data_xfer(struct denali_nand_info *denali, void *buf, |
660 | size_t size, int page, int raw, int write) | 632 | size_t size, int page, int raw, int write) |
661 | { | 633 | { |
662 | setup_ecc_for_xfer(denali, !raw, raw); | 634 | iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); |
635 | iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, | ||
636 | denali->reg + TRANSFER_SPARE_REG); | ||
663 | 637 | ||
664 | if (denali->dma_avail) | 638 | if (denali->dma_avail) |
665 | return denali_dma_xfer(denali, buf, size, page, raw, write); | 639 | return denali_dma_xfer(denali, buf, size, page, raw, write); |
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page) | |||
970 | 944 | ||
971 | denali_reset_irq(denali); | 945 | denali_reset_irq(denali); |
972 | 946 | ||
973 | denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, | 947 | denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, |
974 | DENALI_ERASE); | 948 | DENALI_ERASE); |
975 | 949 | ||
976 | /* wait for erase to complete or failure to occur */ | 950 | /* wait for erase to complete or failure to occur */ |
977 | irq_status = denali_wait_for_irq(denali, | 951 | irq_status = denali_wait_for_irq(denali, |
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1009 | 983 | ||
1010 | tmp = ioread32(denali->reg + ACC_CLKS); | 984 | tmp = ioread32(denali->reg + ACC_CLKS); |
1011 | tmp &= ~ACC_CLKS__VALUE; | 985 | tmp &= ~ACC_CLKS__VALUE; |
1012 | tmp |= acc_clks; | 986 | tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); |
1013 | iowrite32(tmp, denali->reg + ACC_CLKS); | 987 | iowrite32(tmp, denali->reg + ACC_CLKS); |
1014 | 988 | ||
1015 | /* tRWH -> RE_2_WE */ | 989 | /* tRWH -> RE_2_WE */ |
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1018 | 992 | ||
1019 | tmp = ioread32(denali->reg + RE_2_WE); | 993 | tmp = ioread32(denali->reg + RE_2_WE); |
1020 | tmp &= ~RE_2_WE__VALUE; | 994 | tmp &= ~RE_2_WE__VALUE; |
1021 | tmp |= re_2_we; | 995 | tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); |
1022 | iowrite32(tmp, denali->reg + RE_2_WE); | 996 | iowrite32(tmp, denali->reg + RE_2_WE); |
1023 | 997 | ||
1024 | /* tRHZ -> RE_2_RE */ | 998 | /* tRHZ -> RE_2_RE */ |
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1027 | 1001 | ||
1028 | tmp = ioread32(denali->reg + RE_2_RE); | 1002 | tmp = ioread32(denali->reg + RE_2_RE); |
1029 | tmp &= ~RE_2_RE__VALUE; | 1003 | tmp &= ~RE_2_RE__VALUE; |
1030 | tmp |= re_2_re; | 1004 | tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); |
1031 | iowrite32(tmp, denali->reg + RE_2_RE); | 1005 | iowrite32(tmp, denali->reg + RE_2_RE); |
1032 | 1006 | ||
1033 | /* tWHR -> WE_2_RE */ | 1007 | /* |
1034 | we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); | 1008 | * tCCS, tWHR -> WE_2_RE |
1009 | * | ||
1010 | * With WE_2_RE properly set, the Denali controller automatically takes | ||
1011 | * care of the delay; the driver need not set NAND_WAIT_TCCS. | ||
1012 | */ | ||
1013 | we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), | ||
1014 | t_clk); | ||
1035 | we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); | 1015 | we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); |
1036 | 1016 | ||
1037 | tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); | 1017 | tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); |
1038 | tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; | 1018 | tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; |
1039 | tmp |= we_2_re; | 1019 | tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); |
1040 | iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); | 1020 | iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); |
1041 | 1021 | ||
1042 | /* tADL -> ADDR_2_DATA */ | 1022 | /* tADL -> ADDR_2_DATA */ |
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1050 | addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); | 1030 | addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); |
1051 | 1031 | ||
1052 | tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); | 1032 | tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); |
1053 | tmp &= ~addr_2_data_mask; | 1033 | tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
1054 | tmp |= addr_2_data; | 1034 | tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); |
1055 | iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); | 1035 | iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); |
1056 | 1036 | ||
1057 | /* tREH, tWH -> RDWR_EN_HI_CNT */ | 1037 | /* tREH, tWH -> RDWR_EN_HI_CNT */ |
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1061 | 1041 | ||
1062 | tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); | 1042 | tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); |
1063 | tmp &= ~RDWR_EN_HI_CNT__VALUE; | 1043 | tmp &= ~RDWR_EN_HI_CNT__VALUE; |
1064 | tmp |= rdwr_en_hi; | 1044 | tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); |
1065 | iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); | 1045 | iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); |
1066 | 1046 | ||
1067 | /* tRP, tWP -> RDWR_EN_LO_CNT */ | 1047 | /* tRP, tWP -> RDWR_EN_LO_CNT */ |
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1075 | 1055 | ||
1076 | tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); | 1056 | tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); |
1077 | tmp &= ~RDWR_EN_LO_CNT__VALUE; | 1057 | tmp &= ~RDWR_EN_LO_CNT__VALUE; |
1078 | tmp |= rdwr_en_lo; | 1058 | tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); |
1079 | iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); | 1059 | iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); |
1080 | 1060 | ||
1081 | /* tCS, tCEA -> CS_SETUP_CNT */ | 1061 | /* tCS, tCEA -> CS_SETUP_CNT */ |
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, | |||
1086 | 1066 | ||
1087 | tmp = ioread32(denali->reg + CS_SETUP_CNT); | 1067 | tmp = ioread32(denali->reg + CS_SETUP_CNT); |
1088 | tmp &= ~CS_SETUP_CNT__VALUE; | 1068 | tmp &= ~CS_SETUP_CNT__VALUE; |
1089 | tmp |= cs_setup; | 1069 | tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); |
1090 | iowrite32(tmp, denali->reg + CS_SETUP_CNT); | 1070 | iowrite32(tmp, denali->reg + CS_SETUP_CNT); |
1091 | 1071 | ||
1092 | return 0; | 1072 | return 0; |
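Every timing register update in denali_setup_data_interface() now follows the same read-modify-write shape with FIELD_PREP(). A generic sketch of that shape, with an illustrative field mask rather than the driver's real ACC_CLKS definition:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EX_ACC_CLKS__VALUE	GENMASK(3, 0)	/* stand-in field definition */

static void example_update_field(void __iomem *reg, unsigned int acc_clks)
{
	u32 tmp = ioread32(reg);

	/* clear the whole field, then place the new value at the right position */
	tmp &= ~EX_ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(EX_ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, reg);
}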
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali) | |||
1131 | * if this value is 0, just let it be. | 1111 | * if this value is 0, just let it be. |
1132 | */ | 1112 | */ |
1133 | denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); | 1113 | denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); |
1134 | detect_max_banks(denali); | 1114 | denali_detect_max_banks(denali); |
1135 | iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); | 1115 | iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); |
1136 | iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); | 1116 | iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); |
1137 | 1117 | ||
1138 | iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); | 1118 | iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); |
1139 | |||
1140 | /* Should set value for these registers when init */ | ||
1141 | iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES); | ||
1142 | iowrite32(1, denali->reg + ECC_ENABLE); | ||
1143 | } | 1119 | } |
1144 | 1120 | ||
1145 | int denali_calc_ecc_bytes(int step_size, int strength) | 1121 | int denali_calc_ecc_bytes(int step_size, int strength) |
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = { | |||
1211 | .free = denali_ooblayout_free, | 1187 | .free = denali_ooblayout_free, |
1212 | }; | 1188 | }; |
1213 | 1189 | ||
1214 | /* initialize driver data structures */ | ||
1215 | static void denali_drv_init(struct denali_nand_info *denali) | ||
1216 | { | ||
1217 | /* | ||
1218 | * the completion object will be used to notify | ||
1219 | * the callee that the interrupt is done | ||
1220 | */ | ||
1221 | init_completion(&denali->complete); | ||
1222 | |||
1223 | /* | ||
1224 | * the spinlock will be used to synchronize the ISR with any | ||
1225 | * element that might be access shared data (interrupt status) | ||
1226 | */ | ||
1227 | spin_lock_init(&denali->irq_lock); | ||
1228 | } | ||
1229 | |||
1230 | static int denali_multidev_fixup(struct denali_nand_info *denali) | 1190 | static int denali_multidev_fixup(struct denali_nand_info *denali) |
1231 | { | 1191 | { |
1232 | struct nand_chip *chip = &denali->nand; | 1192 | struct nand_chip *chip = &denali->nand; |
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali) | |||
1282 | { | 1242 | { |
1283 | struct nand_chip *chip = &denali->nand; | 1243 | struct nand_chip *chip = &denali->nand; |
1284 | struct mtd_info *mtd = nand_to_mtd(chip); | 1244 | struct mtd_info *mtd = nand_to_mtd(chip); |
1245 | u32 features = ioread32(denali->reg + FEATURES); | ||
1285 | int ret; | 1246 | int ret; |
1286 | 1247 | ||
1287 | mtd->dev.parent = denali->dev; | 1248 | mtd->dev.parent = denali->dev; |
1288 | denali_hw_init(denali); | 1249 | denali_hw_init(denali); |
1289 | denali_drv_init(denali); | 1250 | |
1251 | init_completion(&denali->complete); | ||
1252 | spin_lock_init(&denali->irq_lock); | ||
1290 | 1253 | ||
1291 | denali_clear_irq_all(denali); | 1254 | denali_clear_irq_all(denali); |
1292 | 1255 | ||
1293 | /* Request IRQ after all the hardware initialization is finished */ | ||
1294 | ret = devm_request_irq(denali->dev, denali->irq, denali_isr, | 1256 | ret = devm_request_irq(denali->dev, denali->irq, denali_isr, |
1295 | IRQF_SHARED, DENALI_NAND_NAME, denali); | 1257 | IRQF_SHARED, DENALI_NAND_NAME, denali); |
1296 | if (ret) { | 1258 | if (ret) { |
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali) | |||
1308 | if (!mtd->name) | 1270 | if (!mtd->name) |
1309 | mtd->name = "denali-nand"; | 1271 | mtd->name = "denali-nand"; |
1310 | 1272 | ||
1311 | /* register the driver with the NAND core subsystem */ | ||
1312 | chip->select_chip = denali_select_chip; | 1273 | chip->select_chip = denali_select_chip; |
1313 | chip->read_byte = denali_read_byte; | 1274 | chip->read_byte = denali_read_byte; |
1314 | chip->write_byte = denali_write_byte; | 1275 | chip->write_byte = denali_write_byte; |
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali) | |||
1317 | chip->dev_ready = denali_dev_ready; | 1278 | chip->dev_ready = denali_dev_ready; |
1318 | chip->waitfunc = denali_waitfunc; | 1279 | chip->waitfunc = denali_waitfunc; |
1319 | 1280 | ||
1281 | if (features & FEATURES__INDEX_ADDR) { | ||
1282 | denali->host_read = denali_indexed_read; | ||
1283 | denali->host_write = denali_indexed_write; | ||
1284 | } else { | ||
1285 | denali->host_read = denali_direct_read; | ||
1286 | denali->host_write = denali_direct_write; | ||
1287 | } | ||
1288 | |||
1320 | /* clk rate info is needed for setup_data_interface */ | 1289 | /* clk rate info is needed for setup_data_interface */ |
1321 | if (denali->clk_x_rate) | 1290 | if (denali->clk_x_rate) |
1322 | chip->setup_data_interface = denali_setup_data_interface; | 1291 | chip->setup_data_interface = denali_setup_data_interface; |
1323 | 1292 | ||
1324 | /* | ||
1325 | * scan for NAND devices attached to the controller | ||
1326 | * this is the first stage in a two step process to register | ||
1327 | * with the nand subsystem | ||
1328 | */ | ||
1329 | ret = nand_scan_ident(mtd, denali->max_banks, NULL); | 1293 | ret = nand_scan_ident(mtd, denali->max_banks, NULL); |
1330 | if (ret) | 1294 | if (ret) |
1331 | goto disable_irq; | 1295 | goto disable_irq; |
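The probe-time selection above stores either the direct or the indexed accessors in the per-controller structure, so everything downstream is bus-mode agnostic. A simplified sketch of the idea; the struct and helper names here are illustrative, only the host_read()/host_write() hooks mirror the driver:

#include <linux/io.h>
#include <linux/types.h>

struct example_ctrl {
	void __iomem *host;
	u32 (*host_read)(struct example_ctrl *ctrl, u32 addr);
	void (*host_write)(struct example_ctrl *ctrl, u32 addr, u32 data);
};

static u32 example_read_status(struct example_ctrl *ctrl, u32 status_addr)
{
	/* callers no longer care whether the bus uses direct or indexed addressing */
	return ctrl->host_read(ctrl, status_addr);
}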
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali) | |||
1347 | if (denali->dma_avail) { | 1311 | if (denali->dma_avail) { |
1348 | chip->options |= NAND_USE_BOUNCE_BUFFER; | 1312 | chip->options |= NAND_USE_BOUNCE_BUFFER; |
1349 | chip->buf_align = 16; | 1313 | chip->buf_align = 16; |
1314 | if (denali->caps & DENALI_CAP_DMA_64BIT) | ||
1315 | denali->setup_dma = denali_setup_dma64; | ||
1316 | else | ||
1317 | denali->setup_dma = denali_setup_dma32; | ||
1350 | } | 1318 | } |
1351 | 1319 | ||
1352 | /* | ||
1353 | * second stage of the NAND scan | ||
1354 | * this stage requires information regarding ECC and | ||
1355 | * bad block management. | ||
1356 | */ | ||
1357 | |||
1358 | chip->bbt_options |= NAND_BBT_USE_FLASH; | 1320 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
1359 | chip->bbt_options |= NAND_BBT_NO_OOB; | 1321 | chip->bbt_options |= NAND_BBT_NO_OOB; |
1360 | |||
1361 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; | 1322 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; |
1362 | |||
1363 | /* no subpage writes on denali */ | ||
1364 | chip->options |= NAND_NO_SUBPAGE_WRITE; | 1323 | chip->options |= NAND_NO_SUBPAGE_WRITE; |
1365 | 1324 | ||
1366 | ret = denali_ecc_setup(mtd, chip, denali); | 1325 | ret = denali_ecc_setup(mtd, chip, denali); |
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali) | |||
1373 | "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", | 1332 | "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", |
1374 | chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); | 1333 | chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); |
1375 | 1334 | ||
1376 | iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), | 1335 | iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | |
1336 | FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), | ||
1377 | denali->reg + ECC_CORRECTION); | 1337 | denali->reg + ECC_CORRECTION); |
1378 | iowrite32(mtd->erasesize / mtd->writesize, | 1338 | iowrite32(mtd->erasesize / mtd->writesize, |
1379 | denali->reg + PAGES_PER_BLOCK); | 1339 | denali->reg + PAGES_PER_BLOCK); |
1380 | iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, | 1340 | iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, |
1381 | denali->reg + DEVICE_WIDTH); | 1341 | denali->reg + DEVICE_WIDTH); |
1342 | iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, | ||
1343 | denali->reg + TWO_ROW_ADDR_CYCLES); | ||
1382 | iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); | 1344 | iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); |
1383 | iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); | 1345 | iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); |
1384 | 1346 | ||
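With MAKE_ECC_CORRECTION() dropped from denali.h, the ECC_CORRECTION value is composed from two FIELD_PREP() calls as in the hunk above. A self-contained sketch of composing one register value from two fields, using stand-in masks:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_ECC_CORRECTION__VALUE		GENMASK(4, 0)	/* stand-in masks */
#define EX_ECC_CORRECTION__ERASE_THRESHOLD	GENMASK(31, 16)

static u32 example_ecc_correction(unsigned int strength)
{
	/* each FIELD_PREP() shifts its value into place; OR them into one word */
	return FIELD_PREP(EX_ECC_CORRECTION__ERASE_THRESHOLD, 1) |
	       FIELD_PREP(EX_ECC_CORRECTION__VALUE, strength);
}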
@@ -1441,7 +1403,6 @@ disable_irq: | |||
1441 | } | 1403 | } |
1442 | EXPORT_SYMBOL(denali_init); | 1404 | EXPORT_SYMBOL(denali_init); |
1443 | 1405 | ||
1444 | /* driver exit point */ | ||
1445 | void denali_remove(struct denali_nand_info *denali) | 1406 | void denali_remove(struct denali_nand_info *denali) |
1446 | { | 1407 | { |
1447 | struct mtd_info *mtd = nand_to_mtd(&denali->nand); | 1408 | struct mtd_info *mtd = nand_to_mtd(&denali->nand); |
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index 9239e6793e6e..2911066dacac 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h | |||
@@ -10,18 +10,16 @@ | |||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | 13 | */ |
19 | 14 | ||
20 | #ifndef __DENALI_H__ | 15 | #ifndef __DENALI_H__ |
21 | #define __DENALI_H__ | 16 | #define __DENALI_H__ |
22 | 17 | ||
23 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/completion.h> | ||
24 | #include <linux/mtd/rawnand.h> | 20 | #include <linux/mtd/rawnand.h> |
21 | #include <linux/spinlock_types.h> | ||
22 | #include <linux/types.h> | ||
25 | 23 | ||
26 | #define DEVICE_RESET 0x0 | 24 | #define DEVICE_RESET 0x0 |
27 | #define DEVICE_RESET__BANK(bank) BIT(bank) | 25 | #define DEVICE_RESET__BANK(bank) BIT(bank) |
@@ -111,9 +109,6 @@ | |||
111 | #define ECC_CORRECTION 0x1b0 | 109 | #define ECC_CORRECTION 0x1b0 |
112 | #define ECC_CORRECTION__VALUE GENMASK(4, 0) | 110 | #define ECC_CORRECTION__VALUE GENMASK(4, 0) |
113 | #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) | 111 | #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) |
114 | #define MAKE_ECC_CORRECTION(val, thresh) \ | ||
115 | (((val) & (ECC_CORRECTION__VALUE)) | \ | ||
116 | (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD))) | ||
117 | 112 | ||
118 | #define READ_MODE 0x1c0 | 113 | #define READ_MODE 0x1c0 |
119 | #define READ_MODE__VALUE GENMASK(3, 0) | 114 | #define READ_MODE__VALUE GENMASK(3, 0) |
@@ -255,13 +250,13 @@ | |||
255 | 250 | ||
256 | #define ECC_ERROR_ADDRESS 0x630 | 251 | #define ECC_ERROR_ADDRESS 0x630 |
257 | #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) | 252 | #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) |
258 | #define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) | 253 | #define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12) |
259 | 254 | ||
260 | #define ERR_CORRECTION_INFO 0x640 | 255 | #define ERR_CORRECTION_INFO 0x640 |
261 | #define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) | 256 | #define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0) |
262 | #define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) | 257 | #define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8) |
263 | #define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) | 258 | #define ERR_CORRECTION_INFO__UNCOR BIT(14) |
264 | #define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) | 259 | #define ERR_CORRECTION_INFO__LAST_ERR BIT(15) |
265 | 260 | ||
266 | #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) | 261 | #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) |
267 | #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) | 262 | #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) |
@@ -310,23 +305,24 @@ struct denali_nand_info { | |||
310 | struct device *dev; | 305 | struct device *dev; |
311 | void __iomem *reg; /* Register Interface */ | 306 | void __iomem *reg; /* Register Interface */ |
312 | void __iomem *host; /* Host Data/Command Interface */ | 307 | void __iomem *host; /* Host Data/Command Interface */ |
313 | |||
314 | /* elements used by ISR */ | ||
315 | struct completion complete; | 308 | struct completion complete; |
316 | spinlock_t irq_lock; | 309 | spinlock_t irq_lock; /* protect irq_mask and irq_status */ |
317 | uint32_t irq_mask; | 310 | u32 irq_mask; /* interrupts we are waiting for */ |
318 | uint32_t irq_status; | 311 | u32 irq_status; /* interrupts that have happened */ |
319 | int irq; | 312 | int irq; |
320 | 313 | void *buf; /* for syndrome layout conversion */ | |
321 | void *buf; | ||
322 | dma_addr_t dma_addr; | 314 | dma_addr_t dma_addr; |
323 | int dma_avail; | 315 | int dma_avail; /* can support DMA? */ |
324 | int devs_per_cs; /* devices connected in parallel */ | 316 | int devs_per_cs; /* devices connected in parallel */ |
325 | int oob_skip_bytes; | 317 | int oob_skip_bytes; /* number of bytes reserved for BBM */ |
326 | int max_banks; | 318 | int max_banks; |
327 | unsigned int revision; | 319 | unsigned int revision; /* IP revision */ |
328 | unsigned int caps; | 320 | unsigned int caps; /* IP capability (or quirk) */ |
329 | const struct nand_ecc_caps *ecc_caps; | 321 | const struct nand_ecc_caps *ecc_caps; |
322 | u32 (*host_read)(struct denali_nand_info *denali, u32 addr); | ||
323 | void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data); | ||
324 | void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr, | ||
325 | int page, int write); | ||
330 | }; | 326 | }; |
331 | 327 | ||
332 | #define DENALI_CAP_HW_ECC_FIXUP BIT(0) | 328 | #define DENALI_CAP_HW_ECC_FIXUP BIT(0) |
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 56e2e177644d..cfd33e6ca77f 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c | |||
@@ -12,15 +12,16 @@ | |||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
13 | * more details. | 13 | * more details. |
14 | */ | 14 | */ |
15 | |||
15 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
16 | #include <linux/err.h> | 17 | #include <linux/err.h> |
17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
18 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
24 | #include <linux/platform_device.h> | ||
24 | 25 | ||
25 | #include "denali.h" | 26 | #include "denali.h" |
26 | 27 | ||
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = { | |||
155 | .of_match_table = denali_nand_dt_ids, | 156 | .of_match_table = denali_nand_dt_ids, |
156 | }, | 157 | }, |
157 | }; | 158 | }; |
158 | |||
159 | module_platform_driver(denali_dt_driver); | 159 | module_platform_driver(denali_dt_driver); |
160 | 160 | ||
161 | MODULE_LICENSE("GPL"); | 161 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index 81370c79aa48..57fb7ae31412 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c | |||
@@ -11,6 +11,9 @@ | |||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | */ | 13 | */ |
14 | |||
15 | #include <linux/errno.h> | ||
16 | #include <linux/io.h> | ||
14 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
16 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
@@ -106,7 +109,6 @@ failed_remap_reg: | |||
106 | return ret; | 109 | return ret; |
107 | } | 110 | } |
108 | 111 | ||
109 | /* driver exit point */ | ||
110 | static void denali_pci_remove(struct pci_dev *dev) | 112 | static void denali_pci_remove(struct pci_dev *dev) |
111 | { | 113 | { |
112 | struct denali_nand_info *denali = pci_get_drvdata(dev); | 114 | struct denali_nand_info *denali = pci_get_drvdata(dev); |
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = { | |||
122 | .probe = denali_pci_probe, | 124 | .probe = denali_pci_probe, |
123 | .remove = denali_pci_remove, | 125 | .remove = denali_pci_remove, |
124 | }; | 126 | }; |
125 | |||
126 | module_pci_driver(denali_pci_driver); | 127 | module_pci_driver(denali_pci_driver); |
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index c3aa53caab5c..72671dc52e2e 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c | |||
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu | |||
705 | if (page_addr != -1) { | 705 | if (page_addr != -1) { |
706 | WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); | 706 | WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); |
707 | WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); | 707 | WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); |
708 | /* One more address cycle for higher density devices */ | 708 | if (this->options & NAND_ROW_ADDR_3) { |
709 | if (this->chipsize & 0x0c000000) { | ||
710 | WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); | 709 | WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); |
711 | printk("high density\n"); | 710 | printk("high density\n"); |
712 | } | 711 | } |
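The diskonchip change above (and the hisi504 one further down) replaces per-driver chip-size heuristics with the NAND_ROW_ADDR_3 option introduced by this series, which the NAND core sets for chips needing a third row-address cycle. A minimal sketch of the pattern, assuming a hypothetical write_addr_cycle() callback standing in for the controller-specific register write:

#include <linux/mtd/rawnand.h>
#include <linux/types.h>

static void example_send_row_address(struct nand_chip *chip, int page_addr,
				     void (*write_addr_cycle)(u8 byte))
{
	write_addr_cycle(page_addr & 0xff);
	write_addr_cycle((page_addr >> 8) & 0xff);

	/* the third row-address cycle is driven by the core-provided option */
	if (chip->options & NAND_ROW_ADDR_3)
		write_addr_cycle((page_addr >> 16) & 0xff);
}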
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index fd3648952b5a..484f7fbc3f7d 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/gpio.h> | 26 | #include <linux/gpio/consumer.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/mtd/mtd.h> | 28 | #include <linux/mtd/mtd.h> |
29 | #include <linux/mtd/rawnand.h> | 29 | #include <linux/mtd/rawnand.h> |
@@ -31,12 +31,16 @@ | |||
31 | #include <linux/mtd/nand-gpio.h> | 31 | #include <linux/mtd/nand-gpio.h> |
32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
33 | #include <linux/of_address.h> | 33 | #include <linux/of_address.h> |
34 | #include <linux/of_gpio.h> | ||
35 | 34 | ||
36 | struct gpiomtd { | 35 | struct gpiomtd { |
37 | void __iomem *io_sync; | 36 | void __iomem *io_sync; |
38 | struct nand_chip nand_chip; | 37 | struct nand_chip nand_chip; |
39 | struct gpio_nand_platdata plat; | 38 | struct gpio_nand_platdata plat; |
39 | struct gpio_desc *nce; /* Optional chip enable */ | ||
40 | struct gpio_desc *cle; | ||
41 | struct gpio_desc *ale; | ||
42 | struct gpio_desc *rdy; | ||
43 | struct gpio_desc *nwp; /* Optional write protection */ | ||
40 | }; | 44 | }; |
41 | 45 | ||
42 | static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) | 46 | static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) |
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | |||
78 | gpio_nand_dosync(gpiomtd); | 82 | gpio_nand_dosync(gpiomtd); |
79 | 83 | ||
80 | if (ctrl & NAND_CTRL_CHANGE) { | 84 | if (ctrl & NAND_CTRL_CHANGE) { |
81 | if (gpio_is_valid(gpiomtd->plat.gpio_nce)) | 85 | if (gpiomtd->nce) |
82 | gpio_set_value(gpiomtd->plat.gpio_nce, | 86 | gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE)); |
83 | !(ctrl & NAND_NCE)); | 87 | gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE)); |
84 | gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); | 88 | gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE)); |
85 | gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); | ||
86 | gpio_nand_dosync(gpiomtd); | 89 | gpio_nand_dosync(gpiomtd); |
87 | } | 90 | } |
88 | if (cmd == NAND_CMD_NONE) | 91 | if (cmd == NAND_CMD_NONE) |
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd) | |||
96 | { | 99 | { |
97 | struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); | 100 | struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); |
98 | 101 | ||
99 | return gpio_get_value(gpiomtd->plat.gpio_rdy); | 102 | return gpiod_get_value(gpiomtd->rdy); |
100 | } | 103 | } |
101 | 104 | ||
102 | #ifdef CONFIG_OF | 105 | #ifdef CONFIG_OF |
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev, | |||
123 | } | 126 | } |
124 | } | 127 | } |
125 | 128 | ||
126 | plat->gpio_rdy = of_get_gpio(dev->of_node, 0); | ||
127 | plat->gpio_nce = of_get_gpio(dev->of_node, 1); | ||
128 | plat->gpio_ale = of_get_gpio(dev->of_node, 2); | ||
129 | plat->gpio_cle = of_get_gpio(dev->of_node, 3); | ||
130 | plat->gpio_nwp = of_get_gpio(dev->of_node, 4); | ||
131 | |||
132 | if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) | 129 | if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) |
133 | plat->chip_delay = val; | 130 | plat->chip_delay = val; |
134 | 131 | ||
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev) | |||
201 | 198 | ||
202 | nand_release(nand_to_mtd(&gpiomtd->nand_chip)); | 199 | nand_release(nand_to_mtd(&gpiomtd->nand_chip)); |
203 | 200 | ||
204 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) | 201 | /* Enable write protection and disable the chip */ |
205 | gpio_set_value(gpiomtd->plat.gpio_nwp, 0); | 202 | if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) |
206 | if (gpio_is_valid(gpiomtd->plat.gpio_nce)) | 203 | gpiod_set_value(gpiomtd->nwp, 0); |
207 | gpio_set_value(gpiomtd->plat.gpio_nce, 1); | 204 | if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) |
205 | gpiod_set_value(gpiomtd->nce, 0); | ||
208 | 206 | ||
209 | return 0; | 207 | return 0; |
210 | } | 208 | } |
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev) | |||
215 | struct nand_chip *chip; | 213 | struct nand_chip *chip; |
216 | struct mtd_info *mtd; | 214 | struct mtd_info *mtd; |
217 | struct resource *res; | 215 | struct resource *res; |
216 | struct device *dev = &pdev->dev; | ||
218 | int ret = 0; | 217 | int ret = 0; |
219 | 218 | ||
220 | if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev)) | 219 | if (!dev->of_node && !dev_get_platdata(dev)) |
221 | return -EINVAL; | 220 | return -EINVAL; |
222 | 221 | ||
223 | gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); | 222 | gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL); |
224 | if (!gpiomtd) | 223 | if (!gpiomtd) |
225 | return -ENOMEM; | 224 | return -ENOMEM; |
226 | 225 | ||
227 | chip = &gpiomtd->nand_chip; | 226 | chip = &gpiomtd->nand_chip; |
228 | 227 | ||
229 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 228 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
230 | chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); | 229 | chip->IO_ADDR_R = devm_ioremap_resource(dev, res); |
231 | if (IS_ERR(chip->IO_ADDR_R)) | 230 | if (IS_ERR(chip->IO_ADDR_R)) |
232 | return PTR_ERR(chip->IO_ADDR_R); | 231 | return PTR_ERR(chip->IO_ADDR_R); |
233 | 232 | ||
234 | res = gpio_nand_get_io_sync(pdev); | 233 | res = gpio_nand_get_io_sync(pdev); |
235 | if (res) { | 234 | if (res) { |
236 | gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res); | 235 | gpiomtd->io_sync = devm_ioremap_resource(dev, res); |
237 | if (IS_ERR(gpiomtd->io_sync)) | 236 | if (IS_ERR(gpiomtd->io_sync)) |
238 | return PTR_ERR(gpiomtd->io_sync); | 237 | return PTR_ERR(gpiomtd->io_sync); |
239 | } | 238 | } |
240 | 239 | ||
241 | ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat); | 240 | ret = gpio_nand_get_config(dev, &gpiomtd->plat); |
242 | if (ret) | 241 | if (ret) |
243 | return ret; | 242 | return ret; |
244 | 243 | ||
245 | if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { | 244 | /* Just enable the chip */ |
246 | ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, | 245 | gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH); |
247 | "NAND NCE"); | 246 | if (IS_ERR(gpiomtd->nce)) |
248 | if (ret) | 247 | return PTR_ERR(gpiomtd->nce); |
249 | return ret; | 248 | |
250 | gpio_direction_output(gpiomtd->plat.gpio_nce, 1); | 249 | /* We disable write protection once we know probe() will succeed */ |
250 | gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW); | ||
251 | if (IS_ERR(gpiomtd->nwp)) { | ||
252 | ret = PTR_ERR(gpiomtd->nwp); | ||
253 | goto out_ce; | ||
251 | } | 254 | } |
252 | 255 | ||
253 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { | 256 | gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW); |
254 | ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, | 257 | if (IS_ERR(gpiomtd->nwp)) { |
255 | "NAND NWP"); | 258 | ret = PTR_ERR(gpiomtd->nwp); |
256 | if (ret) | 259 | goto out_ce; |
257 | return ret; | ||
258 | } | 260 | } |
259 | 261 | ||
260 | ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE"); | 262 | gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW); |
261 | if (ret) | 263 | if (IS_ERR(gpiomtd->cle)) { |
262 | return ret; | 264 | ret = PTR_ERR(gpiomtd->cle); |
263 | gpio_direction_output(gpiomtd->plat.gpio_ale, 0); | 265 | goto out_ce; |
266 | } | ||
264 | 267 | ||
265 | ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE"); | 268 | gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN); |
266 | if (ret) | 269 | if (IS_ERR(gpiomtd->rdy)) { |
267 | return ret; | 270 | ret = PTR_ERR(gpiomtd->rdy); |
268 | gpio_direction_output(gpiomtd->plat.gpio_cle, 0); | 271 | goto out_ce; |
269 | |||
270 | if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) { | ||
271 | ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy, | ||
272 | "NAND RDY"); | ||
273 | if (ret) | ||
274 | return ret; | ||
275 | gpio_direction_input(gpiomtd->plat.gpio_rdy); | ||
276 | chip->dev_ready = gpio_nand_devready; | ||
277 | } | 272 | } |
273 | /* Using RDY pin */ | ||
274 | if (gpiomtd->rdy) | ||
275 | chip->dev_ready = gpio_nand_devready; | ||
278 | 276 | ||
279 | nand_set_flash_node(chip, pdev->dev.of_node); | 277 | nand_set_flash_node(chip, pdev->dev.of_node); |
280 | chip->IO_ADDR_W = chip->IO_ADDR_R; | 278 | chip->IO_ADDR_W = chip->IO_ADDR_R; |
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev) | |||
285 | chip->cmd_ctrl = gpio_nand_cmd_ctrl; | 283 | chip->cmd_ctrl = gpio_nand_cmd_ctrl; |
286 | 284 | ||
287 | mtd = nand_to_mtd(chip); | 285 | mtd = nand_to_mtd(chip); |
288 | mtd->dev.parent = &pdev->dev; | 286 | mtd->dev.parent = dev; |
289 | 287 | ||
290 | platform_set_drvdata(pdev, gpiomtd); | 288 | platform_set_drvdata(pdev, gpiomtd); |
291 | 289 | ||
292 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) | 290 | /* Disable write protection, if wired up */ |
293 | gpio_direction_output(gpiomtd->plat.gpio_nwp, 1); | 291 | if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) |
292 | gpiod_direction_output(gpiomtd->nwp, 1); | ||
294 | 293 | ||
295 | ret = nand_scan(mtd, 1); | 294 | ret = nand_scan(mtd, 1); |
296 | if (ret) | 295 | if (ret) |
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev) | |||
305 | return 0; | 304 | return 0; |
306 | 305 | ||
307 | err_wp: | 306 | err_wp: |
308 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) | 307 | if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) |
309 | gpio_set_value(gpiomtd->plat.gpio_nwp, 0); | 308 | gpiod_set_value(gpiomtd->nwp, 0); |
309 | out_ce: | ||
310 | if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) | ||
311 | gpiod_set_value(gpiomtd->nce, 0); | ||
310 | 312 | ||
311 | return ret; | 313 | return ret; |
312 | } | 314 | } |
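The gpio-nand conversion above looks up GPIO descriptors by function name ("nce", "nwp", "ale", "cle", "rdy") instead of carrying integer GPIO numbers in platform data. Note that in the hunk above the descriptor requested with "ale" is stored into gpiomtd->nwp rather than gpiomtd->ale; the sketch below only covers the optional-line handling, not that assignment. A minimal sketch of the consumer API pattern, with hypothetical locals:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_control_lines(struct device *dev)
{
	struct gpio_desc *nce, *rdy;

	/* returns NULL when the line is simply not wired up, so no gpio_is_valid() dance */
	nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(nce))
		return PTR_ERR(nce);

	rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
	if (IS_ERR(rdy))
		return PTR_ERR(rdy);

	if (nce)
		gpiod_set_value(nce, 1);	/* assert chip enable (logical value) */

	return 0;
}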
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index d9ee1a7e6956..0897261c3e17 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c | |||
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr) | |||
432 | host->addr_value[0] |= (page_addr & 0xffff) | 432 | host->addr_value[0] |= (page_addr & 0xffff) |
433 | << (host->addr_cycle * 8); | 433 | << (host->addr_cycle * 8); |
434 | host->addr_cycle += 2; | 434 | host->addr_cycle += 2; |
435 | /* One more address cycle for devices > 128MiB */ | 435 | if (chip->options & NAND_ROW_ADDR_3) { |
436 | if (chip->chipsize > (128 << 20)) { | ||
437 | host->addr_cycle += 1; | 436 | host->addr_cycle += 1; |
438 | if (host->command == NAND_CMD_ERASE1) | 437 | if (host->command == NAND_CMD_ERASE1) |
439 | host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; | 438 | host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; |
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 7f3b065b6b8f..c51d214d169e 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c | |||
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) | |||
115 | op = ECC_DECODE; | 115 | op = ECC_DECODE; |
116 | dec = readw(ecc->regs + ECC_DECDONE); | 116 | dec = readw(ecc->regs + ECC_DECDONE); |
117 | if (dec & ecc->sectors) { | 117 | if (dec & ecc->sectors) { |
118 | /* | ||
119 | * Clear decode IRQ status once again to ensure that | ||
120 | * there will be no extra IRQ. | ||
121 | */ | ||
122 | readw(ecc->regs + ECC_DECIRQ_STA); | ||
118 | ecc->sectors = 0; | 123 | ecc->sectors = 0; |
119 | complete(&ecc->done); | 124 | complete(&ecc->done); |
120 | } else { | 125 | } else { |
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) | |||
130 | } | 135 | } |
131 | } | 136 | } |
132 | 137 | ||
133 | writel(0, ecc->regs + ECC_IRQ_REG(op)); | ||
134 | |||
135 | return IRQ_HANDLED; | 138 | return IRQ_HANDLED; |
136 | } | 139 | } |
137 | 140 | ||
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) | |||
307 | 310 | ||
308 | /* disable it */ | 311 | /* disable it */ |
309 | mtk_ecc_wait_idle(ecc, op); | 312 | mtk_ecc_wait_idle(ecc, op); |
313 | if (op == ECC_DECODE) | ||
314 | /* | ||
315 | * Clear decode IRQ status in case there is a timeout to wait | ||
316 | * decode IRQ. | ||
317 | */ | ||
318 | readw(ecc->regs + ECC_DECIRQ_STA); | ||
310 | writew(0, ecc->regs + ECC_IRQ_REG(op)); | 319 | writew(0, ecc->regs + ECC_IRQ_REG(op)); |
311 | writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); | 320 | writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); |
312 | 321 | ||
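The mtk_ecc fix above relies on the decode-IRQ status register clearing when read, as its new comments state. A minimal sketch of that read-to-clear acknowledgement, under the assumption that ECC_DECIRQ_STA behaves as those comments describe:

#include <linux/io.h>

static void example_ack_decode_irq(void __iomem *decirq_sta)
{
	(void)readw(decirq_sta);	/* value discarded, the read itself is the ack */
}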
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 53e5e0337c3e..f3be0b2a8869 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) | |||
415 | * waits for completion. */ | 415 | * waits for completion. */ |
416 | static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) | 416 | static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) |
417 | { | 417 | { |
418 | pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); | 418 | dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); |
419 | 419 | ||
420 | writew(cmd, NFC_V1_V2_FLASH_CMD); | 420 | writew(cmd, NFC_V1_V2_FLASH_CMD); |
421 | writew(NFC_CMD, NFC_V1_V2_CONFIG2); | 421 | writew(NFC_CMD, NFC_V1_V2_CONFIG2); |
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) | |||
431 | udelay(1); | 431 | udelay(1); |
432 | } | 432 | } |
433 | if (max_retries < 0) | 433 | if (max_retries < 0) |
434 | pr_debug("%s: RESET failed\n", __func__); | 434 | dev_dbg(host->dev, "%s: RESET failed\n", __func__); |
435 | } else { | 435 | } else { |
436 | /* Wait for operation to complete */ | 436 | /* Wait for operation to complete */ |
437 | wait_op_done(host, useirq); | 437 | wait_op_done(host, useirq); |
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) | |||
454 | * a NAND command. */ | 454 | * a NAND command. */ |
455 | static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) | 455 | static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) |
456 | { | 456 | { |
457 | pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); | 457 | dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast); |
458 | 458 | ||
459 | writew(addr, NFC_V1_V2_FLASH_ADDR); | 459 | writew(addr, NFC_V1_V2_FLASH_ADDR); |
460 | writew(NFC_ADDR, NFC_V1_V2_CONFIG2); | 460 | writew(NFC_ADDR, NFC_V1_V2_CONFIG2); |
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, | |||
607 | uint16_t ecc_status = get_ecc_status_v1(host); | 607 | uint16_t ecc_status = get_ecc_status_v1(host); |
608 | 608 | ||
609 | if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { | 609 | if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { |
610 | pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); | 610 | dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n"); |
611 | return -EBADMSG; | 611 | return -EBADMSG; |
612 | } | 612 | } |
613 | 613 | ||
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, | |||
634 | do { | 634 | do { |
635 | err = ecc_stat & ecc_bit_mask; | 635 | err = ecc_stat & ecc_bit_mask; |
636 | if (err > err_limit) { | 636 | if (err > err_limit) { |
637 | printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); | 637 | dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n"); |
638 | return -EBADMSG; | 638 | return -EBADMSG; |
639 | } else { | 639 | } else { |
640 | ret += err; | 640 | ret += err; |
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, | |||
642 | ecc_stat >>= 4; | 642 | ecc_stat >>= 4; |
643 | } while (--no_subpages); | 643 | } while (--no_subpages); |
644 | 644 | ||
645 | pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); | 645 | dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret); |
646 | 646 | ||
647 | return ret; | 647 | return ret; |
648 | } | 648 | } |
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd) | |||
673 | host->buf_start++; | 673 | host->buf_start++; |
674 | } | 674 | } |
675 | 675 | ||
676 | pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); | 676 | dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); |
677 | return ret; | 677 | return ret; |
678 | } | 678 | } |
679 | 679 | ||
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) | |||
859 | host->devtype_data->send_addr(host, | 859 | host->devtype_data->send_addr(host, |
860 | (page_addr >> 8) & 0xff, true); | 860 | (page_addr >> 8) & 0xff, true); |
861 | } else { | 861 | } else { |
862 | /* One more address cycle for higher density devices */ | 862 | if (nand_chip->options & NAND_ROW_ADDR_3) { |
863 | if (mtd->size >= 0x4000000) { | ||
864 | /* paddr_8 - paddr_15 */ | 863 | /* paddr_8 - paddr_15 */ |
865 | host->devtype_data->send_addr(host, | 864 | host->devtype_data->send_addr(host, |
866 | (page_addr >> 8) & 0xff, | 865 | (page_addr >> 8) & 0xff, |
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, | |||
1212 | struct nand_chip *nand_chip = mtd_to_nand(mtd); | 1211 | struct nand_chip *nand_chip = mtd_to_nand(mtd); |
1213 | struct mxc_nand_host *host = nand_get_controller_data(nand_chip); | 1212 | struct mxc_nand_host *host = nand_get_controller_data(nand_chip); |
1214 | 1213 | ||
1215 | pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", | 1214 | dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", |
1216 | command, column, page_addr); | 1215 | command, column, page_addr); |
1217 | 1216 | ||
1218 | /* Reset command state information */ | 1217 | /* Reset command state information */ |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 12edaae17d81..6135d007a068 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, | |||
115 | struct nand_chip *chip = mtd_to_nand(mtd); | 115 | struct nand_chip *chip = mtd_to_nand(mtd); |
116 | struct nand_ecc_ctrl *ecc = &chip->ecc; | 116 | struct nand_ecc_ctrl *ecc = &chip->ecc; |
117 | 117 | ||
118 | if (section) | 118 | if (section || !ecc->total) |
119 | return -ERANGE; | 119 | return -ERANGE; |
120 | 120 | ||
121 | oobregion->length = ecc->total; | 121 | oobregion->length = ecc->total; |
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
727 | chip->cmd_ctrl(mtd, page_addr, ctrl); | 727 | chip->cmd_ctrl(mtd, page_addr, ctrl); |
728 | ctrl &= ~NAND_CTRL_CHANGE; | 728 | ctrl &= ~NAND_CTRL_CHANGE; |
729 | chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); | 729 | chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); |
730 | /* One more address cycle for devices > 32MiB */ | 730 | if (chip->options & NAND_ROW_ADDR_3) |
731 | if (chip->chipsize > (32 << 20)) | ||
732 | chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); | 731 | chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); |
733 | } | 732 | } |
734 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); | 733 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); |
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
854 | chip->cmd_ctrl(mtd, page_addr, ctrl); | 853 | chip->cmd_ctrl(mtd, page_addr, ctrl); |
855 | chip->cmd_ctrl(mtd, page_addr >> 8, | 854 | chip->cmd_ctrl(mtd, page_addr >> 8, |
856 | NAND_NCE | NAND_ALE); | 855 | NAND_NCE | NAND_ALE); |
857 | /* One more address cycle for devices > 128MiB */ | 856 | if (chip->options & NAND_ROW_ADDR_3) |
858 | if (chip->chipsize > (128 << 20)) | ||
859 | chip->cmd_ctrl(mtd, page_addr >> 16, | 857 | chip->cmd_ctrl(mtd, page_addr >> 16, |
860 | NAND_NCE | NAND_ALE); | 858 | NAND_NCE | NAND_ALE); |
861 | } | 859 | } |
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) | |||
1246 | 1244 | ||
1247 | return 0; | 1245 | return 0; |
1248 | } | 1246 | } |
1247 | EXPORT_SYMBOL_GPL(nand_reset); | ||
1249 | 1248 | ||
1250 | /** | 1249 | /** |
1251 | * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data | 1250 | * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data |
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2799 | size_t *retlen, const uint8_t *buf) | 2798 | size_t *retlen, const uint8_t *buf) |
2800 | { | 2799 | { |
2801 | struct nand_chip *chip = mtd_to_nand(mtd); | 2800 | struct nand_chip *chip = mtd_to_nand(mtd); |
2801 | int chipnr = (int)(to >> chip->chip_shift); | ||
2802 | struct mtd_oob_ops ops; | 2802 | struct mtd_oob_ops ops; |
2803 | int ret; | 2803 | int ret; |
2804 | 2804 | ||
2805 | /* Wait for the device to get ready */ | ||
2806 | panic_nand_wait(mtd, chip, 400); | ||
2807 | |||
2808 | /* Grab the device */ | 2805 | /* Grab the device */ |
2809 | panic_nand_get_device(chip, mtd, FL_WRITING); | 2806 | panic_nand_get_device(chip, mtd, FL_WRITING); |
2810 | 2807 | ||
2808 | chip->select_chip(mtd, chipnr); | ||
2809 | |||
2810 | /* Wait for the device to get ready */ | ||
2811 | panic_nand_wait(mtd, chip, 400); | ||
2812 | |||
2811 | memset(&ops, 0, sizeof(ops)); | 2813 | memset(&ops, 0, sizeof(ops)); |
2812 | ops.len = len; | 2814 | ops.len = len; |
2813 | ops.datbuf = (uint8_t *)buf; | 2815 | ops.datbuf = (uint8_t *)buf; |
@@ -3999,6 +4001,9 @@ ident_done: | |||
3999 | chip->chip_shift += 32 - 1; | 4001 | chip->chip_shift += 32 - 1; |
4000 | } | 4002 | } |
4001 | 4003 | ||
4004 | if (chip->chip_shift - chip->page_shift > 16) | ||
4005 | chip->options |= NAND_ROW_ADDR_3; | ||
4006 | |||
4002 | chip->badblockbits = 8; | 4007 | chip->badblockbits = 8; |
4003 | chip->erase = single_erase; | 4008 | chip->erase = single_erase; |
4004 | 4009 | ||
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
4700 | mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); | 4705 | mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); |
4701 | break; | 4706 | break; |
4702 | default: | 4707 | default: |
4708 | /* | ||
4709 | * Expose the whole OOB area to users if ECC_NONE | ||
4710 | * is passed. We could do that for all kind of | ||
4711 | * ->oobsize, but we must keep the old large/small | ||
4712 | * page with ECC layout when ->oobsize <= 128 for | ||
4713 | * compatibility reasons. | ||
4714 | */ | ||
4715 | if (ecc->mode == NAND_ECC_NONE) { | ||
4716 | mtd_set_ooblayout(mtd, | ||
4717 | &nand_ooblayout_lp_ops); | ||
4718 | break; | ||
4719 | } | ||
4720 | |||
4703 | WARN(1, "No oob scheme defined for oobsize %d\n", | 4721 | WARN(1, "No oob scheme defined for oobsize %d\n", |
4704 | mtd->oobsize); | 4722 | mtd->oobsize); |
4705 | ret = -EINVAL; | 4723 | ret = -EINVAL; |
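The NAND_ROW_ADDR_3 hunks above (hisi504, mxc, nand_base, and nuc900 further below) replace per-driver chip-size checks with a single flag set by the core. A short illustrative derivation of why the new test matches the old heuristics, not part of the patch: chip_shift - page_shift is the number of page-index bits, so "more than 16 bits" means more than 65536 pages, which for 512-byte pages is a chip larger than 32 MiB and for 2 KiB pages larger than 128 MiB, exactly the old cut-offs.

	/*
	 * Illustrative only: equivalent size-based check for power-of-two chips.
	 *
	 *   512 B pages:  > 65536 pages  <=>  chipsize > 32 MiB
	 *   2 KiB pages:  > 65536 pages  <=>  chipsize > 128 MiB
	 */
	static bool needs_third_row_cycle(u64 chipsize, u32 writesize)
	{
		return chipsize / writesize > 65536;
	}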
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 246b4393118e..44322a363ba5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev) | |||
520 | struct dentry *root = nsmtd->dbg.dfs_dir; | 520 | struct dentry *root = nsmtd->dbg.dfs_dir; |
521 | struct dentry *dent; | 521 | struct dentry *dent; |
522 | 522 | ||
523 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) | 523 | /* |
524 | * Just skip debugfs initialization when the debugfs directory is | ||
525 | * missing. | ||
526 | */ | ||
527 | if (IS_ERR_OR_NULL(root)) { | ||
528 | if (IS_ENABLED(CONFIG_DEBUG_FS) && | ||
529 | !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) | ||
530 | NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); | ||
524 | return 0; | 531 | return 0; |
525 | 532 | } | |
526 | if (IS_ERR_OR_NULL(root)) | ||
527 | return -1; | ||
528 | 533 | ||
529 | dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, | 534 | dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, |
530 | root, dev, &dfs_fops); | 535 | root, dev, &dfs_fops); |
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 7bb4d2ea9342..af5b32c9a791 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c | |||
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
154 | if (page_addr != -1) { | 154 | if (page_addr != -1) { |
155 | write_addr_reg(nand, page_addr); | 155 | write_addr_reg(nand, page_addr); |
156 | 156 | ||
157 | if (chip->chipsize > (128 << 20)) { | 157 | if (chip->options & NAND_ROW_ADDR_3) { |
158 | write_addr_reg(nand, page_addr >> 8); | 158 | write_addr_reg(nand, page_addr >> 8); |
159 | write_addr_reg(nand, page_addr >> 16 | ENDADDR); | 159 | write_addr_reg(nand, page_addr >> 16 | ENDADDR); |
160 | } else { | 160 | } else { |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 54540c8fa1a2..dad438c4906a 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, | |||
1133 | 0x97, 0x79, 0xe5, 0x24, 0xb5}; | 1133 | 0x97, 0x79, 0xe5, 0x24, 0xb5}; |
1134 | 1134 | ||
1135 | /** | 1135 | /** |
1136 | * omap_calculate_ecc_bch - Generate bytes of ECC bytes | 1136 | * _omap_calculate_ecc_bch - Generate ECC bytes for one sector |
1137 | * @mtd: MTD device structure | 1137 | * @mtd: MTD device structure |
1138 | * @dat: The pointer to data on which ecc is computed | 1138 | * @dat: The pointer to data on which ecc is computed |
1139 | * @ecc_code: The ecc_code buffer | 1139 | * @ecc_code: The ecc_code buffer |
1140 | * @i: The sector number (for a multi sector page) | ||
1140 | * | 1141 | * |
1141 | * Support calculating of BCH4/8 ecc vectors for the page | 1142 | * Support calculating of BCH4/8/16 ECC vectors for one sector |
1143 | * within a page. Sector number is in @i. | ||
1142 | */ | 1144 | */ |
1143 | static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, | 1145 | static int _omap_calculate_ecc_bch(struct mtd_info *mtd, |
1144 | const u_char *dat, u_char *ecc_calc) | 1146 | const u_char *dat, u_char *ecc_calc, int i) |
1145 | { | 1147 | { |
1146 | struct omap_nand_info *info = mtd_to_omap(mtd); | 1148 | struct omap_nand_info *info = mtd_to_omap(mtd); |
1147 | int eccbytes = info->nand.ecc.bytes; | 1149 | int eccbytes = info->nand.ecc.bytes; |
1148 | struct gpmc_nand_regs *gpmc_regs = &info->reg; | 1150 | struct gpmc_nand_regs *gpmc_regs = &info->reg; |
1149 | u8 *ecc_code; | 1151 | u8 *ecc_code; |
1150 | unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; | 1152 | unsigned long bch_val1, bch_val2, bch_val3, bch_val4; |
1151 | u32 val; | 1153 | u32 val; |
1152 | int i, j; | 1154 | int j; |
1155 | |||
1156 | ecc_code = ecc_calc; | ||
1157 | switch (info->ecc_opt) { | ||
1158 | case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: | ||
1159 | case OMAP_ECC_BCH8_CODE_HW: | ||
1160 | bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1161 | bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1162 | bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); | ||
1163 | bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); | ||
1164 | *ecc_code++ = (bch_val4 & 0xFF); | ||
1165 | *ecc_code++ = ((bch_val3 >> 24) & 0xFF); | ||
1166 | *ecc_code++ = ((bch_val3 >> 16) & 0xFF); | ||
1167 | *ecc_code++ = ((bch_val3 >> 8) & 0xFF); | ||
1168 | *ecc_code++ = (bch_val3 & 0xFF); | ||
1169 | *ecc_code++ = ((bch_val2 >> 24) & 0xFF); | ||
1170 | *ecc_code++ = ((bch_val2 >> 16) & 0xFF); | ||
1171 | *ecc_code++ = ((bch_val2 >> 8) & 0xFF); | ||
1172 | *ecc_code++ = (bch_val2 & 0xFF); | ||
1173 | *ecc_code++ = ((bch_val1 >> 24) & 0xFF); | ||
1174 | *ecc_code++ = ((bch_val1 >> 16) & 0xFF); | ||
1175 | *ecc_code++ = ((bch_val1 >> 8) & 0xFF); | ||
1176 | *ecc_code++ = (bch_val1 & 0xFF); | ||
1177 | break; | ||
1178 | case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: | ||
1179 | case OMAP_ECC_BCH4_CODE_HW: | ||
1180 | bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1181 | bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1182 | *ecc_code++ = ((bch_val2 >> 12) & 0xFF); | ||
1183 | *ecc_code++ = ((bch_val2 >> 4) & 0xFF); | ||
1184 | *ecc_code++ = ((bch_val2 & 0xF) << 4) | | ||
1185 | ((bch_val1 >> 28) & 0xF); | ||
1186 | *ecc_code++ = ((bch_val1 >> 20) & 0xFF); | ||
1187 | *ecc_code++ = ((bch_val1 >> 12) & 0xFF); | ||
1188 | *ecc_code++ = ((bch_val1 >> 4) & 0xFF); | ||
1189 | *ecc_code++ = ((bch_val1 & 0xF) << 4); | ||
1190 | break; | ||
1191 | case OMAP_ECC_BCH16_CODE_HW: | ||
1192 | val = readl(gpmc_regs->gpmc_bch_result6[i]); | ||
1193 | ecc_code[0] = ((val >> 8) & 0xFF); | ||
1194 | ecc_code[1] = ((val >> 0) & 0xFF); | ||
1195 | val = readl(gpmc_regs->gpmc_bch_result5[i]); | ||
1196 | ecc_code[2] = ((val >> 24) & 0xFF); | ||
1197 | ecc_code[3] = ((val >> 16) & 0xFF); | ||
1198 | ecc_code[4] = ((val >> 8) & 0xFF); | ||
1199 | ecc_code[5] = ((val >> 0) & 0xFF); | ||
1200 | val = readl(gpmc_regs->gpmc_bch_result4[i]); | ||
1201 | ecc_code[6] = ((val >> 24) & 0xFF); | ||
1202 | ecc_code[7] = ((val >> 16) & 0xFF); | ||
1203 | ecc_code[8] = ((val >> 8) & 0xFF); | ||
1204 | ecc_code[9] = ((val >> 0) & 0xFF); | ||
1205 | val = readl(gpmc_regs->gpmc_bch_result3[i]); | ||
1206 | ecc_code[10] = ((val >> 24) & 0xFF); | ||
1207 | ecc_code[11] = ((val >> 16) & 0xFF); | ||
1208 | ecc_code[12] = ((val >> 8) & 0xFF); | ||
1209 | ecc_code[13] = ((val >> 0) & 0xFF); | ||
1210 | val = readl(gpmc_regs->gpmc_bch_result2[i]); | ||
1211 | ecc_code[14] = ((val >> 24) & 0xFF); | ||
1212 | ecc_code[15] = ((val >> 16) & 0xFF); | ||
1213 | ecc_code[16] = ((val >> 8) & 0xFF); | ||
1214 | ecc_code[17] = ((val >> 0) & 0xFF); | ||
1215 | val = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1216 | ecc_code[18] = ((val >> 24) & 0xFF); | ||
1217 | ecc_code[19] = ((val >> 16) & 0xFF); | ||
1218 | ecc_code[20] = ((val >> 8) & 0xFF); | ||
1219 | ecc_code[21] = ((val >> 0) & 0xFF); | ||
1220 | val = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1221 | ecc_code[22] = ((val >> 24) & 0xFF); | ||
1222 | ecc_code[23] = ((val >> 16) & 0xFF); | ||
1223 | ecc_code[24] = ((val >> 8) & 0xFF); | ||
1224 | ecc_code[25] = ((val >> 0) & 0xFF); | ||
1225 | break; | ||
1226 | default: | ||
1227 | return -EINVAL; | ||
1228 | } | ||
1229 | |||
1230 | /* ECC scheme specific syndrome customizations */ | ||
1231 | switch (info->ecc_opt) { | ||
1232 | case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: | ||
1233 | /* Add constant polynomial to remainder, so that | ||
1234 | * ECC of blank pages results in 0x0 on reading back | ||
1235 | */ | ||
1236 | for (j = 0; j < eccbytes; j++) | ||
1237 | ecc_calc[j] ^= bch4_polynomial[j]; | ||
1238 | break; | ||
1239 | case OMAP_ECC_BCH4_CODE_HW: | ||
1240 | /* Set 8th ECC byte as 0x0 for ROM compatibility */ | ||
1241 | ecc_calc[eccbytes - 1] = 0x0; | ||
1242 | break; | ||
1243 | case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: | ||
1244 | /* Add constant polynomial to remainder, so that | ||
1245 | * ECC of blank pages results in 0x0 on reading back | ||
1246 | */ | ||
1247 | for (j = 0; j < eccbytes; j++) | ||
1248 | ecc_calc[j] ^= bch8_polynomial[j]; | ||
1249 | break; | ||
1250 | case OMAP_ECC_BCH8_CODE_HW: | ||
1251 | /* Set 14th ECC byte as 0x0 for ROM compatibility */ | ||
1252 | ecc_calc[eccbytes - 1] = 0x0; | ||
1253 | break; | ||
1254 | case OMAP_ECC_BCH16_CODE_HW: | ||
1255 | break; | ||
1256 | default: | ||
1257 | return -EINVAL; | ||
1258 | } | ||
1259 | |||
1260 | return 0; | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1264 | * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction | ||
1265 | * @mtd: MTD device structure | ||
1266 | * @dat: The pointer to data on which ecc is computed | ||
1267 | * @ecc_code: The ecc_code buffer | ||
1268 | * | ||
1269 | * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used | ||
1270 | * when SW based correction is required as ECC is required for one sector | ||
1271 | * at a time. | ||
1272 | */ | ||
1273 | static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, | ||
1274 | const u_char *dat, u_char *ecc_calc) | ||
1275 | { | ||
1276 | return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); | ||
1277 | } | ||
1278 | |||
1279 | /** | ||
1280 | * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors | ||
1281 | * @mtd: MTD device structure | ||
1282 | * @dat: The pointer to data on which ecc is computed | ||
1283 | * @ecc_code: The ecc_code buffer | ||
1284 | * | ||
1285 | * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. | ||
1286 | */ | ||
1287 | static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, | ||
1288 | const u_char *dat, u_char *ecc_calc) | ||
1289 | { | ||
1290 | struct omap_nand_info *info = mtd_to_omap(mtd); | ||
1291 | int eccbytes = info->nand.ecc.bytes; | ||
1292 | unsigned long nsectors; | ||
1293 | int i, ret; | ||
1153 | 1294 | ||
1154 | nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; | 1295 | nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; |
1155 | for (i = 0; i < nsectors; i++) { | 1296 | for (i = 0; i < nsectors; i++) { |
1156 | ecc_code = ecc_calc; | 1297 | ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); |
1157 | switch (info->ecc_opt) { | 1298 | if (ret) |
1158 | case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: | 1299 | return ret; |
1159 | case OMAP_ECC_BCH8_CODE_HW: | ||
1160 | bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1161 | bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1162 | bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); | ||
1163 | bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); | ||
1164 | *ecc_code++ = (bch_val4 & 0xFF); | ||
1165 | *ecc_code++ = ((bch_val3 >> 24) & 0xFF); | ||
1166 | *ecc_code++ = ((bch_val3 >> 16) & 0xFF); | ||
1167 | *ecc_code++ = ((bch_val3 >> 8) & 0xFF); | ||
1168 | *ecc_code++ = (bch_val3 & 0xFF); | ||
1169 | *ecc_code++ = ((bch_val2 >> 24) & 0xFF); | ||
1170 | *ecc_code++ = ((bch_val2 >> 16) & 0xFF); | ||
1171 | *ecc_code++ = ((bch_val2 >> 8) & 0xFF); | ||
1172 | *ecc_code++ = (bch_val2 & 0xFF); | ||
1173 | *ecc_code++ = ((bch_val1 >> 24) & 0xFF); | ||
1174 | *ecc_code++ = ((bch_val1 >> 16) & 0xFF); | ||
1175 | *ecc_code++ = ((bch_val1 >> 8) & 0xFF); | ||
1176 | *ecc_code++ = (bch_val1 & 0xFF); | ||
1177 | break; | ||
1178 | case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: | ||
1179 | case OMAP_ECC_BCH4_CODE_HW: | ||
1180 | bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1181 | bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1182 | *ecc_code++ = ((bch_val2 >> 12) & 0xFF); | ||
1183 | *ecc_code++ = ((bch_val2 >> 4) & 0xFF); | ||
1184 | *ecc_code++ = ((bch_val2 & 0xF) << 4) | | ||
1185 | ((bch_val1 >> 28) & 0xF); | ||
1186 | *ecc_code++ = ((bch_val1 >> 20) & 0xFF); | ||
1187 | *ecc_code++ = ((bch_val1 >> 12) & 0xFF); | ||
1188 | *ecc_code++ = ((bch_val1 >> 4) & 0xFF); | ||
1189 | *ecc_code++ = ((bch_val1 & 0xF) << 4); | ||
1190 | break; | ||
1191 | case OMAP_ECC_BCH16_CODE_HW: | ||
1192 | val = readl(gpmc_regs->gpmc_bch_result6[i]); | ||
1193 | ecc_code[0] = ((val >> 8) & 0xFF); | ||
1194 | ecc_code[1] = ((val >> 0) & 0xFF); | ||
1195 | val = readl(gpmc_regs->gpmc_bch_result5[i]); | ||
1196 | ecc_code[2] = ((val >> 24) & 0xFF); | ||
1197 | ecc_code[3] = ((val >> 16) & 0xFF); | ||
1198 | ecc_code[4] = ((val >> 8) & 0xFF); | ||
1199 | ecc_code[5] = ((val >> 0) & 0xFF); | ||
1200 | val = readl(gpmc_regs->gpmc_bch_result4[i]); | ||
1201 | ecc_code[6] = ((val >> 24) & 0xFF); | ||
1202 | ecc_code[7] = ((val >> 16) & 0xFF); | ||
1203 | ecc_code[8] = ((val >> 8) & 0xFF); | ||
1204 | ecc_code[9] = ((val >> 0) & 0xFF); | ||
1205 | val = readl(gpmc_regs->gpmc_bch_result3[i]); | ||
1206 | ecc_code[10] = ((val >> 24) & 0xFF); | ||
1207 | ecc_code[11] = ((val >> 16) & 0xFF); | ||
1208 | ecc_code[12] = ((val >> 8) & 0xFF); | ||
1209 | ecc_code[13] = ((val >> 0) & 0xFF); | ||
1210 | val = readl(gpmc_regs->gpmc_bch_result2[i]); | ||
1211 | ecc_code[14] = ((val >> 24) & 0xFF); | ||
1212 | ecc_code[15] = ((val >> 16) & 0xFF); | ||
1213 | ecc_code[16] = ((val >> 8) & 0xFF); | ||
1214 | ecc_code[17] = ((val >> 0) & 0xFF); | ||
1215 | val = readl(gpmc_regs->gpmc_bch_result1[i]); | ||
1216 | ecc_code[18] = ((val >> 24) & 0xFF); | ||
1217 | ecc_code[19] = ((val >> 16) & 0xFF); | ||
1218 | ecc_code[20] = ((val >> 8) & 0xFF); | ||
1219 | ecc_code[21] = ((val >> 0) & 0xFF); | ||
1220 | val = readl(gpmc_regs->gpmc_bch_result0[i]); | ||
1221 | ecc_code[22] = ((val >> 24) & 0xFF); | ||
1222 | ecc_code[23] = ((val >> 16) & 0xFF); | ||
1223 | ecc_code[24] = ((val >> 8) & 0xFF); | ||
1224 | ecc_code[25] = ((val >> 0) & 0xFF); | ||
1225 | break; | ||
1226 | default: | ||
1227 | return -EINVAL; | ||
1228 | } | ||
1229 | |||
1230 | /* ECC scheme specific syndrome customizations */ | ||
1231 | switch (info->ecc_opt) { | ||
1232 | case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: | ||
1233 | /* Add constant polynomial to remainder, so that | ||
1234 | * ECC of blank pages results in 0x0 on reading back */ | ||
1235 | for (j = 0; j < eccbytes; j++) | ||
1236 | ecc_calc[j] ^= bch4_polynomial[j]; | ||
1237 | break; | ||
1238 | case OMAP_ECC_BCH4_CODE_HW: | ||
1239 | /* Set 8th ECC byte as 0x0 for ROM compatibility */ | ||
1240 | ecc_calc[eccbytes - 1] = 0x0; | ||
1241 | break; | ||
1242 | case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: | ||
1243 | /* Add constant polynomial to remainder, so that | ||
1244 | * ECC of blank pages results in 0x0 on reading back */ | ||
1245 | for (j = 0; j < eccbytes; j++) | ||
1246 | ecc_calc[j] ^= bch8_polynomial[j]; | ||
1247 | break; | ||
1248 | case OMAP_ECC_BCH8_CODE_HW: | ||
1249 | /* Set 14th ECC byte as 0x0 for ROM compatibility */ | ||
1250 | ecc_calc[eccbytes - 1] = 0x0; | ||
1251 | break; | ||
1252 | case OMAP_ECC_BCH16_CODE_HW: | ||
1253 | break; | ||
1254 | default: | ||
1255 | return -EINVAL; | ||
1256 | } | ||
1257 | 1300 | ||
1258 | ecc_calc += eccbytes; | 1301 | ecc_calc += eccbytes; |
1259 | } | 1302 | } |
1260 | 1303 | ||
1261 | return 0; | 1304 | return 0; |
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, | |||
1496 | chip->write_buf(mtd, buf, mtd->writesize); | 1539 | chip->write_buf(mtd, buf, mtd->writesize); |
1497 | 1540 | ||
1498 | /* Update ecc vector from GPMC result registers */ | 1541 | /* Update ecc vector from GPMC result registers */ |
1499 | chip->ecc.calculate(mtd, buf, &ecc_calc[0]); | 1542 | omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); |
1500 | 1543 | ||
1501 | ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, | 1544 | ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, |
1502 | chip->ecc.total); | 1545 | chip->ecc.total); |
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, | |||
1509 | } | 1552 | } |
1510 | 1553 | ||
1511 | /** | 1554 | /** |
1555 | * omap_write_subpage_bch - BCH hardware ECC based subpage write | ||
1556 | * @mtd: mtd info structure | ||
1557 | * @chip: nand chip info structure | ||
1558 | * @offset: column address of subpage within the page | ||
1559 | * @data_len: data length | ||
1560 | * @buf: data buffer | ||
1561 | * @oob_required: must write chip->oob_poi to OOB | ||
1562 | * @page: page number to write | ||
1563 | * | ||
1564 | * OMAP optimized subpage write method. | ||
1565 | */ | ||
1566 | static int omap_write_subpage_bch(struct mtd_info *mtd, | ||
1567 | struct nand_chip *chip, u32 offset, | ||
1568 | u32 data_len, const u8 *buf, | ||
1569 | int oob_required, int page) | ||
1570 | { | ||
1571 | u8 *ecc_calc = chip->buffers->ecccalc; | ||
1572 | int ecc_size = chip->ecc.size; | ||
1573 | int ecc_bytes = chip->ecc.bytes; | ||
1574 | int ecc_steps = chip->ecc.steps; | ||
1575 | u32 start_step = offset / ecc_size; | ||
1576 | u32 end_step = (offset + data_len - 1) / ecc_size; | ||
1577 | int step, ret = 0; | ||
1578 | |||
1579 | /* | ||
1580 | * Write entire page at one go as it would be optimal | ||
1581 | * as ECC is calculated by hardware. | ||
1582 | * ECC is calculated for all subpages but we choose | ||
1583 | * only what we want. | ||
1584 | */ | ||
1585 | |||
1586 | /* Enable GPMC ECC engine */ | ||
1587 | chip->ecc.hwctl(mtd, NAND_ECC_WRITE); | ||
1588 | |||
1589 | /* Write data */ | ||
1590 | chip->write_buf(mtd, buf, mtd->writesize); | ||
1591 | |||
1592 | for (step = 0; step < ecc_steps; step++) { | ||
1593 | /* mask ECC of un-touched subpages by padding 0xFF */ | ||
1594 | if (step < start_step || step > end_step) | ||
1595 | memset(ecc_calc, 0xff, ecc_bytes); | ||
1596 | else | ||
1597 | ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); | ||
1598 | |||
1599 | if (ret) | ||
1600 | return ret; | ||
1601 | |||
1602 | buf += ecc_size; | ||
1603 | ecc_calc += ecc_bytes; | ||
1604 | } | ||
1605 | |||
1606 | /* copy calculated ECC for whole page to chip->buffer->oob */ | ||
1607 | /* this include masked-value(0xFF) for unwritten subpages */ | ||
1608 | ecc_calc = chip->buffers->ecccalc; | ||
1609 | ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, | ||
1610 | chip->ecc.total); | ||
1611 | if (ret) | ||
1612 | return ret; | ||
1613 | |||
1614 | /* write OOB buffer to NAND device */ | ||
1615 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
1616 | |||
1617 | return 0; | ||
1618 | } | ||
1619 | |||
1620 | /** | ||
1512 | * omap_read_page_bch - BCH ecc based page read function for entire page | 1621 | * omap_read_page_bch - BCH ecc based page read function for entire page |
1513 | * @mtd: mtd info structure | 1622 | * @mtd: mtd info structure |
1514 | * @chip: nand chip info structure | 1623 | * @chip: nand chip info structure |
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, | |||
1544 | chip->ecc.total); | 1653 | chip->ecc.total); |
1545 | 1654 | ||
1546 | /* Calculate ecc bytes */ | 1655 | /* Calculate ecc bytes */ |
1547 | chip->ecc.calculate(mtd, buf, ecc_calc); | 1656 | omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); |
1548 | 1657 | ||
1549 | ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, | 1658 | ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, |
1550 | chip->ecc.total); | 1659 | chip->ecc.total); |
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info, | |||
1588 | return true; | 1697 | return true; |
1589 | } | 1698 | } |
1590 | 1699 | ||
1591 | static bool omap2_nand_ecc_check(struct omap_nand_info *info, | 1700 | static bool omap2_nand_ecc_check(struct omap_nand_info *info) |
1592 | struct omap_nand_platform_data *pdata) | ||
1593 | { | 1701 | { |
1594 | bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm; | 1702 | bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm; |
1595 | 1703 | ||
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = { | |||
1804 | static int omap_nand_probe(struct platform_device *pdev) | 1912 | static int omap_nand_probe(struct platform_device *pdev) |
1805 | { | 1913 | { |
1806 | struct omap_nand_info *info; | 1914 | struct omap_nand_info *info; |
1807 | struct omap_nand_platform_data *pdata = NULL; | ||
1808 | struct mtd_info *mtd; | 1915 | struct mtd_info *mtd; |
1809 | struct nand_chip *nand_chip; | 1916 | struct nand_chip *nand_chip; |
1810 | int err; | 1917 | int err; |
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
1821 | 1928 | ||
1822 | info->pdev = pdev; | 1929 | info->pdev = pdev; |
1823 | 1930 | ||
1824 | if (dev->of_node) { | 1931 | err = omap_get_dt_info(dev, info); |
1825 | if (omap_get_dt_info(dev, info)) | 1932 | if (err) |
1826 | return -EINVAL; | 1933 | return err; |
1827 | } else { | ||
1828 | pdata = dev_get_platdata(&pdev->dev); | ||
1829 | if (!pdata) { | ||
1830 | dev_err(&pdev->dev, "platform data missing\n"); | ||
1831 | return -EINVAL; | ||
1832 | } | ||
1833 | |||
1834 | info->gpmc_cs = pdata->cs; | ||
1835 | info->reg = pdata->reg; | ||
1836 | info->ecc_opt = pdata->ecc_opt; | ||
1837 | if (pdata->dev_ready) | ||
1838 | dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n"); | ||
1839 | |||
1840 | info->xfer_type = pdata->xfer_type; | ||
1841 | info->devsize = pdata->devsize; | ||
1842 | info->elm_of_node = pdata->elm_of_node; | ||
1843 | info->flash_bbt = pdata->flash_bbt; | ||
1844 | } | ||
1845 | 1934 | ||
1846 | platform_set_drvdata(pdev, info); | ||
1847 | info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); | 1935 | info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); |
1848 | if (!info->ops) { | 1936 | if (!info->ops) { |
1849 | dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); | 1937 | dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); |
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2002 | goto return_error; | 2090 | goto return_error; |
2003 | } | 2091 | } |
2004 | 2092 | ||
2005 | if (!omap2_nand_ecc_check(info, pdata)) { | 2093 | if (!omap2_nand_ecc_check(info)) { |
2006 | err = -EINVAL; | 2094 | err = -EINVAL; |
2007 | goto return_error; | 2095 | goto return_error; |
2008 | } | 2096 | } |
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2044 | nand_chip->ecc.strength = 4; | 2132 | nand_chip->ecc.strength = 4; |
2045 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; | 2133 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; |
2046 | nand_chip->ecc.correct = nand_bch_correct_data; | 2134 | nand_chip->ecc.correct = nand_bch_correct_data; |
2047 | nand_chip->ecc.calculate = omap_calculate_ecc_bch; | 2135 | nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; |
2048 | mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); | 2136 | mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); |
2049 | /* Reserve one byte for the OMAP marker */ | 2137 | /* Reserve one byte for the OMAP marker */ |
2050 | oobbytes_per_step = nand_chip->ecc.bytes + 1; | 2138 | oobbytes_per_step = nand_chip->ecc.bytes + 1; |
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2066 | nand_chip->ecc.strength = 4; | 2154 | nand_chip->ecc.strength = 4; |
2067 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; | 2155 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; |
2068 | nand_chip->ecc.correct = omap_elm_correct_data; | 2156 | nand_chip->ecc.correct = omap_elm_correct_data; |
2069 | nand_chip->ecc.calculate = omap_calculate_ecc_bch; | ||
2070 | nand_chip->ecc.read_page = omap_read_page_bch; | 2157 | nand_chip->ecc.read_page = omap_read_page_bch; |
2071 | nand_chip->ecc.write_page = omap_write_page_bch; | 2158 | nand_chip->ecc.write_page = omap_write_page_bch; |
2159 | nand_chip->ecc.write_subpage = omap_write_subpage_bch; | ||
2072 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); | 2160 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); |
2073 | oobbytes_per_step = nand_chip->ecc.bytes; | 2161 | oobbytes_per_step = nand_chip->ecc.bytes; |
2074 | 2162 | ||
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2087 | nand_chip->ecc.strength = 8; | 2175 | nand_chip->ecc.strength = 8; |
2088 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; | 2176 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; |
2089 | nand_chip->ecc.correct = nand_bch_correct_data; | 2177 | nand_chip->ecc.correct = nand_bch_correct_data; |
2090 | nand_chip->ecc.calculate = omap_calculate_ecc_bch; | 2178 | nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; |
2091 | mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); | 2179 | mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); |
2092 | /* Reserve one byte for the OMAP marker */ | 2180 | /* Reserve one byte for the OMAP marker */ |
2093 | oobbytes_per_step = nand_chip->ecc.bytes + 1; | 2181 | oobbytes_per_step = nand_chip->ecc.bytes + 1; |
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2109 | nand_chip->ecc.strength = 8; | 2197 | nand_chip->ecc.strength = 8; |
2110 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; | 2198 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; |
2111 | nand_chip->ecc.correct = omap_elm_correct_data; | 2199 | nand_chip->ecc.correct = omap_elm_correct_data; |
2112 | nand_chip->ecc.calculate = omap_calculate_ecc_bch; | ||
2113 | nand_chip->ecc.read_page = omap_read_page_bch; | 2200 | nand_chip->ecc.read_page = omap_read_page_bch; |
2114 | nand_chip->ecc.write_page = omap_write_page_bch; | 2201 | nand_chip->ecc.write_page = omap_write_page_bch; |
2202 | nand_chip->ecc.write_subpage = omap_write_subpage_bch; | ||
2115 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); | 2203 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); |
2116 | oobbytes_per_step = nand_chip->ecc.bytes; | 2204 | oobbytes_per_step = nand_chip->ecc.bytes; |
2117 | 2205 | ||
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
2131 | nand_chip->ecc.strength = 16; | 2219 | nand_chip->ecc.strength = 16; |
2132 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; | 2220 | nand_chip->ecc.hwctl = omap_enable_hwecc_bch; |
2133 | nand_chip->ecc.correct = omap_elm_correct_data; | 2221 | nand_chip->ecc.correct = omap_elm_correct_data; |
2134 | nand_chip->ecc.calculate = omap_calculate_ecc_bch; | ||
2135 | nand_chip->ecc.read_page = omap_read_page_bch; | 2222 | nand_chip->ecc.read_page = omap_read_page_bch; |
2136 | nand_chip->ecc.write_page = omap_write_page_bch; | 2223 | nand_chip->ecc.write_page = omap_write_page_bch; |
2224 | nand_chip->ecc.write_subpage = omap_write_subpage_bch; | ||
2137 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); | 2225 | mtd_set_ooblayout(mtd, &omap_ooblayout_ops); |
2138 | oobbytes_per_step = nand_chip->ecc.bytes; | 2226 | oobbytes_per_step = nand_chip->ecc.bytes; |
2139 | 2227 | ||
@@ -2167,10 +2255,9 @@ scan_tail: | |||
2167 | if (err) | 2255 | if (err) |
2168 | goto return_error; | 2256 | goto return_error; |
2169 | 2257 | ||
2170 | if (dev->of_node) | 2258 | err = mtd_device_register(mtd, NULL, 0); |
2171 | mtd_device_register(mtd, NULL, 0); | 2259 | if (err) |
2172 | else | 2260 | goto return_error; |
2173 | mtd_device_register(mtd, pdata->parts, pdata->nr_parts); | ||
2174 | 2261 | ||
2175 | platform_set_drvdata(pdev, mtd); | 2262 | platform_set_drvdata(pdev, mtd); |
2176 | 2263 | ||
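A condensed sketch of the masking idea behind omap_write_subpage_bch() above (illustrative, not part of the patch): the full page is written in one go, the GPMC computes ECC for every sector, and sectors outside the requested subpage get 0xFF ECC bytes. Writing 0xFF programs nothing in NAND, so those OOB cells stay erased and the remaining subpages can still be written later.

	/* Illustrative helper: keep ECC only for the sectors actually written. */
	static void mask_unwritten_ecc(u8 *ecc_calc, int ecc_steps, int ecc_bytes,
				       u32 start_step, u32 end_step)
	{
		int step;

		for (step = 0; step < ecc_steps; step++)
			if (step < start_step || step > end_step)
				memset(ecc_calc + step * ecc_bytes, 0xff, ecc_bytes);
	}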
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 85cff68643e0..90b9a9ccbe60 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/of.h> | 30 | #include <linux/of.h> |
31 | #include <linux/of_device.h> | 31 | #include <linux/of_device.h> |
32 | #include <linux/platform_data/mtd-nand-pxa3xx.h> | 32 | #include <linux/platform_data/mtd-nand-pxa3xx.h> |
33 | #include <linux/mfd/syscon.h> | ||
34 | #include <linux/regmap.h> | ||
33 | 35 | ||
34 | #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) | 36 | #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) |
35 | #define NAND_STOP_DELAY msecs_to_jiffies(40) | 37 | #define NAND_STOP_DELAY msecs_to_jiffies(40) |
@@ -45,6 +47,10 @@ | |||
45 | */ | 47 | */ |
46 | #define INIT_BUFFER_SIZE 2048 | 48 | #define INIT_BUFFER_SIZE 2048 |
47 | 49 | ||
50 | /* System control register and bit to enable NAND on some SoCs */ | ||
51 | #define GENCONF_SOC_DEVICE_MUX 0x208 | ||
52 | #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0) | ||
53 | |||
48 | /* registers and bit definitions */ | 54 | /* registers and bit definitions */ |
49 | #define NDCR (0x00) /* Control register */ | 55 | #define NDCR (0x00) /* Control register */ |
50 | #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ | 56 | #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ |
@@ -174,6 +180,7 @@ enum { | |||
174 | enum pxa3xx_nand_variant { | 180 | enum pxa3xx_nand_variant { |
175 | PXA3XX_NAND_VARIANT_PXA, | 181 | PXA3XX_NAND_VARIANT_PXA, |
176 | PXA3XX_NAND_VARIANT_ARMADA370, | 182 | PXA3XX_NAND_VARIANT_ARMADA370, |
183 | PXA3XX_NAND_VARIANT_ARMADA_8K, | ||
177 | }; | 184 | }; |
178 | 185 | ||
179 | struct pxa3xx_nand_host { | 186 | struct pxa3xx_nand_host { |
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = { | |||
425 | .compatible = "marvell,armada370-nand", | 432 | .compatible = "marvell,armada370-nand", |
426 | .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, | 433 | .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, |
427 | }, | 434 | }, |
435 | { | ||
436 | .compatible = "marvell,armada-8k-nand", | ||
437 | .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K, | ||
438 | }, | ||
428 | {} | 439 | {} |
429 | }; | 440 | }; |
430 | MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); | 441 | MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); |
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | |||
825 | info->retcode = ERR_UNCORERR; | 836 | info->retcode = ERR_UNCORERR; |
826 | if (status & NDSR_CORERR) { | 837 | if (status & NDSR_CORERR) { |
827 | info->retcode = ERR_CORERR; | 838 | info->retcode = ERR_CORERR; |
828 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && | 839 | if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || |
840 | info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) && | ||
829 | info->ecc_bch) | 841 | info->ecc_bch) |
830 | info->ecc_err_cnt = NDSR_ERR_CNT(status); | 842 | info->ecc_err_cnt = NDSR_ERR_CNT(status); |
831 | else | 843 | else |
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | |||
888 | nand_writel(info, NDCB0, info->ndcb2); | 900 | nand_writel(info, NDCB0, info->ndcb2); |
889 | 901 | ||
890 | /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ | 902 | /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ |
891 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) | 903 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || |
904 | info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) | ||
892 | nand_writel(info, NDCB0, info->ndcb3); | 905 | nand_writel(info, NDCB0, info->ndcb3); |
893 | } | 906 | } |
894 | 907 | ||
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
1671 | chip->options |= NAND_BUSWIDTH_16; | 1684 | chip->options |= NAND_BUSWIDTH_16; |
1672 | 1685 | ||
1673 | /* Device detection must be done with ECC disabled */ | 1686 | /* Device detection must be done with ECC disabled */ |
1674 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) | 1687 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || |
1688 | info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) | ||
1675 | nand_writel(info, NDECCCTRL, 0x0); | 1689 | nand_writel(info, NDECCCTRL, 0x0); |
1676 | 1690 | ||
1677 | if (pdata->flash_bbt) | 1691 | if (pdata->flash_bbt) |
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
1709 | * (aka splitted) command handling, | 1723 | * (aka splitted) command handling, |
1710 | */ | 1724 | */ |
1711 | if (mtd->writesize > PAGE_CHUNK_SIZE) { | 1725 | if (mtd->writesize > PAGE_CHUNK_SIZE) { |
1712 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { | 1726 | if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || |
1727 | info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) { | ||
1713 | chip->cmdfunc = nand_cmdfunc_extended; | 1728 | chip->cmdfunc = nand_cmdfunc_extended; |
1714 | } else { | 1729 | } else { |
1715 | dev_err(&info->pdev->dev, | 1730 | dev_err(&info->pdev->dev, |
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev) | |||
1928 | if (!of_id) | 1943 | if (!of_id) |
1929 | return 0; | 1944 | return 0; |
1930 | 1945 | ||
1946 | /* | ||
1947 | * Some SoCs like A7k/A8k need to enable manually the NAND | ||
1948 | * controller to avoid being bootloader dependent. This is done | ||
1949 | * through the use of a single bit in the System Functions registers. | ||
1950 | */ | ||
1951 | if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) { | ||
1952 | struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle( | ||
1953 | pdev->dev.of_node, "marvell,system-controller"); | ||
1954 | u32 reg; | ||
1955 | |||
1956 | if (IS_ERR(sysctrl_base)) | ||
1957 | return PTR_ERR(sysctrl_base); | ||
1958 | |||
1959 | regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, ®); | ||
1960 | reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN; | ||
1961 | regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg); | ||
1962 | } | ||
1963 | |||
1931 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 1964 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
1932 | if (!pdata) | 1965 | if (!pdata) |
1933 | return -ENOMEM; | 1966 | return -ENOMEM; |
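The Armada 8k enable sequence above performs a regmap_read()/regmap_write() pair on GENCONF_SOC_DEVICE_MUX. An equivalent, slightly more compact sketch using regmap_update_bits(), shown only for illustration and not what the patch does:

	/* Illustrative alternative: set the NAND-enable mux bit in one call. */
	static int a8k_enable_nand(struct platform_device *pdev)
	{
		struct regmap *sysctrl_base;

		sysctrl_base = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						"marvell,system-controller");
		if (IS_ERR(sysctrl_base))
			return PTR_ERR(sysctrl_base);

		return regmap_update_bits(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
					  GENCONF_SOC_DEVICE_MUX_NFC_EN,
					  GENCONF_SOC_DEVICE_MUX_NFC_EN);
	}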
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 3baddfc997d1..2656c1ac5646 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/dma/qcom_bam_dma.h> | ||
25 | 26 | ||
26 | /* NANDc reg offsets */ | 27 | /* NANDc reg offsets */ |
27 | #define NAND_FLASH_CMD 0x00 | 28 | #define NAND_FLASH_CMD 0x00 |
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ | |||
199 | */ | 200 | */ |
200 | #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) | 201 | #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) |
201 | 202 | ||
203 | /* Returns the NAND register physical address */ | ||
204 | #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) | ||
205 | |||
206 | /* Returns the dma address for reg read buffer */ | ||
207 | #define reg_buf_dma_addr(chip, vaddr) \ | ||
208 | ((chip)->reg_read_dma + \ | ||
209 | ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) | ||
210 | |||
211 | #define QPIC_PER_CW_CMD_ELEMENTS 32 | ||
202 | #define QPIC_PER_CW_CMD_SGL 32 | 212 | #define QPIC_PER_CW_CMD_SGL 32 |
203 | #define QPIC_PER_CW_DATA_SGL 8 | 213 | #define QPIC_PER_CW_DATA_SGL 8 |
204 | 214 | ||
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ | |||
221 | /* | 231 | /* |
222 | * This data type corresponds to the BAM transaction which will be used for all | 232 | * This data type corresponds to the BAM transaction which will be used for all |
223 | * NAND transfers. | 233 | * NAND transfers. |
234 | * @bam_ce - the array of BAM command elements | ||
224 | * @cmd_sgl - sgl for NAND BAM command pipe | 235 | * @cmd_sgl - sgl for NAND BAM command pipe |
225 | * @data_sgl - sgl for NAND BAM consumer/producer pipe | 236 | * @data_sgl - sgl for NAND BAM consumer/producer pipe |
237 | * @bam_ce_pos - the index in bam_ce which is available for next sgl | ||
238 | * @bam_ce_start - the index in bam_ce which marks the start position ce | ||
239 | * for current sgl. It will be used for size calculation | ||
240 | * for current sgl | ||
226 | * @cmd_sgl_pos - current index in command sgl. | 241 | * @cmd_sgl_pos - current index in command sgl. |
227 | * @cmd_sgl_start - start index in command sgl. | 242 | * @cmd_sgl_start - start index in command sgl. |
228 | * @tx_sgl_pos - current index in data sgl for tx. | 243 | * @tx_sgl_pos - current index in data sgl for tx. |
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ | |||
231 | * @rx_sgl_start - start index in data sgl for rx. | 246 | * @rx_sgl_start - start index in data sgl for rx. |
232 | */ | 247 | */ |
233 | struct bam_transaction { | 248 | struct bam_transaction { |
249 | struct bam_cmd_element *bam_ce; | ||
234 | struct scatterlist *cmd_sgl; | 250 | struct scatterlist *cmd_sgl; |
235 | struct scatterlist *data_sgl; | 251 | struct scatterlist *data_sgl; |
252 | u32 bam_ce_pos; | ||
253 | u32 bam_ce_start; | ||
236 | u32 cmd_sgl_pos; | 254 | u32 cmd_sgl_pos; |
237 | u32 cmd_sgl_start; | 255 | u32 cmd_sgl_start; |
238 | u32 tx_sgl_pos; | 256 | u32 tx_sgl_pos; |
@@ -307,7 +325,8 @@ struct nandc_regs { | |||
307 | * controller | 325 | * controller |
308 | * @dev: parent device | 326 | * @dev: parent device |
309 | * @base: MMIO base | 327 | * @base: MMIO base |
310 | * @base_dma: physical base address of controller registers | 328 | * @base_phys: physical base address of controller registers |
329 | * @base_dma: dma base address of controller registers | ||
311 | * @core_clk: controller clock | 330 | * @core_clk: controller clock |
312 | * @aon_clk: another controller clock | 331 | * @aon_clk: another controller clock |
313 | * | 332 | * |
@@ -340,6 +359,7 @@ struct qcom_nand_controller { | |||
340 | struct device *dev; | 359 | struct device *dev; |
341 | 360 | ||
342 | void __iomem *base; | 361 | void __iomem *base; |
362 | phys_addr_t base_phys; | ||
343 | dma_addr_t base_dma; | 363 | dma_addr_t base_dma; |
344 | 364 | ||
345 | struct clk *core_clk; | 365 | struct clk *core_clk; |
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) | |||
462 | 482 | ||
463 | bam_txn_size = | 483 | bam_txn_size = |
464 | sizeof(*bam_txn) + num_cw * | 484 | sizeof(*bam_txn) + num_cw * |
465 | ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + | 485 | ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + |
486 | (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + | ||
466 | (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); | 487 | (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); |
467 | 488 | ||
468 | bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); | 489 | bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); |
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) | |||
472 | bam_txn = bam_txn_buf; | 493 | bam_txn = bam_txn_buf; |
473 | bam_txn_buf += sizeof(*bam_txn); | 494 | bam_txn_buf += sizeof(*bam_txn); |
474 | 495 | ||
496 | bam_txn->bam_ce = bam_txn_buf; | ||
497 | bam_txn_buf += | ||
498 | sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; | ||
499 | |||
475 | bam_txn->cmd_sgl = bam_txn_buf; | 500 | bam_txn->cmd_sgl = bam_txn_buf; |
476 | bam_txn_buf += | 501 | bam_txn_buf += |
477 | sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; | 502 | sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; |
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc) | |||
489 | if (!nandc->props->is_bam) | 514 | if (!nandc->props->is_bam) |
490 | return; | 515 | return; |
491 | 516 | ||
517 | bam_txn->bam_ce_pos = 0; | ||
518 | bam_txn->bam_ce_start = 0; | ||
492 | bam_txn->cmd_sgl_pos = 0; | 519 | bam_txn->cmd_sgl_pos = 0; |
493 | bam_txn->cmd_sgl_start = 0; | 520 | bam_txn->cmd_sgl_start = 0; |
494 | bam_txn->tx_sgl_pos = 0; | 521 | bam_txn->tx_sgl_pos = 0; |
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, | |||
734 | } | 761 | } |
735 | 762 | ||
736 | /* | 763 | /* |
764 | * Prepares the command descriptor for BAM DMA which will be used for NAND | ||
765 | * register reads and writes. The command descriptor requires the command | ||
766 | * to be formed in command element type so this function uses the command | ||
767 | * element from bam transaction ce array and fills the same with required | ||
768 | * data. A single SGL can contain multiple command elements so | ||
769 | * NAND_BAM_NEXT_SGL will be used for starting the separate SGL | ||
770 | * after the current command element. | ||
771 | */ | ||
772 | static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, | ||
773 | int reg_off, const void *vaddr, | ||
774 | int size, unsigned int flags) | ||
775 | { | ||
776 | int bam_ce_size; | ||
777 | int i, ret; | ||
778 | struct bam_cmd_element *bam_ce_buffer; | ||
779 | struct bam_transaction *bam_txn = nandc->bam_txn; | ||
780 | |||
781 | bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; | ||
782 | |||
783 | /* fill the command desc */ | ||
784 | for (i = 0; i < size; i++) { | ||
785 | if (read) | ||
786 | bam_prep_ce(&bam_ce_buffer[i], | ||
787 | nandc_reg_phys(nandc, reg_off + 4 * i), | ||
788 | BAM_READ_COMMAND, | ||
789 | reg_buf_dma_addr(nandc, | ||
790 | (__le32 *)vaddr + i)); | ||
791 | else | ||
792 | bam_prep_ce_le32(&bam_ce_buffer[i], | ||
793 | nandc_reg_phys(nandc, reg_off + 4 * i), | ||
794 | BAM_WRITE_COMMAND, | ||
795 | *((__le32 *)vaddr + i)); | ||
796 | } | ||
797 | |||
798 | bam_txn->bam_ce_pos += size; | ||
799 | |||
800 | /* use the separate sgl after this command */ | ||
801 | if (flags & NAND_BAM_NEXT_SGL) { | ||
802 | bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; | ||
803 | bam_ce_size = (bam_txn->bam_ce_pos - | ||
804 | bam_txn->bam_ce_start) * | ||
805 | sizeof(struct bam_cmd_element); | ||
806 | sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], | ||
807 | bam_ce_buffer, bam_ce_size); | ||
808 | bam_txn->cmd_sgl_pos++; | ||
809 | bam_txn->bam_ce_start = bam_txn->bam_ce_pos; | ||
810 | |||
811 | if (flags & NAND_BAM_NWD) { | ||
812 | ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, | ||
813 | DMA_PREP_FENCE | | ||
814 | DMA_PREP_CMD); | ||
815 | if (ret) | ||
816 | return ret; | ||
817 | } | ||
818 | } | ||
819 | |||
820 | return 0; | ||
821 | } | ||
822 | |||
823 | /* | ||
737 | * Prepares the data descriptor for BAM DMA which will be used for NAND | 824 | * Prepares the data descriptor for BAM DMA which will be used for NAND |
738 | * data reads and writes. | 825 | * data reads and writes. |
739 | */ | 826 | */ |
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, | |||
851 | { | 938 | { |
852 | bool flow_control = false; | 939 | bool flow_control = false; |
853 | void *vaddr; | 940 | void *vaddr; |
854 | int size; | ||
855 | 941 | ||
856 | if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) | 942 | vaddr = nandc->reg_read_buf + nandc->reg_read_pos; |
857 | flow_control = true; | 943 | nandc->reg_read_pos += num_regs; |
858 | 944 | ||
859 | if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) | 945 | if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) |
860 | first = dev_cmd_reg_addr(nandc, first); | 946 | first = dev_cmd_reg_addr(nandc, first); |
861 | 947 | ||
862 | size = num_regs * sizeof(u32); | 948 | if (nandc->props->is_bam) |
863 | vaddr = nandc->reg_read_buf + nandc->reg_read_pos; | 949 | return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, |
864 | nandc->reg_read_pos += num_regs; | 950 | num_regs, flags); |
951 | |||
952 | if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) | ||
953 | flow_control = true; | ||
865 | 954 | ||
866 | return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control); | 955 | return prep_adm_dma_desc(nandc, true, first, vaddr, |
956 | num_regs * sizeof(u32), flow_control); | ||
867 | } | 957 | } |
868 | 958 | ||
869 | /* | 959 | /* |
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, | |||
880 | bool flow_control = false; | 970 | bool flow_control = false; |
881 | struct nandc_regs *regs = nandc->regs; | 971 | struct nandc_regs *regs = nandc->regs; |
882 | void *vaddr; | 972 | void *vaddr; |
883 | int size; | ||
884 | 973 | ||
885 | vaddr = offset_to_nandc_reg(regs, first); | 974 | vaddr = offset_to_nandc_reg(regs, first); |
886 | 975 | ||
887 | if (first == NAND_FLASH_CMD) | ||
888 | flow_control = true; | ||
889 | |||
890 | if (first == NAND_ERASED_CW_DETECT_CFG) { | 976 | if (first == NAND_ERASED_CW_DETECT_CFG) { |
891 | if (flags & NAND_ERASED_CW_SET) | 977 | if (flags & NAND_ERASED_CW_SET) |
892 | vaddr = ®s->erased_cw_detect_cfg_set; | 978 | vaddr = ®s->erased_cw_detect_cfg_set; |
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, | |||
903 | if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) | 989 | if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) |
904 | first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); | 990 | first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); |
905 | 991 | ||
906 | size = num_regs * sizeof(u32); | 992 | if (nandc->props->is_bam) |
993 | return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, | ||
994 | num_regs, flags); | ||
995 | |||
996 | if (first == NAND_FLASH_CMD) | ||
997 | flow_control = true; | ||
907 | 998 | ||
908 | return prep_adm_dma_desc(nandc, false, first, vaddr, size, | 999 | return prep_adm_dma_desc(nandc, false, first, vaddr, |
909 | flow_control); | 1000 | num_regs * sizeof(u32), flow_control); |
910 | } | 1001 | } |
911 | 1002 | ||
912 | /* | 1003 | /* |
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc) | |||
1170 | } | 1261 | } |
1171 | 1262 | ||
1172 | if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { | 1263 | if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { |
1173 | r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0); | 1264 | r = prepare_bam_async_desc(nandc, nandc->cmd_chan, |
1265 | DMA_PREP_CMD); | ||
1174 | if (r) | 1266 | if (r) |
1175 | return r; | 1267 | return r; |
1176 | } | 1268 | } |
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) | |||
2705 | if (IS_ERR(nandc->base)) | 2797 | if (IS_ERR(nandc->base)) |
2706 | return PTR_ERR(nandc->base); | 2798 | return PTR_ERR(nandc->base); |
2707 | 2799 | ||
2800 | nandc->base_phys = res->start; | ||
2708 | nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); | 2801 | nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); |
2709 | 2802 | ||
2710 | nandc->core_clk = devm_clk_get(dev, "core"); | 2803 | nandc->core_clk = devm_clk_get(dev, "core"); |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index e7f3c98487e6..3c5008a4f5f3 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match); | |||
1094 | 1094 | ||
1095 | static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) | 1095 | static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) |
1096 | { | 1096 | { |
1097 | const struct of_device_id *match; | 1097 | const struct flctl_soc_config *config; |
1098 | struct flctl_soc_config *config; | ||
1099 | struct sh_flctl_platform_data *pdata; | 1098 | struct sh_flctl_platform_data *pdata; |
1100 | 1099 | ||
1101 | match = of_match_device(of_flctl_match, dev); | 1100 | config = of_device_get_match_data(dev); |
1102 | if (match) | 1101 | if (!config) { |
1103 | config = (struct flctl_soc_config *)match->data; | ||
1104 | else { | ||
1105 | dev_err(dev, "%s: no OF configuration attached\n", __func__); | 1102 | dev_err(dev, "%s: no OF configuration attached\n", __func__); |
1106 | return NULL; | 1103 | return NULL; |
1107 | } | 1104 | } |
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig index d206b3c533bc..ee5ab994132f 100644 --- a/drivers/mtd/parsers/Kconfig +++ b/drivers/mtd/parsers/Kconfig | |||
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX | |||
6 | may contain up to 3/4 partitions (depending on the version). | 6 | may contain up to 3/4 partitions (depending on the version). |
7 | This driver will parse TRX header and report at least two partitions: | 7 | This driver will parse TRX header and report at least two partitions: |
8 | kernel and rootfs. | 8 | kernel and rootfs. |
9 | |||
10 | config MTD_SHARPSL_PARTS | ||
11 | tristate "Sharp SL Series NAND flash partition parser" | ||
12 | depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST | ||
13 | help | ||
14 | This provides the read-only FTL logic needed to read the partition | ||
15 | table from the NAND flash of the Sharp SL Series (Zaurus) models, plus | ||
16 | the MTD partition parser built on top of it. | ||
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile index 4d9024e0be3b..5b1bcc3d90d9 100644 --- a/drivers/mtd/parsers/Makefile +++ b/drivers/mtd/parsers/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o | 1 | obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o |
2 | obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o | ||
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c new file mode 100644 index 000000000000..5fe0079ea5ed --- /dev/null +++ b/drivers/mtd/parsers/sharpslpart.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /* | ||
2 | * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL | ||
3 | * for logical addressing, as used on the PXA models of the SHARP SL Series. | ||
4 | * | ||
5 | * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com> | ||
6 | * | ||
7 | * Based on SHARP GPL 2.4 sources: | ||
8 | * http://support.ezaurus.com/developer/source/source_dl.asp | ||
9 | * drivers/mtd/nand/sharp_sl_logical.c | ||
10 | * linux/include/asm-arm/sharp_nand_logical.h | ||
11 | * | ||
12 | * Copyright (C) 2002 SHARP | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/bitops.h> | ||
31 | #include <linux/sizes.h> | ||
32 | #include <linux/mtd/mtd.h> | ||
33 | #include <linux/mtd/partitions.h> | ||
34 | |||
35 | /* oob structure */ | ||
36 | #define NAND_NOOB_LOGADDR_00 8 | ||
37 | #define NAND_NOOB_LOGADDR_01 9 | ||
38 | #define NAND_NOOB_LOGADDR_10 10 | ||
39 | #define NAND_NOOB_LOGADDR_11 11 | ||
40 | #define NAND_NOOB_LOGADDR_20 12 | ||
41 | #define NAND_NOOB_LOGADDR_21 13 | ||
42 | |||
43 | #define BLOCK_IS_RESERVED 0xffff | ||
44 | #define BLOCK_UNMASK_COMPLEMENT 1 | ||
45 | |||
46 | /* factory defaults */ | ||
47 | #define SHARPSL_NAND_PARTS 3 | ||
48 | #define SHARPSL_FTL_PART_SIZE (7 * SZ_1M) | ||
49 | #define SHARPSL_PARTINFO1_LADDR 0x00060000 | ||
50 | #define SHARPSL_PARTINFO2_LADDR 0x00064000 | ||
51 | |||
52 | #define BOOT_MAGIC 0x424f4f54 | ||
53 | #define FSRO_MAGIC 0x4653524f | ||
54 | #define FSRW_MAGIC 0x46535257 | ||
55 | |||
56 | /** | ||
57 | * struct sharpsl_ftl - Sharp FTL Logical Table | ||
58 | * @logmax: number of logical blocks | ||
59 | * @log2phy: the logical-to-physical table | ||
60 | * | ||
61 | * Structure containing the logical-to-physical translation table | ||
62 | * used by the SHARP SL FTL. | ||
63 | */ | ||
64 | struct sharpsl_ftl { | ||
65 | unsigned int logmax; | ||
66 | unsigned int *log2phy; | ||
67 | }; | ||
68 | |||
69 | /* verify that the OOB bytes 8 to 15 are free and available for the FTL */ | ||
70 | static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd) | ||
71 | { | ||
72 | u8 freebytes = 0; | ||
73 | int section = 0; | ||
74 | |||
75 | while (true) { | ||
76 | struct mtd_oob_region oobfree = { }; | ||
77 | int ret, i; | ||
78 | |||
79 | ret = mtd_ooblayout_free(mtd, section++, &oobfree); | ||
80 | if (ret) | ||
81 | break; | ||
82 | |||
83 | if (!oobfree.length || oobfree.offset > 15 || | ||
84 | (oobfree.offset + oobfree.length) < 8) | ||
85 | continue; | ||
86 | |||
87 | i = oobfree.offset >= 8 ? oobfree.offset : 8; | ||
88 | for (; i < oobfree.offset + oobfree.length && i < 16; i++) | ||
89 | freebytes |= BIT(i - 8); | ||
90 | |||
91 | if (freebytes == 0xff) | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | return -ENOTSUPP; | ||
96 | } | ||
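The loop above walks every free OOB region reported by mtd_ooblayout_free() and sets one bit in freebytes for each of OOB bytes 8..15 that a region covers; only when all eight bits are set (0xff) can the FTL metadata live there. A minimal user-space sketch of that accumulation, using two made-up free regions instead of a real OOB layout:

        /* Stand-alone sketch, not kernel code; the free regions are illustrative. */
        #include <stdio.h>
        #include <stdint.h>

        struct region { int offset; int length; };

        int main(void)
        {
                struct region oobfree[] = { { 6, 4 }, { 10, 8 } };  /* hypothetical layout */
                uint8_t freebytes = 0;
                unsigned int s;
                int i;

                for (s = 0; s < sizeof(oobfree) / sizeof(oobfree[0]); s++) {
                        i = oobfree[s].offset >= 8 ? oobfree[s].offset : 8;
                        for (; i < oobfree[s].offset + oobfree[s].length && i < 16; i++)
                                freebytes |= 1u << (i - 8);         /* OOB byte i is free */
                }

                /* 0xff means all of bytes 8..15 are free, so the FTL data fits */
                printf("freebytes = 0x%02x -> %s\n", (unsigned int)freebytes,
                       freebytes == 0xff ? "usable" : "not usable");
                return 0;
        }

Here the first region covers bytes 8..9 (bits 0..1) and the second covers bytes 10..15 (bits 2..7), so freebytes ends up 0xff and the layout would be accepted.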
97 | |||
98 | static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf) | ||
99 | { | ||
100 | struct mtd_oob_ops ops = { }; | ||
101 | int ret; | ||
102 | |||
103 | ops.mode = MTD_OPS_PLACE_OOB; | ||
104 | ops.ooblen = mtd->oobsize; | ||
105 | ops.oobbuf = buf; | ||
106 | |||
107 | ret = mtd_read_oob(mtd, offs, &ops); | ||
108 | if (ret != 0 || mtd->oobsize != ops.oobretlen) | ||
109 | return -1; | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * The logical block number assigned to a physical block is stored in the OOB | ||
116 | * of the first page, in 3 16-bit copies with the following layout: | ||
117 | * | ||
118 | * 01234567 89abcdef | ||
119 | * -------- -------- | ||
120 | * ECC BB xyxyxy | ||
121 | * | ||
122 | * When reading, we check that the first two copies agree. | ||
123 | * If they do not, the remaining pairs are tried in turn, as listed below. | ||
124 | * The reserved value 0xffff means the block is kept for wear leveling. | ||
125 | * | ||
126 | * 01234567 89abcdef | ||
127 | * -------- -------- | ||
128 | * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9 | ||
129 | * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11 | ||
130 | * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13 | ||
131 | */ | ||
132 | static int sharpsl_nand_get_logical_num(u8 *oob) | ||
133 | { | ||
134 | u16 us; | ||
135 | int good0, good1; | ||
136 | |||
137 | if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] && | ||
138 | oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) { | ||
139 | good0 = NAND_NOOB_LOGADDR_00; | ||
140 | good1 = NAND_NOOB_LOGADDR_01; | ||
141 | } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] && | ||
142 | oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) { | ||
143 | good0 = NAND_NOOB_LOGADDR_10; | ||
144 | good1 = NAND_NOOB_LOGADDR_11; | ||
145 | } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] && | ||
146 | oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) { | ||
147 | good0 = NAND_NOOB_LOGADDR_20; | ||
148 | good1 = NAND_NOOB_LOGADDR_21; | ||
149 | } else { | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | |||
153 | us = oob[good0] | oob[good1] << 8; | ||
154 | |||
155 | /* parity check */ | ||
156 | if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT) | ||
157 | return -EINVAL; | ||
158 | |||
159 | /* reserved */ | ||
160 | if (us == BLOCK_IS_RESERVED) | ||
161 | return BLOCK_IS_RESERVED; | ||
162 | |||
163 | return (us >> 1) & GENMASK(9, 0); | ||
164 | } | ||
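A worked example of the decode above, as a stand-alone user-space sketch with made-up OOB contents (copies 0 and 1 agree, copy 2 is erased): the agreeing 16-bit word is 0x0204, its population count is even, so the parity bit is dropped and logical block 258 comes out.

        /* Stand-alone sketch, not kernel code; the OOB bytes are made up. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* OOB bytes 8..13: copies 0 and 1 hold 0x04 0x02, copy 2 is erased */
                uint8_t oob[16] = { [8] = 0x04, [9] = 0x02, [10] = 0x04, [11] = 0x02,
                                    [12] = 0xff, [13] = 0xff };
                uint16_t us;

                if (oob[8] != oob[10] || oob[9] != oob[11])
                        return 1;               /* in this example the first pair agrees */

                us = oob[8] | oob[9] << 8;      /* us = 0x0204 */

                /* __builtin_popcount stands in for the kernel's hweight16() */
                if (__builtin_popcount(us) & 1)
                        return 1;               /* odd parity -> invalid */

                /* drop the parity bit, keep the 10-bit logical block number */
                printf("us = 0x%04x -> logical block %d\n", us, (us >> 1) & 0x3ff);
                return 0;                       /* prints: logical block 258 */
        }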
165 | |||
166 | static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl) | ||
167 | { | ||
168 | unsigned int block_num, log_num, phymax; | ||
169 | loff_t block_adr; | ||
170 | u8 *oob; | ||
171 | int i, ret; | ||
172 | |||
173 | oob = kzalloc(mtd->oobsize, GFP_KERNEL); | ||
174 | if (!oob) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd); | ||
178 | |||
179 | /* FTL reserves 5% of the blocks + 1 spare */ | ||
180 | ftl->logmax = ((phymax * 95) / 100) - 1; | ||
181 | |||
182 | ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy), | ||
183 | GFP_KERNEL); | ||
184 | if (!ftl->log2phy) { | ||
185 | ret = -ENOMEM; | ||
186 | goto exit; | ||
187 | } | ||
188 | |||
189 | /* initialize ftl->log2phy */ | ||
190 | for (i = 0; i < ftl->logmax; i++) | ||
191 | ftl->log2phy[i] = UINT_MAX; | ||
192 | |||
193 | /* create physical-logical table */ | ||
194 | for (block_num = 0; block_num < phymax; block_num++) { | ||
195 | block_adr = block_num * mtd->erasesize; | ||
196 | |||
197 | if (mtd_block_isbad(mtd, block_adr)) | ||
198 | continue; | ||
199 | |||
200 | if (sharpsl_nand_read_oob(mtd, block_adr, oob)) | ||
201 | continue; | ||
202 | |||
203 | /* get logical block */ | ||
204 | log_num = sharpsl_nand_get_logical_num(oob); | ||
205 | |||
206 | /* cut-off errors and skip the out-of-range values */ | ||
207 | if (log_num > 0 && log_num < ftl->logmax) { | ||
208 | if (ftl->log2phy[log_num] == UINT_MAX) | ||
209 | ftl->log2phy[log_num] = block_num; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n", | ||
214 | phymax, ftl->logmax, phymax - ftl->logmax); | ||
215 | |||
216 | ret = 0; | ||
217 | exit: | ||
218 | kfree(oob); | ||
219 | return ret; | ||
220 | } | ||
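To put numbers on the sizing above: phymax counts the erase blocks inside the fixed 7 MiB FTL window, and logmax keeps 95% of them minus one spare. Assuming the 16 KiB (0x4000) erase size seen in the SL-C860 sample later in this file, phymax = 7 MiB / 16 KiB = 448 and logmax = 448 * 95 / 100 - 1 = 424, so 24 physical blocks stay reserved for bad-block replacement and wear leveling, which is exactly the breakdown the pr_info() above would print.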
221 | |||
222 | void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl) | ||
223 | { | ||
224 | kfree(ftl->log2phy); | ||
225 | } | ||
226 | |||
227 | static int sharpsl_nand_read_laddr(struct mtd_info *mtd, | ||
228 | loff_t from, | ||
229 | size_t len, | ||
230 | void *buf, | ||
231 | struct sharpsl_ftl *ftl) | ||
232 | { | ||
233 | unsigned int log_num, final_log_num; | ||
234 | unsigned int block_num; | ||
235 | loff_t block_adr; | ||
236 | loff_t block_ofs; | ||
237 | size_t retlen; | ||
238 | int err; | ||
239 | |||
240 | log_num = mtd_div_by_eb((u32)from, mtd); | ||
241 | final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd); | ||
242 | |||
243 | if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num) | ||
244 | return -EINVAL; | ||
245 | |||
246 | block_num = ftl->log2phy[log_num]; | ||
247 | block_adr = block_num * mtd->erasesize; | ||
248 | block_ofs = mtd_mod_by_eb((u32)from, mtd); | ||
249 | |||
250 | err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf); | ||
251 | /* Ignore corrected ECC errors */ | ||
252 | if (mtd_is_bitflip(err)) | ||
253 | err = 0; | ||
254 | |||
255 | if (!err && retlen != len) | ||
256 | err = -EIO; | ||
257 | |||
258 | if (err) | ||
259 | pr_err("sharpslpart: error, read failed at %#llx\n", | ||
260 | block_adr + block_ofs); | ||
261 | |||
262 | return err; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * MTD Partition Parser | ||
267 | * | ||
268 | * Sample values read from SL-C860 | ||
269 | * | ||
270 | * # cat /proc/mtd | ||
271 | * dev: size erasesize name | ||
272 | * mtd0: 006d0000 00020000 "Filesystem" | ||
273 | * mtd1: 00700000 00004000 "smf" | ||
274 | * mtd2: 03500000 00004000 "root" | ||
275 | * mtd3: 04400000 00004000 "home" | ||
276 | * | ||
277 | * PARTITIONINFO1 | ||
278 | * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT.... | ||
279 | * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO.... | ||
280 | * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW.... | ||
281 | */ | ||
282 | struct sharpsl_nand_partinfo { | ||
283 | __le32 start; | ||
284 | __le32 end; | ||
285 | __be32 magic; | ||
286 | u32 reserved; | ||
287 | }; | ||
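The first hexdump row of PARTITIONINFO1 quoted above maps onto this structure as start = 0x00000000 and end = 0x00700000 (both little-endian), magic = 0x424f4f54 ("BOOT", big-endian) and four reserved bytes; the next two rows describe FSRO (0x00700000..0x03c00000) and FSRW (0x03c00000..0x04000000) the same way. The stored FSRW end of 64 MiB is what the "fixup" in sharpsl_nand_read_partinfo() below replaces with master->size, which is how the sample SL-C860 ends up with the 0x04400000-byte "home" partition shown in its /proc/mtd listing.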
288 | |||
289 | static int sharpsl_nand_read_partinfo(struct mtd_info *master, | ||
290 | loff_t from, | ||
291 | size_t len, | ||
292 | struct sharpsl_nand_partinfo *buf, | ||
293 | struct sharpsl_ftl *ftl) | ||
294 | { | ||
295 | int ret; | ||
296 | |||
297 | ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl); | ||
298 | if (ret) | ||
299 | return ret; | ||
300 | |||
301 | /* check for magics */ | ||
302 | if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC || | ||
303 | be32_to_cpu(buf[1].magic) != FSRO_MAGIC || | ||
304 | be32_to_cpu(buf[2].magic) != FSRW_MAGIC) { | ||
305 | pr_err("sharpslpart: magic values mismatch\n"); | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | |||
309 | /* fixup for hardcoded value 64 MiB (for older models) */ | ||
310 | buf[2].end = cpu_to_le32(master->size); | ||
311 | |||
312 | /* extra sanity check */ | ||
313 | if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) || | ||
314 | le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) || | ||
315 | le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) || | ||
316 | le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) || | ||
317 | le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) { | ||
318 | pr_err("sharpslpart: partition sizes mismatch\n"); | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static int sharpsl_parse_mtd_partitions(struct mtd_info *master, | ||
326 | const struct mtd_partition **pparts, | ||
327 | struct mtd_part_parser_data *data) | ||
328 | { | ||
329 | struct sharpsl_ftl ftl; | ||
330 | struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS]; | ||
331 | struct mtd_partition *sharpsl_nand_parts; | ||
332 | int err; | ||
333 | |||
334 | /* check that OOB bytes 8 to 15 used by the FTL are actually free */ | ||
335 | err = sharpsl_nand_check_ooblayout(master); | ||
336 | if (err) | ||
337 | return err; | ||
338 | |||
339 | /* init logical mgmt (FTL) */ | ||
340 | err = sharpsl_nand_init_ftl(master, &ftl); | ||
341 | if (err) | ||
342 | return err; | ||
343 | |||
344 | /* read and validate first partition table */ | ||
345 | pr_info("sharpslpart: try reading first partition table\n"); | ||
346 | err = sharpsl_nand_read_partinfo(master, | ||
347 | SHARPSL_PARTINFO1_LADDR, | ||
348 | sizeof(buf), buf, &ftl); | ||
349 | if (err) { | ||
350 | /* fallback: read second partition table */ | ||
351 | pr_warn("sharpslpart: first partition table is invalid, retry using the second\n"); | ||
352 | err = sharpsl_nand_read_partinfo(master, | ||
353 | SHARPSL_PARTINFO2_LADDR, | ||
354 | sizeof(buf), buf, &ftl); | ||
355 | } | ||
356 | |||
357 | /* cleanup logical mgmt (FTL) */ | ||
358 | sharpsl_nand_cleanup_ftl(&ftl); | ||
359 | |||
360 | if (err) { | ||
361 | pr_err("sharpslpart: both partition tables are invalid\n"); | ||
362 | return err; | ||
363 | } | ||
364 | |||
365 | sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) * | ||
366 | SHARPSL_NAND_PARTS, GFP_KERNEL); | ||
367 | if (!sharpsl_nand_parts) | ||
368 | return -ENOMEM; | ||
369 | |||
370 | /* original names */ | ||
371 | sharpsl_nand_parts[0].name = "smf"; | ||
372 | sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start); | ||
373 | sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) - | ||
374 | le32_to_cpu(buf[0].start); | ||
375 | |||
376 | sharpsl_nand_parts[1].name = "root"; | ||
377 | sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start); | ||
378 | sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) - | ||
379 | le32_to_cpu(buf[1].start); | ||
380 | |||
381 | sharpsl_nand_parts[2].name = "home"; | ||
382 | sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start); | ||
383 | sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) - | ||
384 | le32_to_cpu(buf[2].start); | ||
385 | |||
386 | *pparts = sharpsl_nand_parts; | ||
387 | return SHARPSL_NAND_PARTS; | ||
388 | } | ||
389 | |||
390 | static struct mtd_part_parser sharpsl_mtd_parser = { | ||
391 | .parse_fn = sharpsl_parse_mtd_partitions, | ||
392 | .name = "sharpslpart", | ||
393 | }; | ||
394 | module_mtd_part_parser(sharpsl_mtd_parser); | ||
395 | |||
396 | MODULE_LICENSE("GPL"); | ||
397 | MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>"); | ||
398 | MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series"); | ||
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 69c638dd0484..89da88e59121 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig | |||
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI | |||
50 | 50 | ||
51 | config SPI_CADENCE_QUADSPI | 51 | config SPI_CADENCE_QUADSPI |
52 | tristate "Cadence Quad SPI controller" | 52 | tristate "Cadence Quad SPI controller" |
53 | depends on OF && (ARM || COMPILE_TEST) | 53 | depends on OF && (ARM || ARM64 || COMPILE_TEST) |
54 | help | 54 | help |
55 | Enable support for the Cadence Quad SPI Flash controller. | 55 | Enable support for the Cadence Quad SPI Flash controller. |
56 | 56 | ||
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI | |||
90 | tristate | 90 | tristate |
91 | 91 | ||
92 | config SPI_INTEL_SPI_PCI | 92 | config SPI_INTEL_SPI_PCI |
93 | tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT | 93 | tristate "Intel PCH/PCU SPI flash PCI driver" |
94 | depends on X86 && PCI | 94 | depends on X86 && PCI |
95 | select SPI_INTEL_SPI | 95 | select SPI_INTEL_SPI |
96 | help | 96 | help |
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI | |||
106 | will be called intel-spi-pci. | 106 | will be called intel-spi-pci. |
107 | 107 | ||
108 | config SPI_INTEL_SPI_PLATFORM | 108 | config SPI_INTEL_SPI_PLATFORM |
109 | tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT | 109 | tristate "Intel PCH/PCU SPI flash platform driver" |
110 | depends on X86 | 110 | depends on X86 |
111 | select SPI_INTEL_SPI | 111 | select SPI_INTEL_SPI |
112 | help | 112 | help |
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 53c7d8e0327a..75a2bc447a99 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/of_device.h> | 31 | #include <linux/of_device.h> |
32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
33 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
34 | #include <linux/pm_runtime.h> | ||
34 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
35 | #include <linux/spi/spi.h> | 36 | #include <linux/spi/spi.h> |
36 | #include <linux/timer.h> | 37 | #include <linux/timer.h> |
@@ -38,6 +39,9 @@ | |||
38 | #define CQSPI_NAME "cadence-qspi" | 39 | #define CQSPI_NAME "cadence-qspi" |
39 | #define CQSPI_MAX_CHIPSELECT 16 | 40 | #define CQSPI_MAX_CHIPSELECT 16 |
40 | 41 | ||
42 | /* Quirks */ | ||
43 | #define CQSPI_NEEDS_WR_DELAY BIT(0) | ||
44 | |||
41 | struct cqspi_st; | 45 | struct cqspi_st; |
42 | 46 | ||
43 | struct cqspi_flash_pdata { | 47 | struct cqspi_flash_pdata { |
@@ -75,7 +79,9 @@ struct cqspi_st { | |||
75 | bool is_decoded_cs; | 79 | bool is_decoded_cs; |
76 | u32 fifo_depth; | 80 | u32 fifo_depth; |
77 | u32 fifo_width; | 81 | u32 fifo_width; |
82 | bool rclk_en; | ||
78 | u32 trigger_address; | 83 | u32 trigger_address; |
84 | u32 wr_delay; | ||
79 | struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; | 85 | struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; |
80 | }; | 86 | }; |
81 | 87 | ||
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, | |||
608 | reinit_completion(&cqspi->transfer_complete); | 614 | reinit_completion(&cqspi->transfer_complete); |
609 | writel(CQSPI_REG_INDIRECTWR_START_MASK, | 615 | writel(CQSPI_REG_INDIRECTWR_START_MASK, |
610 | reg_base + CQSPI_REG_INDIRECTWR); | 616 | reg_base + CQSPI_REG_INDIRECTWR); |
617 | /* | ||
618 | * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access | ||
619 | * Controller programming sequence, a couple of QSPI_REF_CLK | ||
620 | * cycles of delay are required for the above bit to | ||
621 | * be internally synchronized by the QSPI module. Provide 5 | ||
622 | * cycles of delay. | ||
623 | */ | ||
624 | if (cqspi->wr_delay) | ||
625 | ndelay(cqspi->wr_delay); | ||
611 | 626 | ||
612 | while (remaining > 0) { | 627 | while (remaining > 0) { |
613 | write_bytes = remaining > page_size ? page_size : remaining; | 628 | write_bytes = remaining > page_size ? page_size : remaining; |
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) | |||
775 | } | 790 | } |
776 | 791 | ||
777 | static void cqspi_readdata_capture(struct cqspi_st *cqspi, | 792 | static void cqspi_readdata_capture(struct cqspi_st *cqspi, |
778 | const unsigned int bypass, | 793 | const bool bypass, |
779 | const unsigned int delay) | 794 | const unsigned int delay) |
780 | { | 795 | { |
781 | void __iomem *reg_base = cqspi->iobase; | 796 | void __iomem *reg_base = cqspi->iobase; |
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor) | |||
839 | cqspi->sclk = sclk; | 854 | cqspi->sclk = sclk; |
840 | cqspi_config_baudrate_div(cqspi); | 855 | cqspi_config_baudrate_div(cqspi); |
841 | cqspi_delay(nor); | 856 | cqspi_delay(nor); |
842 | cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); | 857 | cqspi_readdata_capture(cqspi, !cqspi->rclk_en, |
858 | f_pdata->read_delay); | ||
843 | } | 859 | } |
844 | 860 | ||
845 | if (switch_cs || switch_ck) | 861 | if (switch_cs || switch_ck) |
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev) | |||
1036 | return -ENXIO; | 1052 | return -ENXIO; |
1037 | } | 1053 | } |
1038 | 1054 | ||
1055 | cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); | ||
1056 | |||
1039 | return 0; | 1057 | return 0; |
1040 | } | 1058 | } |
1041 | 1059 | ||
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev) | |||
1156 | struct cqspi_st *cqspi; | 1174 | struct cqspi_st *cqspi; |
1157 | struct resource *res; | 1175 | struct resource *res; |
1158 | struct resource *res_ahb; | 1176 | struct resource *res_ahb; |
1177 | unsigned long data; | ||
1159 | int ret; | 1178 | int ret; |
1160 | int irq; | 1179 | int irq; |
1161 | 1180 | ||
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev) | |||
1206 | return -ENXIO; | 1225 | return -ENXIO; |
1207 | } | 1226 | } |
1208 | 1227 | ||
1228 | pm_runtime_enable(dev); | ||
1229 | ret = pm_runtime_get_sync(dev); | ||
1230 | if (ret < 0) { | ||
1231 | pm_runtime_put_noidle(dev); | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
1209 | ret = clk_prepare_enable(cqspi->clk); | 1235 | ret = clk_prepare_enable(cqspi->clk); |
1210 | if (ret) { | 1236 | if (ret) { |
1211 | dev_err(dev, "Cannot enable QSPI clock.\n"); | 1237 | dev_err(dev, "Cannot enable QSPI clock.\n"); |
1212 | return ret; | 1238 | goto probe_clk_failed; |
1213 | } | 1239 | } |
1214 | 1240 | ||
1215 | cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); | 1241 | cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); |
1242 | data = (unsigned long)of_device_get_match_data(dev); | ||
1243 | if (data & CQSPI_NEEDS_WR_DELAY) | ||
1244 | cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, | ||
1245 | cqspi->master_ref_clk_hz); | ||
1216 | 1246 | ||
1217 | ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, | 1247 | ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, |
1218 | pdev->name, cqspi); | 1248 | pdev->name, cqspi); |
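The wr_delay computed above converts the "5 QSPI_REF_CLK cycles" from the TRM note into nanoseconds for ndelay(): DIV_ROUND_UP(NSEC_PER_SEC, master_ref_clk_hz) is one reference-clock period rounded up, so the delay can only err on the long side. As an illustration (the clock rate here is made up, not taken from the driver), a 384 MHz reference clock gives DIV_ROUND_UP(10^9, 384 * 10^6) = 3 ns per cycle and wr_delay = 15 ns, comfortably above the roughly 13 ns that five real cycles take.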
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev) | |||
1233 | } | 1263 | } |
1234 | 1264 | ||
1235 | return ret; | 1265 | return ret; |
1236 | probe_irq_failed: | ||
1237 | cqspi_controller_enable(cqspi, 0); | ||
1238 | probe_setup_failed: | 1266 | probe_setup_failed: |
1267 | cqspi_controller_enable(cqspi, 0); | ||
1268 | probe_irq_failed: | ||
1239 | clk_disable_unprepare(cqspi->clk); | 1269 | clk_disable_unprepare(cqspi->clk); |
1270 | probe_clk_failed: | ||
1271 | pm_runtime_put_sync(dev); | ||
1272 | pm_runtime_disable(dev); | ||
1240 | return ret; | 1273 | return ret; |
1241 | } | 1274 | } |
1242 | 1275 | ||
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev) | |||
1253 | 1286 | ||
1254 | clk_disable_unprepare(cqspi->clk); | 1287 | clk_disable_unprepare(cqspi->clk); |
1255 | 1288 | ||
1289 | pm_runtime_put_sync(&pdev->dev); | ||
1290 | pm_runtime_disable(&pdev->dev); | ||
1291 | |||
1256 | return 0; | 1292 | return 0; |
1257 | } | 1293 | } |
1258 | 1294 | ||
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = { | |||
1284 | #endif | 1320 | #endif |
1285 | 1321 | ||
1286 | static const struct of_device_id cqspi_dt_ids[] = { | 1322 | static const struct of_device_id cqspi_dt_ids[] = { |
1287 | {.compatible = "cdns,qspi-nor",}, | 1323 | { |
1324 | .compatible = "cdns,qspi-nor", | ||
1325 | .data = (void *)0, | ||
1326 | }, | ||
1327 | { | ||
1328 | .compatible = "ti,k2g-qspi", | ||
1329 | .data = (void *)CQSPI_NEEDS_WR_DELAY, | ||
1330 | }, | ||
1288 | { /* end of table */ } | 1331 | { /* end of table */ } |
1289 | }; | 1332 | }; |
1290 | 1333 | ||
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index e82652335ede..c0976f2e3dd1 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c | |||
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | static const struct pci_device_id intel_spi_pci_ids[] = { | 65 | static const struct pci_device_id intel_spi_pci_ids[] = { |
66 | { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, | ||
66 | { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, | 67 | { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, |
68 | { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, | ||
69 | { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, | ||
67 | { }, | 70 | { }, |
68 | }; | 71 | }; |
69 | MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); | 72 | MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); |
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 8a596bfeddff..ef034d898a23 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c | |||
@@ -67,8 +67,6 @@ | |||
67 | #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) | 67 | #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) |
68 | #define PR_RPE BIT(15) | 68 | #define PR_RPE BIT(15) |
69 | #define PR_BASE_MASK 0x3fff | 69 | #define PR_BASE_MASK 0x3fff |
70 | /* Last PR is GPR0 */ | ||
71 | #define PR_NUM (5 + 1) | ||
72 | 70 | ||
73 | /* Offsets are from @ispi->sregs */ | 71 | /* Offsets are from @ispi->sregs */ |
74 | #define SSFSTS_CTL 0x00 | 72 | #define SSFSTS_CTL 0x00 |
@@ -90,20 +88,35 @@ | |||
90 | #define OPMENU0 0x08 | 88 | #define OPMENU0 0x08 |
91 | #define OPMENU1 0x0c | 89 | #define OPMENU1 0x0c |
92 | 90 | ||
91 | #define OPTYPE_READ_NO_ADDR 0 | ||
92 | #define OPTYPE_WRITE_NO_ADDR 1 | ||
93 | #define OPTYPE_READ_WITH_ADDR 2 | ||
94 | #define OPTYPE_WRITE_WITH_ADDR 3 | ||
95 | |||
93 | /* CPU specifics */ | 96 | /* CPU specifics */ |
94 | #define BYT_PR 0x74 | 97 | #define BYT_PR 0x74 |
95 | #define BYT_SSFSTS_CTL 0x90 | 98 | #define BYT_SSFSTS_CTL 0x90 |
96 | #define BYT_BCR 0xfc | 99 | #define BYT_BCR 0xfc |
97 | #define BYT_BCR_WPD BIT(0) | 100 | #define BYT_BCR_WPD BIT(0) |
98 | #define BYT_FREG_NUM 5 | 101 | #define BYT_FREG_NUM 5 |
102 | #define BYT_PR_NUM 5 | ||
99 | 103 | ||
100 | #define LPT_PR 0x74 | 104 | #define LPT_PR 0x74 |
101 | #define LPT_SSFSTS_CTL 0x90 | 105 | #define LPT_SSFSTS_CTL 0x90 |
102 | #define LPT_FREG_NUM 5 | 106 | #define LPT_FREG_NUM 5 |
107 | #define LPT_PR_NUM 5 | ||
103 | 108 | ||
104 | #define BXT_PR 0x84 | 109 | #define BXT_PR 0x84 |
105 | #define BXT_SSFSTS_CTL 0xa0 | 110 | #define BXT_SSFSTS_CTL 0xa0 |
106 | #define BXT_FREG_NUM 12 | 111 | #define BXT_FREG_NUM 12 |
112 | #define BXT_PR_NUM 6 | ||
113 | |||
114 | #define LVSCC 0xc4 | ||
115 | #define UVSCC 0xc8 | ||
116 | #define ERASE_OPCODE_SHIFT 8 | ||
117 | #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) | ||
118 | #define ERASE_64K_OPCODE_SHIFT 16 | ||
119 | #define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) | ||
107 | 120 | ||
108 | #define INTEL_SPI_TIMEOUT 5000 /* ms */ | 121 | #define INTEL_SPI_TIMEOUT 5000 /* ms */ |
109 | #define INTEL_SPI_FIFO_SZ 64 | 122 | #define INTEL_SPI_FIFO_SZ 64 |
@@ -117,8 +130,11 @@ | |||
117 | * @pregs: Start of protection registers | 130 | * @pregs: Start of protection registers |
118 | * @sregs: Start of software sequencer registers | 131 | * @sregs: Start of software sequencer registers |
119 | * @nregions: Maximum number of regions | 132 | * @nregions: Maximum number of regions |
133 | * @pr_num: Maximum number of protected range registers | ||
120 | * @writeable: Is the chip writeable | 134 | * @writeable: Is the chip writeable |
121 | * @swseq: Use SW sequencer in register reads/writes | 135 | * @locked: Is SPI setting locked |
136 | * @swseq_reg: Use SW sequencer in register reads/writes | ||
137 | * @swseq_erase: Use SW sequencer in erase operation | ||
122 | * @erase_64k: 64k erase supported | 138 | * @erase_64k: 64k erase supported |
123 | * @opcodes: Opcodes which are supported. These are programmed by BIOS | 139 | * @opcodes: Opcodes which are supported. These are programmed by BIOS |
124 | * before it locks down the controller. | 140 | * before it locks down the controller. |
@@ -132,8 +148,11 @@ struct intel_spi { | |||
132 | void __iomem *pregs; | 148 | void __iomem *pregs; |
133 | void __iomem *sregs; | 149 | void __iomem *sregs; |
134 | size_t nregions; | 150 | size_t nregions; |
151 | size_t pr_num; | ||
135 | bool writeable; | 152 | bool writeable; |
136 | bool swseq; | 153 | bool locked; |
154 | bool swseq_reg; | ||
155 | bool swseq_erase; | ||
137 | bool erase_64k; | 156 | bool erase_64k; |
138 | u8 opcodes[8]; | 157 | u8 opcodes[8]; |
139 | u8 preopcodes[2]; | 158 | u8 preopcodes[2]; |
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) | |||
167 | for (i = 0; i < ispi->nregions; i++) | 186 | for (i = 0; i < ispi->nregions; i++) |
168 | dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, | 187 | dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, |
169 | readl(ispi->base + FREG(i))); | 188 | readl(ispi->base + FREG(i))); |
170 | for (i = 0; i < PR_NUM; i++) | 189 | for (i = 0; i < ispi->pr_num; i++) |
171 | dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, | 190 | dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, |
172 | readl(ispi->pregs + PR(i))); | 191 | readl(ispi->pregs + PR(i))); |
173 | 192 | ||
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) | |||
181 | if (ispi->info->type == INTEL_SPI_BYT) | 200 | if (ispi->info->type == INTEL_SPI_BYT) |
182 | dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); | 201 | dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); |
183 | 202 | ||
203 | dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); | ||
204 | dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); | ||
205 | |||
184 | dev_dbg(ispi->dev, "Protected regions:\n"); | 206 | dev_dbg(ispi->dev, "Protected regions:\n"); |
185 | for (i = 0; i < PR_NUM; i++) { | 207 | for (i = 0; i < ispi->pr_num; i++) { |
186 | u32 base, limit; | 208 | u32 base, limit; |
187 | 209 | ||
188 | value = readl(ispi->pregs + PR(i)); | 210 | value = readl(ispi->pregs + PR(i)); |
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) | |||
214 | } | 236 | } |
215 | 237 | ||
216 | dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", | 238 | dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", |
217 | ispi->swseq ? 'S' : 'H'); | 239 | ispi->swseq_reg ? 'S' : 'H'); |
240 | dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", | ||
241 | ispi->swseq_erase ? 'S' : 'H'); | ||
218 | } | 242 | } |
219 | 243 | ||
220 | /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ | 244 | /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ |
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) | |||
278 | 302 | ||
279 | static int intel_spi_init(struct intel_spi *ispi) | 303 | static int intel_spi_init(struct intel_spi *ispi) |
280 | { | 304 | { |
281 | u32 opmenu0, opmenu1, val; | 305 | u32 opmenu0, opmenu1, lvscc, uvscc, val; |
282 | int i; | 306 | int i; |
283 | 307 | ||
284 | switch (ispi->info->type) { | 308 | switch (ispi->info->type) { |
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi) | |||
286 | ispi->sregs = ispi->base + BYT_SSFSTS_CTL; | 310 | ispi->sregs = ispi->base + BYT_SSFSTS_CTL; |
287 | ispi->pregs = ispi->base + BYT_PR; | 311 | ispi->pregs = ispi->base + BYT_PR; |
288 | ispi->nregions = BYT_FREG_NUM; | 312 | ispi->nregions = BYT_FREG_NUM; |
313 | ispi->pr_num = BYT_PR_NUM; | ||
314 | ispi->swseq_reg = true; | ||
289 | 315 | ||
290 | if (writeable) { | 316 | if (writeable) { |
291 | /* Disable write protection */ | 317 | /* Disable write protection */ |
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi) | |||
305 | ispi->sregs = ispi->base + LPT_SSFSTS_CTL; | 331 | ispi->sregs = ispi->base + LPT_SSFSTS_CTL; |
306 | ispi->pregs = ispi->base + LPT_PR; | 332 | ispi->pregs = ispi->base + LPT_PR; |
307 | ispi->nregions = LPT_FREG_NUM; | 333 | ispi->nregions = LPT_FREG_NUM; |
334 | ispi->pr_num = LPT_PR_NUM; | ||
335 | ispi->swseq_reg = true; | ||
308 | break; | 336 | break; |
309 | 337 | ||
310 | case INTEL_SPI_BXT: | 338 | case INTEL_SPI_BXT: |
311 | ispi->sregs = ispi->base + BXT_SSFSTS_CTL; | 339 | ispi->sregs = ispi->base + BXT_SSFSTS_CTL; |
312 | ispi->pregs = ispi->base + BXT_PR; | 340 | ispi->pregs = ispi->base + BXT_PR; |
313 | ispi->nregions = BXT_FREG_NUM; | 341 | ispi->nregions = BXT_FREG_NUM; |
342 | ispi->pr_num = BXT_PR_NUM; | ||
314 | ispi->erase_64k = true; | 343 | ispi->erase_64k = true; |
315 | break; | 344 | break; |
316 | 345 | ||
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi) | |||
318 | return -EINVAL; | 347 | return -EINVAL; |
319 | } | 348 | } |
320 | 349 | ||
321 | /* Disable #SMI generation */ | 350 | /* Disable #SMI generation from HW sequencer */ |
322 | val = readl(ispi->base + HSFSTS_CTL); | 351 | val = readl(ispi->base + HSFSTS_CTL); |
323 | val &= ~HSFSTS_CTL_FSMIE; | 352 | val &= ~HSFSTS_CTL_FSMIE; |
324 | writel(val, ispi->base + HSFSTS_CTL); | 353 | writel(val, ispi->base + HSFSTS_CTL); |
325 | 354 | ||
326 | /* | 355 | /* |
327 | * BIOS programs allowed opcodes and then locks down the register. | 356 | * Determine whether erase operation should use HW or SW sequencer. |
328 | * So read back what opcodes it decided to support. That's the set | 357 | * |
329 | * we are going to support as well. | 358 | * The HW sequencer has a predefined list of opcodes, with only the |
359 | * erase opcode being programmable in LVSCC and UVSCC registers. | ||
360 | * If these registers don't contain a valid erase opcode, erase | ||
361 | * cannot be done using HW sequencer. | ||
330 | */ | 362 | */ |
331 | opmenu0 = readl(ispi->sregs + OPMENU0); | 363 | lvscc = readl(ispi->base + LVSCC); |
332 | opmenu1 = readl(ispi->sregs + OPMENU1); | 364 | uvscc = readl(ispi->base + UVSCC); |
365 | if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) | ||
366 | ispi->swseq_erase = true; | ||
367 | /* SPI controller on Intel BXT supports 64K erase opcode */ | ||
368 | if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) | ||
369 | if (!(lvscc & ERASE_64K_OPCODE_MASK) || | ||
370 | !(uvscc & ERASE_64K_OPCODE_MASK)) | ||
371 | ispi->erase_64k = false; | ||
333 | 372 | ||
334 | /* | 373 | /* |
335 | * Some controllers can only do basic operations using hardware | 374 | * Some controllers can only do basic operations using hardware |
336 | * sequencer. All other operations are supposed to be carried out | 375 | * sequencer. All other operations are supposed to be carried out |
337 | * using software sequencer. If we find that BIOS has programmed | 376 | * using software sequencer. |
338 | * opcodes for the software sequencer we use that over the hardware | ||
339 | * sequencer. | ||
340 | */ | 377 | */ |
341 | if (opmenu0 && opmenu1) { | 378 | if (ispi->swseq_reg) { |
342 | for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { | ||
343 | ispi->opcodes[i] = opmenu0 >> i * 8; | ||
344 | ispi->opcodes[i + 4] = opmenu1 >> i * 8; | ||
345 | } | ||
346 | |||
347 | val = readl(ispi->sregs + PREOP_OPTYPE); | ||
348 | ispi->preopcodes[0] = val; | ||
349 | ispi->preopcodes[1] = val >> 8; | ||
350 | |||
351 | /* Disable #SMI generation from SW sequencer */ | 379 | /* Disable #SMI generation from SW sequencer */ |
352 | val = readl(ispi->sregs + SSFSTS_CTL); | 380 | val = readl(ispi->sregs + SSFSTS_CTL); |
353 | val &= ~SSFSTS_CTL_FSMIE; | 381 | val &= ~SSFSTS_CTL_FSMIE; |
354 | writel(val, ispi->sregs + SSFSTS_CTL); | 382 | writel(val, ispi->sregs + SSFSTS_CTL); |
383 | } | ||
384 | |||
385 | /* Check controller's lock status */ | ||
386 | val = readl(ispi->base + HSFSTS_CTL); | ||
387 | ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); | ||
388 | |||
389 | if (ispi->locked) { | ||
390 | /* | ||
391 | * BIOS programs allowed opcodes and then locks down the | ||
392 | * register. So read back what opcodes it decided to support. | ||
393 | * That's the set we are going to support as well. | ||
394 | */ | ||
395 | opmenu0 = readl(ispi->sregs + OPMENU0); | ||
396 | opmenu1 = readl(ispi->sregs + OPMENU1); | ||
355 | 397 | ||
356 | ispi->swseq = true; | 398 | if (opmenu0 && opmenu1) { |
399 | for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { | ||
400 | ispi->opcodes[i] = opmenu0 >> i * 8; | ||
401 | ispi->opcodes[i + 4] = opmenu1 >> i * 8; | ||
402 | } | ||
403 | |||
404 | val = readl(ispi->sregs + PREOP_OPTYPE); | ||
405 | ispi->preopcodes[0] = val; | ||
406 | ispi->preopcodes[1] = val >> 8; | ||
407 | } | ||
357 | } | 408 | } |
358 | 409 | ||
359 | intel_spi_dump_regs(ispi); | 410 | intel_spi_dump_regs(ispi); |
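The new LVSCC/UVSCC handling above treats bits 15:8 of those registers as the erase opcode the hardware sequencer would issue (and, on BXT, bits 23:16 as the 64K erase opcode); if either register lacks a valid erase opcode, erases fall back to the software sequencer. A small stand-alone sketch of extracting the opcodes from a raw register value, using a made-up LVSCC for illustration:

        /* Stand-alone sketch, not driver code; 0x00d82021 is a hypothetical LVSCC value. */
        #include <stdio.h>
        #include <stdint.h>

        #define ERASE_OPCODE_SHIFT      8
        #define ERASE_OPCODE_MASK       (0xff << ERASE_OPCODE_SHIFT)
        #define ERASE_64K_OPCODE_SHIFT  16
        #define ERASE_64K_OPCODE_MASK   (0xff << ERASE_64K_OPCODE_SHIFT)

        int main(void)
        {
                uint32_t lvscc = 0x00d82021;    /* made up: 4K erase 0x20, 64K erase 0xd8 */

                if (!(lvscc & ERASE_OPCODE_MASK))
                        puts("no HW-sequencer erase opcode -> use SW sequencer");
                else
                        printf("4K erase opcode: 0x%02x, 64K erase opcode: 0x%02x\n",
                               (lvscc & ERASE_OPCODE_MASK) >> ERASE_OPCODE_SHIFT,
                               (lvscc & ERASE_64K_OPCODE_MASK) >> ERASE_64K_OPCODE_SHIFT);
                return 0;
        }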
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi) | |||
361 | return 0; | 412 | return 0; |
362 | } | 413 | } |
363 | 414 | ||
364 | static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode) | 415 | static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) |
365 | { | 416 | { |
366 | int i; | 417 | int i; |
418 | int preop; | ||
367 | 419 | ||
368 | for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) | 420 | if (ispi->locked) { |
369 | if (ispi->opcodes[i] == opcode) | 421 | for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) |
370 | return i; | 422 | if (ispi->opcodes[i] == opcode) |
371 | return -EINVAL; | 423 | return i; |
424 | |||
425 | return -EINVAL; | ||
426 | } | ||
427 | |||
428 | /* The lock is off, so just use index 0 */ | ||
429 | writel(opcode, ispi->sregs + OPMENU0); | ||
430 | preop = readw(ispi->sregs + PREOP_OPTYPE); | ||
431 | writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); | ||
432 | |||
433 | return 0; | ||
372 | } | 434 | } |
373 | 435 | ||
374 | static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, | 436 | static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len) |
375 | int len) | ||
376 | { | 437 | { |
377 | u32 val, status; | 438 | u32 val, status; |
378 | int ret; | 439 | int ret; |
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, | |||
394 | return -EINVAL; | 455 | return -EINVAL; |
395 | } | 456 | } |
396 | 457 | ||
458 | if (len > INTEL_SPI_FIFO_SZ) | ||
459 | return -EINVAL; | ||
460 | |||
397 | val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; | 461 | val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; |
398 | val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; | 462 | val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; |
399 | val |= HSFSTS_CTL_FGO; | 463 | val |= HSFSTS_CTL_FGO; |
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, | |||
412 | return 0; | 476 | return 0; |
413 | } | 477 | } |
414 | 478 | ||
415 | static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, | 479 | static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len, |
416 | int len) | 480 | int optype) |
417 | { | 481 | { |
418 | u32 val, status; | 482 | u32 val = 0, status; |
483 | u16 preop; | ||
419 | int ret; | 484 | int ret; |
420 | 485 | ||
421 | ret = intel_spi_opcode_index(ispi, opcode); | 486 | ret = intel_spi_opcode_index(ispi, opcode, optype); |
422 | if (ret < 0) | 487 | if (ret < 0) |
423 | return ret; | 488 | return ret; |
424 | 489 | ||
425 | val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; | 490 | if (len > INTEL_SPI_FIFO_SZ) |
491 | return -EINVAL; | ||
492 | |||
493 | /* Only mark 'Data Cycle' bit when there is data to be transferred */ | ||
494 | if (len > 0) | ||
495 | val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; | ||
426 | val |= ret << SSFSTS_CTL_COP_SHIFT; | 496 | val |= ret << SSFSTS_CTL_COP_SHIFT; |
427 | val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; | 497 | val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; |
428 | val |= SSFSTS_CTL_SCGO; | 498 | val |= SSFSTS_CTL_SCGO; |
499 | preop = readw(ispi->sregs + PREOP_OPTYPE); | ||
500 | if (preop) { | ||
501 | val |= SSFSTS_CTL_ACS; | ||
502 | if (preop >> 8) | ||
503 | val |= SSFSTS_CTL_SPOP; | ||
504 | } | ||
429 | writel(val, ispi->sregs + SSFSTS_CTL); | 505 | writel(val, ispi->sregs + SSFSTS_CTL); |
430 | 506 | ||
431 | ret = intel_spi_wait_sw_busy(ispi); | 507 | ret = intel_spi_wait_sw_busy(ispi); |
432 | if (ret) | 508 | if (ret) |
433 | return ret; | 509 | return ret; |
434 | 510 | ||
435 | status = readl(ispi->base + SSFSTS_CTL); | 511 | status = readl(ispi->sregs + SSFSTS_CTL); |
436 | if (status & SSFSTS_CTL_FCERR) | 512 | if (status & SSFSTS_CTL_FCERR) |
437 | return -EIO; | 513 | return -EIO; |
438 | else if (status & SSFSTS_CTL_AEL) | 514 | else if (status & SSFSTS_CTL_AEL) |
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
449 | /* Address of the first chip */ | 525 | /* Address of the first chip */ |
450 | writel(0, ispi->base + FADDR); | 526 | writel(0, ispi->base + FADDR); |
451 | 527 | ||
452 | if (ispi->swseq) | 528 | if (ispi->swseq_reg) |
453 | ret = intel_spi_sw_cycle(ispi, opcode, buf, len); | 529 | ret = intel_spi_sw_cycle(ispi, opcode, len, |
530 | OPTYPE_READ_NO_ADDR); | ||
454 | else | 531 | else |
455 | ret = intel_spi_hw_cycle(ispi, opcode, buf, len); | 532 | ret = intel_spi_hw_cycle(ispi, opcode, len); |
456 | 533 | ||
457 | if (ret) | 534 | if (ret) |
458 | return ret; | 535 | return ret; |
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
467 | 544 | ||
468 | /* | 545 | /* |
469 | * This is handled with atomic operation and preop code in Intel | 546 | * This is handled with atomic operation and preop code in Intel |
470 | * controller so skip it here now. | 547 | * controller so skip it here now. If the controller is not locked, |
548 | * program the opcode to the PREOP register for later use. | ||
471 | */ | 549 | */ |
472 | if (opcode == SPINOR_OP_WREN) | 550 | if (opcode == SPINOR_OP_WREN) { |
551 | if (!ispi->locked) | ||
552 | writel(opcode, ispi->sregs + PREOP_OPTYPE); | ||
553 | |||
473 | return 0; | 554 | return 0; |
555 | } | ||
474 | 556 | ||
475 | writel(0, ispi->base + FADDR); | 557 | writel(0, ispi->base + FADDR); |
476 | 558 | ||
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
479 | if (ret) | 561 | if (ret) |
480 | return ret; | 562 | return ret; |
481 | 563 | ||
482 | if (ispi->swseq) | 564 | if (ispi->swseq_reg) |
483 | return intel_spi_sw_cycle(ispi, opcode, buf, len); | 565 | return intel_spi_sw_cycle(ispi, opcode, len, |
484 | return intel_spi_hw_cycle(ispi, opcode, buf, len); | 566 | OPTYPE_WRITE_NO_ADDR); |
567 | return intel_spi_hw_cycle(ispi, opcode, len); | ||
485 | } | 568 | } |
486 | 569 | ||
487 | static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, | 570 | static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, |
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, | |||
561 | val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; | 644 | val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; |
562 | val |= HSFSTS_CTL_FCYCLE_WRITE; | 645 | val |= HSFSTS_CTL_FCYCLE_WRITE; |
563 | 646 | ||
564 | /* Write enable */ | ||
565 | if (ispi->preopcodes[1] == SPINOR_OP_WREN) | ||
566 | val |= SSFSTS_CTL_SPOP; | ||
567 | val |= SSFSTS_CTL_ACS; | ||
568 | writel(val, ispi->base + HSFSTS_CTL); | ||
569 | |||
570 | ret = intel_spi_write_block(ispi, write_buf, block_size); | 647 | ret = intel_spi_write_block(ispi, write_buf, block_size); |
571 | if (ret) { | 648 | if (ret) { |
572 | dev_err(ispi->dev, "failed to write block\n"); | 649 | dev_err(ispi->dev, "failed to write block\n"); |
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, | |||
574 | } | 651 | } |
575 | 652 | ||
576 | /* Start the write now */ | 653 | /* Start the write now */ |
577 | val = readl(ispi->base + HSFSTS_CTL); | 654 | val |= HSFSTS_CTL_FGO; |
578 | writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL); | 655 | writel(val, ispi->base + HSFSTS_CTL); |
579 | 656 | ||
580 | ret = intel_spi_wait_hw_busy(ispi); | 657 | ret = intel_spi_wait_hw_busy(ispi); |
581 | if (ret) { | 658 | if (ret) { |
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs) | |||
620 | erase_size = SZ_4K; | 697 | erase_size = SZ_4K; |
621 | } | 698 | } |
622 | 699 | ||
700 | if (ispi->swseq_erase) { | ||
701 | while (len > 0) { | ||
702 | writel(offs, ispi->base + FADDR); | ||
703 | |||
704 | ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, | ||
705 | 0, OPTYPE_WRITE_WITH_ADDR); | ||
706 | if (ret) | ||
707 | return ret; | ||
708 | |||
709 | offs += erase_size; | ||
710 | len -= erase_size; | ||
711 | } | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
623 | while (len > 0) { | 716 | while (len > 0) { |
624 | writel(offs, ispi->base + FADDR); | 717 | writel(offs, ispi->base + FADDR); |
625 | 718 | ||
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi, | |||
652 | { | 745 | { |
653 | int i; | 746 | int i; |
654 | 747 | ||
655 | for (i = 0; i < PR_NUM; i++) { | 748 | for (i = 0; i < ispi->pr_num; i++) { |
656 | u32 pr_base, pr_limit, pr_value; | 749 | u32 pr_base, pr_limit, pr_value; |
657 | 750 | ||
658 | pr_value = readl(ispi->pregs + PR(i)); | 751 | pr_value = readl(ispi->pregs + PR(i)); |
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index c258c7adf1c5..abe455ccd68b 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c | |||
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, | |||
404 | return ret; | 404 | return ret; |
405 | } | 405 | } |
406 | 406 | ||
407 | static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor) | ||
408 | { | ||
409 | clk_disable_unprepare(mt8173_nor->spi_clk); | ||
410 | clk_disable_unprepare(mt8173_nor->nor_clk); | ||
411 | } | ||
412 | |||
413 | static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor) | ||
414 | { | ||
415 | int ret; | ||
416 | |||
417 | ret = clk_prepare_enable(mt8173_nor->spi_clk); | ||
418 | if (ret) | ||
419 | return ret; | ||
420 | |||
421 | ret = clk_prepare_enable(mt8173_nor->nor_clk); | ||
422 | if (ret) { | ||
423 | clk_disable_unprepare(mt8173_nor->spi_clk); | ||
424 | return ret; | ||
425 | } | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
407 | static int mtk_nor_init(struct mt8173_nor *mt8173_nor, | 430 | static int mtk_nor_init(struct mt8173_nor *mt8173_nor, |
408 | struct device_node *flash_node) | 431 | struct device_node *flash_node) |
409 | { | 432 | { |
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev) | |||
468 | return PTR_ERR(mt8173_nor->nor_clk); | 491 | return PTR_ERR(mt8173_nor->nor_clk); |
469 | 492 | ||
470 | mt8173_nor->dev = &pdev->dev; | 493 | mt8173_nor->dev = &pdev->dev; |
471 | ret = clk_prepare_enable(mt8173_nor->spi_clk); | 494 | |
495 | ret = mt8173_nor_enable_clk(mt8173_nor); | ||
472 | if (ret) | 496 | if (ret) |
473 | return ret; | 497 | return ret; |
474 | 498 | ||
475 | ret = clk_prepare_enable(mt8173_nor->nor_clk); | ||
476 | if (ret) { | ||
477 | clk_disable_unprepare(mt8173_nor->spi_clk); | ||
478 | return ret; | ||
479 | } | ||
480 | /* only support one attached flash */ | 499 | /* only support one attached flash */ |
481 | flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); | 500 | flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); |
482 | if (!flash_np) { | 501 | if (!flash_np) { |
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev) | |||
487 | ret = mtk_nor_init(mt8173_nor, flash_np); | 506 | ret = mtk_nor_init(mt8173_nor, flash_np); |
488 | 507 | ||
489 | nor_free: | 508 | nor_free: |
490 | if (ret) { | 509 | if (ret) |
491 | clk_disable_unprepare(mt8173_nor->spi_clk); | 510 | mt8173_nor_disable_clk(mt8173_nor); |
492 | clk_disable_unprepare(mt8173_nor->nor_clk); | 511 | |
493 | } | ||
494 | return ret; | 512 | return ret; |
495 | } | 513 | } |
496 | 514 | ||
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev) | |||
498 | { | 516 | { |
499 | struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev); | 517 | struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev); |
500 | 518 | ||
501 | clk_disable_unprepare(mt8173_nor->spi_clk); | 519 | mt8173_nor_disable_clk(mt8173_nor); |
502 | clk_disable_unprepare(mt8173_nor->nor_clk); | 520 | |
521 | return 0; | ||
522 | } | ||
523 | |||
524 | #ifdef CONFIG_PM_SLEEP | ||
525 | static int mtk_nor_suspend(struct device *dev) | ||
526 | { | ||
527 | struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); | ||
528 | |||
529 | mt8173_nor_disable_clk(mt8173_nor); | ||
530 | |||
503 | return 0; | 531 | return 0; |
504 | } | 532 | } |
505 | 533 | ||
534 | static int mtk_nor_resume(struct device *dev) | ||
535 | { | ||
536 | struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); | ||
537 | |||
538 | return mt8173_nor_enable_clk(mt8173_nor); | ||
539 | } | ||
540 | |||
541 | static const struct dev_pm_ops mtk_nor_dev_pm_ops = { | ||
542 | .suspend = mtk_nor_suspend, | ||
543 | .resume = mtk_nor_resume, | ||
544 | }; | ||
545 | |||
546 | #define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops) | ||
547 | #else | ||
548 | #define MTK_NOR_DEV_PM_OPS NULL | ||
549 | #endif | ||
550 | |||
506 | static const struct of_device_id mtk_nor_of_ids[] = { | 551 | static const struct of_device_id mtk_nor_of_ids[] = { |
507 | { .compatible = "mediatek,mt8173-nor"}, | 552 | { .compatible = "mediatek,mt8173-nor"}, |
508 | { /* sentinel */ } | 553 | { /* sentinel */ } |
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = { | |||
514 | .remove = mtk_nor_drv_remove, | 559 | .remove = mtk_nor_drv_remove, |
515 | .driver = { | 560 | .driver = { |
516 | .name = "mtk-nor", | 561 | .name = "mtk-nor", |
562 | .pm = MTK_NOR_DEV_PM_OPS, | ||
517 | .of_match_table = mtk_nor_of_ids, | 563 | .of_match_table = mtk_nor_of_ids, |
518 | }, | 564 | }, |
519 | }; | 565 | }; |
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 19c000722cbc..bc266f70a15b 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
@@ -89,6 +89,8 @@ struct flash_info { | |||
89 | #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ | 89 | #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ |
90 | #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ | 90 | #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ |
91 | #define USE_CLSR BIT(14) /* use CLSR command */ | 91 | #define USE_CLSR BIT(14) /* use CLSR command */ |
92 | |||
93 | int (*quad_enable)(struct spi_nor *nor); | ||
92 | }; | 94 | }; |
93 | 95 | ||
94 | #define JEDEC_MFR(info) ((info)->id[0]) | 96 | #define JEDEC_MFR(info) ((info)->id[0]) |
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
870 | return ret; | 872 | return ret; |
871 | } | 873 | } |
872 | 874 | ||
875 | static int macronix_quad_enable(struct spi_nor *nor); | ||
876 | |||
873 | /* Used when the "_ext_id" is two bytes at most */ | 877 | /* Used when the "_ext_id" is two bytes at most */ |
874 | #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ | 878 | #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ |
875 | .id = { \ | 879 | .id = { \ |
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = { | |||
964 | { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, | 968 | { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, |
965 | 969 | ||
966 | /* Everspin */ | 970 | /* Everspin */ |
971 | { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, | ||
967 | { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, | 972 | { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, |
968 | { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, | 973 | { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, |
969 | { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, | 974 | { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, |
@@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = { | |||
983 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | 988 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
984 | }, | 989 | }, |
985 | { | 990 | { |
991 | "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64, | ||
992 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
993 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
994 | }, | ||
995 | { | ||
986 | "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, | 996 | "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, |
987 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | 997 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | |
988 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | 998 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = { | |||
997 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | 1007 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | |
998 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | 1008 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
999 | }, | 1009 | }, |
1010 | { | ||
1011 | "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, | ||
1012 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
1013 | SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
1014 | .quad_enable = macronix_quad_enable, | ||
1015 | }, | ||
1000 | 1016 | ||
1001 | /* Intel/Numonyx -- xxxs33b */ | 1017 | /* Intel/Numonyx -- xxxs33b */ |
1002 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, | 1018 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, |
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = { | |||
1024 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, | 1040 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, |
1025 | { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, | 1041 | { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, |
1026 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, | 1042 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, |
1027 | { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, | 1043 | { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, |
1028 | { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, | 1044 | { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, |
1029 | { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, | 1045 | { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, |
1030 | { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, | 1046 | { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, |
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = { | |||
1137 | { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, | 1153 | { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, |
1138 | { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, | 1154 | { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, |
1139 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, | 1155 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, |
1156 | { | ||
1157 | "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, | ||
1158 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
1159 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
1160 | }, | ||
1140 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, | 1161 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, |
1141 | { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, | 1162 | { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, |
1142 | { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, | 1163 | { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, |
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, | |||
2288 | 2309 | ||
2289 | /* Check the SFDP header version. */ | 2310 | /* Check the SFDP header version. */ |
2290 | if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || | 2311 | if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || |
2291 | header.major != SFDP_JESD216_MAJOR || | 2312 | header.major != SFDP_JESD216_MAJOR) |
2292 | header.minor < SFDP_JESD216_MINOR) | ||
2293 | return -EINVAL; | 2313 | return -EINVAL; |
2294 | 2314 | ||
2295 | /* | 2315 | /* |
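Editor's note: the SFDP header check is relaxed above. Only the signature and the major revision are verified now, so a device reporting a different minor revision than the parser targets is no longer rejected outright, and whatever tables it does expose can still be parsed. A compact sketch of the new predicate, with the header reduced to the fields that matter here; the constants mirror the kernel's defines.

    /*
     * Sketch of the relaxed SFDP header acceptance test: signature plus
     * major revision only. The struct is reduced to the relevant fields;
     * the real sfdp_header also carries the parameter-header count.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SFDP_SIGNATURE          0x50444653U     /* "SFDP", little endian */
    #define SFDP_JESD216_MAJOR      1

    struct sfdp_header_sketch {
            uint32_t signature;
            uint8_t  minor;
            uint8_t  major;
    };

    static bool sfdp_header_ok(const struct sfdp_header_sketch *h)
    {
            /*
             * The dropped clause additionally required a minimum minor
             * revision; now any minor number is accepted as long as the
             * major revision matches.
             */
            return h->signature == SFDP_SIGNATURE &&
                   h->major == SFDP_JESD216_MAJOR;
    }

    int main(void)
    {
            struct sfdp_header_sketch h = { SFDP_SIGNATURE, 6, 1 }; /* JESD216B */

            printf("accepted: %d\n", sfdp_header_ok(&h));
            return 0;
    }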
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor, | |||
2427 | params->quad_enable = spansion_quad_enable; | 2447 | params->quad_enable = spansion_quad_enable; |
2428 | break; | 2448 | break; |
2429 | } | 2449 | } |
2450 | |||
2451 | /* | ||
2452 | * Some manufacturers, such as GigaDevice, use different | ||
2453 | * bits to set QE on different memories, so the MFR alone | ||
2454 | * can't indicate the quad_enable method; in that case we | ||
2455 | * set it in the flash info list. | ||
2456 | */ | ||
2457 | if (info->quad_enable) | ||
2458 | params->quad_enable = info->quad_enable; | ||
2430 | } | 2459 | } |
2431 | 2460 | ||
2432 | /* Override the parameters with data read from SFDP tables. */ | 2461 | /* Override the parameters with data read from SFDP tables. */ |
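Editor's note: the hunk above establishes the selection order for the Quad Enable routine. The manufacturer switch picks a default, a per-entry info->quad_enable (such as the gd25q256 one) overrides it, and SFDP data parsed afterwards can still override the result. A small sketch of that precedence with placeholder function pointers:

    /*
     * Sketch of the quad_enable selection in spi_nor_init_params():
     * manufacturer default first, then the per-part hook from the flash
     * info table. Names are placeholders, not the kernel's exact code.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct spi_nor;
    typedef int (*quad_enable_fn)(struct spi_nor *nor);

    static int default_qe(struct spi_nor *nor)  { (void)nor; return 0; }
    static int per_part_qe(struct spi_nor *nor) { (void)nor; return 0; }

    static quad_enable_fn pick_quad_enable(quad_enable_fn mfr_default,
                                           quad_enable_fn info_quad_enable)
    {
            quad_enable_fn qe = mfr_default;        /* 1. chosen by JEDEC MFR id  */

            if (info_quad_enable)                   /* 2. flash_info entry wins   */
                    qe = info_quad_enable;

            return qe;                              /* 3. SFDP may override later */
    }

    int main(void)
    {
            quad_enable_fn qe = pick_quad_enable(default_qe, per_part_qe);

            printf("per-part override used: %d\n", qe == per_part_qe);
            return 0;
    }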
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info, | |||
2630 | /* Enable Quad I/O if needed. */ | 2659 | /* Enable Quad I/O if needed. */ |
2631 | enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || | 2660 | enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || |
2632 | spi_nor_get_protocol_width(nor->write_proto) == 4); | 2661 | spi_nor_get_protocol_width(nor->write_proto) == 4); |
2633 | if (enable_quad_io && params->quad_enable) { | 2662 | if (enable_quad_io && params->quad_enable) |
2634 | err = params->quad_enable(nor); | 2663 | nor->quad_enable = params->quad_enable; |
2664 | else | ||
2665 | nor->quad_enable = NULL; | ||
2666 | |||
2667 | return 0; | ||
2668 | } | ||
2669 | |||
2670 | static int spi_nor_init(struct spi_nor *nor) | ||
2671 | { | ||
2672 | int err; | ||
2673 | |||
2674 | /* | ||
2675 | * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power up | ||
2676 | * with the software protection bits set | ||
2677 | */ | ||
2678 | if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || | ||
2679 | JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || | ||
2680 | JEDEC_MFR(nor->info) == SNOR_MFR_SST || | ||
2681 | nor->info->flags & SPI_NOR_HAS_LOCK) { | ||
2682 | write_enable(nor); | ||
2683 | write_sr(nor, 0); | ||
2684 | spi_nor_wait_till_ready(nor); | ||
2685 | } | ||
2686 | |||
2687 | if (nor->quad_enable) { | ||
2688 | err = nor->quad_enable(nor); | ||
2635 | if (err) { | 2689 | if (err) { |
2636 | dev_err(nor->dev, "quad mode not supported\n"); | 2690 | dev_err(nor->dev, "quad mode not supported\n"); |
2637 | return err; | 2691 | return err; |
2638 | } | 2692 | } |
2639 | } | 2693 | } |
2640 | 2694 | ||
2695 | if ((nor->addr_width == 4) && | ||
2696 | (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) && | ||
2697 | !(nor->info->flags & SPI_NOR_4B_OPCODES)) | ||
2698 | set_4byte(nor, nor->info, 1); | ||
2699 | |||
2641 | return 0; | 2700 | return 0; |
2642 | } | 2701 | } |
2643 | 2702 | ||
2703 | /* mtd resume handler */ | ||
2704 | static void spi_nor_resume(struct mtd_info *mtd) | ||
2705 | { | ||
2706 | struct spi_nor *nor = mtd_to_spi_nor(mtd); | ||
2707 | struct device *dev = nor->dev; | ||
2708 | int ret; | ||
2709 | |||
2710 | /* re-initialize the nor chip */ | ||
2711 | ret = spi_nor_init(nor); | ||
2712 | if (ret) | ||
2713 | dev_err(dev, "resume() failed\n"); | ||
2714 | } | ||
2715 | |||
2644 | int spi_nor_scan(struct spi_nor *nor, const char *name, | 2716 | int spi_nor_scan(struct spi_nor *nor, const char *name, |
2645 | const struct spi_nor_hwcaps *hwcaps) | 2717 | const struct spi_nor_hwcaps *hwcaps) |
2646 | { | 2718 | { |
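Editor's note: all of the one-time SPI commands (clearing block-protection bits on parts that power up locked, enabling quad mode, and entering 4-byte address mode when no dedicated 4-byte opcodes exist) are gathered into spi_nor_init() above, so the same sequence can be replayed by the new spi_nor_resume() handler after the flash has lost power. A hedged outline of that flow with stubbed helpers; Spansion's different 4-byte handling is deliberately glossed over.

    /*
     * Outline of the init sequence factored out above; every helper is a
     * stub and the real spi_nor_init() in spi-nor.c is more careful
     * (e.g. Spansion parts skip the 4-byte mode switch).
     */
    #include <stdio.h>

    struct nor_sketch {
            int needs_unlock;       /* SPI_NOR_HAS_LOCK or Atmel/Intel/SST */
            int (*quad_enable)(struct nor_sketch *nor);
            int addr_width;
            int has_4b_opcodes;     /* SPI_NOR_4B_OPCODES                  */
    };

    static void clear_block_protection(struct nor_sketch *nor) { (void)nor; }
    static void enter_4byte_mode(struct nor_sketch *nor)       { (void)nor; }

    static int spi_nor_init_sketch(struct nor_sketch *nor)
    {
            if (nor->needs_unlock)
                    clear_block_protection(nor);    /* write_enable + write_sr(0) */

            if (nor->quad_enable && nor->quad_enable(nor))
                    return -1;                      /* "quad mode not supported"  */

            /*
             * Parts with 4-byte addressing but no native 4-byte opcodes
             * must be switched into 4-byte mode again after every reset.
             */
            if (nor->addr_width == 4 && !nor->has_4b_opcodes)
                    enter_4byte_mode(nor);

            return 0;
    }

    int main(void)
    {
            struct nor_sketch nor = { .needs_unlock = 1, .addr_width = 4 };

            printf("probe:  %d\n", spi_nor_init_sketch(&nor)); /* from spi_nor_scan()   */
            printf("resume: %d\n", spi_nor_init_sketch(&nor)); /* from spi_nor_resume() */
            return 0;
    }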
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, | |||
2708 | if (ret) | 2780 | if (ret) |
2709 | return ret; | 2781 | return ret; |
2710 | 2782 | ||
2711 | /* | ||
2712 | * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up | ||
2713 | * with the software protection bits set | ||
2714 | */ | ||
2715 | |||
2716 | if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || | ||
2717 | JEDEC_MFR(info) == SNOR_MFR_INTEL || | ||
2718 | JEDEC_MFR(info) == SNOR_MFR_SST || | ||
2719 | info->flags & SPI_NOR_HAS_LOCK) { | ||
2720 | write_enable(nor); | ||
2721 | write_sr(nor, 0); | ||
2722 | spi_nor_wait_till_ready(nor); | ||
2723 | } | ||
2724 | |||
2725 | if (!mtd->name) | 2783 | if (!mtd->name) |
2726 | mtd->name = dev_name(dev); | 2784 | mtd->name = dev_name(dev); |
2727 | mtd->priv = nor; | 2785 | mtd->priv = nor; |
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, | |||
2731 | mtd->size = params.size; | 2789 | mtd->size = params.size; |
2732 | mtd->_erase = spi_nor_erase; | 2790 | mtd->_erase = spi_nor_erase; |
2733 | mtd->_read = spi_nor_read; | 2791 | mtd->_read = spi_nor_read; |
2792 | mtd->_resume = spi_nor_resume; | ||
2734 | 2793 | ||
2735 | /* NOR protection support for STmicro/Micron chips and similar */ | 2794 | /* NOR protection support for STmicro/Micron chips and similar */ |
2736 | if (JEDEC_MFR(info) == SNOR_MFR_MICRON || | 2795 | if (JEDEC_MFR(info) == SNOR_MFR_MICRON || |
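Editor's note: hooking spi_nor_resume() into mtd->_resume is what lets the MTD core trigger the re-initialization on system resume. The core-side dispatch is roughly the shape below (a simplification, not a copy of the mtd.h helper); a NULL handler is simply skipped, so drivers that register no handler are unaffected.

    /*
     * Sketch of the core-side dispatch: the MTD PM path calls the optional
     * per-device _resume handler registered at scan time. Simplified.
     */
    struct mtd_info_sketch {
            void (*_resume)(struct mtd_info_sketch *mtd);
    };

    static inline void mtd_resume_sketch(struct mtd_info_sketch *mtd)
    {
            if (mtd->_resume)
                    mtd->_resume(mtd);      /* -> spi_nor_resume() -> spi_nor_init() */
    }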
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, | |||
2804 | if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || | 2863 | if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || |
2805 | info->flags & SPI_NOR_4B_OPCODES) | 2864 | info->flags & SPI_NOR_4B_OPCODES) |
2806 | spi_nor_set_4byte_opcodes(nor, info); | 2865 | spi_nor_set_4byte_opcodes(nor, info); |
2807 | else | ||
2808 | set_4byte(nor, info, 1); | ||
2809 | } else { | 2866 | } else { |
2810 | nor->addr_width = 3; | 2867 | nor->addr_width = 3; |
2811 | } | 2868 | } |
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, | |||
2822 | return ret; | 2879 | return ret; |
2823 | } | 2880 | } |
2824 | 2881 | ||
2882 | /* Send all the required SPI flash commands to initialize device */ | ||
2883 | nor->info = info; | ||
2884 | ret = spi_nor_init(nor); | ||
2885 | if (ret) | ||
2886 | return ret; | ||
2887 | |||
2825 | dev_info(dev, "%s (%lld Kbytes)\n", info->name, | 2888 | dev_info(dev, "%s (%lld Kbytes)\n", info->name, |
2826 | (long long)mtd->size >> 10); | 2889 | (long long)mtd->size >> 10); |
2827 | 2890 | ||
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index 86c0931543c5..b3c7f6addba7 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c | |||
@@ -1,9 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * stm32_quadspi.c | 2 | * Driver for stm32 quadspi controller |
3 | * | 3 | * |
4 | * Copyright (C) 2017, Ludovic Barre | 4 | * Copyright (C) 2017, STMicroelectronics - All Rights Reserved |
5 | * Author(s): Ludovic Barre author <ludovic.barre@st.com>. | ||
5 | * | 6 | * |
6 | * License terms: GNU General Public License (GPL), version 2 | 7 | * License terms: GPL V2.0. |
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License version 2 as published by | ||
11 | * the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more | ||
16 | * details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * This program. If not, see <http://www.gnu.org/licenses/>. | ||
7 | */ | 20 | */ |
8 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
9 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
@@ -113,6 +126,7 @@ | |||
113 | #define STM32_MAX_MMAP_SZ SZ_256M | 126 | #define STM32_MAX_MMAP_SZ SZ_256M |
114 | #define STM32_MAX_NORCHIP 2 | 127 | #define STM32_MAX_NORCHIP 2 |
115 | 128 | ||
129 | #define STM32_QSPI_FIFO_SZ 32 | ||
116 | #define STM32_QSPI_FIFO_TIMEOUT_US 30000 | 130 | #define STM32_QSPI_FIFO_TIMEOUT_US 30000 |
117 | #define STM32_QSPI_BUSY_TIMEOUT_US 100000 | 131 | #define STM32_QSPI_BUSY_TIMEOUT_US 100000 |
118 | 132 | ||
@@ -124,6 +138,7 @@ struct stm32_qspi_flash { | |||
124 | u32 presc; | 138 | u32 presc; |
125 | u32 read_mode; | 139 | u32 read_mode; |
126 | bool registered; | 140 | bool registered; |
141 | u32 prefetch_limit; | ||
127 | }; | 142 | }; |
128 | 143 | ||
129 | struct stm32_qspi { | 144 | struct stm32_qspi { |
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, | |||
240 | STM32_QSPI_FIFO_TIMEOUT_US); | 255 | STM32_QSPI_FIFO_TIMEOUT_US); |
241 | if (ret) { | 256 | if (ret) { |
242 | dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); | 257 | dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); |
243 | break; | 258 | return ret; |
244 | } | 259 | } |
245 | tx_fifo(buf++, qspi->io_base + QUADSPI_DR); | 260 | tx_fifo(buf++, qspi->io_base + QUADSPI_DR); |
246 | } | 261 | } |
247 | 262 | ||
248 | return ret; | 263 | return 0; |
249 | } | 264 | } |
250 | 265 | ||
251 | static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, | 266 | static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, |
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash, | |||
272 | { | 287 | { |
273 | struct stm32_qspi *qspi = flash->qspi; | 288 | struct stm32_qspi *qspi = flash->qspi; |
274 | u32 ccr, dcr, cr; | 289 | u32 ccr, dcr, cr; |
290 | u32 last_byte; | ||
275 | int err; | 291 | int err; |
276 | 292 | ||
277 | err = stm32_qspi_wait_nobusy(qspi); | 293 | err = stm32_qspi_wait_nobusy(qspi); |
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash, | |||
314 | if (err) | 330 | if (err) |
315 | goto abort; | 331 | goto abort; |
316 | writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); | 332 | writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); |
333 | } else { | ||
334 | last_byte = cmd->addr + cmd->len; | ||
335 | if (last_byte > flash->prefetch_limit) | ||
336 | goto abort; | ||
317 | } | 337 | } |
318 | 338 | ||
319 | return err; | 339 | return err; |
@@ -322,7 +342,9 @@ abort: | |||
322 | cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; | 342 | cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; |
323 | writel_relaxed(cr, qspi->io_base + QUADSPI_CR); | 343 | writel_relaxed(cr, qspi->io_base + QUADSPI_CR); |
324 | 344 | ||
325 | dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); | 345 | if (err) |
346 | dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); | ||
347 | |||
326 | return err; | 348 | return err; |
327 | } | 349 | } |
328 | 350 | ||
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, | |||
550 | } | 572 | } |
551 | 573 | ||
552 | flash->fsize = FSIZE_VAL(mtd->size); | 574 | flash->fsize = FSIZE_VAL(mtd->size); |
575 | flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ; | ||
553 | 576 | ||
554 | flash->read_mode = CCR_FMODE_MM; | 577 | flash->read_mode = CCR_FMODE_MM; |
555 | if (mtd->size > qspi->mm_size) | 578 | if (mtd->size > qspi->mm_size) |
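Editor's note: two behavioural changes in the STM32 QSPI driver sit in these hunks. stm32_qspi_tx_poll() now returns the FIFO-timeout error as soon as it occurs instead of breaking out of the loop first, and memory-mapped transfers whose last byte falls inside the final FIFO-sized window of the flash (STM32_QSPI_FIFO_SZ, 32 bytes) are aborted, presumably because the controller's prefetch would otherwise run past the end of the device; the abort path now logs only when a real error code accompanies it. A compact sketch of the new guard, assuming the same constant and limit calculation as the driver:

    /*
     * Sketch of the prefetch guard added above: a memory-mapped access
     * ending within the last FIFO-sized window of the flash is aborted.
     * The constant and the limit calculation mirror the driver; the rest
     * of the transfer logic is omitted.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define STM32_QSPI_FIFO_SZ      32

    struct qspi_flash_sketch {
            unsigned long size;             /* mtd->size                 */
            unsigned long prefetch_limit;   /* size - STM32_QSPI_FIFO_SZ */
    };

    static bool mm_read_hits_prefetch_limit(const struct qspi_flash_sketch *flash,
                                            unsigned long addr, unsigned long len)
    {
            unsigned long last_byte = addr + len;

            return last_byte > flash->prefetch_limit;
    }

    int main(void)
    {
            struct qspi_flash_sketch flash = {
                    .size           = 16 * 1024 * 1024,
                    .prefetch_limit = 16 * 1024 * 1024 - STM32_QSPI_FIFO_SZ,
            };

            /* A 4-byte read ending 4 bytes short of the end trips the guard. */
            printf("abort: %d\n",
                   mm_read_hits_prefetch_limit(&flash, flash.size - 8, 4));
            return 0;
    }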
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 6cd0f6b7658b..cd55bf14ad51 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -267,7 +267,7 @@ struct mtd_info { | |||
267 | */ | 267 | */ |
268 | unsigned int bitflip_threshold; | 268 | unsigned int bitflip_threshold; |
269 | 269 | ||
270 | // Kernel-only stuff starts here. | 270 | /* Kernel-only stuff starts here. */ |
271 | const char *name; | 271 | const char *name; |
272 | int index; | 272 | int index; |
273 | 273 | ||
@@ -297,10 +297,6 @@ struct mtd_info { | |||
297 | int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, | 297 | int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, |
298 | size_t *retlen, void **virt, resource_size_t *phys); | 298 | size_t *retlen, void **virt, resource_size_t *phys); |
299 | int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); | 299 | int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); |
300 | unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, | ||
301 | unsigned long len, | ||
302 | unsigned long offset, | ||
303 | unsigned long flags); | ||
304 | int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, | 300 | int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, |
305 | size_t *retlen, u_char *buf); | 301 | size_t *retlen, u_char *buf); |
306 | int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, | 302 | int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, |
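Editor's note: with the _get_unmapped_area driver method removed from struct mtd_info, a mappable address can be derived generically from the point method instead: if the driver can point the whole requested range, the returned kernel virtual address serves as the unmapped-area result. A hedged sketch of that shape follows (the real mtdcore.c code differs in its error handling), with a tiny RAM-backed point/unpoint pair standing in for a driver.

    /*
     * Sketch: deriving get_unmapped_area from a point()-style callback.
     * Not the mtdcore.c implementation; error handling is reduced to a
     * single failure value and the backing "flash" is a plain array.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mtd_sketch {
            int (*point)(struct mtd_sketch *mtd, uint64_t from, size_t len,
                         size_t *retlen, void **virt);
            int (*unpoint)(struct mtd_sketch *mtd, uint64_t from, size_t len);
    };

    static long get_unmapped_area_sketch(struct mtd_sketch *mtd,
                                         unsigned long len, unsigned long offset)
    {
            size_t retlen;
            void *virt;

            if (mtd->point(mtd, offset, len, &retlen, &virt))
                    return -1;                      /* not directly mappable    */

            mtd->unpoint(mtd, offset, retlen);      /* only the address is kept */

            if (retlen != len)
                    return -1;                      /* partial mapping: give up */

            return (long)(uintptr_t)virt;
    }

    static unsigned char backing[4096];             /* stand-in for the flash   */

    static int ram_point(struct mtd_sketch *mtd, uint64_t from, size_t len,
                         size_t *retlen, void **virt)
    {
            (void)mtd;
            if (from + len > sizeof(backing))
                    return -1;
            *virt = backing + from;
            *retlen = len;
            return 0;
    }

    static int ram_unpoint(struct mtd_sketch *mtd, uint64_t from, size_t len)
    {
            (void)mtd; (void)from; (void)len;
            return 0;
    }

    int main(void)
    {
            struct mtd_sketch mtd = { .point = ram_point, .unpoint = ram_unpoint };

            printf("mapped: %ld\n", get_unmapped_area_sketch(&mtd, 256, 128));
            return 0;
    }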
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h index fdef72d6e198..7ab51bc4a380 100644 --- a/include/linux/mtd/nand-gpio.h +++ b/include/linux/mtd/nand-gpio.h | |||
@@ -5,11 +5,6 @@ | |||
5 | #include <linux/mtd/rawnand.h> | 5 | #include <linux/mtd/rawnand.h> |
6 | 6 | ||
7 | struct gpio_nand_platdata { | 7 | struct gpio_nand_platdata { |
8 | int gpio_nce; | ||
9 | int gpio_nwp; | ||
10 | int gpio_cle; | ||
11 | int gpio_ale; | ||
12 | int gpio_rdy; | ||
13 | void (*adjust_parts)(struct gpio_nand_platdata *, size_t); | 8 | void (*adjust_parts)(struct gpio_nand_platdata *, size_t); |
14 | struct mtd_partition *parts; | 9 | struct mtd_partition *parts; |
15 | unsigned int num_parts; | 10 | unsigned int num_parts; |
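Editor's note: the integer GPIO numbers (nCE, nWP, CLE, ALE, RDY) disappear from gpio_nand_platdata because the driver now looks its control lines up as GPIO descriptors. The sketch below shows the usual gpiod lookup pattern under that assumption; the connection-ID strings are illustrative and need not match the driver's exact names.

    /*
     * Sketch of descriptor-based GPIO lookup as used after the conversion.
     * Connection IDs ("nce", "ale", ...) are illustrative; lookup sources
     * (DT, ACPI, board tables) replace the integers that used to live in
     * platform data.
     */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    struct gpiomtd_sketch {
            struct gpio_desc *nce;
            struct gpio_desc *ale;
            struct gpio_desc *cle;
            struct gpio_desc *rdy;
    };

    static int gpio_nand_get_lines(struct device *dev, struct gpiomtd_sketch *g)
    {
            g->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
            if (IS_ERR(g->nce))
                    return PTR_ERR(g->nce);

            g->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
            if (IS_ERR(g->ale))
                    return PTR_ERR(g->ale);

            g->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
            if (IS_ERR(g->cle))
                    return PTR_ERR(g->cle);

            g->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
            return IS_ERR(g->rdy) ? PTR_ERR(g->rdy) : 0;
    }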
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2b05f4273bab..749bb08c4772 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h | |||
@@ -177,6 +177,9 @@ enum nand_ecc_algo { | |||
177 | */ | 177 | */ |
178 | #define NAND_NEED_SCRAMBLING 0x00002000 | 178 | #define NAND_NEED_SCRAMBLING 0x00002000 |
179 | 179 | ||
180 | /* Device needs 3rd row address cycle */ | ||
181 | #define NAND_ROW_ADDR_3 0x00004000 | ||
182 | |||
180 | /* Options valid for Samsung large page devices */ | 183 | /* Options valid for Samsung large page devices */ |
181 | #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG | 184 | #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG |
182 | 185 | ||
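Editor's note: NAND_ROW_ADDR_3 marks chips that want three row-address cycles when a page address is issued, so the core no longer has to infer that from the chip size alone at command time. A tiny sketch of address-cycle emission keyed off such an option flag; write_addr_cycle() is a made-up stand-in for the driver's ALE handling, not the nand_base.c code.

    /*
     * Sketch: emitting row (page) address cycles, with the third cycle
     * gated by an option flag that is set at detection time.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NAND_ROW_ADDR_3 0x00004000

    static void write_addr_cycle(uint8_t byte)
    {
            printf("address cycle: 0x%02x\n", byte);
    }

    static void send_row_address(uint32_t page, uint32_t options)
    {
            write_addr_cycle(page & 0xff);
            write_addr_cycle((page >> 8) & 0xff);

            /*
             * Set during detection, either from the chip size or for parts
             * that need a third row cycle regardless of capacity.
             */
            if (options & NAND_ROW_ADDR_3)
                    write_addr_cycle((page >> 16) & 0xff);
    }

    int main(void)
    {
            send_row_address(0x12345, NAND_ROW_ADDR_3);
            return 0;
    }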
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1f0a7fc7772f..d0c66a0975cf 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
@@ -232,10 +232,17 @@ enum spi_nor_option_flags { | |||
232 | }; | 232 | }; |
233 | 233 | ||
234 | /** | 234 | /** |
235 | * struct flash_info - Forward declaration of a structure used internally by | ||
236 | * spi_nor_scan() | ||
237 | */ | ||
238 | struct flash_info; | ||
239 | |||
240 | /** | ||
235 | * struct spi_nor - Structure for defining a the SPI NOR layer | 241 | * struct spi_nor - Structure for defining a the SPI NOR layer |
236 | * @mtd: point to a mtd_info structure | 242 | * @mtd: point to a mtd_info structure |
237 | * @lock: the lock for the read/write/erase/lock/unlock operations | 243 | * @lock: the lock for the read/write/erase/lock/unlock operations |
238 | * @dev: point to a spi device, or a spi nor controller device. | 244 | * @dev: point to a spi device, or a spi nor controller device. |
245 | * @info: spi-nor part JEDEC MFR id and other info | ||
239 | * @page_size: the page size of the SPI NOR | 246 | * @page_size: the page size of the SPI NOR |
240 | * @addr_width: number of address bytes | 247 | * @addr_width: number of address bytes |
241 | * @erase_opcode: the opcode for erasing a sector | 248 | * @erase_opcode: the opcode for erasing a sector |
@@ -262,6 +269,7 @@ enum spi_nor_option_flags { | |||
262 | * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR | 269 | * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR |
263 | * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR | 270 | * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR |
264 | * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is | 271 | * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is |
272 | * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode | ||
265 | * completely locked | 273 | * completely locked |
266 | * @priv: the private data | 274 | * @priv: the private data |
267 | */ | 275 | */ |
@@ -269,6 +277,7 @@ struct spi_nor { | |||
269 | struct mtd_info mtd; | 277 | struct mtd_info mtd; |
270 | struct mutex lock; | 278 | struct mutex lock; |
271 | struct device *dev; | 279 | struct device *dev; |
280 | const struct flash_info *info; | ||
272 | u32 page_size; | 281 | u32 page_size; |
273 | u8 addr_width; | 282 | u8 addr_width; |
274 | u8 erase_opcode; | 283 | u8 erase_opcode; |
@@ -296,6 +305,7 @@ struct spi_nor { | |||
296 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 305 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
297 | int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 306 | int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
298 | int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 307 | int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
308 | int (*quad_enable)(struct spi_nor *nor); | ||
299 | 309 | ||
300 | void *priv; | 310 | void *priv; |
301 | }; | 311 | }; |
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 25e267f1970c..619df2431e75 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h | |||
@@ -64,21 +64,4 @@ struct gpmc_nand_regs { | |||
64 | void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; | 64 | void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; |
65 | void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; | 65 | void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; |
66 | }; | 66 | }; |
67 | |||
68 | struct omap_nand_platform_data { | ||
69 | int cs; | ||
70 | struct mtd_partition *parts; | ||
71 | int nr_parts; | ||
72 | bool flash_bbt; | ||
73 | enum nand_io xfer_type; | ||
74 | int devsize; | ||
75 | enum omap_ecc ecc_opt; | ||
76 | |||
77 | struct device_node *elm_of_node; | ||
78 | |||
79 | /* deprecated */ | ||
80 | struct gpmc_nand_regs reg; | ||
81 | struct device_node *of_node; | ||
82 | bool dev_ready; | ||
83 | }; | ||
84 | #endif | 67 | #endif |
diff --git a/lib/Kconfig b/lib/Kconfig index 368972f0db78..c5e84fbcb30b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -46,10 +46,6 @@ config GENERIC_IOMAP | |||
46 | bool | 46 | bool |
47 | select GENERIC_PCI_IOMAP | 47 | select GENERIC_PCI_IOMAP |
48 | 48 | ||
49 | config GENERIC_IO | ||
50 | bool | ||
51 | default n | ||
52 | |||
53 | config STMP_DEVICE | 49 | config STMP_DEVICE |
54 | bool | 50 | bool |
55 | 51 | ||