300 files changed, 16285 insertions, 7871 deletions
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index e0aedb7a7827..fe122d6e686f 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl | |||
| @@ -1216,8 +1216,6 @@ in this page</entry> | |||
| 1216 | #define NAND_BBT_LASTBLOCK 0x00000010 | 1216 | #define NAND_BBT_LASTBLOCK 0x00000010 |
| 1217 | /* The bbt is at the given page, else we must scan for the bbt */ | 1217 | /* The bbt is at the given page, else we must scan for the bbt */ |
| 1218 | #define NAND_BBT_ABSPAGE 0x00000020 | 1218 | #define NAND_BBT_ABSPAGE 0x00000020 |
| 1219 | /* The bbt is at the given page, else we must scan for the bbt */ | ||
| 1220 | #define NAND_BBT_SEARCH 0x00000040 | ||
| 1221 | /* bbt is stored per chip on multichip devices */ | 1219 | /* bbt is stored per chip on multichip devices */ |
| 1222 | #define NAND_BBT_PERCHIP 0x00000080 | 1220 | #define NAND_BBT_PERCHIP 0x00000080 |
| 1223 | /* bbt has a version counter at offset veroffs */ | 1221 | /* bbt has a version counter at offset veroffs */ |
diff --git a/Documentation/devicetree/bindings/arm/davinci/nand.txt b/Documentation/devicetree/bindings/arm/davinci/nand.txt new file mode 100644 index 000000000000..e37241f1fdd8 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/davinci/nand.txt | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | * Texas Instruments Davinci NAND | ||
| 2 | |||
| 3 | This file describes the device tree bindings for the | ||
| 4 | davinci NAND interface. | ||
| 5 | |||
| 6 | Required properties: | ||
| 7 | - compatible: "ti,davinci-nand"; | ||
| 8 | - reg : contain 2 offset/length values: | ||
| 9 | - offset and length for the access window | ||
| 10 | - offset and length for accessing the aemif control registers | ||
| 11 | - ti,davinci-chipselect: Indicates to the davinci_nand driver which | ||
| 12 | chipselect is used for accessing the NAND. | ||
| 13 | |||
| 14 | Recommended properties : | ||
| 15 | - ti,davinci-mask-ale: mask for ale | ||
| 16 | - ti,davinci-mask-cle: mask for cle | ||
| 17 | - ti,davinci-mask-chipsel: mask for chipselect | ||
| 18 | - ti,davinci-ecc-mode: ECC mode; valid values for the davinci driver are: | ||
| 19 | - "none" | ||
| 20 | - "soft" | ||
| 21 | - "hw" | ||
| 22 | - ti,davinci-ecc-bits: number of ECC bits used; currently 1 or 4 are supported. | ||
| 23 | - ti,davinci-nand-buswidth: bus width, 8 or 16 | ||
| 24 | - ti,davinci-nand-use-bbt: enable flash-based bad block table support. | ||
| 25 | |||
| 26 | Example (enbw_cmc board): | ||
| 27 | aemif@60000000 { | ||
| 28 | compatible = "ti,davinci-aemif"; | ||
| 29 | #address-cells = <2>; | ||
| 30 | #size-cells = <1>; | ||
| 31 | reg = <0x68000000 0x80000>; | ||
| 32 | ranges = <2 0 0x60000000 0x02000000 | ||
| 33 | 3 0 0x62000000 0x02000000 | ||
| 34 | 4 0 0x64000000 0x02000000 | ||
| 35 | 5 0 0x66000000 0x02000000 | ||
| 36 | 6 0 0x68000000 0x02000000>; | ||
| 37 | nand@3,0 { | ||
| 38 | compatible = "ti,davinci-nand"; | ||
| 39 | reg = <3 0x0 0x807ff | ||
| 40 | 6 0x0 0x8000>; | ||
| 41 | #address-cells = <1>; | ||
| 42 | #size-cells = <1>; | ||
| 43 | ti,davinci-chipselect = <1>; | ||
| 44 | ti,davinci-mask-ale = <0>; | ||
| 45 | ti,davinci-mask-cle = <0>; | ||
| 46 | ti,davinci-mask-chipsel = <0>; | ||
| 47 | ti,davinci-ecc-mode = "hw"; | ||
| 48 | ti,davinci-ecc-bits = <4>; | ||
| 49 | ti,davinci-nand-use-bbt; | ||
| 50 | }; | ||
| 51 | }; | ||
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt new file mode 100644 index 000000000000..0a85c70cd30a --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | * Atmel High Speed MultiMedia Card Interface | ||
| 2 | |||
| 3 | This controller, found on Atmel products, provides an interface for MMC, SD | ||
| 4 | and SDIO memory cards. | ||
| 5 | |||
| 6 | This file documents differences between the core properties described | ||
| 7 | by mmc.txt and the properties used by the atmel-mci driver. | ||
| 8 | |||
| 9 | 1) MCI node | ||
| 10 | |||
| 11 | Required properties: | ||
| 12 | - compatible: should be "atmel,hsmci" | ||
| 13 | - #address-cells: should be one. The cell is the slot id. | ||
| 14 | - #size-cells: should be zero. | ||
| 15 | - at least one slot node | ||
| 16 | |||
| 17 | The node contains child nodes for each slot that the platform uses. | ||
| 18 | |||
| 19 | Example MCI node: | ||
| 20 | |||
| 21 | mmc0: mmc@f0008000 { | ||
| 22 | compatible = "atmel,hsmci"; | ||
| 23 | reg = <0xf0008000 0x600>; | ||
| 24 | interrupts = <12 4>; | ||
| 25 | #address-cells = <1>; | ||
| 26 | #size-cells = <0>; | ||
| 27 | |||
| 28 | [ child node definitions...] | ||
| 29 | }; | ||
| 30 | |||
| 31 | 2) slot nodes | ||
| 32 | |||
| 33 | Required properties: | ||
| 34 | - reg: should contain the slot id. | ||
| 35 | - bus-width: number of data lines connected to the controller | ||
| 36 | |||
| 37 | Optional properties: | ||
| 38 | - cd-gpios: specify GPIOs for card detection | ||
| 39 | - cd-inverted: invert the value of external card detect gpio line | ||
| 40 | - wp-gpios: specify GPIOs for write protection | ||
| 41 | |||
| 42 | Example slot node: | ||
| 43 | |||
| 44 | slot@0 { | ||
| 45 | reg = <0>; | ||
| 46 | bus-width = <4>; | ||
| 47 | cd-gpios = <&pioD 15 0>; | ||
| 48 | cd-inverted; | ||
| 49 | }; | ||
| 50 | |||
| 51 | Example full MCI node: | ||
| 52 | mmc0: mmc@f0008000 { | ||
| 53 | compatible = "atmel,hsmci"; | ||
| 54 | reg = <0xf0008000 0x600>; | ||
| 55 | interrupts = <12 4>; | ||
| 56 | #address-cells = <1>; | ||
| 57 | #size-cells = <0>; | ||
| 58 | slot@0 { | ||
| 59 | reg = <0>; | ||
| 60 | bus-width = <4>; | ||
| 61 | cd-gpios = <&pioD 15 0>; | ||
| 62 | cd-inverted; | ||
| 63 | }; | ||
| 64 | slot@1 { | ||
| 65 | reg = <1>; | ||
| 66 | bus-width = <4>; | ||
| 67 | }; | ||
| 68 | }; | ||
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt new file mode 100644 index 000000000000..792768953330 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt | |||
| @@ -0,0 +1,87 @@ | |||
| 1 | * Samsung Exynos specific extensions to the Synopsys DesignWare Mobile | ||
| 2 | Storage Host Controller | ||
| 3 | |||
| 4 | The Synopsys DesignWare mobile storage host controller is used to interface | ||
| 5 | a SoC with storage media such as eMMC or SD/MMC cards. This file documents | ||
| 6 | differences between the core Synopsys dw mshc controller properties described | ||
| 7 | by synposis-dw-mshc.txt and the properties used by the Samsung Exynos specific | ||
| 8 | extensions to the Synopsys DesignWare Mobile Storage Host Controller. | ||
| 9 | |||
| 10 | Required Properties: | ||
| 11 | |||
| 12 | * compatible: should be | ||
| 13 | - "samsung,exynos4210-dw-mshc": for controllers with Samsung Exynos4210 | ||
| 14 | specific extensions. | ||
| 15 | - "samsung,exynos4412-dw-mshc": for controllers with Samsung Exynos4412 | ||
| 16 | specific extensions. | ||
| 17 | - "samsung,exynos5250-dw-mshc": for controllers with Samsung Exynos5250 | ||
| 18 | specific extensions. | ||
| 19 | |||
| 20 | * samsung,dw-mshc-ciu-div: Specifies the divider value for the card interface | ||
| 21 | unit (ciu) clock. This property applies only to Exynos5 SoCs and is | ||
| 22 | ignored for Exynos4 SoCs. The valid range of the divider value is 0 to 7. | ||
| 23 | |||
| 24 | * samsung,dw-mshc-sdr-timing: Specifies the CIU clock phase shift value in | ||
| 25 | transmit mode and the CIU clock phase shift value in receive mode for single | ||
| 26 | data rate mode operation. Refer to the notes below for the order of the cells | ||
| 27 | and the valid values. | ||
| 28 | |||
| 29 | * samsung,dw-mshc-ddr-timing: Specifies the CIU clock phase shift value in | ||
| 30 | transmit mode and the CIU clock phase shift value in receive mode for double | ||
| 31 | data rate mode operation. Refer to the notes below for the order of the cells | ||
| 32 | and the valid values. | ||
| 33 | |||
| 34 | Notes for the sdr-timing and ddr-timing values: | ||
| 35 | |||
| 36 | The order of the cells should be | ||
| 37 | - First Cell: CIU clock phase shift value for tx mode. | ||
| 38 | - Second Cell: CIU clock phase shift value for rx mode. | ||
| 39 | |||
| 40 | Valid values for SDR and DDR CIU clock timing for Exynos5250: | ||
| 41 | - valid values for tx phase shift and rx phase shift are 0 to 7. | ||
| 42 | - when CIU clock divider value is set to 3, all possible 8 phase shift | ||
| 43 | values can be used. | ||
| 44 | - if CIU clock divider value is 0 (that is divide by 1), both tx and rx | ||
| 45 | phase shift clocks should be 0. | ||
| 46 | |||
| 47 | Required properties for a slot: | ||
| 48 | |||
| 49 | * gpios: specifies a list of gpios used for command, clock and data bus. The | ||
| 50 | first gpio is the command line and the second gpio is the clock line. The | ||
| 51 | rest of the gpios (depending on the bus-width property) are the data lines in | ||
| 52 | no particular order. The format of the gpio specifier depends on the gpio | ||
| 53 | controller. | ||
| 54 | |||
| 55 | Example: | ||
| 56 | |||
| 57 | The MSHC controller node can be split into two portions, SoC specific and | ||
| 58 | board specific portions as listed below. | ||
| 59 | |||
| 60 | dwmmc0@12200000 { | ||
| 61 | compatible = "samsung,exynos5250-dw-mshc"; | ||
| 62 | reg = <0x12200000 0x1000>; | ||
| 63 | interrupts = <0 75 0>; | ||
| 64 | #address-cells = <1>; | ||
| 65 | #size-cells = <0>; | ||
| 66 | }; | ||
| 67 | |||
| 68 | dwmmc0@12200000 { | ||
| 69 | num-slots = <1>; | ||
| 70 | supports-highspeed; | ||
| 71 | broken-cd; | ||
| 72 | fifo-depth = <0x80>; | ||
| 73 | card-detect-delay = <200>; | ||
| 74 | samsung,dw-mshc-ciu-div = <3>; | ||
| 75 | samsung,dw-mshc-sdr-timing = <2 3>; | ||
| 76 | samsung,dw-mshc-ddr-timing = <1 2>; | ||
| 77 | |||
| 78 | slot@0 { | ||
| 79 | reg = <0>; | ||
| 80 | bus-width = <8>; | ||
| 81 | gpios = <&gpc0 0 2 0 3>, <&gpc0 1 2 0 3>, | ||
| 82 | <&gpc1 0 2 3 3>, <&gpc1 1 2 3 3>, | ||
| 83 | <&gpc1 2 2 3 3>, <&gpc1 3 2 3 3>, | ||
| 84 | <&gpc0 3 2 3 3>, <&gpc0 4 2 3 3>, | ||
| 85 | <&gpc0 5 2 3 3>, <&gpc0 6 2 3 3>; | ||
| 86 | }; | ||
| 87 | }; | ||
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt index 8a6811f4a02f..8e2e0ba2f486 100644 --- a/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/Documentation/devicetree/bindings/mmc/mmc.txt | |||
| @@ -9,12 +9,17 @@ Interpreted by the OF core: | |||
| 9 | Required properties: | 9 | Required properties: |
| 10 | - bus-width: Number of data lines, can be <1>, <4>, or <8> | 10 | - bus-width: Number of data lines, can be <1>, <4>, or <8> |
| 11 | 11 | ||
| 12 | Card detection: | ||
| 13 | If no property below is supplied, standard SDHCI card detect is used. | ||
| 14 | Only one of the properties in this section should be supplied: | ||
| 15 | - broken-cd: There is no card detection available; polling must be used. | ||
| 16 | - cd-gpios: Specify GPIOs for card detection, see gpio binding | ||
| 17 | - non-removable: non-removable slot (like eMMC); assume always present. | ||
| 18 | |||
| 12 | Optional properties: | 19 | Optional properties: |
| 13 | - cd-gpios: Specify GPIOs for card detection, see gpio binding | ||
| 14 | - wp-gpios: Specify GPIOs for write protection, see gpio binding | 20 | - wp-gpios: Specify GPIOs for write protection, see gpio binding |
| 15 | - cd-inverted: when present, polarity on the cd gpio line is inverted | 21 | - cd-inverted: when present, polarity on the cd gpio line is inverted |
| 16 | - wp-inverted: when present, polarity on the wp gpio line is inverted | 22 | - wp-inverted: when present, polarity on the wp gpio line is inverted |
| 17 | - non-removable: non-removable slot (like eMMC) | ||
| 18 | - max-frequency: maximum operating clock frequency | 23 | - max-frequency: maximum operating clock frequency |
| 19 | 24 | ||
| 20 | Example: | 25 | Example: |
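To illustrate the new card-detection section, a minimal slot node using one of the mutually exclusive options (cd-gpios together with cd-inverted) might look like the sketch below; the node name, address and GPIO specifiers are hypothetical and not taken from this patch:

	sdhci@12345000 {
		bus-width = <4>;
		cd-gpios = <&gpio0 7 0>;	/* hypothetical card-detect GPIO */
		cd-inverted;			/* polarity of the CD line is inverted */
		wp-gpios = <&gpio0 8 0>;	/* hypothetical write-protect GPIO */
	};
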
diff --git a/Documentation/devicetree/bindings/mmc/pxa-mmc.txt b/Documentation/devicetree/bindings/mmc/pxa-mmc.txt new file mode 100644 index 000000000000..b7025de7dced --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/pxa-mmc.txt | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | * PXA MMC drivers | ||
| 2 | |||
| 3 | Driver bindings for the PXA MCI (MMC/SDIO) interfaces | ||
| 4 | |||
| 5 | Required properties: | ||
| 6 | - compatible: Should be "marvell,pxa-mmc". | ||
| 7 | - vmmc-supply: A regulator for VMMC | ||
| 8 | |||
| 9 | Optional properties: | ||
| 10 | - marvell,detect-delay-ms: sets the detection delay timeout in ms. | ||
| 11 | - marvell,gpio-power: GPIO spec for the card power enable pin | ||
| 12 | |||
| 13 | This file documents differences between the core properties in mmc.txt | ||
| 14 | and the properties used by the pxa-mmc driver. | ||
| 15 | |||
| 16 | Examples: | ||
| 17 | |||
| 18 | mmc0: mmc@41100000 { | ||
| 19 | compatible = "marvell,pxa-mmc"; | ||
| 20 | reg = <0x41100000 0x1000>; | ||
| 21 | interrupts = <23>; | ||
| 22 | cd-gpios = <&gpio 23 0>; | ||
| 23 | wp-gpios = <&gpio 24 0>; | ||
| 24 | }; | ||
| 25 | |||
diff --git a/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt b/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt new file mode 100644 index 000000000000..630a7d7f4718 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | * Samsung's SDHCI Controller device tree bindings | ||
| 2 | |||
| 3 | Samsung's SDHCI controller is used as a connectivity interface with external | ||
| 4 | MMC, SD and eMMC storage mediums. This file documents differences between the | ||
| 5 | core mmc properties described by mmc.txt and the properties used by the | ||
| 6 | Samsung implementation of the SDHCI controller. | ||
| 7 | |||
| 8 | Note: The mmc core bindings documentation states that if none of the core | ||
| 9 | card-detect bindings are used, then the standard sdhci card detect mechanism | ||
| 10 | is used. The Samsung SDHCI controller bindings extend this as listed below. | ||
| 11 | |||
| 12 | [A] The property "samsung,cd-pinmux-gpio" can be used as stated in the | ||
| 13 | "Optional Board Specific Properties" section below. | ||
| 14 | |||
| 15 | [B] If neither the core card-detect bindings nor the "samsung,cd-pinmux-gpio" | ||
| 16 | property is specified, it is assumed that there is no card detection | ||
| 17 | mechanism in use. | ||
| 18 | |||
| 19 | Required SoC Specific Properties: | ||
| 20 | - compatible: should be one of the following | ||
| 21 | - "samsung,s3c6410-sdhci": For controllers compatible with s3c6410 sdhci | ||
| 22 | controller. | ||
| 23 | - "samsung,exynos4210-sdhci": For controllers compatible with Exynos4 sdhci | ||
| 24 | controller. | ||
| 25 | |||
| 26 | Required Board Specific Properties: | ||
| 27 | - gpios: Should specify the gpios used for clock, command and data lines. The | ||
| 28 | gpio specifier format depends on the gpio controller. | ||
| 29 | |||
| 30 | Optional Board Specific Properties: | ||
| 31 | - samsung,cd-pinmux-gpio: Specifies the card detect line that is routed | ||
| 32 | through a pinmux to the card-detect pin of the card slot. This property | ||
| 33 | should be used only if none of the mmc core card-detect properties are | ||
| 34 | used. | ||
| 35 | |||
| 36 | Example: | ||
| 37 | sdhci@12530000 { | ||
| 38 | compatible = "samsung,exynos4210-sdhci"; | ||
| 39 | reg = <0x12530000 0x100>; | ||
| 40 | interrupts = <0 75 0>; | ||
| 41 | bus-width = <4>; | ||
| 42 | cd-gpios = <&gpk2 2 2 3 3>; | ||
| 43 | gpios = <&gpk2 0 2 0 3>, /* clock line */ | ||
| 44 | <&gpk2 1 2 0 3>, /* command line */ | ||
| 45 | <&gpk2 3 2 3 3>, /* data line 0 */ | ||
| 46 | <&gpk2 4 2 3 3>, /* data line 1 */ | ||
| 47 | <&gpk2 5 2 3 3>, /* data line 2 */ | ||
| 48 | <&gpk2 6 2 3 3>; /* data line 3 */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | Note: This example shows both SoC specific and board specific properties | ||
| 52 | in a single device node. The properties can actually be separated | ||
| 53 | into a SoC specific node and a board specific node. | ||
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-dove.txt b/Documentation/devicetree/bindings/mmc/sdhci-dove.txt new file mode 100644 index 000000000000..ae9aab9abcd7 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/sdhci-dove.txt | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | * Marvell sdhci-dove controller | ||
| 2 | |||
| 3 | This file documents differences between the core properties in mmc.txt | ||
| 4 | and the properties used by the sdhci-dove driver. | ||
| 5 | |||
| 6 | - compatible: Should be "marvell,dove-sdhci". | ||
| 7 | |||
| 8 | Example: | ||
| 9 | |||
| 10 | sdio0: sdio@92000 { | ||
| 11 | compatible = "marvell,dove-sdhci"; | ||
| 12 | reg = <0x92000 0x100>; | ||
| 13 | interrupts = <35>; | ||
| 14 | }; | ||
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-spear.txt b/Documentation/devicetree/bindings/mmc/sdhci-spear.txt new file mode 100644 index 000000000000..fd3643e7e467 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/sdhci-spear.txt | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | * SPEAr SDHCI Controller | ||
| 2 | |||
| 3 | This file documents differences between the core properties in mmc.txt | ||
| 4 | and the properties used by the sdhci-spear driver. | ||
| 5 | |||
| 6 | Required properties: | ||
| 7 | - compatible: "st,spear300-sdhci" | ||
| 8 | |||
| 9 | Optional properties: | ||
| 10 | - cd-gpios: card detect gpio, with zero flags. | ||
| 11 | |||
| 12 | Example: | ||
| 13 | |||
| 14 | sdhci@fc000000 { | ||
| 15 | compatible = "st,spear300-sdhci"; | ||
| 16 | reg = <0xfc000000 0x1000>; | ||
| 17 | cd-gpios = <&gpio0 6 0>; | ||
| 18 | }; | ||
diff --git a/Documentation/devicetree/bindings/mmc/synposis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synposis-dw-mshc.txt new file mode 100644 index 000000000000..06cd32d08052 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/synposis-dw-mshc.txt | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | * Synopsys DesignWare Mobile Storage Host Controller | ||
| 2 | |||
| 3 | The Synopsys DesignWare mobile storage host controller is used to interface | ||
| 4 | a SoC with storage media such as eMMC or SD/MMC cards. This file documents | ||
| 5 | differences between the core mmc properties described by mmc.txt and the | ||
| 6 | properties used by the Synopsys DesignWare Mobile Storage Host Controller. | ||
| 7 | |||
| 8 | Required Properties: | ||
| 9 | |||
| 10 | * compatible: should be | ||
| 11 | - snps,dw-mshc: for controllers compliant with Synopsys dw-mshc. | ||
| 12 | * #address-cells: should be 1. | ||
| 13 | * #size-cells: should be 0. | ||
| 14 | |||
| 15 | # Slots: The slot specific information is contained within child nodes, with | ||
| 16 | each child node representing a supported slot. There should be at least one | ||
| 17 | child node representing a card slot. The name of the child node representing | ||
| 18 | the slot is recommended to be slot@n, where n is the unique number of the slot | ||
| 19 | connected to the controller. The following are optional properties which | ||
| 20 | can be included in the slot child node. | ||
| 21 | |||
| 22 | * reg: specifies the physical slot number. The valid values of this | ||
| 23 | property are 0 to (num-slots - 1), where num-slots is the value | ||
| 24 | specified by the num-slots property. | ||
| 25 | |||
| 26 | * bus-width: as documented in mmc core bindings. | ||
| 27 | |||
| 28 | * wp-gpios: specifies the write protect gpio line. The format of the | ||
| 29 | gpio specifier depends on the gpio controller. If the write-protect | ||
| 30 | line is not available, this property is optional. | ||
| 31 | |||
| 32 | Optional properties: | ||
| 33 | |||
| 34 | * num-slots: specifies the number of slots supported by the controller. | ||
| 35 | The number of physical slots actually used can be equal to or less than the | ||
| 36 | value specified by num-slots. If this property is not specified, the value | ||
| 37 | of the num-slots property is assumed to be 1. | ||
| 38 | |||
| 39 | * fifo-depth: The maximum size of the tx/rx FIFOs. If this property is not | ||
| 40 | specified, the default value of the fifo size is determined from the | ||
| 41 | controller registers. | ||
| 42 | |||
| 43 | * card-detect-delay: Delay in milliseconds before detecting a card after a card | ||
| 44 | insert event. The default value is 0. | ||
| 45 | |||
| 46 | * supports-highspeed: Enables support for high speed cards (up to 50 MHz) | ||
| 47 | |||
| 48 | * broken-cd: as documented in mmc core bindings. | ||
| 49 | |||
| 50 | Aliases: | ||
| 51 | |||
| 52 | - All the MSHC controller nodes should be represented in the aliases node using | ||
| 53 | the following format 'mshc{n}' where n is a unique number for the alias. | ||
| 54 | |||
| 55 | Example: | ||
| 56 | |||
| 57 | The MSHC controller node can be split into two portions, SoC specific and | ||
| 58 | board specific portions as listed below. | ||
| 59 | |||
| 60 | dwmmc0@12200000 { | ||
| 61 | compatible = "snps,dw-mshc"; | ||
| 62 | reg = <0x12200000 0x1000>; | ||
| 63 | interrupts = <0 75 0>; | ||
| 64 | #address-cells = <1>; | ||
| 65 | #size-cells = <0>; | ||
| 66 | }; | ||
| 67 | |||
| 68 | dwmmc0@12200000 { | ||
| 69 | num-slots = <1>; | ||
| 70 | supports-highspeed; | ||
| 71 | broken-cd; | ||
| 72 | fifo-depth = <0x80>; | ||
| 73 | card-detect-delay = <200>; | ||
| 74 | |||
| 75 | slot@0 { | ||
| 76 | reg = <0>; | ||
| 77 | bus-width = <8>; | ||
| 78 | }; | ||
| 79 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt index a20069502f5a..d555421ea49f 100644 --- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt +++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt | |||
| @@ -3,7 +3,9 @@ Atmel NAND flash | |||
| 3 | Required properties: | 3 | Required properties: |
| 4 | - compatible : "atmel,at91rm9200-nand". | 4 | - compatible : "atmel,at91rm9200-nand". |
| 5 | - reg : should specify localbus address and size used for the chip, | 5 | - reg : should specify localbus address and size used for the chip, |
| 6 | and if availlable the ECC. | 6 | and hardware ECC controller if available. |
| 7 | If the hardware ECC is PMECC, it should contain the address and size for the | ||
| 8 | PMECC, the PMECC Error Location controller and the ROM which holds the lookup tables. | ||
| 7 | - atmel,nand-addr-offset : offset for the address latch. | 9 | - atmel,nand-addr-offset : offset for the address latch. |
| 8 | - atmel,nand-cmd-offset : offset for the command latch. | 10 | - atmel,nand-cmd-offset : offset for the command latch. |
| 9 | - #address-cells, #size-cells : Must be present if the device has sub-nodes | 11 | - #address-cells, #size-cells : Must be present if the device has sub-nodes |
| @@ -16,6 +18,15 @@ Optional properties: | |||
| 16 | - nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default. | 18 | - nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default. |
| 17 | Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first", | 19 | Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first", |
| 18 | "soft_bch". | 20 | "soft_bch". |
| 21 | - atmel,has-pmecc : boolean to enable Programmable Multibit ECC hardware. | ||
| 22 | Only supported by at91sam9x5 or later sam9 product. | ||
| 23 | - atmel,pmecc-cap : error correction capability for Programmable Multibit ECC | ||
| 24 | Controller. Supported values are: 2, 4, 8, 12, 24. | ||
| 25 | - atmel,pmecc-sector-size : sector size for ECC computation. Supported values | ||
| 26 | are: 512, 1024. | ||
| 27 | - atmel,pmecc-lookup-table-offset : contains two lookup table offsets in ROM | ||
| 28 | for the different sector sizes. The first is for sector size 512, the second | ||
| 29 | is for sector size 1024. | ||
| 19 | - nand-bus-width : 8 or 16 bus width if not present 8 | 30 | - nand-bus-width : 8 or 16 bus width if not present 8 |
| 20 | - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false | 31 | - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false |
| 21 | 32 | ||
| @@ -39,3 +50,30 @@ nand0: nand@40000000,0 { | |||
| 39 | ... | 50 | ... |
| 40 | }; | 51 | }; |
| 41 | }; | 52 | }; |
| 53 | |||
| 54 | /* for PMECC supported chips */ | ||
| 55 | nand0: nand@40000000 { | ||
| 56 | compatible = "atmel,at91rm9200-nand"; | ||
| 57 | #address-cells = <1>; | ||
| 58 | #size-cells = <1>; | ||
| 59 | reg = < 0x40000000 0x10000000 /* bus addr & size */ | ||
| 60 | 0xffffe000 0x00000600 /* PMECC addr & size */ | ||
| 61 | 0xffffe600 0x00000200 /* PMECC ERRLOC addr & size */ | ||
| 62 | 0x00100000 0x00100000 /* ROM addr & size */ | ||
| 63 | >; | ||
| 64 | atmel,nand-addr-offset = <21>; /* ale */ | ||
| 65 | atmel,nand-cmd-offset = <22>; /* cle */ | ||
| 66 | nand-on-flash-bbt; | ||
| 67 | nand-ecc-mode = "hw"; | ||
| 68 | atmel,has-pmecc; /* enable PMECC */ | ||
| 69 | atmel,pmecc-cap = <2>; | ||
| 70 | atmel,pmecc-sector-size = <512>; | ||
| 71 | atmel,pmecc-lookup-table-offset = <0x8000 0x10000>; | ||
| 72 | gpios = <&pioD 5 0 /* rdy */ | ||
| 73 | &pioD 4 0 /* nce */ | ||
| 74 | 0 /* cd */ | ||
| 75 | >; | ||
| 76 | partition@0 { | ||
| 77 | ... | ||
| 78 | }; | ||
| 79 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt index 1a5bbd346d22..3fb3f9015365 100644 --- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt +++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt | |||
| @@ -12,6 +12,10 @@ Required properties: | |||
| 12 | - interrupt-names : The interrupt names "gpmi-dma", "bch"; | 12 | - interrupt-names : The interrupt names "gpmi-dma", "bch"; |
| 13 | - fsl,gpmi-dma-channel : Should contain the dma channel it uses. | 13 | - fsl,gpmi-dma-channel : Should contain the dma channel it uses. |
| 14 | 14 | ||
| 15 | Optional properties: | ||
| 16 | - nand-on-flash-bbt: boolean to enable on flash bbt option if not | ||
| 17 | present false | ||
| 18 | |||
| 15 | The device tree may optionally contain sub-nodes describing partitions of the | 19 | The device tree may optionally contain sub-nodes describing partitions of the |
| 16 | address space. See partition.txt for more detail. | 20 | address space. See partition.txt for more detail. |
| 17 | 21 | ||
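For reference, a GPMI NAND node with the new optional property enabled might look like the sketch below; it loosely follows the usual i.MX28-style example, and the addresses, interrupts and DMA channel are illustrative rather than taken from this patch:

	gpmi-nand@8000c000 {
		compatible = "fsl,imx28-gpmi-nand";
		#address-cells = <1>;
		#size-cells = <1>;
		reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
		reg-names = "gpmi-nand", "bch";
		interrupts = <88>, <41>;
		interrupt-names = "gpmi-dma", "bch";
		fsl,gpmi-dma-channel = <4>;
		nand-on-flash-bbt;	/* new optional property: keep the bad block table on flash */
	};
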
diff --git a/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt b/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt new file mode 100644 index 000000000000..d0a37252eb22 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | NXP LPC32xx SoC NAND MLC controller | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: "nxp,lpc3220-mlc" | ||
| 5 | - reg: Address and size of the controller | ||
| 6 | - interrupts: The NAND interrupt specification | ||
| 7 | - gpios: GPIO specification for NAND write protect | ||
| 8 | |||
| 9 | The following required properties are very controller specific. See the LPC32xx | ||
| 10 | User Manual, section 7.5.14 "MLC NAND Timing Register" (the values here are | ||
| 11 | specified in Hz, to make them independent of the actual clock speed and to | ||
| 12 | provide for good accuracy): | ||
| 13 | - nxp,tcea-delay: TCEA_DELAY | ||
| 14 | - nxp,busy-delay: BUSY_DELAY | ||
| 15 | - nxp,nand-ta: NAND_TA | ||
| 16 | - nxp,rd-high: RD_HIGH | ||
| 17 | - nxp,rd-low: RD_LOW | ||
| 18 | - nxp,wr-high: WR_HIGH | ||
| 19 | - nxp,wr-low: WR_LOW | ||
| 20 | |||
| 21 | Optional subnodes: | ||
| 22 | - Partitions, see Documentation/devicetree/bindings/mtd/partition.txt | ||
| 23 | |||
| 24 | Example: | ||
| 25 | |||
| 26 | mlc: flash@200A8000 { | ||
| 27 | compatible = "nxp,lpc3220-mlc"; | ||
| 28 | reg = <0x200A8000 0x11000>; | ||
| 29 | interrupts = <11 0>; | ||
| 30 | #address-cells = <1>; | ||
| 31 | #size-cells = <1>; | ||
| 32 | |||
| 33 | nxp,tcea-delay = <333333333>; | ||
| 34 | nxp,busy-delay = <10000000>; | ||
| 35 | nxp,nand-ta = <18181818>; | ||
| 36 | nxp,rd-high = <31250000>; | ||
| 37 | nxp,rd-low = <45454545>; | ||
| 38 | nxp,wr-high = <40000000>; | ||
| 39 | nxp,wr-low = <83333333>; | ||
| 40 | gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */ | ||
| 41 | |||
| 42 | mtd0@00000000 { | ||
| 43 | label = "boot"; | ||
| 44 | reg = <0x00000000 0x00064000>; | ||
| 45 | read-only; | ||
| 46 | }; | ||
| 47 | |||
| 48 | ... | ||
| 49 | |||
| 50 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt b/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt new file mode 100644 index 000000000000..d94edc0fc554 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | NXP LPC32xx SoC NAND SLC controller | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: "nxp,lpc3220-slc" | ||
| 5 | - reg: Address and size of the controller | ||
| 6 | - nand-on-flash-bbt: Use bad block table on flash | ||
| 7 | - gpios: GPIO specification for NAND write protect | ||
| 8 | |||
| 9 | The following required properties are very controller specific. See the LPC32xx | ||
| 10 | User Manual: | ||
| 11 | - nxp,wdr-clks: Delay before Ready signal is tested on write (W_RDY) | ||
| 12 | - nxp,rdr-clks: Delay before Ready signal is tested on read (R_RDY) | ||
| 13 | (The following values are specified in Hz, to make them independent of actual | ||
| 14 | clock speed:) | ||
| 15 | - nxp,wwidth: Write pulse width (W_WIDTH) | ||
| 16 | - nxp,whold: Write hold time (W_HOLD) | ||
| 17 | - nxp,wsetup: Write setup time (W_SETUP) | ||
| 18 | - nxp,rwidth: Read pulse width (R_WIDTH) | ||
| 19 | - nxp,rhold: Read hold time (R_HOLD) | ||
| 20 | - nxp,rsetup: Read setup time (R_SETUP) | ||
| 21 | |||
| 22 | Optional subnodes: | ||
| 23 | - Partitions, see Documentation/devicetree/bindings/mtd/partition.txt | ||
| 24 | |||
| 25 | Example: | ||
| 26 | |||
| 27 | slc: flash@20020000 { | ||
| 28 | compatible = "nxp,lpc3220-slc"; | ||
| 29 | reg = <0x20020000 0x1000>; | ||
| 30 | #address-cells = <1>; | ||
| 31 | #size-cells = <1>; | ||
| 32 | |||
| 33 | nxp,wdr-clks = <14>; | ||
| 34 | nxp,wwidth = <40000000>; | ||
| 35 | nxp,whold = <100000000>; | ||
| 36 | nxp,wsetup = <100000000>; | ||
| 37 | nxp,rdr-clks = <14>; | ||
| 38 | nxp,rwidth = <40000000>; | ||
| 39 | nxp,rhold = <66666666>; | ||
| 40 | nxp,rsetup = <100000000>; | ||
| 41 | nand-on-flash-bbt; | ||
| 42 | gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */ | ||
| 43 | |||
| 44 | mtd0@00000000 { | ||
| 45 | label = "phy3250-boot"; | ||
| 46 | reg = <0x00000000 0x00064000>; | ||
| 47 | read-only; | ||
| 48 | }; | ||
| 49 | |||
| 50 | ... | ||
| 51 | |||
| 52 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt index a63c2bd7de2b..94de19b8f16b 100644 --- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt | |||
| @@ -16,6 +16,13 @@ file systems on embedded devices. | |||
| 16 | - #address-cells, #size-cells : Must be present if the device has | 16 | - #address-cells, #size-cells : Must be present if the device has |
| 17 | sub-nodes representing partitions (see below). In this case | 17 | sub-nodes representing partitions (see below). In this case |
| 18 | both #address-cells and #size-cells must be equal to 1. | 18 | both #address-cells and #size-cells must be equal to 1. |
| 19 | - no-unaligned-direct-access: boolean to disable the default direct | ||
| 20 | mapping of the flash. | ||
| 21 | On some platforms (e.g. MPC5200) a direct 1:1 mapping may cause | ||
| 22 | problems with JFFS2 usage, as the local bus (LPB) doesn't support | ||
| 23 | unaligned accesses as implemented in the JFFS2 code via memcpy(). | ||
| 24 | By defining "no-unaligned-direct-access", the flash will not be | ||
| 25 | exposed directly to the MTD users (e.g. JFFS2) any more. | ||
| 19 | 26 | ||
| 20 | For JEDEC compatible devices, the following additional properties | 27 | For JEDEC compatible devices, the following additional properties |
| 21 | are defined: | 28 | are defined: |
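A hypothetical CFI flash node using the new property could look like the following sketch; the addresses and bank width are made up for illustration:

	flash@ff000000 {
		compatible = "cfi-flash";
		reg = <0xff000000 0x01000000>;
		bank-width = <4>;
		#address-cells = <1>;
		#size-cells = <1>;
		no-unaligned-direct-access;	/* avoid the 1:1 direct mapping, e.g. for JFFS2 on the MPC5200 LPB */
	};
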
diff --git a/MAINTAINERS b/MAINTAINERS index eae3cd86831e..0f6ff811da07 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1544,7 +1544,7 @@ S: Supported | |||
| 1544 | F: drivers/rtc/rtc-bfin.c | 1544 | F: drivers/rtc/rtc-bfin.c |
| 1545 | 1545 | ||
| 1546 | BLACKFIN SDH DRIVER | 1546 | BLACKFIN SDH DRIVER |
| 1547 | M: Cliff Cai <cliff.cai@analog.com> | 1547 | M: Sonic Zhang <sonic.zhang@analog.com> |
| 1548 | L: uclinux-dist-devel@blackfin.uclinux.org | 1548 | L: uclinux-dist-devel@blackfin.uclinux.org |
| 1549 | W: http://blackfin.uclinux.org | 1549 | W: http://blackfin.uclinux.org |
| 1550 | S: Supported | 1550 | S: Supported |
| @@ -5207,8 +5207,10 @@ S: Maintained | |||
| 5207 | F: drivers/mmc/host/omap.c | 5207 | F: drivers/mmc/host/omap.c |
| 5208 | 5208 | ||
| 5209 | OMAP HS MMC SUPPORT | 5209 | OMAP HS MMC SUPPORT |
| 5210 | M: Venkatraman S <svenkatr@ti.com> | ||
| 5211 | L: linux-mmc@vger.kernel.org | ||
| 5210 | L: linux-omap@vger.kernel.org | 5212 | L: linux-omap@vger.kernel.org |
| 5211 | S: Orphan | 5213 | S: Maintained |
| 5212 | F: drivers/mmc/host/omap_hsmmc.c | 5214 | F: drivers/mmc/host/omap_hsmmc.c |
| 5213 | 5215 | ||
| 5214 | OMAP RANDOM NUMBER GENERATOR SUPPORT | 5216 | OMAP RANDOM NUMBER GENERATOR SUPPORT |
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 2f71a91ca98e..75d069fcf897 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi | |||
| @@ -407,6 +407,13 @@ | |||
| 407 | status = "disabled"; | 407 | status = "disabled"; |
| 408 | }; | 408 | }; |
| 409 | 409 | ||
| 410 | nand@83fdb000 { | ||
| 411 | compatible = "fsl,imx51-nand"; | ||
| 412 | reg = <0x83fdb000 0x1000 0xcfff0000 0x10000>; | ||
| 413 | interrupts = <8>; | ||
| 414 | status = "disabled"; | ||
| 415 | }; | ||
| 416 | |||
| 410 | ssi3: ssi@83fe8000 { | 417 | ssi3: ssi@83fe8000 { |
| 411 | compatible = "fsl,imx51-ssi", "fsl,imx21-ssi"; | 418 | compatible = "fsl,imx51-ssi", "fsl,imx21-ssi"; |
| 412 | reg = <0x83fe8000 0x4000>; | 419 | reg = <0x83fe8000 0x4000>; |
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 221cf3321b0a..76ebb1ad2675 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi | |||
| @@ -518,6 +518,13 @@ | |||
| 518 | status = "disabled"; | 518 | status = "disabled"; |
| 519 | }; | 519 | }; |
| 520 | 520 | ||
| 521 | nand@63fdb000 { | ||
| 522 | compatible = "fsl,imx53-nand"; | ||
| 523 | reg = <0x63fdb000 0x1000 0xf7ff0000 0x10000>; | ||
| 524 | interrupts = <8>; | ||
| 525 | status = "disabled"; | ||
| 526 | }; | ||
| 527 | |||
| 521 | ssi3: ssi@63fe8000 { | 528 | ssi3: ssi@63fe8000 { |
| 522 | compatible = "fsl,imx53-ssi", "fsl,imx21-ssi"; | 529 | compatible = "fsl,imx53-ssi", "fsl,imx21-ssi"; |
| 523 | reg = <0x63fe8000 0x4000>; | 530 | reg = <0x63fe8000 0x4000>; |
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts index d71b8d581e3d..1e7c7a8e2123 100644 --- a/arch/arm/boot/dts/spear300-evb.dts +++ b/arch/arm/boot/dts/spear300-evb.dts | |||
| @@ -80,8 +80,7 @@ | |||
| 80 | }; | 80 | }; |
| 81 | 81 | ||
| 82 | sdhci@70000000 { | 82 | sdhci@70000000 { |
| 83 | int-gpio = <&gpio1 0 0>; | 83 | cd-gpios = <&gpio1 0 0>; |
| 84 | power-gpio = <&gpio1 2 1>; | ||
| 85 | status = "okay"; | 84 | status = "okay"; |
| 86 | }; | 85 | }; |
| 87 | 86 | ||
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts index e4e912f95024..082328bd64ab 100644 --- a/arch/arm/boot/dts/spear320-evb.dts +++ b/arch/arm/boot/dts/spear320-evb.dts | |||
| @@ -103,8 +103,6 @@ | |||
| 103 | }; | 103 | }; |
| 104 | 104 | ||
| 105 | sdhci@70000000 { | 105 | sdhci@70000000 { |
| 106 | power-gpio = <&gpio0 2 1>; | ||
| 107 | power_always_enb; | ||
| 108 | status = "okay"; | 106 | status = "okay"; |
| 109 | }; | 107 | }; |
| 110 | 108 | ||
diff --git a/arch/arm/configs/cam60_defconfig b/arch/arm/configs/cam60_defconfig index cedc92ef88ab..14579711d8fc 100644 --- a/arch/arm/configs/cam60_defconfig +++ b/arch/arm/configs/cam60_defconfig | |||
| @@ -49,7 +49,6 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y | |||
| 49 | CONFIG_MTD_PLATRAM=m | 49 | CONFIG_MTD_PLATRAM=m |
| 50 | CONFIG_MTD_DATAFLASH=y | 50 | CONFIG_MTD_DATAFLASH=y |
| 51 | CONFIG_MTD_NAND=y | 51 | CONFIG_MTD_NAND=y |
| 52 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 53 | CONFIG_MTD_NAND_ATMEL=y | 52 | CONFIG_MTD_NAND_ATMEL=y |
| 54 | CONFIG_BLK_DEV_LOOP=y | 53 | CONFIG_BLK_DEV_LOOP=y |
| 55 | CONFIG_BLK_DEV_RAM=y | 54 | CONFIG_BLK_DEV_RAM=y |
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig index e53c47563845..4b8a25d9e686 100644 --- a/arch/arm/configs/corgi_defconfig +++ b/arch/arm/configs/corgi_defconfig | |||
| @@ -97,7 +97,6 @@ CONFIG_MTD_BLOCK=y | |||
| 97 | CONFIG_MTD_ROM=y | 97 | CONFIG_MTD_ROM=y |
| 98 | CONFIG_MTD_COMPLEX_MAPPINGS=y | 98 | CONFIG_MTD_COMPLEX_MAPPINGS=y |
| 99 | CONFIG_MTD_NAND=y | 99 | CONFIG_MTD_NAND=y |
| 100 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 101 | CONFIG_MTD_NAND_SHARPSL=y | 100 | CONFIG_MTD_NAND_SHARPSL=y |
| 102 | CONFIG_BLK_DEV_LOOP=y | 101 | CONFIG_BLK_DEV_LOOP=y |
| 103 | CONFIG_IDE=y | 102 | CONFIG_IDE=y |
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 8e97b2f7ceec..806005a4c4c1 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig | |||
| @@ -61,7 +61,6 @@ CONFIG_MTD_CFI_STAA=y | |||
| 61 | CONFIG_MTD_ROM=y | 61 | CONFIG_MTD_ROM=y |
| 62 | CONFIG_MTD_PHYSMAP=y | 62 | CONFIG_MTD_PHYSMAP=y |
| 63 | CONFIG_MTD_NAND=y | 63 | CONFIG_MTD_NAND=y |
| 64 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 65 | CONFIG_BLK_DEV_NBD=y | 64 | CONFIG_BLK_DEV_NBD=y |
| 66 | CONFIG_EEPROM_LEGACY=y | 65 | CONFIG_EEPROM_LEGACY=y |
| 67 | CONFIG_SCSI=y | 66 | CONFIG_SCSI=y |
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig index 082175c54e7c..00630e6af45c 100644 --- a/arch/arm/configs/mini2440_defconfig +++ b/arch/arm/configs/mini2440_defconfig | |||
| @@ -102,7 +102,6 @@ CONFIG_MTD_CFI_STAA=y | |||
| 102 | CONFIG_MTD_RAM=y | 102 | CONFIG_MTD_RAM=y |
| 103 | CONFIG_MTD_ROM=y | 103 | CONFIG_MTD_ROM=y |
| 104 | CONFIG_MTD_NAND=y | 104 | CONFIG_MTD_NAND=y |
| 105 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 106 | CONFIG_MTD_NAND_S3C2410=y | 105 | CONFIG_MTD_NAND_S3C2410=y |
| 107 | CONFIG_MTD_NAND_PLATFORM=y | 106 | CONFIG_MTD_NAND_PLATFORM=y |
| 108 | CONFIG_MTD_LPDDR=y | 107 | CONFIG_MTD_LPDDR=y |
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig index 7305ebddb510..1f08219c1b3c 100644 --- a/arch/arm/configs/mv78xx0_defconfig +++ b/arch/arm/configs/mv78xx0_defconfig | |||
| @@ -49,7 +49,6 @@ CONFIG_MTD_CFI_INTELEXT=y | |||
| 49 | CONFIG_MTD_CFI_AMDSTD=y | 49 | CONFIG_MTD_CFI_AMDSTD=y |
| 50 | CONFIG_MTD_PHYSMAP=y | 50 | CONFIG_MTD_PHYSMAP=y |
| 51 | CONFIG_MTD_NAND=y | 51 | CONFIG_MTD_NAND=y |
| 52 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 53 | CONFIG_MTD_NAND_ORION=y | 52 | CONFIG_MTD_NAND_ORION=y |
| 54 | CONFIG_BLK_DEV_LOOP=y | 53 | CONFIG_BLK_DEV_LOOP=y |
| 55 | # CONFIG_SCSI_PROC_FS is not set | 54 | # CONFIG_SCSI_PROC_FS is not set |
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index bf123c5384d4..240b25eea565 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig | |||
| @@ -57,7 +57,6 @@ CONFIG_MTD_CHAR=y | |||
| 57 | CONFIG_MTD_BLOCK=y | 57 | CONFIG_MTD_BLOCK=y |
| 58 | CONFIG_MTD_NAND=y | 58 | CONFIG_MTD_NAND=y |
| 59 | CONFIG_MTD_NAND_ECC_SMC=y | 59 | CONFIG_MTD_NAND_ECC_SMC=y |
| 60 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 61 | CONFIG_MTD_NAND_NOMADIK=y | 60 | CONFIG_MTD_NAND_NOMADIK=y |
| 62 | CONFIG_MTD_ONENAND=y | 61 | CONFIG_MTD_ONENAND=y |
| 63 | CONFIG_MTD_ONENAND_VERIFY_WRITE=y | 62 | CONFIG_MTD_ONENAND_VERIFY_WRITE=y |
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig index a288d7033950..cd5e6ba9a54d 100644 --- a/arch/arm/configs/orion5x_defconfig +++ b/arch/arm/configs/orion5x_defconfig | |||
| @@ -72,7 +72,6 @@ CONFIG_MTD_CFI_INTELEXT=y | |||
| 72 | CONFIG_MTD_CFI_AMDSTD=y | 72 | CONFIG_MTD_CFI_AMDSTD=y |
| 73 | CONFIG_MTD_PHYSMAP=y | 73 | CONFIG_MTD_PHYSMAP=y |
| 74 | CONFIG_MTD_NAND=y | 74 | CONFIG_MTD_NAND=y |
| 75 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 76 | CONFIG_MTD_NAND_PLATFORM=y | 75 | CONFIG_MTD_NAND_PLATFORM=y |
| 77 | CONFIG_MTD_NAND_ORION=y | 76 | CONFIG_MTD_NAND_ORION=y |
| 78 | CONFIG_BLK_DEV_LOOP=y | 77 | CONFIG_BLK_DEV_LOOP=y |
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig index 1677a0607ca9..60e313834b3f 100644 --- a/arch/arm/configs/pxa3xx_defconfig +++ b/arch/arm/configs/pxa3xx_defconfig | |||
| @@ -36,7 +36,6 @@ CONFIG_MTD_CONCAT=y | |||
| 36 | CONFIG_MTD_CHAR=y | 36 | CONFIG_MTD_CHAR=y |
| 37 | CONFIG_MTD_BLOCK=y | 37 | CONFIG_MTD_BLOCK=y |
| 38 | CONFIG_MTD_NAND=y | 38 | CONFIG_MTD_NAND=y |
| 39 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 40 | CONFIG_MTD_NAND_PXA3xx=y | 39 | CONFIG_MTD_NAND_PXA3xx=y |
| 41 | CONFIG_MTD_NAND_PXA3xx_BUILTIN=y | 40 | CONFIG_MTD_NAND_PXA3xx_BUILTIN=y |
| 42 | CONFIG_MTD_ONENAND=y | 41 | CONFIG_MTD_ONENAND=y |
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index 70158273c6dd..df77931a4326 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig | |||
| @@ -94,7 +94,6 @@ CONFIG_MTD_BLOCK=y | |||
| 94 | CONFIG_MTD_ROM=y | 94 | CONFIG_MTD_ROM=y |
| 95 | CONFIG_MTD_COMPLEX_MAPPINGS=y | 95 | CONFIG_MTD_COMPLEX_MAPPINGS=y |
| 96 | CONFIG_MTD_NAND=y | 96 | CONFIG_MTD_NAND=y |
| 97 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 98 | CONFIG_MTD_NAND_SHARPSL=y | 97 | CONFIG_MTD_NAND_SHARPSL=y |
| 99 | CONFIG_BLK_DEV_LOOP=y | 98 | CONFIG_BLK_DEV_LOOP=y |
| 100 | CONFIG_IDE=y | 99 | CONFIG_IDE=y |
diff --git a/arch/arm/mach-clps711x/autcpu12.c b/arch/arm/mach-clps711x/autcpu12.c index 3fb79a1d0bde..32871918bb6e 100644 --- a/arch/arm/mach-clps711x/autcpu12.c +++ b/arch/arm/mach-clps711x/autcpu12.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
| 25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
| 26 | #include <linux/ioport.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 26 | 28 | ||
| 27 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
| 28 | #include <asm/sizes.h> | 30 | #include <asm/sizes.h> |
| @@ -62,9 +64,26 @@ void __init autcpu12_map_io(void) | |||
| 62 | iotable_init(autcpu12_io_desc, ARRAY_SIZE(autcpu12_io_desc)); | 64 | iotable_init(autcpu12_io_desc, ARRAY_SIZE(autcpu12_io_desc)); |
| 63 | } | 65 | } |
| 64 | 66 | ||
| 67 | static struct resource autcpu12_nvram_resource[] __initdata = { | ||
| 68 | DEFINE_RES_MEM_NAMED(AUTCPU12_PHYS_NVRAM, SZ_128K, "SRAM"), | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct platform_device autcpu12_nvram_pdev __initdata = { | ||
| 72 | .name = "autcpu12_nvram", | ||
| 73 | .id = -1, | ||
| 74 | .resource = autcpu12_nvram_resource, | ||
| 75 | .num_resources = ARRAY_SIZE(autcpu12_nvram_resource), | ||
| 76 | }; | ||
| 77 | |||
| 78 | static void __init autcpu12_init(void) | ||
| 79 | { | ||
| 80 | platform_device_register(&autcpu12_nvram_pdev); | ||
| 81 | } | ||
| 82 | |||
| 65 | MACHINE_START(AUTCPU12, "autronix autcpu12") | 83 | MACHINE_START(AUTCPU12, "autronix autcpu12") |
| 66 | /* Maintainer: Thomas Gleixner */ | 84 | /* Maintainer: Thomas Gleixner */ |
| 67 | .atag_offset = 0x20000, | 85 | .atag_offset = 0x20000, |
| 86 | .init_machine = autcpu12_init, | ||
| 68 | .map_io = autcpu12_map_io, | 87 | .map_io = autcpu12_map_io, |
| 69 | .init_irq = clps711x_init_irq, | 88 | .init_irq = clps711x_init_irq, |
| 70 | .timer = &clps711x_timer, | 89 | .timer = &clps711x_timer, |
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c index f60b66dbcf84..21d568b3b149 100644 --- a/arch/arm/mach-exynos/dma.c +++ b/arch/arm/mach-exynos/dma.c | |||
| @@ -303,10 +303,12 @@ static int __init exynos_dma_init(void) | |||
| 303 | 303 | ||
| 304 | dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask); | 304 | dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask); |
| 305 | dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask); | 305 | dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask); |
| 306 | dma_cap_set(DMA_PRIVATE, exynos_pdma0_pdata.cap_mask); | ||
| 306 | amba_device_register(&exynos_pdma0_device, &iomem_resource); | 307 | amba_device_register(&exynos_pdma0_device, &iomem_resource); |
| 307 | 308 | ||
| 308 | dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask); | 309 | dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask); |
| 309 | dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask); | 310 | dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask); |
| 311 | dma_cap_set(DMA_PRIVATE, exynos_pdma1_pdata.cap_mask); | ||
| 310 | amba_device_register(&exynos_pdma1_device, &iomem_resource); | 312 | amba_device_register(&exynos_pdma1_device, &iomem_resource); |
| 311 | 313 | ||
| 312 | dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask); | 314 | dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask); |
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c index e5165a84f93f..a0bf84803eac 100644 --- a/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/arch/arm/mach-imx/clk-imx51-imx53.c | |||
| @@ -369,6 +369,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |||
| 369 | clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi"); | 369 | clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi"); |
| 370 | clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi"); | 370 | clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi"); |
| 371 | clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi"); | 371 | clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi"); |
| 372 | clk_register_clkdev(clk[nfc_gate], NULL, "83fdb000.nand"); | ||
| 372 | 373 | ||
| 373 | /* set the usboh3 parent to pll2_sw */ | 374 | /* set the usboh3 parent to pll2_sw */ |
| 374 | clk_set_parent(clk[usboh3_sel], clk[pll2_sw]); | 375 | clk_set_parent(clk[usboh3_sel], clk[pll2_sw]); |
| @@ -461,6 +462,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |||
| 461 | clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi"); | 462 | clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi"); |
| 462 | clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi"); | 463 | clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi"); |
| 463 | clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi"); | 464 | clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi"); |
| 465 | clk_register_clkdev(clk[nfc_gate], NULL, "63fdb000.nand"); | ||
| 464 | clk_register_clkdev(clk[can1_ipg_gate], "ipg", "53fc8000.can"); | 466 | clk_register_clkdev(clk[can1_ipg_gate], "ipg", "53fc8000.can"); |
| 465 | clk_register_clkdev(clk[can1_serial_gate], "per", "53fc8000.can"); | 467 | clk_register_clkdev(clk[can1_serial_gate], "per", "53fc8000.can"); |
| 466 | clk_register_clkdev(clk[can2_ipg_gate], "ipg", "53fcc000.can"); | 468 | clk_register_clkdev(clk[can2_ipg_gate], "ipg", "53fcc000.can"); |
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c index e10648801b2e..5633d698f1e1 100644 --- a/arch/arm/mach-spear13xx/spear13xx.c +++ b/arch/arm/mach-spear13xx/spear13xx.c | |||
| @@ -78,6 +78,9 @@ struct dw_dma_platform_data dmac_plat_data = { | |||
| 78 | .nr_channels = 8, | 78 | .nr_channels = 8, |
| 79 | .chan_allocation_order = CHAN_ALLOCATION_DESCENDING, | 79 | .chan_allocation_order = CHAN_ALLOCATION_DESCENDING, |
| 80 | .chan_priority = CHAN_PRIORITY_DESCENDING, | 80 | .chan_priority = CHAN_PRIORITY_DESCENDING, |
| 81 | .block_size = 4095U, | ||
| 82 | .nr_masters = 2, | ||
| 83 | .data_width = { 3, 3, 0, 0 }, | ||
| 81 | }; | 84 | }; |
| 82 | 85 | ||
| 83 | void __init spear13xx_l2x0_init(void) | 86 | void __init spear13xx_l2x0_init(void) |
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_nand.c b/arch/arm/plat-mxc/devices/platform-mxc_nand.c index 1568f39fba8b..95b75cc70515 100644 --- a/arch/arm/plat-mxc/devices/platform-mxc_nand.c +++ b/arch/arm/plat-mxc/devices/platform-mxc_nand.c | |||
| @@ -63,10 +63,6 @@ struct platform_device *__init imx_add_mxc_nand( | |||
| 63 | /* AXI has to come first, that's how the mxc_nand driver expect it */ | 63 | /* AXI has to come first, that's how the mxc_nand driver expect it */ |
| 64 | struct resource res[] = { | 64 | struct resource res[] = { |
| 65 | { | 65 | { |
| 66 | .start = data->axibase, | ||
| 67 | .end = data->axibase + SZ_16K - 1, | ||
| 68 | .flags = IORESOURCE_MEM, | ||
| 69 | }, { | ||
| 70 | .start = data->iobase, | 66 | .start = data->iobase, |
| 71 | .end = data->iobase + data->iosize - 1, | 67 | .end = data->iobase + data->iosize - 1, |
| 72 | .flags = IORESOURCE_MEM, | 68 | .flags = IORESOURCE_MEM, |
| @@ -74,10 +70,13 @@ struct platform_device *__init imx_add_mxc_nand( | |||
| 74 | .start = data->irq, | 70 | .start = data->irq, |
| 75 | .end = data->irq, | 71 | .end = data->irq, |
| 76 | .flags = IORESOURCE_IRQ, | 72 | .flags = IORESOURCE_IRQ, |
| 73 | }, { | ||
| 74 | .start = data->axibase, | ||
| 75 | .end = data->axibase + SZ_16K - 1, | ||
| 76 | .flags = IORESOURCE_MEM, | ||
| 77 | }, | 77 | }, |
| 78 | }; | 78 | }; |
| 79 | return imx_add_platform_device("mxc_nand", data->id, | 79 | return imx_add_platform_device("mxc_nand", data->id, |
| 80 | res + !data->axibase, | 80 | res, ARRAY_SIZE(res) - !data->axibase, |
| 81 | ARRAY_SIZE(res) - !data->axibase, | ||
| 82 | pdata, sizeof(*pdata)); | 81 | pdata, sizeof(*pdata)); |
| 83 | } | 82 | } |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 0445c4fd67e3..b323d8d3185b 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
| @@ -605,6 +605,9 @@ static void __init genclk_init_parent(struct clk *clk) | |||
| 605 | 605 | ||
| 606 | static struct dw_dma_platform_data dw_dmac0_data = { | 606 | static struct dw_dma_platform_data dw_dmac0_data = { |
| 607 | .nr_channels = 3, | 607 | .nr_channels = 3, |
| 608 | .block_size = 4095U, | ||
| 609 | .nr_masters = 2, | ||
| 610 | .data_width = { 2, 2, 0, 0 }, | ||
| 608 | }; | 611 | }; |
| 609 | 612 | ||
| 610 | static struct resource dw_dmac0_resource[] = { | 613 | static struct resource dw_dmac0_resource[] = { |
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig index 0fdc4ecaa531..91988370b75e 100644 --- a/arch/blackfin/configs/BF561-ACVILON_defconfig +++ b/arch/blackfin/configs/BF561-ACVILON_defconfig | |||
| @@ -57,7 +57,6 @@ CONFIG_MTD_PLATRAM=y | |||
| 57 | CONFIG_MTD_PHRAM=y | 57 | CONFIG_MTD_PHRAM=y |
| 58 | CONFIG_MTD_BLOCK2MTD=y | 58 | CONFIG_MTD_BLOCK2MTD=y |
| 59 | CONFIG_MTD_NAND=y | 59 | CONFIG_MTD_NAND=y |
| 60 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 61 | CONFIG_MTD_NAND_PLATFORM=y | 60 | CONFIG_MTD_NAND_PLATFORM=y |
| 62 | CONFIG_BLK_DEV_LOOP=y | 61 | CONFIG_BLK_DEV_LOOP=y |
| 63 | CONFIG_BLK_DEV_RAM=y | 62 | CONFIG_BLK_DEV_RAM=y |
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig index 55902d9cd0f2..b85b121397c8 100644 --- a/arch/mips/configs/rb532_defconfig +++ b/arch/mips/configs/rb532_defconfig | |||
| @@ -119,7 +119,6 @@ CONFIG_MTD_CHAR=y | |||
| 119 | CONFIG_MTD_BLOCK=y | 119 | CONFIG_MTD_BLOCK=y |
| 120 | CONFIG_MTD_BLOCK2MTD=y | 120 | CONFIG_MTD_BLOCK2MTD=y |
| 121 | CONFIG_MTD_NAND=y | 121 | CONFIG_MTD_NAND=y |
| 122 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 123 | CONFIG_MTD_NAND_PLATFORM=y | 122 | CONFIG_MTD_NAND_PLATFORM=y |
| 124 | CONFIG_ATA=y | 123 | CONFIG_ATA=y |
| 125 | # CONFIG_ATA_VERBOSE_ERROR is not set | 124 | # CONFIG_ATA_VERBOSE_ERROR is not set |
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig index 126ef1b08a01..e4ad2e27551a 100644 --- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | |||
| @@ -38,7 +38,6 @@ CONFIG_MTD_CFI=y | |||
| 38 | CONFIG_MTD_CFI_AMDSTD=y | 38 | CONFIG_MTD_CFI_AMDSTD=y |
| 39 | CONFIG_MTD_PHYSMAP_OF=y | 39 | CONFIG_MTD_PHYSMAP_OF=y |
| 40 | CONFIG_MTD_NAND=y | 40 | CONFIG_MTD_NAND=y |
| 41 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 42 | CONFIG_MTD_NAND_FSL_ELBC=y | 41 | CONFIG_MTD_NAND_FSL_ELBC=y |
| 43 | CONFIG_PROC_DEVICETREE=y | 42 | CONFIG_PROC_DEVICETREE=y |
| 44 | CONFIG_BLK_DEV_LOOP=y | 43 | CONFIG_BLK_DEV_LOOP=y |
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig index abcf00ad939e..34ff5686be08 100644 --- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig | |||
| @@ -37,7 +37,6 @@ CONFIG_MTD_CFI=y | |||
| 37 | CONFIG_MTD_CFI_AMDSTD=y | 37 | CONFIG_MTD_CFI_AMDSTD=y |
| 38 | CONFIG_MTD_PHYSMAP_OF=y | 38 | CONFIG_MTD_PHYSMAP_OF=y |
| 39 | CONFIG_MTD_NAND=y | 39 | CONFIG_MTD_NAND=y |
| 40 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 41 | CONFIG_PROC_DEVICETREE=y | 40 | CONFIG_PROC_DEVICETREE=y |
| 42 | CONFIG_BLK_DEV_LOOP=y | 41 | CONFIG_BLK_DEV_LOOP=y |
| 43 | CONFIG_BLK_DEV_RAM=y | 42 | CONFIG_BLK_DEV_RAM=y |
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 9352e4430c3b..09116c6a6719 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig | |||
| @@ -50,7 +50,6 @@ CONFIG_MTD_CFI=y | |||
| 50 | CONFIG_MTD_CFI_AMDSTD=y | 50 | CONFIG_MTD_CFI_AMDSTD=y |
| 51 | CONFIG_MTD_PHYSMAP_OF=y | 51 | CONFIG_MTD_PHYSMAP_OF=y |
| 52 | CONFIG_MTD_NAND=y | 52 | CONFIG_MTD_NAND=y |
| 53 | CONFIG_MTD_NAND_VERIFY_WRITE=y | ||
| 54 | CONFIG_MTD_NAND_FSL_ELBC=y | 53 | CONFIG_MTD_NAND_FSL_ELBC=y |
| 55 | CONFIG_PROC_DEVICETREE=y | 54 | CONFIG_PROC_DEVICETREE=y |
| 56 | CONFIG_BLK_DEV_LOOP=y | 55 | CONFIG_BLK_DEV_LOOP=y |
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index f80ff93f6f75..61d41c11ee0a 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
| @@ -1,21 +1,5 @@ | |||
| 1 | # User exported sparc header files | 1 | # User exported sparc header files |
| 2 | include include/asm-generic/Kbuild.asm | ||
| 3 | 2 | ||
| 4 | header-y += apc.h | ||
| 5 | header-y += asi.h | ||
| 6 | header-y += display7seg.h | ||
| 7 | header-y += envctrl.h | ||
| 8 | header-y += fbio.h | ||
| 9 | header-y += jsflash.h | ||
| 10 | header-y += openpromio.h | ||
| 11 | header-y += perfctr.h | ||
| 12 | header-y += psrcompat.h | ||
| 13 | header-y += psr.h | ||
| 14 | header-y += pstate.h | ||
| 15 | header-y += traps.h | ||
| 16 | header-y += uctx.h | ||
| 17 | header-y += utrap.h | ||
| 18 | header-y += watchdog.h | ||
| 19 | 3 | ||
| 20 | generic-y += clkdev.h | 4 | generic-y += clkdev.h |
| 21 | generic-y += div64.h | 5 | generic-y += div64.h |
diff --git a/arch/sparc/include/asm/fbio.h b/arch/sparc/include/asm/fbio.h index 0a21da87f7d6..1d9afe277e9c 100644 --- a/arch/sparc/include/asm/fbio.h +++ b/arch/sparc/include/asm/fbio.h | |||
| @@ -1,225 +1,10 @@ | |||
| 1 | #ifndef __LINUX_FBIO_H | 1 | #ifndef __LINUX_FBIO_H |
| 2 | #define __LINUX_FBIO_H | 2 | #define __LINUX_FBIO_H |
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <uapi/asm/fbio.h> |
| 5 | #include <linux/types.h> | ||
| 6 | 5 | ||
| 7 | /* Constants used for fbio SunOS compatibility */ | ||
| 8 | /* (C) 1996 Miguel de Icaza */ | ||
| 9 | |||
| 10 | /* Frame buffer types */ | ||
| 11 | #define FBTYPE_NOTYPE -1 | ||
| 12 | #define FBTYPE_SUN1BW 0 /* mono */ | ||
| 13 | #define FBTYPE_SUN1COLOR 1 | ||
| 14 | #define FBTYPE_SUN2BW 2 | ||
| 15 | #define FBTYPE_SUN2COLOR 3 | ||
| 16 | #define FBTYPE_SUN2GP 4 | ||
| 17 | #define FBTYPE_SUN5COLOR 5 | ||
| 18 | #define FBTYPE_SUN3COLOR 6 | ||
| 19 | #define FBTYPE_MEMCOLOR 7 | ||
| 20 | #define FBTYPE_SUN4COLOR 8 | ||
| 21 | |||
| 22 | #define FBTYPE_NOTSUN1 9 | ||
| 23 | #define FBTYPE_NOTSUN2 10 | ||
| 24 | #define FBTYPE_NOTSUN3 11 | ||
| 25 | |||
| 26 | #define FBTYPE_SUNFAST_COLOR 12 /* cg6 */ | ||
| 27 | #define FBTYPE_SUNROP_COLOR 13 | ||
| 28 | #define FBTYPE_SUNFB_VIDEO 14 | ||
| 29 | #define FBTYPE_SUNGIFB 15 | ||
| 30 | #define FBTYPE_SUNGPLAS 16 | ||
| 31 | #define FBTYPE_SUNGP3 17 | ||
| 32 | #define FBTYPE_SUNGT 18 | ||
| 33 | #define FBTYPE_SUNLEO 19 /* zx Leo card */ | ||
| 34 | #define FBTYPE_MDICOLOR 20 /* cg14 */ | ||
| 35 | #define FBTYPE_TCXCOLOR 21 /* SUNW,tcx card */ | ||
| 36 | |||
| 37 | #define FBTYPE_LASTPLUSONE 21 /* This is not last + 1 in fact... */ | ||
| 38 | |||
| 39 | /* Does not seem to be listed in the Sun file either */ | ||
| 40 | #define FBTYPE_CREATOR 22 | ||
| 41 | #define FBTYPE_PCI_IGA1682 23 | ||
| 42 | #define FBTYPE_P9100COLOR 24 | ||
| 43 | |||
| 44 | #define FBTYPE_PCI_GENERIC 1000 | ||
| 45 | #define FBTYPE_PCI_MACH64 1001 | ||
| 46 | |||
| 47 | /* fbio ioctls */ | ||
| 48 | /* Returned by FBIOGTYPE */ | ||
| 49 | struct fbtype { | ||
| 50 | int fb_type; /* fb type, see above */ | ||
| 51 | int fb_height; /* pixels */ | ||
| 52 | int fb_width; /* pixels */ | ||
| 53 | int fb_depth; | ||
| 54 | int fb_cmsize; /* color map entries */ | ||
| 55 | int fb_size; /* fb size in bytes */ | ||
| 56 | }; | ||
| 57 | #define FBIOGTYPE _IOR('F', 0, struct fbtype) | ||
| 58 | |||
| 59 | struct fbcmap { | ||
| 60 | int index; /* first element (0 origin) */ | ||
| 61 | int count; | ||
| 62 | unsigned char __user *red; | ||
| 63 | unsigned char __user *green; | ||
| 64 | unsigned char __user *blue; | ||
| 65 | }; | ||
| 66 | |||
| 67 | #ifdef __KERNEL__ | ||
| 68 | #define FBIOPUTCMAP_SPARC _IOW('F', 3, struct fbcmap) | 6 | #define FBIOPUTCMAP_SPARC _IOW('F', 3, struct fbcmap) |
| 69 | #define FBIOGETCMAP_SPARC _IOW('F', 4, struct fbcmap) | 7 | #define FBIOGETCMAP_SPARC _IOW('F', 4, struct fbcmap) |
| 70 | #else | ||
| 71 | #define FBIOPUTCMAP _IOW('F', 3, struct fbcmap) | ||
| 72 | #define FBIOGETCMAP _IOW('F', 4, struct fbcmap) | ||
| 73 | #endif | ||
| 74 | |||
| 75 | /* # of device specific values */ | ||
| 76 | #define FB_ATTR_NDEVSPECIFIC 8 | ||
| 77 | /* # of possible emulations */ | ||
| 78 | #define FB_ATTR_NEMUTYPES 4 | ||
| 79 | |||
| 80 | struct fbsattr { | ||
| 81 | int flags; | ||
| 82 | int emu_type; /* -1 if none */ | ||
| 83 | int dev_specific[FB_ATTR_NDEVSPECIFIC]; | ||
| 84 | }; | ||
| 85 | |||
| 86 | struct fbgattr { | ||
| 87 | int real_type; /* real frame buffer type */ | ||
| 88 | int owner; /* unknown */ | ||
| 89 | struct fbtype fbtype; /* real frame buffer fbtype */ | ||
| 90 | struct fbsattr sattr; | ||
| 91 | int emu_types[FB_ATTR_NEMUTYPES]; /* supported emulations */ | ||
| 92 | }; | ||
| 93 | #define FBIOSATTR _IOW('F', 5, struct fbgattr) /* Unsupported: */ | ||
| 94 | #define FBIOGATTR _IOR('F', 6, struct fbgattr) /* supported */ | ||
| 95 | |||
| 96 | #define FBIOSVIDEO _IOW('F', 7, int) | ||
| 97 | #define FBIOGVIDEO _IOR('F', 8, int) | ||
| 98 | |||
| 99 | struct fbcursor { | ||
| 100 | short set; /* what to set, choose from the list above */ | ||
| 101 | short enable; /* cursor on/off */ | ||
| 102 | struct fbcurpos pos; /* cursor position */ | ||
| 103 | struct fbcurpos hot; /* cursor hot spot */ | ||
| 104 | struct fbcmap cmap; /* color map info */ | ||
| 105 | struct fbcurpos size; /* cursor bit map size */ | ||
| 106 | char __user *image; /* cursor image bits */ | ||
| 107 | char __user *mask; /* cursor mask bits */ | ||
| 108 | }; | ||
| 109 | |||
| 110 | /* set/get cursor attributes/shape */ | ||
| 111 | #define FBIOSCURSOR _IOW('F', 24, struct fbcursor) | ||
| 112 | #define FBIOGCURSOR _IOWR('F', 25, struct fbcursor) | ||
| 113 | |||
| 114 | /* set/get cursor position */ | ||
| 115 | #define FBIOSCURPOS _IOW('F', 26, struct fbcurpos) | ||
| 116 | #define FBIOGCURPOS _IOW('F', 27, struct fbcurpos) | ||
| 117 | |||
| 118 | /* get max cursor size */ | ||
| 119 | #define FBIOGCURMAX _IOR('F', 28, struct fbcurpos) | ||
| 120 | |||
| 121 | /* wid manipulation */ | ||
| 122 | struct fb_wid_alloc { | ||
| 123 | #define FB_WID_SHARED_8 0 | ||
| 124 | #define FB_WID_SHARED_24 1 | ||
| 125 | #define FB_WID_DBL_8 2 | ||
| 126 | #define FB_WID_DBL_24 3 | ||
| 127 | __u32 wa_type; | ||
| 128 | __s32 wa_index; /* Set on return */ | ||
| 129 | __u32 wa_count; | ||
| 130 | }; | ||
| 131 | struct fb_wid_item { | ||
| 132 | __u32 wi_type; | ||
| 133 | __s32 wi_index; | ||
| 134 | __u32 wi_attrs; | ||
| 135 | __u32 wi_values[32]; | ||
| 136 | }; | ||
| 137 | struct fb_wid_list { | ||
| 138 | __u32 wl_flags; | ||
| 139 | __u32 wl_count; | ||
| 140 | struct fb_wid_item *wl_list; | ||
| 141 | }; | ||
| 142 | |||
| 143 | #define FBIO_WID_ALLOC _IOWR('F', 30, struct fb_wid_alloc) | ||
| 144 | #define FBIO_WID_FREE _IOW('F', 31, struct fb_wid_alloc) | ||
| 145 | #define FBIO_WID_PUT _IOW('F', 32, struct fb_wid_list) | ||
| 146 | #define FBIO_WID_GET _IOWR('F', 33, struct fb_wid_list) | ||
| 147 | |||
| 148 | /* Creator ioctls */ | ||
| 149 | #define FFB_IOCTL ('F'<<8) | ||
| 150 | #define FFB_SYS_INFO (FFB_IOCTL|80) | ||
| 151 | #define FFB_CLUTREAD (FFB_IOCTL|81) | ||
| 152 | #define FFB_CLUTPOST (FFB_IOCTL|82) | ||
| 153 | #define FFB_SETDIAGMODE (FFB_IOCTL|83) | ||
| 154 | #define FFB_GETMONITORID (FFB_IOCTL|84) | ||
| 155 | #define FFB_GETVIDEOMODE (FFB_IOCTL|85) | ||
| 156 | #define FFB_SETVIDEOMODE (FFB_IOCTL|86) | ||
| 157 | #define FFB_SETSERVER (FFB_IOCTL|87) | ||
| 158 | #define FFB_SETOVCTL (FFB_IOCTL|88) | ||
| 159 | #define FFB_GETOVCTL (FFB_IOCTL|89) | ||
| 160 | #define FFB_GETSAXNUM (FFB_IOCTL|90) | ||
| 161 | #define FFB_FBDEBUG (FFB_IOCTL|91) | ||
| 162 | |||
| 163 | /* Cg14 ioctls */ | ||
| 164 | #define MDI_IOCTL ('M'<<8) | ||
| 165 | #define MDI_RESET (MDI_IOCTL|1) | ||
| 166 | #define MDI_GET_CFGINFO (MDI_IOCTL|2) | ||
| 167 | #define MDI_SET_PIXELMODE (MDI_IOCTL|3) | ||
| 168 | # define MDI_32_PIX 32 | ||
| 169 | # define MDI_16_PIX 16 | ||
| 170 | # define MDI_8_PIX 8 | ||
| 171 | |||
| 172 | struct mdi_cfginfo { | ||
| 173 | int mdi_ncluts; /* Number of implemented CLUTs in this MDI */ | ||
| 174 | int mdi_type; /* FBTYPE name */ | ||
| 175 | int mdi_height; /* height */ | ||
| 176 | int mdi_width; /* width */ | ||
| 177 | int mdi_size; /* available ram */ | ||
| 178 | int mdi_mode; /* 8bpp, 16bpp or 32bpp */ | ||
| 179 | int mdi_pixfreq; /* pixel clock (from PROM) */ | ||
| 180 | }; | ||
| 181 | |||
| 182 | /* SparcLinux specific ioctl for the MDI, should be replaced for | ||
| 183 | * the SET_XLUT/SET_CLUTn ioctls instead | ||
| 184 | */ | ||
| 185 | #define MDI_CLEAR_XLUT (MDI_IOCTL|9) | ||
| 186 | |||
| 187 | /* leo & ffb ioctls */ | ||
| 188 | struct fb_clut_alloc { | ||
| 189 | __u32 clutid; /* Set on return */ | ||
| 190 | __u32 flag; | ||
| 191 | __u32 index; | ||
| 192 | }; | ||
| 193 | |||
| 194 | struct fb_clut { | ||
| 195 | #define FB_CLUT_WAIT 0x00000001 /* Not yet implemented */ | ||
| 196 | __u32 flag; | ||
| 197 | __u32 clutid; | ||
| 198 | __u32 offset; | ||
| 199 | __u32 count; | ||
| 200 | char * red; | ||
| 201 | char * green; | ||
| 202 | char * blue; | ||
| 203 | }; | ||
| 204 | |||
| 205 | struct fb_clut32 { | ||
| 206 | __u32 flag; | ||
| 207 | __u32 clutid; | ||
| 208 | __u32 offset; | ||
| 209 | __u32 count; | ||
| 210 | __u32 red; | ||
| 211 | __u32 green; | ||
| 212 | __u32 blue; | ||
| 213 | }; | ||
| 214 | |||
| 215 | #define LEO_CLUTALLOC _IOWR('L', 53, struct fb_clut_alloc) | ||
| 216 | #define LEO_CLUTFREE _IOW('L', 54, struct fb_clut_alloc) | ||
| 217 | #define LEO_CLUTREAD _IOW('L', 55, struct fb_clut) | ||
| 218 | #define LEO_CLUTPOST _IOW('L', 56, struct fb_clut) | ||
| 219 | #define LEO_SETGAMMA _IOW('L', 68, int) /* Not yet implemented */ | ||
| 220 | #define LEO_GETGAMMA _IOR('L', 69, int) /* Not yet implemented */ | ||
| 221 | |||
| 222 | #ifdef __KERNEL__ | ||
| 223 | /* Addresses on the fd of a cgsix that are mappable */ | 8 | /* Addresses on the fd of a cgsix that are mappable */ |
| 224 | #define CG6_FBC 0x70000000 | 9 | #define CG6_FBC 0x70000000 |
| 225 | #define CG6_TEC 0x70001000 | 10 | #define CG6_TEC 0x70001000 |
| @@ -260,47 +45,6 @@ struct fb_clut32 { | |||
| 260 | #define CG14_CLUT3 0x6000 /* Color Look Up Table */ | 45 | #define CG14_CLUT3 0x6000 /* Color Look Up Table */ |
| 261 | #define CG14_AUTO 0xf000 | 46 | #define CG14_AUTO 0xf000 |
| 262 | 47 | ||
| 263 | #endif /* KERNEL */ | ||
| 264 | |||
| 265 | /* These are exported to userland for applications to use */ | ||
| 266 | /* Mappable offsets for the cg14: control registers */ | ||
| 267 | #define MDI_DIRECT_MAP 0x10000000 | ||
| 268 | #define MDI_CTLREG_MAP 0x20000000 | ||
| 269 | #define MDI_CURSOR_MAP 0x30000000 | ||
| 270 | #define MDI_SHDW_VRT_MAP 0x40000000 | ||
| 271 | |||
| 272 | /* Mappable offsets for the cg14: frame buffer resolutions */ | ||
| 273 | /* 32 bits */ | ||
| 274 | #define MDI_CHUNKY_XBGR_MAP 0x50000000 | ||
| 275 | #define MDI_CHUNKY_BGR_MAP 0x60000000 | ||
| 276 | |||
| 277 | /* 16 bits */ | ||
| 278 | #define MDI_PLANAR_X16_MAP 0x70000000 | ||
| 279 | #define MDI_PLANAR_C16_MAP 0x80000000 | ||
| 280 | |||
| 281 | /* 8 bit is done as CG3 MMAP offset */ | ||
| 282 | /* 32 bits, planar */ | ||
| 283 | #define MDI_PLANAR_X32_MAP 0x90000000 | ||
| 284 | #define MDI_PLANAR_B32_MAP 0xa0000000 | ||
| 285 | #define MDI_PLANAR_G32_MAP 0xb0000000 | ||
| 286 | #define MDI_PLANAR_R32_MAP 0xc0000000 | ||
| 287 | |||
| 288 | /* Mappable offsets on leo */ | ||
| 289 | #define LEO_SS0_MAP 0x00000000 | ||
| 290 | #define LEO_LC_SS0_USR_MAP 0x00800000 | ||
| 291 | #define LEO_LD_SS0_MAP 0x00801000 | ||
| 292 | #define LEO_LX_CURSOR_MAP 0x00802000 | ||
| 293 | #define LEO_SS1_MAP 0x00803000 | ||
| 294 | #define LEO_LC_SS1_USR_MAP 0x01003000 | ||
| 295 | #define LEO_LD_SS1_MAP 0x01004000 | ||
| 296 | #define LEO_UNK_MAP 0x01005000 | ||
| 297 | #define LEO_LX_KRN_MAP 0x01006000 | ||
| 298 | #define LEO_LC_SS0_KRN_MAP 0x01007000 | ||
| 299 | #define LEO_LC_SS1_KRN_MAP 0x01008000 | ||
| 300 | #define LEO_LD_GBL_MAP 0x01009000 | ||
| 301 | #define LEO_UNK2_MAP 0x0100a000 | ||
| 302 | |||
| 303 | #ifdef __KERNEL__ | ||
| 304 | struct fbcmap32 { | 48 | struct fbcmap32 { |
| 305 | int index; /* first element (0 origin) */ | 49 | int index; /* first element (0 origin) */ |
| 306 | int count; | 50 | int count; |
| @@ -325,6 +69,4 @@ struct fbcursor32 { | |||
| 325 | 69 | ||
| 326 | #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) | 70 | #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) |
| 327 | #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) | 71 | #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) |
| 328 | #endif | ||
| 329 | |||
| 330 | #endif /* __LINUX_FBIO_H */ | 72 | #endif /* __LINUX_FBIO_H */ |
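Editor's note: the fbio.h change follows the sparc UAPI disintegration pattern visible throughout this series — the userspace-visible definitions (FBTYPE_*, the FBIO* ioctls, the cg14/leo structures and mmap offsets) move to uapi/asm/fbio.h, the kernel-side header includes that file, and only the formerly __KERNEL__-guarded material stays behind, now unguarded. A minimal sketch of the resulting shape (abbreviated, not the literal file contents):

    /* arch/sparc/include/asm/fbio.h -- kernel-only side after the split (sketch) */
    #ifndef __LINUX_FBIO_H
    #define __LINUX_FBIO_H

    #include <uapi/asm/fbio.h>          /* exported types, FBTYPE_*, FBIO* ioctls */

    /* kernel-private spellings and register offsets remain here, unguarded */
    #define FBIOPUTCMAP_SPARC _IOW('F', 3, struct fbcmap)
    #define FBIOGETCMAP_SPARC _IOW('F', 4, struct fbcmap)

    #endif /* __LINUX_FBIO_H */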
diff --git a/arch/sparc/include/asm/ioctls.h b/arch/sparc/include/asm/ioctls.h index 28d0c8b02cc3..77413b7e3a18 100644 --- a/arch/sparc/include/asm/ioctls.h +++ b/arch/sparc/include/asm/ioctls.h | |||
| @@ -1,123 +1,8 @@ | |||
| 1 | #ifndef _ASM_SPARC_IOCTLS_H | 1 | #ifndef _ASM_SPARC_IOCTLS_H |
| 2 | #define _ASM_SPARC_IOCTLS_H | 2 | #define _ASM_SPARC_IOCTLS_H |
| 3 | 3 | ||
| 4 | #include <asm/ioctl.h> | 4 | #include <uapi/asm/ioctls.h> |
| 5 | 5 | ||
| 6 | /* Big T */ | ||
| 7 | #define TCGETA _IOR('T', 1, struct termio) | ||
| 8 | #define TCSETA _IOW('T', 2, struct termio) | ||
| 9 | #define TCSETAW _IOW('T', 3, struct termio) | ||
| 10 | #define TCSETAF _IOW('T', 4, struct termio) | ||
| 11 | #define TCSBRK _IO('T', 5) | ||
| 12 | #define TCXONC _IO('T', 6) | ||
| 13 | #define TCFLSH _IO('T', 7) | ||
| 14 | #define TCGETS _IOR('T', 8, struct termios) | ||
| 15 | #define TCSETS _IOW('T', 9, struct termios) | ||
| 16 | #define TCSETSW _IOW('T', 10, struct termios) | ||
| 17 | #define TCSETSF _IOW('T', 11, struct termios) | ||
| 18 | #define TCGETS2 _IOR('T', 12, struct termios2) | ||
| 19 | #define TCSETS2 _IOW('T', 13, struct termios2) | ||
| 20 | #define TCSETSW2 _IOW('T', 14, struct termios2) | ||
| 21 | #define TCSETSF2 _IOW('T', 15, struct termios2) | ||
| 22 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | ||
| 23 | #define TIOCVHANGUP _IO('T', 0x37) | ||
| 24 | |||
| 25 | /* Note that all the ioctls that are not available in Linux have a | ||
| 26 | * double underscore on the front to: a) avoid some programs to | ||
| 27 | * think we support some ioctls under Linux (autoconfiguration stuff) | ||
| 28 | */ | ||
| 29 | /* Little t */ | ||
| 30 | #define TIOCGETD _IOR('t', 0, int) | ||
| 31 | #define TIOCSETD _IOW('t', 1, int) | ||
| 32 | #define __TIOCHPCL _IO('t', 2) /* SunOS Specific */ | ||
| 33 | #define __TIOCMODG _IOR('t', 3, int) /* SunOS Specific */ | ||
| 34 | #define __TIOCMODS _IOW('t', 4, int) /* SunOS Specific */ | ||
| 35 | #define __TIOCGETP _IOR('t', 8, struct sgttyb) /* SunOS Specific */ | ||
| 36 | #define __TIOCSETP _IOW('t', 9, struct sgttyb) /* SunOS Specific */ | ||
| 37 | #define __TIOCSETN _IOW('t', 10, struct sgttyb) /* SunOS Specific */ | ||
| 38 | #define TIOCEXCL _IO('t', 13) | ||
| 39 | #define TIOCNXCL _IO('t', 14) | ||
| 40 | #define __TIOCFLUSH _IOW('t', 16, int) /* SunOS Specific */ | ||
| 41 | #define __TIOCSETC _IOW('t', 17, struct tchars) /* SunOS Specific */ | ||
| 42 | #define __TIOCGETC _IOR('t', 18, struct tchars) /* SunOS Specific */ | ||
| 43 | #define __TIOCTCNTL _IOW('t', 32, int) /* SunOS Specific */ | ||
| 44 | #define __TIOCSIGNAL _IOW('t', 33, int) /* SunOS Specific */ | ||
| 45 | #define __TIOCSETX _IOW('t', 34, int) /* SunOS Specific */ | ||
| 46 | #define __TIOCGETX _IOR('t', 35, int) /* SunOS Specific */ | ||
| 47 | #define TIOCCONS _IO('t', 36) | ||
| 48 | #define TIOCGSOFTCAR _IOR('t', 100, int) | ||
| 49 | #define TIOCSSOFTCAR _IOW('t', 101, int) | ||
| 50 | #define __TIOCUCNTL _IOW('t', 102, int) /* SunOS Specific */ | ||
| 51 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) | ||
| 52 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) | ||
| 53 | #define __TIOCREMOTE _IOW('t', 105, int) /* SunOS Specific */ | ||
| 54 | #define TIOCMGET _IOR('t', 106, int) | ||
| 55 | #define TIOCMBIC _IOW('t', 107, int) | ||
| 56 | #define TIOCMBIS _IOW('t', 108, int) | ||
| 57 | #define TIOCMSET _IOW('t', 109, int) | ||
| 58 | #define TIOCSTART _IO('t', 110) | ||
| 59 | #define TIOCSTOP _IO('t', 111) | ||
| 60 | #define TIOCPKT _IOW('t', 112, int) | ||
| 61 | #define TIOCNOTTY _IO('t', 113) | ||
| 62 | #define TIOCSTI _IOW('t', 114, char) | ||
| 63 | #define TIOCOUTQ _IOR('t', 115, int) | ||
| 64 | #define __TIOCGLTC _IOR('t', 116, struct ltchars) /* SunOS Specific */ | ||
| 65 | #define __TIOCSLTC _IOW('t', 117, struct ltchars) /* SunOS Specific */ | ||
| 66 | /* 118 is the non-posix setpgrp tty ioctl */ | ||
| 67 | /* 119 is the non-posix getpgrp tty ioctl */ | ||
| 68 | #define __TIOCCDTR _IO('t', 120) /* SunOS Specific */ | ||
| 69 | #define __TIOCSDTR _IO('t', 121) /* SunOS Specific */ | ||
| 70 | #define TIOCCBRK _IO('t', 122) | ||
| 71 | #define TIOCSBRK _IO('t', 123) | ||
| 72 | #define __TIOCLGET _IOW('t', 124, int) /* SunOS Specific */ | ||
| 73 | #define __TIOCLSET _IOW('t', 125, int) /* SunOS Specific */ | ||
| 74 | #define __TIOCLBIC _IOW('t', 126, int) /* SunOS Specific */ | ||
| 75 | #define __TIOCLBIS _IOW('t', 127, int) /* SunOS Specific */ | ||
| 76 | #define __TIOCISPACE _IOR('t', 128, int) /* SunOS Specific */ | ||
| 77 | #define __TIOCISIZE _IOR('t', 129, int) /* SunOS Specific */ | ||
| 78 | #define TIOCSPGRP _IOW('t', 130, int) | ||
| 79 | #define TIOCGPGRP _IOR('t', 131, int) | ||
| 80 | #define TIOCSCTTY _IO('t', 132) | ||
| 81 | #define TIOCGSID _IOR('t', 133, int) | ||
| 82 | /* Get minor device of a pty master's FD -- Solaris equiv is ISPTM */ | ||
| 83 | #define TIOCGPTN _IOR('t', 134, unsigned int) /* Get Pty Number */ | ||
| 84 | #define TIOCSPTLCK _IOW('t', 135, int) /* Lock/unlock PTY */ | ||
| 85 | #define TIOCSIG _IOW('t', 136, int) /* Generate signal on Pty slave */ | ||
| 86 | |||
| 87 | /* Little f */ | ||
| 88 | #define FIOCLEX _IO('f', 1) | ||
| 89 | #define FIONCLEX _IO('f', 2) | ||
| 90 | #define FIOASYNC _IOW('f', 125, int) | ||
| 91 | #define FIONBIO _IOW('f', 126, int) | ||
| 92 | #define FIONREAD _IOR('f', 127, int) | ||
| 93 | #define TIOCINQ FIONREAD | ||
| 94 | #define FIOQSIZE _IOR('f', 128, loff_t) | ||
| 95 | |||
| 96 | /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it | ||
| 97 | * someday. This is completely bogus, I know... | ||
| 98 | */ | ||
| 99 | #define __TCGETSTAT _IO('T', 200) /* Rutgers specific */ | ||
| 100 | #define __TCSETSTAT _IO('T', 201) /* Rutgers specific */ | ||
| 101 | |||
| 102 | /* Linux specific, no SunOS equivalent. */ | ||
| 103 | #define TIOCLINUX 0x541C | ||
| 104 | #define TIOCGSERIAL 0x541E | ||
| 105 | #define TIOCSSERIAL 0x541F | ||
| 106 | #define TCSBRKP 0x5425 | ||
| 107 | #define TIOCSERCONFIG 0x5453 | ||
| 108 | #define TIOCSERGWILD 0x5454 | ||
| 109 | #define TIOCSERSWILD 0x5455 | ||
| 110 | #define TIOCGLCKTRMIOS 0x5456 | ||
| 111 | #define TIOCSLCKTRMIOS 0x5457 | ||
| 112 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
| 113 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
| 114 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
| 115 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
| 116 | #define TIOCMIWAIT 0x545C /* Wait for change on serial input line(s) */ | ||
| 117 | #define TIOCGICOUNT 0x545D /* Read serial port inline interrupt counts */ | ||
| 118 | |||
| 119 | /* Kernel definitions */ | ||
| 120 | #ifdef __KERNEL__ | ||
| 121 | #define TIOCGETC __TIOCGETC | 6 | #define TIOCGETC __TIOCGETC |
| 122 | #define TIOCGETP __TIOCGETP | 7 | #define TIOCGETP __TIOCGETP |
| 123 | #define TIOCGLTC __TIOCGLTC | 8 | #define TIOCGLTC __TIOCGLTC |
| @@ -125,16 +10,4 @@ | |||
| 125 | #define TIOCSETP __TIOCSETP | 10 | #define TIOCSETP __TIOCSETP |
| 126 | #define TIOCSETN __TIOCSETN | 11 | #define TIOCSETN __TIOCSETN |
| 127 | #define TIOCSETC __TIOCSETC | 12 | #define TIOCSETC __TIOCSETC |
| 128 | #endif | ||
| 129 | |||
| 130 | /* Used for packet mode */ | ||
| 131 | #define TIOCPKT_DATA 0 | ||
| 132 | #define TIOCPKT_FLUSHREAD 1 | ||
| 133 | #define TIOCPKT_FLUSHWRITE 2 | ||
| 134 | #define TIOCPKT_STOP 4 | ||
| 135 | #define TIOCPKT_START 8 | ||
| 136 | #define TIOCPKT_NOSTOP 16 | ||
| 137 | #define TIOCPKT_DOSTOP 32 | ||
| 138 | #define TIOCPKT_IOCTL 64 | ||
| 139 | |||
| 140 | #endif /* !(_ASM_SPARC_IOCTLS_H) */ | 13 | #endif /* !(_ASM_SPARC_IOCTLS_H) */ |
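Editor's note: the terminal ioctl request numbers that used to live here (TCGETS, TIOCGWINSZ and friends) are now exported through uapi/asm/ioctls.h; only the kernel-internal TIOCGETC/TIOCGETP aliases for the double-underscore SunOS names remain. The intent of the split is that userspace sees the same values through the exported header, so code like the following hypothetical example (not part of the patch) is unaffected:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>      /* pulls in the arch ioctl numbers */

    int main(void)
    {
            struct winsize ws;

            /* TIOCGWINSZ == _IOR('t', 104, struct winsize) per the removed hunk */
            if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
                    printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
            return 0;
    }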
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index c3029ad6619a..59bb5938d852 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h | |||
| @@ -1,33 +1,10 @@ | |||
| 1 | #ifndef __SPARC_MMAN_H__ | 1 | #ifndef __SPARC_MMAN_H__ |
| 2 | #define __SPARC_MMAN_H__ | 2 | #define __SPARC_MMAN_H__ |
| 3 | 3 | ||
| 4 | #include <asm-generic/mman-common.h> | 4 | #include <uapi/asm/mman.h> |
| 5 | 5 | ||
| 6 | /* SunOS'ified... */ | ||
| 7 | |||
| 8 | #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ | ||
| 9 | #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ | ||
| 10 | #define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ | ||
| 11 | #define MAP_LOCKED 0x100 /* lock the mapping */ | ||
| 12 | #define _MAP_NEW 0x80000000 /* Binary compatibility is fun... */ | ||
| 13 | |||
| 14 | #define MAP_GROWSDOWN 0x0200 /* stack-like segment */ | ||
| 15 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
| 16 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
| 17 | |||
| 18 | #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ | ||
| 19 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ | ||
| 20 | |||
| 21 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
| 22 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
| 23 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
| 24 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
| 25 | |||
| 26 | #ifdef __KERNEL__ | ||
| 27 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
| 28 | #define arch_mmap_check(addr,len,flags) sparc_mmap_check(addr,len) | 7 | #define arch_mmap_check(addr,len,flags) sparc_mmap_check(addr,len) |
| 29 | int sparc_mmap_check(unsigned long addr, unsigned long len); | 8 | int sparc_mmap_check(unsigned long addr, unsigned long len); |
| 30 | #endif | 9 | #endif |
| 31 | #endif | ||
| 32 | |||
| 33 | #endif /* __SPARC_MMAN_H__ */ | 10 | #endif /* __SPARC_MMAN_H__ */ |
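Editor's note: asm/mman.h keeps only the kernel-side arch_mmap_check() hook; the sparc-specific MAP_* and MCL_* flag values move to uapi/asm/mman.h with their numeric values unchanged. Existing userspace therefore keeps working, e.g. this illustrative use of MAP_NORESERVE (not from the patch):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Reserve a large sparse arena without committing swap for it up front. */
    void *arena_reserve(size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
            return p == MAP_FAILED ? NULL : p;
    }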
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h index cee7ed9c927d..e71eb57945e0 100644 --- a/arch/sparc/include/asm/psr.h +++ b/arch/sparc/include/asm/psr.h | |||
| @@ -7,43 +7,11 @@ | |||
| 7 | * | 7 | * |
| 8 | * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu) | 8 | * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu) |
| 9 | */ | 9 | */ |
| 10 | |||
| 11 | #ifndef __LINUX_SPARC_PSR_H | 10 | #ifndef __LINUX_SPARC_PSR_H |
| 12 | #define __LINUX_SPARC_PSR_H | 11 | #define __LINUX_SPARC_PSR_H |
| 13 | 12 | ||
| 14 | /* The Sparc PSR fields are laid out as the following: | 13 | #include <uapi/asm/psr.h> |
| 15 | * | ||
| 16 | * ------------------------------------------------------------------------ | ||
| 17 | * | impl | vers | icc | resv | EC | EF | PIL | S | PS | ET | CWP | | ||
| 18 | * | 31-28 | 27-24 | 23-20 | 19-14 | 13 | 12 | 11-8 | 7 | 6 | 5 | 4-0 | | ||
| 19 | * ------------------------------------------------------------------------ | ||
| 20 | */ | ||
| 21 | #define PSR_CWP 0x0000001f /* current window pointer */ | ||
| 22 | #define PSR_ET 0x00000020 /* enable traps field */ | ||
| 23 | #define PSR_PS 0x00000040 /* previous privilege level */ | ||
| 24 | #define PSR_S 0x00000080 /* current privilege level */ | ||
| 25 | #define PSR_PIL 0x00000f00 /* processor interrupt level */ | ||
| 26 | #define PSR_EF 0x00001000 /* enable floating point */ | ||
| 27 | #define PSR_EC 0x00002000 /* enable co-processor */ | ||
| 28 | #define PSR_SYSCALL 0x00004000 /* inside of a syscall */ | ||
| 29 | #define PSR_LE 0x00008000 /* SuperSparcII little-endian */ | ||
| 30 | #define PSR_ICC 0x00f00000 /* integer condition codes */ | ||
| 31 | #define PSR_C 0x00100000 /* carry bit */ | ||
| 32 | #define PSR_V 0x00200000 /* overflow bit */ | ||
| 33 | #define PSR_Z 0x00400000 /* zero bit */ | ||
| 34 | #define PSR_N 0x00800000 /* negative bit */ | ||
| 35 | #define PSR_VERS 0x0f000000 /* cpu-version field */ | ||
| 36 | #define PSR_IMPL 0xf0000000 /* cpu-implementation field */ | ||
| 37 | |||
| 38 | #define PSR_VERS_SHIFT 24 | ||
| 39 | #define PSR_IMPL_SHIFT 28 | ||
| 40 | #define PSR_VERS_SHIFTED_MASK 0xf | ||
| 41 | #define PSR_IMPL_SHIFTED_MASK 0xf | ||
| 42 | |||
| 43 | #define PSR_IMPL_TI 0x4 | ||
| 44 | #define PSR_IMPL_LEON 0xf | ||
| 45 | 14 | ||
| 46 | #ifdef __KERNEL__ | ||
| 47 | 15 | ||
| 48 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
| 49 | /* Get the %psr register. */ | 17 | /* Get the %psr register. */ |
| @@ -96,6 +64,4 @@ static inline unsigned int get_fsr(void) | |||
| 96 | 64 | ||
| 97 | #endif /* !(__ASSEMBLY__) */ | 65 | #endif /* !(__ASSEMBLY__) */ |
| 98 | 66 | ||
| 99 | #endif /* (__KERNEL__) */ | ||
| 100 | |||
| 101 | #endif /* !(__LINUX_SPARC_PSR_H) */ | 67 | #endif /* !(__LINUX_SPARC_PSR_H) */ |
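Editor's note: the PSR bit-layout comment and the PSR_* masks move to uapi/asm/psr.h; the kernel header keeps only the inline accessors (get_psr() and friends) behind !__ASSEMBLY__. The implementation/version fields are still decoded the same way — a sketch using the masks and shifts shown in the removed hunk:

    #include <asm/psr.h>        /* PSR_* masks, now provided via uapi/asm/psr.h */

    static int cpu_is_leon(unsigned int psr)
    {
            unsigned int impl = (psr & PSR_IMPL) >> PSR_IMPL_SHIFT; /* bits 31-28 */
            unsigned int vers = (psr & PSR_VERS) >> PSR_VERS_SHIFT; /* bits 27-24 */

            (void)vers;                     /* version field, unused in this check */
            return impl == PSR_IMPL_LEON;   /* 0xf per the removed table */
    }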
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index fd9c3f21cbf0..0c6f6b068289 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h | |||
| @@ -1,169 +1,11 @@ | |||
| 1 | #ifndef __SPARC_PTRACE_H | 1 | #ifndef __SPARC_PTRACE_H |
| 2 | #define __SPARC_PTRACE_H | 2 | #define __SPARC_PTRACE_H |
| 3 | 3 | ||
| 4 | #if defined(__sparc__) && defined(__arch64__) | 4 | #include <uapi/asm/ptrace.h> |
| 5 | /* 64 bit sparc */ | ||
| 6 | #include <asm/pstate.h> | ||
| 7 | |||
| 8 | /* This struct defines the way the registers are stored on the | ||
| 9 | * stack during a system call and basically all traps. | ||
| 10 | */ | ||
| 11 | |||
| 12 | /* This magic value must have the low 9 bits clear, | ||
| 13 | * as that is where we encode the %tt value, see below. | ||
| 14 | */ | ||
| 15 | #define PT_REGS_MAGIC 0x57ac6c00 | ||
| 16 | |||
| 17 | #ifndef __ASSEMBLY__ | ||
| 18 | |||
| 19 | #include <linux/types.h> | ||
| 20 | |||
| 21 | struct pt_regs { | ||
| 22 | unsigned long u_regs[16]; /* globals and ins */ | ||
| 23 | unsigned long tstate; | ||
| 24 | unsigned long tpc; | ||
| 25 | unsigned long tnpc; | ||
| 26 | unsigned int y; | ||
| 27 | |||
| 28 | /* We encode a magic number, PT_REGS_MAGIC, along | ||
| 29 | * with the %tt (trap type) register value at trap | ||
| 30 | * entry time. The magic number allows us to identify | ||
| 31 | * accurately a trap stack frame in the stack | ||
| 32 | * unwinder, and the %tt value allows us to test | ||
| 33 | * things like "in a system call" etc. for an arbitray | ||
| 34 | * process. | ||
| 35 | * | ||
| 36 | * The PT_REGS_MAGIC is chosen such that it can be | ||
| 37 | * loaded completely using just a sethi instruction. | ||
| 38 | */ | ||
| 39 | unsigned int magic; | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct pt_regs32 { | ||
| 43 | unsigned int psr; | ||
| 44 | unsigned int pc; | ||
| 45 | unsigned int npc; | ||
| 46 | unsigned int y; | ||
| 47 | unsigned int u_regs[16]; /* globals and ins */ | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* A V9 register window */ | ||
| 51 | struct reg_window { | ||
| 52 | unsigned long locals[8]; | ||
| 53 | unsigned long ins[8]; | ||
| 54 | }; | ||
| 55 | |||
| 56 | /* A 32-bit register window. */ | ||
| 57 | struct reg_window32 { | ||
| 58 | unsigned int locals[8]; | ||
| 59 | unsigned int ins[8]; | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* A V9 Sparc stack frame */ | ||
| 63 | struct sparc_stackf { | ||
| 64 | unsigned long locals[8]; | ||
| 65 | unsigned long ins[6]; | ||
| 66 | struct sparc_stackf *fp; | ||
| 67 | unsigned long callers_pc; | ||
| 68 | char *structptr; | ||
| 69 | unsigned long xargs[6]; | ||
| 70 | unsigned long xxargs[1]; | ||
| 71 | }; | ||
| 72 | |||
| 73 | /* A 32-bit Sparc stack frame */ | ||
| 74 | struct sparc_stackf32 { | ||
| 75 | unsigned int locals[8]; | ||
| 76 | unsigned int ins[6]; | ||
| 77 | unsigned int fp; | ||
| 78 | unsigned int callers_pc; | ||
| 79 | unsigned int structptr; | ||
| 80 | unsigned int xargs[6]; | ||
| 81 | unsigned int xxargs[1]; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct sparc_trapf { | ||
| 85 | unsigned long locals[8]; | ||
| 86 | unsigned long ins[8]; | ||
| 87 | unsigned long _unused; | ||
| 88 | struct pt_regs *regs; | ||
| 89 | }; | ||
| 90 | #endif /* (!__ASSEMBLY__) */ | ||
| 91 | #else | ||
| 92 | /* 32 bit sparc */ | ||
| 93 | |||
| 94 | #include <asm/psr.h> | ||
| 95 | |||
| 96 | /* This struct defines the way the registers are stored on the | ||
| 97 | * stack during a system call and basically all traps. | ||
| 98 | */ | ||
| 99 | #ifndef __ASSEMBLY__ | ||
| 100 | |||
| 101 | #include <linux/types.h> | ||
| 102 | |||
| 103 | struct pt_regs { | ||
| 104 | unsigned long psr; | ||
| 105 | unsigned long pc; | ||
| 106 | unsigned long npc; | ||
| 107 | unsigned long y; | ||
| 108 | unsigned long u_regs[16]; /* globals and ins */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* A 32-bit register window. */ | ||
| 112 | struct reg_window32 { | ||
| 113 | unsigned long locals[8]; | ||
| 114 | unsigned long ins[8]; | ||
| 115 | }; | ||
| 116 | |||
| 117 | /* A Sparc stack frame */ | ||
| 118 | struct sparc_stackf { | ||
| 119 | unsigned long locals[8]; | ||
| 120 | unsigned long ins[6]; | ||
| 121 | struct sparc_stackf *fp; | ||
| 122 | unsigned long callers_pc; | ||
| 123 | char *structptr; | ||
| 124 | unsigned long xargs[6]; | ||
| 125 | unsigned long xxargs[1]; | ||
| 126 | }; | ||
| 127 | #endif /* (!__ASSEMBLY__) */ | ||
| 128 | |||
| 129 | #endif /* (defined(__sparc__) && defined(__arch64__))*/ | ||
| 130 | |||
| 131 | #ifndef __ASSEMBLY__ | ||
| 132 | |||
| 133 | #define TRACEREG_SZ sizeof(struct pt_regs) | ||
| 134 | #define STACKFRAME_SZ sizeof(struct sparc_stackf) | ||
| 135 | |||
| 136 | #define TRACEREG32_SZ sizeof(struct pt_regs32) | ||
| 137 | #define STACKFRAME32_SZ sizeof(struct sparc_stackf32) | ||
| 138 | |||
| 139 | #endif /* (!__ASSEMBLY__) */ | ||
| 140 | |||
| 141 | #define UREG_G0 0 | ||
| 142 | #define UREG_G1 1 | ||
| 143 | #define UREG_G2 2 | ||
| 144 | #define UREG_G3 3 | ||
| 145 | #define UREG_G4 4 | ||
| 146 | #define UREG_G5 5 | ||
| 147 | #define UREG_G6 6 | ||
| 148 | #define UREG_G7 7 | ||
| 149 | #define UREG_I0 8 | ||
| 150 | #define UREG_I1 9 | ||
| 151 | #define UREG_I2 10 | ||
| 152 | #define UREG_I3 11 | ||
| 153 | #define UREG_I4 12 | ||
| 154 | #define UREG_I5 13 | ||
| 155 | #define UREG_I6 14 | ||
| 156 | #define UREG_I7 15 | ||
| 157 | #define UREG_FP UREG_I6 | ||
| 158 | #define UREG_RETPC UREG_I7 | ||
| 159 | 5 | ||
| 160 | #if defined(__sparc__) && defined(__arch64__) | 6 | #if defined(__sparc__) && defined(__arch64__) |
| 161 | /* 64 bit sparc */ | ||
| 162 | |||
| 163 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
| 164 | 8 | ||
| 165 | #ifdef __KERNEL__ | ||
| 166 | |||
| 167 | #include <linux/threads.h> | 9 | #include <linux/threads.h> |
| 168 | #include <asm/switch_to.h> | 10 | #include <asm/switch_to.h> |
| 169 | 11 | ||
| @@ -223,24 +65,10 @@ extern unsigned long profile_pc(struct pt_regs *); | |||
| 223 | #else | 65 | #else |
| 224 | #define profile_pc(regs) instruction_pointer(regs) | 66 | #define profile_pc(regs) instruction_pointer(regs) |
| 225 | #endif | 67 | #endif |
| 226 | #endif /* (__KERNEL__) */ | ||
| 227 | |||
| 228 | #else /* __ASSEMBLY__ */ | 68 | #else /* __ASSEMBLY__ */ |
| 229 | /* For assembly code. */ | ||
| 230 | #define TRACEREG_SZ 0xa0 | ||
| 231 | #define STACKFRAME_SZ 0xc0 | ||
| 232 | |||
| 233 | #define TRACEREG32_SZ 0x50 | ||
| 234 | #define STACKFRAME32_SZ 0x60 | ||
| 235 | #endif /* __ASSEMBLY__ */ | 69 | #endif /* __ASSEMBLY__ */ |
| 236 | |||
| 237 | #else /* (defined(__sparc__) && defined(__arch64__)) */ | 70 | #else /* (defined(__sparc__) && defined(__arch64__)) */ |
| 238 | |||
| 239 | /* 32 bit sparc */ | ||
| 240 | |||
| 241 | #ifndef __ASSEMBLY__ | 71 | #ifndef __ASSEMBLY__ |
| 242 | |||
| 243 | #ifdef __KERNEL__ | ||
| 244 | #include <asm/switch_to.h> | 72 | #include <asm/switch_to.h> |
| 245 | 73 | ||
| 246 | static inline bool pt_regs_is_syscall(struct pt_regs *regs) | 74 | static inline bool pt_regs_is_syscall(struct pt_regs *regs) |
| @@ -265,158 +93,10 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs) | |||
| 265 | #define instruction_pointer(regs) ((regs)->pc) | 93 | #define instruction_pointer(regs) ((regs)->pc) |
| 266 | #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) | 94 | #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) |
| 267 | unsigned long profile_pc(struct pt_regs *); | 95 | unsigned long profile_pc(struct pt_regs *); |
| 268 | #endif /* (__KERNEL__) */ | ||
| 269 | |||
| 270 | #else /* (!__ASSEMBLY__) */ | 96 | #else /* (!__ASSEMBLY__) */ |
| 271 | /* For assembly code. */ | ||
| 272 | #define TRACEREG_SZ 0x50 | ||
| 273 | #define STACKFRAME_SZ 0x60 | ||
| 274 | #endif /* (!__ASSEMBLY__) */ | 97 | #endif /* (!__ASSEMBLY__) */ |
| 275 | |||
| 276 | #endif /* (defined(__sparc__) && defined(__arch64__)) */ | 98 | #endif /* (defined(__sparc__) && defined(__arch64__)) */ |
| 277 | |||
| 278 | #ifdef __KERNEL__ | ||
| 279 | #define STACK_BIAS 2047 | 99 | #define STACK_BIAS 2047 |
| 280 | #endif | ||
| 281 | |||
| 282 | /* These are for pt_regs. */ | ||
| 283 | #define PT_V9_G0 0x00 | ||
| 284 | #define PT_V9_G1 0x08 | ||
| 285 | #define PT_V9_G2 0x10 | ||
| 286 | #define PT_V9_G3 0x18 | ||
| 287 | #define PT_V9_G4 0x20 | ||
| 288 | #define PT_V9_G5 0x28 | ||
| 289 | #define PT_V9_G6 0x30 | ||
| 290 | #define PT_V9_G7 0x38 | ||
| 291 | #define PT_V9_I0 0x40 | ||
| 292 | #define PT_V9_I1 0x48 | ||
| 293 | #define PT_V9_I2 0x50 | ||
| 294 | #define PT_V9_I3 0x58 | ||
| 295 | #define PT_V9_I4 0x60 | ||
| 296 | #define PT_V9_I5 0x68 | ||
| 297 | #define PT_V9_I6 0x70 | ||
| 298 | #define PT_V9_FP PT_V9_I6 | ||
| 299 | #define PT_V9_I7 0x78 | ||
| 300 | #define PT_V9_TSTATE 0x80 | ||
| 301 | #define PT_V9_TPC 0x88 | ||
| 302 | #define PT_V9_TNPC 0x90 | ||
| 303 | #define PT_V9_Y 0x98 | ||
| 304 | #define PT_V9_MAGIC 0x9c | ||
| 305 | #define PT_TSTATE PT_V9_TSTATE | ||
| 306 | #define PT_TPC PT_V9_TPC | ||
| 307 | #define PT_TNPC PT_V9_TNPC | ||
| 308 | |||
| 309 | /* These for pt_regs32. */ | ||
| 310 | #define PT_PSR 0x0 | ||
| 311 | #define PT_PC 0x4 | ||
| 312 | #define PT_NPC 0x8 | ||
| 313 | #define PT_Y 0xc | ||
| 314 | #define PT_G0 0x10 | ||
| 315 | #define PT_WIM PT_G0 | ||
| 316 | #define PT_G1 0x14 | ||
| 317 | #define PT_G2 0x18 | ||
| 318 | #define PT_G3 0x1c | ||
| 319 | #define PT_G4 0x20 | ||
| 320 | #define PT_G5 0x24 | ||
| 321 | #define PT_G6 0x28 | ||
| 322 | #define PT_G7 0x2c | ||
| 323 | #define PT_I0 0x30 | ||
| 324 | #define PT_I1 0x34 | ||
| 325 | #define PT_I2 0x38 | ||
| 326 | #define PT_I3 0x3c | ||
| 327 | #define PT_I4 0x40 | ||
| 328 | #define PT_I5 0x44 | ||
| 329 | #define PT_I6 0x48 | ||
| 330 | #define PT_FP PT_I6 | ||
| 331 | #define PT_I7 0x4c | ||
| 332 | |||
| 333 | /* Reg_window offsets */ | ||
| 334 | #define RW_V9_L0 0x00 | ||
| 335 | #define RW_V9_L1 0x08 | ||
| 336 | #define RW_V9_L2 0x10 | ||
| 337 | #define RW_V9_L3 0x18 | ||
| 338 | #define RW_V9_L4 0x20 | ||
| 339 | #define RW_V9_L5 0x28 | ||
| 340 | #define RW_V9_L6 0x30 | ||
| 341 | #define RW_V9_L7 0x38 | ||
| 342 | #define RW_V9_I0 0x40 | ||
| 343 | #define RW_V9_I1 0x48 | ||
| 344 | #define RW_V9_I2 0x50 | ||
| 345 | #define RW_V9_I3 0x58 | ||
| 346 | #define RW_V9_I4 0x60 | ||
| 347 | #define RW_V9_I5 0x68 | ||
| 348 | #define RW_V9_I6 0x70 | ||
| 349 | #define RW_V9_I7 0x78 | ||
| 350 | |||
| 351 | #define RW_L0 0x00 | ||
| 352 | #define RW_L1 0x04 | ||
| 353 | #define RW_L2 0x08 | ||
| 354 | #define RW_L3 0x0c | ||
| 355 | #define RW_L4 0x10 | ||
| 356 | #define RW_L5 0x14 | ||
| 357 | #define RW_L6 0x18 | ||
| 358 | #define RW_L7 0x1c | ||
| 359 | #define RW_I0 0x20 | ||
| 360 | #define RW_I1 0x24 | ||
| 361 | #define RW_I2 0x28 | ||
| 362 | #define RW_I3 0x2c | ||
| 363 | #define RW_I4 0x30 | ||
| 364 | #define RW_I5 0x34 | ||
| 365 | #define RW_I6 0x38 | ||
| 366 | #define RW_I7 0x3c | ||
| 367 | |||
| 368 | /* Stack_frame offsets */ | ||
| 369 | #define SF_V9_L0 0x00 | ||
| 370 | #define SF_V9_L1 0x08 | ||
| 371 | #define SF_V9_L2 0x10 | ||
| 372 | #define SF_V9_L3 0x18 | ||
| 373 | #define SF_V9_L4 0x20 | ||
| 374 | #define SF_V9_L5 0x28 | ||
| 375 | #define SF_V9_L6 0x30 | ||
| 376 | #define SF_V9_L7 0x38 | ||
| 377 | #define SF_V9_I0 0x40 | ||
| 378 | #define SF_V9_I1 0x48 | ||
| 379 | #define SF_V9_I2 0x50 | ||
| 380 | #define SF_V9_I3 0x58 | ||
| 381 | #define SF_V9_I4 0x60 | ||
| 382 | #define SF_V9_I5 0x68 | ||
| 383 | #define SF_V9_FP 0x70 | ||
| 384 | #define SF_V9_PC 0x78 | ||
| 385 | #define SF_V9_RETP 0x80 | ||
| 386 | #define SF_V9_XARG0 0x88 | ||
| 387 | #define SF_V9_XARG1 0x90 | ||
| 388 | #define SF_V9_XARG2 0x98 | ||
| 389 | #define SF_V9_XARG3 0xa0 | ||
| 390 | #define SF_V9_XARG4 0xa8 | ||
| 391 | #define SF_V9_XARG5 0xb0 | ||
| 392 | #define SF_V9_XXARG 0xb8 | ||
| 393 | |||
| 394 | #define SF_L0 0x00 | ||
| 395 | #define SF_L1 0x04 | ||
| 396 | #define SF_L2 0x08 | ||
| 397 | #define SF_L3 0x0c | ||
| 398 | #define SF_L4 0x10 | ||
| 399 | #define SF_L5 0x14 | ||
| 400 | #define SF_L6 0x18 | ||
| 401 | #define SF_L7 0x1c | ||
| 402 | #define SF_I0 0x20 | ||
| 403 | #define SF_I1 0x24 | ||
| 404 | #define SF_I2 0x28 | ||
| 405 | #define SF_I3 0x2c | ||
| 406 | #define SF_I4 0x30 | ||
| 407 | #define SF_I5 0x34 | ||
| 408 | #define SF_FP 0x38 | ||
| 409 | #define SF_PC 0x3c | ||
| 410 | #define SF_RETP 0x40 | ||
| 411 | #define SF_XARG0 0x44 | ||
| 412 | #define SF_XARG1 0x48 | ||
| 413 | #define SF_XARG2 0x4c | ||
| 414 | #define SF_XARG3 0x50 | ||
| 415 | #define SF_XARG4 0x54 | ||
| 416 | #define SF_XARG5 0x58 | ||
| 417 | #define SF_XXARG 0x5c | ||
| 418 | |||
| 419 | #ifdef __KERNEL__ | ||
| 420 | 100 | ||
| 421 | /* global_reg_snapshot offsets */ | 101 | /* global_reg_snapshot offsets */ |
| 422 | #define GR_SNAP_TSTATE 0x00 | 102 | #define GR_SNAP_TSTATE 0x00 |
| @@ -428,29 +108,4 @@ unsigned long profile_pc(struct pt_regs *); | |||
| 428 | #define GR_SNAP_THREAD 0x30 | 108 | #define GR_SNAP_THREAD 0x30 |
| 429 | #define GR_SNAP_PAD1 0x38 | 109 | #define GR_SNAP_PAD1 0x38 |
| 430 | 110 | ||
| 431 | #endif /* __KERNEL__ */ | ||
| 432 | |||
| 433 | /* Stuff for the ptrace system call */ | ||
| 434 | #define PTRACE_SPARC_DETACH 11 | ||
| 435 | #define PTRACE_GETREGS 12 | ||
| 436 | #define PTRACE_SETREGS 13 | ||
| 437 | #define PTRACE_GETFPREGS 14 | ||
| 438 | #define PTRACE_SETFPREGS 15 | ||
| 439 | #define PTRACE_READDATA 16 | ||
| 440 | #define PTRACE_WRITEDATA 17 | ||
| 441 | #define PTRACE_READTEXT 18 | ||
| 442 | #define PTRACE_WRITETEXT 19 | ||
| 443 | #define PTRACE_GETFPAREGS 20 | ||
| 444 | #define PTRACE_SETFPAREGS 21 | ||
| 445 | |||
| 446 | /* There are for debugging 64-bit processes, either from a 32 or 64 bit | ||
| 447 | * parent. Thus their complements are for debugging 32-bit processes only. | ||
| 448 | */ | ||
| 449 | |||
| 450 | #define PTRACE_GETREGS64 22 | ||
| 451 | #define PTRACE_SETREGS64 23 | ||
| 452 | /* PTRACE_SYSCALL is 24 */ | ||
| 453 | #define PTRACE_GETFPREGS64 25 | ||
| 454 | #define PTRACE_SETFPREGS64 26 | ||
| 455 | |||
| 456 | #endif /* !(__SPARC_PTRACE_H) */ | 111 | #endif /* !(__SPARC_PTRACE_H) */ |
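Editor's note: ptrace.h loses the exported register structures, the UREG_*/PT_V9_* offsets and the PTRACE_* request numbers to uapi/asm/ptrace.h; the kernel side keeps profile_pc(), the pt_regs_*_syscall() helpers, STACK_BIAS and the global_reg_snapshot offsets. The assembly offset constants are meant to mirror the C structs, which can be sanity-checked with a C11 sketch like this (illustrative, assuming the sparc64 struct pt_regs shown in the removed hunk):

    #include <stddef.h>
    #include <asm/ptrace.h>

    _Static_assert(offsetof(struct pt_regs, tstate) == 0x80, "PT_V9_TSTATE");
    _Static_assert(offsetof(struct pt_regs, tpc)    == 0x88, "PT_V9_TPC");
    _Static_assert(offsetof(struct pt_regs, tnpc)   == 0x90, "PT_V9_TNPC");
    _Static_assert(offsetof(struct pt_regs, y)      == 0x98, "PT_V9_Y");
    _Static_assert(offsetof(struct pt_regs, magic)  == 0x9c, "PT_V9_MAGIC");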
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 8a83699a5507..5e35e0517318 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h | |||
| @@ -1,17 +1,11 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Just a place holder. | 2 | * Just a place holder. |
| 3 | */ | 3 | */ |
| 4 | |||
| 5 | #ifndef _SPARC_SETUP_H | 4 | #ifndef _SPARC_SETUP_H |
| 6 | #define _SPARC_SETUP_H | 5 | #define _SPARC_SETUP_H |
| 7 | 6 | ||
| 8 | #if defined(__sparc__) && defined(__arch64__) | 7 | #include <uapi/asm/setup.h> |
| 9 | # define COMMAND_LINE_SIZE 2048 | ||
| 10 | #else | ||
| 11 | # define COMMAND_LINE_SIZE 256 | ||
| 12 | #endif | ||
| 13 | 8 | ||
| 14 | #ifdef __KERNEL__ | ||
| 15 | 9 | ||
| 16 | extern char reboot_command[]; | 10 | extern char reboot_command[]; |
| 17 | 11 | ||
| @@ -34,6 +28,4 @@ extern void sun_do_break(void); | |||
| 34 | extern int stop_a_enabled; | 28 | extern int stop_a_enabled; |
| 35 | extern int scons_pwroff; | 29 | extern int scons_pwroff; |
| 36 | 30 | ||
| 37 | #endif /* __KERNEL__ */ | ||
| 38 | |||
| 39 | #endif /* _SPARC_SETUP_H */ | 31 | #endif /* _SPARC_SETUP_H */ |
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h index 69914d748130..fc2df1e892cb 100644 --- a/arch/sparc/include/asm/sigcontext.h +++ b/arch/sparc/include/asm/sigcontext.h | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | #ifndef __SPARC_SIGCONTEXT_H | 1 | #ifndef __SPARC_SIGCONTEXT_H |
| 2 | #define __SPARC_SIGCONTEXT_H | 2 | #define __SPARC_SIGCONTEXT_H |
| 3 | 3 | ||
| 4 | #ifdef __KERNEL__ | ||
| 5 | #include <asm/ptrace.h> | 4 | #include <asm/ptrace.h> |
| 5 | #include <uapi/asm/sigcontext.h> | ||
| 6 | 6 | ||
| 7 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
| 8 | 8 | ||
| @@ -105,6 +105,4 @@ typedef struct { | |||
| 105 | 105 | ||
| 106 | #endif /* !(__ASSEMBLY__) */ | 106 | #endif /* !(__ASSEMBLY__) */ |
| 107 | 107 | ||
| 108 | #endif /* (__KERNEL__) */ | ||
| 109 | |||
| 110 | #endif /* !(__SPARC_SIGCONTEXT_H) */ | 108 | #endif /* !(__SPARC_SIGCONTEXT_H) */ |
diff --git a/arch/sparc/include/asm/siginfo.h b/arch/sparc/include/asm/siginfo.h index dbc182c438b4..48c34c19f810 100644 --- a/arch/sparc/include/asm/siginfo.h +++ b/arch/sparc/include/asm/siginfo.h | |||
| @@ -1,19 +1,8 @@ | |||
| 1 | #ifndef __SPARC_SIGINFO_H | 1 | #ifndef __SPARC_SIGINFO_H |
| 2 | #define __SPARC_SIGINFO_H | 2 | #define __SPARC_SIGINFO_H |
| 3 | 3 | ||
| 4 | #if defined(__sparc__) && defined(__arch64__) | 4 | #include <uapi/asm/siginfo.h> |
| 5 | 5 | ||
| 6 | #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | ||
| 7 | #define __ARCH_SI_BAND_T int | ||
| 8 | |||
| 9 | #endif /* defined(__sparc__) && defined(__arch64__) */ | ||
| 10 | |||
| 11 | |||
| 12 | #define __ARCH_SI_TRAPNO | ||
| 13 | |||
| 14 | #include <asm-generic/siginfo.h> | ||
| 15 | |||
| 16 | #ifdef __KERNEL__ | ||
| 17 | 6 | ||
| 18 | #ifdef CONFIG_COMPAT | 7 | #ifdef CONFIG_COMPAT |
| 19 | 8 | ||
| @@ -21,14 +10,4 @@ struct compat_siginfo; | |||
| 21 | 10 | ||
| 22 | #endif /* CONFIG_COMPAT */ | 11 | #endif /* CONFIG_COMPAT */ |
| 23 | 12 | ||
| 24 | #endif /* __KERNEL__ */ | ||
| 25 | |||
| 26 | #define SI_NOINFO 32767 /* no information in siginfo_t */ | ||
| 27 | |||
| 28 | /* | ||
| 29 | * SIGEMT si_codes | ||
| 30 | */ | ||
| 31 | #define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */ | ||
| 32 | #define NSIGEMT 1 | ||
| 33 | |||
| 34 | #endif /* !(__SPARC_SIGINFO_H) */ | 13 | #endif /* !(__SPARC_SIGINFO_H) */ |
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h index aa42fe30d5b9..d243c2ae02d2 100644 --- a/arch/sparc/include/asm/signal.h +++ b/arch/sparc/include/asm/signal.h | |||
| @@ -1,168 +1,13 @@ | |||
| 1 | #ifndef __SPARC_SIGNAL_H | 1 | #ifndef __SPARC_SIGNAL_H |
| 2 | #define __SPARC_SIGNAL_H | 2 | #define __SPARC_SIGNAL_H |
| 3 | 3 | ||
| 4 | #include <asm/sigcontext.h> | ||
| 5 | #include <linux/compiler.h> | ||
| 6 | |||
| 7 | #ifdef __KERNEL__ | ||
| 8 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
| 9 | #include <linux/personality.h> | 5 | #include <linux/personality.h> |
| 10 | #include <linux/types.h> | 6 | #include <linux/types.h> |
| 11 | #endif | 7 | #endif |
| 12 | #endif | 8 | #include <uapi/asm/signal.h> |
| 13 | |||
| 14 | /* On the Sparc the signal handlers get passed a 'sub-signal' code | ||
| 15 | * for certain signal types, which we document here. | ||
| 16 | */ | ||
| 17 | #define SIGHUP 1 | ||
| 18 | #define SIGINT 2 | ||
| 19 | #define SIGQUIT 3 | ||
| 20 | #define SIGILL 4 | ||
| 21 | #define SUBSIG_STACK 0 | ||
| 22 | #define SUBSIG_ILLINST 2 | ||
| 23 | #define SUBSIG_PRIVINST 3 | ||
| 24 | #define SUBSIG_BADTRAP(t) (0x80 + (t)) | ||
| 25 | |||
| 26 | #define SIGTRAP 5 | ||
| 27 | #define SIGABRT 6 | ||
| 28 | #define SIGIOT 6 | ||
| 29 | |||
| 30 | #define SIGEMT 7 | ||
| 31 | #define SUBSIG_TAG 10 | ||
| 32 | |||
| 33 | #define SIGFPE 8 | ||
| 34 | #define SUBSIG_FPDISABLED 0x400 | ||
| 35 | #define SUBSIG_FPERROR 0x404 | ||
| 36 | #define SUBSIG_FPINTOVFL 0x001 | ||
| 37 | #define SUBSIG_FPSTSIG 0x002 | ||
| 38 | #define SUBSIG_IDIVZERO 0x014 | ||
| 39 | #define SUBSIG_FPINEXACT 0x0c4 | ||
| 40 | #define SUBSIG_FPDIVZERO 0x0c8 | ||
| 41 | #define SUBSIG_FPUNFLOW 0x0cc | ||
| 42 | #define SUBSIG_FPOPERROR 0x0d0 | ||
| 43 | #define SUBSIG_FPOVFLOW 0x0d4 | ||
| 44 | |||
| 45 | #define SIGKILL 9 | ||
| 46 | #define SIGBUS 10 | ||
| 47 | #define SUBSIG_BUSTIMEOUT 1 | ||
| 48 | #define SUBSIG_ALIGNMENT 2 | ||
| 49 | #define SUBSIG_MISCERROR 5 | ||
| 50 | |||
| 51 | #define SIGSEGV 11 | ||
| 52 | #define SUBSIG_NOMAPPING 3 | ||
| 53 | #define SUBSIG_PROTECTION 4 | ||
| 54 | #define SUBSIG_SEGERROR 5 | ||
| 55 | |||
| 56 | #define SIGSYS 12 | ||
| 57 | |||
| 58 | #define SIGPIPE 13 | ||
| 59 | #define SIGALRM 14 | ||
| 60 | #define SIGTERM 15 | ||
| 61 | #define SIGURG 16 | ||
| 62 | |||
| 63 | /* SunOS values which deviate from the Linux/i386 ones */ | ||
| 64 | #define SIGSTOP 17 | ||
| 65 | #define SIGTSTP 18 | ||
| 66 | #define SIGCONT 19 | ||
| 67 | #define SIGCHLD 20 | ||
| 68 | #define SIGTTIN 21 | ||
| 69 | #define SIGTTOU 22 | ||
| 70 | #define SIGIO 23 | ||
| 71 | #define SIGPOLL SIGIO /* SysV name for SIGIO */ | ||
| 72 | #define SIGXCPU 24 | ||
| 73 | #define SIGXFSZ 25 | ||
| 74 | #define SIGVTALRM 26 | ||
| 75 | #define SIGPROF 27 | ||
| 76 | #define SIGWINCH 28 | ||
| 77 | #define SIGLOST 29 | ||
| 78 | #define SIGPWR SIGLOST | ||
| 79 | #define SIGUSR1 30 | ||
| 80 | #define SIGUSR2 31 | ||
| 81 | |||
| 82 | /* Most things should be clean enough to redefine this at will, if care | ||
| 83 | is taken to make libc match. */ | ||
| 84 | |||
| 85 | #define __OLD_NSIG 32 | ||
| 86 | #define __NEW_NSIG 64 | ||
| 87 | #ifdef __arch64__ | ||
| 88 | #define _NSIG_BPW 64 | ||
| 89 | #else | ||
| 90 | #define _NSIG_BPW 32 | ||
| 91 | #endif | ||
| 92 | #define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW) | ||
| 93 | |||
| 94 | #define SIGRTMIN 32 | ||
| 95 | #define SIGRTMAX __NEW_NSIG | ||
| 96 | |||
| 97 | #if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__) | ||
| 98 | #define _NSIG __NEW_NSIG | ||
| 99 | #define __new_sigset_t sigset_t | ||
| 100 | #define __new_sigaction sigaction | ||
| 101 | #define __new_sigaction32 sigaction32 | ||
| 102 | #define __old_sigset_t old_sigset_t | ||
| 103 | #define __old_sigaction old_sigaction | ||
| 104 | #define __old_sigaction32 old_sigaction32 | ||
| 105 | #else | ||
| 106 | #define _NSIG __OLD_NSIG | ||
| 107 | #define NSIG _NSIG | ||
| 108 | #define __old_sigset_t sigset_t | ||
| 109 | #define __old_sigaction sigaction | ||
| 110 | #define __old_sigaction32 sigaction32 | ||
| 111 | #endif | ||
| 112 | 9 | ||
| 113 | #ifndef __ASSEMBLY__ | 10 | #ifndef __ASSEMBLY__ |
| 114 | |||
| 115 | typedef unsigned long __old_sigset_t; /* at least 32 bits */ | ||
| 116 | |||
| 117 | typedef struct { | ||
| 118 | unsigned long sig[_NSIG_WORDS]; | ||
| 119 | } __new_sigset_t; | ||
| 120 | |||
| 121 | /* A SunOS sigstack */ | ||
| 122 | struct sigstack { | ||
| 123 | /* XXX 32-bit pointers pinhead XXX */ | ||
| 124 | char *the_stack; | ||
| 125 | int cur_status; | ||
| 126 | }; | ||
| 127 | |||
| 128 | /* Sigvec flags */ | ||
| 129 | #define _SV_SSTACK 1u /* This signal handler should use sig-stack */ | ||
| 130 | #define _SV_INTR 2u /* Sig return should not restart system call */ | ||
| 131 | #define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */ | ||
| 132 | #define _SV_IGNCHILD 8u /* Do not send SIGCHLD */ | ||
| 133 | |||
| 134 | /* | ||
| 135 | * sa_flags values: SA_STACK is not currently supported, but will allow the | ||
| 136 | * usage of signal stacks by using the (now obsolete) sa_restorer field in | ||
| 137 | * the sigaction structure as a stack pointer. This is now possible due to | ||
| 138 | * the changes in signal handling. LBT 010493. | ||
| 139 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
| 140 | */ | ||
| 141 | #define SA_NOCLDSTOP _SV_IGNCHILD | ||
| 142 | #define SA_STACK _SV_SSTACK | ||
| 143 | #define SA_ONSTACK _SV_SSTACK | ||
| 144 | #define SA_RESTART _SV_INTR | ||
| 145 | #define SA_ONESHOT _SV_RESET | ||
| 146 | #define SA_NODEFER 0x20u | ||
| 147 | #define SA_NOCLDWAIT 0x100u | ||
| 148 | #define SA_SIGINFO 0x200u | ||
| 149 | |||
| 150 | #define SA_NOMASK SA_NODEFER | ||
| 151 | |||
| 152 | #define SIG_BLOCK 0x01 /* for blocking signals */ | ||
| 153 | #define SIG_UNBLOCK 0x02 /* for unblocking signals */ | ||
| 154 | #define SIG_SETMASK 0x04 /* for setting the signal mask */ | ||
| 155 | |||
| 156 | /* | ||
| 157 | * sigaltstack controls | ||
| 158 | */ | ||
| 159 | #define SS_ONSTACK 1 | ||
| 160 | #define SS_DISABLE 2 | ||
| 161 | |||
| 162 | #define MINSIGSTKSZ 4096 | ||
| 163 | #define SIGSTKSZ 16384 | ||
| 164 | |||
| 165 | #ifdef __KERNEL__ | ||
| 166 | /* | 11 | /* |
| 167 | * DJHR | 12 | * DJHR |
| 168 | * SA_STATIC_ALLOC is used for the sparc32 system to indicate that this | 13 | * SA_STATIC_ALLOC is used for the sparc32 system to indicate that this |
| @@ -175,31 +20,6 @@ struct sigstack { | |||
| 175 | * | 20 | * |
| 176 | */ | 21 | */ |
| 177 | #define SA_STATIC_ALLOC 0x8000 | 22 | #define SA_STATIC_ALLOC 0x8000 |
| 178 | #endif | ||
| 179 | |||
| 180 | #include <asm-generic/signal-defs.h> | ||
| 181 | |||
| 182 | struct __new_sigaction { | ||
| 183 | __sighandler_t sa_handler; | ||
| 184 | unsigned long sa_flags; | ||
| 185 | __sigrestore_t sa_restorer; /* not used by Linux/SPARC yet */ | ||
| 186 | __new_sigset_t sa_mask; | ||
| 187 | }; | ||
| 188 | |||
| 189 | struct __old_sigaction { | ||
| 190 | __sighandler_t sa_handler; | ||
| 191 | __old_sigset_t sa_mask; | ||
| 192 | unsigned long sa_flags; | ||
| 193 | void (*sa_restorer)(void); /* not used by Linux/SPARC yet */ | ||
| 194 | }; | ||
| 195 | |||
| 196 | typedef struct sigaltstack { | ||
| 197 | void __user *ss_sp; | ||
| 198 | int ss_flags; | ||
| 199 | size_t ss_size; | ||
| 200 | } stack_t; | ||
| 201 | |||
| 202 | #ifdef __KERNEL__ | ||
| 203 | 23 | ||
| 204 | struct k_sigaction { | 24 | struct k_sigaction { |
| 205 | struct __new_sigaction sa; | 25 | struct __new_sigaction sa; |
| @@ -208,8 +28,5 @@ struct k_sigaction { | |||
| 208 | 28 | ||
| 209 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | 29 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) |
| 210 | 30 | ||
| 211 | #endif /* !(__KERNEL__) */ | ||
| 212 | |||
| 213 | #endif /* !(__ASSEMBLY__) */ | 31 | #endif /* !(__ASSEMBLY__) */ |
| 214 | |||
| 215 | #endif /* !(__SPARC_SIGNAL_H) */ | 32 | #endif /* !(__SPARC_SIGNAL_H) */ |
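Editor's note: signal.h keeps only the kernel-internal SA_STATIC_ALLOC flag, struct k_sigaction and ptrace_signal_deliver(); the signal numbers, SA_* flags and sigaction layouts now come from uapi/asm/signal.h. The SA_* values are sparc-flavoured (SA_RESTART is _SV_INTR, SA_NOCLDSTOP is _SV_IGNCHILD), but userspace uses them symbolically as before, e.g. this illustrative handler installation:

    #include <signal.h>
    #include <string.h>

    static void on_segv(int sig, siginfo_t *si, void *uctx)
    {
            /* si->si_addr carries the faulting address */
            (void)sig; (void)si; (void)uctx;
    }

    int install_segv_handler(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = on_segv;
            sa.sa_flags = SA_SIGINFO | SA_RESTART;
            sigemptyset(&sa.sa_mask);
            return sigaction(SIGSEGV, &sa, NULL);
    }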
diff --git a/arch/sparc/include/asm/termbits.h b/arch/sparc/include/asm/termbits.h index 23b10ff08df2..948067065ac5 100644 --- a/arch/sparc/include/asm/termbits.h +++ b/arch/sparc/include/asm/termbits.h | |||
| @@ -1,266 +1,8 @@ | |||
| 1 | #ifndef _SPARC_TERMBITS_H | 1 | #ifndef _SPARC_TERMBITS_H |
| 2 | #define _SPARC_TERMBITS_H | 2 | #define _SPARC_TERMBITS_H |
| 3 | 3 | ||
| 4 | #include <linux/posix_types.h> | 4 | #include <uapi/asm/termbits.h> |
| 5 | 5 | ||
| 6 | typedef unsigned char cc_t; | ||
| 7 | typedef unsigned int speed_t; | ||
| 8 | |||
| 9 | #if defined(__sparc__) && defined(__arch64__) | ||
| 10 | typedef unsigned int tcflag_t; | ||
| 11 | #else | ||
| 12 | typedef unsigned long tcflag_t; | ||
| 13 | #endif | ||
| 14 | |||
| 15 | #define NCC 8 | ||
| 16 | struct termio { | ||
| 17 | unsigned short c_iflag; /* input mode flags */ | ||
| 18 | unsigned short c_oflag; /* output mode flags */ | ||
| 19 | unsigned short c_cflag; /* control mode flags */ | ||
| 20 | unsigned short c_lflag; /* local mode flags */ | ||
| 21 | unsigned char c_line; /* line discipline */ | ||
| 22 | unsigned char c_cc[NCC]; /* control characters */ | ||
| 23 | }; | ||
| 24 | |||
| 25 | #define NCCS 17 | ||
| 26 | struct termios { | ||
| 27 | tcflag_t c_iflag; /* input mode flags */ | ||
| 28 | tcflag_t c_oflag; /* output mode flags */ | ||
| 29 | tcflag_t c_cflag; /* control mode flags */ | ||
| 30 | tcflag_t c_lflag; /* local mode flags */ | ||
| 31 | cc_t c_line; /* line discipline */ | ||
| 32 | #ifndef __KERNEL__ | ||
| 33 | cc_t c_cc[NCCS]; /* control characters */ | ||
| 34 | #else | ||
| 35 | cc_t c_cc[NCCS+2]; /* kernel needs 2 more to hold vmin/vtime */ | ||
| 36 | #define SIZEOF_USER_TERMIOS sizeof (struct termios) - (2*sizeof (cc_t)) | ||
| 37 | #endif | ||
| 38 | }; | ||
| 39 | |||
| 40 | struct termios2 { | ||
| 41 | tcflag_t c_iflag; /* input mode flags */ | ||
| 42 | tcflag_t c_oflag; /* output mode flags */ | ||
| 43 | tcflag_t c_cflag; /* control mode flags */ | ||
| 44 | tcflag_t c_lflag; /* local mode flags */ | ||
| 45 | cc_t c_line; /* line discipline */ | ||
| 46 | cc_t c_cc[NCCS+2]; /* control characters */ | ||
| 47 | speed_t c_ispeed; /* input speed */ | ||
| 48 | speed_t c_ospeed; /* output speed */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct ktermios { | ||
| 52 | tcflag_t c_iflag; /* input mode flags */ | ||
| 53 | tcflag_t c_oflag; /* output mode flags */ | ||
| 54 | tcflag_t c_cflag; /* control mode flags */ | ||
| 55 | tcflag_t c_lflag; /* local mode flags */ | ||
| 56 | cc_t c_line; /* line discipline */ | ||
| 57 | cc_t c_cc[NCCS+2]; /* control characters */ | ||
| 58 | speed_t c_ispeed; /* input speed */ | ||
| 59 | speed_t c_ospeed; /* output speed */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* c_cc characters */ | ||
| 63 | #define VINTR 0 | ||
| 64 | #define VQUIT 1 | ||
| 65 | #define VERASE 2 | ||
| 66 | #define VKILL 3 | ||
| 67 | #define VEOF 4 | ||
| 68 | #define VEOL 5 | ||
| 69 | #define VEOL2 6 | ||
| 70 | #define VSWTC 7 | ||
| 71 | #define VSTART 8 | ||
| 72 | #define VSTOP 9 | ||
| 73 | |||
| 74 | |||
| 75 | |||
| 76 | #define VSUSP 10 | ||
| 77 | #define VDSUSP 11 /* SunOS POSIX nicety I do believe... */ | ||
| 78 | #define VREPRINT 12 | ||
| 79 | #define VDISCARD 13 | ||
| 80 | #define VWERASE 14 | ||
| 81 | #define VLNEXT 15 | ||
| 82 | |||
| 83 | /* Kernel keeps vmin/vtime separated, user apps assume vmin/vtime is | ||
| 84 | * shared with eof/eol | ||
| 85 | */ | ||
| 86 | #ifdef __KERNEL__ | ||
| 87 | #define VMIN 16 | 6 | #define VMIN 16 |
| 88 | #define VTIME 17 | 7 | #define VTIME 17 |
| 89 | #else | ||
| 90 | #define VMIN VEOF | ||
| 91 | #define VTIME VEOL | ||
| 92 | #endif | ||
| 93 | |||
| 94 | /* c_iflag bits */ | ||
| 95 | #define IGNBRK 0x00000001 | ||
| 96 | #define BRKINT 0x00000002 | ||
| 97 | #define IGNPAR 0x00000004 | ||
| 98 | #define PARMRK 0x00000008 | ||
| 99 | #define INPCK 0x00000010 | ||
| 100 | #define ISTRIP 0x00000020 | ||
| 101 | #define INLCR 0x00000040 | ||
| 102 | #define IGNCR 0x00000080 | ||
| 103 | #define ICRNL 0x00000100 | ||
| 104 | #define IUCLC 0x00000200 | ||
| 105 | #define IXON 0x00000400 | ||
| 106 | #define IXANY 0x00000800 | ||
| 107 | #define IXOFF 0x00001000 | ||
| 108 | #define IMAXBEL 0x00002000 | ||
| 109 | #define IUTF8 0x00004000 | ||
| 110 | |||
| 111 | /* c_oflag bits */ | ||
| 112 | #define OPOST 0x00000001 | ||
| 113 | #define OLCUC 0x00000002 | ||
| 114 | #define ONLCR 0x00000004 | ||
| 115 | #define OCRNL 0x00000008 | ||
| 116 | #define ONOCR 0x00000010 | ||
| 117 | #define ONLRET 0x00000020 | ||
| 118 | #define OFILL 0x00000040 | ||
| 119 | #define OFDEL 0x00000080 | ||
| 120 | #define NLDLY 0x00000100 | ||
| 121 | #define NL0 0x00000000 | ||
| 122 | #define NL1 0x00000100 | ||
| 123 | #define CRDLY 0x00000600 | ||
| 124 | #define CR0 0x00000000 | ||
| 125 | #define CR1 0x00000200 | ||
| 126 | #define CR2 0x00000400 | ||
| 127 | #define CR3 0x00000600 | ||
| 128 | #define TABDLY 0x00001800 | ||
| 129 | #define TAB0 0x00000000 | ||
| 130 | #define TAB1 0x00000800 | ||
| 131 | #define TAB2 0x00001000 | ||
| 132 | #define TAB3 0x00001800 | ||
| 133 | #define XTABS 0x00001800 | ||
| 134 | #define BSDLY 0x00002000 | ||
| 135 | #define BS0 0x00000000 | ||
| 136 | #define BS1 0x00002000 | ||
| 137 | #define VTDLY 0x00004000 | ||
| 138 | #define VT0 0x00000000 | ||
| 139 | #define VT1 0x00004000 | ||
| 140 | #define FFDLY 0x00008000 | ||
| 141 | #define FF0 0x00000000 | ||
| 142 | #define FF1 0x00008000 | ||
| 143 | #define PAGEOUT 0x00010000 /* SUNOS specific */ | ||
| 144 | #define WRAP 0x00020000 /* SUNOS specific */ | ||
| 145 | |||
| 146 | /* c_cflag bit meaning */ | ||
| 147 | #define CBAUD 0x0000100f | ||
| 148 | #define B0 0x00000000 /* hang up */ | ||
| 149 | #define B50 0x00000001 | ||
| 150 | #define B75 0x00000002 | ||
| 151 | #define B110 0x00000003 | ||
| 152 | #define B134 0x00000004 | ||
| 153 | #define B150 0x00000005 | ||
| 154 | #define B200 0x00000006 | ||
| 155 | #define B300 0x00000007 | ||
| 156 | #define B600 0x00000008 | ||
| 157 | #define B1200 0x00000009 | ||
| 158 | #define B1800 0x0000000a | ||
| 159 | #define B2400 0x0000000b | ||
| 160 | #define B4800 0x0000000c | ||
| 161 | #define B9600 0x0000000d | ||
| 162 | #define B19200 0x0000000e | ||
| 163 | #define B38400 0x0000000f | ||
| 164 | #define EXTA B19200 | ||
| 165 | #define EXTB B38400 | ||
| 166 | #define CSIZE 0x00000030 | ||
| 167 | #define CS5 0x00000000 | ||
| 168 | #define CS6 0x00000010 | ||
| 169 | #define CS7 0x00000020 | ||
| 170 | #define CS8 0x00000030 | ||
| 171 | #define CSTOPB 0x00000040 | ||
| 172 | #define CREAD 0x00000080 | ||
| 173 | #define PARENB 0x00000100 | ||
| 174 | #define PARODD 0x00000200 | ||
| 175 | #define HUPCL 0x00000400 | ||
| 176 | #define CLOCAL 0x00000800 | ||
| 177 | #define CBAUDEX 0x00001000 | ||
| 178 | /* We'll never see these speeds with the Zilogs, but for completeness... */ | ||
| 179 | #define BOTHER 0x00001000 | ||
| 180 | #define B57600 0x00001001 | ||
| 181 | #define B115200 0x00001002 | ||
| 182 | #define B230400 0x00001003 | ||
| 183 | #define B460800 0x00001004 | ||
| 184 | /* This is what we can do with the Zilogs. */ | ||
| 185 | #define B76800 0x00001005 | ||
| 186 | /* This is what we can do with the SAB82532. */ | ||
| 187 | #define B153600 0x00001006 | ||
| 188 | #define B307200 0x00001007 | ||
| 189 | #define B614400 0x00001008 | ||
| 190 | #define B921600 0x00001009 | ||
| 191 | /* And these are the rest... */ | ||
| 192 | #define B500000 0x0000100a | ||
| 193 | #define B576000 0x0000100b | ||
| 194 | #define B1000000 0x0000100c | ||
| 195 | #define B1152000 0x0000100d | ||
| 196 | #define B1500000 0x0000100e | ||
| 197 | #define B2000000 0x0000100f | ||
| 198 | /* These have totally bogus values and nobody uses them | ||
| 199 | so far. Later on we'd have to use say 0x10000x and | ||
| 200 | adjust CBAUD constant and drivers accordingly. | ||
| 201 | #define B2500000 0x00001010 | ||
| 202 | #define B3000000 0x00001011 | ||
| 203 | #define B3500000 0x00001012 | ||
| 204 | #define B4000000 0x00001013 */ | ||
| 205 | #define CIBAUD 0x100f0000 /* input baud rate (not used) */ | ||
| 206 | #define CMSPAR 0x40000000 /* mark or space (stick) parity */ | ||
| 207 | #define CRTSCTS 0x80000000 /* flow control */ | ||
| 208 | |||
| 209 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
| 210 | |||
| 211 | /* c_lflag bits */ | ||
| 212 | #define ISIG 0x00000001 | ||
| 213 | #define ICANON 0x00000002 | ||
| 214 | #define XCASE 0x00000004 | ||
| 215 | #define ECHO 0x00000008 | ||
| 216 | #define ECHOE 0x00000010 | ||
| 217 | #define ECHOK 0x00000020 | ||
| 218 | #define ECHONL 0x00000040 | ||
| 219 | #define NOFLSH 0x00000080 | ||
| 220 | #define TOSTOP 0x00000100 | ||
| 221 | #define ECHOCTL 0x00000200 | ||
| 222 | #define ECHOPRT 0x00000400 | ||
| 223 | #define ECHOKE 0x00000800 | ||
| 224 | #define DEFECHO 0x00001000 /* SUNOS thing, what is it? */ | ||
| 225 | #define FLUSHO 0x00002000 | ||
| 226 | #define PENDIN 0x00004000 | ||
| 227 | #define IEXTEN 0x00008000 | ||
| 228 | #define EXTPROC 0x00010000 | ||
| 229 | |||
| 230 | /* modem lines */ | ||
| 231 | #define TIOCM_LE 0x001 | ||
| 232 | #define TIOCM_DTR 0x002 | ||
| 233 | #define TIOCM_RTS 0x004 | ||
| 234 | #define TIOCM_ST 0x008 | ||
| 235 | #define TIOCM_SR 0x010 | ||
| 236 | #define TIOCM_CTS 0x020 | ||
| 237 | #define TIOCM_CAR 0x040 | ||
| 238 | #define TIOCM_RNG 0x080 | ||
| 239 | #define TIOCM_DSR 0x100 | ||
| 240 | #define TIOCM_CD TIOCM_CAR | ||
| 241 | #define TIOCM_RI TIOCM_RNG | ||
| 242 | #define TIOCM_OUT1 0x2000 | ||
| 243 | #define TIOCM_OUT2 0x4000 | ||
| 244 | #define TIOCM_LOOP 0x8000 | ||
| 245 | |||
| 246 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
| 247 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
| 248 | |||
| 249 | |||
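The modem-status bits and the TIOCSERGETLSR result above are normally read with ioctl(). A minimal user-space sketch, assuming a serial device node such as /dev/ttyS0 (hypothetical); TIOCSER_TEMT may not be exported by every libc, so its value is repeated from the header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#ifndef TIOCSER_TEMT
#define TIOCSER_TEMT 0x01	/* same value as in the header above */
#endif

int main(void)
{
	int bits = 0, lsr = 0;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* hypothetical port */

	if (fd < 0)
		return 1;
	/* Read the TIOCM_* modem lines and the line status register. */
	if (ioctl(fd, TIOCMGET, &bits) == 0 && (bits & TIOCM_CAR))
		printf("carrier detect asserted\n");
	if (ioctl(fd, TIOCSERGETLSR, &lsr) == 0 && (lsr & TIOCSER_TEMT))
		printf("transmitter physically empty\n");
	return 0;
}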
| 250 | /* tcflow() and TCXONC use these */ | ||
| 251 | #define TCOOFF 0 | ||
| 252 | #define TCOON 1 | ||
| 253 | #define TCIOFF 2 | ||
| 254 | #define TCION 3 | ||
| 255 | |||
| 256 | /* tcflush() and TCFLSH use these */ | ||
| 257 | #define TCIFLUSH 0 | ||
| 258 | #define TCOFLUSH 1 | ||
| 259 | #define TCIOFLUSH 2 | ||
| 260 | |||
| 261 | /* tcsetattr uses these */ | ||
| 262 | #define TCSANOW 0 | ||
| 263 | #define TCSADRAIN 1 | ||
| 264 | #define TCSAFLUSH 2 | ||
| 265 | |||
| 266 | #endif /* !(_SPARC_TERMBITS_H) */ | 8 | #endif /* !(_SPARC_TERMBITS_H) */ |
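The c_cflag bits and tcsetattr() constants kept in this header are what a typical serial-port setup manipulates through the standard termios calls. A minimal sketch, assuming the caller supplies an already-open descriptor; 38400/8N1 is chosen arbitrarily:

#include <termios.h>

/* Configure fd for 38400 baud, 8 data bits, no parity, 1 stop bit.
 * Error handling beyond the return value is left to the caller. */
int setup_38400_8n1(int fd)
{
	struct termios tio;

	if (tcgetattr(fd, &tio) < 0)
		return -1;

	tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);	/* clear size/parity/stop */
	tio.c_cflag |= CS8 | CREAD | CLOCAL;		/* 8 bits, enable receiver */
	cfsetispeed(&tio, B38400);			/* rates use the CBAUD encoding above */
	cfsetospeed(&tio, B38400);

	return tcsetattr(fd, TCSANOW, &tio);		/* apply immediately */
}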
diff --git a/arch/sparc/include/asm/termios.h b/arch/sparc/include/asm/termios.h index e2f46705a210..0c2414ddd52c 100644 --- a/arch/sparc/include/asm/termios.h +++ b/arch/sparc/include/asm/termios.h | |||
| @@ -1,45 +1,8 @@ | |||
| 1 | #ifndef _SPARC_TERMIOS_H | 1 | #ifndef _SPARC_TERMIOS_H |
| 2 | #define _SPARC_TERMIOS_H | 2 | #define _SPARC_TERMIOS_H |
| 3 | 3 | ||
| 4 | #include <asm/ioctls.h> | 4 | #include <uapi/asm/termios.h> |
| 5 | #include <asm/termbits.h> | ||
| 6 | 5 | ||
| 7 | #if defined(__KERNEL__) || defined(__DEFINE_BSD_TERMIOS) | ||
| 8 | struct sgttyb { | ||
| 9 | char sg_ispeed; | ||
| 10 | char sg_ospeed; | ||
| 11 | char sg_erase; | ||
| 12 | char sg_kill; | ||
| 13 | short sg_flags; | ||
| 14 | }; | ||
| 15 | |||
| 16 | struct tchars { | ||
| 17 | char t_intrc; | ||
| 18 | char t_quitc; | ||
| 19 | char t_startc; | ||
| 20 | char t_stopc; | ||
| 21 | char t_eofc; | ||
| 22 | char t_brkc; | ||
| 23 | }; | ||
| 24 | |||
| 25 | struct ltchars { | ||
| 26 | char t_suspc; | ||
| 27 | char t_dsuspc; | ||
| 28 | char t_rprntc; | ||
| 29 | char t_flushc; | ||
| 30 | char t_werasc; | ||
| 31 | char t_lnextc; | ||
| 32 | }; | ||
| 33 | #endif /* __KERNEL__ */ | ||
| 34 | |||
| 35 | struct winsize { | ||
| 36 | unsigned short ws_row; | ||
| 37 | unsigned short ws_col; | ||
| 38 | unsigned short ws_xpixel; | ||
| 39 | unsigned short ws_ypixel; | ||
| 40 | }; | ||
| 41 | |||
| 42 | #ifdef __KERNEL__ | ||
| 43 | 6 | ||
| 44 | /* | 7 | /* |
| 45 | * c_cc characters in the termio structure. Oh, how I love being | 8 | * c_cc characters in the termio structure. Oh, how I love being |
| @@ -180,6 +143,4 @@ struct winsize { | |||
| 180 | err; \ | 143 | err; \ |
| 181 | }) | 144 | }) |
| 182 | 145 | ||
| 183 | #endif /* __KERNEL__ */ | ||
| 184 | |||
| 185 | #endif /* _SPARC_TERMIOS_H */ | 146 | #endif /* _SPARC_TERMIOS_H */ |
diff --git a/arch/sparc/include/asm/traps.h b/arch/sparc/include/asm/traps.h index 3aa62dde343f..51abcb1f9b3b 100644 --- a/arch/sparc/include/asm/traps.h +++ b/arch/sparc/include/asm/traps.h | |||
| @@ -3,14 +3,12 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) |
| 5 | */ | 5 | */ |
| 6 | |||
| 7 | #ifndef _SPARC_TRAPS_H | 6 | #ifndef _SPARC_TRAPS_H |
| 8 | #define _SPARC_TRAPS_H | 7 | #define _SPARC_TRAPS_H |
| 9 | 8 | ||
| 10 | #define NUM_SPARC_TRAPS 255 | 9 | #include <uapi/asm/traps.h> |
| 11 | 10 | ||
| 12 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
| 13 | #ifdef __KERNEL__ | ||
| 14 | /* This is for V8 compliant Sparc CPUS */ | 12 | /* This is for V8 compliant Sparc CPUS */ |
| 15 | struct tt_entry { | 13 | struct tt_entry { |
| 16 | unsigned long inst_one; | 14 | unsigned long inst_one; |
| @@ -22,112 +20,5 @@ struct tt_entry { | |||
| 22 | /* We set this to _start in system setup. */ | 20 | /* We set this to _start in system setup. */ |
| 23 | extern struct tt_entry *sparc_ttable; | 21 | extern struct tt_entry *sparc_ttable; |
| 24 | 22 | ||
| 25 | #endif /* (__KERNEL__) */ | ||
| 26 | #endif /* !(__ASSEMBLY__) */ | 23 | #endif /* !(__ASSEMBLY__) */ |
| 27 | |||
| 28 | /* For patching the trap table at boot time, we need to know how to | ||
| 29 | * form various common Sparc instructions. Thus these macros... | ||
| 30 | */ | ||
| 31 | |||
| 32 | #define SPARC_MOV_CONST_L3(const) (0xa6102000 | (const&0xfff)) | ||
| 33 | |||
| 34 | /* The following assumes that the branch lies before the place we | ||
| 35 | * are branching to. This is the case for a trap vector... | ||
| 36 | * You have been warned. | ||
| 37 | */ | ||
| 38 | #define SPARC_BRANCH(dest_addr, inst_addr) \ | ||
| 39 | (0x10800000 | (((dest_addr-inst_addr)>>2)&0x3fffff)) | ||
| 40 | |||
| 41 | #define SPARC_RD_PSR_L0 (0xa1480000) | ||
| 42 | #define SPARC_RD_WIM_L3 (0xa7500000) | ||
| 43 | #define SPARC_NOP (0x01000000) | ||
| 44 | |||
| 45 | /* Various interesting trap levels. */ | ||
| 46 | /* First, hardware traps. */ | ||
| 47 | #define SP_TRAP_TFLT 0x1 /* Text fault */ | ||
| 48 | #define SP_TRAP_II 0x2 /* Illegal Instruction */ | ||
| 49 | #define SP_TRAP_PI 0x3 /* Privileged Instruction */ | ||
| 50 | #define SP_TRAP_FPD 0x4 /* Floating Point Disabled */ | ||
| 51 | #define SP_TRAP_WOVF 0x5 /* Window Overflow */ | ||
| 52 | #define SP_TRAP_WUNF 0x6 /* Window Underflow */ | ||
| 53 | #define SP_TRAP_MNA 0x7 /* Memory Address Unaligned */ | ||
| 54 | #define SP_TRAP_FPE 0x8 /* Floating Point Exception */ | ||
| 55 | #define SP_TRAP_DFLT 0x9 /* Data Fault */ | ||
| 56 | #define SP_TRAP_TOF 0xa /* Tag Overflow */ | ||
| 57 | #define SP_TRAP_WDOG 0xb /* Watchpoint Detected */ | ||
| 58 | #define SP_TRAP_IRQ1 0x11 /* IRQ level 1 */ | ||
| 59 | #define SP_TRAP_IRQ2 0x12 /* IRQ level 2 */ | ||
| 60 | #define SP_TRAP_IRQ3 0x13 /* IRQ level 3 */ | ||
| 61 | #define SP_TRAP_IRQ4 0x14 /* IRQ level 4 */ | ||
| 62 | #define SP_TRAP_IRQ5 0x15 /* IRQ level 5 */ | ||
| 63 | #define SP_TRAP_IRQ6 0x16 /* IRQ level 6 */ | ||
| 64 | #define SP_TRAP_IRQ7 0x17 /* IRQ level 7 */ | ||
| 65 | #define SP_TRAP_IRQ8 0x18 /* IRQ level 8 */ | ||
| 66 | #define SP_TRAP_IRQ9 0x19 /* IRQ level 9 */ | ||
| 67 | #define SP_TRAP_IRQ10 0x1a /* IRQ level 10 */ | ||
| 68 | #define SP_TRAP_IRQ11 0x1b /* IRQ level 11 */ | ||
| 69 | #define SP_TRAP_IRQ12 0x1c /* IRQ level 12 */ | ||
| 70 | #define SP_TRAP_IRQ13 0x1d /* IRQ level 13 */ | ||
| 71 | #define SP_TRAP_IRQ14 0x1e /* IRQ level 14 */ | ||
| 72 | #define SP_TRAP_IRQ15 0x1f /* IRQ level 15 Non-maskable */ | ||
| 73 | #define SP_TRAP_RACC 0x20 /* Register Access Error ??? */ | ||
| 74 | #define SP_TRAP_IACC 0x21 /* Instruction Access Error */ | ||
| 75 | #define SP_TRAP_CPDIS 0x24 /* Co-Processor Disabled */ | ||
| 76 | #define SP_TRAP_BADFL 0x25 /* Unimplemented Flush Instruction */ | ||
| 77 | #define SP_TRAP_CPEXP 0x28 /* Co-Processor Exception */ | ||
| 78 | #define SP_TRAP_DACC 0x29 /* Data Access Error */ | ||
| 79 | #define SP_TRAP_DIVZ 0x2a /* Divide By Zero */ | ||
| 80 | #define SP_TRAP_DSTORE 0x2b /* Data Store Error ??? */ | ||
| 81 | #define SP_TRAP_DMM 0x2c /* Data Access MMU Miss ??? */ | ||
| 82 | #define SP_TRAP_IMM 0x3c /* Instruction Access MMU Miss ??? */ | ||
| 83 | |||
| 84 | /* Now the Software Traps... */ | ||
| 85 | #define SP_TRAP_SUNOS 0x80 /* SunOS System Call */ | ||
| 86 | #define SP_TRAP_SBPT 0x81 /* Software Breakpoint */ | ||
| 87 | #define SP_TRAP_SDIVZ 0x82 /* Software Divide-by-Zero trap */ | ||
| 88 | #define SP_TRAP_FWIN 0x83 /* Flush Windows */ | ||
| 89 | #define SP_TRAP_CWIN 0x84 /* Clean Windows */ | ||
| 90 | #define SP_TRAP_RCHK 0x85 /* Range Check */ | ||
| 91 | #define SP_TRAP_FUNA 0x86 /* Fix Unaligned Access */ | ||
| 92 | #define SP_TRAP_IOWFL 0x87 /* Integer Overflow */ | ||
| 93 | #define SP_TRAP_SOLARIS 0x88 /* Solaris System Call */ | ||
| 94 | #define SP_TRAP_NETBSD 0x89 /* NetBSD System Call */ | ||
| 95 | #define SP_TRAP_LINUX 0x90 /* Linux System Call */ | ||
| 96 | |||
| 97 | /* Names used for compatibility with SunOS */ | ||
| 98 | #define ST_SYSCALL 0x00 | ||
| 99 | #define ST_BREAKPOINT 0x01 | ||
| 100 | #define ST_DIV0 0x02 | ||
| 101 | #define ST_FLUSH_WINDOWS 0x03 | ||
| 102 | #define ST_CLEAN_WINDOWS 0x04 | ||
| 103 | #define ST_RANGE_CHECK 0x05 | ||
| 104 | #define ST_FIX_ALIGN 0x06 | ||
| 105 | #define ST_INT_OVERFLOW 0x07 | ||
| 106 | |||
| 107 | /* Special traps... */ | ||
| 108 | #define SP_TRAP_KBPT1 0xfe /* KADB/PROM Breakpoint one */ | ||
| 109 | #define SP_TRAP_KBPT2 0xff /* KADB/PROM Breakpoint two */ | ||
| 110 | |||
| 111 | /* Handy Macros */ | ||
| 112 | /* Is this a trap we never expect to get? */ | ||
| 113 | #define BAD_TRAP_P(level) \ | ||
| 114 | ((level > SP_TRAP_WDOG && level < SP_TRAP_IRQ1) || \ | ||
| 115 | (level > SP_TRAP_IACC && level < SP_TRAP_CPDIS) || \ | ||
| 116 | (level > SP_TRAP_BADFL && level < SP_TRAP_CPEXP) || \ | ||
| 117 | (level > SP_TRAP_DMM && level < SP_TRAP_IMM) || \ | ||
| 118 | (level > SP_TRAP_IMM && level < SP_TRAP_SUNOS) || \ | ||
| 119 | (level > SP_TRAP_LINUX && level < SP_TRAP_KBPT1)) | ||
| 120 | |||
| 121 | /* Is this a Hardware trap? */ | ||
| 122 | #define HW_TRAP_P(level) ((level > 0) && (level < SP_TRAP_SUNOS)) | ||
| 123 | |||
| 124 | /* Is this a Software trap? */ | ||
| 125 | #define SW_TRAP_P(level) ((level >= SP_TRAP_SUNOS) && (level <= SP_TRAP_KBPT2)) | ||
| 126 | |||
| 127 | /* Is this a system call for some OS we know about? */ | ||
| 128 | #define SCALL_TRAP_P(level) ((level == SP_TRAP_SUNOS) || \ | ||
| 129 | (level == SP_TRAP_SOLARIS) || \ | ||
| 130 | (level == SP_TRAP_NETBSD) || \ | ||
| 131 | (level == SP_TRAP_LINUX)) | ||
| 132 | |||
| 133 | #endif /* !(_SPARC_TRAPS_H) */ | 24 | #endif /* !(_SPARC_TRAPS_H) */ |
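The trap-level predicates removed here (they move to the uapi copy of the header) are plain range checks over the SP_TRAP_* values. A sketch of how a diagnostic routine might classify a level, assuming <asm/traps.h> is available as it would be on sparc; the syscall and bad-trap cases are tested first because their ranges overlap the hardware range:

#include <asm/traps.h>	/* sparc only; provides SP_TRAP_* and the *_TRAP_P() macros */

static const char *trap_kind(unsigned int level)
{
	if (SCALL_TRAP_P(level))
		return "system call";
	if (BAD_TRAP_P(level))
		return "unexpected";
	if (HW_TRAP_P(level))
		return "hardware";
	if (SW_TRAP_P(level))
		return "software";
	return "unknown";
}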
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index d9a677c51926..0ecea6ed943e 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h | |||
| @@ -1,6 +1,3 @@ | |||
| 1 | #ifndef _SPARC_UNISTD_H | ||
| 2 | #define _SPARC_UNISTD_H | ||
| 3 | |||
| 4 | /* | 1 | /* |
| 5 | * System calls under the Sparc. | 2 | * System calls under the Sparc. |
| 6 | * | 3 | * |
| @@ -14,415 +11,15 @@ | |||
| 14 | * | 11 | * |
| 15 | * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) | 12 | * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) |
| 16 | */ | 13 | */ |
| 17 | #ifndef __32bit_syscall_numbers__ | 14 | #ifndef _SPARC_UNISTD_H |
| 18 | #ifndef __arch64__ | 15 | #define _SPARC_UNISTD_H |
| 19 | #define __32bit_syscall_numbers__ | 16 | |
| 20 | #endif | 17 | #include <uapi/asm/unistd.h> |
| 21 | #endif | ||
| 22 | 18 | ||
| 23 | #define __NR_restart_syscall 0 /* Linux Specific */ | ||
| 24 | #define __NR_exit 1 /* Common */ | ||
| 25 | #define __NR_fork 2 /* Common */ | ||
| 26 | #define __NR_read 3 /* Common */ | ||
| 27 | #define __NR_write 4 /* Common */ | ||
| 28 | #define __NR_open 5 /* Common */ | ||
| 29 | #define __NR_close 6 /* Common */ | ||
| 30 | #define __NR_wait4 7 /* Common */ | ||
| 31 | #define __NR_creat 8 /* Common */ | ||
| 32 | #define __NR_link 9 /* Common */ | ||
| 33 | #define __NR_unlink 10 /* Common */ | ||
| 34 | #define __NR_execv 11 /* SunOS Specific */ | ||
| 35 | #define __NR_chdir 12 /* Common */ | ||
| 36 | #define __NR_chown 13 /* Common */ | ||
| 37 | #define __NR_mknod 14 /* Common */ | ||
| 38 | #define __NR_chmod 15 /* Common */ | ||
| 39 | #define __NR_lchown 16 /* Common */ | ||
| 40 | #define __NR_brk 17 /* Common */ | ||
| 41 | #define __NR_perfctr 18 /* Performance counter operations */ | ||
| 42 | #define __NR_lseek 19 /* Common */ | ||
| 43 | #define __NR_getpid 20 /* Common */ | ||
| 44 | #define __NR_capget 21 /* Linux Specific */ | ||
| 45 | #define __NR_capset 22 /* Linux Specific */ | ||
| 46 | #define __NR_setuid 23 /* Implemented via setreuid in SunOS */ | ||
| 47 | #define __NR_getuid 24 /* Common */ | ||
| 48 | #define __NR_vmsplice 25 /* ENOSYS under SunOS */ | ||
| 49 | #define __NR_ptrace 26 /* Common */ | ||
| 50 | #define __NR_alarm 27 /* Implemented via setitimer in SunOS */ | ||
| 51 | #define __NR_sigaltstack 28 /* Common */ | ||
| 52 | #define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */ | ||
| 53 | #define __NR_utime 30 /* Implemented via utimes() under SunOS */ | ||
| 54 | #ifdef __32bit_syscall_numbers__ | ||
| 55 | #define __NR_lchown32 31 /* Linux sparc32 specific */ | ||
| 56 | #define __NR_fchown32 32 /* Linux sparc32 specific */ | ||
| 57 | #endif | ||
| 58 | #define __NR_access 33 /* Common */ | ||
| 59 | #define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */ | ||
| 60 | #ifdef __32bit_syscall_numbers__ | ||
| 61 | #define __NR_chown32 35 /* Linux sparc32 specific */ | ||
| 62 | #endif | ||
| 63 | #define __NR_sync 36 /* Common */ | ||
| 64 | #define __NR_kill 37 /* Common */ | ||
| 65 | #define __NR_stat 38 /* Common */ | ||
| 66 | #define __NR_sendfile 39 /* Linux Specific */ | ||
| 67 | #define __NR_lstat 40 /* Common */ | ||
| 68 | #define __NR_dup 41 /* Common */ | ||
| 69 | #define __NR_pipe 42 /* Common */ | ||
| 70 | #define __NR_times 43 /* Implemented via getrusage() in SunOS */ | ||
| 71 | #ifdef __32bit_syscall_numbers__ | ||
| 72 | #define __NR_getuid32 44 /* Linux sparc32 specific */ | ||
| 73 | #endif | ||
| 74 | #define __NR_umount2 45 /* Linux Specific */ | ||
| 75 | #define __NR_setgid 46 /* Implemented via setregid() in SunOS */ | ||
| 76 | #define __NR_getgid 47 /* Common */ | ||
| 77 | #define __NR_signal 48 /* Implemented via sigvec() in SunOS */ | ||
| 78 | #define __NR_geteuid 49 /* SunOS calls getuid() */ | ||
| 79 | #define __NR_getegid 50 /* SunOS calls getgid() */ | ||
| 80 | #define __NR_acct 51 /* Common */ | ||
| 81 | #ifdef __32bit_syscall_numbers__ | ||
| 82 | #define __NR_getgid32 53 /* Linux sparc32 specific */ | ||
| 83 | #else | ||
| 84 | #define __NR_memory_ordering 52 /* Linux Specific */ | ||
| 85 | #endif | ||
| 86 | #define __NR_ioctl 54 /* Common */ | ||
| 87 | #define __NR_reboot 55 /* Common */ | ||
| 88 | #ifdef __32bit_syscall_numbers__ | ||
| 89 | #define __NR_mmap2 56 /* Linux sparc32 Specific */ | ||
| 90 | #endif | ||
| 91 | #define __NR_symlink 57 /* Common */ | ||
| 92 | #define __NR_readlink 58 /* Common */ | ||
| 93 | #define __NR_execve 59 /* Common */ | ||
| 94 | #define __NR_umask 60 /* Common */ | ||
| 95 | #define __NR_chroot 61 /* Common */ | ||
| 96 | #define __NR_fstat 62 /* Common */ | ||
| 97 | #define __NR_fstat64 63 /* Linux Specific */ | ||
| 98 | #define __NR_getpagesize 64 /* Common */ | ||
| 99 | #define __NR_msync 65 /* Common in newer 1.3.x revs... */ | ||
| 100 | #define __NR_vfork 66 /* Common */ | ||
| 101 | #define __NR_pread64 67 /* Linux Specific */ | ||
| 102 | #define __NR_pwrite64 68 /* Linux Specific */ | ||
| 103 | #ifdef __32bit_syscall_numbers__ | ||
| 104 | #define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */ | ||
| 105 | #define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */ | ||
| 106 | #endif | ||
| 107 | #define __NR_mmap 71 /* Common */ | ||
| 108 | #ifdef __32bit_syscall_numbers__ | ||
| 109 | #define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */ | ||
| 110 | #endif | ||
| 111 | #define __NR_munmap 73 /* Common */ | ||
| 112 | #define __NR_mprotect 74 /* Common */ | ||
| 113 | #define __NR_madvise 75 /* Common */ | ||
| 114 | #define __NR_vhangup 76 /* Common */ | ||
| 115 | #ifdef __32bit_syscall_numbers__ | ||
| 116 | #define __NR_truncate64 77 /* Linux sparc32 Specific */ | ||
| 117 | #endif | ||
| 118 | #define __NR_mincore 78 /* Common */ | ||
| 119 | #define __NR_getgroups 79 /* Common */ | ||
| 120 | #define __NR_setgroups 80 /* Common */ | ||
| 121 | #define __NR_getpgrp 81 /* Common */ | ||
| 122 | #ifdef __32bit_syscall_numbers__ | ||
| 123 | #define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */ | ||
| 124 | #endif | ||
| 125 | #define __NR_setitimer 83 /* Common */ | ||
| 126 | #ifdef __32bit_syscall_numbers__ | ||
| 127 | #define __NR_ftruncate64 84 /* Linux sparc32 Specific */ | ||
| 128 | #endif | ||
| 129 | #define __NR_swapon 85 /* Common */ | ||
| 130 | #define __NR_getitimer 86 /* Common */ | ||
| 131 | #ifdef __32bit_syscall_numbers__ | ||
| 132 | #define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */ | ||
| 133 | #endif | ||
| 134 | #define __NR_sethostname 88 /* Common */ | ||
| 135 | #ifdef __32bit_syscall_numbers__ | ||
| 136 | #define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */ | ||
| 137 | #endif | ||
| 138 | #define __NR_dup2 90 /* Common */ | ||
| 139 | #ifdef __32bit_syscall_numbers__ | ||
| 140 | #define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */ | ||
| 141 | #endif | ||
| 142 | #define __NR_fcntl 92 /* Common */ | ||
| 143 | #define __NR_select 93 /* Common */ | ||
| 144 | #ifdef __32bit_syscall_numbers__ | ||
| 145 | #define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */ | ||
| 146 | #endif | ||
| 147 | #define __NR_fsync 95 /* Common */ | ||
| 148 | #define __NR_setpriority 96 /* Common */ | ||
| 149 | #define __NR_socket 97 /* Common */ | ||
| 150 | #define __NR_connect 98 /* Common */ | ||
| 151 | #define __NR_accept 99 /* Common */ | ||
| 152 | #define __NR_getpriority 100 /* Common */ | ||
| 153 | #define __NR_rt_sigreturn 101 /* Linux Specific */ | ||
| 154 | #define __NR_rt_sigaction 102 /* Linux Specific */ | ||
| 155 | #define __NR_rt_sigprocmask 103 /* Linux Specific */ | ||
| 156 | #define __NR_rt_sigpending 104 /* Linux Specific */ | ||
| 157 | #define __NR_rt_sigtimedwait 105 /* Linux Specific */ | ||
| 158 | #define __NR_rt_sigqueueinfo 106 /* Linux Specific */ | ||
| 159 | #define __NR_rt_sigsuspend 107 /* Linux Specific */ | ||
| 160 | #ifdef __32bit_syscall_numbers__ | ||
| 161 | #define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */ | ||
| 162 | #define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */ | ||
| 163 | #define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */ | ||
| 164 | #define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */ | ||
| 165 | #define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */ | ||
| 166 | #else | ||
| 167 | #define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */ | ||
| 168 | #define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */ | ||
| 169 | #define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */ | ||
| 170 | #define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */ | ||
| 171 | #endif | ||
| 172 | #define __NR_recvmsg 113 /* Common */ | ||
| 173 | #define __NR_sendmsg 114 /* Common */ | ||
| 174 | #ifdef __32bit_syscall_numbers__ | ||
| 175 | #define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */ | ||
| 176 | #endif | ||
| 177 | #define __NR_gettimeofday 116 /* Common */ | ||
| 178 | #define __NR_getrusage 117 /* Common */ | ||
| 179 | #define __NR_getsockopt 118 /* Common */ | ||
| 180 | #define __NR_getcwd 119 /* Linux Specific */ | ||
| 181 | #define __NR_readv 120 /* Common */ | ||
| 182 | #define __NR_writev 121 /* Common */ | ||
| 183 | #define __NR_settimeofday 122 /* Common */ | ||
| 184 | #define __NR_fchown 123 /* Common */ | ||
| 185 | #define __NR_fchmod 124 /* Common */ | ||
| 186 | #define __NR_recvfrom 125 /* Common */ | ||
| 187 | #define __NR_setreuid 126 /* Common */ | ||
| 188 | #define __NR_setregid 127 /* Common */ | ||
| 189 | #define __NR_rename 128 /* Common */ | ||
| 190 | #define __NR_truncate 129 /* Common */ | ||
| 191 | #define __NR_ftruncate 130 /* Common */ | ||
| 192 | #define __NR_flock 131 /* Common */ | ||
| 193 | #define __NR_lstat64 132 /* Linux Specific */ | ||
| 194 | #define __NR_sendto 133 /* Common */ | ||
| 195 | #define __NR_shutdown 134 /* Common */ | ||
| 196 | #define __NR_socketpair 135 /* Common */ | ||
| 197 | #define __NR_mkdir 136 /* Common */ | ||
| 198 | #define __NR_rmdir 137 /* Common */ | ||
| 199 | #define __NR_utimes 138 /* SunOS Specific */ | ||
| 200 | #define __NR_stat64 139 /* Linux Specific */ | ||
| 201 | #define __NR_sendfile64 140 /* adjtime under SunOS */ | ||
| 202 | #define __NR_getpeername 141 /* Common */ | ||
| 203 | #define __NR_futex 142 /* gethostid under SunOS */ | ||
| 204 | #define __NR_gettid 143 /* ENOSYS under SunOS */ | ||
| 205 | #define __NR_getrlimit 144 /* Common */ | ||
| 206 | #define __NR_setrlimit 145 /* Common */ | ||
| 207 | #define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */ | ||
| 208 | #define __NR_prctl 147 /* ENOSYS under SunOS */ | ||
| 209 | #define __NR_pciconfig_read 148 /* ENOSYS under SunOS */ | ||
| 210 | #define __NR_pciconfig_write 149 /* ENOSYS under SunOS */ | ||
| 211 | #define __NR_getsockname 150 /* Common */ | ||
| 212 | #define __NR_inotify_init 151 /* Linux specific */ | ||
| 213 | #define __NR_inotify_add_watch 152 /* Linux specific */ | ||
| 214 | #define __NR_poll 153 /* Common */ | ||
| 215 | #define __NR_getdents64 154 /* Linux specific */ | ||
| 216 | #ifdef __32bit_syscall_numbers__ | ||
| 217 | #define __NR_fcntl64 155 /* Linux sparc32 Specific */ | ||
| 218 | #endif | ||
| 219 | #define __NR_inotify_rm_watch 156 /* Linux specific */ | ||
| 220 | #define __NR_statfs 157 /* Common */ | ||
| 221 | #define __NR_fstatfs 158 /* Common */ | ||
| 222 | #define __NR_umount 159 /* Common */ | ||
| 223 | #define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */ | ||
| 224 | #define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */ | ||
| 225 | #define __NR_getdomainname 162 /* SunOS Specific */ | ||
| 226 | #define __NR_setdomainname 163 /* Common */ | ||
| 227 | #ifndef __32bit_syscall_numbers__ | ||
| 228 | #define __NR_utrap_install 164 /* SYSV ABI/v9 required */ | ||
| 229 | #endif | ||
| 230 | #define __NR_quotactl 165 /* Common */ | ||
| 231 | #define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */ | ||
| 232 | #define __NR_mount 167 /* Common */ | ||
| 233 | #define __NR_ustat 168 /* Common */ | ||
| 234 | #define __NR_setxattr 169 /* SunOS: semsys */ | ||
| 235 | #define __NR_lsetxattr 170 /* SunOS: msgsys */ | ||
| 236 | #define __NR_fsetxattr 171 /* SunOS: shmsys */ | ||
| 237 | #define __NR_getxattr 172 /* SunOS: auditsys */ | ||
| 238 | #define __NR_lgetxattr 173 /* SunOS: rfssys */ | ||
| 239 | #define __NR_getdents 174 /* Common */ | ||
| 240 | #define __NR_setsid 175 /* Common */ | ||
| 241 | #define __NR_fchdir 176 /* Common */ | ||
| 242 | #define __NR_fgetxattr 177 /* SunOS: fchroot */ | ||
| 243 | #define __NR_listxattr 178 /* SunOS: vpixsys */ | ||
| 244 | #define __NR_llistxattr 179 /* SunOS: aioread */ | ||
| 245 | #define __NR_flistxattr 180 /* SunOS: aiowrite */ | ||
| 246 | #define __NR_removexattr 181 /* SunOS: aiowait */ | ||
| 247 | #define __NR_lremovexattr 182 /* SunOS: aiocancel */ | ||
| 248 | #define __NR_sigpending 183 /* Common */ | ||
| 249 | #define __NR_query_module 184 /* Linux Specific */ | ||
| 250 | #define __NR_setpgid 185 /* Common */ | ||
| 251 | #define __NR_fremovexattr 186 /* SunOS: pathconf */ | ||
| 252 | #define __NR_tkill 187 /* SunOS: fpathconf */ | ||
| 253 | #define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */ | ||
| 254 | #define __NR_uname 189 /* Linux Specific */ | ||
| 255 | #define __NR_init_module 190 /* Linux Specific */ | ||
| 256 | #define __NR_personality 191 /* Linux Specific */ | ||
| 257 | #define __NR_remap_file_pages 192 /* Linux Specific */ | ||
| 258 | #define __NR_epoll_create 193 /* Linux Specific */ | ||
| 259 | #define __NR_epoll_ctl 194 /* Linux Specific */ | ||
| 260 | #define __NR_epoll_wait 195 /* Linux Specific */ | ||
| 261 | #define __NR_ioprio_set 196 /* Linux Specific */ | ||
| 262 | #define __NR_getppid 197 /* Linux Specific */ | ||
| 263 | #define __NR_sigaction 198 /* Linux Specific */ | ||
| 264 | #define __NR_sgetmask 199 /* Linux Specific */ | ||
| 265 | #define __NR_ssetmask 200 /* Linux Specific */ | ||
| 266 | #define __NR_sigsuspend 201 /* Linux Specific */ | ||
| 267 | #define __NR_oldlstat 202 /* Linux Specific */ | ||
| 268 | #define __NR_uselib 203 /* Linux Specific */ | ||
| 269 | #define __NR_readdir 204 /* Linux Specific */ | ||
| 270 | #define __NR_readahead 205 /* Linux Specific */ | ||
| 271 | #define __NR_socketcall 206 /* Linux Specific */ | ||
| 272 | #define __NR_syslog 207 /* Linux Specific */ | ||
| 273 | #define __NR_lookup_dcookie 208 /* Linux Specific */ | ||
| 274 | #define __NR_fadvise64 209 /* Linux Specific */ | ||
| 275 | #define __NR_fadvise64_64 210 /* Linux Specific */ | ||
| 276 | #define __NR_tgkill 211 /* Linux Specific */ | ||
| 277 | #define __NR_waitpid 212 /* Linux Specific */ | ||
| 278 | #define __NR_swapoff 213 /* Linux Specific */ | ||
| 279 | #define __NR_sysinfo 214 /* Linux Specific */ | ||
| 280 | #define __NR_ipc 215 /* Linux Specific */ | ||
| 281 | #define __NR_sigreturn 216 /* Linux Specific */ | ||
| 282 | #define __NR_clone 217 /* Linux Specific */ | ||
| 283 | #define __NR_ioprio_get 218 /* Linux Specific */ | ||
| 284 | #define __NR_adjtimex 219 /* Linux Specific */ | ||
| 285 | #define __NR_sigprocmask 220 /* Linux Specific */ | ||
| 286 | #define __NR_create_module 221 /* Linux Specific */ | ||
| 287 | #define __NR_delete_module 222 /* Linux Specific */ | ||
| 288 | #define __NR_get_kernel_syms 223 /* Linux Specific */ | ||
| 289 | #define __NR_getpgid 224 /* Linux Specific */ | ||
| 290 | #define __NR_bdflush 225 /* Linux Specific */ | ||
| 291 | #define __NR_sysfs 226 /* Linux Specific */ | ||
| 292 | #define __NR_afs_syscall 227 /* Linux Specific */ | ||
| 293 | #define __NR_setfsuid 228 /* Linux Specific */ | ||
| 294 | #define __NR_setfsgid 229 /* Linux Specific */ | ||
| 295 | #define __NR__newselect 230 /* Linux Specific */ | ||
| 296 | #ifdef __32bit_syscall_numbers__ | 19 | #ifdef __32bit_syscall_numbers__ |
| 297 | #define __NR_time 231 /* Linux Specific */ | ||
| 298 | #else | 20 | #else |
| 299 | #ifdef __KERNEL__ | ||
| 300 | #define __NR_time 231 /* Linux sparc32 */ | 21 | #define __NR_time 231 /* Linux sparc32 */ |
| 301 | #endif | 22 | #endif |
| 302 | #endif | ||
| 303 | #define __NR_splice 232 /* Linux Specific */ | ||
| 304 | #define __NR_stime 233 /* Linux Specific */ | ||
| 305 | #define __NR_statfs64 234 /* Linux Specific */ | ||
| 306 | #define __NR_fstatfs64 235 /* Linux Specific */ | ||
| 307 | #define __NR__llseek 236 /* Linux Specific */ | ||
| 308 | #define __NR_mlock 237 | ||
| 309 | #define __NR_munlock 238 | ||
| 310 | #define __NR_mlockall 239 | ||
| 311 | #define __NR_munlockall 240 | ||
| 312 | #define __NR_sched_setparam 241 | ||
| 313 | #define __NR_sched_getparam 242 | ||
| 314 | #define __NR_sched_setscheduler 243 | ||
| 315 | #define __NR_sched_getscheduler 244 | ||
| 316 | #define __NR_sched_yield 245 | ||
| 317 | #define __NR_sched_get_priority_max 246 | ||
| 318 | #define __NR_sched_get_priority_min 247 | ||
| 319 | #define __NR_sched_rr_get_interval 248 | ||
| 320 | #define __NR_nanosleep 249 | ||
| 321 | #define __NR_mremap 250 | ||
| 322 | #define __NR__sysctl 251 | ||
| 323 | #define __NR_getsid 252 | ||
| 324 | #define __NR_fdatasync 253 | ||
| 325 | #define __NR_nfsservctl 254 | ||
| 326 | #define __NR_sync_file_range 255 | ||
| 327 | #define __NR_clock_settime 256 | ||
| 328 | #define __NR_clock_gettime 257 | ||
| 329 | #define __NR_clock_getres 258 | ||
| 330 | #define __NR_clock_nanosleep 259 | ||
| 331 | #define __NR_sched_getaffinity 260 | ||
| 332 | #define __NR_sched_setaffinity 261 | ||
| 333 | #define __NR_timer_settime 262 | ||
| 334 | #define __NR_timer_gettime 263 | ||
| 335 | #define __NR_timer_getoverrun 264 | ||
| 336 | #define __NR_timer_delete 265 | ||
| 337 | #define __NR_timer_create 266 | ||
| 338 | /* #define __NR_vserver 267 Reserved for VSERVER */ | ||
| 339 | #define __NR_io_setup 268 | ||
| 340 | #define __NR_io_destroy 269 | ||
| 341 | #define __NR_io_submit 270 | ||
| 342 | #define __NR_io_cancel 271 | ||
| 343 | #define __NR_io_getevents 272 | ||
| 344 | #define __NR_mq_open 273 | ||
| 345 | #define __NR_mq_unlink 274 | ||
| 346 | #define __NR_mq_timedsend 275 | ||
| 347 | #define __NR_mq_timedreceive 276 | ||
| 348 | #define __NR_mq_notify 277 | ||
| 349 | #define __NR_mq_getsetattr 278 | ||
| 350 | #define __NR_waitid 279 | ||
| 351 | #define __NR_tee 280 | ||
| 352 | #define __NR_add_key 281 | ||
| 353 | #define __NR_request_key 282 | ||
| 354 | #define __NR_keyctl 283 | ||
| 355 | #define __NR_openat 284 | ||
| 356 | #define __NR_mkdirat 285 | ||
| 357 | #define __NR_mknodat 286 | ||
| 358 | #define __NR_fchownat 287 | ||
| 359 | #define __NR_futimesat 288 | ||
| 360 | #define __NR_fstatat64 289 | ||
| 361 | #define __NR_unlinkat 290 | ||
| 362 | #define __NR_renameat 291 | ||
| 363 | #define __NR_linkat 292 | ||
| 364 | #define __NR_symlinkat 293 | ||
| 365 | #define __NR_readlinkat 294 | ||
| 366 | #define __NR_fchmodat 295 | ||
| 367 | #define __NR_faccessat 296 | ||
| 368 | #define __NR_pselect6 297 | ||
| 369 | #define __NR_ppoll 298 | ||
| 370 | #define __NR_unshare 299 | ||
| 371 | #define __NR_set_robust_list 300 | ||
| 372 | #define __NR_get_robust_list 301 | ||
| 373 | #define __NR_migrate_pages 302 | ||
| 374 | #define __NR_mbind 303 | ||
| 375 | #define __NR_get_mempolicy 304 | ||
| 376 | #define __NR_set_mempolicy 305 | ||
| 377 | #define __NR_kexec_load 306 | ||
| 378 | #define __NR_move_pages 307 | ||
| 379 | #define __NR_getcpu 308 | ||
| 380 | #define __NR_epoll_pwait 309 | ||
| 381 | #define __NR_utimensat 310 | ||
| 382 | #define __NR_signalfd 311 | ||
| 383 | #define __NR_timerfd_create 312 | ||
| 384 | #define __NR_eventfd 313 | ||
| 385 | #define __NR_fallocate 314 | ||
| 386 | #define __NR_timerfd_settime 315 | ||
| 387 | #define __NR_timerfd_gettime 316 | ||
| 388 | #define __NR_signalfd4 317 | ||
| 389 | #define __NR_eventfd2 318 | ||
| 390 | #define __NR_epoll_create1 319 | ||
| 391 | #define __NR_dup3 320 | ||
| 392 | #define __NR_pipe2 321 | ||
| 393 | #define __NR_inotify_init1 322 | ||
| 394 | #define __NR_accept4 323 | ||
| 395 | #define __NR_preadv 324 | ||
| 396 | #define __NR_pwritev 325 | ||
| 397 | #define __NR_rt_tgsigqueueinfo 326 | ||
| 398 | #define __NR_perf_event_open 327 | ||
| 399 | #define __NR_recvmmsg 328 | ||
| 400 | #define __NR_fanotify_init 329 | ||
| 401 | #define __NR_fanotify_mark 330 | ||
| 402 | #define __NR_prlimit64 331 | ||
| 403 | #define __NR_name_to_handle_at 332 | ||
| 404 | #define __NR_open_by_handle_at 333 | ||
| 405 | #define __NR_clock_adjtime 334 | ||
| 406 | #define __NR_syncfs 335 | ||
| 407 | #define __NR_sendmmsg 336 | ||
| 408 | #define __NR_setns 337 | ||
| 409 | #define __NR_process_vm_readv 338 | ||
| 410 | #define __NR_process_vm_writev 339 | ||
| 411 | |||
| 412 | #define NR_syscalls 340 | ||
| 413 | |||
| 414 | #ifdef __32bit_syscall_numbers__ | ||
| 415 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | ||
| 416 | * it never had the plain ones and there is no value to adding those | ||
| 417 | * old versions into the syscall table. | ||
| 418 | */ | ||
| 419 | #define __IGNORE_setresuid | ||
| 420 | #define __IGNORE_getresuid | ||
| 421 | #define __IGNORE_setresgid | ||
| 422 | #define __IGNORE_getresgid | ||
| 423 | #endif | ||
| 424 | |||
| 425 | #ifdef __KERNEL__ | ||
| 426 | #define __ARCH_WANT_OLD_READDIR | 23 | #define __ARCH_WANT_OLD_READDIR |
| 427 | #define __ARCH_WANT_STAT64 | 24 | #define __ARCH_WANT_STAT64 |
| 428 | #define __ARCH_WANT_SYS_ALARM | 25 | #define __ARCH_WANT_SYS_ALARM |
| @@ -458,5 +55,4 @@ | |||
| 458 | */ | 55 | */ |
| 459 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | 56 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") |
| 460 | 57 | ||
| 461 | #endif /* __KERNEL__ */ | ||
| 462 | #endif /* _SPARC_UNISTD_H */ | 58 | #endif /* _SPARC_UNISTD_H */ |
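The __NR_* numbers exported through the uapi header are what user space passes to the generic syscall(2) wrapper. A minimal sketch using __NR_gettid (143 in the table above), which historically had no dedicated libc wrapper:

#include <stdio.h>
#include <sys/syscall.h>	/* pulls in the __NR_* numbers via <asm/unistd.h> */
#include <unistd.h>

int main(void)
{
	long tid = syscall(__NR_gettid);	/* invoke a system call by number */

	printf("tid = %ld\n", tid);
	return 0;
}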
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index 7518ad286963..ce175aff71b7 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild | |||
| @@ -3,3 +3,49 @@ | |||
| 3 | 3 | ||
| 4 | include include/uapi/asm-generic/Kbuild.asm | 4 | include include/uapi/asm-generic/Kbuild.asm |
| 5 | 5 | ||
| 6 | header-y += apc.h | ||
| 7 | header-y += asi.h | ||
| 8 | header-y += auxvec.h | ||
| 9 | header-y += bitsperlong.h | ||
| 10 | header-y += byteorder.h | ||
| 11 | header-y += display7seg.h | ||
| 12 | header-y += envctrl.h | ||
| 13 | header-y += errno.h | ||
| 14 | header-y += fbio.h | ||
| 15 | header-y += fcntl.h | ||
| 16 | header-y += ioctl.h | ||
| 17 | header-y += ioctls.h | ||
| 18 | header-y += ipcbuf.h | ||
| 19 | header-y += jsflash.h | ||
| 20 | header-y += kvm_para.h | ||
| 21 | header-y += mman.h | ||
| 22 | header-y += msgbuf.h | ||
| 23 | header-y += openpromio.h | ||
| 24 | header-y += param.h | ||
| 25 | header-y += perfctr.h | ||
| 26 | header-y += poll.h | ||
| 27 | header-y += posix_types.h | ||
| 28 | header-y += psr.h | ||
| 29 | header-y += psrcompat.h | ||
| 30 | header-y += pstate.h | ||
| 31 | header-y += ptrace.h | ||
| 32 | header-y += resource.h | ||
| 33 | header-y += sembuf.h | ||
| 34 | header-y += setup.h | ||
| 35 | header-y += shmbuf.h | ||
| 36 | header-y += sigcontext.h | ||
| 37 | header-y += siginfo.h | ||
| 38 | header-y += signal.h | ||
| 39 | header-y += socket.h | ||
| 40 | header-y += sockios.h | ||
| 41 | header-y += stat.h | ||
| 42 | header-y += statfs.h | ||
| 43 | header-y += swab.h | ||
| 44 | header-y += termbits.h | ||
| 45 | header-y += termios.h | ||
| 46 | header-y += traps.h | ||
| 47 | header-y += types.h | ||
| 48 | header-y += uctx.h | ||
| 49 | header-y += unistd.h | ||
| 50 | header-y += utrap.h | ||
| 51 | header-y += watchdog.h | ||
diff --git a/arch/sparc/include/asm/apc.h b/arch/sparc/include/uapi/asm/apc.h index 24e9a7d4d97e..24e9a7d4d97e 100644 --- a/arch/sparc/include/asm/apc.h +++ b/arch/sparc/include/uapi/asm/apc.h | |||
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/uapi/asm/asi.h index aace6f313716..aace6f313716 100644 --- a/arch/sparc/include/asm/asi.h +++ b/arch/sparc/include/uapi/asm/asi.h | |||
diff --git a/arch/sparc/include/asm/auxvec.h b/arch/sparc/include/uapi/asm/auxvec.h index ad6f360261f6..ad6f360261f6 100644 --- a/arch/sparc/include/asm/auxvec.h +++ b/arch/sparc/include/uapi/asm/auxvec.h | |||
diff --git a/arch/sparc/include/asm/bitsperlong.h b/arch/sparc/include/uapi/asm/bitsperlong.h index 40dcaa3aaa56..40dcaa3aaa56 100644 --- a/arch/sparc/include/asm/bitsperlong.h +++ b/arch/sparc/include/uapi/asm/bitsperlong.h | |||
diff --git a/arch/sparc/include/asm/byteorder.h b/arch/sparc/include/uapi/asm/byteorder.h index ccc1b6b7de6c..ccc1b6b7de6c 100644 --- a/arch/sparc/include/asm/byteorder.h +++ b/arch/sparc/include/uapi/asm/byteorder.h | |||
diff --git a/arch/sparc/include/asm/display7seg.h b/arch/sparc/include/uapi/asm/display7seg.h index 86d4a901df24..86d4a901df24 100644 --- a/arch/sparc/include/asm/display7seg.h +++ b/arch/sparc/include/uapi/asm/display7seg.h | |||
diff --git a/arch/sparc/include/asm/envctrl.h b/arch/sparc/include/uapi/asm/envctrl.h index 624fa7e2da8e..624fa7e2da8e 100644 --- a/arch/sparc/include/asm/envctrl.h +++ b/arch/sparc/include/uapi/asm/envctrl.h | |||
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/uapi/asm/errno.h index c351aba997b7..c351aba997b7 100644 --- a/arch/sparc/include/asm/errno.h +++ b/arch/sparc/include/uapi/asm/errno.h | |||
diff --git a/arch/sparc/include/uapi/asm/fbio.h b/arch/sparc/include/uapi/asm/fbio.h new file mode 100644 index 000000000000..d6cea07afb61 --- /dev/null +++ b/arch/sparc/include/uapi/asm/fbio.h | |||
| @@ -0,0 +1,259 @@ | |||
| 1 | #ifndef _UAPI__LINUX_FBIO_H | ||
| 2 | #define _UAPI__LINUX_FBIO_H | ||
| 3 | |||
| 4 | #include <linux/compiler.h> | ||
| 5 | #include <linux/types.h> | ||
| 6 | |||
| 7 | /* Constants used for fbio SunOS compatibility */ | ||
| 8 | /* (C) 1996 Miguel de Icaza */ | ||
| 9 | |||
| 10 | /* Frame buffer types */ | ||
| 11 | #define FBTYPE_NOTYPE -1 | ||
| 12 | #define FBTYPE_SUN1BW 0 /* mono */ | ||
| 13 | #define FBTYPE_SUN1COLOR 1 | ||
| 14 | #define FBTYPE_SUN2BW 2 | ||
| 15 | #define FBTYPE_SUN2COLOR 3 | ||
| 16 | #define FBTYPE_SUN2GP 4 | ||
| 17 | #define FBTYPE_SUN5COLOR 5 | ||
| 18 | #define FBTYPE_SUN3COLOR 6 | ||
| 19 | #define FBTYPE_MEMCOLOR 7 | ||
| 20 | #define FBTYPE_SUN4COLOR 8 | ||
| 21 | |||
| 22 | #define FBTYPE_NOTSUN1 9 | ||
| 23 | #define FBTYPE_NOTSUN2 10 | ||
| 24 | #define FBTYPE_NOTSUN3 11 | ||
| 25 | |||
| 26 | #define FBTYPE_SUNFAST_COLOR 12 /* cg6 */ | ||
| 27 | #define FBTYPE_SUNROP_COLOR 13 | ||
| 28 | #define FBTYPE_SUNFB_VIDEO 14 | ||
| 29 | #define FBTYPE_SUNGIFB 15 | ||
| 30 | #define FBTYPE_SUNGPLAS 16 | ||
| 31 | #define FBTYPE_SUNGP3 17 | ||
| 32 | #define FBTYPE_SUNGT 18 | ||
| 33 | #define FBTYPE_SUNLEO 19 /* zx Leo card */ | ||
| 34 | #define FBTYPE_MDICOLOR 20 /* cg14 */ | ||
| 35 | #define FBTYPE_TCXCOLOR 21 /* SUNW,tcx card */ | ||
| 36 | |||
| 37 | #define FBTYPE_LASTPLUSONE 21 /* This is not last + 1 in fact... */ | ||
| 38 | |||
| 39 | /* Does not seem to be listed in the Sun file either */ | ||
| 40 | #define FBTYPE_CREATOR 22 | ||
| 41 | #define FBTYPE_PCI_IGA1682 23 | ||
| 42 | #define FBTYPE_P9100COLOR 24 | ||
| 43 | |||
| 44 | #define FBTYPE_PCI_GENERIC 1000 | ||
| 45 | #define FBTYPE_PCI_MACH64 1001 | ||
| 46 | |||
| 47 | /* fbio ioctls */ | ||
| 48 | /* Returned by FBIOGTYPE */ | ||
| 49 | struct fbtype { | ||
| 50 | int fb_type; /* fb type, see above */ | ||
| 51 | int fb_height; /* pixels */ | ||
| 52 | int fb_width; /* pixels */ | ||
| 53 | int fb_depth; | ||
| 54 | int fb_cmsize; /* color map entries */ | ||
| 55 | int fb_size; /* fb size in bytes */ | ||
| 56 | }; | ||
| 57 | #define FBIOGTYPE _IOR('F', 0, struct fbtype) | ||
| 58 | |||
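FBIOGTYPE fills in the struct fbtype above with the geometry of a Sun frame buffer. A user-space sketch, sparc-only since it relies on the exported <asm/fbio.h>; the /dev/fb0 node is only an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/fbio.h>	/* the header being added here; sparc only */

int main(void)
{
	struct fbtype fb;
	int fd = open("/dev/fb0", O_RDONLY);	/* hypothetical device node */

	if (fd < 0 || ioctl(fd, FBIOGTYPE, &fb) < 0)
		return 1;
	printf("type %d, %dx%d, depth %d, %d bytes\n",
	       fb.fb_type, fb.fb_width, fb.fb_height, fb.fb_depth, fb.fb_size);
	return 0;
}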
| 59 | struct fbcmap { | ||
| 60 | int index; /* first element (0 origin) */ | ||
| 61 | int count; | ||
| 62 | unsigned char __user *red; | ||
| 63 | unsigned char __user *green; | ||
| 64 | unsigned char __user *blue; | ||
| 65 | }; | ||
| 66 | |||
| 67 | #ifndef __KERNEL__ | ||
| 68 | #define FBIOPUTCMAP _IOW('F', 3, struct fbcmap) | ||
| 69 | #define FBIOGETCMAP _IOW('F', 4, struct fbcmap) | ||
| 70 | #endif | ||
| 71 | |||
| 72 | /* # of device specific values */ | ||
| 73 | #define FB_ATTR_NDEVSPECIFIC 8 | ||
| 74 | /* # of possible emulations */ | ||
| 75 | #define FB_ATTR_NEMUTYPES 4 | ||
| 76 | |||
| 77 | struct fbsattr { | ||
| 78 | int flags; | ||
| 79 | int emu_type; /* -1 if none */ | ||
| 80 | int dev_specific[FB_ATTR_NDEVSPECIFIC]; | ||
| 81 | }; | ||
| 82 | |||
| 83 | struct fbgattr { | ||
| 84 | int real_type; /* real frame buffer type */ | ||
| 85 | int owner; /* unknown */ | ||
| 86 | struct fbtype fbtype; /* real frame buffer fbtype */ | ||
| 87 | struct fbsattr sattr; | ||
| 88 | int emu_types[FB_ATTR_NEMUTYPES]; /* supported emulations */ | ||
| 89 | }; | ||
| 90 | #define FBIOSATTR _IOW('F', 5, struct fbgattr) /* Unsupported: */ | ||
| 91 | #define FBIOGATTR _IOR('F', 6, struct fbgattr) /* supported */ | ||
| 92 | |||
| 93 | #define FBIOSVIDEO _IOW('F', 7, int) | ||
| 94 | #define FBIOGVIDEO _IOR('F', 8, int) | ||
| 95 | |||
| 96 | struct fbcursor { | ||
| 97 | short set; /* what to set, choose from the list above */ | ||
| 98 | short enable; /* cursor on/off */ | ||
| 99 | struct fbcurpos pos; /* cursor position */ | ||
| 100 | struct fbcurpos hot; /* cursor hot spot */ | ||
| 101 | struct fbcmap cmap; /* color map info */ | ||
| 102 | struct fbcurpos size; /* cursor bit map size */ | ||
| 103 | char __user *image; /* cursor image bits */ | ||
| 104 | char __user *mask; /* cursor mask bits */ | ||
| 105 | }; | ||
| 106 | |||
| 107 | /* set/get cursor attributes/shape */ | ||
| 108 | #define FBIOSCURSOR _IOW('F', 24, struct fbcursor) | ||
| 109 | #define FBIOGCURSOR _IOWR('F', 25, struct fbcursor) | ||
| 110 | |||
| 111 | /* set/get cursor position */ | ||
| 112 | #define FBIOSCURPOS _IOW('F', 26, struct fbcurpos) | ||
| 113 | #define FBIOGCURPOS _IOW('F', 27, struct fbcurpos) | ||
| 114 | |||
| 115 | /* get max cursor size */ | ||
| 116 | #define FBIOGCURMAX _IOR('F', 28, struct fbcurpos) | ||
| 117 | |||
| 118 | /* wid manipulation */ | ||
| 119 | struct fb_wid_alloc { | ||
| 120 | #define FB_WID_SHARED_8 0 | ||
| 121 | #define FB_WID_SHARED_24 1 | ||
| 122 | #define FB_WID_DBL_8 2 | ||
| 123 | #define FB_WID_DBL_24 3 | ||
| 124 | __u32 wa_type; | ||
| 125 | __s32 wa_index; /* Set on return */ | ||
| 126 | __u32 wa_count; | ||
| 127 | }; | ||
| 128 | struct fb_wid_item { | ||
| 129 | __u32 wi_type; | ||
| 130 | __s32 wi_index; | ||
| 131 | __u32 wi_attrs; | ||
| 132 | __u32 wi_values[32]; | ||
| 133 | }; | ||
| 134 | struct fb_wid_list { | ||
| 135 | __u32 wl_flags; | ||
| 136 | __u32 wl_count; | ||
| 137 | struct fb_wid_item *wl_list; | ||
| 138 | }; | ||
| 139 | |||
| 140 | #define FBIO_WID_ALLOC _IOWR('F', 30, struct fb_wid_alloc) | ||
| 141 | #define FBIO_WID_FREE _IOW('F', 31, struct fb_wid_alloc) | ||
| 142 | #define FBIO_WID_PUT _IOW('F', 32, struct fb_wid_list) | ||
| 143 | #define FBIO_WID_GET _IOWR('F', 33, struct fb_wid_list) | ||
| 144 | |||
| 145 | /* Creator ioctls */ | ||
| 146 | #define FFB_IOCTL ('F'<<8) | ||
| 147 | #define FFB_SYS_INFO (FFB_IOCTL|80) | ||
| 148 | #define FFB_CLUTREAD (FFB_IOCTL|81) | ||
| 149 | #define FFB_CLUTPOST (FFB_IOCTL|82) | ||
| 150 | #define FFB_SETDIAGMODE (FFB_IOCTL|83) | ||
| 151 | #define FFB_GETMONITORID (FFB_IOCTL|84) | ||
| 152 | #define FFB_GETVIDEOMODE (FFB_IOCTL|85) | ||
| 153 | #define FFB_SETVIDEOMODE (FFB_IOCTL|86) | ||
| 154 | #define FFB_SETSERVER (FFB_IOCTL|87) | ||
| 155 | #define FFB_SETOVCTL (FFB_IOCTL|88) | ||
| 156 | #define FFB_GETOVCTL (FFB_IOCTL|89) | ||
| 157 | #define FFB_GETSAXNUM (FFB_IOCTL|90) | ||
| 158 | #define FFB_FBDEBUG (FFB_IOCTL|91) | ||
| 159 | |||
| 160 | /* Cg14 ioctls */ | ||
| 161 | #define MDI_IOCTL ('M'<<8) | ||
| 162 | #define MDI_RESET (MDI_IOCTL|1) | ||
| 163 | #define MDI_GET_CFGINFO (MDI_IOCTL|2) | ||
| 164 | #define MDI_SET_PIXELMODE (MDI_IOCTL|3) | ||
| 165 | # define MDI_32_PIX 32 | ||
| 166 | # define MDI_16_PIX 16 | ||
| 167 | # define MDI_8_PIX 8 | ||
| 168 | |||
| 169 | struct mdi_cfginfo { | ||
| 170 | int mdi_ncluts; /* Number of implemented CLUTs in this MDI */ | ||
| 171 | int mdi_type; /* FBTYPE name */ | ||
| 172 | int mdi_height; /* height */ | ||
| 173 | int mdi_width; /* width */ | ||
| 174 | int mdi_size; /* available ram */ | ||
| 175 | int mdi_mode; /* 8bpp, 16bpp or 32bpp */ | ||
| 176 | int mdi_pixfreq; /* pixel clock (from PROM) */ | ||
| 177 | }; | ||
| 178 | |||
| 179 | /* SparcLinux-specific ioctl for the MDI; it should be replaced by | ||
| 180 | * the SET_XLUT/SET_CLUTn ioctls instead. | ||
| 181 | */ | ||
| 182 | #define MDI_CLEAR_XLUT (MDI_IOCTL|9) | ||
| 183 | |||
| 184 | /* leo & ffb ioctls */ | ||
| 185 | struct fb_clut_alloc { | ||
| 186 | __u32 clutid; /* Set on return */ | ||
| 187 | __u32 flag; | ||
| 188 | __u32 index; | ||
| 189 | }; | ||
| 190 | |||
| 191 | struct fb_clut { | ||
| 192 | #define FB_CLUT_WAIT 0x00000001 /* Not yet implemented */ | ||
| 193 | __u32 flag; | ||
| 194 | __u32 clutid; | ||
| 195 | __u32 offset; | ||
| 196 | __u32 count; | ||
| 197 | char * red; | ||
| 198 | char * green; | ||
| 199 | char * blue; | ||
| 200 | }; | ||
| 201 | |||
| 202 | struct fb_clut32 { | ||
| 203 | __u32 flag; | ||
| 204 | __u32 clutid; | ||
| 205 | __u32 offset; | ||
| 206 | __u32 count; | ||
| 207 | __u32 red; | ||
| 208 | __u32 green; | ||
| 209 | __u32 blue; | ||
| 210 | }; | ||
| 211 | |||
| 212 | #define LEO_CLUTALLOC _IOWR('L', 53, struct fb_clut_alloc) | ||
| 213 | #define LEO_CLUTFREE _IOW('L', 54, struct fb_clut_alloc) | ||
| 214 | #define LEO_CLUTREAD _IOW('L', 55, struct fb_clut) | ||
| 215 | #define LEO_CLUTPOST _IOW('L', 56, struct fb_clut) | ||
| 216 | #define LEO_SETGAMMA _IOW('L', 68, int) /* Not yet implemented */ | ||
| 217 | #define LEO_GETGAMMA _IOR('L', 69, int) /* Not yet implemented */ | ||
| 218 | |||
| 219 | |||
| 220 | /* These are exported to userland for applications to use */ | ||
| 221 | /* Mappable offsets for the cg14: control registers */ | ||
| 222 | #define MDI_DIRECT_MAP 0x10000000 | ||
| 223 | #define MDI_CTLREG_MAP 0x20000000 | ||
| 224 | #define MDI_CURSOR_MAP 0x30000000 | ||
| 225 | #define MDI_SHDW_VRT_MAP 0x40000000 | ||
| 226 | |||
| 227 | /* Mappable offsets for the cg14: frame buffer resolutions */ | ||
| 228 | /* 32 bits */ | ||
| 229 | #define MDI_CHUNKY_XBGR_MAP 0x50000000 | ||
| 230 | #define MDI_CHUNKY_BGR_MAP 0x60000000 | ||
| 231 | |||
| 232 | /* 16 bits */ | ||
| 233 | #define MDI_PLANAR_X16_MAP 0x70000000 | ||
| 234 | #define MDI_PLANAR_C16_MAP 0x80000000 | ||
| 235 | |||
| 236 | /* 8 bit is done as CG3 MMAP offset */ | ||
| 237 | /* 32 bits, planar */ | ||
| 238 | #define MDI_PLANAR_X32_MAP 0x90000000 | ||
| 239 | #define MDI_PLANAR_B32_MAP 0xa0000000 | ||
| 240 | #define MDI_PLANAR_G32_MAP 0xb0000000 | ||
| 241 | #define MDI_PLANAR_R32_MAP 0xc0000000 | ||
| 242 | |||
| 243 | /* Mappable offsets on leo */ | ||
| 244 | #define LEO_SS0_MAP 0x00000000 | ||
| 245 | #define LEO_LC_SS0_USR_MAP 0x00800000 | ||
| 246 | #define LEO_LD_SS0_MAP 0x00801000 | ||
| 247 | #define LEO_LX_CURSOR_MAP 0x00802000 | ||
| 248 | #define LEO_SS1_MAP 0x00803000 | ||
| 249 | #define LEO_LC_SS1_USR_MAP 0x01003000 | ||
| 250 | #define LEO_LD_SS1_MAP 0x01004000 | ||
| 251 | #define LEO_UNK_MAP 0x01005000 | ||
| 252 | #define LEO_LX_KRN_MAP 0x01006000 | ||
| 253 | #define LEO_LC_SS0_KRN_MAP 0x01007000 | ||
| 254 | #define LEO_LC_SS1_KRN_MAP 0x01008000 | ||
| 255 | #define LEO_LD_GBL_MAP 0x01009000 | ||
| 256 | #define LEO_UNK2_MAP 0x0100a000 | ||
| 257 | |||
| 258 | |||
| 259 | #endif /* _UAPI__LINUX_FBIO_H */ | ||
diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/uapi/asm/fcntl.h index d0b83f66f356..d0b83f66f356 100644 --- a/arch/sparc/include/asm/fcntl.h +++ b/arch/sparc/include/uapi/asm/fcntl.h | |||
diff --git a/arch/sparc/include/asm/ioctl.h b/arch/sparc/include/uapi/asm/ioctl.h index 7d6bd51321b9..7d6bd51321b9 100644 --- a/arch/sparc/include/asm/ioctl.h +++ b/arch/sparc/include/uapi/asm/ioctl.h | |||
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h new file mode 100644 index 000000000000..9155f7041d44 --- /dev/null +++ b/arch/sparc/include/uapi/asm/ioctls.h | |||
| @@ -0,0 +1,131 @@ | |||
| 1 | #ifndef _UAPI_ASM_SPARC_IOCTLS_H | ||
| 2 | #define _UAPI_ASM_SPARC_IOCTLS_H | ||
| 3 | |||
| 4 | #include <asm/ioctl.h> | ||
| 5 | |||
| 6 | /* Big T */ | ||
| 7 | #define TCGETA _IOR('T', 1, struct termio) | ||
| 8 | #define TCSETA _IOW('T', 2, struct termio) | ||
| 9 | #define TCSETAW _IOW('T', 3, struct termio) | ||
| 10 | #define TCSETAF _IOW('T', 4, struct termio) | ||
| 11 | #define TCSBRK _IO('T', 5) | ||
| 12 | #define TCXONC _IO('T', 6) | ||
| 13 | #define TCFLSH _IO('T', 7) | ||
| 14 | #define TCGETS _IOR('T', 8, struct termios) | ||
| 15 | #define TCSETS _IOW('T', 9, struct termios) | ||
| 16 | #define TCSETSW _IOW('T', 10, struct termios) | ||
| 17 | #define TCSETSF _IOW('T', 11, struct termios) | ||
| 18 | #define TCGETS2 _IOR('T', 12, struct termios2) | ||
| 19 | #define TCSETS2 _IOW('T', 13, struct termios2) | ||
| 20 | #define TCSETSW2 _IOW('T', 14, struct termios2) | ||
| 21 | #define TCSETSF2 _IOW('T', 15, struct termios2) | ||
| 22 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | ||
| 23 | #define TIOCVHANGUP _IO('T', 0x37) | ||
| 24 | |||
| 25 | /* Note that all the ioctls that are not available in Linux have a | ||
| 26 | * double underscore on the front, to keep programs from assuming | ||
| 27 | * we support these ioctls under Linux (autoconfiguration stuff). | ||
| 28 | */ | ||
| 29 | /* Little t */ | ||
| 30 | #define TIOCGETD _IOR('t', 0, int) | ||
| 31 | #define TIOCSETD _IOW('t', 1, int) | ||
| 32 | #define __TIOCHPCL _IO('t', 2) /* SunOS Specific */ | ||
| 33 | #define __TIOCMODG _IOR('t', 3, int) /* SunOS Specific */ | ||
| 34 | #define __TIOCMODS _IOW('t', 4, int) /* SunOS Specific */ | ||
| 35 | #define __TIOCGETP _IOR('t', 8, struct sgttyb) /* SunOS Specific */ | ||
| 36 | #define __TIOCSETP _IOW('t', 9, struct sgttyb) /* SunOS Specific */ | ||
| 37 | #define __TIOCSETN _IOW('t', 10, struct sgttyb) /* SunOS Specific */ | ||
| 38 | #define TIOCEXCL _IO('t', 13) | ||
| 39 | #define TIOCNXCL _IO('t', 14) | ||
| 40 | #define __TIOCFLUSH _IOW('t', 16, int) /* SunOS Specific */ | ||
| 41 | #define __TIOCSETC _IOW('t', 17, struct tchars) /* SunOS Specific */ | ||
| 42 | #define __TIOCGETC _IOR('t', 18, struct tchars) /* SunOS Specific */ | ||
| 43 | #define __TIOCTCNTL _IOW('t', 32, int) /* SunOS Specific */ | ||
| 44 | #define __TIOCSIGNAL _IOW('t', 33, int) /* SunOS Specific */ | ||
| 45 | #define __TIOCSETX _IOW('t', 34, int) /* SunOS Specific */ | ||
| 46 | #define __TIOCGETX _IOR('t', 35, int) /* SunOS Specific */ | ||
| 47 | #define TIOCCONS _IO('t', 36) | ||
| 48 | #define TIOCGSOFTCAR _IOR('t', 100, int) | ||
| 49 | #define TIOCSSOFTCAR _IOW('t', 101, int) | ||
| 50 | #define __TIOCUCNTL _IOW('t', 102, int) /* SunOS Specific */ | ||
| 51 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) | ||
| 52 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) | ||
| 53 | #define __TIOCREMOTE _IOW('t', 105, int) /* SunOS Specific */ | ||
| 54 | #define TIOCMGET _IOR('t', 106, int) | ||
| 55 | #define TIOCMBIC _IOW('t', 107, int) | ||
| 56 | #define TIOCMBIS _IOW('t', 108, int) | ||
| 57 | #define TIOCMSET _IOW('t', 109, int) | ||
| 58 | #define TIOCSTART _IO('t', 110) | ||
| 59 | #define TIOCSTOP _IO('t', 111) | ||
| 60 | #define TIOCPKT _IOW('t', 112, int) | ||
| 61 | #define TIOCNOTTY _IO('t', 113) | ||
| 62 | #define TIOCSTI _IOW('t', 114, char) | ||
| 63 | #define TIOCOUTQ _IOR('t', 115, int) | ||
| 64 | #define __TIOCGLTC _IOR('t', 116, struct ltchars) /* SunOS Specific */ | ||
| 65 | #define __TIOCSLTC _IOW('t', 117, struct ltchars) /* SunOS Specific */ | ||
| 66 | /* 118 is the non-posix setpgrp tty ioctl */ | ||
| 67 | /* 119 is the non-posix getpgrp tty ioctl */ | ||
| 68 | #define __TIOCCDTR _IO('t', 120) /* SunOS Specific */ | ||
| 69 | #define __TIOCSDTR _IO('t', 121) /* SunOS Specific */ | ||
| 70 | #define TIOCCBRK _IO('t', 122) | ||
| 71 | #define TIOCSBRK _IO('t', 123) | ||
| 72 | #define __TIOCLGET _IOW('t', 124, int) /* SunOS Specific */ | ||
| 73 | #define __TIOCLSET _IOW('t', 125, int) /* SunOS Specific */ | ||
| 74 | #define __TIOCLBIC _IOW('t', 126, int) /* SunOS Specific */ | ||
| 75 | #define __TIOCLBIS _IOW('t', 127, int) /* SunOS Specific */ | ||
| 76 | #define __TIOCISPACE _IOR('t', 128, int) /* SunOS Specific */ | ||
| 77 | #define __TIOCISIZE _IOR('t', 129, int) /* SunOS Specific */ | ||
| 78 | #define TIOCSPGRP _IOW('t', 130, int) | ||
| 79 | #define TIOCGPGRP _IOR('t', 131, int) | ||
| 80 | #define TIOCSCTTY _IO('t', 132) | ||
| 81 | #define TIOCGSID _IOR('t', 133, int) | ||
| 82 | /* Get minor device of a pty master's FD -- Solaris equiv is ISPTM */ | ||
| 83 | #define TIOCGPTN _IOR('t', 134, unsigned int) /* Get Pty Number */ | ||
| 84 | #define TIOCSPTLCK _IOW('t', 135, int) /* Lock/unlock PTY */ | ||
| 85 | #define TIOCSIG _IOW('t', 136, int) /* Generate signal on Pty slave */ | ||
| 86 | |||
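Of the tty ioctls above, TIOCGWINSZ/TIOCSWINSZ operate on a struct winsize. A minimal sketch that queries the size of the terminal on file descriptor 0:

#include <stdio.h>
#include <sys/ioctl.h>	/* TIOCGWINSZ and struct winsize */

int main(void)
{
	struct winsize ws;

	if (ioctl(0, TIOCGWINSZ, &ws) < 0)
		return 1;
	printf("%hu rows x %hu columns\n", ws.ws_row, ws.ws_col);
	return 0;
}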
| 87 | /* Little f */ | ||
| 88 | #define FIOCLEX _IO('f', 1) | ||
| 89 | #define FIONCLEX _IO('f', 2) | ||
| 90 | #define FIOASYNC _IOW('f', 125, int) | ||
| 91 | #define FIONBIO _IOW('f', 126, int) | ||
| 92 | #define FIONREAD _IOR('f', 127, int) | ||
| 93 | #define TIOCINQ FIONREAD | ||
| 94 | #define FIOQSIZE _IOR('f', 128, loff_t) | ||
| 95 | |||
| 96 | /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it | ||
| 97 | * someday. This is completely bogus, I know... | ||
| 98 | */ | ||
| 99 | #define __TCGETSTAT _IO('T', 200) /* Rutgers specific */ | ||
| 100 | #define __TCSETSTAT _IO('T', 201) /* Rutgers specific */ | ||
| 101 | |||
| 102 | /* Linux specific, no SunOS equivalent. */ | ||
| 103 | #define TIOCLINUX 0x541C | ||
| 104 | #define TIOCGSERIAL 0x541E | ||
| 105 | #define TIOCSSERIAL 0x541F | ||
| 106 | #define TCSBRKP 0x5425 | ||
| 107 | #define TIOCSERCONFIG 0x5453 | ||
| 108 | #define TIOCSERGWILD 0x5454 | ||
| 109 | #define TIOCSERSWILD 0x5455 | ||
| 110 | #define TIOCGLCKTRMIOS 0x5456 | ||
| 111 | #define TIOCSLCKTRMIOS 0x5457 | ||
| 112 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
| 113 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
| 114 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
| 115 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
| 116 | #define TIOCMIWAIT 0x545C /* Wait for change on serial input line(s) */ | ||
| 117 | #define TIOCGICOUNT 0x545D /* Read serial port inline interrupt counts */ | ||
| 118 | |||
| 119 | /* Kernel definitions */ | ||
| 120 | |||
| 121 | /* Used for packet mode */ | ||
| 122 | #define TIOCPKT_DATA 0 | ||
| 123 | #define TIOCPKT_FLUSHREAD 1 | ||
| 124 | #define TIOCPKT_FLUSHWRITE 2 | ||
| 125 | #define TIOCPKT_STOP 4 | ||
| 126 | #define TIOCPKT_START 8 | ||
| 127 | #define TIOCPKT_NOSTOP 16 | ||
| 128 | #define TIOCPKT_DOSTOP 32 | ||
| 129 | #define TIOCPKT_IOCTL 64 | ||
| 130 | |||
| 131 | #endif /* _UAPI_ASM_SPARC_IOCTLS_H */ | ||
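The TIOCPKT_* values describe the status byte that prefixes every read() from a pty master once packet mode has been switched on with the TIOCPKT ioctl. A sketch, assuming a freshly opened pty master:

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	int on = 1;
	int ptm = posix_openpt(O_RDWR | O_NOCTTY);	/* pty master */

	if (ptm < 0)
		return 1;
	/* After this, each read() from ptm starts with one status byte:
	 * TIOCPKT_DATA (0) for ordinary data, otherwise a mask of the
	 * other TIOCPKT_* bits defined above. */
	return ioctl(ptm, TIOCPKT, &on) < 0;
}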
diff --git a/arch/sparc/include/asm/ipcbuf.h b/arch/sparc/include/uapi/asm/ipcbuf.h index 66013b4fe10d..66013b4fe10d 100644 --- a/arch/sparc/include/asm/ipcbuf.h +++ b/arch/sparc/include/uapi/asm/ipcbuf.h | |||
diff --git a/arch/sparc/include/asm/jsflash.h b/arch/sparc/include/uapi/asm/jsflash.h index 0717d9e39d2d..0717d9e39d2d 100644 --- a/arch/sparc/include/asm/jsflash.h +++ b/arch/sparc/include/uapi/asm/jsflash.h | |||
diff --git a/arch/sparc/include/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h index 14fab8f0b957..14fab8f0b957 100644 --- a/arch/sparc/include/asm/kvm_para.h +++ b/arch/sparc/include/uapi/asm/kvm_para.h | |||
diff --git a/arch/sparc/include/uapi/asm/mman.h b/arch/sparc/include/uapi/asm/mman.h new file mode 100644 index 000000000000..0b14df33cffa --- /dev/null +++ b/arch/sparc/include/uapi/asm/mman.h | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | #ifndef _UAPI__SPARC_MMAN_H__ | ||
| 2 | #define _UAPI__SPARC_MMAN_H__ | ||
| 3 | |||
| 4 | #include <asm-generic/mman-common.h> | ||
| 5 | |||
| 6 | /* SunOS'ified... */ | ||
| 7 | |||
| 8 | #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ | ||
| 9 | #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ | ||
| 10 | #define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ | ||
| 11 | #define MAP_LOCKED 0x100 /* lock the mapping */ | ||
| 12 | #define _MAP_NEW 0x80000000 /* Binary compatibility is fun... */ | ||
| 13 | |||
| 14 | #define MAP_GROWSDOWN 0x0200 /* stack-like segment */ | ||
| 15 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
| 16 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
| 17 | |||
| 18 | #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ | ||
| 19 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ | ||
| 20 | |||
| 21 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
| 22 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
| 23 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
| 24 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
| 25 | |||
| 26 | |||
| 27 | #endif /* _UAPI__SPARC_MMAN_H__ */ | ||
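These are the SPARC encodings of the standard mmap() flag names, so portable code keeps using the symbols. A minimal sketch of an anonymous MAP_NORESERVE mapping follows; the 1 MiB size is arbitrary and only for illustration:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;   /* 1 MiB, no swap reservation requested */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        munmap(p, len);
        return 0;
}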
diff --git a/arch/sparc/include/asm/msgbuf.h b/arch/sparc/include/uapi/asm/msgbuf.h index efc7cbe9788f..efc7cbe9788f 100644 --- a/arch/sparc/include/asm/msgbuf.h +++ b/arch/sparc/include/uapi/asm/msgbuf.h | |||
diff --git a/arch/sparc/include/asm/openpromio.h b/arch/sparc/include/uapi/asm/openpromio.h index 917fb8e9c633..917fb8e9c633 100644 --- a/arch/sparc/include/asm/openpromio.h +++ b/arch/sparc/include/uapi/asm/openpromio.h | |||
diff --git a/arch/sparc/include/asm/param.h b/arch/sparc/include/uapi/asm/param.h index 0bc356bf8c50..0bc356bf8c50 100644 --- a/arch/sparc/include/asm/param.h +++ b/arch/sparc/include/uapi/asm/param.h | |||
diff --git a/arch/sparc/include/asm/perfctr.h b/arch/sparc/include/uapi/asm/perfctr.h index 214feefa577c..214feefa577c 100644 --- a/arch/sparc/include/asm/perfctr.h +++ b/arch/sparc/include/uapi/asm/perfctr.h | |||
diff --git a/arch/sparc/include/asm/poll.h b/arch/sparc/include/uapi/asm/poll.h index 091d3ad2e830..091d3ad2e830 100644 --- a/arch/sparc/include/asm/poll.h +++ b/arch/sparc/include/uapi/asm/poll.h | |||
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/uapi/asm/posix_types.h index 156220ed99eb..156220ed99eb 100644 --- a/arch/sparc/include/asm/posix_types.h +++ b/arch/sparc/include/uapi/asm/posix_types.h | |||
diff --git a/arch/sparc/include/uapi/asm/psr.h b/arch/sparc/include/uapi/asm/psr.h new file mode 100644 index 000000000000..2f0ed856530b --- /dev/null +++ b/arch/sparc/include/uapi/asm/psr.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /* | ||
| 2 | * psr.h: This file holds the macros for masking off various parts of | ||
| 3 | * the processor status register on the Sparc. This is valid | ||
| 4 | * for Version 8. On the V9 this is renamed to the PSTATE | ||
| 5 | * register and its members are accessed as fields like | ||
| 6 | * PSTATE.PRIV for the current CPU privilege level. | ||
| 7 | * | ||
| 8 | * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu) | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _UAPI__LINUX_SPARC_PSR_H | ||
| 12 | #define _UAPI__LINUX_SPARC_PSR_H | ||
| 13 | |||
| 14 | /* The Sparc PSR fields are laid out as the following: | ||
| 15 | * | ||
| 16 | * ------------------------------------------------------------------------ | ||
| 17 | * | impl | vers | icc | resv | EC | EF | PIL | S | PS | ET | CWP | | ||
| 18 | * | 31-28 | 27-24 | 23-20 | 19-14 | 13 | 12 | 11-8 | 7 | 6 | 5 | 4-0 | | ||
| 19 | * ------------------------------------------------------------------------ | ||
| 20 | */ | ||
| 21 | #define PSR_CWP 0x0000001f /* current window pointer */ | ||
| 22 | #define PSR_ET 0x00000020 /* enable traps field */ | ||
| 23 | #define PSR_PS 0x00000040 /* previous privilege level */ | ||
| 24 | #define PSR_S 0x00000080 /* current privilege level */ | ||
| 25 | #define PSR_PIL 0x00000f00 /* processor interrupt level */ | ||
| 26 | #define PSR_EF 0x00001000 /* enable floating point */ | ||
| 27 | #define PSR_EC 0x00002000 /* enable co-processor */ | ||
| 28 | #define PSR_SYSCALL 0x00004000 /* inside of a syscall */ | ||
| 29 | #define PSR_LE 0x00008000 /* SuperSparcII little-endian */ | ||
| 30 | #define PSR_ICC 0x00f00000 /* integer condition codes */ | ||
| 31 | #define PSR_C 0x00100000 /* carry bit */ | ||
| 32 | #define PSR_V 0x00200000 /* overflow bit */ | ||
| 33 | #define PSR_Z 0x00400000 /* zero bit */ | ||
| 34 | #define PSR_N 0x00800000 /* negative bit */ | ||
| 35 | #define PSR_VERS 0x0f000000 /* cpu-version field */ | ||
| 36 | #define PSR_IMPL 0xf0000000 /* cpu-implementation field */ | ||
| 37 | |||
| 38 | #define PSR_VERS_SHIFT 24 | ||
| 39 | #define PSR_IMPL_SHIFT 28 | ||
| 40 | #define PSR_VERS_SHIFTED_MASK 0xf | ||
| 41 | #define PSR_IMPL_SHIFTED_MASK 0xf | ||
| 42 | |||
| 43 | #define PSR_IMPL_TI 0x4 | ||
| 44 | #define PSR_IMPL_LEON 0xf | ||
| 45 | |||
| 46 | |||
| 47 | #endif /* _UAPI__LINUX_SPARC_PSR_H */ | ||
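Given the field layout in the comment above, the implementation and version numbers fall out of a mask-and-shift with the constants this header defines. A self-contained sketch follows; the constants are repeated locally so it builds anywhere, and the sample PSR value is made up purely for illustration:

#include <stdio.h>

#define PSR_IMPL        0xf0000000
#define PSR_VERS        0x0f000000
#define PSR_IMPL_SHIFT  28
#define PSR_VERS_SHIFT  24
#define PSR_IMPL_LEON   0xf

int main(void)
{
        unsigned int psr = 0xf3000080;   /* example value only */
        unsigned int impl = (psr & PSR_IMPL) >> PSR_IMPL_SHIFT;
        unsigned int vers = (psr & PSR_VERS) >> PSR_VERS_SHIFT;

        printf("impl=%u vers=%u leon=%d\n", impl, vers, impl == PSR_IMPL_LEON);
        return 0;
}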
diff --git a/arch/sparc/include/asm/psrcompat.h b/arch/sparc/include/uapi/asm/psrcompat.h index 44b6327dbbf5..44b6327dbbf5 100644 --- a/arch/sparc/include/asm/psrcompat.h +++ b/arch/sparc/include/uapi/asm/psrcompat.h | |||
diff --git a/arch/sparc/include/asm/pstate.h b/arch/sparc/include/uapi/asm/pstate.h index 4b6b998afd99..4b6b998afd99 100644 --- a/arch/sparc/include/asm/pstate.h +++ b/arch/sparc/include/uapi/asm/pstate.h | |||
diff --git a/arch/sparc/include/uapi/asm/ptrace.h b/arch/sparc/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..56fe4ea73feb --- /dev/null +++ b/arch/sparc/include/uapi/asm/ptrace.h | |||
| @@ -0,0 +1,352 @@ | |||
| 1 | #ifndef _UAPI__SPARC_PTRACE_H | ||
| 2 | #define _UAPI__SPARC_PTRACE_H | ||
| 3 | |||
| 4 | #if defined(__sparc__) && defined(__arch64__) | ||
| 5 | /* 64 bit sparc */ | ||
| 6 | #include <asm/pstate.h> | ||
| 7 | |||
| 8 | /* This struct defines the way the registers are stored on the | ||
| 9 | * stack during a system call and basically all traps. | ||
| 10 | */ | ||
| 11 | |||
| 12 | /* This magic value must have the low 9 bits clear, | ||
| 13 | * as that is where we encode the %tt value, see below. | ||
| 14 | */ | ||
| 15 | #define PT_REGS_MAGIC 0x57ac6c00 | ||
| 16 | |||
| 17 | #ifndef __ASSEMBLY__ | ||
| 18 | |||
| 19 | #include <linux/types.h> | ||
| 20 | |||
| 21 | struct pt_regs { | ||
| 22 | unsigned long u_regs[16]; /* globals and ins */ | ||
| 23 | unsigned long tstate; | ||
| 24 | unsigned long tpc; | ||
| 25 | unsigned long tnpc; | ||
| 26 | unsigned int y; | ||
| 27 | |||
| 28 | /* We encode a magic number, PT_REGS_MAGIC, along | ||
| 29 | * with the %tt (trap type) register value at trap | ||
| 30 | * entry time. The magic number allows us to identify | ||
| 31 | * accurately a trap stack frame in the stack | ||
| 32 | * unwinder, and the %tt value allows us to test | ||
| 33 | * things like "in a system call" etc. for an arbitrary | ||
| 34 | * process. | ||
| 35 | * | ||
| 36 | * The PT_REGS_MAGIC is chosen such that it can be | ||
| 37 | * loaded completely using just a sethi instruction. | ||
| 38 | */ | ||
| 39 | unsigned int magic; | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct pt_regs32 { | ||
| 43 | unsigned int psr; | ||
| 44 | unsigned int pc; | ||
| 45 | unsigned int npc; | ||
| 46 | unsigned int y; | ||
| 47 | unsigned int u_regs[16]; /* globals and ins */ | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* A V9 register window */ | ||
| 51 | struct reg_window { | ||
| 52 | unsigned long locals[8]; | ||
| 53 | unsigned long ins[8]; | ||
| 54 | }; | ||
| 55 | |||
| 56 | /* A 32-bit register window. */ | ||
| 57 | struct reg_window32 { | ||
| 58 | unsigned int locals[8]; | ||
| 59 | unsigned int ins[8]; | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* A V9 Sparc stack frame */ | ||
| 63 | struct sparc_stackf { | ||
| 64 | unsigned long locals[8]; | ||
| 65 | unsigned long ins[6]; | ||
| 66 | struct sparc_stackf *fp; | ||
| 67 | unsigned long callers_pc; | ||
| 68 | char *structptr; | ||
| 69 | unsigned long xargs[6]; | ||
| 70 | unsigned long xxargs[1]; | ||
| 71 | }; | ||
| 72 | |||
| 73 | /* A 32-bit Sparc stack frame */ | ||
| 74 | struct sparc_stackf32 { | ||
| 75 | unsigned int locals[8]; | ||
| 76 | unsigned int ins[6]; | ||
| 77 | unsigned int fp; | ||
| 78 | unsigned int callers_pc; | ||
| 79 | unsigned int structptr; | ||
| 80 | unsigned int xargs[6]; | ||
| 81 | unsigned int xxargs[1]; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct sparc_trapf { | ||
| 85 | unsigned long locals[8]; | ||
| 86 | unsigned long ins[8]; | ||
| 87 | unsigned long _unused; | ||
| 88 | struct pt_regs *regs; | ||
| 89 | }; | ||
| 90 | #endif /* (!__ASSEMBLY__) */ | ||
| 91 | #else | ||
| 92 | /* 32 bit sparc */ | ||
| 93 | |||
| 94 | #include <asm/psr.h> | ||
| 95 | |||
| 96 | /* This struct defines the way the registers are stored on the | ||
| 97 | * stack during a system call and basically all traps. | ||
| 98 | */ | ||
| 99 | #ifndef __ASSEMBLY__ | ||
| 100 | |||
| 101 | #include <linux/types.h> | ||
| 102 | |||
| 103 | struct pt_regs { | ||
| 104 | unsigned long psr; | ||
| 105 | unsigned long pc; | ||
| 106 | unsigned long npc; | ||
| 107 | unsigned long y; | ||
| 108 | unsigned long u_regs[16]; /* globals and ins */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* A 32-bit register window. */ | ||
| 112 | struct reg_window32 { | ||
| 113 | unsigned long locals[8]; | ||
| 114 | unsigned long ins[8]; | ||
| 115 | }; | ||
| 116 | |||
| 117 | /* A Sparc stack frame */ | ||
| 118 | struct sparc_stackf { | ||
| 119 | unsigned long locals[8]; | ||
| 120 | unsigned long ins[6]; | ||
| 121 | struct sparc_stackf *fp; | ||
| 122 | unsigned long callers_pc; | ||
| 123 | char *structptr; | ||
| 124 | unsigned long xargs[6]; | ||
| 125 | unsigned long xxargs[1]; | ||
| 126 | }; | ||
| 127 | #endif /* (!__ASSEMBLY__) */ | ||
| 128 | |||
| 129 | #endif /* (defined(__sparc__) && defined(__arch64__))*/ | ||
| 130 | |||
| 131 | #ifndef __ASSEMBLY__ | ||
| 132 | |||
| 133 | #define TRACEREG_SZ sizeof(struct pt_regs) | ||
| 134 | #define STACKFRAME_SZ sizeof(struct sparc_stackf) | ||
| 135 | |||
| 136 | #define TRACEREG32_SZ sizeof(struct pt_regs32) | ||
| 137 | #define STACKFRAME32_SZ sizeof(struct sparc_stackf32) | ||
| 138 | |||
| 139 | #endif /* (!__ASSEMBLY__) */ | ||
| 140 | |||
| 141 | #define UREG_G0 0 | ||
| 142 | #define UREG_G1 1 | ||
| 143 | #define UREG_G2 2 | ||
| 144 | #define UREG_G3 3 | ||
| 145 | #define UREG_G4 4 | ||
| 146 | #define UREG_G5 5 | ||
| 147 | #define UREG_G6 6 | ||
| 148 | #define UREG_G7 7 | ||
| 149 | #define UREG_I0 8 | ||
| 150 | #define UREG_I1 9 | ||
| 151 | #define UREG_I2 10 | ||
| 152 | #define UREG_I3 11 | ||
| 153 | #define UREG_I4 12 | ||
| 154 | #define UREG_I5 13 | ||
| 155 | #define UREG_I6 14 | ||
| 156 | #define UREG_I7 15 | ||
| 157 | #define UREG_FP UREG_I6 | ||
| 158 | #define UREG_RETPC UREG_I7 | ||
| 159 | |||
| 160 | #if defined(__sparc__) && defined(__arch64__) | ||
| 161 | /* 64 bit sparc */ | ||
| 162 | |||
| 163 | #ifndef __ASSEMBLY__ | ||
| 164 | |||
| 165 | |||
| 166 | #else /* __ASSEMBLY__ */ | ||
| 167 | /* For assembly code. */ | ||
| 168 | #define TRACEREG_SZ 0xa0 | ||
| 169 | #define STACKFRAME_SZ 0xc0 | ||
| 170 | |||
| 171 | #define TRACEREG32_SZ 0x50 | ||
| 172 | #define STACKFRAME32_SZ 0x60 | ||
| 173 | #endif /* __ASSEMBLY__ */ | ||
| 174 | |||
| 175 | #else /* (defined(__sparc__) && defined(__arch64__)) */ | ||
| 176 | |||
| 177 | /* 32 bit sparc */ | ||
| 178 | |||
| 179 | #ifndef __ASSEMBLY__ | ||
| 180 | |||
| 181 | |||
| 182 | #else /* (!__ASSEMBLY__) */ | ||
| 183 | /* For assembly code. */ | ||
| 184 | #define TRACEREG_SZ 0x50 | ||
| 185 | #define STACKFRAME_SZ 0x60 | ||
| 186 | #endif /* (!__ASSEMBLY__) */ | ||
| 187 | |||
| 188 | #endif /* (defined(__sparc__) && defined(__arch64__)) */ | ||
| 189 | |||
| 190 | |||
| 191 | /* These are for pt_regs. */ | ||
| 192 | #define PT_V9_G0 0x00 | ||
| 193 | #define PT_V9_G1 0x08 | ||
| 194 | #define PT_V9_G2 0x10 | ||
| 195 | #define PT_V9_G3 0x18 | ||
| 196 | #define PT_V9_G4 0x20 | ||
| 197 | #define PT_V9_G5 0x28 | ||
| 198 | #define PT_V9_G6 0x30 | ||
| 199 | #define PT_V9_G7 0x38 | ||
| 200 | #define PT_V9_I0 0x40 | ||
| 201 | #define PT_V9_I1 0x48 | ||
| 202 | #define PT_V9_I2 0x50 | ||
| 203 | #define PT_V9_I3 0x58 | ||
| 204 | #define PT_V9_I4 0x60 | ||
| 205 | #define PT_V9_I5 0x68 | ||
| 206 | #define PT_V9_I6 0x70 | ||
| 207 | #define PT_V9_FP PT_V9_I6 | ||
| 208 | #define PT_V9_I7 0x78 | ||
| 209 | #define PT_V9_TSTATE 0x80 | ||
| 210 | #define PT_V9_TPC 0x88 | ||
| 211 | #define PT_V9_TNPC 0x90 | ||
| 212 | #define PT_V9_Y 0x98 | ||
| 213 | #define PT_V9_MAGIC 0x9c | ||
| 214 | #define PT_TSTATE PT_V9_TSTATE | ||
| 215 | #define PT_TPC PT_V9_TPC | ||
| 216 | #define PT_TNPC PT_V9_TNPC | ||
| 217 | |||
| 218 | /* These for pt_regs32. */ | ||
| 219 | #define PT_PSR 0x0 | ||
| 220 | #define PT_PC 0x4 | ||
| 221 | #define PT_NPC 0x8 | ||
| 222 | #define PT_Y 0xc | ||
| 223 | #define PT_G0 0x10 | ||
| 224 | #define PT_WIM PT_G0 | ||
| 225 | #define PT_G1 0x14 | ||
| 226 | #define PT_G2 0x18 | ||
| 227 | #define PT_G3 0x1c | ||
| 228 | #define PT_G4 0x20 | ||
| 229 | #define PT_G5 0x24 | ||
| 230 | #define PT_G6 0x28 | ||
| 231 | #define PT_G7 0x2c | ||
| 232 | #define PT_I0 0x30 | ||
| 233 | #define PT_I1 0x34 | ||
| 234 | #define PT_I2 0x38 | ||
| 235 | #define PT_I3 0x3c | ||
| 236 | #define PT_I4 0x40 | ||
| 237 | #define PT_I5 0x44 | ||
| 238 | #define PT_I6 0x48 | ||
| 239 | #define PT_FP PT_I6 | ||
| 240 | #define PT_I7 0x4c | ||
| 241 | |||
| 242 | /* Reg_window offsets */ | ||
| 243 | #define RW_V9_L0 0x00 | ||
| 244 | #define RW_V9_L1 0x08 | ||
| 245 | #define RW_V9_L2 0x10 | ||
| 246 | #define RW_V9_L3 0x18 | ||
| 247 | #define RW_V9_L4 0x20 | ||
| 248 | #define RW_V9_L5 0x28 | ||
| 249 | #define RW_V9_L6 0x30 | ||
| 250 | #define RW_V9_L7 0x38 | ||
| 251 | #define RW_V9_I0 0x40 | ||
| 252 | #define RW_V9_I1 0x48 | ||
| 253 | #define RW_V9_I2 0x50 | ||
| 254 | #define RW_V9_I3 0x58 | ||
| 255 | #define RW_V9_I4 0x60 | ||
| 256 | #define RW_V9_I5 0x68 | ||
| 257 | #define RW_V9_I6 0x70 | ||
| 258 | #define RW_V9_I7 0x78 | ||
| 259 | |||
| 260 | #define RW_L0 0x00 | ||
| 261 | #define RW_L1 0x04 | ||
| 262 | #define RW_L2 0x08 | ||
| 263 | #define RW_L3 0x0c | ||
| 264 | #define RW_L4 0x10 | ||
| 265 | #define RW_L5 0x14 | ||
| 266 | #define RW_L6 0x18 | ||
| 267 | #define RW_L7 0x1c | ||
| 268 | #define RW_I0 0x20 | ||
| 269 | #define RW_I1 0x24 | ||
| 270 | #define RW_I2 0x28 | ||
| 271 | #define RW_I3 0x2c | ||
| 272 | #define RW_I4 0x30 | ||
| 273 | #define RW_I5 0x34 | ||
| 274 | #define RW_I6 0x38 | ||
| 275 | #define RW_I7 0x3c | ||
| 276 | |||
| 277 | /* Stack_frame offsets */ | ||
| 278 | #define SF_V9_L0 0x00 | ||
| 279 | #define SF_V9_L1 0x08 | ||
| 280 | #define SF_V9_L2 0x10 | ||
| 281 | #define SF_V9_L3 0x18 | ||
| 282 | #define SF_V9_L4 0x20 | ||
| 283 | #define SF_V9_L5 0x28 | ||
| 284 | #define SF_V9_L6 0x30 | ||
| 285 | #define SF_V9_L7 0x38 | ||
| 286 | #define SF_V9_I0 0x40 | ||
| 287 | #define SF_V9_I1 0x48 | ||
| 288 | #define SF_V9_I2 0x50 | ||
| 289 | #define SF_V9_I3 0x58 | ||
| 290 | #define SF_V9_I4 0x60 | ||
| 291 | #define SF_V9_I5 0x68 | ||
| 292 | #define SF_V9_FP 0x70 | ||
| 293 | #define SF_V9_PC 0x78 | ||
| 294 | #define SF_V9_RETP 0x80 | ||
| 295 | #define SF_V9_XARG0 0x88 | ||
| 296 | #define SF_V9_XARG1 0x90 | ||
| 297 | #define SF_V9_XARG2 0x98 | ||
| 298 | #define SF_V9_XARG3 0xa0 | ||
| 299 | #define SF_V9_XARG4 0xa8 | ||
| 300 | #define SF_V9_XARG5 0xb0 | ||
| 301 | #define SF_V9_XXARG 0xb8 | ||
| 302 | |||
| 303 | #define SF_L0 0x00 | ||
| 304 | #define SF_L1 0x04 | ||
| 305 | #define SF_L2 0x08 | ||
| 306 | #define SF_L3 0x0c | ||
| 307 | #define SF_L4 0x10 | ||
| 308 | #define SF_L5 0x14 | ||
| 309 | #define SF_L6 0x18 | ||
| 310 | #define SF_L7 0x1c | ||
| 311 | #define SF_I0 0x20 | ||
| 312 | #define SF_I1 0x24 | ||
| 313 | #define SF_I2 0x28 | ||
| 314 | #define SF_I3 0x2c | ||
| 315 | #define SF_I4 0x30 | ||
| 316 | #define SF_I5 0x34 | ||
| 317 | #define SF_FP 0x38 | ||
| 318 | #define SF_PC 0x3c | ||
| 319 | #define SF_RETP 0x40 | ||
| 320 | #define SF_XARG0 0x44 | ||
| 321 | #define SF_XARG1 0x48 | ||
| 322 | #define SF_XARG2 0x4c | ||
| 323 | #define SF_XARG3 0x50 | ||
| 324 | #define SF_XARG4 0x54 | ||
| 325 | #define SF_XARG5 0x58 | ||
| 326 | #define SF_XXARG 0x5c | ||
| 327 | |||
| 328 | |||
| 329 | /* Stuff for the ptrace system call */ | ||
| 330 | #define PTRACE_SPARC_DETACH 11 | ||
| 331 | #define PTRACE_GETREGS 12 | ||
| 332 | #define PTRACE_SETREGS 13 | ||
| 333 | #define PTRACE_GETFPREGS 14 | ||
| 334 | #define PTRACE_SETFPREGS 15 | ||
| 335 | #define PTRACE_READDATA 16 | ||
| 336 | #define PTRACE_WRITEDATA 17 | ||
| 337 | #define PTRACE_READTEXT 18 | ||
| 338 | #define PTRACE_WRITETEXT 19 | ||
| 339 | #define PTRACE_GETFPAREGS 20 | ||
| 340 | #define PTRACE_SETFPAREGS 21 | ||
| 341 | |||
| 342 | /* These are for debugging 64-bit processes, either from a 32-bit or 64-bit | ||
| 343 | * parent. Thus their complements are for debugging 32-bit processes only. | ||
| 344 | */ | ||
| 345 | |||
| 346 | #define PTRACE_GETREGS64 22 | ||
| 347 | #define PTRACE_SETREGS64 23 | ||
| 348 | /* PTRACE_SYSCALL is 24 */ | ||
| 349 | #define PTRACE_GETFPREGS64 25 | ||
| 350 | #define PTRACE_SETFPREGS64 26 | ||
| 351 | |||
| 352 | #endif /* _UAPI__SPARC_PTRACE_H */ | ||
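Per the PT_REGS_MAGIC comment above, the magic word carries %tt in its low 9 bits, so a frame can be validated and the trap type recovered with two masks. A hedged sketch follows: the 0x1ff mask and the helper name are inferred from that comment, not taken from any exported header:

#include <stdio.h>
#include <stdbool.h>

#define PT_REGS_MAGIC   0x57ac6c00
#define PT_REGS_TT_MASK 0x1ff            /* low 9 bits hold %tt, per the comment above */

/* Return true and extract %tt when the magic word looks like a trap frame. */
static bool pt_magic_decode(unsigned int magic, unsigned int *tt)
{
        if ((magic & ~PT_REGS_TT_MASK) != PT_REGS_MAGIC)
                return false;
        *tt = magic & PT_REGS_TT_MASK;
        return true;
}

int main(void)
{
        unsigned int tt;

        /* A made-up magic word: PT_REGS_MAGIC with %tt = 0x10 folded in. */
        if (pt_magic_decode(PT_REGS_MAGIC | 0x10, &tt))
                printf("trap type 0x%x\n", tt);
        return 0;
}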
diff --git a/arch/sparc/include/asm/resource.h b/arch/sparc/include/uapi/asm/resource.h index fe163cafb4c7..fe163cafb4c7 100644 --- a/arch/sparc/include/asm/resource.h +++ b/arch/sparc/include/uapi/asm/resource.h | |||
diff --git a/arch/sparc/include/asm/sembuf.h b/arch/sparc/include/uapi/asm/sembuf.h index faee1be08d67..faee1be08d67 100644 --- a/arch/sparc/include/asm/sembuf.h +++ b/arch/sparc/include/uapi/asm/sembuf.h | |||
diff --git a/arch/sparc/include/uapi/asm/setup.h b/arch/sparc/include/uapi/asm/setup.h new file mode 100644 index 000000000000..533768450872 --- /dev/null +++ b/arch/sparc/include/uapi/asm/setup.h | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | /* | ||
| 2 | * Just a placeholder. | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _UAPI_SPARC_SETUP_H | ||
| 6 | #define _UAPI_SPARC_SETUP_H | ||
| 7 | |||
| 8 | #if defined(__sparc__) && defined(__arch64__) | ||
| 9 | # define COMMAND_LINE_SIZE 2048 | ||
| 10 | #else | ||
| 11 | # define COMMAND_LINE_SIZE 256 | ||
| 12 | #endif | ||
| 13 | |||
| 14 | |||
| 15 | #endif /* _UAPI_SPARC_SETUP_H */ | ||
diff --git a/arch/sparc/include/asm/shmbuf.h b/arch/sparc/include/uapi/asm/shmbuf.h index 83a16055363f..83a16055363f 100644 --- a/arch/sparc/include/asm/shmbuf.h +++ b/arch/sparc/include/uapi/asm/shmbuf.h | |||
diff --git a/arch/sparc/include/uapi/asm/sigcontext.h b/arch/sparc/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/arch/sparc/include/uapi/asm/sigcontext.h | |||
diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h new file mode 100644 index 000000000000..2d9b79ccaa50 --- /dev/null +++ b/arch/sparc/include/uapi/asm/siginfo.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | #ifndef _UAPI__SPARC_SIGINFO_H | ||
| 2 | #define _UAPI__SPARC_SIGINFO_H | ||
| 3 | |||
| 4 | #if defined(__sparc__) && defined(__arch64__) | ||
| 5 | |||
| 6 | #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | ||
| 7 | #define __ARCH_SI_BAND_T int | ||
| 8 | |||
| 9 | #endif /* defined(__sparc__) && defined(__arch64__) */ | ||
| 10 | |||
| 11 | |||
| 12 | #define __ARCH_SI_TRAPNO | ||
| 13 | |||
| 14 | #include <asm-generic/siginfo.h> | ||
| 15 | |||
| 16 | |||
| 17 | #define SI_NOINFO 32767 /* no information in siginfo_t */ | ||
| 18 | |||
| 19 | /* | ||
| 20 | * SIGEMT si_codes | ||
| 21 | */ | ||
| 22 | #define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */ | ||
| 23 | #define NSIGEMT 1 | ||
| 24 | |||
| 25 | #endif /* _UAPI__SPARC_SIGINFO_H */ | ||
diff --git a/arch/sparc/include/uapi/asm/signal.h b/arch/sparc/include/uapi/asm/signal.h new file mode 100644 index 000000000000..1a041892538f --- /dev/null +++ b/arch/sparc/include/uapi/asm/signal.h | |||
| @@ -0,0 +1,185 @@ | |||
| 1 | #ifndef _UAPI__SPARC_SIGNAL_H | ||
| 2 | #define _UAPI__SPARC_SIGNAL_H | ||
| 3 | |||
| 4 | #include <asm/sigcontext.h> | ||
| 5 | #include <linux/compiler.h> | ||
| 6 | |||
| 7 | |||
| 8 | /* On the Sparc the signal handlers get passed a 'sub-signal' code | ||
| 9 | * for certain signal types, which we document here. | ||
| 10 | */ | ||
| 11 | #define SIGHUP 1 | ||
| 12 | #define SIGINT 2 | ||
| 13 | #define SIGQUIT 3 | ||
| 14 | #define SIGILL 4 | ||
| 15 | #define SUBSIG_STACK 0 | ||
| 16 | #define SUBSIG_ILLINST 2 | ||
| 17 | #define SUBSIG_PRIVINST 3 | ||
| 18 | #define SUBSIG_BADTRAP(t) (0x80 + (t)) | ||
| 19 | |||
| 20 | #define SIGTRAP 5 | ||
| 21 | #define SIGABRT 6 | ||
| 22 | #define SIGIOT 6 | ||
| 23 | |||
| 24 | #define SIGEMT 7 | ||
| 25 | #define SUBSIG_TAG 10 | ||
| 26 | |||
| 27 | #define SIGFPE 8 | ||
| 28 | #define SUBSIG_FPDISABLED 0x400 | ||
| 29 | #define SUBSIG_FPERROR 0x404 | ||
| 30 | #define SUBSIG_FPINTOVFL 0x001 | ||
| 31 | #define SUBSIG_FPSTSIG 0x002 | ||
| 32 | #define SUBSIG_IDIVZERO 0x014 | ||
| 33 | #define SUBSIG_FPINEXACT 0x0c4 | ||
| 34 | #define SUBSIG_FPDIVZERO 0x0c8 | ||
| 35 | #define SUBSIG_FPUNFLOW 0x0cc | ||
| 36 | #define SUBSIG_FPOPERROR 0x0d0 | ||
| 37 | #define SUBSIG_FPOVFLOW 0x0d4 | ||
| 38 | |||
| 39 | #define SIGKILL 9 | ||
| 40 | #define SIGBUS 10 | ||
| 41 | #define SUBSIG_BUSTIMEOUT 1 | ||
| 42 | #define SUBSIG_ALIGNMENT 2 | ||
| 43 | #define SUBSIG_MISCERROR 5 | ||
| 44 | |||
| 45 | #define SIGSEGV 11 | ||
| 46 | #define SUBSIG_NOMAPPING 3 | ||
| 47 | #define SUBSIG_PROTECTION 4 | ||
| 48 | #define SUBSIG_SEGERROR 5 | ||
| 49 | |||
| 50 | #define SIGSYS 12 | ||
| 51 | |||
| 52 | #define SIGPIPE 13 | ||
| 53 | #define SIGALRM 14 | ||
| 54 | #define SIGTERM 15 | ||
| 55 | #define SIGURG 16 | ||
| 56 | |||
| 57 | /* SunOS values which deviate from the Linux/i386 ones */ | ||
| 58 | #define SIGSTOP 17 | ||
| 59 | #define SIGTSTP 18 | ||
| 60 | #define SIGCONT 19 | ||
| 61 | #define SIGCHLD 20 | ||
| 62 | #define SIGTTIN 21 | ||
| 63 | #define SIGTTOU 22 | ||
| 64 | #define SIGIO 23 | ||
| 65 | #define SIGPOLL SIGIO /* SysV name for SIGIO */ | ||
| 66 | #define SIGXCPU 24 | ||
| 67 | #define SIGXFSZ 25 | ||
| 68 | #define SIGVTALRM 26 | ||
| 69 | #define SIGPROF 27 | ||
| 70 | #define SIGWINCH 28 | ||
| 71 | #define SIGLOST 29 | ||
| 72 | #define SIGPWR SIGLOST | ||
| 73 | #define SIGUSR1 30 | ||
| 74 | #define SIGUSR2 31 | ||
| 75 | |||
| 76 | /* Most things should be clean enough to redefine this at will, if care | ||
| 77 | is taken to make libc match. */ | ||
| 78 | |||
| 79 | #define __OLD_NSIG 32 | ||
| 80 | #define __NEW_NSIG 64 | ||
| 81 | #ifdef __arch64__ | ||
| 82 | #define _NSIG_BPW 64 | ||
| 83 | #else | ||
| 84 | #define _NSIG_BPW 32 | ||
| 85 | #endif | ||
| 86 | #define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW) | ||
| 87 | |||
| 88 | #define SIGRTMIN 32 | ||
| 89 | #define SIGRTMAX __NEW_NSIG | ||
| 90 | |||
| 91 | #if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__) | ||
| 92 | #define _NSIG __NEW_NSIG | ||
| 93 | #define __new_sigset_t sigset_t | ||
| 94 | #define __new_sigaction sigaction | ||
| 95 | #define __new_sigaction32 sigaction32 | ||
| 96 | #define __old_sigset_t old_sigset_t | ||
| 97 | #define __old_sigaction old_sigaction | ||
| 98 | #define __old_sigaction32 old_sigaction32 | ||
| 99 | #else | ||
| 100 | #define _NSIG __OLD_NSIG | ||
| 101 | #define NSIG _NSIG | ||
| 102 | #define __old_sigset_t sigset_t | ||
| 103 | #define __old_sigaction sigaction | ||
| 104 | #define __old_sigaction32 sigaction32 | ||
| 105 | #endif | ||
| 106 | |||
| 107 | #ifndef __ASSEMBLY__ | ||
| 108 | |||
| 109 | typedef unsigned long __old_sigset_t; /* at least 32 bits */ | ||
| 110 | |||
| 111 | typedef struct { | ||
| 112 | unsigned long sig[_NSIG_WORDS]; | ||
| 113 | } __new_sigset_t; | ||
| 114 | |||
| 115 | /* A SunOS sigstack */ | ||
| 116 | struct sigstack { | ||
| 117 | /* XXX 32-bit pointers pinhead XXX */ | ||
| 118 | char *the_stack; | ||
| 119 | int cur_status; | ||
| 120 | }; | ||
| 121 | |||
| 122 | /* Sigvec flags */ | ||
| 123 | #define _SV_SSTACK 1u /* This signal handler should use sig-stack */ | ||
| 124 | #define _SV_INTR 2u /* Sig return should not restart system call */ | ||
| 125 | #define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */ | ||
| 126 | #define _SV_IGNCHILD 8u /* Do not send SIGCHLD */ | ||
| 127 | |||
| 128 | /* | ||
| 129 | * sa_flags values: SA_STACK is not currently supported, but will allow the | ||
| 130 | * usage of signal stacks by using the (now obsolete) sa_restorer field in | ||
| 131 | * the sigaction structure as a stack pointer. This is now possible due to | ||
| 132 | * the changes in signal handling. LBT 010493. | ||
| 133 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
| 134 | */ | ||
| 135 | #define SA_NOCLDSTOP _SV_IGNCHILD | ||
| 136 | #define SA_STACK _SV_SSTACK | ||
| 137 | #define SA_ONSTACK _SV_SSTACK | ||
| 138 | #define SA_RESTART _SV_INTR | ||
| 139 | #define SA_ONESHOT _SV_RESET | ||
| 140 | #define SA_NODEFER 0x20u | ||
| 141 | #define SA_NOCLDWAIT 0x100u | ||
| 142 | #define SA_SIGINFO 0x200u | ||
| 143 | |||
| 144 | #define SA_NOMASK SA_NODEFER | ||
| 145 | |||
| 146 | #define SIG_BLOCK 0x01 /* for blocking signals */ | ||
| 147 | #define SIG_UNBLOCK 0x02 /* for unblocking signals */ | ||
| 148 | #define SIG_SETMASK 0x04 /* for setting the signal mask */ | ||
| 149 | |||
| 150 | /* | ||
| 151 | * sigaltstack controls | ||
| 152 | */ | ||
| 153 | #define SS_ONSTACK 1 | ||
| 154 | #define SS_DISABLE 2 | ||
| 155 | |||
| 156 | #define MINSIGSTKSZ 4096 | ||
| 157 | #define SIGSTKSZ 16384 | ||
| 158 | |||
| 159 | |||
| 160 | #include <asm-generic/signal-defs.h> | ||
| 161 | |||
| 162 | struct __new_sigaction { | ||
| 163 | __sighandler_t sa_handler; | ||
| 164 | unsigned long sa_flags; | ||
| 165 | __sigrestore_t sa_restorer; /* not used by Linux/SPARC yet */ | ||
| 166 | __new_sigset_t sa_mask; | ||
| 167 | }; | ||
| 168 | |||
| 169 | struct __old_sigaction { | ||
| 170 | __sighandler_t sa_handler; | ||
| 171 | __old_sigset_t sa_mask; | ||
| 172 | unsigned long sa_flags; | ||
| 173 | void (*sa_restorer)(void); /* not used by Linux/SPARC yet */ | ||
| 174 | }; | ||
| 175 | |||
| 176 | typedef struct sigaltstack { | ||
| 177 | void __user *ss_sp; | ||
| 178 | int ss_flags; | ||
| 179 | size_t ss_size; | ||
| 180 | } stack_t; | ||
| 181 | |||
| 182 | |||
| 183 | #endif /* !(__ASSEMBLY__) */ | ||
| 184 | |||
| 185 | #endif /* _UAPI__SPARC_SIGNAL_H */ | ||
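The SA_* values above are the SPARC kernel ABI encodings behind the portable sigaction(2) flags; on SPARC the kernel side ends up in the __new_sigaction layout defined above. A minimal userspace sketch installing a SIGSEGV handler with SA_SIGINFO | SA_RESTART through the ordinary libc interface:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *si, void *uctx)
{
        static const char msg[] = "caught SIGSEGV\n";

        (void)sig; (void)si; (void)uctx;
        write(STDERR_FILENO, msg, sizeof(msg) - 1);   /* async-signal-safe */
        _exit(1);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_segv;
        sa.sa_flags = SA_SIGINFO | SA_RESTART;   /* 0x200 and _SV_INTR in the table above */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);
        return 0;
}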
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index bea1568ae4af..bea1568ae4af 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h | |||
diff --git a/arch/sparc/include/asm/sockios.h b/arch/sparc/include/uapi/asm/sockios.h index 990ea746486b..990ea746486b 100644 --- a/arch/sparc/include/asm/sockios.h +++ b/arch/sparc/include/uapi/asm/sockios.h | |||
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h index a232e9e1f4e5..a232e9e1f4e5 100644 --- a/arch/sparc/include/asm/stat.h +++ b/arch/sparc/include/uapi/asm/stat.h | |||
diff --git a/arch/sparc/include/asm/statfs.h b/arch/sparc/include/uapi/asm/statfs.h index 55e607ad461d..55e607ad461d 100644 --- a/arch/sparc/include/asm/statfs.h +++ b/arch/sparc/include/uapi/asm/statfs.h | |||
diff --git a/arch/sparc/include/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h index a34ad079487e..a34ad079487e 100644 --- a/arch/sparc/include/asm/swab.h +++ b/arch/sparc/include/uapi/asm/swab.h | |||
diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h new file mode 100644 index 000000000000..dd91642fcca7 --- /dev/null +++ b/arch/sparc/include/uapi/asm/termbits.h | |||
| @@ -0,0 +1,263 @@ | |||
| 1 | #ifndef _UAPI_SPARC_TERMBITS_H | ||
| 2 | #define _UAPI_SPARC_TERMBITS_H | ||
| 3 | |||
| 4 | #include <linux/posix_types.h> | ||
| 5 | |||
| 6 | typedef unsigned char cc_t; | ||
| 7 | typedef unsigned int speed_t; | ||
| 8 | |||
| 9 | #if defined(__sparc__) && defined(__arch64__) | ||
| 10 | typedef unsigned int tcflag_t; | ||
| 11 | #else | ||
| 12 | typedef unsigned long tcflag_t; | ||
| 13 | #endif | ||
| 14 | |||
| 15 | #define NCC 8 | ||
| 16 | struct termio { | ||
| 17 | unsigned short c_iflag; /* input mode flags */ | ||
| 18 | unsigned short c_oflag; /* output mode flags */ | ||
| 19 | unsigned short c_cflag; /* control mode flags */ | ||
| 20 | unsigned short c_lflag; /* local mode flags */ | ||
| 21 | unsigned char c_line; /* line discipline */ | ||
| 22 | unsigned char c_cc[NCC]; /* control characters */ | ||
| 23 | }; | ||
| 24 | |||
| 25 | #define NCCS 17 | ||
| 26 | struct termios { | ||
| 27 | tcflag_t c_iflag; /* input mode flags */ | ||
| 28 | tcflag_t c_oflag; /* output mode flags */ | ||
| 29 | tcflag_t c_cflag; /* control mode flags */ | ||
| 30 | tcflag_t c_lflag; /* local mode flags */ | ||
| 31 | cc_t c_line; /* line discipline */ | ||
| 32 | #ifndef __KERNEL__ | ||
| 33 | cc_t c_cc[NCCS]; /* control characters */ | ||
| 34 | #else | ||
| 35 | cc_t c_cc[NCCS+2]; /* kernel needs 2 more to hold vmin/vtime */ | ||
| 36 | #define SIZEOF_USER_TERMIOS sizeof (struct termios) - (2*sizeof (cc_t)) | ||
| 37 | #endif | ||
| 38 | }; | ||
| 39 | |||
| 40 | struct termios2 { | ||
| 41 | tcflag_t c_iflag; /* input mode flags */ | ||
| 42 | tcflag_t c_oflag; /* output mode flags */ | ||
| 43 | tcflag_t c_cflag; /* control mode flags */ | ||
| 44 | tcflag_t c_lflag; /* local mode flags */ | ||
| 45 | cc_t c_line; /* line discipline */ | ||
| 46 | cc_t c_cc[NCCS+2]; /* control characters */ | ||
| 47 | speed_t c_ispeed; /* input speed */ | ||
| 48 | speed_t c_ospeed; /* output speed */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct ktermios { | ||
| 52 | tcflag_t c_iflag; /* input mode flags */ | ||
| 53 | tcflag_t c_oflag; /* output mode flags */ | ||
| 54 | tcflag_t c_cflag; /* control mode flags */ | ||
| 55 | tcflag_t c_lflag; /* local mode flags */ | ||
| 56 | cc_t c_line; /* line discipline */ | ||
| 57 | cc_t c_cc[NCCS+2]; /* control characters */ | ||
| 58 | speed_t c_ispeed; /* input speed */ | ||
| 59 | speed_t c_ospeed; /* output speed */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* c_cc characters */ | ||
| 63 | #define VINTR 0 | ||
| 64 | #define VQUIT 1 | ||
| 65 | #define VERASE 2 | ||
| 66 | #define VKILL 3 | ||
| 67 | #define VEOF 4 | ||
| 68 | #define VEOL 5 | ||
| 69 | #define VEOL2 6 | ||
| 70 | #define VSWTC 7 | ||
| 71 | #define VSTART 8 | ||
| 72 | #define VSTOP 9 | ||
| 73 | |||
| 74 | |||
| 75 | |||
| 76 | #define VSUSP 10 | ||
| 77 | #define VDSUSP 11 /* SunOS POSIX nicety I do believe... */ | ||
| 78 | #define VREPRINT 12 | ||
| 79 | #define VDISCARD 13 | ||
| 80 | #define VWERASE 14 | ||
| 81 | #define VLNEXT 15 | ||
| 82 | |||
| 83 | /* The kernel keeps vmin/vtime separate; user apps assume vmin/vtime are | ||
| 84 | * shared with eof/eol | ||
| 85 | */ | ||
| 86 | #ifndef __KERNEL__ | ||
| 87 | #define VMIN VEOF | ||
| 88 | #define VTIME VEOL | ||
| 89 | #endif | ||
| 90 | |||
| 91 | /* c_iflag bits */ | ||
| 92 | #define IGNBRK 0x00000001 | ||
| 93 | #define BRKINT 0x00000002 | ||
| 94 | #define IGNPAR 0x00000004 | ||
| 95 | #define PARMRK 0x00000008 | ||
| 96 | #define INPCK 0x00000010 | ||
| 97 | #define ISTRIP 0x00000020 | ||
| 98 | #define INLCR 0x00000040 | ||
| 99 | #define IGNCR 0x00000080 | ||
| 100 | #define ICRNL 0x00000100 | ||
| 101 | #define IUCLC 0x00000200 | ||
| 102 | #define IXON 0x00000400 | ||
| 103 | #define IXANY 0x00000800 | ||
| 104 | #define IXOFF 0x00001000 | ||
| 105 | #define IMAXBEL 0x00002000 | ||
| 106 | #define IUTF8 0x00004000 | ||
| 107 | |||
| 108 | /* c_oflag bits */ | ||
| 109 | #define OPOST 0x00000001 | ||
| 110 | #define OLCUC 0x00000002 | ||
| 111 | #define ONLCR 0x00000004 | ||
| 112 | #define OCRNL 0x00000008 | ||
| 113 | #define ONOCR 0x00000010 | ||
| 114 | #define ONLRET 0x00000020 | ||
| 115 | #define OFILL 0x00000040 | ||
| 116 | #define OFDEL 0x00000080 | ||
| 117 | #define NLDLY 0x00000100 | ||
| 118 | #define NL0 0x00000000 | ||
| 119 | #define NL1 0x00000100 | ||
| 120 | #define CRDLY 0x00000600 | ||
| 121 | #define CR0 0x00000000 | ||
| 122 | #define CR1 0x00000200 | ||
| 123 | #define CR2 0x00000400 | ||
| 124 | #define CR3 0x00000600 | ||
| 125 | #define TABDLY 0x00001800 | ||
| 126 | #define TAB0 0x00000000 | ||
| 127 | #define TAB1 0x00000800 | ||
| 128 | #define TAB2 0x00001000 | ||
| 129 | #define TAB3 0x00001800 | ||
| 130 | #define XTABS 0x00001800 | ||
| 131 | #define BSDLY 0x00002000 | ||
| 132 | #define BS0 0x00000000 | ||
| 133 | #define BS1 0x00002000 | ||
| 134 | #define VTDLY 0x00004000 | ||
| 135 | #define VT0 0x00000000 | ||
| 136 | #define VT1 0x00004000 | ||
| 137 | #define FFDLY 0x00008000 | ||
| 138 | #define FF0 0x00000000 | ||
| 139 | #define FF1 0x00008000 | ||
| 140 | #define PAGEOUT 0x00010000 /* SUNOS specific */ | ||
| 141 | #define WRAP 0x00020000 /* SUNOS specific */ | ||
| 142 | |||
| 143 | /* c_cflag bit meaning */ | ||
| 144 | #define CBAUD 0x0000100f | ||
| 145 | #define B0 0x00000000 /* hang up */ | ||
| 146 | #define B50 0x00000001 | ||
| 147 | #define B75 0x00000002 | ||
| 148 | #define B110 0x00000003 | ||
| 149 | #define B134 0x00000004 | ||
| 150 | #define B150 0x00000005 | ||
| 151 | #define B200 0x00000006 | ||
| 152 | #define B300 0x00000007 | ||
| 153 | #define B600 0x00000008 | ||
| 154 | #define B1200 0x00000009 | ||
| 155 | #define B1800 0x0000000a | ||
| 156 | #define B2400 0x0000000b | ||
| 157 | #define B4800 0x0000000c | ||
| 158 | #define B9600 0x0000000d | ||
| 159 | #define B19200 0x0000000e | ||
| 160 | #define B38400 0x0000000f | ||
| 161 | #define EXTA B19200 | ||
| 162 | #define EXTB B38400 | ||
| 163 | #define CSIZE 0x00000030 | ||
| 164 | #define CS5 0x00000000 | ||
| 165 | #define CS6 0x00000010 | ||
| 166 | #define CS7 0x00000020 | ||
| 167 | #define CS8 0x00000030 | ||
| 168 | #define CSTOPB 0x00000040 | ||
| 169 | #define CREAD 0x00000080 | ||
| 170 | #define PARENB 0x00000100 | ||
| 171 | #define PARODD 0x00000200 | ||
| 172 | #define HUPCL 0x00000400 | ||
| 173 | #define CLOCAL 0x00000800 | ||
| 174 | #define CBAUDEX 0x00001000 | ||
| 175 | /* We'll never see these speeds with the Zilogs, but for completeness... */ | ||
| 176 | #define BOTHER 0x00001000 | ||
| 177 | #define B57600 0x00001001 | ||
| 178 | #define B115200 0x00001002 | ||
| 179 | #define B230400 0x00001003 | ||
| 180 | #define B460800 0x00001004 | ||
| 181 | /* This is what we can do with the Zilogs. */ | ||
| 182 | #define B76800 0x00001005 | ||
| 183 | /* This is what we can do with the SAB82532. */ | ||
| 184 | #define B153600 0x00001006 | ||
| 185 | #define B307200 0x00001007 | ||
| 186 | #define B614400 0x00001008 | ||
| 187 | #define B921600 0x00001009 | ||
| 188 | /* And these are the rest... */ | ||
| 189 | #define B500000 0x0000100a | ||
| 190 | #define B576000 0x0000100b | ||
| 191 | #define B1000000 0x0000100c | ||
| 192 | #define B1152000 0x0000100d | ||
| 193 | #define B1500000 0x0000100e | ||
| 194 | #define B2000000 0x0000100f | ||
| 195 | /* These have totally bogus values and nobody uses them | ||
| 196 | so far. Later on we'd have to use say 0x10000x and | ||
| 197 | adjust CBAUD constant and drivers accordingly. | ||
| 198 | #define B2500000 0x00001010 | ||
| 199 | #define B3000000 0x00001011 | ||
| 200 | #define B3500000 0x00001012 | ||
| 201 | #define B4000000 0x00001013 */ | ||
| 202 | #define CIBAUD 0x100f0000 /* input baud rate (not used) */ | ||
| 203 | #define CMSPAR 0x40000000 /* mark or space (stick) parity */ | ||
| 204 | #define CRTSCTS 0x80000000 /* flow control */ | ||
| 205 | |||
| 206 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
| 207 | |||
| 208 | /* c_lflag bits */ | ||
| 209 | #define ISIG 0x00000001 | ||
| 210 | #define ICANON 0x00000002 | ||
| 211 | #define XCASE 0x00000004 | ||
| 212 | #define ECHO 0x00000008 | ||
| 213 | #define ECHOE 0x00000010 | ||
| 214 | #define ECHOK 0x00000020 | ||
| 215 | #define ECHONL 0x00000040 | ||
| 216 | #define NOFLSH 0x00000080 | ||
| 217 | #define TOSTOP 0x00000100 | ||
| 218 | #define ECHOCTL 0x00000200 | ||
| 219 | #define ECHOPRT 0x00000400 | ||
| 220 | #define ECHOKE 0x00000800 | ||
| 221 | #define DEFECHO 0x00001000 /* SUNOS thing, what is it? */ | ||
| 222 | #define FLUSHO 0x00002000 | ||
| 223 | #define PENDIN 0x00004000 | ||
| 224 | #define IEXTEN 0x00008000 | ||
| 225 | #define EXTPROC 0x00010000 | ||
| 226 | |||
| 227 | /* modem lines */ | ||
| 228 | #define TIOCM_LE 0x001 | ||
| 229 | #define TIOCM_DTR 0x002 | ||
| 230 | #define TIOCM_RTS 0x004 | ||
| 231 | #define TIOCM_ST 0x008 | ||
| 232 | #define TIOCM_SR 0x010 | ||
| 233 | #define TIOCM_CTS 0x020 | ||
| 234 | #define TIOCM_CAR 0x040 | ||
| 235 | #define TIOCM_RNG 0x080 | ||
| 236 | #define TIOCM_DSR 0x100 | ||
| 237 | #define TIOCM_CD TIOCM_CAR | ||
| 238 | #define TIOCM_RI TIOCM_RNG | ||
| 239 | #define TIOCM_OUT1 0x2000 | ||
| 240 | #define TIOCM_OUT2 0x4000 | ||
| 241 | #define TIOCM_LOOP 0x8000 | ||
| 242 | |||
| 243 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
| 244 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
| 245 | |||
| 246 | |||
| 247 | /* tcflow() and TCXONC use these */ | ||
| 248 | #define TCOOFF 0 | ||
| 249 | #define TCOON 1 | ||
| 250 | #define TCIOFF 2 | ||
| 251 | #define TCION 3 | ||
| 252 | |||
| 253 | /* tcflush() and TCFLSH use these */ | ||
| 254 | #define TCIFLUSH 0 | ||
| 255 | #define TCOFLUSH 1 | ||
| 256 | #define TCIOFLUSH 2 | ||
| 257 | |||
| 258 | /* tcsetattr uses these */ | ||
| 259 | #define TCSANOW 0 | ||
| 260 | #define TCSADRAIN 1 | ||
| 261 | #define TCSAFLUSH 2 | ||
| 262 | |||
| 263 | #endif /* _UAPI_SPARC_TERMBITS_H */ | ||
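The c_iflag/c_lflag bits and the TCSA* constants above are what the portable termios calls operate on. A minimal raw-mode helper using tcgetattr()/tcsetattr() follows; the flag choices are illustrative, not a complete cfmakeraw() replacement:

#include <termios.h>

/* Put the terminal on fd into a simple raw mode: no echo, no canonical
 * line editing, byte-at-a-time reads.
 */
int make_raw(int fd)
{
        struct termios t;

        if (tcgetattr(fd, &t) < 0)
                return -1;
        t.c_lflag &= ~(ICANON | ECHO | ISIG);
        t.c_iflag &= ~(IXON | ICRNL);
        t.c_cc[VMIN]  = 1;   /* return after one byte */
        t.c_cc[VTIME] = 0;   /* no inter-byte timeout */
        return tcsetattr(fd, TCSANOW, &t);
}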
diff --git a/arch/sparc/include/uapi/asm/termios.h b/arch/sparc/include/uapi/asm/termios.h new file mode 100644 index 000000000000..ea6f09e51e53 --- /dev/null +++ b/arch/sparc/include/uapi/asm/termios.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | #ifndef _UAPI_SPARC_TERMIOS_H | ||
| 2 | #define _UAPI_SPARC_TERMIOS_H | ||
| 3 | |||
| 4 | #include <asm/ioctls.h> | ||
| 5 | #include <asm/termbits.h> | ||
| 6 | |||
| 7 | #if defined(__KERNEL__) || defined(__DEFINE_BSD_TERMIOS) | ||
| 8 | struct sgttyb { | ||
| 9 | char sg_ispeed; | ||
| 10 | char sg_ospeed; | ||
| 11 | char sg_erase; | ||
| 12 | char sg_kill; | ||
| 13 | short sg_flags; | ||
| 14 | }; | ||
| 15 | |||
| 16 | struct tchars { | ||
| 17 | char t_intrc; | ||
| 18 | char t_quitc; | ||
| 19 | char t_startc; | ||
| 20 | char t_stopc; | ||
| 21 | char t_eofc; | ||
| 22 | char t_brkc; | ||
| 23 | }; | ||
| 24 | |||
| 25 | struct ltchars { | ||
| 26 | char t_suspc; | ||
| 27 | char t_dsuspc; | ||
| 28 | char t_rprntc; | ||
| 29 | char t_flushc; | ||
| 30 | char t_werasc; | ||
| 31 | char t_lnextc; | ||
| 32 | }; | ||
| 33 | #endif /* __KERNEL__ || __DEFINE_BSD_TERMIOS */ | ||
| 34 | |||
| 35 | struct winsize { | ||
| 36 | unsigned short ws_row; | ||
| 37 | unsigned short ws_col; | ||
| 38 | unsigned short ws_xpixel; | ||
| 39 | unsigned short ws_ypixel; | ||
| 40 | }; | ||
| 41 | |||
| 42 | |||
| 43 | #endif /* _UAPI_SPARC_TERMIOS_H */ | ||
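struct winsize above is the payload of the TIOCGWINSZ/TIOCSWINSZ ioctls (defined in the companion ioctls header). A minimal query of the current terminal's size:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        struct winsize ws;

        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
                printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
        return 0;
}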
diff --git a/arch/sparc/include/uapi/asm/traps.h b/arch/sparc/include/uapi/asm/traps.h new file mode 100644 index 000000000000..a4eceace6ccf --- /dev/null +++ b/arch/sparc/include/uapi/asm/traps.h | |||
| @@ -0,0 +1,120 @@ | |||
| 1 | /* | ||
| 2 | * traps.h: Format of entries for the Sparc trap table. | ||
| 3 | * | ||
| 4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef _UAPI_SPARC_TRAPS_H | ||
| 8 | #define _UAPI_SPARC_TRAPS_H | ||
| 9 | |||
| 10 | #define NUM_SPARC_TRAPS 255 | ||
| 11 | |||
| 12 | #ifndef __ASSEMBLY__ | ||
| 13 | #endif /* !(__ASSEMBLY__) */ | ||
| 14 | |||
| 15 | /* For patching the trap table at boot time, we need to know how to | ||
| 16 | * form various common Sparc instructions. Thus these macros... | ||
| 17 | */ | ||
| 18 | |||
| 19 | #define SPARC_MOV_CONST_L3(const) (0xa6102000 | (const&0xfff)) | ||
| 20 | |||
| 21 | /* The following assumes that the branch lies before the place we | ||
| 22 | * are branching to. This is the case for a trap vector... | ||
| 23 | * You have been warned. | ||
| 24 | */ | ||
| 25 | #define SPARC_BRANCH(dest_addr, inst_addr) \ | ||
| 26 | (0x10800000 | (((dest_addr-inst_addr)>>2)&0x3fffff)) | ||
| 27 | |||
| 28 | #define SPARC_RD_PSR_L0 (0xa1480000) | ||
| 29 | #define SPARC_RD_WIM_L3 (0xa7500000) | ||
| 30 | #define SPARC_NOP (0x01000000) | ||
| 31 | |||
| 32 | /* Various interesting trap levels. */ | ||
| 33 | /* First, hardware traps. */ | ||
| 34 | #define SP_TRAP_TFLT 0x1 /* Text fault */ | ||
| 35 | #define SP_TRAP_II 0x2 /* Illegal Instruction */ | ||
| 36 | #define SP_TRAP_PI 0x3 /* Privileged Instruction */ | ||
| 37 | #define SP_TRAP_FPD 0x4 /* Floating Point Disabled */ | ||
| 38 | #define SP_TRAP_WOVF 0x5 /* Window Overflow */ | ||
| 39 | #define SP_TRAP_WUNF 0x6 /* Window Underflow */ | ||
| 40 | #define SP_TRAP_MNA 0x7 /* Memory Address Unaligned */ | ||
| 41 | #define SP_TRAP_FPE 0x8 /* Floating Point Exception */ | ||
| 42 | #define SP_TRAP_DFLT 0x9 /* Data Fault */ | ||
| 43 | #define SP_TRAP_TOF 0xa /* Tag Overflow */ | ||
| 44 | #define SP_TRAP_WDOG 0xb /* Watchpoint Detected */ | ||
| 45 | #define SP_TRAP_IRQ1 0x11 /* IRQ level 1 */ | ||
| 46 | #define SP_TRAP_IRQ2 0x12 /* IRQ level 2 */ | ||
| 47 | #define SP_TRAP_IRQ3 0x13 /* IRQ level 3 */ | ||
| 48 | #define SP_TRAP_IRQ4 0x14 /* IRQ level 4 */ | ||
| 49 | #define SP_TRAP_IRQ5 0x15 /* IRQ level 5 */ | ||
| 50 | #define SP_TRAP_IRQ6 0x16 /* IRQ level 6 */ | ||
| 51 | #define SP_TRAP_IRQ7 0x17 /* IRQ level 7 */ | ||
| 52 | #define SP_TRAP_IRQ8 0x18 /* IRQ level 8 */ | ||
| 53 | #define SP_TRAP_IRQ9 0x19 /* IRQ level 9 */ | ||
| 54 | #define SP_TRAP_IRQ10 0x1a /* IRQ level 10 */ | ||
| 55 | #define SP_TRAP_IRQ11 0x1b /* IRQ level 11 */ | ||
| 56 | #define SP_TRAP_IRQ12 0x1c /* IRQ level 12 */ | ||
| 57 | #define SP_TRAP_IRQ13 0x1d /* IRQ level 13 */ | ||
| 58 | #define SP_TRAP_IRQ14 0x1e /* IRQ level 14 */ | ||
| 59 | #define SP_TRAP_IRQ15 0x1f /* IRQ level 15 Non-maskable */ | ||
| 60 | #define SP_TRAP_RACC 0x20 /* Register Access Error ??? */ | ||
| 61 | #define SP_TRAP_IACC 0x21 /* Instruction Access Error */ | ||
| 62 | #define SP_TRAP_CPDIS 0x24 /* Co-Processor Disabled */ | ||
| 63 | #define SP_TRAP_BADFL 0x25 /* Unimplemented Flush Instruction */ | ||
| 64 | #define SP_TRAP_CPEXP 0x28 /* Co-Processor Exception */ | ||
| 65 | #define SP_TRAP_DACC 0x29 /* Data Access Error */ | ||
| 66 | #define SP_TRAP_DIVZ 0x2a /* Divide By Zero */ | ||
| 67 | #define SP_TRAP_DSTORE 0x2b /* Data Store Error ??? */ | ||
| 68 | #define SP_TRAP_DMM 0x2c /* Data Access MMU Miss ??? */ | ||
| 69 | #define SP_TRAP_IMM 0x3c /* Instruction Access MMU Miss ??? */ | ||
| 70 | |||
| 71 | /* Now the Software Traps... */ | ||
| 72 | #define SP_TRAP_SUNOS 0x80 /* SunOS System Call */ | ||
| 73 | #define SP_TRAP_SBPT 0x81 /* Software Breakpoint */ | ||
| 74 | #define SP_TRAP_SDIVZ 0x82 /* Software Divide-by-Zero trap */ | ||
| 75 | #define SP_TRAP_FWIN 0x83 /* Flush Windows */ | ||
| 76 | #define SP_TRAP_CWIN 0x84 /* Clean Windows */ | ||
| 77 | #define SP_TRAP_RCHK 0x85 /* Range Check */ | ||
| 78 | #define SP_TRAP_FUNA 0x86 /* Fix Unaligned Access */ | ||
| 79 | #define SP_TRAP_IOWFL 0x87 /* Integer Overflow */ | ||
| 80 | #define SP_TRAP_SOLARIS 0x88 /* Solaris System Call */ | ||
| 81 | #define SP_TRAP_NETBSD 0x89 /* NetBSD System Call */ | ||
| 82 | #define SP_TRAP_LINUX 0x90 /* Linux System Call */ | ||
| 83 | |||
| 84 | /* Names used for compatibility with SunOS */ | ||
| 85 | #define ST_SYSCALL 0x00 | ||
| 86 | #define ST_BREAKPOINT 0x01 | ||
| 87 | #define ST_DIV0 0x02 | ||
| 88 | #define ST_FLUSH_WINDOWS 0x03 | ||
| 89 | #define ST_CLEAN_WINDOWS 0x04 | ||
| 90 | #define ST_RANGE_CHECK 0x05 | ||
| 91 | #define ST_FIX_ALIGN 0x06 | ||
| 92 | #define ST_INT_OVERFLOW 0x07 | ||
| 93 | |||
| 94 | /* Special traps... */ | ||
| 95 | #define SP_TRAP_KBPT1 0xfe /* KADB/PROM Breakpoint one */ | ||
| 96 | #define SP_TRAP_KBPT2 0xff /* KADB/PROM Breakpoint two */ | ||
| 97 | |||
| 98 | /* Handy Macros */ | ||
| 99 | /* Is this a trap we never expect to get? */ | ||
| 100 | #define BAD_TRAP_P(level) \ | ||
| 101 | ((level > SP_TRAP_WDOG && level < SP_TRAP_IRQ1) || \ | ||
| 102 | (level > SP_TRAP_IACC && level < SP_TRAP_CPDIS) || \ | ||
| 103 | (level > SP_TRAP_BADFL && level < SP_TRAP_CPEXP) || \ | ||
| 104 | (level > SP_TRAP_DMM && level < SP_TRAP_IMM) || \ | ||
| 105 | (level > SP_TRAP_IMM && level < SP_TRAP_SUNOS) || \ | ||
| 106 | (level > SP_TRAP_LINUX && level < SP_TRAP_KBPT1)) | ||
| 107 | |||
| 108 | /* Is this a Hardware trap? */ | ||
| 109 | #define HW_TRAP_P(level) ((level > 0) && (level < SP_TRAP_SUNOS)) | ||
| 110 | |||
| 111 | /* Is this a Software trap? */ | ||
| 112 | #define SW_TRAP_P(level) ((level >= SP_TRAP_SUNOS) && (level <= SP_TRAP_KBPT2)) | ||
| 113 | |||
| 114 | /* Is this a system call for some OS we know about? */ | ||
| 115 | #define SCALL_TRAP_P(level) ((level == SP_TRAP_SUNOS) || \ | ||
| 116 | (level == SP_TRAP_SOLARIS) || \ | ||
| 117 | (level == SP_TRAP_NETBSD) || \ | ||
| 118 | (level == SP_TRAP_LINUX)) | ||
| 119 | |||
| 120 | #endif /* _UAPI_SPARC_TRAPS_H */ | ||
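The classification macros above take a raw trap level, so the same header can be used from test or introspection code. A small sketch; it assumes a SPARC toolchain that exports this uapi header as <asm/traps.h>:

#include <stdio.h>
#include <asm/traps.h>   /* assumes a SPARC toolchain exporting this uapi header */

int main(void)
{
        unsigned int tt[] = { SP_TRAP_MNA, SP_TRAP_IRQ1, SP_TRAP_LINUX };

        for (unsigned int i = 0; i < sizeof(tt) / sizeof(tt[0]); i++)
                printf("tt=0x%02x hw=%d sw=%d syscall=%d\n", tt[i],
                       HW_TRAP_P(tt[i]), SW_TRAP_P(tt[i]), SCALL_TRAP_P(tt[i]));
        return 0;
}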
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/uapi/asm/types.h index 383d156cde9c..383d156cde9c 100644 --- a/arch/sparc/include/asm/types.h +++ b/arch/sparc/include/uapi/asm/types.h | |||
diff --git a/arch/sparc/include/asm/uctx.h b/arch/sparc/include/uapi/asm/uctx.h index dc937c75ffdd..dc937c75ffdd 100644 --- a/arch/sparc/include/asm/uctx.h +++ b/arch/sparc/include/uapi/asm/uctx.h | |||
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..8974ef7ae920 --- /dev/null +++ b/arch/sparc/include/uapi/asm/unistd.h | |||
| @@ -0,0 +1,422 @@ | |||
| 1 | /* | ||
| 2 | * System calls under the Sparc. | ||
| 3 | * | ||
| 4 | * Don't be scared by the ugly clobbers, it is the only way I can | ||
| 5 | * think of right now to force the arguments into fixed registers | ||
| 6 | * before the trap into the system call with gcc 'asm' statements. | ||
| 7 | * | ||
| 8 | * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) | ||
| 9 | * | ||
| 10 | * SunOS compatibility based upon preliminary work which is: | ||
| 11 | * | ||
| 12 | * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) | ||
| 13 | */ | ||
| 14 | #ifndef _UAPI_SPARC_UNISTD_H | ||
| 15 | #define _UAPI_SPARC_UNISTD_H | ||
| 16 | |||
| 17 | #ifndef __32bit_syscall_numbers__ | ||
| 18 | #ifndef __arch64__ | ||
| 19 | #define __32bit_syscall_numbers__ | ||
| 20 | #endif | ||
| 21 | #endif | ||
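The register/clobber gymnastics the file comment alludes to live in the non-uapi syscall macros; from plain userspace, the numbers below are usually handed to the libc syscall(2) wrapper instead. A minimal sketch using __NR_gettid, which the table below lists as 143:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* syscall(2) hides the architecture-specific trap details; __NR_gettid
         * comes from the table below.
         */
        long tid = syscall(__NR_gettid);

        printf("tid=%ld\n", tid);
        return 0;
}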
| 22 | |||
| 23 | #define __NR_restart_syscall 0 /* Linux Specific */ | ||
| 24 | #define __NR_exit 1 /* Common */ | ||
| 25 | #define __NR_fork 2 /* Common */ | ||
| 26 | #define __NR_read 3 /* Common */ | ||
| 27 | #define __NR_write 4 /* Common */ | ||
| 28 | #define __NR_open 5 /* Common */ | ||
| 29 | #define __NR_close 6 /* Common */ | ||
| 30 | #define __NR_wait4 7 /* Common */ | ||
| 31 | #define __NR_creat 8 /* Common */ | ||
| 32 | #define __NR_link 9 /* Common */ | ||
| 33 | #define __NR_unlink 10 /* Common */ | ||
| 34 | #define __NR_execv 11 /* SunOS Specific */ | ||
| 35 | #define __NR_chdir 12 /* Common */ | ||
| 36 | #define __NR_chown 13 /* Common */ | ||
| 37 | #define __NR_mknod 14 /* Common */ | ||
| 38 | #define __NR_chmod 15 /* Common */ | ||
| 39 | #define __NR_lchown 16 /* Common */ | ||
| 40 | #define __NR_brk 17 /* Common */ | ||
| 41 | #define __NR_perfctr 18 /* Performance counter operations */ | ||
| 42 | #define __NR_lseek 19 /* Common */ | ||
| 43 | #define __NR_getpid 20 /* Common */ | ||
| 44 | #define __NR_capget 21 /* Linux Specific */ | ||
| 45 | #define __NR_capset 22 /* Linux Specific */ | ||
| 46 | #define __NR_setuid 23 /* Implemented via setreuid in SunOS */ | ||
| 47 | #define __NR_getuid 24 /* Common */ | ||
| 48 | #define __NR_vmsplice 25 /* ENOSYS under SunOS */ | ||
| 49 | #define __NR_ptrace 26 /* Common */ | ||
| 50 | #define __NR_alarm 27 /* Implemented via setitimer in SunOS */ | ||
| 51 | #define __NR_sigaltstack 28 /* Common */ | ||
| 52 | #define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */ | ||
| 53 | #define __NR_utime 30 /* Implemented via utimes() under SunOS */ | ||
| 54 | #ifdef __32bit_syscall_numbers__ | ||
| 55 | #define __NR_lchown32 31 /* Linux sparc32 specific */ | ||
| 56 | #define __NR_fchown32 32 /* Linux sparc32 specific */ | ||
| 57 | #endif | ||
| 58 | #define __NR_access 33 /* Common */ | ||
| 59 | #define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */ | ||
| 60 | #ifdef __32bit_syscall_numbers__ | ||
| 61 | #define __NR_chown32 35 /* Linux sparc32 specific */ | ||
| 62 | #endif | ||
| 63 | #define __NR_sync 36 /* Common */ | ||
| 64 | #define __NR_kill 37 /* Common */ | ||
| 65 | #define __NR_stat 38 /* Common */ | ||
| 66 | #define __NR_sendfile 39 /* Linux Specific */ | ||
| 67 | #define __NR_lstat 40 /* Common */ | ||
| 68 | #define __NR_dup 41 /* Common */ | ||
| 69 | #define __NR_pipe 42 /* Common */ | ||
| 70 | #define __NR_times 43 /* Implemented via getrusage() in SunOS */ | ||
| 71 | #ifdef __32bit_syscall_numbers__ | ||
| 72 | #define __NR_getuid32 44 /* Linux sparc32 specific */ | ||
| 73 | #endif | ||
| 74 | #define __NR_umount2 45 /* Linux Specific */ | ||
| 75 | #define __NR_setgid 46 /* Implemented via setregid() in SunOS */ | ||
| 76 | #define __NR_getgid 47 /* Common */ | ||
| 77 | #define __NR_signal 48 /* Implemented via sigvec() in SunOS */ | ||
| 78 | #define __NR_geteuid 49 /* SunOS calls getuid() */ | ||
| 79 | #define __NR_getegid 50 /* SunOS calls getgid() */ | ||
| 80 | #define __NR_acct 51 /* Common */ | ||
| 81 | #ifdef __32bit_syscall_numbers__ | ||
| 82 | #define __NR_getgid32 53 /* Linux sparc32 specific */ | ||
| 83 | #else | ||
| 84 | #define __NR_memory_ordering 52 /* Linux Specific */ | ||
| 85 | #endif | ||
| 86 | #define __NR_ioctl 54 /* Common */ | ||
| 87 | #define __NR_reboot 55 /* Common */ | ||
| 88 | #ifdef __32bit_syscall_numbers__ | ||
| 89 | #define __NR_mmap2 56 /* Linux sparc32 Specific */ | ||
| 90 | #endif | ||
| 91 | #define __NR_symlink 57 /* Common */ | ||
| 92 | #define __NR_readlink 58 /* Common */ | ||
| 93 | #define __NR_execve 59 /* Common */ | ||
| 94 | #define __NR_umask 60 /* Common */ | ||
| 95 | #define __NR_chroot 61 /* Common */ | ||
| 96 | #define __NR_fstat 62 /* Common */ | ||
| 97 | #define __NR_fstat64 63 /* Linux Specific */ | ||
| 98 | #define __NR_getpagesize 64 /* Common */ | ||
| 99 | #define __NR_msync 65 /* Common in newer 1.3.x revs... */ | ||
| 100 | #define __NR_vfork 66 /* Common */ | ||
| 101 | #define __NR_pread64 67 /* Linux Specific */ | ||
| 102 | #define __NR_pwrite64 68 /* Linux Specific */ | ||
| 103 | #ifdef __32bit_syscall_numbers__ | ||
| 104 | #define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */ | ||
| 105 | #define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */ | ||
| 106 | #endif | ||
| 107 | #define __NR_mmap 71 /* Common */ | ||
| 108 | #ifdef __32bit_syscall_numbers__ | ||
| 109 | #define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */ | ||
| 110 | #endif | ||
| 111 | #define __NR_munmap 73 /* Common */ | ||
| 112 | #define __NR_mprotect 74 /* Common */ | ||
| 113 | #define __NR_madvise 75 /* Common */ | ||
| 114 | #define __NR_vhangup 76 /* Common */ | ||
| 115 | #ifdef __32bit_syscall_numbers__ | ||
| 116 | #define __NR_truncate64 77 /* Linux sparc32 Specific */ | ||
| 117 | #endif | ||
| 118 | #define __NR_mincore 78 /* Common */ | ||
| 119 | #define __NR_getgroups 79 /* Common */ | ||
| 120 | #define __NR_setgroups 80 /* Common */ | ||
| 121 | #define __NR_getpgrp 81 /* Common */ | ||
| 122 | #ifdef __32bit_syscall_numbers__ | ||
| 123 | #define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */ | ||
| 124 | #endif | ||
| 125 | #define __NR_setitimer 83 /* Common */ | ||
| 126 | #ifdef __32bit_syscall_numbers__ | ||
| 127 | #define __NR_ftruncate64 84 /* Linux sparc32 Specific */ | ||
| 128 | #endif | ||
| 129 | #define __NR_swapon 85 /* Common */ | ||
| 130 | #define __NR_getitimer 86 /* Common */ | ||
| 131 | #ifdef __32bit_syscall_numbers__ | ||
| 132 | #define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */ | ||
| 133 | #endif | ||
| 134 | #define __NR_sethostname 88 /* Common */ | ||
| 135 | #ifdef __32bit_syscall_numbers__ | ||
| 136 | #define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */ | ||
| 137 | #endif | ||
| 138 | #define __NR_dup2 90 /* Common */ | ||
| 139 | #ifdef __32bit_syscall_numbers__ | ||
| 140 | #define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */ | ||
| 141 | #endif | ||
| 142 | #define __NR_fcntl 92 /* Common */ | ||
| 143 | #define __NR_select 93 /* Common */ | ||
| 144 | #ifdef __32bit_syscall_numbers__ | ||
| 145 | #define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */ | ||
| 146 | #endif | ||
| 147 | #define __NR_fsync 95 /* Common */ | ||
| 148 | #define __NR_setpriority 96 /* Common */ | ||
| 149 | #define __NR_socket 97 /* Common */ | ||
| 150 | #define __NR_connect 98 /* Common */ | ||
| 151 | #define __NR_accept 99 /* Common */ | ||
| 152 | #define __NR_getpriority 100 /* Common */ | ||
| 153 | #define __NR_rt_sigreturn 101 /* Linux Specific */ | ||
| 154 | #define __NR_rt_sigaction 102 /* Linux Specific */ | ||
| 155 | #define __NR_rt_sigprocmask 103 /* Linux Specific */ | ||
| 156 | #define __NR_rt_sigpending 104 /* Linux Specific */ | ||
| 157 | #define __NR_rt_sigtimedwait 105 /* Linux Specific */ | ||
| 158 | #define __NR_rt_sigqueueinfo 106 /* Linux Specific */ | ||
| 159 | #define __NR_rt_sigsuspend 107 /* Linux Specific */ | ||
| 160 | #ifdef __32bit_syscall_numbers__ | ||
| 161 | #define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */ | ||
| 162 | #define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */ | ||
| 163 | #define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */ | ||
| 164 | #define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */ | ||
| 165 | #define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */ | ||
| 166 | #else | ||
| 167 | #define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */ | ||
| 168 | #define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */ | ||
| 169 | #define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */ | ||
| 170 | #define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */ | ||
| 171 | #endif | ||
| 172 | #define __NR_recvmsg 113 /* Common */ | ||
| 173 | #define __NR_sendmsg 114 /* Common */ | ||
| 174 | #ifdef __32bit_syscall_numbers__ | ||
| 175 | #define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */ | ||
| 176 | #endif | ||
| 177 | #define __NR_gettimeofday 116 /* Common */ | ||
| 178 | #define __NR_getrusage 117 /* Common */ | ||
| 179 | #define __NR_getsockopt 118 /* Common */ | ||
| 180 | #define __NR_getcwd 119 /* Linux Specific */ | ||
| 181 | #define __NR_readv 120 /* Common */ | ||
| 182 | #define __NR_writev 121 /* Common */ | ||
| 183 | #define __NR_settimeofday 122 /* Common */ | ||
| 184 | #define __NR_fchown 123 /* Common */ | ||
| 185 | #define __NR_fchmod 124 /* Common */ | ||
| 186 | #define __NR_recvfrom 125 /* Common */ | ||
| 187 | #define __NR_setreuid 126 /* Common */ | ||
| 188 | #define __NR_setregid 127 /* Common */ | ||
| 189 | #define __NR_rename 128 /* Common */ | ||
| 190 | #define __NR_truncate 129 /* Common */ | ||
| 191 | #define __NR_ftruncate 130 /* Common */ | ||
| 192 | #define __NR_flock 131 /* Common */ | ||
| 193 | #define __NR_lstat64 132 /* Linux Specific */ | ||
| 194 | #define __NR_sendto 133 /* Common */ | ||
| 195 | #define __NR_shutdown 134 /* Common */ | ||
| 196 | #define __NR_socketpair 135 /* Common */ | ||
| 197 | #define __NR_mkdir 136 /* Common */ | ||
| 198 | #define __NR_rmdir 137 /* Common */ | ||
| 199 | #define __NR_utimes 138 /* SunOS Specific */ | ||
| 200 | #define __NR_stat64 139 /* Linux Specific */ | ||
| 201 | #define __NR_sendfile64 140 /* adjtime under SunOS */ | ||
| 202 | #define __NR_getpeername 141 /* Common */ | ||
| 203 | #define __NR_futex 142 /* gethostid under SunOS */ | ||
| 204 | #define __NR_gettid 143 /* ENOSYS under SunOS */ | ||
| 205 | #define __NR_getrlimit 144 /* Common */ | ||
| 206 | #define __NR_setrlimit 145 /* Common */ | ||
| 207 | #define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */ | ||
| 208 | #define __NR_prctl 147 /* ENOSYS under SunOS */ | ||
| 209 | #define __NR_pciconfig_read 148 /* ENOSYS under SunOS */ | ||
| 210 | #define __NR_pciconfig_write 149 /* ENOSYS under SunOS */ | ||
| 211 | #define __NR_getsockname 150 /* Common */ | ||
| 212 | #define __NR_inotify_init 151 /* Linux specific */ | ||
| 213 | #define __NR_inotify_add_watch 152 /* Linux specific */ | ||
| 214 | #define __NR_poll 153 /* Common */ | ||
| 215 | #define __NR_getdents64 154 /* Linux specific */ | ||
| 216 | #ifdef __32bit_syscall_numbers__ | ||
| 217 | #define __NR_fcntl64 155 /* Linux sparc32 Specific */ | ||
| 218 | #endif | ||
| 219 | #define __NR_inotify_rm_watch 156 /* Linux specific */ | ||
| 220 | #define __NR_statfs 157 /* Common */ | ||
| 221 | #define __NR_fstatfs 158 /* Common */ | ||
| 222 | #define __NR_umount 159 /* Common */ | ||
| 223 | #define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */ | ||
| 224 | #define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */ | ||
| 225 | #define __NR_getdomainname 162 /* SunOS Specific */ | ||
| 226 | #define __NR_setdomainname 163 /* Common */ | ||
| 227 | #ifndef __32bit_syscall_numbers__ | ||
| 228 | #define __NR_utrap_install 164 /* SYSV ABI/v9 required */ | ||
| 229 | #endif | ||
| 230 | #define __NR_quotactl 165 /* Common */ | ||
| 231 | #define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */ | ||
| 232 | #define __NR_mount 167 /* Common */ | ||
| 233 | #define __NR_ustat 168 /* Common */ | ||
| 234 | #define __NR_setxattr 169 /* SunOS: semsys */ | ||
| 235 | #define __NR_lsetxattr 170 /* SunOS: msgsys */ | ||
| 236 | #define __NR_fsetxattr 171 /* SunOS: shmsys */ | ||
| 237 | #define __NR_getxattr 172 /* SunOS: auditsys */ | ||
| 238 | #define __NR_lgetxattr 173 /* SunOS: rfssys */ | ||
| 239 | #define __NR_getdents 174 /* Common */ | ||
| 240 | #define __NR_setsid 175 /* Common */ | ||
| 241 | #define __NR_fchdir 176 /* Common */ | ||
| 242 | #define __NR_fgetxattr 177 /* SunOS: fchroot */ | ||
| 243 | #define __NR_listxattr 178 /* SunOS: vpixsys */ | ||
| 244 | #define __NR_llistxattr 179 /* SunOS: aioread */ | ||
| 245 | #define __NR_flistxattr 180 /* SunOS: aiowrite */ | ||
| 246 | #define __NR_removexattr 181 /* SunOS: aiowait */ | ||
| 247 | #define __NR_lremovexattr 182 /* SunOS: aiocancel */ | ||
| 248 | #define __NR_sigpending 183 /* Common */ | ||
| 249 | #define __NR_query_module 184 /* Linux Specific */ | ||
| 250 | #define __NR_setpgid 185 /* Common */ | ||
| 251 | #define __NR_fremovexattr 186 /* SunOS: pathconf */ | ||
| 252 | #define __NR_tkill 187 /* SunOS: fpathconf */ | ||
| 253 | #define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */ | ||
| 254 | #define __NR_uname 189 /* Linux Specific */ | ||
| 255 | #define __NR_init_module 190 /* Linux Specific */ | ||
| 256 | #define __NR_personality 191 /* Linux Specific */ | ||
| 257 | #define __NR_remap_file_pages 192 /* Linux Specific */ | ||
| 258 | #define __NR_epoll_create 193 /* Linux Specific */ | ||
| 259 | #define __NR_epoll_ctl 194 /* Linux Specific */ | ||
| 260 | #define __NR_epoll_wait 195 /* Linux Specific */ | ||
| 261 | #define __NR_ioprio_set 196 /* Linux Specific */ | ||
| 262 | #define __NR_getppid 197 /* Linux Specific */ | ||
| 263 | #define __NR_sigaction 198 /* Linux Specific */ | ||
| 264 | #define __NR_sgetmask 199 /* Linux Specific */ | ||
| 265 | #define __NR_ssetmask 200 /* Linux Specific */ | ||
| 266 | #define __NR_sigsuspend 201 /* Linux Specific */ | ||
| 267 | #define __NR_oldlstat 202 /* Linux Specific */ | ||
| 268 | #define __NR_uselib 203 /* Linux Specific */ | ||
| 269 | #define __NR_readdir 204 /* Linux Specific */ | ||
| 270 | #define __NR_readahead 205 /* Linux Specific */ | ||
| 271 | #define __NR_socketcall 206 /* Linux Specific */ | ||
| 272 | #define __NR_syslog 207 /* Linux Specific */ | ||
| 273 | #define __NR_lookup_dcookie 208 /* Linux Specific */ | ||
| 274 | #define __NR_fadvise64 209 /* Linux Specific */ | ||
| 275 | #define __NR_fadvise64_64 210 /* Linux Specific */ | ||
| 276 | #define __NR_tgkill 211 /* Linux Specific */ | ||
| 277 | #define __NR_waitpid 212 /* Linux Specific */ | ||
| 278 | #define __NR_swapoff 213 /* Linux Specific */ | ||
| 279 | #define __NR_sysinfo 214 /* Linux Specific */ | ||
| 280 | #define __NR_ipc 215 /* Linux Specific */ | ||
| 281 | #define __NR_sigreturn 216 /* Linux Specific */ | ||
| 282 | #define __NR_clone 217 /* Linux Specific */ | ||
| 283 | #define __NR_ioprio_get 218 /* Linux Specific */ | ||
| 284 | #define __NR_adjtimex 219 /* Linux Specific */ | ||
| 285 | #define __NR_sigprocmask 220 /* Linux Specific */ | ||
| 286 | #define __NR_create_module 221 /* Linux Specific */ | ||
| 287 | #define __NR_delete_module 222 /* Linux Specific */ | ||
| 288 | #define __NR_get_kernel_syms 223 /* Linux Specific */ | ||
| 289 | #define __NR_getpgid 224 /* Linux Specific */ | ||
| 290 | #define __NR_bdflush 225 /* Linux Specific */ | ||
| 291 | #define __NR_sysfs 226 /* Linux Specific */ | ||
| 292 | #define __NR_afs_syscall 227 /* Linux Specific */ | ||
| 293 | #define __NR_setfsuid 228 /* Linux Specific */ | ||
| 294 | #define __NR_setfsgid 229 /* Linux Specific */ | ||
| 295 | #define __NR__newselect 230 /* Linux Specific */ | ||
| 296 | #ifdef __32bit_syscall_numbers__ | ||
| 297 | #define __NR_time 231 /* Linux Specific */ | ||
| 298 | #else | ||
| 299 | #endif | ||
| 300 | #define __NR_splice 232 /* Linux Specific */ | ||
| 301 | #define __NR_stime 233 /* Linux Specific */ | ||
| 302 | #define __NR_statfs64 234 /* Linux Specific */ | ||
| 303 | #define __NR_fstatfs64 235 /* Linux Specific */ | ||
| 304 | #define __NR__llseek 236 /* Linux Specific */ | ||
| 305 | #define __NR_mlock 237 | ||
| 306 | #define __NR_munlock 238 | ||
| 307 | #define __NR_mlockall 239 | ||
| 308 | #define __NR_munlockall 240 | ||
| 309 | #define __NR_sched_setparam 241 | ||
| 310 | #define __NR_sched_getparam 242 | ||
| 311 | #define __NR_sched_setscheduler 243 | ||
| 312 | #define __NR_sched_getscheduler 244 | ||
| 313 | #define __NR_sched_yield 245 | ||
| 314 | #define __NR_sched_get_priority_max 246 | ||
| 315 | #define __NR_sched_get_priority_min 247 | ||
| 316 | #define __NR_sched_rr_get_interval 248 | ||
| 317 | #define __NR_nanosleep 249 | ||
| 318 | #define __NR_mremap 250 | ||
| 319 | #define __NR__sysctl 251 | ||
| 320 | #define __NR_getsid 252 | ||
| 321 | #define __NR_fdatasync 253 | ||
| 322 | #define __NR_nfsservctl 254 | ||
| 323 | #define __NR_sync_file_range 255 | ||
| 324 | #define __NR_clock_settime 256 | ||
| 325 | #define __NR_clock_gettime 257 | ||
| 326 | #define __NR_clock_getres 258 | ||
| 327 | #define __NR_clock_nanosleep 259 | ||
| 328 | #define __NR_sched_getaffinity 260 | ||
| 329 | #define __NR_sched_setaffinity 261 | ||
| 330 | #define __NR_timer_settime 262 | ||
| 331 | #define __NR_timer_gettime 263 | ||
| 332 | #define __NR_timer_getoverrun 264 | ||
| 333 | #define __NR_timer_delete 265 | ||
| 334 | #define __NR_timer_create 266 | ||
| 335 | /* #define __NR_vserver 267 Reserved for VSERVER */ | ||
| 336 | #define __NR_io_setup 268 | ||
| 337 | #define __NR_io_destroy 269 | ||
| 338 | #define __NR_io_submit 270 | ||
| 339 | #define __NR_io_cancel 271 | ||
| 340 | #define __NR_io_getevents 272 | ||
| 341 | #define __NR_mq_open 273 | ||
| 342 | #define __NR_mq_unlink 274 | ||
| 343 | #define __NR_mq_timedsend 275 | ||
| 344 | #define __NR_mq_timedreceive 276 | ||
| 345 | #define __NR_mq_notify 277 | ||
| 346 | #define __NR_mq_getsetattr 278 | ||
| 347 | #define __NR_waitid 279 | ||
| 348 | #define __NR_tee 280 | ||
| 349 | #define __NR_add_key 281 | ||
| 350 | #define __NR_request_key 282 | ||
| 351 | #define __NR_keyctl 283 | ||
| 352 | #define __NR_openat 284 | ||
| 353 | #define __NR_mkdirat 285 | ||
| 354 | #define __NR_mknodat 286 | ||
| 355 | #define __NR_fchownat 287 | ||
| 356 | #define __NR_futimesat 288 | ||
| 357 | #define __NR_fstatat64 289 | ||
| 358 | #define __NR_unlinkat 290 | ||
| 359 | #define __NR_renameat 291 | ||
| 360 | #define __NR_linkat 292 | ||
| 361 | #define __NR_symlinkat 293 | ||
| 362 | #define __NR_readlinkat 294 | ||
| 363 | #define __NR_fchmodat 295 | ||
| 364 | #define __NR_faccessat 296 | ||
| 365 | #define __NR_pselect6 297 | ||
| 366 | #define __NR_ppoll 298 | ||
| 367 | #define __NR_unshare 299 | ||
| 368 | #define __NR_set_robust_list 300 | ||
| 369 | #define __NR_get_robust_list 301 | ||
| 370 | #define __NR_migrate_pages 302 | ||
| 371 | #define __NR_mbind 303 | ||
| 372 | #define __NR_get_mempolicy 304 | ||
| 373 | #define __NR_set_mempolicy 305 | ||
| 374 | #define __NR_kexec_load 306 | ||
| 375 | #define __NR_move_pages 307 | ||
| 376 | #define __NR_getcpu 308 | ||
| 377 | #define __NR_epoll_pwait 309 | ||
| 378 | #define __NR_utimensat 310 | ||
| 379 | #define __NR_signalfd 311 | ||
| 380 | #define __NR_timerfd_create 312 | ||
| 381 | #define __NR_eventfd 313 | ||
| 382 | #define __NR_fallocate 314 | ||
| 383 | #define __NR_timerfd_settime 315 | ||
| 384 | #define __NR_timerfd_gettime 316 | ||
| 385 | #define __NR_signalfd4 317 | ||
| 386 | #define __NR_eventfd2 318 | ||
| 387 | #define __NR_epoll_create1 319 | ||
| 388 | #define __NR_dup3 320 | ||
| 389 | #define __NR_pipe2 321 | ||
| 390 | #define __NR_inotify_init1 322 | ||
| 391 | #define __NR_accept4 323 | ||
| 392 | #define __NR_preadv 324 | ||
| 393 | #define __NR_pwritev 325 | ||
| 394 | #define __NR_rt_tgsigqueueinfo 326 | ||
| 395 | #define __NR_perf_event_open 327 | ||
| 396 | #define __NR_recvmmsg 328 | ||
| 397 | #define __NR_fanotify_init 329 | ||
| 398 | #define __NR_fanotify_mark 330 | ||
| 399 | #define __NR_prlimit64 331 | ||
| 400 | #define __NR_name_to_handle_at 332 | ||
| 401 | #define __NR_open_by_handle_at 333 | ||
| 402 | #define __NR_clock_adjtime 334 | ||
| 403 | #define __NR_syncfs 335 | ||
| 404 | #define __NR_sendmmsg 336 | ||
| 405 | #define __NR_setns 337 | ||
| 406 | #define __NR_process_vm_readv 338 | ||
| 407 | #define __NR_process_vm_writev 339 | ||
| 408 | |||
| 409 | #define NR_syscalls 340 | ||
| 410 | |||
| 411 | #ifdef __32bit_syscall_numbers__ | ||
| 412 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | ||
| 413 | * it never had the plain ones and there is no value to adding those | ||
| 414 | * old versions into the syscall table. | ||
| 415 | */ | ||
| 416 | #define __IGNORE_setresuid | ||
| 417 | #define __IGNORE_getresuid | ||
| 418 | #define __IGNORE_setresgid | ||
| 419 | #define __IGNORE_getresgid | ||
| 420 | #endif | ||
| 421 | |||
| 422 | #endif /* _UAPI_SPARC_UNISTD_H */ | ||
diff --git a/arch/sparc/include/asm/utrap.h b/arch/sparc/include/uapi/asm/utrap.h index b10e527c22d9..b10e527c22d9 100644 --- a/arch/sparc/include/asm/utrap.h +++ b/arch/sparc/include/uapi/asm/utrap.h | |||
diff --git a/arch/sparc/include/asm/watchdog.h b/arch/sparc/include/uapi/asm/watchdog.h index 5baf2d3919cf..5baf2d3919cf 100644 --- a/arch/sparc/include/asm/watchdog.h +++ b/arch/sparc/include/uapi/asm/watchdog.h | |||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d06ea2950dd9..677cd6e4e1a1 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -208,6 +208,16 @@ config SIRF_DMA | |||
| 208 | help | 208 | help |
| 209 | Enable support for the CSR SiRFprimaII DMA engine. | 209 | Enable support for the CSR SiRFprimaII DMA engine. |
| 210 | 210 | ||
| 211 | config TI_EDMA | ||
| 212 | tristate "TI EDMA support" | ||
| 213 | depends on ARCH_DAVINCI | ||
| 214 | select DMA_ENGINE | ||
| 215 | select DMA_VIRTUAL_CHANNELS | ||
| 216 | default n | ||
| 217 | help | ||
| 218 | Enable support for the TI EDMA controller. This DMA | ||
| 219 | engine is found on TI DaVinci and AM33xx parts. | ||
| 220 | |||
| 211 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 221 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
| 212 | bool | 222 | bool |
| 213 | 223 | ||
| @@ -292,6 +302,13 @@ config DMA_OMAP | |||
| 292 | select DMA_ENGINE | 302 | select DMA_ENGINE |
| 293 | select DMA_VIRTUAL_CHANNELS | 303 | select DMA_VIRTUAL_CHANNELS |
| 294 | 304 | ||
| 305 | config MMP_PDMA | ||
| 306 | bool "MMP PDMA support" | ||
| 307 | depends on (ARCH_MMP || ARCH_PXA) | ||
| 308 | select DMA_ENGINE | ||
| 309 | help | ||
| 310 | Support the MMP PDMA engine for PXA and MMP platform. | ||
| 311 | |||
| 295 | config DMA_ENGINE | 312 | config DMA_ENGINE |
| 296 | bool | 313 | bool |
| 297 | 314 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 4cf6b128ab9a..7428feaa8705 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
| @@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o | |||
| 23 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o | 23 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o |
| 24 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 24 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
| 25 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 25 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
| 26 | obj-$(CONFIG_TI_EDMA) += edma.o | ||
| 26 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 27 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
| 27 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 28 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
| 28 | obj-$(CONFIG_PL330_DMA) += pl330.o | 29 | obj-$(CONFIG_PL330_DMA) += pl330.o |
| @@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | |||
| 32 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | 33 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o |
| 33 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o | 34 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o |
| 34 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o | 35 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o |
| 36 | obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o | ||
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 6fbeebb9486f..d1cc5791476b 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
| @@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 1892 | pl08x->pd = dev_get_platdata(&adev->dev); | 1892 | pl08x->pd = dev_get_platdata(&adev->dev); |
| 1893 | if (!pl08x->pd) { | 1893 | if (!pl08x->pd) { |
| 1894 | dev_err(&adev->dev, "no platform data supplied\n"); | 1894 | dev_err(&adev->dev, "no platform data supplied\n"); |
| 1895 | ret = -EINVAL; | ||
| 1895 | goto out_no_platdata; | 1896 | goto out_no_platdata; |
| 1896 | } | 1897 | } |
| 1897 | 1898 | ||
| @@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 1943 | dev_err(&adev->dev, "%s failed to allocate " | 1944 | dev_err(&adev->dev, "%s failed to allocate " |
| 1944 | "physical channel holders\n", | 1945 | "physical channel holders\n", |
| 1945 | __func__); | 1946 | __func__); |
| 1947 | ret = -ENOMEM; | ||
| 1946 | goto out_no_phychans; | 1948 | goto out_no_phychans; |
| 1947 | } | 1949 | } |
| 1948 | 1950 | ||
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index d3c5a5a88f1e..c4b0eb3cde81 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
| @@ -36,12 +36,22 @@ | |||
| 36 | * which does not support descriptor writeback. | 36 | * which does not support descriptor writeback. |
| 37 | */ | 37 | */ |
| 38 | 38 | ||
| 39 | static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) | ||
| 40 | { | ||
| 41 | return slave ? slave->dst_master : 0; | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | ||
| 45 | { | ||
| 46 | return slave ? slave->src_master : 1; | ||
| 47 | } | ||
| 48 | |||
| 39 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | 49 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
| 40 | struct dw_dma_slave *__slave = (_chan->private); \ | 50 | struct dw_dma_slave *__slave = (_chan->private); \ |
| 41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | 51 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
| 42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | 52 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
| 43 | int _dms = __slave ? __slave->dst_master : 0; \ | 53 | int _dms = dwc_get_dms(__slave); \ |
| 44 | int _sms = __slave ? __slave->src_master : 1; \ | 54 | int _sms = dwc_get_sms(__slave); \ |
| 45 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | 55 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ |
| 46 | DW_DMA_MSIZE_16; \ | 56 | DW_DMA_MSIZE_16; \ |
| 47 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | 57 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ |
| @@ -56,16 +66,6 @@ | |||
| 56 | }) | 66 | }) |
| 57 | 67 | ||
| 58 | /* | 68 | /* |
| 59 | * This is configuration-dependent and usually a funny size like 4095. | ||
| 60 | * | ||
| 61 | * Note that this is a transfer count, i.e. if we transfer 32-bit | ||
| 62 | * words, we can do 16380 bytes per descriptor. | ||
| 63 | * | ||
| 64 | * This parameter is also system-specific. | ||
| 65 | */ | ||
| 66 | #define DWC_MAX_COUNT 4095U | ||
| 67 | |||
| 68 | /* | ||
| 69 | * Number of descriptors to allocate for each channel. This should be | 69 | * Number of descriptors to allocate for each channel. This should be |
| 70 | * made configurable somehow; preferably, the clients (at least the | 70 | * made configurable somehow; preferably, the clients (at least the |
| 71 | * ones using slave transfers) should be able to give us a hint. | 71 | * ones using slave transfers) should be able to give us a hint. |
| @@ -177,6 +177,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
| 177 | 177 | ||
| 178 | cfghi = dws->cfg_hi; | 178 | cfghi = dws->cfg_hi; |
| 179 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | 179 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
| 180 | } else { | ||
| 181 | if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) | ||
| 182 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); | ||
| 183 | else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) | ||
| 184 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); | ||
| 180 | } | 185 | } |
| 181 | 186 | ||
| 182 | channel_writel(dwc, CFG_LO, cfglo); | 187 | channel_writel(dwc, CFG_LO, cfglo); |
| @@ -206,7 +211,7 @@ static inline unsigned int dwc_fast_fls(unsigned long long v) | |||
| 206 | return 0; | 211 | return 0; |
| 207 | } | 212 | } |
| 208 | 213 | ||
| 209 | static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | 214 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
| 210 | { | 215 | { |
| 211 | dev_err(chan2dev(&dwc->chan), | 216 | dev_err(chan2dev(&dwc->chan), |
| 212 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 217 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
| @@ -227,10 +232,29 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
| 227 | 232 | ||
| 228 | /*----------------------------------------------------------------------*/ | 233 | /*----------------------------------------------------------------------*/ |
| 229 | 234 | ||
| 235 | /* Perform single block transfer */ | ||
| 236 | static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | ||
| 237 | struct dw_desc *desc) | ||
| 238 | { | ||
| 239 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 240 | u32 ctllo; | ||
| 241 | |||
| 242 | /* Software emulation of LLP mode relies on interrupts to continue | ||
| 243 | * multi block transfer. */ | ||
| 244 | ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; | ||
| 245 | |||
| 246 | channel_writel(dwc, SAR, desc->lli.sar); | ||
| 247 | channel_writel(dwc, DAR, desc->lli.dar); | ||
| 248 | channel_writel(dwc, CTL_LO, ctllo); | ||
| 249 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | ||
| 250 | channel_set_bit(dw, CH_EN, dwc->mask); | ||
| 251 | } | ||
| 252 | |||
| 230 | /* Called with dwc->lock held and bh disabled */ | 253 | /* Called with dwc->lock held and bh disabled */ |
| 231 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 254 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
| 232 | { | 255 | { |
| 233 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 256 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
| 257 | unsigned long was_soft_llp; | ||
| 234 | 258 | ||
| 235 | /* ASSERT: channel is idle */ | 259 | /* ASSERT: channel is idle */ |
| 236 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 260 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
| @@ -242,6 +266,26 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
| 242 | return; | 266 | return; |
| 243 | } | 267 | } |
| 244 | 268 | ||
| 269 | if (dwc->nollp) { | ||
| 270 | was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, | ||
| 271 | &dwc->flags); | ||
| 272 | if (was_soft_llp) { | ||
| 273 | dev_err(chan2dev(&dwc->chan), | ||
| 274 | "BUG: Attempted to start new LLP transfer " | ||
| 275 | "inside ongoing one\n"); | ||
| 276 | return; | ||
| 277 | } | ||
| 278 | |||
| 279 | dwc_initialize(dwc); | ||
| 280 | |||
| 281 | dwc->tx_list = &first->tx_list; | ||
| 282 | dwc->tx_node_active = first->tx_list.next; | ||
| 283 | |||
| 284 | dwc_do_single_block(dwc, first); | ||
| 285 | |||
| 286 | return; | ||
| 287 | } | ||
| 288 | |||
| 245 | dwc_initialize(dwc); | 289 | dwc_initialize(dwc); |
| 246 | 290 | ||
| 247 | channel_writel(dwc, LLP, first->txd.phys); | 291 | channel_writel(dwc, LLP, first->txd.phys); |
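The soft-LLP path added above works together with the tasklet change in the next hunk: dwc_dostart() programs only the first block and records the descriptor list, and each block-complete interrupt programs the next node until the list is exhausted. Below is a stand-alone model of that handshake; every name in it is invented for illustration and each controller access is reduced to a printf.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct desc { struct desc *next; unsigned int len; };

struct chan {
	bool soft_llp;		/* set while an emulated LLP transfer runs */
	struct desc *active;	/* descriptor currently in the hardware */
};

static void start_block(struct chan *c, struct desc *d)
{
	c->active = d;
	printf("program one block of %u bytes\n", d->len); /* stands in for channel_writel() */
}

/* dostart equivalent: kick off the first block and remember we are chaining */
static void start_chain(struct chan *c, struct desc *first)
{
	c->soft_llp = true;
	start_block(c, first);
}

/* tasklet equivalent: on each block completion, either chain or finish */
static void block_complete_irq(struct chan *c)
{
	if (c->active->next)
		start_block(c, c->active->next);
	else
		c->soft_llp = false;	/* whole list done */
}

int main(void)
{
	struct desc d2 = { NULL, 512 }, d1 = { &d2, 4095 };
	struct chan c = { 0 };

	start_chain(&c, &d1);
	block_complete_irq(&c);		/* advances to d2 */
	block_complete_irq(&c);		/* finishes, clears soft_llp */
	return c.soft_llp ? 1 : 0;
}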
| @@ -553,8 +597,36 @@ static void dw_dma_tasklet(unsigned long data) | |||
| 553 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); | 597 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
| 554 | else if (status_err & (1 << i)) | 598 | else if (status_err & (1 << i)) |
| 555 | dwc_handle_error(dw, dwc); | 599 | dwc_handle_error(dw, dwc); |
| 556 | else if (status_xfer & (1 << i)) | 600 | else if (status_xfer & (1 << i)) { |
| 601 | unsigned long flags; | ||
| 602 | |||
| 603 | spin_lock_irqsave(&dwc->lock, flags); | ||
| 604 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
| 605 | if (dwc->tx_node_active != dwc->tx_list) { | ||
| 606 | struct dw_desc *desc = | ||
| 607 | list_entry(dwc->tx_node_active, | ||
| 608 | struct dw_desc, | ||
| 609 | desc_node); | ||
| 610 | |||
| 611 | dma_writel(dw, CLEAR.XFER, dwc->mask); | ||
| 612 | |||
| 613 | /* move pointer to next descriptor */ | ||
| 614 | dwc->tx_node_active = | ||
| 615 | dwc->tx_node_active->next; | ||
| 616 | |||
| 617 | dwc_do_single_block(dwc, desc); | ||
| 618 | |||
| 619 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
| 620 | continue; | ||
| 621 | } else { | ||
| 622 | /* we are done here */ | ||
| 623 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
| 624 | } | ||
| 625 | } | ||
| 626 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
| 627 | |||
| 557 | dwc_scan_descriptors(dw, dwc); | 628 | dwc_scan_descriptors(dw, dwc); |
| 629 | } | ||
| 558 | } | 630 | } |
| 559 | 631 | ||
| 560 | /* | 632 | /* |
| @@ -636,6 +708,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 636 | size_t len, unsigned long flags) | 708 | size_t len, unsigned long flags) |
| 637 | { | 709 | { |
| 638 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 710 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
| 711 | struct dw_dma_slave *dws = chan->private; | ||
| 639 | struct dw_desc *desc; | 712 | struct dw_desc *desc; |
| 640 | struct dw_desc *first; | 713 | struct dw_desc *first; |
| 641 | struct dw_desc *prev; | 714 | struct dw_desc *prev; |
| @@ -643,6 +716,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 643 | size_t offset; | 716 | size_t offset; |
| 644 | unsigned int src_width; | 717 | unsigned int src_width; |
| 645 | unsigned int dst_width; | 718 | unsigned int dst_width; |
| 719 | unsigned int data_width; | ||
| 646 | u32 ctllo; | 720 | u32 ctllo; |
| 647 | 721 | ||
| 648 | dev_vdbg(chan2dev(chan), | 722 | dev_vdbg(chan2dev(chan), |
| @@ -655,7 +729,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 655 | return NULL; | 729 | return NULL; |
| 656 | } | 730 | } |
| 657 | 731 | ||
| 658 | src_width = dst_width = dwc_fast_fls(src | dest | len); | 732 | data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)], |
| 733 | dwc->dw->data_width[dwc_get_dms(dws)]); | ||
| 734 | |||
| 735 | src_width = dst_width = min_t(unsigned int, data_width, | ||
| 736 | dwc_fast_fls(src | dest | len)); | ||
| 659 | 737 | ||
| 660 | ctllo = DWC_DEFAULT_CTLLO(chan) | 738 | ctllo = DWC_DEFAULT_CTLLO(chan) |
| 661 | | DWC_CTLL_DST_WIDTH(dst_width) | 739 | | DWC_CTLL_DST_WIDTH(dst_width) |
| @@ -667,7 +745,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 667 | 745 | ||
| 668 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | 746 | for (offset = 0; offset < len; offset += xfer_count << src_width) { |
| 669 | xfer_count = min_t(size_t, (len - offset) >> src_width, | 747 | xfer_count = min_t(size_t, (len - offset) >> src_width, |
| 670 | DWC_MAX_COUNT); | 748 | dwc->block_size); |
| 671 | 749 | ||
| 672 | desc = dwc_desc_get(dwc); | 750 | desc = dwc_desc_get(dwc); |
| 673 | if (!desc) | 751 | if (!desc) |
| @@ -725,6 +803,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 725 | dma_addr_t reg; | 803 | dma_addr_t reg; |
| 726 | unsigned int reg_width; | 804 | unsigned int reg_width; |
| 727 | unsigned int mem_width; | 805 | unsigned int mem_width; |
| 806 | unsigned int data_width; | ||
| 728 | unsigned int i; | 807 | unsigned int i; |
| 729 | struct scatterlist *sg; | 808 | struct scatterlist *sg; |
| 730 | size_t total_len = 0; | 809 | size_t total_len = 0; |
| @@ -748,6 +827,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 748 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 827 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
| 749 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 828 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
| 750 | 829 | ||
| 830 | data_width = dwc->dw->data_width[dwc_get_sms(dws)]; | ||
| 831 | |||
| 751 | for_each_sg(sgl, sg, sg_len, i) { | 832 | for_each_sg(sgl, sg, sg_len, i) { |
| 752 | struct dw_desc *desc; | 833 | struct dw_desc *desc; |
| 753 | u32 len, dlen, mem; | 834 | u32 len, dlen, mem; |
| @@ -755,7 +836,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 755 | mem = sg_dma_address(sg); | 836 | mem = sg_dma_address(sg); |
| 756 | len = sg_dma_len(sg); | 837 | len = sg_dma_len(sg); |
| 757 | 838 | ||
| 758 | mem_width = dwc_fast_fls(mem | len); | 839 | mem_width = min_t(unsigned int, |
| 840 | data_width, dwc_fast_fls(mem | len)); | ||
| 759 | 841 | ||
| 760 | slave_sg_todev_fill_desc: | 842 | slave_sg_todev_fill_desc: |
| 761 | desc = dwc_desc_get(dwc); | 843 | desc = dwc_desc_get(dwc); |
| @@ -768,8 +850,8 @@ slave_sg_todev_fill_desc: | |||
| 768 | desc->lli.sar = mem; | 850 | desc->lli.sar = mem; |
| 769 | desc->lli.dar = reg; | 851 | desc->lli.dar = reg; |
| 770 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 852 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
| 771 | if ((len >> mem_width) > DWC_MAX_COUNT) { | 853 | if ((len >> mem_width) > dwc->block_size) { |
| 772 | dlen = DWC_MAX_COUNT << mem_width; | 854 | dlen = dwc->block_size << mem_width; |
| 773 | mem += dlen; | 855 | mem += dlen; |
| 774 | len -= dlen; | 856 | len -= dlen; |
| 775 | } else { | 857 | } else { |
| @@ -808,6 +890,8 @@ slave_sg_todev_fill_desc: | |||
| 808 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 890 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
| 809 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 891 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
| 810 | 892 | ||
| 893 | data_width = dwc->dw->data_width[dwc_get_dms(dws)]; | ||
| 894 | |||
| 811 | for_each_sg(sgl, sg, sg_len, i) { | 895 | for_each_sg(sgl, sg, sg_len, i) { |
| 812 | struct dw_desc *desc; | 896 | struct dw_desc *desc; |
| 813 | u32 len, dlen, mem; | 897 | u32 len, dlen, mem; |
| @@ -815,7 +899,8 @@ slave_sg_todev_fill_desc: | |||
| 815 | mem = sg_dma_address(sg); | 899 | mem = sg_dma_address(sg); |
| 816 | len = sg_dma_len(sg); | 900 | len = sg_dma_len(sg); |
| 817 | 901 | ||
| 818 | mem_width = dwc_fast_fls(mem | len); | 902 | mem_width = min_t(unsigned int, |
| 903 | data_width, dwc_fast_fls(mem | len)); | ||
| 819 | 904 | ||
| 820 | slave_sg_fromdev_fill_desc: | 905 | slave_sg_fromdev_fill_desc: |
| 821 | desc = dwc_desc_get(dwc); | 906 | desc = dwc_desc_get(dwc); |
| @@ -828,8 +913,8 @@ slave_sg_fromdev_fill_desc: | |||
| 828 | desc->lli.sar = reg; | 913 | desc->lli.sar = reg; |
| 829 | desc->lli.dar = mem; | 914 | desc->lli.dar = mem; |
| 830 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 915 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
| 831 | if ((len >> reg_width) > DWC_MAX_COUNT) { | 916 | if ((len >> reg_width) > dwc->block_size) { |
| 832 | dlen = DWC_MAX_COUNT << reg_width; | 917 | dlen = dwc->block_size << reg_width; |
| 833 | mem += dlen; | 918 | mem += dlen; |
| 834 | len -= dlen; | 919 | len -= dlen; |
| 835 | } else { | 920 | } else { |
| @@ -945,6 +1030,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 945 | } else if (cmd == DMA_TERMINATE_ALL) { | 1030 | } else if (cmd == DMA_TERMINATE_ALL) { |
| 946 | spin_lock_irqsave(&dwc->lock, flags); | 1031 | spin_lock_irqsave(&dwc->lock, flags); |
| 947 | 1032 | ||
| 1033 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
| 1034 | |||
| 948 | dwc_chan_disable(dw, dwc); | 1035 | dwc_chan_disable(dw, dwc); |
| 949 | 1036 | ||
| 950 | dwc->paused = false; | 1037 | dwc->paused = false; |
| @@ -1187,6 +1274,13 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
| 1187 | unsigned long flags; | 1274 | unsigned long flags; |
| 1188 | 1275 | ||
| 1189 | spin_lock_irqsave(&dwc->lock, flags); | 1276 | spin_lock_irqsave(&dwc->lock, flags); |
| 1277 | if (dwc->nollp) { | ||
| 1278 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
| 1279 | dev_dbg(chan2dev(&dwc->chan), | ||
| 1280 | "channel doesn't support LLP transfers\n"); | ||
| 1281 | return ERR_PTR(-EINVAL); | ||
| 1282 | } | ||
| 1283 | |||
| 1190 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1284 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
| 1191 | spin_unlock_irqrestore(&dwc->lock, flags); | 1285 | spin_unlock_irqrestore(&dwc->lock, flags); |
| 1192 | dev_dbg(chan2dev(&dwc->chan), | 1286 | dev_dbg(chan2dev(&dwc->chan), |
| @@ -1212,7 +1306,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
| 1212 | periods = buf_len / period_len; | 1306 | periods = buf_len / period_len; |
| 1213 | 1307 | ||
| 1214 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | 1308 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ |
| 1215 | if (period_len > (DWC_MAX_COUNT << reg_width)) | 1309 | if (period_len > (dwc->block_size << reg_width)) |
| 1216 | goto out_err; | 1310 | goto out_err; |
| 1217 | if (unlikely(period_len & ((1 << reg_width) - 1))) | 1311 | if (unlikely(period_len & ((1 << reg_width) - 1))) |
| 1218 | goto out_err; | 1312 | goto out_err; |
| @@ -1374,6 +1468,11 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1374 | struct resource *io; | 1468 | struct resource *io; |
| 1375 | struct dw_dma *dw; | 1469 | struct dw_dma *dw; |
| 1376 | size_t size; | 1470 | size_t size; |
| 1471 | void __iomem *regs; | ||
| 1472 | bool autocfg; | ||
| 1473 | unsigned int dw_params; | ||
| 1474 | unsigned int nr_channels; | ||
| 1475 | unsigned int max_blk_size = 0; | ||
| 1377 | int irq; | 1476 | int irq; |
| 1378 | int err; | 1477 | int err; |
| 1379 | int i; | 1478 | int i; |
| @@ -1390,32 +1489,46 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1390 | if (irq < 0) | 1489 | if (irq < 0) |
| 1391 | return irq; | 1490 | return irq; |
| 1392 | 1491 | ||
| 1393 | size = sizeof(struct dw_dma); | 1492 | regs = devm_request_and_ioremap(&pdev->dev, io); |
| 1394 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | 1493 | if (!regs) |
| 1395 | dw = kzalloc(size, GFP_KERNEL); | 1494 | return -EBUSY; |
| 1495 | |||
| 1496 | dw_params = dma_read_byaddr(regs, DW_PARAMS); | ||
| 1497 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | ||
| 1498 | |||
| 1499 | if (autocfg) | ||
| 1500 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | ||
| 1501 | else | ||
| 1502 | nr_channels = pdata->nr_channels; | ||
| 1503 | |||
| 1504 | size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); | ||
| 1505 | dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
| 1396 | if (!dw) | 1506 | if (!dw) |
| 1397 | return -ENOMEM; | 1507 | return -ENOMEM; |
| 1398 | 1508 | ||
| 1399 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | 1509 | dw->clk = devm_clk_get(&pdev->dev, "hclk"); |
| 1400 | err = -EBUSY; | 1510 | if (IS_ERR(dw->clk)) |
| 1401 | goto err_kfree; | 1511 | return PTR_ERR(dw->clk); |
| 1402 | } | 1512 | clk_prepare_enable(dw->clk); |
| 1403 | 1513 | ||
| 1404 | dw->regs = ioremap(io->start, DW_REGLEN); | 1514 | dw->regs = regs; |
| 1405 | if (!dw->regs) { | 1515 | |
| 1406 | err = -ENOMEM; | 1516 | /* get hardware configuration parameters */ |
| 1407 | goto err_release_r; | 1517 | if (autocfg) { |
| 1408 | } | 1518 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); |
| 1409 | 1519 | ||
| 1410 | dw->clk = clk_get(&pdev->dev, "hclk"); | 1520 | dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; |
| 1411 | if (IS_ERR(dw->clk)) { | 1521 | for (i = 0; i < dw->nr_masters; i++) { |
| 1412 | err = PTR_ERR(dw->clk); | 1522 | dw->data_width[i] = |
| 1413 | goto err_clk; | 1523 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; |
| 1524 | } | ||
| 1525 | } else { | ||
| 1526 | dw->nr_masters = pdata->nr_masters; | ||
| 1527 | memcpy(dw->data_width, pdata->data_width, 4); | ||
| 1414 | } | 1528 | } |
| 1415 | clk_prepare_enable(dw->clk); | ||
| 1416 | 1529 | ||
| 1417 | /* Calculate all channel mask before DMA setup */ | 1530 | /* Calculate all channel mask before DMA setup */ |
| 1418 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1531 | dw->all_chan_mask = (1 << nr_channels) - 1; |
| 1419 | 1532 | ||
| 1420 | /* force dma off, just in case */ | 1533 | /* force dma off, just in case */ |
| 1421 | dw_dma_off(dw); | 1534 | dw_dma_off(dw); |
| @@ -1423,17 +1536,19 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1423 | /* disable BLOCK interrupts as well */ | 1536 | /* disable BLOCK interrupts as well */ |
| 1424 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1537 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
| 1425 | 1538 | ||
| 1426 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | 1539 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, |
| 1540 | "dw_dmac", dw); | ||
| 1427 | if (err) | 1541 | if (err) |
| 1428 | goto err_irq; | 1542 | return err; |
| 1429 | 1543 | ||
| 1430 | platform_set_drvdata(pdev, dw); | 1544 | platform_set_drvdata(pdev, dw); |
| 1431 | 1545 | ||
| 1432 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1546 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
| 1433 | 1547 | ||
| 1434 | INIT_LIST_HEAD(&dw->dma.channels); | 1548 | INIT_LIST_HEAD(&dw->dma.channels); |
| 1435 | for (i = 0; i < pdata->nr_channels; i++) { | 1549 | for (i = 0; i < nr_channels; i++) { |
| 1436 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1550 | struct dw_dma_chan *dwc = &dw->chan[i]; |
| 1551 | int r = nr_channels - i - 1; | ||
| 1437 | 1552 | ||
| 1438 | dwc->chan.device = &dw->dma; | 1553 | dwc->chan.device = &dw->dma; |
| 1439 | dma_cookie_init(&dwc->chan); | 1554 | dma_cookie_init(&dwc->chan); |
| @@ -1445,7 +1560,7 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1445 | 1560 | ||
| 1446 | /* 7 is highest priority & 0 is lowest. */ | 1561 | /* 7 is highest priority & 0 is lowest. */ |
| 1447 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1562 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
| 1448 | dwc->priority = pdata->nr_channels - i - 1; | 1563 | dwc->priority = r; |
| 1449 | else | 1564 | else |
| 1450 | dwc->priority = i; | 1565 | dwc->priority = i; |
| 1451 | 1566 | ||
| @@ -1458,6 +1573,32 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1458 | INIT_LIST_HEAD(&dwc->free_list); | 1573 | INIT_LIST_HEAD(&dwc->free_list); |
| 1459 | 1574 | ||
| 1460 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1575 | channel_clear_bit(dw, CH_EN, dwc->mask); |
| 1576 | |||
| 1577 | dwc->dw = dw; | ||
| 1578 | |||
| 1579 | /* hardware configuration */ | ||
| 1580 | if (autocfg) { | ||
| 1581 | unsigned int dwc_params; | ||
| 1582 | |||
| 1583 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), | ||
| 1584 | DWC_PARAMS); | ||
| 1585 | |||
| 1586 | /* Decode maximum block size for given channel. The | ||
| 1587 | * stored 4 bit value represents blocks from 0x00 for 3 | ||
| 1588 | * up to 0x0a for 4095. */ | ||
| 1589 | dwc->block_size = | ||
| 1590 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | ||
| 1591 | dwc->nollp = | ||
| 1592 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | ||
| 1593 | } else { | ||
| 1594 | dwc->block_size = pdata->block_size; | ||
| 1595 | |||
| 1596 | /* Check if channel supports multi block transfer */ | ||
| 1597 | channel_writel(dwc, LLP, 0xfffffffc); | ||
| 1598 | dwc->nollp = | ||
| 1599 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | ||
| 1600 | channel_writel(dwc, LLP, 0); | ||
| 1601 | } | ||
| 1461 | } | 1602 | } |
| 1462 | 1603 | ||
| 1463 | /* Clear all interrupts on all channels. */ | 1604 | /* Clear all interrupts on all channels. */ |
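A quick check of the 4-bit block-size decode in the hunk above; the expression is copied from it, while the helper name and the sample register value are made up.

#include <stdio.h>

/* Same expression as in the probe hunk above. */
static unsigned int decode_block_size(unsigned int max_blk_size, unsigned int chan)
{
	return (4 << ((max_blk_size >> (4 * chan)) & 0xf)) - 1;
}

int main(void)
{
	/* channel 0 encoded as 0x0, channel 1 encoded as 0xa */
	unsigned int max_blk_size = 0xa0;

	printf("%u %u\n", decode_block_size(max_blk_size, 0),	/* 3    */
			  decode_block_size(max_blk_size, 1));	/* 4095 */
	return 0;
}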
| @@ -1486,35 +1627,21 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
| 1486 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1627 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
| 1487 | 1628 | ||
| 1488 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | 1629 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", |
| 1489 | dev_name(&pdev->dev), pdata->nr_channels); | 1630 | dev_name(&pdev->dev), nr_channels); |
| 1490 | 1631 | ||
| 1491 | dma_async_device_register(&dw->dma); | 1632 | dma_async_device_register(&dw->dma); |
| 1492 | 1633 | ||
| 1493 | return 0; | 1634 | return 0; |
| 1494 | |||
| 1495 | err_irq: | ||
| 1496 | clk_disable_unprepare(dw->clk); | ||
| 1497 | clk_put(dw->clk); | ||
| 1498 | err_clk: | ||
| 1499 | iounmap(dw->regs); | ||
| 1500 | dw->regs = NULL; | ||
| 1501 | err_release_r: | ||
| 1502 | release_resource(io); | ||
| 1503 | err_kfree: | ||
| 1504 | kfree(dw); | ||
| 1505 | return err; | ||
| 1506 | } | 1635 | } |
| 1507 | 1636 | ||
| 1508 | static int __devexit dw_remove(struct platform_device *pdev) | 1637 | static int __devexit dw_remove(struct platform_device *pdev) |
| 1509 | { | 1638 | { |
| 1510 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1639 | struct dw_dma *dw = platform_get_drvdata(pdev); |
| 1511 | struct dw_dma_chan *dwc, *_dwc; | 1640 | struct dw_dma_chan *dwc, *_dwc; |
| 1512 | struct resource *io; | ||
| 1513 | 1641 | ||
| 1514 | dw_dma_off(dw); | 1642 | dw_dma_off(dw); |
| 1515 | dma_async_device_unregister(&dw->dma); | 1643 | dma_async_device_unregister(&dw->dma); |
| 1516 | 1644 | ||
| 1517 | free_irq(platform_get_irq(pdev, 0), dw); | ||
| 1518 | tasklet_kill(&dw->tasklet); | 1645 | tasklet_kill(&dw->tasklet); |
| 1519 | 1646 | ||
| 1520 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | 1647 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, |
| @@ -1523,17 +1650,6 @@ static int __devexit dw_remove(struct platform_device *pdev) | |||
| 1523 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1650 | channel_clear_bit(dw, CH_EN, dwc->mask); |
| 1524 | } | 1651 | } |
| 1525 | 1652 | ||
| 1526 | clk_disable_unprepare(dw->clk); | ||
| 1527 | clk_put(dw->clk); | ||
| 1528 | |||
| 1529 | iounmap(dw->regs); | ||
| 1530 | dw->regs = NULL; | ||
| 1531 | |||
| 1532 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1533 | release_mem_region(io->start, DW_REGLEN); | ||
| 1534 | |||
| 1535 | kfree(dw); | ||
| 1536 | |||
| 1537 | return 0; | 1653 | return 0; |
| 1538 | } | 1654 | } |
| 1539 | 1655 | ||
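With DWC_MAX_COUNT and the fixed register length gone, controllers that lack the encoded-parameters block depend on the platform data fields read in the probe hunks above (nr_channels, block_size, nr_masters, data_width). The fragment below is a hypothetical board-file sketch; it uses a stand-in struct because the real struct dw_dma_platform_data is declared in a platform header that is not part of this patch.

/* Minimal stand-in for the platform data consumed by the probe hunks above;
 * the real struct carries more fields than shown here. */
struct dw_dma_platform_data_sketch {
	unsigned int	nr_channels;
	unsigned int	block_size;	/* max transfer count per descriptor */
	unsigned char	nr_masters;
	unsigned char	data_width[4];	/* log2(bytes) per AHB master */
};

/* Illustrative values only. */
static struct dw_dma_platform_data_sketch board_pdata = {
	.nr_channels	= 8,
	.block_size	= 4095,
	.nr_masters	= 2,
	.data_width	= { 3, 3 },	/* two 8-byte-wide masters */
};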
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 50830bee087a..ff39fa6cd2bc 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
| @@ -82,9 +82,39 @@ struct dw_dma_regs { | |||
| 82 | DW_REG(ID); | 82 | DW_REG(ID); |
| 83 | DW_REG(TEST); | 83 | DW_REG(TEST); |
| 84 | 84 | ||
| 85 | /* reserved */ | ||
| 86 | DW_REG(__reserved0); | ||
| 87 | DW_REG(__reserved1); | ||
| 88 | |||
| 85 | /* optional encoded params, 0x3c8..0x3f7 */ | 89 | /* optional encoded params, 0x3c8..0x3f7 */ |
| 90 | u32 __reserved; | ||
| 91 | |||
| 92 | /* per-channel configuration registers */ | ||
| 93 | u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; | ||
| 94 | u32 MULTI_BLK_TYPE; | ||
| 95 | u32 MAX_BLK_SIZE; | ||
| 96 | |||
| 97 | /* top-level parameters */ | ||
| 98 | u32 DW_PARAMS; | ||
| 86 | }; | 99 | }; |
| 87 | 100 | ||
| 101 | /* To access the registers in early stage of probe */ | ||
| 102 | #define dma_read_byaddr(addr, name) \ | ||
| 103 | readl((addr) + offsetof(struct dw_dma_regs, name)) | ||
| 104 | |||
| 105 | /* Bitfields in DW_PARAMS */ | ||
| 106 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ | ||
| 107 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ | ||
| 108 | #define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) | ||
| 109 | #define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ | ||
| 110 | #define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ | ||
| 111 | #define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ | ||
| 112 | #define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ | ||
| 113 | #define DW_PARAMS_EN 28 /* encoded parameters */ | ||
| 114 | |||
| 115 | /* Bitfields in DWC_PARAMS */ | ||
| 116 | #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ | ||
| 117 | |||
| 88 | /* Bitfields in CTL_LO */ | 118 | /* Bitfields in CTL_LO */ |
| 89 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | 119 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ |
| 90 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | 120 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ |
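A stand-alone illustration of how the probe code above consumes DW_PARAMS when the encoded-parameters block is implemented; the register value is fabricated, and the bit layout follows the definitions just added.

#include <stdio.h>

#define DW_PARAMS_NR_CHAN	8	/* number of channels */
#define DW_PARAMS_NR_MASTER	11	/* number of AHB masters */
#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
#define DW_PARAMS_EN		28	/* encoded parameters valid */

int main(void)
{
	/* fabricated register value: autocfg on, 8 channels, 2 masters */
	unsigned int dw_params = (1u << DW_PARAMS_EN) |
				 (7u << DW_PARAMS_NR_CHAN) |
				 (1u << DW_PARAMS_NR_MASTER);
	unsigned int nr_channels, nr_masters, i;

	if (!(dw_params >> DW_PARAMS_EN & 0x1))
		return 1;	/* no encoded parameters: fall back to platform data */

	nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	nr_masters  = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
	printf("channels: %u, masters: %u\n", nr_channels, nr_masters);

	/* data_width is stored as log2(bytes), as in the probe hunk */
	for (i = 0; i < nr_masters; i++)
		printf("master %u data_width: %u (log2 of bytes)\n", i,
		       (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2);
	return 0;
}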
| @@ -140,10 +170,9 @@ struct dw_dma_regs { | |||
| 140 | /* Bitfields in CFG */ | 170 | /* Bitfields in CFG */ |
| 141 | #define DW_CFG_DMA_EN (1 << 0) | 171 | #define DW_CFG_DMA_EN (1 << 0) |
| 142 | 172 | ||
| 143 | #define DW_REGLEN 0x400 | ||
| 144 | |||
| 145 | enum dw_dmac_flags { | 173 | enum dw_dmac_flags { |
| 146 | DW_DMA_IS_CYCLIC = 0, | 174 | DW_DMA_IS_CYCLIC = 0, |
| 175 | DW_DMA_IS_SOFT_LLP = 1, | ||
| 147 | }; | 176 | }; |
| 148 | 177 | ||
| 149 | struct dw_dma_chan { | 178 | struct dw_dma_chan { |
| @@ -154,6 +183,10 @@ struct dw_dma_chan { | |||
| 154 | bool paused; | 183 | bool paused; |
| 155 | bool initialized; | 184 | bool initialized; |
| 156 | 185 | ||
| 186 | /* software emulation of the LLP transfers */ | ||
| 187 | struct list_head *tx_list; | ||
| 188 | struct list_head *tx_node_active; | ||
| 189 | |||
| 157 | spinlock_t lock; | 190 | spinlock_t lock; |
| 158 | 191 | ||
| 159 | /* these other elements are all protected by lock */ | 192 | /* these other elements are all protected by lock */ |
| @@ -165,8 +198,15 @@ struct dw_dma_chan { | |||
| 165 | 198 | ||
| 166 | unsigned int descs_allocated; | 199 | unsigned int descs_allocated; |
| 167 | 200 | ||
| 201 | /* hardware configuration */ | ||
| 202 | unsigned int block_size; | ||
| 203 | bool nollp; | ||
| 204 | |||
| 168 | /* configuration passed via DMA_SLAVE_CONFIG */ | 205 | /* configuration passed via DMA_SLAVE_CONFIG */ |
| 169 | struct dma_slave_config dma_sconfig; | 206 | struct dma_slave_config dma_sconfig; |
| 207 | |||
| 208 | /* backlink to dw_dma */ | ||
| 209 | struct dw_dma *dw; | ||
| 170 | }; | 210 | }; |
| 171 | 211 | ||
| 172 | static inline struct dw_dma_chan_regs __iomem * | 212 | static inline struct dw_dma_chan_regs __iomem * |
| @@ -193,6 +233,10 @@ struct dw_dma { | |||
| 193 | 233 | ||
| 194 | u8 all_chan_mask; | 234 | u8 all_chan_mask; |
| 195 | 235 | ||
| 236 | /* hardware configuration */ | ||
| 237 | unsigned char nr_masters; | ||
| 238 | unsigned char data_width[4]; | ||
| 239 | |||
| 196 | struct dw_dma_chan chan[0]; | 240 | struct dw_dma_chan chan[0]; |
| 197 | }; | 241 | }; |
| 198 | 242 | ||
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c new file mode 100644 index 000000000000..05aea3ce8506 --- /dev/null +++ b/drivers/dma/edma.c | |||
| @@ -0,0 +1,671 @@ | |||
| 1 | /* | ||
| 2 | * TI EDMA DMA engine driver | ||
| 3 | * | ||
| 4 | * Copyright 2012 Texas Instruments | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License as | ||
| 8 | * published by the Free Software Foundation version 2. | ||
| 9 | * | ||
| 10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 11 | * kind, whether express or implied; without even the implied warranty | ||
| 12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/dmaengine.h> | ||
| 17 | #include <linux/dma-mapping.h> | ||
| 18 | #include <linux/err.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/interrupt.h> | ||
| 21 | #include <linux/list.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/spinlock.h> | ||
| 26 | |||
| 27 | #include <mach/edma.h> | ||
| 28 | |||
| 29 | #include "dmaengine.h" | ||
| 30 | #include "virt-dma.h" | ||
| 31 | |||
| 32 | /* | ||
| 33 | * This will go away when the private EDMA API is folded | ||
| 34 | * into this driver and the platform device(s) are | ||
| 35 | * instantiated in the arch code. We can only get away | ||
| 36 | * with this simplification because DA8XX may not be built | ||
| 37 | * in the same kernel image with other DaVinci parts. This | ||
| 38 | * avoids having to sprinkle dmaengine driver platform devices | ||
| 39 | * and data throughout all the existing board files. | ||
| 40 | */ | ||
| 41 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | ||
| 42 | #define EDMA_CTLRS 2 | ||
| 43 | #define EDMA_CHANS 32 | ||
| 44 | #else | ||
| 45 | #define EDMA_CTLRS 1 | ||
| 46 | #define EDMA_CHANS 64 | ||
| 47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | ||
| 48 | |||
| 49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | ||
| 50 | #define MAX_NR_SG 16 | ||
| 51 | #define EDMA_MAX_SLOTS MAX_NR_SG | ||
| 52 | #define EDMA_DESCRIPTORS 16 | ||
| 53 | |||
| 54 | struct edma_desc { | ||
| 55 | struct virt_dma_desc vdesc; | ||
| 56 | struct list_head node; | ||
| 57 | int absync; | ||
| 58 | int pset_nr; | ||
| 59 | struct edmacc_param pset[0]; | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct edma_cc; | ||
| 63 | |||
| 64 | struct edma_chan { | ||
| 65 | struct virt_dma_chan vchan; | ||
| 66 | struct list_head node; | ||
| 67 | struct edma_desc *edesc; | ||
| 68 | struct edma_cc *ecc; | ||
| 69 | int ch_num; | ||
| 70 | bool alloced; | ||
| 71 | int slot[EDMA_MAX_SLOTS]; | ||
| 72 | dma_addr_t addr; | ||
| 73 | int addr_width; | ||
| 74 | int maxburst; | ||
| 75 | }; | ||
| 76 | |||
| 77 | struct edma_cc { | ||
| 78 | int ctlr; | ||
| 79 | struct dma_device dma_slave; | ||
| 80 | struct edma_chan slave_chans[EDMA_CHANS]; | ||
| 81 | int num_slave_chans; | ||
| 82 | int dummy_slot; | ||
| 83 | }; | ||
| 84 | |||
| 85 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | ||
| 86 | { | ||
| 87 | return container_of(d, struct edma_cc, dma_slave); | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | ||
| 91 | { | ||
| 92 | return container_of(c, struct edma_chan, vchan.chan); | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline struct edma_desc | ||
| 96 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
| 97 | { | ||
| 98 | return container_of(tx, struct edma_desc, vdesc.tx); | ||
| 99 | } | ||
| 100 | |||
| 101 | static void edma_desc_free(struct virt_dma_desc *vdesc) | ||
| 102 | { | ||
| 103 | kfree(container_of(vdesc, struct edma_desc, vdesc)); | ||
| 104 | } | ||
| 105 | |||
| 106 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | ||
| 107 | static void edma_execute(struct edma_chan *echan) | ||
| 108 | { | ||
| 109 | struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan); | ||
| 110 | struct edma_desc *edesc; | ||
| 111 | int i; | ||
| 112 | |||
| 113 | if (!vdesc) { | ||
| 114 | echan->edesc = NULL; | ||
| 115 | return; | ||
| 116 | } | ||
| 117 | |||
| 118 | list_del(&vdesc->node); | ||
| 119 | |||
| 120 | echan->edesc = edesc = to_edma_desc(&vdesc->tx); | ||
| 121 | |||
| 122 | /* Write descriptor PaRAM set(s) */ | ||
| 123 | for (i = 0; i < edesc->pset_nr; i++) { | ||
| 124 | edma_write_slot(echan->slot[i], &edesc->pset[i]); | ||
| 125 | dev_dbg(echan->vchan.chan.device->dev, | ||
| 126 | "\n pset[%d]:\n" | ||
| 127 | " chnum\t%d\n" | ||
| 128 | " slot\t%d\n" | ||
| 129 | " opt\t%08x\n" | ||
| 130 | " src\t%08x\n" | ||
| 131 | " dst\t%08x\n" | ||
| 132 | " abcnt\t%08x\n" | ||
| 133 | " ccnt\t%08x\n" | ||
| 134 | " bidx\t%08x\n" | ||
| 135 | " cidx\t%08x\n" | ||
| 136 | " lkrld\t%08x\n", | ||
| 137 | i, echan->ch_num, echan->slot[i], | ||
| 138 | edesc->pset[i].opt, | ||
| 139 | edesc->pset[i].src, | ||
| 140 | edesc->pset[i].dst, | ||
| 141 | edesc->pset[i].a_b_cnt, | ||
| 142 | edesc->pset[i].ccnt, | ||
| 143 | edesc->pset[i].src_dst_bidx, | ||
| 144 | edesc->pset[i].src_dst_cidx, | ||
| 145 | edesc->pset[i].link_bcntrld); | ||
| 146 | /* Link to the previous slot if not the last set */ | ||
| 147 | if (i != (edesc->pset_nr - 1)) | ||
| 148 | edma_link(echan->slot[i], echan->slot[i+1]); | ||
| 149 | /* Final pset links to the dummy pset */ | ||
| 150 | else | ||
| 151 | edma_link(echan->slot[i], echan->ecc->dummy_slot); | ||
| 152 | } | ||
| 153 | |||
| 154 | edma_start(echan->ch_num); | ||
| 155 | } | ||
| 156 | |||
| 157 | static int edma_terminate_all(struct edma_chan *echan) | ||
| 158 | { | ||
| 159 | unsigned long flags; | ||
| 160 | LIST_HEAD(head); | ||
| 161 | |||
| 162 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
| 163 | |||
| 164 | /* | ||
| 165 | * Stop DMA activity: we assume the callback will not be called | ||
| 166 | * after edma_stop() returns (even if it does, it will see | ||
| 167 | * echan->edesc is NULL and exit.) | ||
| 168 | */ | ||
| 169 | if (echan->edesc) { | ||
| 170 | echan->edesc = NULL; | ||
| 171 | edma_stop(echan->ch_num); | ||
| 172 | } | ||
| 173 | |||
| 174 | vchan_get_all_descriptors(&echan->vchan, &head); | ||
| 175 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
| 176 | vchan_dma_desc_free_list(&echan->vchan, &head); | ||
| 177 | |||
| 178 | return 0; | ||
| 179 | } | ||
| 180 | |||
| 181 | |||
| 182 | static int edma_slave_config(struct edma_chan *echan, | ||
| 183 | struct dma_slave_config *config) | ||
| 184 | { | ||
| 185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
| 186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | if (config->direction == DMA_MEM_TO_DEV) { | ||
| 190 | if (config->dst_addr) | ||
| 191 | echan->addr = config->dst_addr; | ||
| 192 | if (config->dst_addr_width) | ||
| 193 | echan->addr_width = config->dst_addr_width; | ||
| 194 | if (config->dst_maxburst) | ||
| 195 | echan->maxburst = config->dst_maxburst; | ||
| 196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
| 197 | if (config->src_addr) | ||
| 198 | echan->addr = config->src_addr; | ||
| 199 | if (config->src_addr_width) | ||
| 200 | echan->addr_width = config->src_addr_width; | ||
| 201 | if (config->src_maxburst) | ||
| 202 | echan->maxburst = config->src_maxburst; | ||
| 203 | } | ||
| 204 | |||
| 205 | return 0; | ||
| 206 | } | ||
| 207 | |||
| 208 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 209 | unsigned long arg) | ||
| 210 | { | ||
| 211 | int ret = 0; | ||
| 212 | struct dma_slave_config *config; | ||
| 213 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 214 | |||
| 215 | switch (cmd) { | ||
| 216 | case DMA_TERMINATE_ALL: | ||
| 217 | edma_terminate_all(echan); | ||
| 218 | break; | ||
| 219 | case DMA_SLAVE_CONFIG: | ||
| 220 | config = (struct dma_slave_config *)arg; | ||
| 221 | ret = edma_slave_config(echan, config); | ||
| 222 | break; | ||
| 223 | default: | ||
| 224 | ret = -ENOSYS; | ||
| 225 | } | ||
| 226 | |||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | ||
| 231 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 232 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
| 233 | unsigned long tx_flags, void *context) | ||
| 234 | { | ||
| 235 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 236 | struct device *dev = chan->device->dev; | ||
| 237 | struct edma_desc *edesc; | ||
| 238 | struct scatterlist *sg; | ||
| 239 | int i; | ||
| 240 | int acnt, bcnt, ccnt, src, dst, cidx; | ||
| 241 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
| 242 | |||
| 243 | if (unlikely(!echan || !sgl || !sg_len)) | ||
| 244 | return NULL; | ||
| 245 | |||
| 246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
| 247 | dev_err(dev, "Undefined slave buswidth\n"); | ||
| 248 | return NULL; | ||
| 249 | } | ||
| 250 | |||
| 251 | if (sg_len > MAX_NR_SG) { | ||
| 252 | dev_err(dev, "Exceeded max SG segments %d > %d\n", | ||
| 253 | sg_len, MAX_NR_SG); | ||
| 254 | return NULL; | ||
| 255 | } | ||
| 256 | |||
| 257 | edesc = kzalloc(sizeof(*edesc) + sg_len * | ||
| 258 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
| 259 | if (!edesc) { | ||
| 260 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
| 261 | return NULL; | ||
| 262 | } | ||
| 263 | |||
| 264 | edesc->pset_nr = sg_len; | ||
| 265 | |||
| 266 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 267 | /* Allocate a PaRAM slot, if needed */ | ||
| 268 | if (echan->slot[i] < 0) { | ||
| 269 | echan->slot[i] = | ||
| 270 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
| 271 | EDMA_SLOT_ANY); | ||
| 272 | if (echan->slot[i] < 0) { | ||
| 273 | dev_err(dev, "Failed to allocate slot\n"); | ||
| 274 | return NULL; | ||
| 275 | } | ||
| 276 | } | ||
| 277 | |||
| 278 | acnt = echan->addr_width; | ||
| 279 | |||
| 280 | /* | ||
| 281 | * If the maxburst is equal to the fifo width, use | ||
| 282 | * A-synced transfers. This allows for large contiguous | ||
| 283 | * buffer transfers using only one PaRAM set. | ||
| 284 | */ | ||
| 285 | if (echan->maxburst == 1) { | ||
| 286 | edesc->absync = false; | ||
| 287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | ||
| 288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | ||
| 289 | if (bcnt) | ||
| 290 | ccnt++; | ||
| 291 | else | ||
| 292 | bcnt = SZ_64K - 1; | ||
| 293 | cidx = acnt; | ||
| 294 | /* | ||
| 295 | * If maxburst is greater than the fifo address_width, | ||
| 296 | * use AB-synced transfers where A count is the fifo | ||
| 297 | * address_width and B count is the maxburst. In this | ||
| 298 | * case, we are limited to transfers of C count frames | ||
| 299 | * of (address_width * maxburst) where C count is limited | ||
| 300 | * to SZ_64K-1. This places an upper bound on the length | ||
| 301 | * of an SG segment that can be handled. | ||
| 302 | */ | ||
| 303 | } else { | ||
| 304 | edesc->absync = true; | ||
| 305 | bcnt = echan->maxburst; | ||
| 306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
| 307 | if (ccnt > (SZ_64K - 1)) { | ||
| 308 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
| 309 | return NULL; | ||
| 310 | } | ||
| 311 | cidx = acnt * bcnt; | ||
| 312 | } | ||
| 313 | |||
| 314 | if (direction == DMA_MEM_TO_DEV) { | ||
| 315 | src = sg_dma_address(sg); | ||
| 316 | dst = echan->addr; | ||
| 317 | src_bidx = acnt; | ||
| 318 | src_cidx = cidx; | ||
| 319 | dst_bidx = 0; | ||
| 320 | dst_cidx = 0; | ||
| 321 | } else { | ||
| 322 | src = echan->addr; | ||
| 323 | dst = sg_dma_address(sg); | ||
| 324 | src_bidx = 0; | ||
| 325 | src_cidx = 0; | ||
| 326 | dst_bidx = acnt; | ||
| 327 | dst_cidx = cidx; | ||
| 328 | } | ||
| 329 | |||
| 330 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
| 331 | /* Configure A or AB synchronized transfers */ | ||
| 332 | if (edesc->absync) | ||
| 333 | edesc->pset[i].opt |= SYNCDIM; | ||
| 334 | /* If this is the last set, enable completion interrupt flag */ | ||
| 335 | if (i == sg_len - 1) | ||
| 336 | edesc->pset[i].opt |= TCINTEN; | ||
| 337 | |||
| 338 | edesc->pset[i].src = src; | ||
| 339 | edesc->pset[i].dst = dst; | ||
| 340 | |||
| 341 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
| 342 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
| 343 | |||
| 344 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | ||
| 345 | edesc->pset[i].ccnt = ccnt; | ||
| 346 | edesc->pset[i].link_bcntrld = 0xffffffff; | ||
| 347 | |||
| 348 | } | ||
| 349 | |||
| 350 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | ||
| 351 | } | ||
| 352 | |||
| 353 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | ||
| 354 | { | ||
| 355 | struct edma_chan *echan = data; | ||
| 356 | struct device *dev = echan->vchan.chan.device->dev; | ||
| 357 | struct edma_desc *edesc; | ||
| 358 | unsigned long flags; | ||
| 359 | |||
| 360 | /* Stop the channel */ | ||
| 361 | edma_stop(echan->ch_num); | ||
| 362 | |||
| 363 | switch (ch_status) { | ||
| 364 | case DMA_COMPLETE: | ||
| 365 | dev_dbg(dev, "transfer complete on channel %d\n", ch_num); | ||
| 366 | |||
| 367 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
| 368 | |||
| 369 | edesc = echan->edesc; | ||
| 370 | if (edesc) { | ||
| 371 | edma_execute(echan); | ||
| 372 | vchan_cookie_complete(&edesc->vdesc); | ||
| 373 | } | ||
| 374 | |||
| 375 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
| 376 | |||
| 377 | break; | ||
| 378 | case DMA_CC_ERROR: | ||
| 379 | dev_dbg(dev, "transfer error on channel %d\n", ch_num); | ||
| 380 | break; | ||
| 381 | default: | ||
| 382 | break; | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 386 | /* Alloc channel resources */ | ||
| 387 | static int edma_alloc_chan_resources(struct dma_chan *chan) | ||
| 388 | { | ||
| 389 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 390 | struct device *dev = chan->device->dev; | ||
| 391 | int ret; | ||
| 392 | int a_ch_num; | ||
| 393 | LIST_HEAD(descs); | ||
| 394 | |||
| 395 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | ||
| 396 | chan, EVENTQ_DEFAULT); | ||
| 397 | |||
| 398 | if (a_ch_num < 0) { | ||
| 399 | ret = -ENODEV; | ||
| 400 | goto err_no_chan; | ||
| 401 | } | ||
| 402 | |||
| 403 | if (a_ch_num != echan->ch_num) { | ||
| 404 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | ||
| 405 | EDMA_CTLR(echan->ch_num), | ||
| 406 | EDMA_CHAN_SLOT(echan->ch_num)); | ||
| 407 | ret = -ENODEV; | ||
| 408 | goto err_wrong_chan; | ||
| 409 | } | ||
| 410 | |||
| 411 | echan->alloced = true; | ||
| 412 | echan->slot[0] = echan->ch_num; | ||
| 413 | |||
| 414 | dev_info(dev, "allocated channel for %u:%u\n", | ||
| 415 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | ||
| 416 | |||
| 417 | return 0; | ||
| 418 | |||
| 419 | err_wrong_chan: | ||
| 420 | edma_free_channel(a_ch_num); | ||
| 421 | err_no_chan: | ||
| 422 | return ret; | ||
| 423 | } | ||
| 424 | |||
| 425 | /* Free channel resources */ | ||
| 426 | static void edma_free_chan_resources(struct dma_chan *chan) | ||
| 427 | { | ||
| 428 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 429 | struct device *dev = chan->device->dev; | ||
| 430 | int i; | ||
| 431 | |||
| 432 | /* Terminate transfers */ | ||
| 433 | edma_stop(echan->ch_num); | ||
| 434 | |||
| 435 | vchan_free_chan_resources(&echan->vchan); | ||
| 436 | |||
| 437 | /* Free EDMA PaRAM slots */ | ||
| 438 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | ||
| 439 | if (echan->slot[i] >= 0) { | ||
| 440 | edma_free_slot(echan->slot[i]); | ||
| 441 | echan->slot[i] = -1; | ||
| 442 | } | ||
| 443 | } | ||
| 444 | |||
| 445 | /* Free EDMA channel */ | ||
| 446 | if (echan->alloced) { | ||
| 447 | edma_free_channel(echan->ch_num); | ||
| 448 | echan->alloced = false; | ||
| 449 | } | ||
| 450 | |||
| 451 | dev_info(dev, "freeing channel for %u\n", echan->ch_num); | ||
| 452 | } | ||
| 453 | |||
| 454 | /* Send pending descriptor to hardware */ | ||
| 455 | static void edma_issue_pending(struct dma_chan *chan) | ||
| 456 | { | ||
| 457 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 458 | unsigned long flags; | ||
| 459 | |||
| 460 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
| 461 | if (vchan_issue_pending(&echan->vchan) && !echan->edesc) | ||
| 462 | edma_execute(echan); | ||
| 463 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
| 464 | } | ||
| 465 | |||
| 466 | static size_t edma_desc_size(struct edma_desc *edesc) | ||
| 467 | { | ||
| 468 | int i; | ||
| 469 | size_t size; | ||
| 470 | |||
| 471 | if (edesc->absync) | ||
| 472 | for (size = i = 0; i < edesc->pset_nr; i++) | ||
| 473 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | ||
| 474 | (edesc->pset[i].a_b_cnt >> 16) * | ||
| 475 | edesc->pset[i].ccnt; | ||
| 476 | else | ||
| 477 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | ||
| 478 | (edesc->pset[0].a_b_cnt >> 16) + | ||
| 479 | (edesc->pset[0].a_b_cnt & 0xffff) * | ||
| 480 | (SZ_64K - 1) * edesc->pset[0].ccnt; | ||
| 481 | |||
| 482 | return size; | ||
| 483 | } | ||
| 484 | |||
| 485 | /* Check request completion status */ | ||
| 486 | static enum dma_status edma_tx_status(struct dma_chan *chan, | ||
| 487 | dma_cookie_t cookie, | ||
| 488 | struct dma_tx_state *txstate) | ||
| 489 | { | ||
| 490 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 491 | struct virt_dma_desc *vdesc; | ||
| 492 | enum dma_status ret; | ||
| 493 | unsigned long flags; | ||
| 494 | |||
| 495 | ret = dma_cookie_status(chan, cookie, txstate); | ||
| 496 | if (ret == DMA_SUCCESS || !txstate) | ||
| 497 | return ret; | ||
| 498 | |||
| 499 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
| 500 | vdesc = vchan_find_desc(&echan->vchan, cookie); | ||
| 501 | if (vdesc) { | ||
| 502 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | ||
| 503 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | ||
| 504 | struct edma_desc *edesc = echan->edesc; | ||
| 505 | txstate->residue = edma_desc_size(edesc); | ||
| 506 | } else { | ||
| 507 | txstate->residue = 0; | ||
| 508 | } | ||
| 509 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
| 510 | |||
| 511 | return ret; | ||
| 512 | } | ||
| 513 | |||
| 514 | static void __init edma_chan_init(struct edma_cc *ecc, | ||
| 515 | struct dma_device *dma, | ||
| 516 | struct edma_chan *echans) | ||
| 517 | { | ||
| 518 | int i, j; | ||
| 519 | |||
| 520 | for (i = 0; i < EDMA_CHANS; i++) { | ||
| 521 | struct edma_chan *echan = &echans[i]; | ||
| 522 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | ||
| 523 | echan->ecc = ecc; | ||
| 524 | echan->vchan.desc_free = edma_desc_free; | ||
| 525 | |||
| 526 | vchan_init(&echan->vchan, dma); | ||
| 527 | |||
| 528 | INIT_LIST_HEAD(&echan->node); | ||
| 529 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | ||
| 530 | echan->slot[j] = -1; | ||
| 531 | } | ||
| 532 | } | ||
| 533 | |||
| 534 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
| 535 | struct device *dev) | ||
| 536 | { | ||
| 537 | dma->device_prep_slave_sg = edma_prep_slave_sg; | ||
| 538 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
| 539 | dma->device_free_chan_resources = edma_free_chan_resources; | ||
| 540 | dma->device_issue_pending = edma_issue_pending; | ||
| 541 | dma->device_tx_status = edma_tx_status; | ||
| 542 | dma->device_control = edma_control; | ||
| 543 | dma->dev = dev; | ||
| 544 | |||
| 545 | INIT_LIST_HEAD(&dma->channels); | ||
| 546 | } | ||
| 547 | |||
| 548 | static int __devinit edma_probe(struct platform_device *pdev) | ||
| 549 | { | ||
| 550 | struct edma_cc *ecc; | ||
| 551 | int ret; | ||
| 552 | |||
| 553 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | ||
| 554 | if (!ecc) { | ||
| 555 | dev_err(&pdev->dev, "Can't allocate controller\n"); | ||
| 556 | return -ENOMEM; | ||
| 557 | } | ||
| 558 | |||
| 559 | ecc->ctlr = pdev->id; | ||
| 560 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | ||
| 561 | if (ecc->dummy_slot < 0) { | ||
| 562 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | ||
| 563 | return -EIO; | ||
| 564 | } | ||
| 565 | |||
| 566 | dma_cap_zero(ecc->dma_slave.cap_mask); | ||
| 567 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | ||
| 568 | |||
| 569 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | ||
| 570 | |||
| 571 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | ||
| 572 | |||
| 573 | ret = dma_async_device_register(&ecc->dma_slave); | ||
| 574 | if (ret) | ||
| 575 | goto err_reg1; | ||
| 576 | |||
| 577 | platform_set_drvdata(pdev, ecc); | ||
| 578 | |||
| 579 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | ||
| 580 | |||
| 581 | return 0; | ||
| 582 | |||
| 583 | err_reg1: | ||
| 584 | edma_free_slot(ecc->dummy_slot); | ||
| 585 | return ret; | ||
| 586 | } | ||
| 587 | |||
| 588 | static int __devexit edma_remove(struct platform_device *pdev) | ||
| 589 | { | ||
| 590 | struct device *dev = &pdev->dev; | ||
| 591 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
| 592 | |||
| 593 | dma_async_device_unregister(&ecc->dma_slave); | ||
| 594 | edma_free_slot(ecc->dummy_slot); | ||
| 595 | |||
| 596 | return 0; | ||
| 597 | } | ||
| 598 | |||
| 599 | static struct platform_driver edma_driver = { | ||
| 600 | .probe = edma_probe, | ||
| 601 | .remove = __devexit_p(edma_remove), | ||
| 602 | .driver = { | ||
| 603 | .name = "edma-dma-engine", | ||
| 604 | .owner = THIS_MODULE, | ||
| 605 | }, | ||
| 606 | }; | ||
| 607 | |||
| 608 | bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
| 609 | { | ||
| 610 | if (chan->device->dev->driver == &edma_driver.driver) { | ||
| 611 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 612 | unsigned ch_req = *(unsigned *)param; | ||
| 613 | return ch_req == echan->ch_num; | ||
| 614 | } | ||
| 615 | return false; | ||
| 616 | } | ||
| 617 | EXPORT_SYMBOL(edma_filter_fn); | ||
| 618 | |||
| 619 | static struct platform_device *pdev0, *pdev1; | ||
| 620 | |||
| 621 | static const struct platform_device_info edma_dev_info0 = { | ||
| 622 | .name = "edma-dma-engine", | ||
| 623 | .id = 0, | ||
| 624 | .dma_mask = DMA_BIT_MASK(32), | ||
| 625 | }; | ||
| 626 | |||
| 627 | static const struct platform_device_info edma_dev_info1 = { | ||
| 628 | .name = "edma-dma-engine", | ||
| 629 | .id = 1, | ||
| 630 | .dma_mask = DMA_BIT_MASK(32), | ||
| 631 | }; | ||
| 632 | |||
| 633 | static int edma_init(void) | ||
| 634 | { | ||
| 635 | int ret = platform_driver_register(&edma_driver); | ||
| 636 | |||
| 637 | if (ret == 0) { | ||
| 638 | pdev0 = platform_device_register_full(&edma_dev_info0); | ||
| 639 | if (IS_ERR(pdev0)) { | ||
| 640 | platform_driver_unregister(&edma_driver); | ||
| 641 | ret = PTR_ERR(pdev0); | ||
| 642 | goto out; | ||
| 643 | } | ||
| 644 | } | ||
| 645 | |||
| 646 | if (EDMA_CTLRS == 2) { | ||
| 647 | pdev1 = platform_device_register_full(&edma_dev_info1); | ||
| 648 | if (IS_ERR(pdev1)) { | ||
| 649 | platform_driver_unregister(&edma_driver); | ||
| 650 | platform_device_unregister(pdev0); | ||
| 651 | ret = PTR_ERR(pdev1); | ||
| 652 | } | ||
| 653 | } | ||
| 654 | |||
| 655 | out: | ||
| 656 | return ret; | ||
| 657 | } | ||
| 658 | subsys_initcall(edma_init); | ||
| 659 | |||
| 660 | static void __exit edma_exit(void) | ||
| 661 | { | ||
| 662 | platform_device_unregister(pdev0); | ||
| 663 | if (pdev1) | ||
| 664 | platform_device_unregister(pdev1); | ||
| 665 | platform_driver_unregister(&edma_driver); | ||
| 666 | } | ||
| 667 | module_exit(edma_exit); | ||
| 668 | |||
| 669 | MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); | ||
| 670 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); | ||
| 671 | MODULE_LICENSE("GPL v2"); | ||
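For reference, a slave client would typically pair the exported edma_filter_fn with dma_request_channel(); the sketch below is not part of this patch, and the controller/channel number is a made-up example:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	unsigned ch_num = EDMA_CTLR_CHAN(0, 20);   /* hypothetical channel */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
	if (!chan)
		return -ENODEV;                    /* or fall back to PIO */
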
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 86895760b598..b9d667851445 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f | |||
| 434 | return NULL; | 434 | return NULL; |
| 435 | memset(hw, 0, sizeof(*hw)); | 435 | memset(hw, 0, sizeof(*hw)); |
| 436 | 436 | ||
| 437 | desc = kmem_cache_alloc(ioat2_cache, flags); | 437 | desc = kmem_cache_zalloc(ioat2_cache, flags); |
| 438 | if (!desc) { | 438 | if (!desc) { |
| 439 | pci_pool_free(dma->dma_pool, hw, phys); | 439 | pci_pool_free(dma->dma_pool, hw, phys); |
| 440 | return NULL; | 440 | return NULL; |
| 441 | } | 441 | } |
| 442 | memset(desc, 0, sizeof(*desc)); | ||
| 443 | 442 | ||
| 444 | dma_async_tx_descriptor_init(&desc->txd, chan); | 443 | dma_async_tx_descriptor_init(&desc->txd, chan); |
| 445 | desc->txd.tx_submit = ioat2_tx_submit_unlock; | 444 | desc->txd.tx_submit = ioat2_tx_submit_unlock; |
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 5e3a40f79945..c0573061b45d 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
| @@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION); | |||
| 40 | MODULE_LICENSE("Dual BSD/GPL"); | 40 | MODULE_LICENSE("Dual BSD/GPL"); |
| 41 | MODULE_AUTHOR("Intel Corporation"); | 41 | MODULE_AUTHOR("Intel Corporation"); |
| 42 | 42 | ||
| 43 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | ||
| 44 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | ||
| 45 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | ||
| 46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 | ||
| 47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 | ||
| 48 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 | ||
| 49 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 | ||
| 50 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 | ||
| 51 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | ||
| 52 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | ||
| 53 | |||
| 43 | static struct pci_device_id ioat_pci_tbl[] = { | 54 | static struct pci_device_id ioat_pci_tbl[] = { |
| 44 | /* I/OAT v1 platforms */ | 55 | /* I/OAT v1 platforms */ |
| 45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | 56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, |
| @@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
| 83 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | 94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, |
| 84 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | 95 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, |
| 85 | 96 | ||
| 97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, | ||
| 98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, | ||
| 99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, | ||
| 100 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, | ||
| 101 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, | ||
| 102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, | ||
| 103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, | ||
| 104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, | ||
| 105 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | ||
| 106 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | ||
| 107 | |||
| 86 | { 0, } | 108 | { 0, } |
| 87 | }; | 109 | }; |
| 88 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 110 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c new file mode 100644 index 000000000000..14da1f403edf --- /dev/null +++ b/drivers/dma/mmp_pdma.c | |||
| @@ -0,0 +1,875 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2012 Marvell International Ltd. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/types.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/dma-mapping.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/dmaengine.h> | ||
| 15 | #include <linux/platform_device.h> | ||
| 16 | #include <linux/device.h> | ||
| 17 | #include <linux/platform_data/mmp_dma.h> | ||
| 18 | #include <linux/dmapool.h> | ||
| 19 | #include <linux/of_device.h> | ||
| 20 | #include <linux/of.h> | ||
| 21 | |||
| 22 | #include "dmaengine.h" | ||
| 23 | |||
| 24 | #define DCSR 0x0000 | ||
| 25 | #define DALGN 0x00a0 | ||
| 26 | #define DINT 0x00f0 | ||
| 27 | #define DDADR 0x0200 | ||
| 28 | #define DSADR 0x0204 | ||
| 29 | #define DTADR 0x0208 | ||
| 30 | #define DCMD 0x020c | ||
| 31 | |||
| 32 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ | ||
| 33 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ | ||
| 34 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ | ||
| 35 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ | ||
| 36 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ | ||
| 37 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ | ||
| 38 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ | ||
| 39 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ | ||
| 40 | |||
| 41 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ | ||
| 42 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ | ||
| 43 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ | ||
| 44 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ | ||
| 45 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ | ||
| 46 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | ||
| 47 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | ||
| 48 | |||
| 49 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | ||
| 50 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | ||
| 51 | |||
| 52 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | ||
| 53 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ | ||
| 54 | |||
| 55 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ | ||
| 56 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ | ||
| 57 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ | ||
| 58 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ | ||
| 59 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ | ||
| 60 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ | ||
| 61 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ | ||
| 62 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ | ||
| 63 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ | ||
| 64 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ | ||
| 65 | #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ | ||
| 66 | #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ | ||
| 67 | #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ | ||
| 68 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | ||
| 69 | |||
| 70 | #define PDMA_ALIGNMENT 3 | ||
| 71 | #define PDMA_MAX_DESC_BYTES 0x1000 | ||
| 72 | |||
| 73 | struct mmp_pdma_desc_hw { | ||
| 74 | u32 ddadr; /* Points to the next descriptor + flags */ | ||
| 75 | u32 dsadr; /* DSADR value for the current transfer */ | ||
| 76 | u32 dtadr; /* DTADR value for the current transfer */ | ||
| 77 | u32 dcmd; /* DCMD value for the current transfer */ | ||
| 78 | } __aligned(32); | ||
| 79 | |||
| 80 | struct mmp_pdma_desc_sw { | ||
| 81 | struct mmp_pdma_desc_hw desc; | ||
| 82 | struct list_head node; | ||
| 83 | struct list_head tx_list; | ||
| 84 | struct dma_async_tx_descriptor async_tx; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct mmp_pdma_phy; | ||
| 88 | |||
| 89 | struct mmp_pdma_chan { | ||
| 90 | struct device *dev; | ||
| 91 | struct dma_chan chan; | ||
| 92 | struct dma_async_tx_descriptor desc; | ||
| 93 | struct mmp_pdma_phy *phy; | ||
| 94 | enum dma_transfer_direction dir; | ||
| 95 | |||
| 96 | /* channel's basic info */ | ||
| 97 | struct tasklet_struct tasklet; | ||
| 98 | u32 dcmd; | ||
| 99 | u32 drcmr; | ||
| 100 | u32 dev_addr; | ||
| 101 | |||
| 102 | /* list for desc */ | ||
| 103 | spinlock_t desc_lock; /* Descriptor list lock */ | ||
| 104 | struct list_head chain_pending; /* Link descriptors queue for pending */ | ||
| 105 | struct list_head chain_running; /* Link descriptors queue for running */ | ||
| 106 | bool idle; /* channel state machine */ | ||
| 107 | |||
| 108 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | struct mmp_pdma_phy { | ||
| 112 | int idx; | ||
| 113 | void __iomem *base; | ||
| 114 | struct mmp_pdma_chan *vchan; | ||
| 115 | }; | ||
| 116 | |||
| 117 | struct mmp_pdma_device { | ||
| 118 | int dma_channels; | ||
| 119 | void __iomem *base; | ||
| 120 | struct device *dev; | ||
| 121 | struct dma_device device; | ||
| 122 | struct mmp_pdma_phy *phy; | ||
| 123 | }; | ||
| 124 | |||
| 125 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | ||
| 126 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) | ||
| 127 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) | ||
| 128 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) | ||
| 129 | |||
| 130 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | ||
| 131 | { | ||
| 132 | u32 reg = (phy->idx << 4) + DDADR; | ||
| 133 | |||
| 134 | writel(addr, phy->base + reg); | ||
| 135 | } | ||
| 136 | |||
| 137 | static void enable_chan(struct mmp_pdma_phy *phy) | ||
| 138 | { | ||
| 139 | u32 reg; | ||
| 140 | |||
| 141 | if (!phy->vchan) | ||
| 142 | return; | ||
| 143 | |||
| 144 | reg = phy->vchan->drcmr; | ||
| 145 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); | ||
| 146 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
| 147 | |||
| 148 | reg = (phy->idx << 2) + DCSR; | ||
| 149 | writel(readl(phy->base + reg) | DCSR_RUN, | ||
| 150 | phy->base + reg); | ||
| 151 | } | ||
| 152 | |||
| 153 | static void disable_chan(struct mmp_pdma_phy *phy) | ||
| 154 | { | ||
| 155 | u32 reg; | ||
| 156 | |||
| 157 | if (phy) { | ||
| 158 | reg = (phy->idx << 2) + DCSR; | ||
| 159 | writel(readl(phy->base + reg) & ~DCSR_RUN, | ||
| 160 | phy->base + reg); | ||
| 161 | } | ||
| 162 | } | ||
| 163 | |||
| 164 | static int clear_chan_irq(struct mmp_pdma_phy *phy) | ||
| 165 | { | ||
| 166 | u32 dcsr; | ||
| 167 | u32 dint = readl(phy->base + DINT); | ||
| 168 | u32 reg = (phy->idx << 2) + DCSR; | ||
| 169 | |||
| 170 | if (dint & BIT(phy->idx)) { | ||
| 171 | /* clear irq */ | ||
| 172 | dcsr = readl(phy->base + reg); | ||
| 173 | writel(dcsr, phy->base + reg); | ||
| 174 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) | ||
| 175 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | return -EAGAIN; | ||
| 179 | } | ||
| 180 | |||
| 181 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) | ||
| 182 | { | ||
| 183 | struct mmp_pdma_phy *phy = dev_id; | ||
| 184 | |||
| 185 | if (clear_chan_irq(phy) == 0) { | ||
| 186 | tasklet_schedule(&phy->vchan->tasklet); | ||
| 187 | return IRQ_HANDLED; | ||
| 188 | } else | ||
| 189 | return IRQ_NONE; | ||
| 190 | } | ||
| 191 | |||
| 192 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | ||
| 193 | { | ||
| 194 | struct mmp_pdma_device *pdev = dev_id; | ||
| 195 | struct mmp_pdma_phy *phy; | ||
| 196 | u32 dint = readl(pdev->base + DINT); | ||
| 197 | int i, ret; | ||
| 198 | int irq_num = 0; | ||
| 199 | |||
| 200 | while (dint) { | ||
| 201 | i = __ffs(dint); | ||
| 202 | dint &= (dint - 1); | ||
| 203 | phy = &pdev->phy[i]; | ||
| 204 | ret = mmp_pdma_chan_handler(irq, phy); | ||
| 205 | if (ret == IRQ_HANDLED) | ||
| 206 | irq_num++; | ||
| 207 | } | ||
| 208 | |||
| 209 | if (irq_num) | ||
| 210 | return IRQ_HANDLED; | ||
| 211 | else | ||
| 212 | return IRQ_NONE; | ||
| 213 | } | ||
| 214 | |||
| 215 | /* lookup free phy channel as descending priority */ | ||
| 216 | static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | ||
| 217 | { | ||
| 218 | int prio, i; | ||
| 219 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); | ||
| 220 | struct mmp_pdma_phy *phy; | ||
| 221 | |||
| 222 | /* | ||
| 223 | * dma channel priorities | ||
| 224 | * ch 0 - 3, 16 - 19 <--> (0) | ||
| 225 | * ch 4 - 7, 20 - 23 <--> (1) | ||
| 226 | * ch 8 - 11, 24 - 27 <--> (2) | ||
| 227 | * ch 12 - 15, 28 - 31 <--> (3) | ||
| 228 | */ | ||
| 229 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | ||
| 230 | for (i = 0; i < pdev->dma_channels; i++) { | ||
| 231 | if (prio != ((i & 0xf) >> 2)) | ||
| 232 | continue; | ||
| 233 | phy = &pdev->phy[i]; | ||
| 234 | if (!phy->vchan) { | ||
| 235 | phy->vchan = pchan; | ||
| 236 | return phy; | ||
| 237 | } | ||
| 238 | } | ||
| 239 | } | ||
| 240 | |||
| 241 | return NULL; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* desc->tx_list ==> pending list */ | ||
| 245 | static void append_pending_queue(struct mmp_pdma_chan *chan, | ||
| 246 | struct mmp_pdma_desc_sw *desc) | ||
| 247 | { | ||
| 248 | struct mmp_pdma_desc_sw *tail = | ||
| 249 | to_mmp_pdma_desc(chan->chain_pending.prev); | ||
| 250 | |||
| 251 | if (list_empty(&chan->chain_pending)) | ||
| 252 | goto out_splice; | ||
| 253 | |||
| 254 | /* one irq per queue, even appended */ | ||
| 255 | tail->desc.ddadr = desc->async_tx.phys; | ||
| 256 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; | ||
| 257 | |||
| 258 | /* softly link to pending list */ | ||
| 259 | out_splice: | ||
| 260 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | ||
| 261 | } | ||
| 262 | |||
| 263 | /** | ||
| 264 | * start_pending_queue - transfer any pending transactions | ||
| 265 | * pending list ==> running list | ||
| 266 | */ | ||
| 267 | static void start_pending_queue(struct mmp_pdma_chan *chan) | ||
| 268 | { | ||
| 269 | struct mmp_pdma_desc_sw *desc; | ||
| 270 | |||
| 271 | /* still running, irq will start the pending list */ | ||
| 272 | if (!chan->idle) { | ||
| 273 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
| 274 | return; | ||
| 275 | } | ||
| 276 | |||
| 277 | if (list_empty(&chan->chain_pending)) { | ||
| 278 | /* chance to re-fetch phy channel with higher prio */ | ||
| 279 | if (chan->phy) { | ||
| 280 | chan->phy->vchan = NULL; | ||
| 281 | chan->phy = NULL; | ||
| 282 | } | ||
| 283 | dev_dbg(chan->dev, "no pending list\n"); | ||
| 284 | return; | ||
| 285 | } | ||
| 286 | |||
| 287 | if (!chan->phy) { | ||
| 288 | chan->phy = lookup_phy(chan); | ||
| 289 | if (!chan->phy) { | ||
| 290 | dev_dbg(chan->dev, "no free dma channel\n"); | ||
| 291 | return; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | ||
| 296 | * pending -> running | ||
| 297 | * reinitialize pending list | ||
| 298 | */ | ||
| 299 | desc = list_first_entry(&chan->chain_pending, | ||
| 300 | struct mmp_pdma_desc_sw, node); | ||
| 301 | list_splice_tail_init(&chan->chain_pending, &chan->chain_running); | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Program the descriptor's address into the DMA controller, | ||
| 305 | * then start the DMA transaction | ||
| 306 | */ | ||
| 307 | set_desc(chan->phy, desc->async_tx.phys); | ||
| 308 | enable_chan(chan->phy); | ||
| 309 | chan->idle = false; | ||
| 310 | } | ||
| 311 | |||
| 312 | |||
| 313 | /* desc->tx_list ==> pending list */ | ||
| 314 | static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 315 | { | ||
| 316 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); | ||
| 317 | struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); | ||
| 318 | struct mmp_pdma_desc_sw *child; | ||
| 319 | unsigned long flags; | ||
| 320 | dma_cookie_t cookie = -EBUSY; | ||
| 321 | |||
| 322 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 323 | |||
| 324 | list_for_each_entry(child, &desc->tx_list, node) { | ||
| 325 | cookie = dma_cookie_assign(&child->async_tx); | ||
| 326 | } | ||
| 327 | |||
| 328 | append_pending_queue(chan, desc); | ||
| 329 | |||
| 330 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 331 | |||
| 332 | return cookie; | ||
| 333 | } | ||
| 334 | |||
| 335 | struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | ||
| 336 | { | ||
| 337 | struct mmp_pdma_desc_sw *desc; | ||
| 338 | dma_addr_t pdesc; | ||
| 339 | |||
| 340 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
| 341 | if (!desc) { | ||
| 342 | dev_err(chan->dev, "out of memory for link descriptor\n"); | ||
| 343 | return NULL; | ||
| 344 | } | ||
| 345 | |||
| 346 | memset(desc, 0, sizeof(*desc)); | ||
| 347 | INIT_LIST_HEAD(&desc->tx_list); | ||
| 348 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | ||
| 349 | /* each desc has submit */ | ||
| 350 | desc->async_tx.tx_submit = mmp_pdma_tx_submit; | ||
| 351 | desc->async_tx.phys = pdesc; | ||
| 352 | |||
| 353 | return desc; | ||
| 354 | } | ||
| 355 | |||
| 356 | /** | ||
| 357 | * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. | ||
| 358 | * | ||
| 359 | * This function will create a dma pool for descriptor allocation. | ||
| 360 | * Request irq only when channel is requested | ||
| 361 | * Return - The number of allocated descriptors. | ||
| 362 | */ | ||
| 363 | |||
| 364 | static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | ||
| 365 | { | ||
| 366 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 367 | |||
| 368 | if (chan->desc_pool) | ||
| 369 | return 1; | ||
| 370 | |||
| 371 | chan->desc_pool = | ||
| 372 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, | ||
| 373 | sizeof(struct mmp_pdma_desc_sw), | ||
| 374 | __alignof__(struct mmp_pdma_desc_sw), 0); | ||
| 375 | if (!chan->desc_pool) { | ||
| 376 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | ||
| 377 | return -ENOMEM; | ||
| 378 | } | ||
| 379 | if (chan->phy) { | ||
| 380 | chan->phy->vchan = NULL; | ||
| 381 | chan->phy = NULL; | ||
| 382 | } | ||
| 383 | chan->idle = true; | ||
| 384 | chan->dev_addr = 0; | ||
| 385 | return 1; | ||
| 386 | } | ||
| 387 | |||
| 388 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, | ||
| 389 | struct list_head *list) | ||
| 390 | { | ||
| 391 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
| 392 | |||
| 393 | list_for_each_entry_safe(desc, _desc, list, node) { | ||
| 394 | list_del(&desc->node); | ||
| 395 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
| 396 | } | ||
| 397 | } | ||
| 398 | |||
| 399 | static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | ||
| 400 | { | ||
| 401 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 402 | unsigned long flags; | ||
| 403 | |||
| 404 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 405 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
| 406 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
| 407 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 408 | |||
| 409 | dma_pool_destroy(chan->desc_pool); | ||
| 410 | chan->desc_pool = NULL; | ||
| 411 | chan->idle = true; | ||
| 412 | chan->dev_addr = 0; | ||
| 413 | if (chan->phy) { | ||
| 414 | chan->phy->vchan = NULL; | ||
| 415 | chan->phy = NULL; | ||
| 416 | } | ||
| 417 | return; | ||
| 418 | } | ||
| 419 | |||
| 420 | static struct dma_async_tx_descriptor * | ||
| 421 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, | ||
| 422 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
| 423 | size_t len, unsigned long flags) | ||
| 424 | { | ||
| 425 | struct mmp_pdma_chan *chan; | ||
| 426 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | ||
| 427 | size_t copy = 0; | ||
| 428 | |||
| 429 | if (!dchan) | ||
| 430 | return NULL; | ||
| 431 | |||
| 432 | if (!len) | ||
| 433 | return NULL; | ||
| 434 | |||
| 435 | chan = to_mmp_pdma_chan(dchan); | ||
| 436 | |||
| 437 | if (!chan->dir) { | ||
| 438 | chan->dir = DMA_MEM_TO_MEM; | ||
| 439 | chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; | ||
| 440 | chan->dcmd |= DCMD_BURST32; | ||
| 441 | } | ||
| 442 | |||
| 443 | do { | ||
| 444 | /* Allocate the link descriptor from DMA pool */ | ||
| 445 | new = mmp_pdma_alloc_descriptor(chan); | ||
| 446 | if (!new) { | ||
| 447 | dev_err(chan->dev, "no memory for desc\n"); | ||
| 448 | goto fail; | ||
| 449 | } | ||
| 450 | |||
| 451 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | ||
| 452 | |||
| 453 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | ||
| 454 | new->desc.dsadr = dma_src; | ||
| 455 | new->desc.dtadr = dma_dst; | ||
| 456 | |||
| 457 | if (!first) | ||
| 458 | first = new; | ||
| 459 | else | ||
| 460 | prev->desc.ddadr = new->async_tx.phys; | ||
| 461 | |||
| 462 | new->async_tx.cookie = 0; | ||
| 463 | async_tx_ack(&new->async_tx); | ||
| 464 | |||
| 465 | prev = new; | ||
| 466 | len -= copy; | ||
| 467 | |||
| 468 | if (chan->dir == DMA_MEM_TO_DEV) { | ||
| 469 | dma_src += copy; | ||
| 470 | } else if (chan->dir == DMA_DEV_TO_MEM) { | ||
| 471 | dma_dst += copy; | ||
| 472 | } else if (chan->dir == DMA_MEM_TO_MEM) { | ||
| 473 | dma_src += copy; | ||
| 474 | dma_dst += copy; | ||
| 475 | } | ||
| 476 | |||
| 477 | /* Insert the link descriptor to the LD ring */ | ||
| 478 | list_add_tail(&new->node, &first->tx_list); | ||
| 479 | } while (len); | ||
| 480 | |||
| 481 | first->async_tx.flags = flags; /* client is in control of this ack */ | ||
| 482 | first->async_tx.cookie = -EBUSY; | ||
| 483 | |||
| 484 | /* last desc and fire IRQ */ | ||
| 485 | new->desc.ddadr = DDADR_STOP; | ||
| 486 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
| 487 | |||
| 488 | return &first->async_tx; | ||
| 489 | |||
| 490 | fail: | ||
| 491 | if (first) | ||
| 492 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
| 493 | return NULL; | ||
| 494 | } | ||
| 495 | |||
| 496 | static struct dma_async_tx_descriptor * | ||
| 497 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
| 498 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
| 499 | unsigned long flags, void *context) | ||
| 500 | { | ||
| 501 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 502 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; | ||
| 503 | size_t len, avail; | ||
| 504 | struct scatterlist *sg; | ||
| 505 | dma_addr_t addr; | ||
| 506 | int i; | ||
| 507 | |||
| 508 | if ((sgl == NULL) || (sg_len == 0)) | ||
| 509 | return NULL; | ||
| 510 | |||
| 511 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 512 | addr = sg_dma_address(sg); | ||
| 513 | avail = sg_dma_len(sg); | ||
| 514 | |||
| 515 | do { | ||
| 516 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | ||
| 517 | |||
| 518 | /* allocate and populate the descriptor */ | ||
| 519 | new = mmp_pdma_alloc_descriptor(chan); | ||
| 520 | if (!new) { | ||
| 521 | dev_err(chan->dev, "no memory for desc\n"); | ||
| 522 | goto fail; | ||
| 523 | } | ||
| 524 | |||
| 525 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); | ||
| 526 | if (dir == DMA_MEM_TO_DEV) { | ||
| 527 | new->desc.dsadr = addr; | ||
| 528 | new->desc.dtadr = chan->dev_addr; | ||
| 529 | } else { | ||
| 530 | new->desc.dsadr = chan->dev_addr; | ||
| 531 | new->desc.dtadr = addr; | ||
| 532 | } | ||
| 533 | |||
| 534 | if (!first) | ||
| 535 | first = new; | ||
| 536 | else | ||
| 537 | prev->desc.ddadr = new->async_tx.phys; | ||
| 538 | |||
| 539 | new->async_tx.cookie = 0; | ||
| 540 | async_tx_ack(&new->async_tx); | ||
| 541 | prev = new; | ||
| 542 | |||
| 543 | /* Insert the link descriptor to the LD ring */ | ||
| 544 | list_add_tail(&new->node, &first->tx_list); | ||
| 545 | |||
| 546 | /* update metadata */ | ||
| 547 | addr += len; | ||
| 548 | avail -= len; | ||
| 549 | } while (avail); | ||
| 550 | } | ||
| 551 | |||
| 552 | first->async_tx.cookie = -EBUSY; | ||
| 553 | first->async_tx.flags = flags; | ||
| 554 | |||
| 555 | /* last desc and fire IRQ */ | ||
| 556 | new->desc.ddadr = DDADR_STOP; | ||
| 557 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
| 558 | |||
| 559 | return &first->async_tx; | ||
| 560 | |||
| 561 | fail: | ||
| 562 | if (first) | ||
| 563 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
| 564 | return NULL; | ||
| 565 | } | ||
| 566 | |||
| 567 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
| 568 | unsigned long arg) | ||
| 569 | { | ||
| 570 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 571 | struct dma_slave_config *cfg = (void *)arg; | ||
| 572 | unsigned long flags; | ||
| 573 | int ret = 0; | ||
| 574 | u32 maxburst = 0, addr = 0; | ||
| 575 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
| 576 | |||
| 577 | if (!dchan) | ||
| 578 | return -EINVAL; | ||
| 579 | |||
| 580 | switch (cmd) { | ||
| 581 | case DMA_TERMINATE_ALL: | ||
| 582 | disable_chan(chan->phy); | ||
| 583 | if (chan->phy) { | ||
| 584 | chan->phy->vchan = NULL; | ||
| 585 | chan->phy = NULL; | ||
| 586 | } | ||
| 587 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 588 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
| 589 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
| 590 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 591 | chan->idle = true; | ||
| 592 | break; | ||
| 593 | case DMA_SLAVE_CONFIG: | ||
| 594 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
| 595 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | ||
| 596 | maxburst = cfg->src_maxburst; | ||
| 597 | width = cfg->src_addr_width; | ||
| 598 | addr = cfg->src_addr; | ||
| 599 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
| 600 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | ||
| 601 | maxburst = cfg->dst_maxburst; | ||
| 602 | width = cfg->dst_addr_width; | ||
| 603 | addr = cfg->dst_addr; | ||
| 604 | } | ||
| 605 | |||
| 606 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
| 607 | chan->dcmd |= DCMD_WIDTH1; | ||
| 608 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
| 609 | chan->dcmd |= DCMD_WIDTH2; | ||
| 610 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 611 | chan->dcmd |= DCMD_WIDTH4; | ||
| 612 | |||
| 613 | if (maxburst == 8) | ||
| 614 | chan->dcmd |= DCMD_BURST8; | ||
| 615 | else if (maxburst == 16) | ||
| 616 | chan->dcmd |= DCMD_BURST16; | ||
| 617 | else if (maxburst == 32) | ||
| 618 | chan->dcmd |= DCMD_BURST32; | ||
| 619 | |||
| 620 | if (cfg) { | ||
| 621 | chan->dir = cfg->direction; | ||
| 622 | chan->drcmr = cfg->slave_id; | ||
| 623 | } | ||
| 624 | chan->dev_addr = addr; | ||
| 625 | break; | ||
| 626 | default: | ||
| 627 | return -ENOSYS; | ||
| 628 | } | ||
| 629 | |||
| 630 | return ret; | ||
| 631 | } | ||
| 632 | |||
| 633 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | ||
| 634 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
| 635 | { | ||
| 636 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 637 | enum dma_status ret; | ||
| 638 | unsigned long flags; | ||
| 639 | |||
| 640 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 641 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
| 642 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 643 | |||
| 644 | return ret; | ||
| 645 | } | ||
| 646 | |||
| 647 | /** | ||
| 648 | * mmp_pdma_issue_pending - Issue the DMA start command | ||
| 649 | * pending list ==> running list | ||
| 650 | */ | ||
| 651 | static void mmp_pdma_issue_pending(struct dma_chan *dchan) | ||
| 652 | { | ||
| 653 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 654 | unsigned long flags; | ||
| 655 | |||
| 656 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 657 | start_pending_queue(chan); | ||
| 658 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 659 | } | ||
| 660 | |||
| 661 | /* | ||
| 662 | * dma_do_tasklet | ||
| 663 | * Run descriptor callbacks | ||
| 664 | * Start the pending list | ||
| 665 | */ | ||
| 666 | static void dma_do_tasklet(unsigned long data) | ||
| 667 | { | ||
| 668 | struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; | ||
| 669 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
| 670 | LIST_HEAD(chain_cleanup); | ||
| 671 | unsigned long flags; | ||
| 672 | |||
| 673 | /* submit pending list; callback for each desc; free desc */ | ||
| 674 | |||
| 675 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 676 | |||
| 677 | /* update the cookie if we have some descriptors to cleanup */ | ||
| 678 | if (!list_empty(&chan->chain_running)) { | ||
| 679 | dma_cookie_t cookie; | ||
| 680 | |||
| 681 | desc = to_mmp_pdma_desc(chan->chain_running.prev); | ||
| 682 | cookie = desc->async_tx.cookie; | ||
| 683 | dma_cookie_complete(&desc->async_tx); | ||
| 684 | |||
| 685 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | ||
| 686 | } | ||
| 687 | |||
| 688 | /* | ||
| 689 | * move the descriptors to a temporary list so we can drop the lock | ||
| 690 | * during the entire cleanup operation | ||
| 691 | */ | ||
| 692 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); | ||
| 693 | |||
| 694 | /* the hardware is now idle and ready for more */ | ||
| 695 | chan->idle = true; | ||
| 696 | |||
| 697 | /* Start any pending transactions automatically */ | ||
| 698 | start_pending_queue(chan); | ||
| 699 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 700 | |||
| 701 | /* Run the callback for each descriptor, in order */ | ||
| 702 | list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { | ||
| 703 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
| 704 | |||
| 705 | /* Remove from the list of transactions */ | ||
| 706 | list_del(&desc->node); | ||
| 707 | /* Run the link descriptor callback function */ | ||
| 708 | if (txd->callback) | ||
| 709 | txd->callback(txd->callback_param); | ||
| 710 | |||
| 711 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
| 712 | } | ||
| 713 | } | ||
| 714 | |||
| 715 | static int __devexit mmp_pdma_remove(struct platform_device *op) | ||
| 716 | { | ||
| 717 | struct mmp_pdma_device *pdev = platform_get_drvdata(op); | ||
| 718 | |||
| 719 | dma_async_device_unregister(&pdev->device); | ||
| 720 | return 0; | ||
| 721 | } | ||
| 722 | |||
| 723 | static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | ||
| 724 | int idx, int irq) | ||
| 725 | { | ||
| 726 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; | ||
| 727 | struct mmp_pdma_chan *chan; | ||
| 728 | int ret; | ||
| 729 | |||
| 730 | chan = devm_kzalloc(pdev->dev, | ||
| 731 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); | ||
| 732 | if (chan == NULL) | ||
| 733 | return -ENOMEM; | ||
| 734 | |||
| 735 | phy->idx = idx; | ||
| 736 | phy->base = pdev->base; | ||
| 737 | |||
| 738 | if (irq) { | ||
| 739 | ret = devm_request_irq(pdev->dev, irq, | ||
| 740 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | ||
| 741 | if (ret) { | ||
| 742 | dev_err(pdev->dev, "channel request irq fail!\n"); | ||
| 743 | return ret; | ||
| 744 | } | ||
| 745 | } | ||
| 746 | |||
| 747 | spin_lock_init(&chan->desc_lock); | ||
| 748 | chan->dev = pdev->dev; | ||
| 749 | chan->chan.device = &pdev->device; | ||
| 750 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | ||
| 751 | INIT_LIST_HEAD(&chan->chain_pending); | ||
| 752 | INIT_LIST_HEAD(&chan->chain_running); | ||
| 753 | |||
| 754 | /* register virt channel to dma engine */ | ||
| 755 | list_add_tail(&chan->chan.device_node, | ||
| 756 | &pdev->device.channels); | ||
| 757 | |||
| 758 | return 0; | ||
| 759 | } | ||
| 760 | |||
| 761 | static struct of_device_id mmp_pdma_dt_ids[] = { | ||
| 762 | { .compatible = "marvell,pdma-1.0", }, | ||
| 763 | {} | ||
| 764 | }; | ||
| 765 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); | ||
| 766 | |||
| 767 | static int __devinit mmp_pdma_probe(struct platform_device *op) | ||
| 768 | { | ||
| 769 | struct mmp_pdma_device *pdev; | ||
| 770 | const struct of_device_id *of_id; | ||
| 771 | struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); | ||
| 772 | struct resource *iores; | ||
| 773 | int i, ret, irq = 0; | ||
| 774 | int dma_channels = 0, irq_num = 0; | ||
| 775 | |||
| 776 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | ||
| 777 | if (!pdev) | ||
| 778 | return -ENOMEM; | ||
| 779 | pdev->dev = &op->dev; | ||
| 780 | |||
| 781 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | ||
| 782 | if (!iores) | ||
| 783 | return -EINVAL; | ||
| 784 | |||
| 785 | pdev->base = devm_request_and_ioremap(pdev->dev, iores); | ||
| 786 | if (!pdev->base) | ||
| 787 | return -EADDRNOTAVAIL; | ||
| 788 | |||
| 789 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); | ||
| 790 | if (of_id) | ||
| 791 | of_property_read_u32(pdev->dev->of_node, | ||
| 792 | "#dma-channels", &dma_channels); | ||
| 793 | else if (pdata && pdata->dma_channels) | ||
| 794 | dma_channels = pdata->dma_channels; | ||
| 795 | else | ||
| 796 | dma_channels = 32; /* default to 32 channels */ | ||
| 797 | pdev->dma_channels = dma_channels; | ||
| 798 | |||
| 799 | for (i = 0; i < dma_channels; i++) { | ||
| 800 | if (platform_get_irq(op, i) > 0) | ||
| 801 | irq_num++; | ||
| 802 | } | ||
| 803 | |||
| 804 | pdev->phy = devm_kzalloc(pdev->dev, | ||
| 805 | dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL); | ||
| 806 | if (pdev->phy == NULL) | ||
| 807 | return -ENOMEM; | ||
| 808 | |||
| 809 | INIT_LIST_HEAD(&pdev->device.channels); | ||
| 810 | |||
| 811 | if (irq_num != dma_channels) { | ||
| 812 | /* all chan share one irq, demux inside */ | ||
| 813 | irq = platform_get_irq(op, 0); | ||
| 814 | ret = devm_request_irq(pdev->dev, irq, | ||
| 815 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | ||
| 816 | if (ret) | ||
| 817 | return ret; | ||
| 818 | } | ||
| 819 | |||
| 820 | for (i = 0; i < dma_channels; i++) { | ||
| 821 | irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i); | ||
| 822 | ret = mmp_pdma_chan_init(pdev, i, irq); | ||
| 823 | if (ret) | ||
| 824 | return ret; | ||
| 825 | } | ||
| 826 | |||
| 827 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | ||
| 828 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); | ||
| 829 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | ||
| 830 | pdev->device.dev = &op->dev; | ||
| 831 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; | ||
| 832 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; | ||
| 833 | pdev->device.device_tx_status = mmp_pdma_tx_status; | ||
| 834 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; | ||
| 835 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | ||
| 836 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | ||
| 837 | pdev->device.device_control = mmp_pdma_control; | ||
| 838 | pdev->device.copy_align = PDMA_ALIGNMENT; | ||
| 839 | |||
| 840 | if (pdev->dev->coherent_dma_mask) | ||
| 841 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); | ||
| 842 | else | ||
| 843 | dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); | ||
| 844 | |||
| 845 | ret = dma_async_device_register(&pdev->device); | ||
| 846 | if (ret) { | ||
| 847 | dev_err(pdev->device.dev, "unable to register\n"); | ||
| 848 | return ret; | ||
| 849 | } | ||
| 850 | |||
| 851 | dev_info(pdev->device.dev, "initialized\n"); | ||
| 852 | return 0; | ||
| 853 | } | ||
| 854 | |||
| 855 | static const struct platform_device_id mmp_pdma_id_table[] = { | ||
| 856 | { "mmp-pdma", }, | ||
| 857 | { }, | ||
| 858 | }; | ||
| 859 | |||
| 860 | static struct platform_driver mmp_pdma_driver = { | ||
| 861 | .driver = { | ||
| 862 | .name = "mmp-pdma", | ||
| 863 | .owner = THIS_MODULE, | ||
| 864 | .of_match_table = mmp_pdma_dt_ids, | ||
| 865 | }, | ||
| 866 | .id_table = mmp_pdma_id_table, | ||
| 867 | .probe = mmp_pdma_probe, | ||
| 868 | .remove = __devexit_p(mmp_pdma_remove), | ||
| 869 | }; | ||
| 870 | |||
| 871 | module_platform_driver(mmp_pdma_driver); | ||
| 872 | |||
| 873 | MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); | ||
| 874 | MODULE_AUTHOR("Marvell International Ltd."); | ||
| 875 | MODULE_LICENSE("GPL v2"); | ||
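A slave client of this driver goes through the generic dmaengine calls; the helper below is only a sketch, and the FIFO address, bus width, burst size and DRCMR request line are placeholders rather than values from any real peripheral:

	#include <linux/dmaengine.h>

	/* Hypothetical helper: queue a DEV_TO_MEM transfer on a channel
	 * already obtained from this driver. */
	static int example_pdma_rx(struct dma_chan *chan,
				   struct scatterlist *sgl, unsigned int sg_len)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= 0xd4030010,	/* placeholder FIFO address */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 32,
			.slave_id	= 21,		/* placeholder DRCMR line */
		};
		struct dma_async_tx_descriptor *tx;
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM, 0);
		if (!tx)
			return -ENOMEM;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}
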
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 6d9c82e891d7..f3e8d71bcbc7 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
| 21 | #include <mach/regs-icu.h> | 21 | #include <mach/regs-icu.h> |
| 22 | #include <linux/platform_data/dma-mmp_tdma.h> | 22 | #include <linux/platform_data/dma-mmp_tdma.h> |
| 23 | #include <linux/of_device.h> | ||
| 23 | 24 | ||
| 24 | #include "dmaengine.h" | 25 | #include "dmaengine.h" |
| 25 | 26 | ||
| @@ -127,7 +128,6 @@ struct mmp_tdma_device { | |||
| 127 | void __iomem *base; | 128 | void __iomem *base; |
| 128 | struct dma_device device; | 129 | struct dma_device device; |
| 129 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; | 130 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; |
| 130 | int irq; | ||
| 131 | }; | 131 | }; |
| 132 | 132 | ||
| 133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) | 133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) |
| @@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
| 492 | return -ENOMEM; | 492 | return -ENOMEM; |
| 493 | } | 493 | } |
| 494 | if (irq) | 494 | if (irq) |
| 495 | tdmac->irq = irq + idx; | 495 | tdmac->irq = irq; |
| 496 | tdmac->dev = tdev->dev; | 496 | tdmac->dev = tdev->dev; |
| 497 | tdmac->chan.device = &tdev->device; | 497 | tdmac->chan.device = &tdev->device; |
| 498 | tdmac->idx = idx; | 498 | tdmac->idx = idx; |
| @@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
| 505 | /* add the channel to tdma_chan list */ | 505 | /* add the channel to tdma_chan list */ |
| 506 | list_add_tail(&tdmac->chan.device_node, | 506 | list_add_tail(&tdmac->chan.device_node, |
| 507 | &tdev->device.channels); | 507 | &tdev->device.channels); |
| 508 | |||
| 509 | return 0; | 508 | return 0; |
| 510 | } | 509 | } |
| 511 | 510 | ||
| 511 | static struct of_device_id mmp_tdma_dt_ids[] = { | ||
| 512 | { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, | ||
| 513 | { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, | ||
| 514 | {} | ||
| 515 | }; | ||
| 516 | MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids); | ||
| 517 | |||
| 512 | static int __devinit mmp_tdma_probe(struct platform_device *pdev) | 518 | static int __devinit mmp_tdma_probe(struct platform_device *pdev) |
| 513 | { | 519 | { |
| 514 | const struct platform_device_id *id = platform_get_device_id(pdev); | 520 | enum mmp_tdma_type type; |
| 515 | enum mmp_tdma_type type = id->driver_data; | 521 | const struct of_device_id *of_id; |
| 516 | struct mmp_tdma_device *tdev; | 522 | struct mmp_tdma_device *tdev; |
| 517 | struct resource *iores; | 523 | struct resource *iores; |
| 518 | int i, ret; | 524 | int i, ret; |
| 519 | int irq = 0; | 525 | int irq = 0, irq_num = 0; |
| 520 | int chan_num = TDMA_CHANNEL_NUM; | 526 | int chan_num = TDMA_CHANNEL_NUM; |
| 521 | 527 | ||
| 528 | of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); | ||
| 529 | if (of_id) | ||
| 530 | type = (enum mmp_tdma_type) of_id->data; | ||
| 531 | else | ||
| 532 | type = platform_get_device_id(pdev)->driver_data; | ||
| 533 | |||
| 522 | /* always have couple channels */ | 534 | /* always have couple channels */ |
| 523 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); | 535 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); |
| 524 | if (!tdev) | 536 | if (!tdev) |
| 525 | return -ENOMEM; | 537 | return -ENOMEM; |
| 526 | 538 | ||
| 527 | tdev->dev = &pdev->dev; | 539 | tdev->dev = &pdev->dev; |
| 528 | iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
| 529 | if (!iores) | ||
| 530 | return -EINVAL; | ||
| 531 | 540 | ||
| 532 | if (resource_size(iores) != chan_num) | 541 | for (i = 0; i < chan_num; i++) { |
| 533 | tdev->irq = iores->start; | 542 | if (platform_get_irq(pdev, i) > 0) |
| 534 | else | 543 | irq_num++; |
| 535 | irq = iores->start; | 544 | } |
| 536 | 545 | ||
| 537 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 546 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 538 | if (!iores) | 547 | if (!iores) |
| @@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev) | |||
| 542 | if (!tdev->base) | 551 | if (!tdev->base) |
| 543 | return -EADDRNOTAVAIL; | 552 | return -EADDRNOTAVAIL; |
| 544 | 553 | ||
| 545 | if (tdev->irq) { | 554 | INIT_LIST_HEAD(&tdev->device.channels); |
| 546 | ret = devm_request_irq(&pdev->dev, tdev->irq, | 555 | |
| 556 | if (irq_num != chan_num) { | ||
| 557 | irq = platform_get_irq(pdev, 0); | ||
| 558 | ret = devm_request_irq(&pdev->dev, irq, | ||
| 547 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | 559 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); |
| 548 | if (ret) | 560 | if (ret) |
| 549 | return ret; | 561 | return ret; |
| 550 | } | 562 | } |
| 551 | 563 | ||
| 552 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
| 553 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
| 554 | |||
| 555 | INIT_LIST_HEAD(&tdev->device.channels); | ||
| 556 | |||
| 557 | /* initialize channel parameters */ | 564 | /* initialize channel parameters */ |
| 558 | for (i = 0; i < chan_num; i++) { | 565 | for (i = 0; i < chan_num; i++) { |
| 566 | irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); | ||
| 559 | ret = mmp_tdma_chan_init(tdev, i, irq, type); | 567 | ret = mmp_tdma_chan_init(tdev, i, irq, type); |
| 560 | if (ret) | 568 | if (ret) |
| 561 | return ret; | 569 | return ret; |
| 562 | } | 570 | } |
| 563 | 571 | ||
| 572 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
| 573 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
| 564 | tdev->device.dev = &pdev->dev; | 574 | tdev->device.dev = &pdev->dev; |
| 565 | tdev->device.device_alloc_chan_resources = | 575 | tdev->device.device_alloc_chan_resources = |
| 566 | mmp_tdma_alloc_chan_resources; | 576 | mmp_tdma_alloc_chan_resources; |
| @@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = { | |||
| 595 | .driver = { | 605 | .driver = { |
| 596 | .name = "mmp-tdma", | 606 | .name = "mmp-tdma", |
| 597 | .owner = THIS_MODULE, | 607 | .owner = THIS_MODULE, |
| 608 | .of_match_table = mmp_tdma_dt_ids, | ||
| 598 | }, | 609 | }, |
| 599 | .id_table = mmp_tdma_id_table, | 610 | .id_table = mmp_tdma_id_table, |
| 600 | .probe = mmp_tdma_probe, | 611 | .probe = mmp_tdma_probe, |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 734a4eb84d65..9f02e794b12b 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
| @@ -101,7 +101,8 @@ struct mxs_dma_ccw { | |||
| 101 | u32 pio_words[MXS_PIO_WORDS]; | 101 | u32 pio_words[MXS_PIO_WORDS]; |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) | 104 | #define CCW_BLOCK_SIZE (4 * PAGE_SIZE) |
| 105 | #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) | ||
| 105 | 106 | ||
| 106 | struct mxs_dma_chan { | 107 | struct mxs_dma_chan { |
| 107 | struct mxs_dma_engine *mxs_dma; | 108 | struct mxs_dma_engine *mxs_dma; |
| @@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 354 | 355 | ||
| 355 | mxs_chan->chan_irq = data->chan_irq; | 356 | mxs_chan->chan_irq = data->chan_irq; |
| 356 | 357 | ||
| 357 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 358 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, |
| 358 | &mxs_chan->ccw_phys, GFP_KERNEL); | 359 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, |
| 360 | GFP_KERNEL); | ||
| 359 | if (!mxs_chan->ccw) { | 361 | if (!mxs_chan->ccw) { |
| 360 | ret = -ENOMEM; | 362 | ret = -ENOMEM; |
| 361 | goto err_alloc; | 363 | goto err_alloc; |
| 362 | } | 364 | } |
| 363 | 365 | ||
| 364 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | 366 | memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); |
| 365 | 367 | ||
| 366 | if (mxs_chan->chan_irq != NO_IRQ) { | 368 | if (mxs_chan->chan_irq != NO_IRQ) { |
| 367 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 369 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
| @@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 387 | err_clk: | 389 | err_clk: |
| 388 | free_irq(mxs_chan->chan_irq, mxs_dma); | 390 | free_irq(mxs_chan->chan_irq, mxs_dma); |
| 389 | err_irq: | 391 | err_irq: |
| 390 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 392 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, |
| 391 | mxs_chan->ccw, mxs_chan->ccw_phys); | 393 | mxs_chan->ccw, mxs_chan->ccw_phys); |
| 392 | err_alloc: | 394 | err_alloc: |
| 393 | return ret; | 395 | return ret; |
| @@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
| 402 | 404 | ||
| 403 | free_irq(mxs_chan->chan_irq, mxs_dma); | 405 | free_irq(mxs_chan->chan_irq, mxs_dma); |
| 404 | 406 | ||
| 405 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 407 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, |
| 406 | mxs_chan->ccw, mxs_chan->ccw_phys); | 408 | mxs_chan->ccw, mxs_chan->ccw_phys); |
| 407 | 409 | ||
| 408 | clk_disable_unprepare(mxs_dma->clk); | 410 | clk_disable_unprepare(mxs_dma->clk); |
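The switch from PAGE_SIZE to CCW_BLOCK_SIZE (4 * PAGE_SIZE) above quadruples the coherent area each channel allocates, and NUM_CCW scales with it:

	NUM_CCW: PAGE_SIZE / sizeof(struct mxs_dma_ccw)  ->  CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)

i.e. roughly 50 -> 200 chainable descriptors per channel with 4 KiB pages; treat these figures as a rough estimate, since the exact count depends on the ccw layout.
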
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 169c0dbd71ae..665668b6f2b1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
| 24 | #include <linux/amba/bus.h> | 24 | #include <linux/amba/bus.h> |
| 25 | #include <linux/amba/pl330.h> | 25 | #include <linux/amba/pl330.h> |
| 26 | #include <linux/pm_runtime.h> | ||
| 27 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
| 28 | #include <linux/of.h> | 27 | #include <linux/of.h> |
| 29 | 28 | ||
| @@ -586,8 +585,6 @@ struct dma_pl330_dmac { | |||
| 586 | 585 | ||
| 587 | /* Peripheral channels connected to this DMAC */ | 586 | /* Peripheral channels connected to this DMAC */ |
| 588 | struct dma_pl330_chan *peripherals; /* keep at end */ | 587 | struct dma_pl330_chan *peripherals; /* keep at end */ |
| 589 | |||
| 590 | struct clk *clk; | ||
| 591 | }; | 588 | }; |
| 592 | 589 | ||
| 593 | struct dma_pl330_desc { | 590 | struct dma_pl330_desc { |
| @@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
| 2395 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 2392 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); |
| 2396 | if (!pch->pl330_chid) { | 2393 | if (!pch->pl330_chid) { |
| 2397 | spin_unlock_irqrestore(&pch->lock, flags); | 2394 | spin_unlock_irqrestore(&pch->lock, flags); |
| 2398 | return 0; | 2395 | return -ENOMEM; |
| 2399 | } | 2396 | } |
| 2400 | 2397 | ||
| 2401 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | 2398 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); |
| @@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2889 | goto probe_err1; | 2886 | goto probe_err1; |
| 2890 | } | 2887 | } |
| 2891 | 2888 | ||
| 2892 | pdmac->clk = clk_get(&adev->dev, "dma"); | ||
| 2893 | if (IS_ERR(pdmac->clk)) { | ||
| 2894 | dev_err(&adev->dev, "Cannot get operation clock.\n"); | ||
| 2895 | ret = -EINVAL; | ||
| 2896 | goto probe_err2; | ||
| 2897 | } | ||
| 2898 | |||
| 2899 | amba_set_drvdata(adev, pdmac); | 2889 | amba_set_drvdata(adev, pdmac); |
| 2900 | 2890 | ||
| 2901 | #ifndef CONFIG_PM_RUNTIME | ||
| 2902 | /* enable dma clk */ | ||
| 2903 | clk_enable(pdmac->clk); | ||
| 2904 | #endif | ||
| 2905 | |||
| 2906 | irq = adev->irq[0]; | 2891 | irq = adev->irq[0]; |
| 2907 | ret = request_irq(irq, pl330_irq_handler, 0, | 2892 | ret = request_irq(irq, pl330_irq_handler, 0, |
| 2908 | dev_name(&adev->dev), pi); | 2893 | dev_name(&adev->dev), pi); |
| 2909 | if (ret) | 2894 | if (ret) |
| 2910 | goto probe_err3; | 2895 | goto probe_err2; |
| 2911 | 2896 | ||
| 2912 | ret = pl330_add(pi); | 2897 | ret = pl330_add(pi); |
| 2913 | if (ret) | 2898 | if (ret) |
| 2914 | goto probe_err4; | 2899 | goto probe_err3; |
| 2915 | 2900 | ||
| 2916 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2901 | INIT_LIST_HEAD(&pdmac->desc_pool); |
| 2917 | spin_lock_init(&pdmac->pool_lock); | 2902 | spin_lock_init(&pdmac->pool_lock); |
| @@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2933 | if (!pdmac->peripherals) { | 2918 | if (!pdmac->peripherals) { |
| 2934 | ret = -ENOMEM; | 2919 | ret = -ENOMEM; |
| 2935 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); | 2920 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); |
| 2936 | goto probe_err5; | 2921 | goto probe_err4; |
| 2937 | } | 2922 | } |
| 2938 | 2923 | ||
| 2939 | for (i = 0; i < num_chan; i++) { | 2924 | for (i = 0; i < num_chan; i++) { |
| @@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2961 | if (pi->pcfg.num_peri) { | 2946 | if (pi->pcfg.num_peri) { |
| 2962 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | 2947 | dma_cap_set(DMA_SLAVE, pd->cap_mask); |
| 2963 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | 2948 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); |
| 2949 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); | ||
| 2964 | } | 2950 | } |
| 2965 | } | 2951 | } |
| 2966 | 2952 | ||
| @@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2976 | ret = dma_async_device_register(pd); | 2962 | ret = dma_async_device_register(pd); |
| 2977 | if (ret) { | 2963 | if (ret) { |
| 2978 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2964 | dev_err(&adev->dev, "unable to register DMAC\n"); |
| 2979 | goto probe_err5; | 2965 | goto probe_err4; |
| 2980 | } | 2966 | } |
| 2981 | 2967 | ||
| 2982 | dev_info(&adev->dev, | 2968 | dev_info(&adev->dev, |
| @@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2989 | 2975 | ||
| 2990 | return 0; | 2976 | return 0; |
| 2991 | 2977 | ||
| 2992 | probe_err5: | ||
| 2993 | pl330_del(pi); | ||
| 2994 | probe_err4: | 2978 | probe_err4: |
| 2995 | free_irq(irq, pi); | 2979 | pl330_del(pi); |
| 2996 | probe_err3: | 2980 | probe_err3: |
| 2997 | #ifndef CONFIG_PM_RUNTIME | 2981 | free_irq(irq, pi); |
| 2998 | clk_disable(pdmac->clk); | ||
| 2999 | #endif | ||
| 3000 | clk_put(pdmac->clk); | ||
| 3001 | probe_err2: | 2982 | probe_err2: |
| 3002 | iounmap(pi->base); | 2983 | iounmap(pi->base); |
| 3003 | probe_err1: | 2984 | probe_err1: |
| @@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev) | |||
| 3044 | res = &adev->res; | 3025 | res = &adev->res; |
| 3045 | release_mem_region(res->start, resource_size(res)); | 3026 | release_mem_region(res->start, resource_size(res)); |
| 3046 | 3027 | ||
| 3047 | #ifndef CONFIG_PM_RUNTIME | ||
| 3048 | clk_disable(pdmac->clk); | ||
| 3049 | #endif | ||
| 3050 | |||
| 3051 | kfree(pdmac); | 3028 | kfree(pdmac); |
| 3052 | 3029 | ||
| 3053 | return 0; | 3030 | return 0; |
| @@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = { | |||
| 3063 | 3040 | ||
| 3064 | MODULE_DEVICE_TABLE(amba, pl330_ids); | 3041 | MODULE_DEVICE_TABLE(amba, pl330_ids); |
| 3065 | 3042 | ||
| 3066 | #ifdef CONFIG_PM_RUNTIME | ||
| 3067 | static int pl330_runtime_suspend(struct device *dev) | ||
| 3068 | { | ||
| 3069 | struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); | ||
| 3070 | |||
| 3071 | if (!pdmac) { | ||
| 3072 | dev_err(dev, "failed to get dmac\n"); | ||
| 3073 | return -ENODEV; | ||
| 3074 | } | ||
| 3075 | |||
| 3076 | clk_disable(pdmac->clk); | ||
| 3077 | |||
| 3078 | return 0; | ||
| 3079 | } | ||
| 3080 | |||
| 3081 | static int pl330_runtime_resume(struct device *dev) | ||
| 3082 | { | ||
| 3083 | struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); | ||
| 3084 | |||
| 3085 | if (!pdmac) { | ||
| 3086 | dev_err(dev, "failed to get dmac\n"); | ||
| 3087 | return -ENODEV; | ||
| 3088 | } | ||
| 3089 | |||
| 3090 | clk_enable(pdmac->clk); | ||
| 3091 | |||
| 3092 | return 0; | ||
| 3093 | } | ||
| 3094 | #else | ||
| 3095 | #define pl330_runtime_suspend NULL | ||
| 3096 | #define pl330_runtime_resume NULL | ||
| 3097 | #endif /* CONFIG_PM_RUNTIME */ | ||
| 3098 | |||
| 3099 | static const struct dev_pm_ops pl330_pm_ops = { | ||
| 3100 | .runtime_suspend = pl330_runtime_suspend, | ||
| 3101 | .runtime_resume = pl330_runtime_resume, | ||
| 3102 | }; | ||
| 3103 | |||
| 3104 | static struct amba_driver pl330_driver = { | 3043 | static struct amba_driver pl330_driver = { |
| 3105 | .drv = { | 3044 | .drv = { |
| 3106 | .owner = THIS_MODULE, | 3045 | .owner = THIS_MODULE, |
| 3107 | .name = "dma-pl330", | 3046 | .name = "dma-pl330", |
| 3108 | .pm = &pl330_pm_ops, | ||
| 3109 | }, | 3047 | }, |
| 3110 | .id_table = pl330_ids, | 3048 | .id_table = pl330_ids, |
| 3111 | .probe = pl330_probe, | 3049 | .probe = pl330_probe, |
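Two functional points in the pl330 hunks: alloc_chan_resources now returns -ENOMEM instead of 0, so the dmaengine core sees a real error rather than "zero descriptors allocated", and dropping the driver-private clock handling shortens the probe error path, which is why the goto labels are renumbered. A minimal, generic sketch of the same unwind-in-reverse-order scheme (resources and the handler are hypothetical, not pl330's own):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe_unwind(unsigned long start, size_t size, int irq)
{
	void __iomem *base;
	int ret;

	if (!request_mem_region(start, size, "example"))
		return -EBUSY;

	base = ioremap(start, size);
	if (!base) {
		ret = -ENOMEM;
		goto err_region;
	}

	ret = request_irq(irq, example_irq, 0, "example", NULL);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(base);
err_region:
	release_mem_region(start, size);
	return ret;
}

Each label undoes exactly the resources acquired before the jump, in reverse order, so removing one acquisition (here, the clock) only renames labels without disturbing the rest of the unwind.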
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 3eed8b35b0f1..64385cde044b 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
| @@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
| 570 | 570 | ||
| 571 | if (of_property_read_u32(dn, "cell-index", &id)) { | 571 | if (of_property_read_u32(dn, "cell-index", &id)) { |
| 572 | dev_err(dev, "Fail to get DMAC index\n"); | 572 | dev_err(dev, "Fail to get DMAC index\n"); |
| 573 | ret = -ENODEV; | 573 | return -ENODEV; |
| 574 | goto free_mem; | ||
| 575 | } | 574 | } |
| 576 | 575 | ||
| 577 | sdma->irq = irq_of_parse_and_map(dn, 0); | 576 | sdma->irq = irq_of_parse_and_map(dn, 0); |
| 578 | if (sdma->irq == NO_IRQ) { | 577 | if (sdma->irq == NO_IRQ) { |
| 579 | dev_err(dev, "Error mapping IRQ!\n"); | 578 | dev_err(dev, "Error mapping IRQ!\n"); |
| 580 | ret = -EINVAL; | 579 | return -EINVAL; |
| 581 | goto free_mem; | ||
| 582 | } | 580 | } |
| 583 | 581 | ||
| 584 | ret = of_address_to_resource(dn, 0, &res); | 582 | ret = of_address_to_resource(dn, 0, &res); |
| 585 | if (ret) { | 583 | if (ret) { |
| 586 | dev_err(dev, "Error parsing memory region!\n"); | 584 | dev_err(dev, "Error parsing memory region!\n"); |
| 587 | goto free_mem; | 585 | goto irq_dispose; |
| 588 | } | 586 | } |
| 589 | 587 | ||
| 590 | regs_start = res.start; | 588 | regs_start = res.start; |
| @@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
| 597 | goto irq_dispose; | 595 | goto irq_dispose; |
| 598 | } | 596 | } |
| 599 | 597 | ||
| 600 | ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, | 598 | ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma); |
| 601 | sdma); | ||
| 602 | if (ret) { | 599 | if (ret) { |
| 603 | dev_err(dev, "Error requesting IRQ!\n"); | 600 | dev_err(dev, "Error requesting IRQ!\n"); |
| 604 | ret = -EINVAL; | 601 | ret = -EINVAL; |
| 605 | goto unmap_mem; | 602 | goto irq_dispose; |
| 606 | } | 603 | } |
| 607 | 604 | ||
| 608 | dma = &sdma->dma; | 605 | dma = &sdma->dma; |
| @@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
| 652 | return 0; | 649 | return 0; |
| 653 | 650 | ||
| 654 | free_irq: | 651 | free_irq: |
| 655 | devm_free_irq(dev, sdma->irq, sdma); | 652 | free_irq(sdma->irq, sdma); |
| 656 | irq_dispose: | 653 | irq_dispose: |
| 657 | irq_dispose_mapping(sdma->irq); | 654 | irq_dispose_mapping(sdma->irq); |
| 658 | unmap_mem: | ||
| 659 | iounmap(sdma->base); | ||
| 660 | free_mem: | ||
| 661 | devm_kfree(dev, sdma); | ||
| 662 | return ret; | 655 | return ret; |
| 663 | } | 656 | } |
| 664 | 657 | ||
| @@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op) | |||
| 668 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 661 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
| 669 | 662 | ||
| 670 | dma_async_device_unregister(&sdma->dma); | 663 | dma_async_device_unregister(&sdma->dma); |
| 671 | devm_free_irq(dev, sdma->irq, sdma); | 664 | free_irq(sdma->irq, sdma); |
| 672 | irq_dispose_mapping(sdma->irq); | 665 | irq_dispose_mapping(sdma->irq); |
| 673 | iounmap(sdma->base); | ||
| 674 | devm_kfree(dev, sdma); | ||
| 675 | return 0; | 666 | return 0; |
| 676 | } | 667 | } |
| 677 | 668 | ||
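The sirf-dma hunks stop freeing devm-managed memory and iounmapping by hand (the devres core does both on failure) and switch to plain request_irq(); what the error paths still have to undo manually is the IRQ mapping created by irq_of_parse_and_map(). A minimal sketch of that pairing (handler and device node are passed in, names hypothetical):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

static int example_setup_irq(struct device_node *dn, irq_handler_t handler,
			     void *data)
{
	unsigned int virq = irq_of_parse_and_map(dn, 0);
	int ret;

	if (!virq)
		return -EINVAL;

	ret = request_irq(virq, handler, 0, "example", data);
	if (ret) {
		irq_dispose_mapping(virq);	/* undo the mapping on failure */
		return ret;
	}
	return 0;	/* teardown order: free_irq(), then irq_dispose_mapping() */
}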
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index eee8d9b9a20b..ae55091c2272 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -2921,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2921 | struct d40_base *base = NULL; | 2921 | struct d40_base *base = NULL; |
| 2922 | int num_log_chans = 0; | 2922 | int num_log_chans = 0; |
| 2923 | int num_phy_chans; | 2923 | int num_phy_chans; |
| 2924 | int clk_ret = -EINVAL; | ||
| 2924 | int i; | 2925 | int i; |
| 2925 | u32 pid; | 2926 | u32 pid; |
| 2926 | u32 cid; | 2927 | u32 cid; |
| 2927 | u8 rev; | 2928 | u8 rev; |
| 2928 | 2929 | ||
| 2929 | clk = clk_get(&pdev->dev, NULL); | 2930 | clk = clk_get(&pdev->dev, NULL); |
| 2930 | |||
| 2931 | if (IS_ERR(clk)) { | 2931 | if (IS_ERR(clk)) { |
| 2932 | d40_err(&pdev->dev, "No matching clock found\n"); | 2932 | d40_err(&pdev->dev, "No matching clock found\n"); |
| 2933 | goto failure; | 2933 | goto failure; |
| 2934 | } | 2934 | } |
| 2935 | 2935 | ||
| 2936 | clk_enable(clk); | 2936 | clk_ret = clk_prepare_enable(clk); |
| 2937 | if (clk_ret) { | ||
| 2938 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | ||
| 2939 | goto failure; | ||
| 2940 | } | ||
| 2937 | 2941 | ||
| 2938 | /* Get IO for DMAC base address */ | 2942 | /* Get IO for DMAC base address */ |
| 2939 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 2943 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
| @@ -3063,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 3063 | return base; | 3067 | return base; |
| 3064 | 3068 | ||
| 3065 | failure: | 3069 | failure: |
| 3066 | if (!IS_ERR(clk)) { | 3070 | if (!clk_ret) |
| 3067 | clk_disable(clk); | 3071 | clk_disable_unprepare(clk); |
| 3072 | if (!IS_ERR(clk)) | ||
| 3068 | clk_put(clk); | 3073 | clk_put(clk); |
| 3069 | } | ||
| 3070 | if (virtbase) | 3074 | if (virtbase) |
| 3071 | iounmap(virtbase); | 3075 | iounmap(virtbase); |
| 3072 | if (res) | 3076 | if (res) |
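ste_dma40 switches to clk_prepare_enable(), and the failure path now keys off clk_ret so the clock is only disabled and unprepared if it was actually brought up, and only put if clk_get() succeeded. A minimal sketch of the pairing (helper names hypothetical):

#include <linux/clk.h>
#include <linux/err.h>

static int example_clock_on(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);		/* got the clock but never enabled it */
		return ret;
	}

	*out = clk;
	return 0;
}

static void example_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_put(clk);
}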
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 45fbeed1c1a5..528c62dd4b00 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
| @@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | |||
| 169 | /* tegra_dma_channel: Channel specific information */ | 169 | /* tegra_dma_channel: Channel specific information */ |
| 170 | struct tegra_dma_channel { | 170 | struct tegra_dma_channel { |
| 171 | struct dma_chan dma_chan; | 171 | struct dma_chan dma_chan; |
| 172 | char name[30]; | ||
| 172 | bool config_init; | 173 | bool config_init; |
| 173 | int id; | 174 | int id; |
| 174 | int irq; | 175 | int irq; |
| @@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) | |||
| 475 | while (!list_empty(&tdc->pending_sg_req)) { | 476 | while (!list_empty(&tdc->pending_sg_req)) { |
| 476 | sgreq = list_first_entry(&tdc->pending_sg_req, | 477 | sgreq = list_first_entry(&tdc->pending_sg_req, |
| 477 | typeof(*sgreq), node); | 478 | typeof(*sgreq), node); |
| 478 | list_del(&sgreq->node); | 479 | list_move_tail(&sgreq->node, &tdc->free_sg_req); |
| 479 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
| 480 | if (sgreq->last_sg) { | 480 | if (sgreq->last_sg) { |
| 481 | dma_desc = sgreq->dma_desc; | 481 | dma_desc = sgreq->dma_desc; |
| 482 | dma_desc->dma_status = DMA_ERROR; | 482 | dma_desc->dma_status = DMA_ERROR; |
| @@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | |||
| 570 | 570 | ||
| 571 | /* If not last req then put at end of pending list */ | 571 | /* If not last req then put at end of pending list */ |
| 572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { | 572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { |
| 573 | list_del(&sgreq->node); | 573 | list_move_tail(&sgreq->node, &tdc->pending_sg_req); |
| 574 | list_add_tail(&sgreq->node, &tdc->pending_sg_req); | ||
| 575 | sgreq->configured = false; | 574 | sgreq->configured = false; |
| 576 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); | 575 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); |
| 577 | if (!st) | 576 | if (!st) |
| @@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) | |||
| 1284 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | 1283 | INIT_LIST_HEAD(&tdma->dma_dev.channels); |
| 1285 | for (i = 0; i < cdata->nr_channels; i++) { | 1284 | for (i = 0; i < cdata->nr_channels; i++) { |
| 1286 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | 1285 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
| 1287 | char irq_name[30]; | ||
| 1288 | 1286 | ||
| 1289 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + | 1287 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + |
| 1290 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; | 1288 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; |
| @@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) | |||
| 1296 | goto err_irq; | 1294 | goto err_irq; |
| 1297 | } | 1295 | } |
| 1298 | tdc->irq = res->start; | 1296 | tdc->irq = res->start; |
| 1299 | snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); | 1297 | snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); |
| 1300 | ret = devm_request_irq(&pdev->dev, tdc->irq, | 1298 | ret = devm_request_irq(&pdev->dev, tdc->irq, |
| 1301 | tegra_dma_isr, 0, irq_name, tdc); | 1299 | tegra_dma_isr, 0, tdc->name, tdc); |
| 1302 | if (ret) { | 1300 | if (ret) { |
| 1303 | dev_err(&pdev->dev, | 1301 | dev_err(&pdev->dev, |
| 1304 | "request_irq failed with err %d channel %d\n", | 1302 | "request_irq failed with err %d channel %d\n", |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8ac5246e2ab2..06c42cfb7c34 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/suspend.h> | 26 | #include <linux/suspend.h> |
| 27 | #include <linux/fault-inject.h> | 27 | #include <linux/fault-inject.h> |
| 28 | #include <linux/random.h> | 28 | #include <linux/random.h> |
| 29 | #include <linux/slab.h> | ||
| 29 | 30 | ||
| 30 | #include <linux/mmc/card.h> | 31 | #include <linux/mmc/card.h> |
| 31 | #include <linux/mmc/host.h> | 32 | #include <linux/mmc/host.h> |
| @@ -41,6 +42,12 @@ | |||
| 41 | #include "sd_ops.h" | 42 | #include "sd_ops.h" |
| 42 | #include "sdio_ops.h" | 43 | #include "sdio_ops.h" |
| 43 | 44 | ||
| 45 | /* | ||
| 46 | * Background operations can take a long time, depending on the housekeeping | ||
| 47 | * operations the card has to perform. | ||
| 48 | */ | ||
| 49 | #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ | ||
| 50 | |||
| 44 | static struct workqueue_struct *workqueue; | 51 | static struct workqueue_struct *workqueue; |
| 45 | static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; | 52 | static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; |
| 46 | 53 | ||
| @@ -245,6 +252,70 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
| 245 | host->ops->request(host, mrq); | 252 | host->ops->request(host, mrq); |
| 246 | } | 253 | } |
| 247 | 254 | ||
| 255 | /** | ||
| 256 | * mmc_start_bkops - start BKOPS for supported cards | ||
| 257 | * @card: MMC card to start BKOPS | ||
| 258 | * @from_exception: A flag to indicate if this function was | ||
| 259 | * called due to an exception raised by the card | ||
| 260 | * | ||
| 261 | * Start background operations whenever requested. | ||
| 262 | * When the urgent BKOPS bit is set in a R1 command response | ||
| 263 | * then background operations should be started immediately. | ||
| 264 | */ | ||
| 265 | void mmc_start_bkops(struct mmc_card *card, bool from_exception) | ||
| 266 | { | ||
| 267 | int err; | ||
| 268 | int timeout; | ||
| 269 | bool use_busy_signal; | ||
| 270 | |||
| 271 | BUG_ON(!card); | ||
| 272 | |||
| 273 | if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) | ||
| 274 | return; | ||
| 275 | |||
| 276 | err = mmc_read_bkops_status(card); | ||
| 277 | if (err) { | ||
| 278 | pr_err("%s: Failed to read bkops status: %d\n", | ||
| 279 | mmc_hostname(card->host), err); | ||
| 280 | return; | ||
| 281 | } | ||
| 282 | |||
| 283 | if (!card->ext_csd.raw_bkops_status) | ||
| 284 | return; | ||
| 285 | |||
| 286 | if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && | ||
| 287 | from_exception) | ||
| 288 | return; | ||
| 289 | |||
| 290 | mmc_claim_host(card->host); | ||
| 291 | if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { | ||
| 292 | timeout = MMC_BKOPS_MAX_TIMEOUT; | ||
| 293 | use_busy_signal = true; | ||
| 294 | } else { | ||
| 295 | timeout = 0; | ||
| 296 | use_busy_signal = false; | ||
| 297 | } | ||
| 298 | |||
| 299 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
| 300 | EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal); | ||
| 301 | if (err) { | ||
| 302 | pr_warn("%s: Error %d starting bkops\n", | ||
| 303 | mmc_hostname(card->host), err); | ||
| 304 | goto out; | ||
| 305 | } | ||
| 306 | |||
| 307 | /* | ||
| 308 | * For urgent bkops status (LEVEL_2 and more) | ||
| 309 | * bkops executed synchronously, otherwise | ||
| 310 | * the operation is in progress | ||
| 311 | */ | ||
| 312 | if (!use_busy_signal) | ||
| 313 | mmc_card_set_doing_bkops(card); | ||
| 314 | out: | ||
| 315 | mmc_release_host(card->host); | ||
| 316 | } | ||
| 317 | EXPORT_SYMBOL(mmc_start_bkops); | ||
| 318 | |||
| 248 | static void mmc_wait_done(struct mmc_request *mrq) | 319 | static void mmc_wait_done(struct mmc_request *mrq) |
| 249 | { | 320 | { |
| 250 | complete(&mrq->completion); | 321 | complete(&mrq->completion); |
| @@ -354,6 +425,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, | |||
| 354 | if (host->areq) { | 425 | if (host->areq) { |
| 355 | mmc_wait_for_req_done(host, host->areq->mrq); | 426 | mmc_wait_for_req_done(host, host->areq->mrq); |
| 356 | err = host->areq->err_check(host->card, host->areq); | 427 | err = host->areq->err_check(host->card, host->areq); |
| 428 | /* | ||
| 429 | * Check BKOPS urgency for each R1 response | ||
| 430 | */ | ||
| 431 | if (host->card && mmc_card_mmc(host->card) && | ||
| 432 | ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || | ||
| 433 | (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && | ||
| 434 | (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) | ||
| 435 | mmc_start_bkops(host->card, true); | ||
| 357 | } | 436 | } |
| 358 | 437 | ||
| 359 | if (!err && areq) | 438 | if (!err && areq) |
| @@ -398,7 +477,7 @@ EXPORT_SYMBOL(mmc_wait_for_req); | |||
| 398 | * @card: the MMC card associated with the HPI transfer | 477 | * @card: the MMC card associated with the HPI transfer |
| 399 | * | 478 | * |
| 400 | * Issued High Priority Interrupt, and check for card status | 479 | * Issued High Priority Interrupt, and check for card status |
| 401 | * util out-of prg-state. | 480 | * until out-of prg-state. |
| 402 | */ | 481 | */ |
| 403 | int mmc_interrupt_hpi(struct mmc_card *card) | 482 | int mmc_interrupt_hpi(struct mmc_card *card) |
| 404 | { | 483 | { |
| @@ -424,8 +503,9 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
| 424 | case R1_STATE_IDLE: | 503 | case R1_STATE_IDLE: |
| 425 | case R1_STATE_READY: | 504 | case R1_STATE_READY: |
| 426 | case R1_STATE_STBY: | 505 | case R1_STATE_STBY: |
| 506 | case R1_STATE_TRAN: | ||
| 427 | /* | 507 | /* |
| 428 | * In idle states, HPI is not needed and the caller | 508 | * In idle and transfer states, HPI is not needed and the caller |
| 429 | * can issue the next intended command immediately | 509 | * can issue the next intended command immediately |
| 430 | */ | 510 | */ |
| 431 | goto out; | 511 | goto out; |
| @@ -489,6 +569,64 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries | |||
| 489 | EXPORT_SYMBOL(mmc_wait_for_cmd); | 569 | EXPORT_SYMBOL(mmc_wait_for_cmd); |
| 490 | 570 | ||
| 491 | /** | 571 | /** |
| 572 | * mmc_stop_bkops - stop ongoing BKOPS | ||
| 573 | * @card: MMC card to check BKOPS | ||
| 574 | * | ||
| 575 | * Send HPI command to stop ongoing background operations to | ||
| 576 | * allow rapid servicing of foreground operations, e.g. read/ | ||
| 577 | * writes. Wait until the card comes out of the programming state | ||
| 578 | * to avoid errors in servicing read/write requests. | ||
| 579 | */ | ||
| 580 | int mmc_stop_bkops(struct mmc_card *card) | ||
| 581 | { | ||
| 582 | int err = 0; | ||
| 583 | |||
| 584 | BUG_ON(!card); | ||
| 585 | err = mmc_interrupt_hpi(card); | ||
| 586 | |||
| 587 | /* | ||
| 588 | * If err is EINVAL, we can't issue an HPI. | ||
| 589 | * It should complete the BKOPS. | ||
| 590 | */ | ||
| 591 | if (!err || (err == -EINVAL)) { | ||
| 592 | mmc_card_clr_doing_bkops(card); | ||
| 593 | err = 0; | ||
| 594 | } | ||
| 595 | |||
| 596 | return err; | ||
| 597 | } | ||
| 598 | EXPORT_SYMBOL(mmc_stop_bkops); | ||
| 599 | |||
| 600 | int mmc_read_bkops_status(struct mmc_card *card) | ||
| 601 | { | ||
| 602 | int err; | ||
| 603 | u8 *ext_csd; | ||
| 604 | |||
| 605 | /* | ||
| 606 | * In future work, we should consider storing the entire ext_csd. | ||
| 607 | */ | ||
| 608 | ext_csd = kmalloc(512, GFP_KERNEL); | ||
| 609 | if (!ext_csd) { | ||
| 610 | pr_err("%s: could not allocate buffer to receive the ext_csd.\n", | ||
| 611 | mmc_hostname(card->host)); | ||
| 612 | return -ENOMEM; | ||
| 613 | } | ||
| 614 | |||
| 615 | mmc_claim_host(card->host); | ||
| 616 | err = mmc_send_ext_csd(card, ext_csd); | ||
| 617 | mmc_release_host(card->host); | ||
| 618 | if (err) | ||
| 619 | goto out; | ||
| 620 | |||
| 621 | card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; | ||
| 622 | card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; | ||
| 623 | out: | ||
| 624 | kfree(ext_csd); | ||
| 625 | return err; | ||
| 626 | } | ||
| 627 | EXPORT_SYMBOL(mmc_read_bkops_status); | ||
| 628 | |||
| 629 | /** | ||
| 492 | * mmc_set_data_timeout - set the timeout for a data command | 630 | * mmc_set_data_timeout - set the timeout for a data command |
| 493 | * @data: data phase for command | 631 | * @data: data phase for command |
| 494 | * @card: the MMC card associated with the data transfer | 632 | * @card: the MMC card associated with the data transfer |
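mmc_stop_bkops() interrupts ongoing background operations with an HPI and clears the doing-bkops flag (also when HPI is unsupported, because the switch then simply runs to completion), while mmc_read_bkops_status() refreshes the status and exception bytes from EXT_CSD. A hypothetical helper that prepares for latency-sensitive foreground I/O might use it like this (the helper itself is an assumption):

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>

static int example_prepare_foreground(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_doing_bkops(card))
		err = mmc_stop_bkops(card);	/* issues HPI, clears the flag */
	return err;
}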
| @@ -975,7 +1113,8 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
| 975 | int tmp; | 1113 | int tmp; |
| 976 | int voltage; | 1114 | int voltage; |
| 977 | 1115 | ||
| 978 | /* REVISIT mmc_vddrange_to_ocrmask() may have set some | 1116 | /* |
| 1117 | * REVISIT mmc_vddrange_to_ocrmask() may have set some | ||
| 979 | * bits this regulator doesn't quite support ... don't | 1118 | * bits this regulator doesn't quite support ... don't |
| 980 | * be too picky, most cards and regulators are OK with | 1119 | * be too picky, most cards and regulators are OK with |
| 981 | * a 0.1V range goof (it's a small error percentage). | 1120 | * a 0.1V range goof (it's a small error percentage). |
| @@ -989,12 +1128,13 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
| 989 | max_uV = min_uV + 100 * 1000; | 1128 | max_uV = min_uV + 100 * 1000; |
| 990 | } | 1129 | } |
| 991 | 1130 | ||
| 992 | /* avoid needless changes to this voltage; the regulator | 1131 | /* |
| 993 | * might not allow this operation | 1132 | * If we're using a fixed/static regulator, don't call |
| 1133 | * regulator_set_voltage; it would fail. | ||
| 994 | */ | 1134 | */ |
| 995 | voltage = regulator_get_voltage(supply); | 1135 | voltage = regulator_get_voltage(supply); |
| 996 | 1136 | ||
| 997 | if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE) | 1137 | if (regulator_count_voltages(supply) == 1) |
| 998 | min_uV = max_uV = voltage; | 1138 | min_uV = max_uV = voltage; |
| 999 | 1139 | ||
| 1000 | if (voltage < 0) | 1140 | if (voltage < 0) |
| @@ -1133,48 +1273,6 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) | |||
| 1133 | mmc_host_clk_release(host); | 1273 | mmc_host_clk_release(host); |
| 1134 | } | 1274 | } |
| 1135 | 1275 | ||
| 1136 | static void mmc_poweroff_notify(struct mmc_host *host) | ||
| 1137 | { | ||
| 1138 | struct mmc_card *card; | ||
| 1139 | unsigned int timeout; | ||
| 1140 | unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION; | ||
| 1141 | int err = 0; | ||
| 1142 | |||
| 1143 | card = host->card; | ||
| 1144 | mmc_claim_host(host); | ||
| 1145 | |||
| 1146 | /* | ||
| 1147 | * Send power notify command only if card | ||
| 1148 | * is mmc and notify state is powered ON | ||
| 1149 | */ | ||
| 1150 | if (card && mmc_card_mmc(card) && | ||
| 1151 | (card->poweroff_notify_state == MMC_POWERED_ON)) { | ||
| 1152 | |||
| 1153 | if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) { | ||
| 1154 | notify_type = EXT_CSD_POWER_OFF_SHORT; | ||
| 1155 | timeout = card->ext_csd.generic_cmd6_time; | ||
| 1156 | card->poweroff_notify_state = MMC_POWEROFF_SHORT; | ||
| 1157 | } else { | ||
| 1158 | notify_type = EXT_CSD_POWER_OFF_LONG; | ||
| 1159 | timeout = card->ext_csd.power_off_longtime; | ||
| 1160 | card->poweroff_notify_state = MMC_POWEROFF_LONG; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
| 1164 | EXT_CSD_POWER_OFF_NOTIFICATION, | ||
| 1165 | notify_type, timeout); | ||
| 1166 | |||
| 1167 | if (err && err != -EBADMSG) | ||
| 1168 | pr_err("Device failed to respond within %d poweroff " | ||
| 1169 | "time. Forcefully powering down the device\n", | ||
| 1170 | timeout); | ||
| 1171 | |||
| 1172 | /* Set the card state to no notification after the poweroff */ | ||
| 1173 | card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION; | ||
| 1174 | } | ||
| 1175 | mmc_release_host(host); | ||
| 1176 | } | ||
| 1177 | |||
| 1178 | /* | 1276 | /* |
| 1179 | * Apply power to the MMC stack. This is a two-stage process. | 1277 | * Apply power to the MMC stack. This is a two-stage process. |
| 1180 | * First, we enable power to the card without the clock running. | 1278 | * First, we enable power to the card without the clock running. |
| @@ -1237,8 +1335,6 @@ static void mmc_power_up(struct mmc_host *host) | |||
| 1237 | 1335 | ||
| 1238 | void mmc_power_off(struct mmc_host *host) | 1336 | void mmc_power_off(struct mmc_host *host) |
| 1239 | { | 1337 | { |
| 1240 | int err = 0; | ||
| 1241 | |||
| 1242 | if (host->ios.power_mode == MMC_POWER_OFF) | 1338 | if (host->ios.power_mode == MMC_POWER_OFF) |
| 1243 | return; | 1339 | return; |
| 1244 | 1340 | ||
| @@ -1247,22 +1343,6 @@ void mmc_power_off(struct mmc_host *host) | |||
| 1247 | host->ios.clock = 0; | 1343 | host->ios.clock = 0; |
| 1248 | host->ios.vdd = 0; | 1344 | host->ios.vdd = 0; |
| 1249 | 1345 | ||
| 1250 | /* | ||
| 1251 | * For eMMC 4.5 device send AWAKE command before | ||
| 1252 | * POWER_OFF_NOTIFY command, because in sleep state | ||
| 1253 | * eMMC 4.5 devices respond to only RESET and AWAKE cmd | ||
| 1254 | */ | ||
| 1255 | if (host->card && mmc_card_is_sleep(host->card) && | ||
| 1256 | host->bus_ops->resume) { | ||
| 1257 | err = host->bus_ops->resume(host); | ||
| 1258 | |||
| 1259 | if (!err) | ||
| 1260 | mmc_poweroff_notify(host); | ||
| 1261 | else | ||
| 1262 | pr_warning("%s: error %d during resume " | ||
| 1263 | "(continue with poweroff sequence)\n", | ||
| 1264 | mmc_hostname(host), err); | ||
| 1265 | } | ||
| 1266 | 1346 | ||
| 1267 | /* | 1347 | /* |
| 1268 | * Reset ocr mask to be the highest possible voltage supported for | 1348 | * Reset ocr mask to be the highest possible voltage supported for |
| @@ -2052,6 +2132,11 @@ void mmc_rescan(struct work_struct *work) | |||
| 2052 | if (host->rescan_disable) | 2132 | if (host->rescan_disable) |
| 2053 | return; | 2133 | return; |
| 2054 | 2134 | ||
| 2135 | /* If there is a non-removable card registered, only scan once */ | ||
| 2136 | if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) | ||
| 2137 | return; | ||
| 2138 | host->rescan_entered = 1; | ||
| 2139 | |||
| 2055 | mmc_bus_get(host); | 2140 | mmc_bus_get(host); |
| 2056 | 2141 | ||
| 2057 | /* | 2142 | /* |
| @@ -2327,9 +2412,14 @@ int mmc_suspend_host(struct mmc_host *host) | |||
| 2327 | 2412 | ||
| 2328 | mmc_bus_get(host); | 2413 | mmc_bus_get(host); |
| 2329 | if (host->bus_ops && !host->bus_dead) { | 2414 | if (host->bus_ops && !host->bus_dead) { |
| 2330 | 2415 | if (host->bus_ops->suspend) { | |
| 2331 | if (host->bus_ops->suspend) | 2416 | if (mmc_card_doing_bkops(host->card)) { |
| 2417 | err = mmc_stop_bkops(host->card); | ||
| 2418 | if (err) | ||
| 2419 | goto out; | ||
| 2420 | } | ||
| 2332 | err = host->bus_ops->suspend(host); | 2421 | err = host->bus_ops->suspend(host); |
| 2422 | } | ||
| 2333 | 2423 | ||
| 2334 | if (err == -ENOSYS || !host->bus_ops->resume) { | 2424 | if (err == -ENOSYS || !host->bus_ops->resume) { |
| 2335 | /* | 2425 | /* |
| @@ -2411,15 +2501,24 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
| 2411 | struct mmc_host *host = container_of( | 2501 | struct mmc_host *host = container_of( |
| 2412 | notify_block, struct mmc_host, pm_notify); | 2502 | notify_block, struct mmc_host, pm_notify); |
| 2413 | unsigned long flags; | 2503 | unsigned long flags; |
| 2414 | 2504 | int err = 0; | |
| 2415 | 2505 | ||
| 2416 | switch (mode) { | 2506 | switch (mode) { |
| 2417 | case PM_HIBERNATION_PREPARE: | 2507 | case PM_HIBERNATION_PREPARE: |
| 2418 | case PM_SUSPEND_PREPARE: | 2508 | case PM_SUSPEND_PREPARE: |
| 2509 | if (host->card && mmc_card_mmc(host->card) && | ||
| 2510 | mmc_card_doing_bkops(host->card)) { | ||
| 2511 | err = mmc_stop_bkops(host->card); | ||
| 2512 | if (err) { | ||
| 2513 | pr_err("%s: didn't stop bkops\n", | ||
| 2514 | mmc_hostname(host)); | ||
| 2515 | return err; | ||
| 2516 | } | ||
| 2517 | mmc_card_clr_doing_bkops(host->card); | ||
| 2518 | } | ||
| 2419 | 2519 | ||
| 2420 | spin_lock_irqsave(&host->lock, flags); | 2520 | spin_lock_irqsave(&host->lock, flags); |
| 2421 | host->rescan_disable = 1; | 2521 | host->rescan_disable = 1; |
| 2422 | host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; | ||
| 2423 | spin_unlock_irqrestore(&host->lock, flags); | 2522 | spin_unlock_irqrestore(&host->lock, flags); |
| 2424 | cancel_delayed_work_sync(&host->detect); | 2523 | cancel_delayed_work_sync(&host->detect); |
| 2425 | 2524 | ||
| @@ -2443,7 +2542,6 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
| 2443 | 2542 | ||
| 2444 | spin_lock_irqsave(&host->lock, flags); | 2543 | spin_lock_irqsave(&host->lock, flags); |
| 2445 | host->rescan_disable = 0; | 2544 | host->rescan_disable = 0; |
| 2446 | host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG; | ||
| 2447 | spin_unlock_irqrestore(&host->lock, flags); | 2545 | spin_unlock_irqrestore(&host->lock, flags); |
| 2448 | mmc_detect_change(host, 0); | 2546 | mmc_detect_change(host, 0); |
| 2449 | 2547 | ||
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 9ab5b17d488a..d96c643dde1c 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c | |||
| @@ -281,7 +281,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) | |||
| 281 | if (err) | 281 | if (err) |
| 282 | goto out_free; | 282 | goto out_free; |
| 283 | 283 | ||
| 284 | for (i = 511; i >= 0; i--) | 284 | for (i = 0; i < 512; i++) |
| 285 | n += sprintf(buf + n, "%02x", ext_csd[i]); | 285 | n += sprintf(buf + n, "%02x", ext_csd[i]); |
| 286 | n += sprintf(buf + n, "\n"); | 286 | n += sprintf(buf + n, "\n"); |
| 287 | BUG_ON(n != EXT_CSD_STR_LEN); | 287 | BUG_ON(n != EXT_CSD_STR_LEN); |
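The debugfs fix flips the dump to ascending order, so the hex pair at string offset 2*i is EXT_CSD byte i and the output can be read against the byte offsets in the JEDEC tables. A minimal sketch of the corrected loop (buffer management is simplified and hypothetical; the real code writes into a preallocated EXT_CSD_STR_LEN buffer):

#include <linux/kernel.h>
#include <linux/types.h>

static size_t example_dump_ext_csd(const u8 *ext_csd, char *buf)
{
	size_t n = 0;
	int i;

	for (i = 0; i < 512; i++)	/* ascending: buf[2*i..2*i+1] is byte i */
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
	return n;
}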
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 396b25891bb9..7cc46382fd64 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -463,6 +463,17 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
| 463 | } | 463 | } |
| 464 | 464 | ||
| 465 | if (card->ext_csd.rev >= 5) { | 465 | if (card->ext_csd.rev >= 5) { |
| 466 | /* check whether the eMMC card supports BKOPS */ | ||
| 467 | if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { | ||
| 468 | card->ext_csd.bkops = 1; | ||
| 469 | card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN]; | ||
| 470 | card->ext_csd.raw_bkops_status = | ||
| 471 | ext_csd[EXT_CSD_BKOPS_STATUS]; | ||
| 472 | if (!card->ext_csd.bkops_en) | ||
| 473 | pr_info("%s: BKOPS_EN bit is not set\n", | ||
| 474 | mmc_hostname(card->host)); | ||
| 475 | } | ||
| 476 | |||
| 466 | /* check whether the eMMC card supports HPI */ | 477 | /* check whether the eMMC card supports HPI */ |
| 467 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { | 478 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { |
| 468 | card->ext_csd.hpi = 1; | 479 | card->ext_csd.hpi = 1; |
| @@ -996,7 +1007,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 996 | * so check for success and update the flag | 1007 | * so check for success and update the flag |
| 997 | */ | 1008 | */ |
| 998 | if (!err) | 1009 | if (!err) |
| 999 | card->poweroff_notify_state = MMC_POWERED_ON; | 1010 | card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; |
| 1000 | } | 1011 | } |
| 1001 | 1012 | ||
| 1002 | /* | 1013 | /* |
| @@ -1262,6 +1273,35 @@ err: | |||
| 1262 | return err; | 1273 | return err; |
| 1263 | } | 1274 | } |
| 1264 | 1275 | ||
| 1276 | static int mmc_can_poweroff_notify(const struct mmc_card *card) | ||
| 1277 | { | ||
| 1278 | return card && | ||
| 1279 | mmc_card_mmc(card) && | ||
| 1280 | (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON); | ||
| 1281 | } | ||
| 1282 | |||
| 1283 | static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) | ||
| 1284 | { | ||
| 1285 | unsigned int timeout = card->ext_csd.generic_cmd6_time; | ||
| 1286 | int err; | ||
| 1287 | |||
| 1288 | /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */ | ||
| 1289 | if (notify_type == EXT_CSD_POWER_OFF_LONG) | ||
| 1290 | timeout = card->ext_csd.power_off_longtime; | ||
| 1291 | |||
| 1292 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
| 1293 | EXT_CSD_POWER_OFF_NOTIFICATION, | ||
| 1294 | notify_type, timeout); | ||
| 1295 | if (err) | ||
| 1296 | pr_err("%s: Power Off Notification timed out, %u\n", | ||
| 1297 | mmc_hostname(card->host), timeout); | ||
| 1298 | |||
| 1299 | /* Disable the power off notification after the switch operation. */ | ||
| 1300 | card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION; | ||
| 1301 | |||
| 1302 | return err; | ||
| 1303 | } | ||
| 1304 | |||
| 1265 | /* | 1305 | /* |
| 1266 | * Host is being removed. Free up the current card. | 1306 | * Host is being removed. Free up the current card. |
| 1267 | */ | 1307 | */ |
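mmc_poweroff_notify() now lives next to its users in mmc.c, picks generic_cmd6_time for the short notification and power_off_longtime for the long one, and clears ext_csd.power_off_notification afterwards. This patch only wires the short variant into mmc_suspend(); a long-notification caller such as the sketch below is an assumption for illustration (both helpers are static, so it would have to sit in the same file):

static void example_poweroff(struct mmc_host *host)
{
	if (mmc_can_poweroff_notify(host->card))
		mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
}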
| @@ -1322,11 +1362,11 @@ static int mmc_suspend(struct mmc_host *host) | |||
| 1322 | BUG_ON(!host->card); | 1362 | BUG_ON(!host->card); |
| 1323 | 1363 | ||
| 1324 | mmc_claim_host(host); | 1364 | mmc_claim_host(host); |
| 1325 | if (mmc_card_can_sleep(host)) { | 1365 | if (mmc_can_poweroff_notify(host->card)) |
| 1366 | err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT); | ||
| 1367 | else if (mmc_card_can_sleep(host)) | ||
| 1326 | err = mmc_card_sleep(host); | 1368 | err = mmc_card_sleep(host); |
| 1327 | if (!err) | 1369 | else if (!mmc_host_is_spi(host)) |
| 1328 | mmc_card_set_sleep(host->card); | ||
| 1329 | } else if (!mmc_host_is_spi(host)) | ||
| 1330 | err = mmc_deselect_cards(host); | 1370 | err = mmc_deselect_cards(host); |
| 1331 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | 1371 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
| 1332 | mmc_release_host(host); | 1372 | mmc_release_host(host); |
| @@ -1348,11 +1388,7 @@ static int mmc_resume(struct mmc_host *host) | |||
| 1348 | BUG_ON(!host->card); | 1388 | BUG_ON(!host->card); |
| 1349 | 1389 | ||
| 1350 | mmc_claim_host(host); | 1390 | mmc_claim_host(host); |
| 1351 | if (mmc_card_is_sleep(host->card)) { | 1391 | err = mmc_init_card(host, host->ocr, host->card); |
| 1352 | err = mmc_card_awake(host); | ||
| 1353 | mmc_card_clr_sleep(host->card); | ||
| 1354 | } else | ||
| 1355 | err = mmc_init_card(host, host->ocr, host->card); | ||
| 1356 | mmc_release_host(host); | 1392 | mmc_release_host(host); |
| 1357 | 1393 | ||
| 1358 | return err; | 1394 | return err; |
| @@ -1363,7 +1399,6 @@ static int mmc_power_restore(struct mmc_host *host) | |||
| 1363 | int ret; | 1399 | int ret; |
| 1364 | 1400 | ||
| 1365 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | 1401 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
| 1366 | mmc_card_clr_sleep(host->card); | ||
| 1367 | mmc_claim_host(host); | 1402 | mmc_claim_host(host); |
| 1368 | ret = mmc_init_card(host, host->ocr, host->card); | 1403 | ret = mmc_init_card(host, host->ocr, host->card); |
| 1369 | mmc_release_host(host); | 1404 | mmc_release_host(host); |
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 0ed2cc5f35b6..a0e172042e65 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
| @@ -230,6 +230,10 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) | |||
| 230 | return 0; | 230 | return 0; |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | /* | ||
| 234 | * NOTE: void *buf, caller for the buf is required to use DMA-capable | ||
| 235 | * buffer or on-stack buffer (with some overhead in callee). | ||
| 236 | */ | ||
| 233 | static int | 237 | static int |
| 234 | mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | 238 | mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, |
| 235 | u32 opcode, void *buf, unsigned len) | 239 | u32 opcode, void *buf, unsigned len) |
| @@ -239,13 +243,19 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
| 239 | struct mmc_data data = {0}; | 243 | struct mmc_data data = {0}; |
| 240 | struct scatterlist sg; | 244 | struct scatterlist sg; |
| 241 | void *data_buf; | 245 | void *data_buf; |
| 246 | int is_on_stack; | ||
| 242 | 247 | ||
| 243 | /* dma onto stack is unsafe/nonportable, but callers to this | 248 | is_on_stack = object_is_on_stack(buf); |
| 244 | * routine normally provide temporary on-stack buffers ... | 249 | if (is_on_stack) { |
| 245 | */ | 250 | /* |
| 246 | data_buf = kmalloc(len, GFP_KERNEL); | 251 | * dma onto stack is unsafe/nonportable, but callers to this |
| 247 | if (data_buf == NULL) | 252 | * routine normally provide temporary on-stack buffers ... |
| 248 | return -ENOMEM; | 253 | */ |
| 254 | data_buf = kmalloc(len, GFP_KERNEL); | ||
| 255 | if (!data_buf) | ||
| 256 | return -ENOMEM; | ||
| 257 | } else | ||
| 258 | data_buf = buf; | ||
| 249 | 259 | ||
| 250 | mrq.cmd = &cmd; | 260 | mrq.cmd = &cmd; |
| 251 | mrq.data = &data; | 261 | mrq.data = &data; |
| @@ -280,8 +290,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
| 280 | 290 | ||
| 281 | mmc_wait_for_req(host, &mrq); | 291 | mmc_wait_for_req(host, &mrq); |
| 282 | 292 | ||
| 283 | memcpy(buf, data_buf, len); | 293 | if (is_on_stack) { |
| 284 | kfree(data_buf); | 294 | memcpy(buf, data_buf, len); |
| 295 | kfree(data_buf); | ||
| 296 | } | ||
| 285 | 297 | ||
| 286 | if (cmd.error) | 298 | if (cmd.error) |
| 287 | return cmd.error; | 299 | return cmd.error; |
| @@ -294,24 +306,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
| 294 | int mmc_send_csd(struct mmc_card *card, u32 *csd) | 306 | int mmc_send_csd(struct mmc_card *card, u32 *csd) |
| 295 | { | 307 | { |
| 296 | int ret, i; | 308 | int ret, i; |
| 309 | u32 *csd_tmp; | ||
| 297 | 310 | ||
| 298 | if (!mmc_host_is_spi(card->host)) | 311 | if (!mmc_host_is_spi(card->host)) |
| 299 | return mmc_send_cxd_native(card->host, card->rca << 16, | 312 | return mmc_send_cxd_native(card->host, card->rca << 16, |
| 300 | csd, MMC_SEND_CSD); | 313 | csd, MMC_SEND_CSD); |
| 301 | 314 | ||
| 302 | ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); | 315 | csd_tmp = kmalloc(16, GFP_KERNEL); |
| 316 | if (!csd_tmp) | ||
| 317 | return -ENOMEM; | ||
| 318 | |||
| 319 | ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16); | ||
| 303 | if (ret) | 320 | if (ret) |
| 304 | return ret; | 321 | goto err; |
| 305 | 322 | ||
| 306 | for (i = 0;i < 4;i++) | 323 | for (i = 0;i < 4;i++) |
| 307 | csd[i] = be32_to_cpu(csd[i]); | 324 | csd[i] = be32_to_cpu(csd_tmp[i]); |
| 308 | 325 | ||
| 309 | return 0; | 326 | err: |
| 327 | kfree(csd_tmp); | ||
| 328 | return ret; | ||
| 310 | } | 329 | } |
| 311 | 330 | ||
| 312 | int mmc_send_cid(struct mmc_host *host, u32 *cid) | 331 | int mmc_send_cid(struct mmc_host *host, u32 *cid) |
| 313 | { | 332 | { |
| 314 | int ret, i; | 333 | int ret, i; |
| 334 | u32 *cid_tmp; | ||
| 315 | 335 | ||
| 316 | if (!mmc_host_is_spi(host)) { | 336 | if (!mmc_host_is_spi(host)) { |
| 317 | if (!host->card) | 337 | if (!host->card) |
| @@ -320,14 +340,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid) | |||
| 320 | cid, MMC_SEND_CID); | 340 | cid, MMC_SEND_CID); |
| 321 | } | 341 | } |
| 322 | 342 | ||
| 323 | ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); | 343 | cid_tmp = kmalloc(16, GFP_KERNEL); |
| 344 | if (!cid_tmp) | ||
| 345 | return -ENOMEM; | ||
| 346 | |||
| 347 | ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16); | ||
| 324 | if (ret) | 348 | if (ret) |
| 325 | return ret; | 349 | goto err; |
| 326 | 350 | ||
| 327 | for (i = 0;i < 4;i++) | 351 | for (i = 0;i < 4;i++) |
| 328 | cid[i] = be32_to_cpu(cid[i]); | 352 | cid[i] = be32_to_cpu(cid_tmp[i]); |
| 329 | 353 | ||
| 330 | return 0; | 354 | err: |
| 355 | kfree(cid_tmp); | ||
| 356 | return ret; | ||
| 331 | } | 357 | } |
| 332 | 358 | ||
| 333 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) | 359 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) |
| @@ -367,18 +393,19 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) | |||
| 367 | } | 393 | } |
| 368 | 394 | ||
| 369 | /** | 395 | /** |
| 370 | * mmc_switch - modify EXT_CSD register | 396 | * __mmc_switch - modify EXT_CSD register |
| 371 | * @card: the MMC card associated with the data transfer | 397 | * @card: the MMC card associated with the data transfer |
| 372 | * @set: cmd set values | 398 | * @set: cmd set values |
| 373 | * @index: EXT_CSD register index | 399 | * @index: EXT_CSD register index |
| 374 | * @value: value to program into EXT_CSD register | 400 | * @value: value to program into EXT_CSD register |
| 375 | * @timeout_ms: timeout (ms) for operation performed by register write, | 401 | * @timeout_ms: timeout (ms) for operation performed by register write, |
| 376 | * timeout of zero implies maximum possible timeout | 402 | * timeout of zero implies maximum possible timeout |
| 403 | * @use_busy_signal: use the busy signal as response type | ||
| 377 | * | 404 | * |
| 378 | * Modifies the EXT_CSD register for selected card. | 405 | * Modifies the EXT_CSD register for selected card. |
| 379 | */ | 406 | */ |
| 380 | int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | 407 | int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, |
| 381 | unsigned int timeout_ms) | 408 | unsigned int timeout_ms, bool use_busy_signal) |
| 382 | { | 409 | { |
| 383 | int err; | 410 | int err; |
| 384 | struct mmc_command cmd = {0}; | 411 | struct mmc_command cmd = {0}; |
| @@ -392,13 +419,23 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
| 392 | (index << 16) | | 419 | (index << 16) | |
| 393 | (value << 8) | | 420 | (value << 8) | |
| 394 | set; | 421 | set; |
| 395 | cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | 422 | cmd.flags = MMC_CMD_AC; |
| 423 | if (use_busy_signal) | ||
| 424 | cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; | ||
| 425 | else | ||
| 426 | cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; | ||
| 427 | |||
| 428 | |||
| 396 | cmd.cmd_timeout_ms = timeout_ms; | 429 | cmd.cmd_timeout_ms = timeout_ms; |
| 397 | 430 | ||
| 398 | err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); | 431 | err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); |
| 399 | if (err) | 432 | if (err) |
| 400 | return err; | 433 | return err; |
| 401 | 434 | ||
| 435 | /* No need to check card status in case of unblocking command */ | ||
| 436 | if (!use_busy_signal) | ||
| 437 | return 0; | ||
| 438 | |||
| 402 | /* Must check status to be sure of no errors */ | 439 | /* Must check status to be sure of no errors */ |
| 403 | do { | 440 | do { |
| 404 | err = mmc_send_status(card, &status); | 441 | err = mmc_send_status(card, &status); |
| @@ -423,6 +460,13 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
| 423 | 460 | ||
| 424 | return 0; | 461 | return 0; |
| 425 | } | 462 | } |
| 463 | EXPORT_SYMBOL_GPL(__mmc_switch); | ||
| 464 | |||
| 465 | int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | ||
| 466 | unsigned int timeout_ms) | ||
| 467 | { | ||
| 468 | return __mmc_switch(card, set, index, value, timeout_ms, true); | ||
| 469 | } | ||
| 426 | EXPORT_SYMBOL_GPL(mmc_switch); | 470 | EXPORT_SYMBOL_GPL(mmc_switch); |
| 427 | 471 | ||
| 428 | int mmc_send_status(struct mmc_card *card, u32 *status) | 472 | int mmc_send_status(struct mmc_card *card, u32 *status) |
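__mmc_switch() adds a use_busy_signal flag: true keeps the old behaviour (R1B response plus status polling, with mmc_switch() becoming a thin wrapper passing true), while false sends a plain R1 command and returns immediately, which is what the asynchronous BKOPS start relies on. A hypothetical non-blocking caller (the switch target is just an example and relies on definitions added elsewhere in this series):

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

static int example_start_bkops_async(struct mmc_card *card)
{
	/* use_busy_signal = false: R1 response, no busy wait, no status polling */
	return __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_BKOPS_START, 1, 0, false);
}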
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 236842ec955a..6bf68799fe97 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
| @@ -193,14 +193,7 @@ static int sdio_bus_remove(struct device *dev) | |||
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | #ifdef CONFIG_PM | 195 | #ifdef CONFIG_PM |
| 196 | |||
| 197 | static int pm_no_operation(struct device *dev) | ||
| 198 | { | ||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | static const struct dev_pm_ops sdio_bus_pm_ops = { | 196 | static const struct dev_pm_ops sdio_bus_pm_ops = { |
| 203 | SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation) | ||
| 204 | SET_RUNTIME_PM_OPS( | 197 | SET_RUNTIME_PM_OPS( |
| 205 | pm_generic_runtime_suspend, | 198 | pm_generic_runtime_suspend, |
| 206 | pm_generic_runtime_resume, | 199 | pm_generic_runtime_resume, |
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 058242916cef..08c6b3dfe080 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c | |||
| @@ -100,7 +100,13 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) | |||
| 100 | 100 | ||
| 101 | ctx = host->slot.handler_priv; | 101 | ctx = host->slot.handler_priv; |
| 102 | 102 | ||
| 103 | return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); | 103 | ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); |
| 104 | if (ret < 0) | ||
| 105 | return ret; | ||
| 106 | |||
| 107 | ctx->ro_gpio = gpio; | ||
| 108 | |||
| 109 | return 0; | ||
| 104 | } | 110 | } |
| 105 | EXPORT_SYMBOL(mmc_gpio_request_ro); | 111 | EXPORT_SYMBOL(mmc_gpio_request_ro); |
| 106 | 112 | ||
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index aa131b32e3b2..9bf10e7bbfaf 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
| @@ -540,6 +540,15 @@ config MMC_DW_PLTFM | |||
| 540 | 540 | ||
| 541 | If unsure, say Y. | 541 | If unsure, say Y. |
| 542 | 542 | ||
| 543 | config MMC_DW_EXYNOS | ||
| 544 | tristate "Exynos specific extentions for Synopsys DW Memory Card Interface" | ||
| 545 | depends on MMC_DW | ||
| 546 | select MMC_DW_PLTFM | ||
| 547 | help | ||
| 548 | This selects support for Samsung Exynos SoC specific extensions to the | ||
| 549 | Synopsys DesignWare Memory Card Interface driver. Select this option | ||
| 550 | for platforms based on Exynos4 and Exynos5 SoC's. | ||
| 551 | |||
| 543 | config MMC_DW_PCI | 552 | config MMC_DW_PCI |
| 544 | tristate "Synopsys Designware MCI support on PCI bus" | 553 | tristate "Synopsys Designware MCI support on PCI bus" |
| 545 | depends on MMC_DW && PCI | 554 | depends on MMC_DW && PCI |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 8922b06be925..17ad0a7ba40b 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
| @@ -39,6 +39,7 @@ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | |||
| 39 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o | 39 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o |
| 40 | obj-$(CONFIG_MMC_DW) += dw_mmc.o | 40 | obj-$(CONFIG_MMC_DW) += dw_mmc.o |
| 41 | obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o | 41 | obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o |
| 42 | obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o | ||
| 42 | obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o | 43 | obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o |
| 43 | obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o | 44 | obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o |
| 44 | obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o | 45 | obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o |
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index ab56f7db5315..c97001e15227 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h | |||
| @@ -140,6 +140,13 @@ | |||
| 140 | #define atmci_writel(port,reg,value) \ | 140 | #define atmci_writel(port,reg,value) \ |
| 141 | __raw_writel((value), (port)->regs + reg) | 141 | __raw_writel((value), (port)->regs + reg) |
| 142 | 142 | ||
| 143 | /* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ | ||
| 144 | #ifdef CONFIG_AVR32 | ||
| 145 | # define ATMCI_PDC_CONNECTED 0 | ||
| 146 | #else | ||
| 147 | # define ATMCI_PDC_CONNECTED 1 | ||
| 148 | #endif | ||
| 149 | |||
| 143 | /* | 150 | /* |
| 144 | * Fix sconfig's burst size according to atmel MCI. We need to convert them as: | 151 | * Fix sconfig's burst size according to atmel MCI. We need to convert them as: |
| 145 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | 152 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 852d5fbda630..ddf096e3803f 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
| @@ -19,6 +19,9 @@ | |||
| 19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
| 20 | #include <linux/ioport.h> | 20 | #include <linux/ioport.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/of.h> | ||
| 23 | #include <linux/of_device.h> | ||
| 24 | #include <linux/of_gpio.h> | ||
| 22 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 23 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
| 24 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
| @@ -71,7 +74,7 @@ enum atmci_pdc_buf { | |||
| 71 | }; | 74 | }; |
| 72 | 75 | ||
| 73 | struct atmel_mci_caps { | 76 | struct atmel_mci_caps { |
| 74 | bool has_dma; | 77 | bool has_dma_conf_reg; |
| 75 | bool has_pdc; | 78 | bool has_pdc; |
| 76 | bool has_cfg_reg; | 79 | bool has_cfg_reg; |
| 77 | bool has_cstor_reg; | 80 | bool has_cstor_reg; |
| @@ -418,7 +421,7 @@ static int atmci_regs_show(struct seq_file *s, void *v) | |||
| 418 | atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); | 421 | atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); |
| 419 | atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); | 422 | atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); |
| 420 | 423 | ||
| 421 | if (host->caps.has_dma) { | 424 | if (host->caps.has_dma_conf_reg) { |
| 422 | u32 val; | 425 | u32 val; |
| 423 | 426 | ||
| 424 | val = buf[ATMCI_DMA / 4]; | 427 | val = buf[ATMCI_DMA / 4]; |
| @@ -500,6 +503,70 @@ err: | |||
| 500 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); | 503 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); |
| 501 | } | 504 | } |
| 502 | 505 | ||
| 506 | #if defined(CONFIG_OF) | ||
| 507 | static const struct of_device_id atmci_dt_ids[] = { | ||
| 508 | { .compatible = "atmel,hsmci" }, | ||
| 509 | { /* sentinel */ } | ||
| 510 | }; | ||
| 511 | |||
| 512 | MODULE_DEVICE_TABLE(of, atmci_dt_ids); | ||
| 513 | |||
| 514 | static struct mci_platform_data __devinit* | ||
| 515 | atmci_of_init(struct platform_device *pdev) | ||
| 516 | { | ||
| 517 | struct device_node *np = pdev->dev.of_node; | ||
| 518 | struct device_node *cnp; | ||
| 519 | struct mci_platform_data *pdata; | ||
| 520 | u32 slot_id; | ||
| 521 | |||
| 522 | if (!np) { | ||
| 523 | dev_err(&pdev->dev, "device node not found\n"); | ||
| 524 | return ERR_PTR(-EINVAL); | ||
| 525 | } | ||
| 526 | |||
| 527 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
| 528 | if (!pdata) { | ||
| 529 | dev_err(&pdev->dev, "could not allocate memory for pdata\n"); | ||
| 530 | return ERR_PTR(-ENOMEM); | ||
| 531 | } | ||
| 532 | |||
| 533 | for_each_child_of_node(np, cnp) { | ||
| 534 | if (of_property_read_u32(cnp, "reg", &slot_id)) { | ||
| 535 | dev_warn(&pdev->dev, "reg property is missing for %s\n", | ||
| 536 | cnp->full_name); | ||
| 537 | continue; | ||
| 538 | } | ||
| 539 | |||
| 540 | if (slot_id >= ATMCI_MAX_NR_SLOTS) { | ||
| 541 | dev_warn(&pdev->dev, "can't have more than %d slots\n", | ||
| 542 | ATMCI_MAX_NR_SLOTS); | ||
| 543 | break; | ||
| 544 | } | ||
| 545 | |||
| 546 | if (of_property_read_u32(cnp, "bus-width", | ||
| 547 | &pdata->slot[slot_id].bus_width)) | ||
| 548 | pdata->slot[slot_id].bus_width = 1; | ||
| 549 | |||
| 550 | pdata->slot[slot_id].detect_pin = | ||
| 551 | of_get_named_gpio(cnp, "cd-gpios", 0); | ||
| 552 | |||
| 553 | pdata->slot[slot_id].detect_is_active_high = | ||
| 554 | of_property_read_bool(cnp, "cd-inverted"); | ||
| 555 | |||
| 556 | pdata->slot[slot_id].wp_pin = | ||
| 557 | of_get_named_gpio(cnp, "wp-gpios", 0); | ||
| 558 | } | ||
| 559 | |||
| 560 | return pdata; | ||
| 561 | } | ||
| 562 | #else /* CONFIG_OF */ | ||
| 563 | static inline struct mci_platform_data* | ||
| 564 | atmci_of_init(struct platform_device *dev) | ||
| 565 | { | ||
| 566 | return ERR_PTR(-EINVAL); | ||
| 567 | } | ||
| 568 | #endif | ||
| 569 | |||
| 503 | static inline unsigned int atmci_get_version(struct atmel_mci *host) | 570 | static inline unsigned int atmci_get_version(struct atmel_mci *host) |
| 504 | { | 571 | { |
| 505 | return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; | 572 | return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; |
| @@ -774,7 +841,7 @@ static void atmci_dma_complete(void *arg) | |||
| 774 | 841 | ||
| 775 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); | 842 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); |
| 776 | 843 | ||
| 777 | if (host->caps.has_dma) | 844 | if (host->caps.has_dma_conf_reg) |
| 778 | /* Disable DMA hardware handshaking on MCI */ | 845 | /* Disable DMA hardware handshaking on MCI */ |
| 779 | atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); | 846 | atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); |
| 780 | 847 | ||
| @@ -961,7 +1028,9 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
| 961 | maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); | 1028 | maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); |
| 962 | } | 1029 | } |
| 963 | 1030 | ||
| 964 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN); | 1031 | if (host->caps.has_dma_conf_reg) |
| 1032 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | | ||
| 1033 | ATMCI_DMAEN); | ||
| 965 | 1034 | ||
| 966 | sglen = dma_map_sg(chan->device->dev, data->sg, | 1035 | sglen = dma_map_sg(chan->device->dev, data->sg, |
| 967 | data->sg_len, direction); | 1036 | data->sg_len, direction); |
| @@ -2046,6 +2115,13 @@ static int __init atmci_init_slot(struct atmel_mci *host, | |||
| 2046 | slot->sdc_reg = sdc_reg; | 2115 | slot->sdc_reg = sdc_reg; |
| 2047 | slot->sdio_irq = sdio_irq; | 2116 | slot->sdio_irq = sdio_irq; |
| 2048 | 2117 | ||
| 2118 | dev_dbg(&mmc->class_dev, | ||
| 2119 | "slot[%u]: bus_width=%u, detect_pin=%d, " | ||
| 2120 | "detect_is_active_high=%s, wp_pin=%d\n", | ||
| 2121 | id, slot_data->bus_width, slot_data->detect_pin, | ||
| 2122 | slot_data->detect_is_active_high ? "true" : "false", | ||
| 2123 | slot_data->wp_pin); | ||
| 2124 | |||
| 2049 | mmc->ops = &atmci_ops; | 2125 | mmc->ops = &atmci_ops; |
| 2050 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); | 2126 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); |
| 2051 | mmc->f_max = host->bus_hz / 2; | 2127 | mmc->f_max = host->bus_hz / 2; |
| @@ -2169,7 +2245,10 @@ static bool atmci_configure_dma(struct atmel_mci *host) | |||
| 2169 | 2245 | ||
| 2170 | pdata = host->pdev->dev.platform_data; | 2246 | pdata = host->pdev->dev.platform_data; |
| 2171 | 2247 | ||
| 2172 | if (pdata && find_slave_dev(pdata->dma_slave)) { | 2248 | if (!pdata) |
| 2249 | return false; | ||
| 2250 | |||
| 2251 | if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) { | ||
| 2173 | dma_cap_mask_t mask; | 2252 | dma_cap_mask_t mask; |
| 2174 | 2253 | ||
| 2175 | /* Try to grab a DMA channel */ | 2254 | /* Try to grab a DMA channel */ |
| @@ -2210,8 +2289,8 @@ static void __init atmci_get_cap(struct atmel_mci *host) | |||
| 2210 | dev_info(&host->pdev->dev, | 2289 | dev_info(&host->pdev->dev, |
| 2211 | "version: 0x%x\n", version); | 2290 | "version: 0x%x\n", version); |
| 2212 | 2291 | ||
| 2213 | host->caps.has_dma = 0; | 2292 | host->caps.has_dma_conf_reg = 0; |
| 2214 | host->caps.has_pdc = 1; | 2293 | host->caps.has_pdc = ATMCI_PDC_CONNECTED; |
| 2215 | host->caps.has_cfg_reg = 0; | 2294 | host->caps.has_cfg_reg = 0; |
| 2216 | host->caps.has_cstor_reg = 0; | 2295 | host->caps.has_cstor_reg = 0; |
| 2217 | host->caps.has_highspeed = 0; | 2296 | host->caps.has_highspeed = 0; |
| @@ -2228,12 +2307,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) | |||
| 2228 | host->caps.has_odd_clk_div = 1; | 2307 | host->caps.has_odd_clk_div = 1; |
| 2229 | case 0x400: | 2308 | case 0x400: |
| 2230 | case 0x300: | 2309 | case 0x300: |
| 2231 | #ifdef CONFIG_AT_HDMAC | 2310 | host->caps.has_dma_conf_reg = 1; |
| 2232 | host->caps.has_dma = 1; | ||
| 2233 | #else | ||
| 2234 | dev_info(&host->pdev->dev, | ||
| 2235 | "has dma capability but dma engine is not selected, then use pio\n"); | ||
| 2236 | #endif | ||
| 2237 | host->caps.has_pdc = 0; | 2311 | host->caps.has_pdc = 0; |
| 2238 | host->caps.has_cfg_reg = 1; | 2312 | host->caps.has_cfg_reg = 1; |
| 2239 | host->caps.has_cstor_reg = 1; | 2313 | host->caps.has_cstor_reg = 1; |
| @@ -2268,8 +2342,14 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
| 2268 | if (!regs) | 2342 | if (!regs) |
| 2269 | return -ENXIO; | 2343 | return -ENXIO; |
| 2270 | pdata = pdev->dev.platform_data; | 2344 | pdata = pdev->dev.platform_data; |
| 2271 | if (!pdata) | 2345 | if (!pdata) { |
| 2272 | return -ENXIO; | 2346 | pdata = atmci_of_init(pdev); |
| 2347 | if (IS_ERR(pdata)) { | ||
| 2348 | dev_err(&pdev->dev, "platform data not available\n"); | ||
| 2349 | return PTR_ERR(pdata); | ||
| 2350 | } | ||
| 2351 | } | ||
| 2352 | |||
| 2273 | irq = platform_get_irq(pdev, 0); | 2353 | irq = platform_get_irq(pdev, 0); |
| 2274 | if (irq < 0) | 2354 | if (irq < 0) |
| 2275 | return irq; | 2355 | return irq; |
| @@ -2308,7 +2388,7 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
| 2308 | 2388 | ||
| 2309 | /* Get MCI capabilities and set operations according to it */ | 2389 | /* Get MCI capabilities and set operations according to it */ |
| 2310 | atmci_get_cap(host); | 2390 | atmci_get_cap(host); |
| 2311 | if (host->caps.has_dma && atmci_configure_dma(host)) { | 2391 | if (atmci_configure_dma(host)) { |
| 2312 | host->prepare_data = &atmci_prepare_data_dma; | 2392 | host->prepare_data = &atmci_prepare_data_dma; |
| 2313 | host->submit_data = &atmci_submit_data_dma; | 2393 | host->submit_data = &atmci_submit_data_dma; |
| 2314 | host->stop_transfer = &atmci_stop_transfer_dma; | 2394 | host->stop_transfer = &atmci_stop_transfer_dma; |
| @@ -2487,6 +2567,7 @@ static struct platform_driver atmci_driver = { | |||
| 2487 | .driver = { | 2567 | .driver = { |
| 2488 | .name = "atmel_mci", | 2568 | .name = "atmel_mci", |
| 2489 | .pm = ATMCI_PM_OPS, | 2569 | .pm = ATMCI_PM_OPS, |
| 2570 | .of_match_table = of_match_ptr(atmci_dt_ids), | ||
| 2490 | }, | 2571 | }, |
| 2491 | }; | 2572 | }; |
| 2492 | 2573 | ||
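
The new atmci_of_init() above builds the platform data from the controller's child nodes: each slot node supplies "reg", an optional "bus-width" (defaulting to 1), plus "cd-gpios", "cd-inverted" and "wp-gpios". Below is a minimal sketch of that per-slot parsing loop, reduced to its core; ATMCI_MAX_NR_SLOTS and the mci_platform_data layout are assumed from the driver, and the example_ function name is a placeholder.

/*
 * Minimal sketch of the per-slot parsing pattern used by atmci_of_init()
 * in the hunk above.  ATMCI_MAX_NR_SLOTS and struct mci_platform_data
 * come from the driver; the function name is a placeholder.
 */
#include <linux/of.h>
#include <linux/of_gpio.h>

static void example_parse_slots(struct device_node *np,
				struct mci_platform_data *pdata)
{
	struct device_node *cnp;
	u32 id;

	for_each_child_of_node(np, cnp) {
		/* "reg" selects the slot; skip malformed or out-of-range nodes */
		if (of_property_read_u32(cnp, "reg", &id) ||
		    id >= ATMCI_MAX_NR_SLOTS)
			continue;

		/* optional bus width, defaulting to a 1-bit bus */
		if (of_property_read_u32(cnp, "bus-width",
					 &pdata->slot[id].bus_width))
			pdata->slot[id].bus_width = 1;

		/* card-detect and write-protect GPIOs, if wired on the board */
		pdata->slot[id].detect_pin =
			of_get_named_gpio(cnp, "cd-gpios", 0);
		pdata->slot[id].detect_is_active_high =
			of_property_read_bool(cnp, "cd-inverted");
		pdata->slot[id].wp_pin =
			of_get_named_gpio(cnp, "wp-gpios", 0);
	}
}
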
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index a17dd7363ceb..b9b463eca1ec 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c | |||
| @@ -24,9 +24,7 @@ | |||
| 24 | #include <asm/portmux.h> | 24 | #include <asm/portmux.h> |
| 25 | #include <asm/bfin_sdh.h> | 25 | #include <asm/bfin_sdh.h> |
| 26 | 26 | ||
| 27 | #if defined(CONFIG_BF51x) | 27 | #if defined(CONFIG_BF51x) || defined(__ADSPBF60x__) |
| 28 | #define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL | ||
| 29 | #define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL | ||
| 30 | #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL | 28 | #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL |
| 31 | #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL | 29 | #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL |
| 32 | #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT | 30 | #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT |
| @@ -45,8 +43,16 @@ | |||
| 45 | #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS | 43 | #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS |
| 46 | #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS | 44 | #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS |
| 47 | #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 | 45 | #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 |
| 46 | #define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK | ||
| 48 | #define bfin_read_SDH_CFG bfin_read_RSI_CFG | 47 | #define bfin_read_SDH_CFG bfin_read_RSI_CFG |
| 49 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG | 48 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG |
| 49 | # if defined(__ADSPBF60x__) | ||
| 50 | # define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ | ||
| 51 | # define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ | ||
| 52 | # else | ||
| 53 | # define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL | ||
| 54 | # define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL | ||
| 55 | # endif | ||
| 50 | #endif | 56 | #endif |
| 51 | 57 | ||
| 52 | struct sdh_host { | 58 | struct sdh_host { |
| @@ -62,6 +68,7 @@ struct sdh_host { | |||
| 62 | dma_addr_t sg_dma; | 68 | dma_addr_t sg_dma; |
| 63 | int dma_len; | 69 | int dma_len; |
| 64 | 70 | ||
| 71 | unsigned long sclk; | ||
| 65 | unsigned int imask; | 72 | unsigned int imask; |
| 66 | unsigned int power_mode; | 73 | unsigned int power_mode; |
| 67 | unsigned int clk_div; | 74 | unsigned int clk_div; |
| @@ -127,11 +134,15 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
| 127 | /* Only supports power-of-2 block size */ | 134 | /* Only supports power-of-2 block size */ |
| 128 | if (data->blksz & (data->blksz - 1)) | 135 | if (data->blksz & (data->blksz - 1)) |
| 129 | return -EINVAL; | 136 | return -EINVAL; |
| 137 | #ifndef RSI_BLKSZ | ||
| 130 | data_ctl |= ((ffs(data->blksz) - 1) << 4); | 138 | data_ctl |= ((ffs(data->blksz) - 1) << 4); |
| 139 | #else | ||
| 140 | bfin_write_SDH_BLK_SIZE(data->blksz); | ||
| 141 | #endif | ||
| 131 | 142 | ||
| 132 | bfin_write_SDH_DATA_CTL(data_ctl); | 143 | bfin_write_SDH_DATA_CTL(data_ctl); |
| 133 | /* the time of a host clock period in ns */ | 144 | /* the time of a host clock period in ns */ |
| 134 | cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); | 145 | cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1))); |
| 135 | timeout = data->timeout_ns / cycle_ns; | 146 | timeout = data->timeout_ns / cycle_ns; |
| 136 | timeout += data->timeout_clks; | 147 | timeout += data->timeout_clks; |
| 137 | bfin_write_SDH_DATA_TIMER(timeout); | 148 | bfin_write_SDH_DATA_TIMER(timeout); |
| @@ -145,8 +156,13 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
| 145 | 156 | ||
| 146 | sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); | 157 | sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); |
| 147 | host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); | 158 | host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); |
| 148 | #if defined(CONFIG_BF54x) | 159 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) |
| 149 | dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; | 160 | dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN; |
| 161 | # ifdef RSI_BLKSZ | ||
| 162 | dma_cfg |= PSIZE_32 | NDSIZE_3; | ||
| 163 | # else | ||
| 164 | dma_cfg |= NDSIZE_5; | ||
| 165 | # endif | ||
| 150 | { | 166 | { |
| 151 | struct scatterlist *sg; | 167 | struct scatterlist *sg; |
| 152 | int i; | 168 | int i; |
| @@ -156,7 +172,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
| 156 | host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; | 172 | host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; |
| 157 | host->sg_cpu[i].x_modify = 4; | 173 | host->sg_cpu[i].x_modify = 4; |
| 158 | dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " | 174 | dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " |
| 159 | "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", | 175 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", |
| 160 | i, host->sg_cpu[i].start_addr, | 176 | i, host->sg_cpu[i].start_addr, |
| 161 | host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, | 177 | host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, |
| 162 | host->sg_cpu[i].x_modify); | 178 | host->sg_cpu[i].x_modify); |
| @@ -172,6 +188,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
| 172 | set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); | 188 | set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); |
| 173 | set_dma_x_count(host->dma_ch, 0); | 189 | set_dma_x_count(host->dma_ch, 0); |
| 174 | set_dma_x_modify(host->dma_ch, 0); | 190 | set_dma_x_modify(host->dma_ch, 0); |
| 191 | SSYNC(); | ||
| 175 | set_dma_config(host->dma_ch, dma_cfg); | 192 | set_dma_config(host->dma_ch, dma_cfg); |
| 176 | #elif defined(CONFIG_BF51x) | 193 | #elif defined(CONFIG_BF51x) |
| 177 | /* RSI DMA doesn't work in array mode */ | 194 | /* RSI DMA doesn't work in array mode */ |
| @@ -179,6 +196,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
| 179 | set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); | 196 | set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); |
| 180 | set_dma_x_count(host->dma_ch, length / 4); | 197 | set_dma_x_count(host->dma_ch, length / 4); |
| 181 | set_dma_x_modify(host->dma_ch, 4); | 198 | set_dma_x_modify(host->dma_ch, 4); |
| 199 | SSYNC(); | ||
| 182 | set_dma_config(host->dma_ch, dma_cfg); | 200 | set_dma_config(host->dma_ch, dma_cfg); |
| 183 | #endif | 201 | #endif |
| 184 | bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); | 202 | bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); |
| @@ -296,7 +314,6 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat) | |||
| 296 | else | 314 | else |
| 297 | data->bytes_xfered = 0; | 315 | data->bytes_xfered = 0; |
| 298 | 316 | ||
| 299 | sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); | ||
| 300 | bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ | 317 | bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ |
| 301 | DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); | 318 | DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); |
| 302 | bfin_write_SDH_DATA_CTL(0); | 319 | bfin_write_SDH_DATA_CTL(0); |
| @@ -321,74 +338,115 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
| 321 | dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); | 338 | dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); |
| 322 | WARN_ON(host->mrq != NULL); | 339 | WARN_ON(host->mrq != NULL); |
| 323 | 340 | ||
| 341 | spin_lock(&host->lock); | ||
| 324 | host->mrq = mrq; | 342 | host->mrq = mrq; |
| 325 | host->data = mrq->data; | 343 | host->data = mrq->data; |
| 326 | 344 | ||
| 327 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) { | 345 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) { |
| 328 | ret = sdh_setup_data(host, mrq->data); | 346 | ret = sdh_setup_data(host, mrq->data); |
| 329 | if (ret) | 347 | if (ret) |
| 330 | return; | 348 | goto data_err; |
| 331 | } | 349 | } |
| 332 | 350 | ||
| 333 | sdh_start_cmd(host, mrq->cmd); | 351 | sdh_start_cmd(host, mrq->cmd); |
| 352 | data_err: | ||
| 353 | spin_unlock(&host->lock); | ||
| 334 | } | 354 | } |
| 335 | 355 | ||
| 336 | static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 356 | static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
| 337 | { | 357 | { |
| 338 | struct sdh_host *host; | 358 | struct sdh_host *host; |
| 339 | unsigned long flags; | ||
| 340 | u16 clk_ctl = 0; | 359 | u16 clk_ctl = 0; |
| 360 | #ifndef RSI_BLKSZ | ||
| 341 | u16 pwr_ctl = 0; | 361 | u16 pwr_ctl = 0; |
| 362 | #endif | ||
| 342 | u16 cfg; | 363 | u16 cfg; |
| 343 | host = mmc_priv(mmc); | 364 | host = mmc_priv(mmc); |
| 344 | 365 | ||
| 345 | spin_lock_irqsave(&host->lock, flags); | 366 | spin_lock(&host->lock); |
| 346 | if (ios->clock) { | ||
| 347 | unsigned long sys_clk, ios_clk; | ||
| 348 | unsigned char clk_div; | ||
| 349 | ios_clk = 2 * ios->clock; | ||
| 350 | sys_clk = get_sclk(); | ||
| 351 | clk_div = sys_clk / ios_clk; | ||
| 352 | if (sys_clk % ios_clk == 0) | ||
| 353 | clk_div -= 1; | ||
| 354 | clk_div = min_t(unsigned char, clk_div, 0xFF); | ||
| 355 | clk_ctl |= clk_div; | ||
| 356 | clk_ctl |= CLK_E; | ||
| 357 | host->clk_div = clk_div; | ||
| 358 | } else | ||
| 359 | sdh_stop_clock(host); | ||
| 360 | |||
| 361 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
| 362 | #ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND | ||
| 363 | pwr_ctl |= ROD_CTL; | ||
| 364 | #else | ||
| 365 | pwr_ctl |= SD_CMD_OD | ROD_CTL; | ||
| 366 | #endif | ||
| 367 | 367 | ||
| 368 | if (ios->bus_width == MMC_BUS_WIDTH_4) { | 368 | cfg = bfin_read_SDH_CFG(); |
| 369 | cfg = bfin_read_SDH_CFG(); | 369 | cfg |= MWE; |
| 370 | switch (ios->bus_width) { | ||
| 371 | case MMC_BUS_WIDTH_4: | ||
| 372 | #ifndef RSI_BLKSZ | ||
| 370 | cfg &= ~PD_SDDAT3; | 373 | cfg &= ~PD_SDDAT3; |
| 374 | #endif | ||
| 371 | cfg |= PUP_SDDAT3; | 375 | cfg |= PUP_SDDAT3; |
| 372 | /* Enable 4 bit SDIO */ | 376 | /* Enable 4 bit SDIO */ |
| 373 | cfg |= (SD4E | MWE); | 377 | cfg |= SD4E; |
| 374 | bfin_write_SDH_CFG(cfg); | 378 | clk_ctl |= WIDE_BUS_4; |
| 375 | clk_ctl |= WIDE_BUS; | 379 | break; |
| 376 | } else { | 380 | case MMC_BUS_WIDTH_8: |
| 377 | cfg = bfin_read_SDH_CFG(); | 381 | #ifndef RSI_BLKSZ |
| 378 | cfg |= MWE; | 382 | cfg &= ~PD_SDDAT3; |
| 379 | bfin_write_SDH_CFG(cfg); | 383 | #endif |
| 384 | cfg |= PUP_SDDAT3; | ||
| 385 | /* Disable 4 bit SDIO */ | ||
| 386 | cfg &= ~SD4E; | ||
| 387 | clk_ctl |= BYTE_BUS_8; | ||
| 388 | break; | ||
| 389 | default: | ||
| 390 | cfg &= ~PUP_SDDAT3; | ||
| 391 | /* Disable 4 bit SDIO */ | ||
| 392 | cfg &= ~SD4E; | ||
| 380 | } | 393 | } |
| 381 | 394 | ||
| 382 | bfin_write_SDH_CLK_CTL(clk_ctl); | ||
| 383 | |||
| 384 | host->power_mode = ios->power_mode; | 395 | host->power_mode = ios->power_mode; |
| 385 | if (ios->power_mode == MMC_POWER_ON) | 396 | #ifndef RSI_BLKSZ |
| 397 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { | ||
| 398 | pwr_ctl |= ROD_CTL; | ||
| 399 | # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND | ||
| 400 | pwr_ctl |= SD_CMD_OD; | ||
| 401 | # endif | ||
| 402 | } | ||
| 403 | |||
| 404 | if (ios->power_mode != MMC_POWER_OFF) | ||
| 386 | pwr_ctl |= PWR_ON; | 405 | pwr_ctl |= PWR_ON; |
| 406 | else | ||
| 407 | pwr_ctl &= ~PWR_ON; | ||
| 387 | 408 | ||
| 388 | bfin_write_SDH_PWR_CTL(pwr_ctl); | 409 | bfin_write_SDH_PWR_CTL(pwr_ctl); |
| 410 | #else | ||
| 411 | # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND | ||
| 412 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
| 413 | cfg |= SD_CMD_OD; | ||
| 414 | else | ||
| 415 | cfg &= ~SD_CMD_OD; | ||
| 416 | # endif | ||
| 417 | |||
| 418 | |||
| 419 | if (ios->power_mode != MMC_POWER_OFF) | ||
| 420 | cfg |= PWR_ON; | ||
| 421 | else | ||
| 422 | cfg &= ~PWR_ON; | ||
| 423 | |||
| 424 | bfin_write_SDH_CFG(cfg); | ||
| 425 | #endif | ||
| 389 | SSYNC(); | 426 | SSYNC(); |
| 390 | 427 | ||
| 391 | spin_unlock_irqrestore(&host->lock, flags); | 428 | if (ios->power_mode == MMC_POWER_ON && ios->clock) { |
| 429 | unsigned char clk_div; | ||
| 430 | clk_div = (get_sclk() / ios->clock - 1) / 2; | ||
| 431 | clk_div = min_t(unsigned char, clk_div, 0xFF); | ||
| 432 | clk_ctl |= clk_div; | ||
| 433 | clk_ctl |= CLK_E; | ||
| 434 | host->clk_div = clk_div; | ||
| 435 | bfin_write_SDH_CLK_CTL(clk_ctl); | ||
| 436 | |||
| 437 | } else | ||
| 438 | sdh_stop_clock(host); | ||
| 439 | |||
| 440 | /* set up sdh interrupt mask*/ | ||
| 441 | if (ios->power_mode == MMC_POWER_ON) | ||
| 442 | bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | | ||
| 443 | RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END | | ||
| 444 | CMD_TIME_OUT | CMD_CRC_FAIL); | ||
| 445 | else | ||
| 446 | bfin_write_SDH_MASK0(0); | ||
| 447 | SSYNC(); | ||
| 448 | |||
| 449 | spin_unlock(&host->lock); | ||
| 392 | 450 | ||
| 393 | dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", | 451 | dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", |
| 394 | host->clk_div, | 452 | host->clk_div, |
| @@ -405,7 +463,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid) | |||
| 405 | { | 463 | { |
| 406 | struct sdh_host *host = devid; | 464 | struct sdh_host *host = devid; |
| 407 | 465 | ||
| 408 | dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, | 466 | dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__, |
| 409 | get_dma_curr_irqstat(host->dma_ch)); | 467 | get_dma_curr_irqstat(host->dma_ch)); |
| 410 | clear_dma_irqstat(host->dma_ch); | 468 | clear_dma_irqstat(host->dma_ch); |
| 411 | SSYNC(); | 469 | SSYNC(); |
| @@ -420,6 +478,9 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) | |||
| 420 | int handled = 0; | 478 | int handled = 0; |
| 421 | 479 | ||
| 422 | dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); | 480 | dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); |
| 481 | |||
| 482 | spin_lock(&host->lock); | ||
| 483 | |||
| 423 | status = bfin_read_SDH_E_STATUS(); | 484 | status = bfin_read_SDH_E_STATUS(); |
| 424 | if (status & SD_CARD_DET) { | 485 | if (status & SD_CARD_DET) { |
| 425 | mmc_detect_change(host->mmc, 0); | 486 | mmc_detect_change(host->mmc, 0); |
| @@ -437,11 +498,30 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) | |||
| 437 | if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) | 498 | if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) |
| 438 | handled |= sdh_data_done(host, status); | 499 | handled |= sdh_data_done(host, status); |
| 439 | 500 | ||
| 501 | spin_unlock(&host->lock); | ||
| 502 | |||
| 440 | dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); | 503 | dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); |
| 441 | 504 | ||
| 442 | return IRQ_RETVAL(handled); | 505 | return IRQ_RETVAL(handled); |
| 443 | } | 506 | } |
| 444 | 507 | ||
| 508 | static void sdh_reset(void) | ||
| 509 | { | ||
| 510 | #if defined(CONFIG_BF54x) | ||
| 511 | /* Secure Digital Host shares DMA with Nand controller */ | ||
| 512 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
| 513 | #endif | ||
| 514 | |||
| 515 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); | ||
| 516 | SSYNC(); | ||
| 517 | |||
| 518 | /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and | ||
| 519 | * mmc stack will do the detection. | ||
| 520 | */ | ||
| 521 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
| 522 | SSYNC(); | ||
| 523 | } | ||
| 524 | |||
| 445 | static int __devinit sdh_probe(struct platform_device *pdev) | 525 | static int __devinit sdh_probe(struct platform_device *pdev) |
| 446 | { | 526 | { |
| 447 | struct mmc_host *mmc; | 527 | struct mmc_host *mmc; |
| @@ -462,8 +542,16 @@ static int __devinit sdh_probe(struct platform_device *pdev) | |||
| 462 | } | 542 | } |
| 463 | 543 | ||
| 464 | mmc->ops = &sdh_ops; | 544 | mmc->ops = &sdh_ops; |
| 465 | mmc->max_segs = 32; | 545 | #if defined(CONFIG_BF51x) |
| 546 | mmc->max_segs = 1; | ||
| 547 | #else | ||
| 548 | mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array); | ||
| 549 | #endif | ||
| 550 | #ifdef RSI_BLKSZ | ||
| 551 | mmc->max_seg_size = -1; | ||
| 552 | #else | ||
| 466 | mmc->max_seg_size = 1 << 16; | 553 | mmc->max_seg_size = 1 << 16; |
| 554 | #endif | ||
| 467 | mmc->max_blk_size = 1 << 11; | 555 | mmc->max_blk_size = 1 << 11; |
| 468 | mmc->max_blk_count = 1 << 11; | 556 | mmc->max_blk_count = 1 << 11; |
| 469 | mmc->max_req_size = PAGE_SIZE; | 557 | mmc->max_req_size = PAGE_SIZE; |
| @@ -473,6 +561,7 @@ static int __devinit sdh_probe(struct platform_device *pdev) | |||
| 473 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; | 561 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; |
| 474 | host = mmc_priv(mmc); | 562 | host = mmc_priv(mmc); |
| 475 | host->mmc = mmc; | 563 | host->mmc = mmc; |
| 564 | host->sclk = get_sclk(); | ||
| 476 | 565 | ||
| 477 | spin_lock_init(&host->lock); | 566 | spin_lock_init(&host->lock); |
| 478 | host->irq = drv_data->irq_int0; | 567 | host->irq = drv_data->irq_int0; |
| @@ -497,7 +586,6 @@ static int __devinit sdh_probe(struct platform_device *pdev) | |||
| 497 | } | 586 | } |
| 498 | 587 | ||
| 499 | platform_set_drvdata(pdev, mmc); | 588 | platform_set_drvdata(pdev, mmc); |
| 500 | mmc_add_host(mmc); | ||
| 501 | 589 | ||
| 502 | ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); | 590 | ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); |
| 503 | if (ret) { | 591 | if (ret) { |
| @@ -510,20 +598,10 @@ static int __devinit sdh_probe(struct platform_device *pdev) | |||
| 510 | dev_err(&pdev->dev, "unable to request peripheral pins\n"); | 598 | dev_err(&pdev->dev, "unable to request peripheral pins\n"); |
| 511 | goto out4; | 599 | goto out4; |
| 512 | } | 600 | } |
| 513 | #if defined(CONFIG_BF54x) | ||
| 514 | /* Secure Digital Host shares DMA with Nand controller */ | ||
| 515 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
| 516 | #endif | ||
| 517 | |||
| 518 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); | ||
| 519 | SSYNC(); | ||
| 520 | 601 | ||
| 521 | /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and | 602 | sdh_reset(); |
| 522 | * mmc stack will do the detection. | ||
| 523 | */ | ||
| 524 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
| 525 | SSYNC(); | ||
| 526 | 603 | ||
| 604 | mmc_add_host(mmc); | ||
| 527 | return 0; | 605 | return 0; |
| 528 | 606 | ||
| 529 | out4: | 607 | out4: |
| @@ -571,7 +649,6 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state) | |||
| 571 | if (mmc) | 649 | if (mmc) |
| 572 | ret = mmc_suspend_host(mmc); | 650 | ret = mmc_suspend_host(mmc); |
| 573 | 651 | ||
| 574 | bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); | ||
| 575 | peripheral_free_list(drv_data->pin_req); | 652 | peripheral_free_list(drv_data->pin_req); |
| 576 | 653 | ||
| 577 | return ret; | 654 | return ret; |
| @@ -589,16 +666,7 @@ static int sdh_resume(struct platform_device *dev) | |||
| 589 | return ret; | 666 | return ret; |
| 590 | } | 667 | } |
| 591 | 668 | ||
| 592 | bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); | 669 | sdh_reset(); |
| 593 | #if defined(CONFIG_BF54x) | ||
| 594 | /* Secure Digital Host shares DMA with Nand controller */ | ||
| 595 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
| 596 | #endif | ||
| 597 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); | ||
| 598 | SSYNC(); | ||
| 599 | |||
| 600 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
| 601 | SSYNC(); | ||
| 602 | 670 | ||
| 603 | if (mmc) | 671 | if (mmc) |
| 604 | ret = mmc_resume_host(mmc); | 672 | ret = mmc_resume_host(mmc); |
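
In the reworked sdh_set_ios() the bus clock is sclk / (2 * (clk_div + 1)), so the driver now picks clk_div = (sclk / clock - 1) / 2 and clamps it to the 8-bit divider field; the same relation feeds the data-timeout calculation, cycle_ns = 10^9 / (sclk / (2 * (clk_div + 1))). A small standalone check of that arithmetic follows; the clock values are illustrative, not taken from any board file.

/* Standalone check of the divider math above; values are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long sclk = 100000000;		/* assumed system clock: 100 MHz */
	unsigned long clock = 25000000;		/* requested card clock: 25 MHz */
	unsigned long clk_div = (sclk / clock - 1) / 2;

	if (clk_div > 0xFF)			/* CLK_CTL divider is 8 bits wide */
		clk_div = 0xFF;

	printf("clk_div=%lu actual=%lu Hz cycle=%lu ns\n",
	       clk_div,
	       sclk / (2 * (clk_div + 1)),
	       1000000000UL / (sclk / (2 * (clk_div + 1))));
	/* prints: clk_div=1 actual=25000000 Hz cycle=40 ns */
	return 0;
}
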
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 3dfd3473269d..20636772c09b 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c | |||
| @@ -30,11 +30,12 @@ | |||
| 30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
| 31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
| 32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
| 33 | #include <linux/dmaengine.h> | ||
| 33 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
| 35 | #include <linux/edma.h> | ||
| 34 | #include <linux/mmc/mmc.h> | 36 | #include <linux/mmc/mmc.h> |
| 35 | 37 | ||
| 36 | #include <linux/platform_data/mmc-davinci.h> | 38 | #include <linux/platform_data/mmc-davinci.h> |
| 37 | #include <mach/edma.h> | ||
| 38 | 39 | ||
| 39 | /* | 40 | /* |
| 40 | * Register Definitions | 41 | * Register Definitions |
| @@ -200,21 +201,13 @@ struct mmc_davinci_host { | |||
| 200 | u32 bytes_left; | 201 | u32 bytes_left; |
| 201 | 202 | ||
| 202 | u32 rxdma, txdma; | 203 | u32 rxdma, txdma; |
| 204 | struct dma_chan *dma_tx; | ||
| 205 | struct dma_chan *dma_rx; | ||
| 203 | bool use_dma; | 206 | bool use_dma; |
| 204 | bool do_dma; | 207 | bool do_dma; |
| 205 | bool sdio_int; | 208 | bool sdio_int; |
| 206 | bool active_request; | 209 | bool active_request; |
| 207 | 210 | ||
| 208 | /* Scatterlist DMA uses one or more parameter RAM entries: | ||
| 209 | * the main one (associated with rxdma or txdma) plus zero or | ||
| 210 | * more links. The entries for a given transfer differ only | ||
| 211 | * by memory buffer (address, length) and link field. | ||
| 212 | */ | ||
| 213 | struct edmacc_param tx_template; | ||
| 214 | struct edmacc_param rx_template; | ||
| 215 | unsigned n_link; | ||
| 216 | u32 links[MAX_NR_SG - 1]; | ||
| 217 | |||
| 218 | /* For PIO we walk scatterlists one segment at a time. */ | 211 | /* For PIO we walk scatterlists one segment at a time. */ |
| 219 | unsigned int sg_len; | 212 | unsigned int sg_len; |
| 220 | struct scatterlist *sg; | 213 | struct scatterlist *sg; |
| @@ -410,153 +403,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, | |||
| 410 | 403 | ||
| 411 | static void davinci_abort_dma(struct mmc_davinci_host *host) | 404 | static void davinci_abort_dma(struct mmc_davinci_host *host) |
| 412 | { | 405 | { |
| 413 | int sync_dev; | 406 | struct dma_chan *sync_dev; |
| 414 | 407 | ||
| 415 | if (host->data_dir == DAVINCI_MMC_DATADIR_READ) | 408 | if (host->data_dir == DAVINCI_MMC_DATADIR_READ) |
| 416 | sync_dev = host->rxdma; | 409 | sync_dev = host->dma_rx; |
| 417 | else | 410 | else |
| 418 | sync_dev = host->txdma; | 411 | sync_dev = host->dma_tx; |
| 419 | |||
| 420 | edma_stop(sync_dev); | ||
| 421 | edma_clean_channel(sync_dev); | ||
| 422 | } | ||
| 423 | |||
| 424 | static void | ||
| 425 | mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); | ||
| 426 | |||
| 427 | static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) | ||
| 428 | { | ||
| 429 | if (DMA_COMPLETE != ch_status) { | ||
| 430 | struct mmc_davinci_host *host = data; | ||
| 431 | |||
| 432 | /* Currently means: DMA Event Missed, or "null" transfer | ||
| 433 | * request was seen. In the future, TC errors (like bad | ||
| 434 | * addresses) might be presented too. | ||
| 435 | */ | ||
| 436 | dev_warn(mmc_dev(host->mmc), "DMA %s error\n", | ||
| 437 | (host->data->flags & MMC_DATA_WRITE) | ||
| 438 | ? "write" : "read"); | ||
| 439 | host->data->error = -EIO; | ||
| 440 | mmc_davinci_xfer_done(host, host->data); | ||
| 441 | } | ||
| 442 | } | ||
| 443 | |||
| 444 | /* Set up tx or rx template, to be modified and updated later */ | ||
| 445 | static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, | ||
| 446 | bool tx, struct edmacc_param *template) | ||
| 447 | { | ||
| 448 | unsigned sync_dev; | ||
| 449 | const u16 acnt = 4; | ||
| 450 | const u16 bcnt = rw_threshold >> 2; | ||
| 451 | const u16 ccnt = 0; | ||
| 452 | u32 src_port = 0; | ||
| 453 | u32 dst_port = 0; | ||
| 454 | s16 src_bidx, dst_bidx; | ||
| 455 | s16 src_cidx, dst_cidx; | ||
| 456 | |||
| 457 | /* | ||
| 458 | * A-B Sync transfer: each DMA request is for one "frame" of | ||
| 459 | * rw_threshold bytes, broken into "acnt"-size chunks repeated | ||
| 460 | * "bcnt" times. Each segment needs "ccnt" such frames; since | ||
| 461 | * we tell the block layer our mmc->max_seg_size limit, we can | ||
| 462 | * trust (later) that it's within bounds. | ||
| 463 | * | ||
| 464 | * The FIFOs are read/written in 4-byte chunks (acnt == 4) and | ||
| 465 | * EDMA will optimize memory operations to use larger bursts. | ||
| 466 | */ | ||
| 467 | if (tx) { | ||
| 468 | sync_dev = host->txdma; | ||
| 469 | |||
| 470 | /* src_prt, ccnt, and link to be set up later */ | ||
| 471 | src_bidx = acnt; | ||
| 472 | src_cidx = acnt * bcnt; | ||
| 473 | |||
| 474 | dst_port = host->mem_res->start + DAVINCI_MMCDXR; | ||
| 475 | dst_bidx = 0; | ||
| 476 | dst_cidx = 0; | ||
| 477 | } else { | ||
| 478 | sync_dev = host->rxdma; | ||
| 479 | |||
| 480 | src_port = host->mem_res->start + DAVINCI_MMCDRR; | ||
| 481 | src_bidx = 0; | ||
| 482 | src_cidx = 0; | ||
| 483 | |||
| 484 | /* dst_prt, ccnt, and link to be set up later */ | ||
| 485 | dst_bidx = acnt; | ||
| 486 | dst_cidx = acnt * bcnt; | ||
| 487 | } | ||
| 488 | |||
| 489 | /* | ||
| 490 | * We can't use FIFO mode for the FIFOs because MMC FIFO addresses | ||
| 491 | * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT | ||
| 492 | * parameter is ignored. | ||
| 493 | */ | ||
| 494 | edma_set_src(sync_dev, src_port, INCR, W8BIT); | ||
| 495 | edma_set_dest(sync_dev, dst_port, INCR, W8BIT); | ||
| 496 | 412 | ||
| 497 | edma_set_src_index(sync_dev, src_bidx, src_cidx); | 413 | dmaengine_terminate_all(sync_dev); |
| 498 | edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); | ||
| 499 | |||
| 500 | edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); | ||
| 501 | |||
| 502 | edma_read_slot(sync_dev, template); | ||
| 503 | |||
| 504 | /* don't bother with irqs or chaining */ | ||
| 505 | template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; | ||
| 506 | } | 414 | } |
| 507 | 415 | ||
| 508 | static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, | 416 | static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, |
| 509 | struct mmc_data *data) | 417 | struct mmc_data *data) |
| 510 | { | 418 | { |
| 511 | struct edmacc_param *template; | 419 | struct dma_chan *chan; |
| 512 | int channel, slot; | 420 | struct dma_async_tx_descriptor *desc; |
| 513 | unsigned link; | 421 | int ret = 0; |
| 514 | struct scatterlist *sg; | ||
| 515 | unsigned sg_len; | ||
| 516 | unsigned bytes_left = host->bytes_left; | ||
| 517 | const unsigned shift = ffs(rw_threshold) - 1; | ||
| 518 | 422 | ||
| 519 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { | 423 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { |
| 520 | template = &host->tx_template; | 424 | struct dma_slave_config dma_tx_conf = { |
| 521 | channel = host->txdma; | 425 | .direction = DMA_MEM_TO_DEV, |
| 426 | .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, | ||
| 427 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
| 428 | .dst_maxburst = | ||
| 429 | rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
| 430 | }; | ||
| 431 | chan = host->dma_tx; | ||
| 432 | dmaengine_slave_config(host->dma_tx, &dma_tx_conf); | ||
| 433 | |||
| 434 | desc = dmaengine_prep_slave_sg(host->dma_tx, | ||
| 435 | data->sg, | ||
| 436 | host->sg_len, | ||
| 437 | DMA_MEM_TO_DEV, | ||
| 438 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 439 | if (!desc) { | ||
| 440 | dev_dbg(mmc_dev(host->mmc), | ||
| 441 | "failed to allocate DMA TX descriptor"); | ||
| 442 | ret = -1; | ||
| 443 | goto out; | ||
| 444 | } | ||
| 522 | } else { | 445 | } else { |
| 523 | template = &host->rx_template; | 446 | struct dma_slave_config dma_rx_conf = { |
| 524 | channel = host->rxdma; | 447 | .direction = DMA_DEV_TO_MEM, |
| 525 | } | 448 | .src_addr = host->mem_res->start + DAVINCI_MMCDRR, |
| 526 | 449 | .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | |
| 527 | /* We know sg_len and ccnt will never be out of range because | 450 | .src_maxburst = |
| 528 | * we told the mmc layer which in turn tells the block layer | 451 | rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, |
| 529 | * to ensure that it only hands us one scatterlist segment | 452 | }; |
| 530 | * per EDMA PARAM entry. Update the PARAM | 453 | chan = host->dma_rx; |
| 531 | * entries needed for each segment of this scatterlist. | 454 | dmaengine_slave_config(host->dma_rx, &dma_rx_conf); |
| 532 | */ | 455 | |
| 533 | for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; | 456 | desc = dmaengine_prep_slave_sg(host->dma_rx, |
| 534 | sg_len-- != 0 && bytes_left; | 457 | data->sg, |
| 535 | sg = sg_next(sg), slot = host->links[link++]) { | 458 | host->sg_len, |
| 536 | u32 buf = sg_dma_address(sg); | 459 | DMA_DEV_TO_MEM, |
| 537 | unsigned count = sg_dma_len(sg); | 460 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
| 538 | 461 | if (!desc) { | |
| 539 | template->link_bcntrld = sg_len | 462 | dev_dbg(mmc_dev(host->mmc), |
| 540 | ? (EDMA_CHAN_SLOT(host->links[link]) << 5) | 463 | "failed to allocate DMA RX descriptor"); |
| 541 | : 0xffff; | 464 | ret = -1; |
| 542 | 465 | goto out; | |
| 543 | if (count > bytes_left) | 466 | } |
| 544 | count = bytes_left; | ||
| 545 | bytes_left -= count; | ||
| 546 | |||
| 547 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) | ||
| 548 | template->src = buf; | ||
| 549 | else | ||
| 550 | template->dst = buf; | ||
| 551 | template->ccnt = count >> shift; | ||
| 552 | |||
| 553 | edma_write_slot(slot, template); | ||
| 554 | } | 467 | } |
| 555 | 468 | ||
| 556 | if (host->version == MMC_CTLR_VERSION_2) | 469 | dmaengine_submit(desc); |
| 557 | edma_clear_event(channel); | 470 | dma_async_issue_pending(chan); |
| 558 | 471 | ||
| 559 | edma_start(channel); | 472 | out: |
| 473 | return ret; | ||
| 560 | } | 474 | } |
| 561 | 475 | ||
| 562 | static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | 476 | static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, |
| @@ -564,6 +478,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | |||
| 564 | { | 478 | { |
| 565 | int i; | 479 | int i; |
| 566 | int mask = rw_threshold - 1; | 480 | int mask = rw_threshold - 1; |
| 481 | int ret = 0; | ||
| 567 | 482 | ||
| 568 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, | 483 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
| 569 | ((data->flags & MMC_DATA_WRITE) | 484 | ((data->flags & MMC_DATA_WRITE) |
| @@ -583,70 +498,48 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | |||
| 583 | } | 498 | } |
| 584 | 499 | ||
| 585 | host->do_dma = 1; | 500 | host->do_dma = 1; |
| 586 | mmc_davinci_send_dma_request(host, data); | 501 | ret = mmc_davinci_send_dma_request(host, data); |
| 587 | 502 | ||
| 588 | return 0; | 503 | return ret; |
| 589 | } | 504 | } |
| 590 | 505 | ||
| 591 | static void __init_or_module | 506 | static void __init_or_module |
| 592 | davinci_release_dma_channels(struct mmc_davinci_host *host) | 507 | davinci_release_dma_channels(struct mmc_davinci_host *host) |
| 593 | { | 508 | { |
| 594 | unsigned i; | ||
| 595 | |||
| 596 | if (!host->use_dma) | 509 | if (!host->use_dma) |
| 597 | return; | 510 | return; |
| 598 | 511 | ||
| 599 | for (i = 0; i < host->n_link; i++) | 512 | dma_release_channel(host->dma_tx); |
| 600 | edma_free_slot(host->links[i]); | 513 | dma_release_channel(host->dma_rx); |
| 601 | |||
| 602 | edma_free_channel(host->txdma); | ||
| 603 | edma_free_channel(host->rxdma); | ||
| 604 | } | 514 | } |
| 605 | 515 | ||
| 606 | static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) | 516 | static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) |
| 607 | { | 517 | { |
| 608 | u32 link_size; | 518 | int r; |
| 609 | int r, i; | 519 | dma_cap_mask_t mask; |
| 610 | 520 | ||
| 611 | /* Acquire master DMA write channel */ | 521 | dma_cap_zero(mask); |
| 612 | r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, | 522 | dma_cap_set(DMA_SLAVE, mask); |
| 613 | EVENTQ_DEFAULT); | 523 | |
| 614 | if (r < 0) { | 524 | host->dma_tx = |
| 615 | dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", | 525 | dma_request_channel(mask, edma_filter_fn, &host->txdma); |
| 616 | "tx", r); | 526 | if (!host->dma_tx) { |
| 617 | return r; | 527 | dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); |
| 618 | } | 528 | return -ENODEV; |
| 619 | mmc_davinci_dma_setup(host, true, &host->tx_template); | ||
| 620 | |||
| 621 | /* Acquire master DMA read channel */ | ||
| 622 | r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, | ||
| 623 | EVENTQ_DEFAULT); | ||
| 624 | if (r < 0) { | ||
| 625 | dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", | ||
| 626 | "rx", r); | ||
| 627 | goto free_master_write; | ||
| 628 | } | 529 | } |
| 629 | mmc_davinci_dma_setup(host, false, &host->rx_template); | ||
| 630 | 530 | ||
| 631 | /* Allocate parameter RAM slots, which will later be bound to a | 531 | host->dma_rx = |
| 632 | * channel as needed to handle a scatterlist. | 532 | dma_request_channel(mask, edma_filter_fn, &host->rxdma); |
| 633 | */ | 533 | if (!host->dma_rx) { |
| 634 | link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); | 534 | dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); |
| 635 | for (i = 0; i < link_size; i++) { | 535 | r = -ENODEV; |
| 636 | r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); | 536 | goto free_master_write; |
| 637 | if (r < 0) { | ||
| 638 | dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", | ||
| 639 | r); | ||
| 640 | break; | ||
| 641 | } | ||
| 642 | host->links[i] = r; | ||
| 643 | } | 537 | } |
| 644 | host->n_link = i; | ||
| 645 | 538 | ||
| 646 | return 0; | 539 | return 0; |
| 647 | 540 | ||
| 648 | free_master_write: | 541 | free_master_write: |
| 649 | edma_free_channel(host->txdma); | 542 | dma_release_channel(host->dma_tx); |
| 650 | 543 | ||
| 651 | return r; | 544 | return r; |
| 652 | } | 545 | } |
| @@ -1359,7 +1252,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) | |||
| 1359 | * Each hw_seg uses one EDMA parameter RAM slot, always one | 1252 | * Each hw_seg uses one EDMA parameter RAM slot, always one |
| 1360 | * channel and then usually some linked slots. | 1253 | * channel and then usually some linked slots. |
| 1361 | */ | 1254 | */ |
| 1362 | mmc->max_segs = 1 + host->n_link; | 1255 | mmc->max_segs = MAX_NR_SG; |
| 1363 | 1256 | ||
| 1364 | /* EDMA limit per hw segment (one or two MBytes) */ | 1257 | /* EDMA limit per hw segment (one or two MBytes) */ |
| 1365 | mmc->max_seg_size = MAX_CCNT * rw_threshold; | 1258 | mmc->max_seg_size = MAX_CCNT * rw_threshold; |
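
The davinci_mmc changes above drop the private EDMA calls in favour of the generic dmaengine slave API: channels are taken with dma_request_channel() and edma_filter_fn, the FIFO address and burst size are set through dmaengine_slave_config(), and each request is mapped with dmaengine_prep_slave_sg() and started with dmaengine_submit()/dma_async_issue_pending(). A condensed sketch of that write-path sequence follows; the example_ names and the burst size are placeholders, only the dmaengine calls themselves are real API.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Condensed dmaengine slave write path, as used by the conversion above.
 * example_ names and the burst size are placeholders. */
static int example_start_write(struct dma_chan *chan, dma_addr_t fifo_addr,
			       struct scatterlist *sg, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* controller data FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,		/* assumed burst, in words */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);		/* queue the descriptor ... */
	dma_async_issue_pending(chan);	/* ... and start the transfer */
	return 0;
}
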
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c new file mode 100644 index 000000000000..660bbc528862 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-exynos.c | |||
| @@ -0,0 +1,253 @@ | |||
| 1 | /* | ||
| 2 | * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012, Samsung Electronics Co., Ltd. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/platform_device.h> | ||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/mmc/host.h> | ||
| 16 | #include <linux/mmc/dw_mmc.h> | ||
| 17 | #include <linux/of.h> | ||
| 18 | #include <linux/of_gpio.h> | ||
| 19 | |||
| 20 | #include "dw_mmc.h" | ||
| 21 | #include "dw_mmc-pltfm.h" | ||
| 22 | |||
| 23 | #define NUM_PINS(x) (x + 2) | ||
| 24 | |||
| 25 | #define SDMMC_CLKSEL 0x09C | ||
| 26 | #define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0) | ||
| 27 | #define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16) | ||
| 28 | #define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24) | ||
| 29 | #define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7) | ||
| 30 | #define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ | ||
| 31 | SDMMC_CLKSEL_CCLK_DRIVE(y) | \ | ||
| 32 | SDMMC_CLKSEL_CCLK_DIVIDER(z)) | ||
| 33 | |||
| 34 | #define SDMMC_CMD_USE_HOLD_REG BIT(29) | ||
| 35 | |||
| 36 | #define EXYNOS4210_FIXED_CIU_CLK_DIV 2 | ||
| 37 | #define EXYNOS4412_FIXED_CIU_CLK_DIV 4 | ||
| 38 | |||
| 39 | /* Variations in Exynos specific dw-mshc controller */ | ||
| 40 | enum dw_mci_exynos_type { | ||
| 41 | DW_MCI_TYPE_EXYNOS4210, | ||
| 42 | DW_MCI_TYPE_EXYNOS4412, | ||
| 43 | DW_MCI_TYPE_EXYNOS5250, | ||
| 44 | }; | ||
| 45 | |||
| 46 | /* Exynos implementation specific driver private data */ | ||
| 47 | struct dw_mci_exynos_priv_data { | ||
| 48 | enum dw_mci_exynos_type ctrl_type; | ||
| 49 | u8 ciu_div; | ||
| 50 | u32 sdr_timing; | ||
| 51 | u32 ddr_timing; | ||
| 52 | }; | ||
| 53 | |||
| 54 | static struct dw_mci_exynos_compatible { | ||
| 55 | char *compatible; | ||
| 56 | enum dw_mci_exynos_type ctrl_type; | ||
| 57 | } exynos_compat[] = { | ||
| 58 | { | ||
| 59 | .compatible = "samsung,exynos4210-dw-mshc", | ||
| 60 | .ctrl_type = DW_MCI_TYPE_EXYNOS4210, | ||
| 61 | }, { | ||
| 62 | .compatible = "samsung,exynos4412-dw-mshc", | ||
| 63 | .ctrl_type = DW_MCI_TYPE_EXYNOS4412, | ||
| 64 | }, { | ||
| 65 | .compatible = "samsung,exynos5250-dw-mshc", | ||
| 66 | .ctrl_type = DW_MCI_TYPE_EXYNOS5250, | ||
| 67 | }, | ||
| 68 | }; | ||
| 69 | |||
| 70 | static int dw_mci_exynos_priv_init(struct dw_mci *host) | ||
| 71 | { | ||
| 72 | struct dw_mci_exynos_priv_data *priv; | ||
| 73 | int idx; | ||
| 74 | |||
| 75 | priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); | ||
| 76 | if (!priv) { | ||
| 77 | dev_err(host->dev, "mem alloc failed for private data\n"); | ||
| 78 | return -ENOMEM; | ||
| 79 | } | ||
| 80 | |||
| 81 | for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { | ||
| 82 | if (of_device_is_compatible(host->dev->of_node, | ||
| 83 | exynos_compat[idx].compatible)) | ||
| 84 | priv->ctrl_type = exynos_compat[idx].ctrl_type; | ||
| 85 | } | ||
| 86 | |||
| 87 | host->priv = priv; | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | static int dw_mci_exynos_setup_clock(struct dw_mci *host) | ||
| 92 | { | ||
| 93 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
| 94 | |||
| 95 | if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250) | ||
| 96 | host->bus_hz /= (priv->ciu_div + 1); | ||
| 97 | else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) | ||
| 98 | host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV; | ||
| 99 | else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) | ||
| 100 | host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV; | ||
| 101 | |||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | |||
| 105 | static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) | ||
| 106 | { | ||
| 107 | /* | ||
| 108 | * Exynos4412 and Exynos5250 extends the use of CMD register with the | ||
| 109 | * use of bit 29 (which is reserved on standard MSHC controllers) for | ||
| 110 | * optionally bypassing the HOLD register for command and data. The | ||
| 111 | * HOLD register should be bypassed in case there is no phase shift | ||
| 112 | * applied on CMD/DATA that is sent to the card. | ||
| 113 | */ | ||
| 114 | if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) | ||
| 115 | *cmdr |= SDMMC_CMD_USE_HOLD_REG; | ||
| 116 | } | ||
| 117 | |||
| 118 | static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) | ||
| 119 | { | ||
| 120 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
| 121 | |||
| 122 | if (ios->timing == MMC_TIMING_UHS_DDR50) | ||
| 123 | mci_writel(host, CLKSEL, priv->ddr_timing); | ||
| 124 | else | ||
| 125 | mci_writel(host, CLKSEL, priv->sdr_timing); | ||
| 126 | } | ||
| 127 | |||
| 128 | static int dw_mci_exynos_parse_dt(struct dw_mci *host) | ||
| 129 | { | ||
| 130 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
| 131 | struct device_node *np = host->dev->of_node; | ||
| 132 | u32 timing[2]; | ||
| 133 | u32 div = 0; | ||
| 134 | int ret; | ||
| 135 | |||
| 136 | of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); | ||
| 137 | priv->ciu_div = div; | ||
| 138 | |||
| 139 | ret = of_property_read_u32_array(np, | ||
| 140 | "samsung,dw-mshc-sdr-timing", timing, 2); | ||
| 141 | if (ret) | ||
| 142 | return ret; | ||
| 143 | |||
| 144 | priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); | ||
| 145 | |||
| 146 | ret = of_property_read_u32_array(np, | ||
| 147 | "samsung,dw-mshc-ddr-timing", timing, 2); | ||
| 148 | if (ret) | ||
| 149 | return ret; | ||
| 150 | |||
| 151 | priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | static int dw_mci_exynos_setup_bus(struct dw_mci *host, | ||
| 156 | struct device_node *slot_np, u8 bus_width) | ||
| 157 | { | ||
| 158 | int idx, gpio, ret; | ||
| 159 | |||
| 160 | if (!slot_np) | ||
| 161 | return -EINVAL; | ||
| 162 | |||
| 163 | /* cmd + clock + bus-width pins */ | ||
| 164 | for (idx = 0; idx < NUM_PINS(bus_width); idx++) { | ||
| 165 | gpio = of_get_gpio(slot_np, idx); | ||
| 166 | if (!gpio_is_valid(gpio)) { | ||
| 167 | dev_err(host->dev, "invalid gpio: %d\n", gpio); | ||
| 168 | return -EINVAL; | ||
| 169 | } | ||
| 170 | |||
| 171 | ret = devm_gpio_request(host->dev, gpio, "dw-mci-bus"); | ||
| 172 | if (ret) { | ||
| 173 | dev_err(host->dev, "gpio [%d] request failed\n", gpio); | ||
| 174 | return -EBUSY; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | |||
| 178 | gpio = of_get_named_gpio(slot_np, "wp-gpios", 0); | ||
| 179 | if (gpio_is_valid(gpio)) { | ||
| 180 | if (devm_gpio_request(host->dev, gpio, "dw-mci-wp")) | ||
| 181 | dev_info(host->dev, "gpio [%d] request failed\n", | ||
| 182 | gpio); | ||
| 183 | } else { | ||
| 184 | dev_info(host->dev, "wp gpio not available"); | ||
| 185 | host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT; | ||
| 186 | } | ||
| 187 | |||
| 188 | if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) | ||
| 189 | return 0; | ||
| 190 | |||
| 191 | gpio = of_get_named_gpio(slot_np, "samsung,cd-pinmux-gpio", 0); | ||
| 192 | if (gpio_is_valid(gpio)) { | ||
| 193 | if (devm_gpio_request(host->dev, gpio, "dw-mci-cd")) | ||
| 194 | dev_err(host->dev, "gpio [%d] request failed\n", gpio); | ||
| 195 | } else { | ||
| 196 | dev_info(host->dev, "cd gpio not available"); | ||
| 197 | } | ||
| 198 | |||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | /* Exynos5250 controller specific capabilities */ | ||
| 203 | static unsigned long exynos5250_dwmmc_caps[4] = { | ||
| 204 | MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | | ||
| 205 | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, | ||
| 206 | MMC_CAP_CMD23, | ||
| 207 | MMC_CAP_CMD23, | ||
| 208 | MMC_CAP_CMD23, | ||
| 209 | }; | ||
| 210 | |||
| 211 | static struct dw_mci_drv_data exynos5250_drv_data = { | ||
| 212 | .caps = exynos5250_dwmmc_caps, | ||
| 213 | .init = dw_mci_exynos_priv_init, | ||
| 214 | .setup_clock = dw_mci_exynos_setup_clock, | ||
| 215 | .prepare_command = dw_mci_exynos_prepare_command, | ||
| 216 | .set_ios = dw_mci_exynos_set_ios, | ||
| 217 | .parse_dt = dw_mci_exynos_parse_dt, | ||
| 218 | .setup_bus = dw_mci_exynos_setup_bus, | ||
| 219 | }; | ||
| 220 | |||
| 221 | static const struct of_device_id dw_mci_exynos_match[] = { | ||
| 222 | { .compatible = "samsung,exynos5250-dw-mshc", | ||
| 223 | .data = (void *)&exynos5250_drv_data, }, | ||
| 224 | {}, | ||
| 225 | }; | ||
| 226 | MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); | ||
| 227 | |||
| 228 | int dw_mci_exynos_probe(struct platform_device *pdev) | ||
| 229 | { | ||
| 230 | struct dw_mci_drv_data *drv_data; | ||
| 231 | const struct of_device_id *match; | ||
| 232 | |||
| 233 | match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); | ||
| 234 | drv_data = match->data; | ||
| 235 | return dw_mci_pltfm_register(pdev, drv_data); | ||
| 236 | } | ||
| 237 | |||
| 238 | static struct platform_driver dw_mci_exynos_pltfm_driver = { | ||
| 239 | .probe = dw_mci_exynos_probe, | ||
| 240 | .remove = __exit_p(dw_mci_pltfm_remove), | ||
| 241 | .driver = { | ||
| 242 | .name = "dwmmc_exynos", | ||
| 243 | .of_match_table = of_match_ptr(dw_mci_exynos_match), | ||
| 244 | .pm = &dw_mci_pltfm_pmops, | ||
| 245 | }, | ||
| 246 | }; | ||
| 247 | |||
| 248 | module_platform_driver(dw_mci_exynos_pltfm_driver); | ||
| 249 | |||
| 250 | MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); | ||
| 251 | MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>"); | ||
| 252 | MODULE_LICENSE("GPL v2"); | ||
| 253 | MODULE_ALIAS("platform:dwmmc-exynos"); | ||
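
dw_mci_exynos_parse_dt() above reads two timing cells plus "samsung,dw-mshc-ciu-div" from the node and packs them into the CLKSEL register image through SDMMC_CLKSEL_TIMING(sample, drive, divider). The packing can be checked in isolation with the macros from the new file; the property values used here are illustrative only.

/* Standalone check of the CLKSEL packing; values are illustrative. */
#include <stdio.h>

#define SDMMC_CLKSEL_CCLK_SAMPLE(x)	(((x) & 7) << 0)
#define SDMMC_CLKSEL_CCLK_DRIVE(x)	(((x) & 7) << 16)
#define SDMMC_CLKSEL_CCLK_DIVIDER(x)	(((x) & 7) << 24)
#define SDMMC_CLKSEL_TIMING(x, y, z)	(SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
					 SDMMC_CLKSEL_CCLK_DRIVE(y) | \
					 SDMMC_CLKSEL_CCLK_DIVIDER(z))

int main(void)
{
	/* e.g. samsung,dw-mshc-sdr-timing = <2 1>, ciu-div = <3> */
	unsigned int sdr_timing = SDMMC_CLKSEL_TIMING(2, 1, 3);

	printf("sdr_timing = 0x%08x\n", sdr_timing);	/* 0x03010002 */
	return 0;
}
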
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c index dc0d25a013e0..edb37e9135ae 100644 --- a/drivers/mmc/host/dw_mmc-pci.c +++ b/drivers/mmc/host/dw_mmc-pci.c | |||
| @@ -59,7 +59,7 @@ static int __devinit dw_mci_pci_probe(struct pci_dev *pdev, | |||
| 59 | 59 | ||
| 60 | host->irq = pdev->irq; | 60 | host->irq = pdev->irq; |
| 61 | host->irq_flags = IRQF_SHARED; | 61 | host->irq_flags = IRQF_SHARED; |
| 62 | host->dev = pdev->dev; | 62 | host->dev = &pdev->dev; |
| 63 | host->pdata = &pci_board_data; | 63 | host->pdata = &pci_board_data; |
| 64 | 64 | ||
| 65 | host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); | 65 | host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); |
| @@ -140,18 +140,7 @@ static struct pci_driver dw_mci_pci_driver = { | |||
| 140 | }, | 140 | }, |
| 141 | }; | 141 | }; |
| 142 | 142 | ||
| 143 | static int __init dw_mci_init(void) | 143 | module_pci_driver(dw_mci_pci_driver); |
| 144 | { | ||
| 145 | return pci_register_driver(&dw_mci_pci_driver); | ||
| 146 | } | ||
| 147 | |||
| 148 | static void __exit dw_mci_exit(void) | ||
| 149 | { | ||
| 150 | pci_unregister_driver(&dw_mci_pci_driver); | ||
| 151 | } | ||
| 152 | |||
| 153 | module_init(dw_mci_init); | ||
| 154 | module_exit(dw_mci_exit); | ||
| 155 | 144 | ||
| 156 | MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); | 145 | MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); |
| 157 | MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); | 146 | MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); |
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 92ec3eb3aae7..c960ca7ffbe6 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c | |||
| @@ -19,59 +19,63 @@ | |||
| 19 | #include <linux/mmc/host.h> | 19 | #include <linux/mmc/host.h> |
| 20 | #include <linux/mmc/mmc.h> | 20 | #include <linux/mmc/mmc.h> |
| 21 | #include <linux/mmc/dw_mmc.h> | 21 | #include <linux/mmc/dw_mmc.h> |
| 22 | #include <linux/of.h> | ||
| 23 | |||
| 22 | #include "dw_mmc.h" | 24 | #include "dw_mmc.h" |
| 23 | 25 | ||
| 24 | static int dw_mci_pltfm_probe(struct platform_device *pdev) | 26 | int dw_mci_pltfm_register(struct platform_device *pdev, |
| 27 | struct dw_mci_drv_data *drv_data) | ||
| 25 | { | 28 | { |
| 26 | struct dw_mci *host; | 29 | struct dw_mci *host; |
| 27 | struct resource *regs; | 30 | struct resource *regs; |
| 28 | int ret; | 31 | int ret; |
| 29 | 32 | ||
| 30 | host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); | 33 | host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); |
| 31 | if (!host) | 34 | if (!host) |
| 32 | return -ENOMEM; | 35 | return -ENOMEM; |
| 33 | 36 | ||
| 34 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 37 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 35 | if (!regs) { | 38 | if (!regs) |
| 36 | ret = -ENXIO; | 39 | return -ENXIO; |
| 37 | goto err_free; | ||
| 38 | } | ||
| 39 | 40 | ||
| 40 | host->irq = platform_get_irq(pdev, 0); | 41 | host->irq = platform_get_irq(pdev, 0); |
| 41 | if (host->irq < 0) { | 42 | if (host->irq < 0) |
| 42 | ret = host->irq; | 43 | return host->irq; |
| 43 | goto err_free; | ||
| 44 | } | ||
| 45 | 44 | ||
| 46 | host->dev = pdev->dev; | 45 | host->drv_data = drv_data; |
| 46 | host->dev = &pdev->dev; | ||
| 47 | host->irq_flags = 0; | 47 | host->irq_flags = 0; |
| 48 | host->pdata = pdev->dev.platform_data; | 48 | host->pdata = pdev->dev.platform_data; |
| 49 | ret = -ENOMEM; | 49 | host->regs = devm_request_and_ioremap(&pdev->dev, regs); |
| 50 | host->regs = ioremap(regs->start, resource_size(regs)); | ||
| 51 | if (!host->regs) | 50 | if (!host->regs) |
| 52 | goto err_free; | 51 | return -ENOMEM; |
| 52 | |||
| 53 | if (host->drv_data->init) { | ||
| 54 | ret = host->drv_data->init(host); | ||
| 55 | if (ret) | ||
| 56 | return ret; | ||
| 57 | } | ||
| 58 | |||
| 53 | platform_set_drvdata(pdev, host); | 59 | platform_set_drvdata(pdev, host); |
| 54 | ret = dw_mci_probe(host); | 60 | ret = dw_mci_probe(host); |
| 55 | if (ret) | ||
| 56 | goto err_out; | ||
| 57 | return ret; | ||
| 58 | err_out: | ||
| 59 | iounmap(host->regs); | ||
| 60 | err_free: | ||
| 61 | kfree(host); | ||
| 62 | return ret; | 61 | return ret; |
| 63 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_register); | ||
| 64 | 64 | ||
| 65 | static int __exit dw_mci_pltfm_remove(struct platform_device *pdev) | 65 | static int __devinit dw_mci_pltfm_probe(struct platform_device *pdev) |
| 66 | { | ||
| 67 | return dw_mci_pltfm_register(pdev, NULL); | ||
| 68 | } | ||
| 69 | |||
| 70 | static int __devexit dw_mci_pltfm_remove(struct platform_device *pdev) | ||
| 66 | { | 71 | { |
| 67 | struct dw_mci *host = platform_get_drvdata(pdev); | 72 | struct dw_mci *host = platform_get_drvdata(pdev); |
| 68 | 73 | ||
| 69 | platform_set_drvdata(pdev, NULL); | 74 | platform_set_drvdata(pdev, NULL); |
| 70 | dw_mci_remove(host); | 75 | dw_mci_remove(host); |
| 71 | iounmap(host->regs); | ||
| 72 | kfree(host); | ||
| 73 | return 0; | 76 | return 0; |
| 74 | } | 77 | } |
| 78 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove); | ||
| 75 | 79 | ||
| 76 | #ifdef CONFIG_PM_SLEEP | 80 | #ifdef CONFIG_PM_SLEEP |
| 77 | /* | 81 | /* |
| @@ -105,12 +109,20 @@ static int dw_mci_pltfm_resume(struct device *dev) | |||
| 105 | #define dw_mci_pltfm_resume NULL | 109 | #define dw_mci_pltfm_resume NULL |
| 106 | #endif /* CONFIG_PM_SLEEP */ | 110 | #endif /* CONFIG_PM_SLEEP */ |
| 107 | 111 | ||
| 108 | static SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); | 112 | SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); |
| 113 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops); | ||
| 114 | |||
| 115 | static const struct of_device_id dw_mci_pltfm_match[] = { | ||
| 116 | { .compatible = "snps,dw-mshc", }, | ||
| 117 | {}, | ||
| 118 | }; | ||
| 119 | MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); | ||
| 109 | 120 | ||
| 110 | static struct platform_driver dw_mci_pltfm_driver = { | 121 | static struct platform_driver dw_mci_pltfm_driver = { |
| 111 | .remove = __exit_p(dw_mci_pltfm_remove), | 122 | .remove = __exit_p(dw_mci_pltfm_remove), |
| 112 | .driver = { | 123 | .driver = { |
| 113 | .name = "dw_mmc", | 124 | .name = "dw_mmc", |
| 125 | .of_match_table = of_match_ptr(dw_mci_pltfm_match), | ||
| 114 | .pm = &dw_mci_pltfm_pmops, | 126 | .pm = &dw_mci_pltfm_pmops, |
| 115 | }, | 127 | }, |
| 116 | }; | 128 | }; |
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h new file mode 100644 index 000000000000..301f24541fc2 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* | ||
| 2 | * Synopsys DesignWare Multimedia Card Interface Platform driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012, Samsung Electronics Co., Ltd. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef _DW_MMC_PLTFM_H_ | ||
| 13 | #define _DW_MMC_PLTFM_H_ | ||
| 14 | |||
| 15 | extern int dw_mci_pltfm_register(struct platform_device *pdev, | ||
| 16 | struct dw_mci_drv_data *drv_data); | ||
| 17 | extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev); | ||
| 18 | extern const struct dev_pm_ops dw_mci_pltfm_pmops; | ||
| 19 | |||
| 20 | #endif /* _DW_MMC_PLTFM_H_ */ | ||
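The new dw_mmc-pltfm.h header turns the platform glue into a building block that SoC-specific front ends can reuse: such a driver keeps its own platform_driver and of_match_table, but delegates resource setup, PM ops and teardown to the exported helpers. The sketch below illustrates that wiring; the driver name, compatible string and the (empty) drv_data instance are hypothetical and are not part of this patch.

/* Hypothetical SoC glue driver built on the exported dw_mmc-pltfm helpers (sketch only). */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mmc/dw_mmc.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"

/* All dw_mci_drv_data members are optional; fill in only what the SoC needs. */
static struct dw_mci_drv_data example_drv_data;

static int __devinit example_dw_mci_probe(struct platform_device *pdev)
{
	/* Common resource and host setup is handled by the shared helper. */
	return dw_mci_pltfm_register(pdev, &example_drv_data);
}

static const struct of_device_id example_dw_mci_match[] = {
	{ .compatible = "example,soc-dw-mshc", },	/* hypothetical binding */
	{},
};
MODULE_DEVICE_TABLE(of, example_dw_mci_match);

static struct platform_driver example_dw_mci_driver = {
	.probe		= example_dw_mci_probe,
	.remove		= __devexit_p(dw_mci_pltfm_remove),
	.driver		= {
		.name		= "dwmmc_example",
		.of_match_table	= of_match_ptr(example_dw_mci_match),
		.pm		= &dw_mci_pltfm_pmops,
	},
};
module_platform_driver(example_dw_mci_driver);

MODULE_LICENSE("GPL v2");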
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index af40d227bece..c2828f35c3b8 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/bitops.h> | 33 | #include <linux/bitops.h> |
| 34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
| 35 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
| 36 | #include <linux/of.h> | ||
| 36 | 37 | ||
| 37 | #include "dw_mmc.h" | 38 | #include "dw_mmc.h" |
| 38 | 39 | ||
| @@ -230,6 +231,7 @@ static void dw_mci_set_timeout(struct dw_mci *host) | |||
| 230 | static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) | 231 | static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) |
| 231 | { | 232 | { |
| 232 | struct mmc_data *data; | 233 | struct mmc_data *data; |
| 234 | struct dw_mci_slot *slot = mmc_priv(mmc); | ||
| 233 | u32 cmdr; | 235 | u32 cmdr; |
| 234 | cmd->error = -EINPROGRESS; | 236 | cmd->error = -EINPROGRESS; |
| 235 | 237 | ||
| @@ -259,6 +261,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) | |||
| 259 | cmdr |= SDMMC_CMD_DAT_WR; | 261 | cmdr |= SDMMC_CMD_DAT_WR; |
| 260 | } | 262 | } |
| 261 | 263 | ||
| 264 | if (slot->host->drv_data->prepare_command) | ||
| 265 | slot->host->drv_data->prepare_command(slot->host, &cmdr); | ||
| 266 | |||
| 262 | return cmdr; | 267 | return cmdr; |
| 263 | } | 268 | } |
| 264 | 269 | ||
| @@ -266,7 +271,7 @@ static void dw_mci_start_command(struct dw_mci *host, | |||
| 266 | struct mmc_command *cmd, u32 cmd_flags) | 271 | struct mmc_command *cmd, u32 cmd_flags) |
| 267 | { | 272 | { |
| 268 | host->cmd = cmd; | 273 | host->cmd = cmd; |
| 269 | dev_vdbg(&host->dev, | 274 | dev_vdbg(host->dev, |
| 270 | "start command: ARGR=0x%08x CMDR=0x%08x\n", | 275 | "start command: ARGR=0x%08x CMDR=0x%08x\n", |
| 271 | cmd->arg, cmd_flags); | 276 | cmd->arg, cmd_flags); |
| 272 | 277 | ||
| @@ -308,7 +313,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host) | |||
| 308 | 313 | ||
| 309 | if (data) | 314 | if (data) |
| 310 | if (!data->host_cookie) | 315 | if (!data->host_cookie) |
| 311 | dma_unmap_sg(&host->dev, | 316 | dma_unmap_sg(host->dev, |
| 312 | data->sg, | 317 | data->sg, |
| 313 | data->sg_len, | 318 | data->sg_len, |
| 314 | dw_mci_get_dma_dir(data)); | 319 | dw_mci_get_dma_dir(data)); |
| @@ -334,7 +339,7 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host) | |||
| 334 | { | 339 | { |
| 335 | struct mmc_data *data = host->data; | 340 | struct mmc_data *data = host->data; |
| 336 | 341 | ||
| 337 | dev_vdbg(&host->dev, "DMA complete\n"); | 342 | dev_vdbg(host->dev, "DMA complete\n"); |
| 338 | 343 | ||
| 339 | host->dma_ops->cleanup(host); | 344 | host->dma_ops->cleanup(host); |
| 340 | 345 | ||
| @@ -405,23 +410,11 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) | |||
| 405 | static int dw_mci_idmac_init(struct dw_mci *host) | 410 | static int dw_mci_idmac_init(struct dw_mci *host) |
| 406 | { | 411 | { |
| 407 | struct idmac_desc *p; | 412 | struct idmac_desc *p; |
| 408 | int i, dma_support; | 413 | int i; |
| 409 | 414 | ||
| 410 | /* Number of descriptors in the ring buffer */ | 415 | /* Number of descriptors in the ring buffer */ |
| 411 | host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); | 416 | host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); |
| 412 | 417 | ||
| 413 | /* Check if Hardware Configuration Register has support for DMA */ | ||
| 414 | dma_support = (mci_readl(host, HCON) >> 16) & 0x3; | ||
| 415 | |||
| 416 | if (!dma_support || dma_support > 2) { | ||
| 417 | dev_err(&host->dev, | ||
| 418 | "Host Controller does not support IDMA Tx.\n"); | ||
| 419 | host->dma_ops = NULL; | ||
| 420 | return -ENODEV; | ||
| 421 | } | ||
| 422 | |||
| 423 | dev_info(&host->dev, "Using internal DMA controller.\n"); | ||
| 424 | |||
| 425 | /* Forward link the descriptor list */ | 418 | /* Forward link the descriptor list */ |
| 426 | for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) | 419 | for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) |
| 427 | p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); | 420 | p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); |
| @@ -476,7 +469,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host, | |||
| 476 | return -EINVAL; | 469 | return -EINVAL; |
| 477 | } | 470 | } |
| 478 | 471 | ||
| 479 | sg_len = dma_map_sg(&host->dev, | 472 | sg_len = dma_map_sg(host->dev, |
| 480 | data->sg, | 473 | data->sg, |
| 481 | data->sg_len, | 474 | data->sg_len, |
| 482 | dw_mci_get_dma_dir(data)); | 475 | dw_mci_get_dma_dir(data)); |
| @@ -519,7 +512,7 @@ static void dw_mci_post_req(struct mmc_host *mmc, | |||
| 519 | return; | 512 | return; |
| 520 | 513 | ||
| 521 | if (data->host_cookie) | 514 | if (data->host_cookie) |
| 522 | dma_unmap_sg(&slot->host->dev, | 515 | dma_unmap_sg(slot->host->dev, |
| 523 | data->sg, | 516 | data->sg, |
| 524 | data->sg_len, | 517 | data->sg_len, |
| 525 | dw_mci_get_dma_dir(data)); | 518 | dw_mci_get_dma_dir(data)); |
| @@ -545,7 +538,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) | |||
| 545 | 538 | ||
| 546 | host->using_dma = 1; | 539 | host->using_dma = 1; |
| 547 | 540 | ||
| 548 | dev_vdbg(&host->dev, | 541 | dev_vdbg(host->dev, |
| 549 | "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", | 542 | "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", |
| 550 | (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, | 543 | (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, |
| 551 | sg_len); | 544 | sg_len); |
| @@ -814,6 +807,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 814 | slot->clock = ios->clock; | 807 | slot->clock = ios->clock; |
| 815 | } | 808 | } |
| 816 | 809 | ||
| 810 | if (slot->host->drv_data->set_ios) | ||
| 811 | slot->host->drv_data->set_ios(slot->host, ios); | ||
| 812 | |||
| 817 | switch (ios->power_mode) { | 813 | switch (ios->power_mode) { |
| 818 | case MMC_POWER_UP: | 814 | case MMC_POWER_UP: |
| 819 | set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); | 815 | set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); |
| @@ -830,7 +826,9 @@ static int dw_mci_get_ro(struct mmc_host *mmc) | |||
| 830 | struct dw_mci_board *brd = slot->host->pdata; | 826 | struct dw_mci_board *brd = slot->host->pdata; |
| 831 | 827 | ||
| 832 | /* Use platform get_ro function, else try on board write protect */ | 828 | /* Use platform get_ro function, else try on board write protect */ |
| 833 | if (brd->get_ro) | 829 | if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT) |
| 830 | read_only = 0; | ||
| 831 | else if (brd->get_ro) | ||
| 834 | read_only = brd->get_ro(slot->id); | 832 | read_only = brd->get_ro(slot->id); |
| 835 | else | 833 | else |
| 836 | read_only = | 834 | read_only = |
| @@ -939,12 +937,12 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) | |||
| 939 | slot = list_entry(host->queue.next, | 937 | slot = list_entry(host->queue.next, |
| 940 | struct dw_mci_slot, queue_node); | 938 | struct dw_mci_slot, queue_node); |
| 941 | list_del(&slot->queue_node); | 939 | list_del(&slot->queue_node); |
| 942 | dev_vdbg(&host->dev, "list not empty: %s is next\n", | 940 | dev_vdbg(host->dev, "list not empty: %s is next\n", |
| 943 | mmc_hostname(slot->mmc)); | 941 | mmc_hostname(slot->mmc)); |
| 944 | host->state = STATE_SENDING_CMD; | 942 | host->state = STATE_SENDING_CMD; |
| 945 | dw_mci_start_request(host, slot); | 943 | dw_mci_start_request(host, slot); |
| 946 | } else { | 944 | } else { |
| 947 | dev_vdbg(&host->dev, "list empty\n"); | 945 | dev_vdbg(host->dev, "list empty\n"); |
| 948 | host->state = STATE_IDLE; | 946 | host->state = STATE_IDLE; |
| 949 | } | 947 | } |
| 950 | 948 | ||
| @@ -1083,7 +1081,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
| 1083 | data->bytes_xfered = 0; | 1081 | data->bytes_xfered = 0; |
| 1084 | data->error = -ETIMEDOUT; | 1082 | data->error = -ETIMEDOUT; |
| 1085 | } else { | 1083 | } else { |
| 1086 | dev_err(&host->dev, | 1084 | dev_err(host->dev, |
| 1087 | "data FIFO error " | 1085 | "data FIFO error " |
| 1088 | "(status=%08x)\n", | 1086 | "(status=%08x)\n", |
| 1089 | status); | 1087 | status); |
| @@ -1767,12 +1765,60 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
| 1767 | } | 1765 | } |
| 1768 | } | 1766 | } |
| 1769 | 1767 | ||
| 1770 | static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | 1768 | #ifdef CONFIG_OF |
| 1769 | /* given a slot id, find out the device node representing that slot */ | ||
| 1770 | static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | ||
| 1771 | { | ||
| 1772 | struct device_node *np; | ||
| 1773 | const __be32 *addr; | ||
| 1774 | int len; | ||
| 1775 | |||
| 1776 | if (!dev || !dev->of_node) | ||
| 1777 | return NULL; | ||
| 1778 | |||
| 1779 | for_each_child_of_node(dev->of_node, np) { | ||
| 1780 | addr = of_get_property(np, "reg", &len); | ||
| 1781 | if (!addr || (len < sizeof(int))) | ||
| 1782 | continue; | ||
| 1783 | if (be32_to_cpup(addr) == slot) | ||
| 1784 | return np; | ||
| 1785 | } | ||
| 1786 | return NULL; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | /* find out bus-width for a given slot */ | ||
| 1790 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | ||
| 1791 | { | ||
| 1792 | struct device_node *np = dw_mci_of_find_slot_node(dev, slot); | ||
| 1793 | u32 bus_wd = 1; | ||
| 1794 | |||
| 1795 | if (!np) | ||
| 1796 | return 1; | ||
| 1797 | |||
| 1798 | if (of_property_read_u32(np, "bus-width", &bus_wd)) | ||
| 1799 | dev_err(dev, "bus-width property not found, assuming width" | ||
| 1800 | " as 1\n"); | ||
| 1801 | return bus_wd; | ||
| 1802 | } | ||
| 1803 | #else /* CONFIG_OF */ | ||
| 1804 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | ||
| 1805 | { | ||
| 1806 | return 1; | ||
| 1807 | } | ||
| 1808 | static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | ||
| 1809 | { | ||
| 1810 | return NULL; | ||
| 1811 | } | ||
| 1812 | #endif /* CONFIG_OF */ | ||
| 1813 | |||
| 1814 | static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | ||
| 1771 | { | 1815 | { |
| 1772 | struct mmc_host *mmc; | 1816 | struct mmc_host *mmc; |
| 1773 | struct dw_mci_slot *slot; | 1817 | struct dw_mci_slot *slot; |
| 1818 | int ctrl_id, ret; | ||
| 1819 | u8 bus_width; | ||
| 1774 | 1820 | ||
| 1775 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); | 1821 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); |
| 1776 | if (!mmc) | 1822 | if (!mmc) |
| 1777 | return -ENOMEM; | 1823 | return -ENOMEM; |
| 1778 | 1824 | ||
| @@ -1780,6 +1826,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
| 1780 | slot->id = id; | 1826 | slot->id = id; |
| 1781 | slot->mmc = mmc; | 1827 | slot->mmc = mmc; |
| 1782 | slot->host = host; | 1828 | slot->host = host; |
| 1829 | host->slot[id] = slot; | ||
| 1783 | 1830 | ||
| 1784 | mmc->ops = &dw_mci_ops; | 1831 | mmc->ops = &dw_mci_ops; |
| 1785 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); | 1832 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); |
| @@ -1800,21 +1847,44 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
| 1800 | if (host->pdata->caps) | 1847 | if (host->pdata->caps) |
| 1801 | mmc->caps = host->pdata->caps; | 1848 | mmc->caps = host->pdata->caps; |
| 1802 | 1849 | ||
| 1850 | if (host->dev->of_node) { | ||
| 1851 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
| 1852 | if (ctrl_id < 0) | ||
| 1853 | ctrl_id = 0; | ||
| 1854 | } else { | ||
| 1855 | ctrl_id = to_platform_device(host->dev)->id; | ||
| 1856 | } | ||
| 1857 | if (host->drv_data && host->drv_data->caps) | ||
| 1858 | mmc->caps |= host->drv_data->caps[ctrl_id]; | ||
| 1859 | |||
| 1803 | if (host->pdata->caps2) | 1860 | if (host->pdata->caps2) |
| 1804 | mmc->caps2 = host->pdata->caps2; | 1861 | mmc->caps2 = host->pdata->caps2; |
| 1805 | 1862 | ||
| 1806 | if (host->pdata->get_bus_wd) | 1863 | if (host->pdata->get_bus_wd) |
| 1807 | if (host->pdata->get_bus_wd(slot->id) >= 4) | 1864 | bus_width = host->pdata->get_bus_wd(slot->id); |
| 1808 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 1865 | else if (host->dev->of_node) |
| 1866 | bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id); | ||
| 1867 | else | ||
| 1868 | bus_width = 1; | ||
| 1869 | |||
| 1870 | if (host->drv_data->setup_bus) { | ||
| 1871 | struct device_node *slot_np; | ||
| 1872 | slot_np = dw_mci_of_find_slot_node(host->dev, slot->id); | ||
| 1873 | ret = host->drv_data->setup_bus(host, slot_np, bus_width); | ||
| 1874 | if (ret) | ||
| 1875 | goto err_setup_bus; | ||
| 1876 | } | ||
| 1877 | |||
| 1878 | switch (bus_width) { | ||
| 1879 | case 8: | ||
| 1880 | mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
| 1881 | case 4: | ||
| 1882 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
| 1883 | } | ||
| 1809 | 1884 | ||
| 1810 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) | 1885 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) |
| 1811 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; | 1886 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
| 1812 | 1887 | ||
| 1813 | if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) | ||
| 1814 | mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; | ||
| 1815 | else | ||
| 1816 | mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; | ||
| 1817 | |||
| 1818 | if (host->pdata->blk_settings) { | 1888 | if (host->pdata->blk_settings) { |
| 1819 | mmc->max_segs = host->pdata->blk_settings->max_segs; | 1889 | mmc->max_segs = host->pdata->blk_settings->max_segs; |
| 1820 | mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; | 1890 | mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; |
| @@ -1850,7 +1920,6 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
| 1850 | else | 1920 | else |
| 1851 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); | 1921 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); |
| 1852 | 1922 | ||
| 1853 | host->slot[id] = slot; | ||
| 1854 | mmc_add_host(mmc); | 1923 | mmc_add_host(mmc); |
| 1855 | 1924 | ||
| 1856 | #if defined(CONFIG_DEBUG_FS) | 1925 | #if defined(CONFIG_DEBUG_FS) |
| @@ -1867,6 +1936,10 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
| 1867 | queue_work(host->card_workqueue, &host->card_work); | 1936 | queue_work(host->card_workqueue, &host->card_work); |
| 1868 | 1937 | ||
| 1869 | return 0; | 1938 | return 0; |
| 1939 | |||
| 1940 | err_setup_bus: | ||
| 1941 | mmc_free_host(mmc); | ||
| 1942 | return -EINVAL; | ||
| 1870 | } | 1943 | } |
| 1871 | 1944 | ||
| 1872 | static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) | 1945 | static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) |
| @@ -1884,10 +1957,10 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) | |||
| 1884 | static void dw_mci_init_dma(struct dw_mci *host) | 1957 | static void dw_mci_init_dma(struct dw_mci *host) |
| 1885 | { | 1958 | { |
| 1886 | /* Alloc memory for sg translation */ | 1959 | /* Alloc memory for sg translation */ |
| 1887 | host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, | 1960 | host->sg_cpu = dma_alloc_coherent(host->dev, PAGE_SIZE, |
| 1888 | &host->sg_dma, GFP_KERNEL); | 1961 | &host->sg_dma, GFP_KERNEL); |
| 1889 | if (!host->sg_cpu) { | 1962 | if (!host->sg_cpu) { |
| 1890 | dev_err(&host->dev, "%s: could not alloc DMA memory\n", | 1963 | dev_err(host->dev, "%s: could not alloc DMA memory\n", |
| 1891 | __func__); | 1964 | __func__); |
| 1892 | goto no_dma; | 1965 | goto no_dma; |
| 1893 | } | 1966 | } |
| @@ -1895,6 +1968,7 @@ static void dw_mci_init_dma(struct dw_mci *host) | |||
| 1895 | /* Determine which DMA interface to use */ | 1968 | /* Determine which DMA interface to use */ |
| 1896 | #ifdef CONFIG_MMC_DW_IDMAC | 1969 | #ifdef CONFIG_MMC_DW_IDMAC |
| 1897 | host->dma_ops = &dw_mci_idmac_ops; | 1970 | host->dma_ops = &dw_mci_idmac_ops; |
| 1971 | dev_info(&host->dev, "Using internal DMA controller.\n"); | ||
| 1898 | #endif | 1972 | #endif |
| 1899 | 1973 | ||
| 1900 | if (!host->dma_ops) | 1974 | if (!host->dma_ops) |
| @@ -1903,12 +1977,12 @@ static void dw_mci_init_dma(struct dw_mci *host) | |||
| 1903 | if (host->dma_ops->init && host->dma_ops->start && | 1977 | if (host->dma_ops->init && host->dma_ops->start && |
| 1904 | host->dma_ops->stop && host->dma_ops->cleanup) { | 1978 | host->dma_ops->stop && host->dma_ops->cleanup) { |
| 1905 | if (host->dma_ops->init(host)) { | 1979 | if (host->dma_ops->init(host)) { |
| 1906 | dev_err(&host->dev, "%s: Unable to initialize " | 1980 | dev_err(host->dev, "%s: Unable to initialize " |
| 1907 | "DMA Controller.\n", __func__); | 1981 | "DMA Controller.\n", __func__); |
| 1908 | goto no_dma; | 1982 | goto no_dma; |
| 1909 | } | 1983 | } |
| 1910 | } else { | 1984 | } else { |
| 1911 | dev_err(&host->dev, "DMA initialization not found.\n"); | 1985 | dev_err(host->dev, "DMA initialization not found.\n"); |
| 1912 | goto no_dma; | 1986 | goto no_dma; |
| 1913 | } | 1987 | } |
| 1914 | 1988 | ||
| @@ -1916,7 +1990,7 @@ static void dw_mci_init_dma(struct dw_mci *host) | |||
| 1916 | return; | 1990 | return; |
| 1917 | 1991 | ||
| 1918 | no_dma: | 1992 | no_dma: |
| 1919 | dev_info(&host->dev, "Using PIO mode.\n"); | 1993 | dev_info(host->dev, "Using PIO mode.\n"); |
| 1920 | host->use_dma = 0; | 1994 | host->use_dma = 0; |
| 1921 | return; | 1995 | return; |
| 1922 | } | 1996 | } |
| @@ -1942,30 +2016,133 @@ static bool mci_wait_reset(struct device *dev, struct dw_mci *host) | |||
| 1942 | return false; | 2016 | return false; |
| 1943 | } | 2017 | } |
| 1944 | 2018 | ||
| 2019 | #ifdef CONFIG_OF | ||
| 2020 | static struct dw_mci_of_quirks { | ||
| 2021 | char *quirk; | ||
| 2022 | int id; | ||
| 2023 | } of_quirks[] = { | ||
| 2024 | { | ||
| 2025 | .quirk = "supports-highspeed", | ||
| 2026 | .id = DW_MCI_QUIRK_HIGHSPEED, | ||
| 2027 | }, { | ||
| 2028 | .quirk = "broken-cd", | ||
| 2029 | .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, | ||
| 2030 | }, | ||
| 2031 | }; | ||
| 2032 | |||
| 2033 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | ||
| 2034 | { | ||
| 2035 | struct dw_mci_board *pdata; | ||
| 2036 | struct device *dev = host->dev; | ||
| 2037 | struct device_node *np = dev->of_node; | ||
| 2038 | int idx, ret; | ||
| 2039 | |||
| 2040 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
| 2041 | if (!pdata) { | ||
| 2042 | dev_err(dev, "could not allocate memory for pdata\n"); | ||
| 2043 | return ERR_PTR(-ENOMEM); | ||
| 2044 | } | ||
| 2045 | |||
| 2046 | /* find out number of slots supported */ | ||
| 2047 | if (of_property_read_u32(dev->of_node, "num-slots", | ||
| 2048 | &pdata->num_slots)) { | ||
| 2049 | dev_info(dev, "num-slots property not found, " | ||
| 2050 | "assuming 1 slot is available\n"); | ||
| 2051 | pdata->num_slots = 1; | ||
| 2052 | } | ||
| 2053 | |||
| 2054 | /* get quirks */ | ||
| 2055 | for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++) | ||
| 2056 | if (of_get_property(np, of_quirks[idx].quirk, NULL)) | ||
| 2057 | pdata->quirks |= of_quirks[idx].id; | ||
| 2058 | |||
| 2059 | if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) | ||
| 2060 | dev_info(dev, "fifo-depth property not found, using " | ||
| 2061 | "value of FIFOTH register as default\n"); | ||
| 2062 | |||
| 2063 | of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); | ||
| 2064 | |||
| 2065 | if (host->drv_data->parse_dt) { | ||
| 2066 | ret = host->drv_data->parse_dt(host); | ||
| 2067 | if (ret) | ||
| 2068 | return ERR_PTR(ret); | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | return pdata; | ||
| 2072 | } | ||
| 2073 | |||
| 2074 | #else /* CONFIG_OF */ | ||
| 2075 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | ||
| 2076 | { | ||
| 2077 | return ERR_PTR(-EINVAL); | ||
| 2078 | } | ||
| 2079 | #endif /* CONFIG_OF */ | ||
| 2080 | |||
| 1945 | int dw_mci_probe(struct dw_mci *host) | 2081 | int dw_mci_probe(struct dw_mci *host) |
| 1946 | { | 2082 | { |
| 1947 | int width, i, ret = 0; | 2083 | int width, i, ret = 0; |
| 1948 | u32 fifo_size; | 2084 | u32 fifo_size; |
| 2085 | int init_slots = 0; | ||
| 1949 | 2086 | ||
| 1950 | if (!host->pdata || !host->pdata->init) { | 2087 | if (!host->pdata) { |
| 1951 | dev_err(&host->dev, | 2088 | host->pdata = dw_mci_parse_dt(host); |
| 1952 | "Platform data must supply init function\n"); | 2089 | if (IS_ERR(host->pdata)) { |
| 1953 | return -ENODEV; | 2090 | dev_err(host->dev, "platform data not available\n"); |
| 2091 | return -EINVAL; | ||
| 2092 | } | ||
| 1954 | } | 2093 | } |
| 1955 | 2094 | ||
| 1956 | if (!host->pdata->select_slot && host->pdata->num_slots > 1) { | 2095 | if (!host->pdata->select_slot && host->pdata->num_slots > 1) { |
| 1957 | dev_err(&host->dev, | 2096 | dev_err(host->dev, |
| 1958 | "Platform data must supply select_slot function\n"); | 2097 | "Platform data must supply select_slot function\n"); |
| 1959 | return -ENODEV; | 2098 | return -ENODEV; |
| 1960 | } | 2099 | } |
| 1961 | 2100 | ||
| 1962 | if (!host->pdata->bus_hz) { | 2101 | host->biu_clk = clk_get(host->dev, "biu"); |
| 1963 | dev_err(&host->dev, | 2102 | if (IS_ERR(host->biu_clk)) { |
| 2103 | dev_dbg(host->dev, "biu clock not available\n"); | ||
| 2104 | } else { | ||
| 2105 | ret = clk_prepare_enable(host->biu_clk); | ||
| 2106 | if (ret) { | ||
| 2107 | dev_err(host->dev, "failed to enable biu clock\n"); | ||
| 2108 | clk_put(host->biu_clk); | ||
| 2109 | return ret; | ||
| 2110 | } | ||
| 2111 | } | ||
| 2112 | |||
| 2113 | host->ciu_clk = clk_get(host->dev, "ciu"); | ||
| 2114 | if (IS_ERR(host->ciu_clk)) { | ||
| 2115 | dev_dbg(host->dev, "ciu clock not available\n"); | ||
| 2116 | } else { | ||
| 2117 | ret = clk_prepare_enable(host->ciu_clk); | ||
| 2118 | if (ret) { | ||
| 2119 | dev_err(host->dev, "failed to enable ciu clock\n"); | ||
| 2120 | clk_put(host->ciu_clk); | ||
| 2121 | goto err_clk_biu; | ||
| 2122 | } | ||
| 2123 | } | ||
| 2124 | |||
| 2125 | if (IS_ERR(host->ciu_clk)) | ||
| 2126 | host->bus_hz = host->pdata->bus_hz; | ||
| 2127 | else | ||
| 2128 | host->bus_hz = clk_get_rate(host->ciu_clk); | ||
| 2129 | |||
| 2130 | if (host->drv_data->setup_clock) { | ||
| 2131 | ret = host->drv_data->setup_clock(host); | ||
| 2132 | if (ret) { | ||
| 2133 | dev_err(host->dev, | ||
| 2134 | "implementation specific clock setup failed\n"); | ||
| 2135 | goto err_clk_ciu; | ||
| 2136 | } | ||
| 2137 | } | ||
| 2138 | |||
| 2139 | if (!host->bus_hz) { | ||
| 2140 | dev_err(host->dev, | ||
| 1964 | "Platform data must supply bus speed\n"); | 2141 | "Platform data must supply bus speed\n"); |
| 1965 | return -ENODEV; | 2142 | ret = -ENODEV; |
| 2143 | goto err_clk_ciu; | ||
| 1966 | } | 2144 | } |
| 1967 | 2145 | ||
| 1968 | host->bus_hz = host->pdata->bus_hz; | ||
| 1969 | host->quirks = host->pdata->quirks; | 2146 | host->quirks = host->pdata->quirks; |
| 1970 | 2147 | ||
| 1971 | spin_lock_init(&host->lock); | 2148 | spin_lock_init(&host->lock); |
| @@ -1998,7 +2175,7 @@ int dw_mci_probe(struct dw_mci *host) | |||
| 1998 | } | 2175 | } |
| 1999 | 2176 | ||
| 2000 | /* Reset all blocks */ | 2177 | /* Reset all blocks */ |
| 2001 | if (!mci_wait_reset(&host->dev, host)) | 2178 | if (!mci_wait_reset(host->dev, host)) |
| 2002 | return -ENODEV; | 2179 | return -ENODEV; |
| 2003 | 2180 | ||
| 2004 | host->dma_ops = host->pdata->dma_ops; | 2181 | host->dma_ops = host->pdata->dma_ops; |
| @@ -2054,10 +2231,18 @@ int dw_mci_probe(struct dw_mci *host) | |||
| 2054 | /* We need at least one slot to succeed */ | 2231 | /* We need at least one slot to succeed */ |
| 2055 | for (i = 0; i < host->num_slots; i++) { | 2232 | for (i = 0; i < host->num_slots; i++) { |
| 2056 | ret = dw_mci_init_slot(host, i); | 2233 | ret = dw_mci_init_slot(host, i); |
| 2057 | if (ret) { | 2234 | if (ret) |
| 2058 | ret = -ENODEV; | 2235 | dev_dbg(host->dev, "slot %d init failed\n", i); |
| 2059 | goto err_init_slot; | 2236 | else |
| 2060 | } | 2237 | init_slots++; |
| 2238 | } | ||
| 2239 | |||
| 2240 | if (init_slots) { | ||
| 2241 | dev_info(host->dev, "%d slots initialized\n", init_slots); | ||
| 2242 | } else { | ||
| 2243 | dev_dbg(host->dev, "attempted to initialize %d slots, " | ||
| 2244 | "but failed on all\n", host->num_slots); | ||
| 2245 | goto err_init_slot; | ||
| 2061 | } | 2246 | } |
| 2062 | 2247 | ||
| 2063 | /* | 2248 | /* |
| @@ -2065,7 +2250,7 @@ int dw_mci_probe(struct dw_mci *host) | |||
| 2065 | * Need to check the version-id and set data-offset for DATA register. | 2250 | * Need to check the version-id and set data-offset for DATA register. |
| 2066 | */ | 2251 | */ |
| 2067 | host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); | 2252 | host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); |
| 2068 | dev_info(&host->dev, "Version ID is %04x\n", host->verid); | 2253 | dev_info(host->dev, "Version ID is %04x\n", host->verid); |
| 2069 | 2254 | ||
| 2070 | if (host->verid < DW_MMC_240A) | 2255 | if (host->verid < DW_MMC_240A) |
| 2071 | host->data_offset = DATA_OFFSET; | 2256 | host->data_offset = DATA_OFFSET; |
| @@ -2082,22 +2267,16 @@ int dw_mci_probe(struct dw_mci *host) | |||
| 2082 | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); | 2267 | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); |
| 2083 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ | 2268 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ |
| 2084 | 2269 | ||
| 2085 | dev_info(&host->dev, "DW MMC controller at irq %d, " | 2270 | dev_info(host->dev, "DW MMC controller at irq %d, " |
| 2086 | "%d bit host data width, " | 2271 | "%d bit host data width, " |
| 2087 | "%u deep fifo\n", | 2272 | "%u deep fifo\n", |
| 2088 | host->irq, width, fifo_size); | 2273 | host->irq, width, fifo_size); |
| 2089 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) | 2274 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) |
| 2090 | dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); | 2275 | dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); |
| 2091 | 2276 | ||
| 2092 | return 0; | 2277 | return 0; |
| 2093 | 2278 | ||
| 2094 | err_init_slot: | 2279 | err_init_slot: |
| 2095 | /* De-init any initialized slots */ | ||
| 2096 | while (i > 0) { | ||
| 2097 | if (host->slot[i]) | ||
| 2098 | dw_mci_cleanup_slot(host->slot[i], i); | ||
| 2099 | i--; | ||
| 2100 | } | ||
| 2101 | free_irq(host->irq, host); | 2280 | free_irq(host->irq, host); |
| 2102 | 2281 | ||
| 2103 | err_workqueue: | 2282 | err_workqueue: |
| @@ -2106,13 +2285,24 @@ err_workqueue: | |||
| 2106 | err_dmaunmap: | 2285 | err_dmaunmap: |
| 2107 | if (host->use_dma && host->dma_ops->exit) | 2286 | if (host->use_dma && host->dma_ops->exit) |
| 2108 | host->dma_ops->exit(host); | 2287 | host->dma_ops->exit(host); |
| 2109 | dma_free_coherent(&host->dev, PAGE_SIZE, | 2288 | dma_free_coherent(host->dev, PAGE_SIZE, |
| 2110 | host->sg_cpu, host->sg_dma); | 2289 | host->sg_cpu, host->sg_dma); |
| 2111 | 2290 | ||
| 2112 | if (host->vmmc) { | 2291 | if (host->vmmc) { |
| 2113 | regulator_disable(host->vmmc); | 2292 | regulator_disable(host->vmmc); |
| 2114 | regulator_put(host->vmmc); | 2293 | regulator_put(host->vmmc); |
| 2115 | } | 2294 | } |
| 2295 | |||
| 2296 | err_clk_ciu: | ||
| 2297 | if (!IS_ERR(host->ciu_clk)) { | ||
| 2298 | clk_disable_unprepare(host->ciu_clk); | ||
| 2299 | clk_put(host->ciu_clk); | ||
| 2300 | } | ||
| 2301 | err_clk_biu: | ||
| 2302 | if (!IS_ERR(host->biu_clk)) { | ||
| 2303 | clk_disable_unprepare(host->biu_clk); | ||
| 2304 | clk_put(host->biu_clk); | ||
| 2305 | } | ||
| 2116 | return ret; | 2306 | return ret; |
| 2117 | } | 2307 | } |
| 2118 | EXPORT_SYMBOL(dw_mci_probe); | 2308 | EXPORT_SYMBOL(dw_mci_probe); |
| @@ -2125,7 +2315,7 @@ void dw_mci_remove(struct dw_mci *host) | |||
| 2125 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ | 2315 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ |
| 2126 | 2316 | ||
| 2127 | for (i = 0; i < host->num_slots; i++) { | 2317 | for (i = 0; i < host->num_slots; i++) { |
| 2128 | dev_dbg(&host->dev, "remove slot %d\n", i); | 2318 | dev_dbg(host->dev, "remove slot %d\n", i); |
| 2129 | if (host->slot[i]) | 2319 | if (host->slot[i]) |
| 2130 | dw_mci_cleanup_slot(host->slot[i], i); | 2320 | dw_mci_cleanup_slot(host->slot[i], i); |
| 2131 | } | 2321 | } |
| @@ -2136,7 +2326,7 @@ void dw_mci_remove(struct dw_mci *host) | |||
| 2136 | 2326 | ||
| 2137 | free_irq(host->irq, host); | 2327 | free_irq(host->irq, host); |
| 2138 | destroy_workqueue(host->card_workqueue); | 2328 | destroy_workqueue(host->card_workqueue); |
| 2139 | dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); | 2329 | dma_free_coherent(host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); |
| 2140 | 2330 | ||
| 2141 | if (host->use_dma && host->dma_ops->exit) | 2331 | if (host->use_dma && host->dma_ops->exit) |
| 2142 | host->dma_ops->exit(host); | 2332 | host->dma_ops->exit(host); |
| @@ -2146,6 +2336,12 @@ void dw_mci_remove(struct dw_mci *host) | |||
| 2146 | regulator_put(host->vmmc); | 2336 | regulator_put(host->vmmc); |
| 2147 | } | 2337 | } |
| 2148 | 2338 | ||
| 2339 | if (!IS_ERR(host->ciu_clk)) | ||
| 2340 | clk_disable_unprepare(host->ciu_clk); | ||
| 2341 | if (!IS_ERR(host->biu_clk)) | ||
| 2342 | clk_disable_unprepare(host->biu_clk); | ||
| 2343 | clk_put(host->ciu_clk); | ||
| 2344 | clk_put(host->biu_clk); | ||
| 2149 | } | 2345 | } |
| 2150 | EXPORT_SYMBOL(dw_mci_remove); | 2346 | EXPORT_SYMBOL(dw_mci_remove); |
| 2151 | 2347 | ||
| @@ -2188,7 +2384,7 @@ int dw_mci_resume(struct dw_mci *host) | |||
| 2188 | if (host->vmmc) | 2384 | if (host->vmmc) |
| 2189 | regulator_enable(host->vmmc); | 2385 | regulator_enable(host->vmmc); |
| 2190 | 2386 | ||
| 2191 | if (!mci_wait_reset(&host->dev, host)) { | 2387 | if (!mci_wait_reset(host->dev, host)) { |
| 2192 | ret = -ENODEV; | 2388 | ret = -ENODEV; |
| 2193 | return ret; | 2389 | return ret; |
| 2194 | } | 2390 | } |
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 15c27e17c23f..53b8fd987e47 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
| @@ -182,4 +182,28 @@ extern int dw_mci_suspend(struct dw_mci *host); | |||
| 182 | extern int dw_mci_resume(struct dw_mci *host); | 182 | extern int dw_mci_resume(struct dw_mci *host); |
| 183 | #endif | 183 | #endif |
| 184 | 184 | ||
| 185 | /** | ||
| 186 | * dw_mci driver data - dw-mshc implementation specific driver data. | ||
| 187 | * @caps: mmc subsystem specified capabilities of the controller(s). | ||
| 188 | * @init: early implementation specific initialization. | ||
| 189 | * @setup_clock: implementation specific clock configuration. | ||
| 190 | * @prepare_command: handle CMD register extensions. | ||
| 191 | * @set_ios: handle bus specific extensions. | ||
| 192 | * @parse_dt: parse implementation specific device tree properties. | ||
| 193 | * @setup_bus: initialize the I/O interface. | ||
| 194 | * | ||
| 195 | * Provide controller implementation specific extensions. Use of this data | ||
| 196 | * structure is entirely optional, and each individual member may likewise | ||
| 197 | * be left unset. | ||
| 198 | */ | ||
| 199 | struct dw_mci_drv_data { | ||
| 200 | unsigned long *caps; | ||
| 201 | int (*init)(struct dw_mci *host); | ||
| 202 | int (*setup_clock)(struct dw_mci *host); | ||
| 203 | void (*prepare_command)(struct dw_mci *host, u32 *cmdr); | ||
| 204 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); | ||
| 205 | int (*parse_dt)(struct dw_mci *host); | ||
| 206 | int (*setup_bus)(struct dw_mci *host, | ||
| 207 | struct device_node *slot_np, u8 bus_width); | ||
| 208 | }; | ||
| 185 | #endif /* _DW_MMC_H_ */ | 209 | #endif /* _DW_MMC_H_ */ |
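To make the role of the new dw_mci_drv_data hooks concrete, the fragment below sketches one way an implementation might populate them: per-controller caps indexed by the "mshc" alias id (or platform device id), plus a prepare_command hook that ORs an extra bit into CMDR. The capability values, the bit position and all example_* names are assumptions for illustration only, not part of this series.

/* Illustrative dw_mci_drv_data instance (assumed values, sketch only). */
#include <linux/bitops.h>
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
#include "dw_mmc.h"

#define EXAMPLE_CMD_USE_HOLD_REG	BIT(29)	/* assumed SoC-specific CMD register bit */

/* Indexed by controller id, i.e. the "mshc" alias or the platform device id. */
static unsigned long example_dwmmc_caps[2] = {
	MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,	/* controller 0 */
	MMC_CAP_CMD23,				/* controller 1 */
};

static void example_dwmmc_prepare_command(struct dw_mci *host, u32 *cmdr)
{
	/* dw_mci_prepare_command() passes the assembled CMDR value by
	 * reference so the implementation can merge in extra bits. */
	*cmdr |= EXAMPLE_CMD_USE_HOLD_REG;
}

static struct dw_mci_drv_data example_dwmmc_drv_data = {
	.caps			= example_dwmmc_caps,
	.prepare_command	= example_dwmmc_prepare_command,
};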
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 273306c68d58..a600eabbd6c3 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
| @@ -1532,20 +1532,7 @@ static struct spi_driver mmc_spi_driver = { | |||
| 1532 | .remove = __devexit_p(mmc_spi_remove), | 1532 | .remove = __devexit_p(mmc_spi_remove), |
| 1533 | }; | 1533 | }; |
| 1534 | 1534 | ||
| 1535 | 1535 | module_spi_driver(mmc_spi_driver); | |
| 1536 | static int __init mmc_spi_init(void) | ||
| 1537 | { | ||
| 1538 | return spi_register_driver(&mmc_spi_driver); | ||
| 1539 | } | ||
| 1540 | module_init(mmc_spi_init); | ||
| 1541 | |||
| 1542 | |||
| 1543 | static void __exit mmc_spi_exit(void) | ||
| 1544 | { | ||
| 1545 | spi_unregister_driver(&mmc_spi_driver); | ||
| 1546 | } | ||
| 1547 | module_exit(mmc_spi_exit); | ||
| 1548 | |||
| 1549 | 1536 | ||
| 1550 | MODULE_AUTHOR("Mike Lavender, David Brownell, " | 1537 | MODULE_AUTHOR("Mike Lavender, David Brownell, " |
| 1551 | "Hans-Peter Nilsson, Jan Nikitenko"); | 1538 | "Hans-Peter Nilsson, Jan Nikitenko"); |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 50ff19a62368..edc3e9baf0e7 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
| @@ -1309,14 +1309,10 @@ static int __devinit mmci_probe(struct amba_device *dev, | |||
| 1309 | goto host_free; | 1309 | goto host_free; |
| 1310 | } | 1310 | } |
| 1311 | 1311 | ||
| 1312 | ret = clk_prepare(host->clk); | 1312 | ret = clk_prepare_enable(host->clk); |
| 1313 | if (ret) | 1313 | if (ret) |
| 1314 | goto clk_free; | 1314 | goto clk_free; |
| 1315 | 1315 | ||
| 1316 | ret = clk_enable(host->clk); | ||
| 1317 | if (ret) | ||
| 1318 | goto clk_unprep; | ||
| 1319 | |||
| 1320 | host->plat = plat; | 1316 | host->plat = plat; |
| 1321 | host->variant = variant; | 1317 | host->variant = variant; |
| 1322 | host->mclk = clk_get_rate(host->clk); | 1318 | host->mclk = clk_get_rate(host->clk); |
| @@ -1515,9 +1511,7 @@ static int __devinit mmci_probe(struct amba_device *dev, | |||
| 1515 | err_gpio_cd: | 1511 | err_gpio_cd: |
| 1516 | iounmap(host->base); | 1512 | iounmap(host->base); |
| 1517 | clk_disable: | 1513 | clk_disable: |
| 1518 | clk_disable(host->clk); | 1514 | clk_disable_unprepare(host->clk); |
| 1519 | clk_unprep: | ||
| 1520 | clk_unprepare(host->clk); | ||
| 1521 | clk_free: | 1515 | clk_free: |
| 1522 | clk_put(host->clk); | 1516 | clk_put(host->clk); |
| 1523 | host_free: | 1517 | host_free: |
| @@ -1564,8 +1558,7 @@ static int __devexit mmci_remove(struct amba_device *dev) | |||
| 1564 | gpio_free(host->gpio_cd); | 1558 | gpio_free(host->gpio_cd); |
| 1565 | 1559 | ||
| 1566 | iounmap(host->base); | 1560 | iounmap(host->base); |
| 1567 | clk_disable(host->clk); | 1561 | clk_disable_unprepare(host->clk); |
| 1568 | clk_unprepare(host->clk); | ||
| 1569 | clk_put(host->clk); | 1562 | clk_put(host->clk); |
| 1570 | 1563 | ||
| 1571 | if (host->vcc) | 1564 | if (host->vcc) |
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 7b1161de01d6..565c2e4fac75 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <mach/hardware.h> | 44 | #include <mach/hardware.h> |
| 45 | 45 | ||
| 46 | #define DRIVER_NAME "mxc-mmc" | 46 | #define DRIVER_NAME "mxc-mmc" |
| 47 | #define MXCMCI_TIMEOUT_MS 10000 | ||
| 47 | 48 | ||
| 48 | #define MMC_REG_STR_STP_CLK 0x00 | 49 | #define MMC_REG_STR_STP_CLK 0x00 |
| 49 | #define MMC_REG_STATUS 0x04 | 50 | #define MMC_REG_STATUS 0x04 |
| @@ -150,6 +151,8 @@ struct mxcmci_host { | |||
| 150 | int dmareq; | 151 | int dmareq; |
| 151 | struct dma_slave_config dma_slave_config; | 152 | struct dma_slave_config dma_slave_config; |
| 152 | struct imx_dma_data dma_data; | 153 | struct imx_dma_data dma_data; |
| 154 | |||
| 155 | struct timer_list watchdog; | ||
| 153 | }; | 156 | }; |
| 154 | 157 | ||
| 155 | static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); | 158 | static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); |
| @@ -271,9 +274,32 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
| 271 | dmaengine_submit(host->desc); | 274 | dmaengine_submit(host->desc); |
| 272 | dma_async_issue_pending(host->dma); | 275 | dma_async_issue_pending(host->dma); |
| 273 | 276 | ||
| 277 | mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); | ||
| 278 | |||
| 274 | return 0; | 279 | return 0; |
| 275 | } | 280 | } |
| 276 | 281 | ||
| 282 | static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); | ||
| 283 | static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); | ||
| 284 | |||
| 285 | static void mxcmci_dma_callback(void *data) | ||
| 286 | { | ||
| 287 | struct mxcmci_host *host = data; | ||
| 288 | u32 stat; | ||
| 289 | |||
| 290 | del_timer(&host->watchdog); | ||
| 291 | |||
| 292 | stat = readl(host->base + MMC_REG_STATUS); | ||
| 293 | writel(stat & ~STATUS_DATA_TRANS_DONE, host->base + MMC_REG_STATUS); | ||
| 294 | |||
| 295 | dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); | ||
| 296 | |||
| 297 | if (stat & STATUS_READ_OP_DONE) | ||
| 298 | writel(STATUS_READ_OP_DONE, host->base + MMC_REG_STATUS); | ||
| 299 | |||
| 300 | mxcmci_data_done(host, stat); | ||
| 301 | } | ||
| 302 | |||
| 277 | static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, | 303 | static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, |
| 278 | unsigned int cmdat) | 304 | unsigned int cmdat) |
| 279 | { | 305 | { |
| @@ -305,8 +331,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, | |||
| 305 | 331 | ||
| 306 | int_cntr = INT_END_CMD_RES_EN; | 332 | int_cntr = INT_END_CMD_RES_EN; |
| 307 | 333 | ||
| 308 | if (mxcmci_use_dma(host)) | 334 | if (mxcmci_use_dma(host)) { |
| 309 | int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; | 335 | if (host->dma_dir == DMA_FROM_DEVICE) { |
| 336 | host->desc->callback = mxcmci_dma_callback; | ||
| 337 | host->desc->callback_param = host; | ||
| 338 | } else { | ||
| 339 | int_cntr |= INT_WRITE_OP_DONE_EN; | ||
| 340 | } | ||
| 341 | } | ||
| 310 | 342 | ||
| 311 | spin_lock_irqsave(&host->lock, flags); | 343 | spin_lock_irqsave(&host->lock, flags); |
| 312 | if (host->use_sdio) | 344 | if (host->use_sdio) |
| @@ -345,11 +377,9 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) | |||
| 345 | struct mmc_data *data = host->data; | 377 | struct mmc_data *data = host->data; |
| 346 | int data_error; | 378 | int data_error; |
| 347 | 379 | ||
| 348 | if (mxcmci_use_dma(host)) { | 380 | if (mxcmci_use_dma(host)) |
| 349 | dmaengine_terminate_all(host->dma); | ||
| 350 | dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, | 381 | dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, |
| 351 | host->dma_dir); | 382 | host->dma_dir); |
| 352 | } | ||
| 353 | 383 | ||
| 354 | if (stat & STATUS_ERR_MASK) { | 384 | if (stat & STATUS_ERR_MASK) { |
| 355 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", | 385 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", |
| @@ -624,8 +654,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) | |||
| 624 | mxcmci_cmd_done(host, stat); | 654 | mxcmci_cmd_done(host, stat); |
| 625 | 655 | ||
| 626 | if (mxcmci_use_dma(host) && | 656 | if (mxcmci_use_dma(host) && |
| 627 | (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) | 657 | (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) { |
| 658 | del_timer(&host->watchdog); | ||
| 628 | mxcmci_data_done(host, stat); | 659 | mxcmci_data_done(host, stat); |
| 660 | } | ||
| 629 | 661 | ||
| 630 | if (host->default_irq_mask && | 662 | if (host->default_irq_mask && |
| 631 | (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) | 663 | (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) |
| @@ -836,6 +868,34 @@ static bool filter(struct dma_chan *chan, void *param) | |||
| 836 | return true; | 868 | return true; |
| 837 | } | 869 | } |
| 838 | 870 | ||
| 871 | static void mxcmci_watchdog(unsigned long data) | ||
| 872 | { | ||
| 873 | struct mmc_host *mmc = (struct mmc_host *)data; | ||
| 874 | struct mxcmci_host *host = mmc_priv(mmc); | ||
| 875 | struct mmc_request *req = host->req; | ||
| 876 | unsigned int stat = readl(host->base + MMC_REG_STATUS); | ||
| 877 | |||
| 878 | if (host->dma_dir == DMA_FROM_DEVICE) { | ||
| 879 | dmaengine_terminate_all(host->dma); | ||
| 880 | dev_err(mmc_dev(host->mmc), | ||
| 881 | "%s: read time out (status = 0x%08x)\n", | ||
| 882 | __func__, stat); | ||
| 883 | } else { | ||
| 884 | dev_err(mmc_dev(host->mmc), | ||
| 885 | "%s: write time out (status = 0x%08x)\n", | ||
| 886 | __func__, stat); | ||
| 887 | mxcmci_softreset(host); | ||
| 888 | } | ||
| 889 | |||
| 890 | /* Mark transfer as erroneous and inform the upper layers */ | ||
| 891 | |||
| 892 | host->data->error = -ETIMEDOUT; | ||
| 893 | host->req = NULL; | ||
| 894 | host->cmd = NULL; | ||
| 895 | host->data = NULL; | ||
| 896 | mmc_request_done(host->mmc, req); | ||
| 897 | } | ||
| 898 | |||
| 839 | static const struct mmc_host_ops mxcmci_ops = { | 899 | static const struct mmc_host_ops mxcmci_ops = { |
| 840 | .request = mxcmci_request, | 900 | .request = mxcmci_request, |
| 841 | .set_ios = mxcmci_set_ios, | 901 | .set_ios = mxcmci_set_ios, |
| @@ -968,6 +1028,10 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
| 968 | 1028 | ||
| 969 | mmc_add_host(mmc); | 1029 | mmc_add_host(mmc); |
| 970 | 1030 | ||
| 1031 | init_timer(&host->watchdog); | ||
| 1032 | host->watchdog.function = &mxcmci_watchdog; | ||
| 1033 | host->watchdog.data = (unsigned long)mmc; | ||
| 1034 | |||
| 971 | return 0; | 1035 | return 0; |
| 972 | 1036 | ||
| 973 | out_free_irq: | 1037 | out_free_irq: |
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index c6259a829544..48ad361613ef 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
| @@ -27,16 +27,10 @@ | |||
| 27 | #include <linux/mmc/card.h> | 27 | #include <linux/mmc/card.h> |
| 28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
| 29 | #include <linux/scatterlist.h> | 29 | #include <linux/scatterlist.h> |
| 30 | #include <linux/i2c/tps65010.h> | ||
| 31 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| 32 | 31 | ||
| 33 | #include <asm/io.h> | ||
| 34 | #include <asm/irq.h> | ||
| 35 | |||
| 36 | #include <plat/mmc.h> | 32 | #include <plat/mmc.h> |
| 37 | #include <asm/gpio.h> | ||
| 38 | #include <plat/dma.h> | 33 | #include <plat/dma.h> |
| 39 | #include <plat/fpga.h> | ||
| 40 | 34 | ||
| 41 | #define OMAP_MMC_REG_CMD 0x00 | 35 | #define OMAP_MMC_REG_CMD 0x00 |
| 42 | #define OMAP_MMC_REG_ARGL 0x01 | 36 | #define OMAP_MMC_REG_ARGL 0x01 |
| @@ -105,7 +99,6 @@ struct mmc_omap_slot { | |||
| 105 | u16 saved_con; | 99 | u16 saved_con; |
| 106 | u16 bus_mode; | 100 | u16 bus_mode; |
| 107 | unsigned int fclk_freq; | 101 | unsigned int fclk_freq; |
| 108 | unsigned powered:1; | ||
| 109 | 102 | ||
| 110 | struct tasklet_struct cover_tasklet; | 103 | struct tasklet_struct cover_tasklet; |
| 111 | struct timer_list cover_timer; | 104 | struct timer_list cover_timer; |
| @@ -137,7 +130,6 @@ struct mmc_omap_host { | |||
| 137 | unsigned int phys_base; | 130 | unsigned int phys_base; |
| 138 | int irq; | 131 | int irq; |
| 139 | unsigned char bus_mode; | 132 | unsigned char bus_mode; |
| 140 | unsigned char hw_bus_mode; | ||
| 141 | unsigned int reg_shift; | 133 | unsigned int reg_shift; |
| 142 | 134 | ||
| 143 | struct work_struct cmd_abort_work; | 135 | struct work_struct cmd_abort_work; |
| @@ -695,22 +687,29 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) | |||
| 695 | host->buffer += nwords; | 687 | host->buffer += nwords; |
| 696 | } | 688 | } |
| 697 | 689 | ||
| 698 | static inline void mmc_omap_report_irq(u16 status) | 690 | #ifdef CONFIG_MMC_DEBUG |
| 691 | static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) | ||
| 699 | { | 692 | { |
| 700 | static const char *mmc_omap_status_bits[] = { | 693 | static const char *mmc_omap_status_bits[] = { |
| 701 | "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", | 694 | "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", |
| 702 | "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" | 695 | "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" |
| 703 | }; | 696 | }; |
| 704 | int i, c = 0; | 697 | int i; |
| 698 | char res[64], *buf = res; | ||
| 699 | |||
| 700 | buf += sprintf(buf, "MMC IRQ 0x%x:", status); | ||
| 705 | 701 | ||
| 706 | for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) | 702 | for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) |
| 707 | if (status & (1 << i)) { | 703 | if (status & (1 << i)) |
| 708 | if (c) | 704 | buf += sprintf(buf, " %s", mmc_omap_status_bits[i]); |
| 709 | printk(" "); | 705 | dev_vdbg(mmc_dev(host->mmc), "%s\n", res); |
| 710 | printk("%s", mmc_omap_status_bits[i]); | ||
| 711 | c++; | ||
| 712 | } | ||
| 713 | } | 706 | } |
| 707 | #else | ||
| 708 | static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) | ||
| 709 | { | ||
| 710 | } | ||
| 711 | #endif | ||
| 712 | |||
| 714 | 713 | ||
| 715 | static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | 714 | static irqreturn_t mmc_omap_irq(int irq, void *dev_id) |
| 716 | { | 715 | { |
| @@ -744,12 +743,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | |||
| 744 | cmd = host->cmd->opcode; | 743 | cmd = host->cmd->opcode; |
| 745 | else | 744 | else |
| 746 | cmd = -1; | 745 | cmd = -1; |
| 747 | #ifdef CONFIG_MMC_DEBUG | ||
| 748 | dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", | 746 | dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", |
| 749 | status, cmd); | 747 | status, cmd); |
| 750 | mmc_omap_report_irq(status); | 748 | mmc_omap_report_irq(host, status); |
| 751 | printk("\n"); | 749 | |
| 752 | #endif | ||
| 753 | if (host->total_bytes_left) { | 750 | if (host->total_bytes_left) { |
| 754 | if ((status & OMAP_MMC_STAT_A_FULL) || | 751 | if ((status & OMAP_MMC_STAT_A_FULL) || |
| 755 | (status & OMAP_MMC_STAT_END_OF_DATA)) | 752 | (status & OMAP_MMC_STAT_END_OF_DATA)) |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 38adc330c007..54bfd0cc106b 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | #include <linux/mmc/core.h> | 35 | #include <linux/mmc/core.h> |
| 36 | #include <linux/mmc/mmc.h> | 36 | #include <linux/mmc/mmc.h> |
| 37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
| 38 | #include <linux/semaphore.h> | ||
| 39 | #include <linux/gpio.h> | 38 | #include <linux/gpio.h> |
| 40 | #include <linux/regulator/consumer.h> | 39 | #include <linux/regulator/consumer.h> |
| 41 | #include <linux/pm_runtime.h> | 40 | #include <linux/pm_runtime.h> |
| @@ -44,7 +43,6 @@ | |||
| 44 | #include <plat/cpu.h> | 43 | #include <plat/cpu.h> |
| 45 | 44 | ||
| 46 | /* OMAP HSMMC Host Controller Registers */ | 45 | /* OMAP HSMMC Host Controller Registers */ |
| 47 | #define OMAP_HSMMC_SYSCONFIG 0x0010 | ||
| 48 | #define OMAP_HSMMC_SYSSTATUS 0x0014 | 46 | #define OMAP_HSMMC_SYSSTATUS 0x0014 |
| 49 | #define OMAP_HSMMC_CON 0x002C | 47 | #define OMAP_HSMMC_CON 0x002C |
| 50 | #define OMAP_HSMMC_BLK 0x0104 | 48 | #define OMAP_HSMMC_BLK 0x0104 |
| @@ -161,8 +159,6 @@ struct omap_hsmmc_host { | |||
| 161 | unsigned int dma_sg_idx; | 159 | unsigned int dma_sg_idx; |
| 162 | unsigned char bus_mode; | 160 | unsigned char bus_mode; |
| 163 | unsigned char power_mode; | 161 | unsigned char power_mode; |
| 164 | u32 *buffer; | ||
| 165 | u32 bytesleft; | ||
| 166 | int suspended; | 162 | int suspended; |
| 167 | int irq; | 163 | int irq; |
| 168 | int use_dma, dma_ch; | 164 | int use_dma, dma_ch; |
| @@ -171,7 +167,6 @@ struct omap_hsmmc_host { | |||
| 171 | int slot_id; | 167 | int slot_id; |
| 172 | int response_busy; | 168 | int response_busy; |
| 173 | int context_loss; | 169 | int context_loss; |
| 174 | int vdd; | ||
| 175 | int protect_card; | 170 | int protect_card; |
| 176 | int reqs_blocked; | 171 | int reqs_blocked; |
| 177 | int use_reg; | 172 | int use_reg; |
| @@ -300,12 +295,12 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
| 300 | struct regulator *reg; | 295 | struct regulator *reg; |
| 301 | int ocr_value = 0; | 296 | int ocr_value = 0; |
| 302 | 297 | ||
| 303 | mmc_slot(host).set_power = omap_hsmmc_set_power; | ||
| 304 | |||
| 305 | reg = regulator_get(host->dev, "vmmc"); | 298 | reg = regulator_get(host->dev, "vmmc"); |
| 306 | if (IS_ERR(reg)) { | 299 | if (IS_ERR(reg)) { |
| 307 | dev_dbg(host->dev, "vmmc regulator missing\n"); | 300 | dev_dbg(host->dev, "vmmc regulator missing\n"); |
| 301 | return PTR_ERR(reg); | ||
| 308 | } else { | 302 | } else { |
| 303 | mmc_slot(host).set_power = omap_hsmmc_set_power; | ||
| 309 | host->vcc = reg; | 304 | host->vcc = reg; |
| 310 | ocr_value = mmc_regulator_get_ocrmask(reg); | 305 | ocr_value = mmc_regulator_get_ocrmask(reg); |
| 311 | if (!mmc_slot(host).ocr_mask) { | 306 | if (!mmc_slot(host).ocr_mask) { |
| @@ -495,7 +490,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) | |||
| 495 | unsigned long regval; | 490 | unsigned long regval; |
| 496 | unsigned long timeout; | 491 | unsigned long timeout; |
| 497 | 492 | ||
| 498 | dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); | 493 | dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); |
| 499 | 494 | ||
| 500 | omap_hsmmc_stop_clock(host); | 495 | omap_hsmmc_stop_clock(host); |
| 501 | 496 | ||
| @@ -579,21 +574,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) | |||
| 579 | if (host->context_loss == context_loss) | 574 | if (host->context_loss == context_loss) |
| 580 | return 1; | 575 | return 1; |
| 581 | 576 | ||
| 582 | /* Wait for hardware reset */ | 577 | if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) |
| 583 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | 578 | return 1; |
| 584 | while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE | ||
| 585 | && time_before(jiffies, timeout)) | ||
| 586 | ; | ||
| 587 | |||
| 588 | /* Do software reset */ | ||
| 589 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); | ||
| 590 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | ||
| 591 | while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE | ||
| 592 | && time_before(jiffies, timeout)) | ||
| 593 | ; | ||
| 594 | |||
| 595 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, | ||
| 596 | OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); | ||
| 597 | 579 | ||
| 598 | if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { | 580 | if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { |
| 599 | if (host->power_mode != MMC_POWER_OFF && | 581 | if (host->power_mode != MMC_POWER_OFF && |
| @@ -745,7 +727,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, | |||
| 745 | { | 727 | { |
| 746 | int cmdreg = 0, resptype = 0, cmdtype = 0; | 728 | int cmdreg = 0, resptype = 0, cmdtype = 0; |
| 747 | 729 | ||
| 748 | dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", | 730 | dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", |
| 749 | mmc_hostname(host->mmc), cmd->opcode, cmd->arg); | 731 | mmc_hostname(host->mmc), cmd->opcode, cmd->arg); |
| 750 | host->cmd = cmd; | 732 | host->cmd = cmd; |
| 751 | 733 | ||
| @@ -934,7 +916,7 @@ static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) | |||
| 934 | buf += len; | 916 | buf += len; |
| 935 | } | 917 | } |
| 936 | 918 | ||
| 937 | dev_dbg(mmc_dev(host->mmc), "%s\n", res); | 919 | dev_vdbg(mmc_dev(host->mmc), "%s\n", res); |
| 938 | } | 920 | } |
| 939 | #else | 921 | #else |
| 940 | static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, | 922 | static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, |
| @@ -981,72 +963,40 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, | |||
| 981 | __func__); | 963 | __func__); |
| 982 | } | 964 | } |
| 983 | 965 | ||
| 966 | static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, int err) | ||
| 967 | { | ||
| 968 | omap_hsmmc_reset_controller_fsm(host, SRC); | ||
| 969 | host->cmd->error = err; | ||
| 970 | |||
| 971 | if (host->data) { | ||
| 972 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
| 973 | omap_hsmmc_dma_cleanup(host, err); | ||
| 974 | } | ||
| 975 | |||
| 976 | } | ||
| 977 | |||
| 984 | static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) | 978 | static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) |
| 985 | { | 979 | { |
| 986 | struct mmc_data *data; | 980 | struct mmc_data *data; |
| 987 | int end_cmd = 0, end_trans = 0; | 981 | int end_cmd = 0, end_trans = 0; |
| 988 | 982 | ||
| 989 | if (!host->req_in_progress) { | ||
| 990 | do { | ||
| 991 | OMAP_HSMMC_WRITE(host->base, STAT, status); | ||
| 992 | /* Flush posted write */ | ||
| 993 | status = OMAP_HSMMC_READ(host->base, STAT); | ||
| 994 | } while (status & INT_EN_MASK); | ||
| 995 | return; | ||
| 996 | } | ||
| 997 | |||
| 998 | data = host->data; | 983 | data = host->data; |
| 999 | dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); | 984 | dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); |
| 1000 | 985 | ||
| 1001 | if (status & ERR) { | 986 | if (status & ERR) { |
| 1002 | omap_hsmmc_dbg_report_irq(host, status); | 987 | omap_hsmmc_dbg_report_irq(host, status); |
| 1003 | if ((status & CMD_TIMEOUT) || | 988 | if (status & (CMD_TIMEOUT | DATA_TIMEOUT)) |
| 1004 | (status & CMD_CRC)) { | 989 | hsmmc_command_incomplete(host, -ETIMEDOUT); |
| 1005 | if (host->cmd) { | 990 | else if (status & (CMD_CRC | DATA_CRC)) |
| 1006 | if (status & CMD_TIMEOUT) { | 991 | hsmmc_command_incomplete(host, -EILSEQ); |
| 1007 | omap_hsmmc_reset_controller_fsm(host, | 992 | |
| 1008 | SRC); | 993 | end_cmd = 1; |
| 1009 | host->cmd->error = -ETIMEDOUT; | 994 | if (host->data || host->response_busy) { |
| 1010 | } else { | 995 | end_trans = 1; |
| 1011 | host->cmd->error = -EILSEQ; | 996 | host->response_busy = 0; |
| 1012 | } | ||
| 1013 | end_cmd = 1; | ||
| 1014 | } | ||
| 1015 | if (host->data || host->response_busy) { | ||
| 1016 | if (host->data) | ||
| 1017 | omap_hsmmc_dma_cleanup(host, | ||
| 1018 | -ETIMEDOUT); | ||
| 1019 | host->response_busy = 0; | ||
| 1020 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
| 1021 | } | ||
| 1022 | } | ||
| 1023 | if ((status & DATA_TIMEOUT) || | ||
| 1024 | (status & DATA_CRC)) { | ||
| 1025 | if (host->data || host->response_busy) { | ||
| 1026 | int err = (status & DATA_TIMEOUT) ? | ||
| 1027 | -ETIMEDOUT : -EILSEQ; | ||
| 1028 | |||
| 1029 | if (host->data) | ||
| 1030 | omap_hsmmc_dma_cleanup(host, err); | ||
| 1031 | else | ||
| 1032 | host->mrq->cmd->error = err; | ||
| 1033 | host->response_busy = 0; | ||
| 1034 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
| 1035 | end_trans = 1; | ||
| 1036 | } | ||
| 1037 | } | ||
| 1038 | if (status & CARD_ERR) { | ||
| 1039 | dev_dbg(mmc_dev(host->mmc), | ||
| 1040 | "Ignoring card err CMD%d\n", host->cmd->opcode); | ||
| 1041 | if (host->cmd) | ||
| 1042 | end_cmd = 1; | ||
| 1043 | if (host->data) | ||
| 1044 | end_trans = 1; | ||
| 1045 | } | 997 | } |
| 1046 | } | 998 | } |
| 1047 | 999 | ||
| 1048 | OMAP_HSMMC_WRITE(host->base, STAT, status); | ||
| 1049 | |||
| 1050 | if (end_cmd || ((status & CC) && host->cmd)) | 1000 | if (end_cmd || ((status & CC) && host->cmd)) |
| 1051 | omap_hsmmc_cmd_done(host, host->cmd); | 1001 | omap_hsmmc_cmd_done(host, host->cmd); |
| 1052 | if ((end_trans || (status & TC)) && host->mrq) | 1002 | if ((end_trans || (status & TC)) && host->mrq) |
| @@ -1062,11 +1012,13 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) | |||
| 1062 | int status; | 1012 | int status; |
| 1063 | 1013 | ||
| 1064 | status = OMAP_HSMMC_READ(host->base, STAT); | 1014 | status = OMAP_HSMMC_READ(host->base, STAT); |
| 1065 | do { | 1015 | while (status & INT_EN_MASK && host->req_in_progress) { |
| 1066 | omap_hsmmc_do_irq(host, status); | 1016 | omap_hsmmc_do_irq(host, status); |
| 1017 | |||
| 1067 | /* Flush posted write */ | 1018 | /* Flush posted write */ |
| 1019 | OMAP_HSMMC_WRITE(host->base, STAT, status); | ||
| 1068 | status = OMAP_HSMMC_READ(host->base, STAT); | 1020 | status = OMAP_HSMMC_READ(host->base, STAT); |
| 1069 | } while (status & INT_EN_MASK); | 1021 | } |
| 1070 | 1022 | ||
| 1071 | return IRQ_HANDLED; | 1023 | return IRQ_HANDLED; |
| 1072 | } | 1024 | } |
| @@ -1501,12 +1453,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1501 | case MMC_POWER_OFF: | 1453 | case MMC_POWER_OFF: |
| 1502 | mmc_slot(host).set_power(host->dev, host->slot_id, | 1454 | mmc_slot(host).set_power(host->dev, host->slot_id, |
| 1503 | 0, 0); | 1455 | 0, 0); |
| 1504 | host->vdd = 0; | ||
| 1505 | break; | 1456 | break; |
| 1506 | case MMC_POWER_UP: | 1457 | case MMC_POWER_UP: |
| 1507 | mmc_slot(host).set_power(host->dev, host->slot_id, | 1458 | mmc_slot(host).set_power(host->dev, host->slot_id, |
| 1508 | 1, ios->vdd); | 1459 | 1, ios->vdd); |
| 1509 | host->vdd = ios->vdd; | ||
| 1510 | break; | 1460 | break; |
| 1511 | case MMC_POWER_ON: | 1461 | case MMC_POWER_ON: |
| 1512 | do_send_init_stream = 1; | 1462 | do_send_init_stream = 1; |
| @@ -1598,10 +1548,6 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) | |||
| 1598 | value = OMAP_HSMMC_READ(host->base, CAPA); | 1548 | value = OMAP_HSMMC_READ(host->base, CAPA); |
| 1599 | OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); | 1549 | OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); |
| 1600 | 1550 | ||
| 1601 | /* Set the controller to AUTO IDLE mode */ | ||
| 1602 | value = OMAP_HSMMC_READ(host->base, SYSCONFIG); | ||
| 1603 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE); | ||
| 1604 | |||
| 1605 | /* Set SD bus power bit */ | 1551 | /* Set SD bus power bit */ |
| 1606 | set_sd_bus_power(host); | 1552 | set_sd_bus_power(host); |
| 1607 | } | 1553 | } |
| @@ -1659,8 +1605,6 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) | |||
| 1659 | 1605 | ||
| 1660 | pm_runtime_get_sync(host->dev); | 1606 | pm_runtime_get_sync(host->dev); |
| 1661 | 1607 | ||
| 1662 | seq_printf(s, "SYSCONFIG:\t0x%08x\n", | ||
| 1663 | OMAP_HSMMC_READ(host->base, SYSCONFIG)); | ||
| 1664 | seq_printf(s, "CON:\t\t0x%08x\n", | 1608 | seq_printf(s, "CON:\t\t0x%08x\n", |
| 1665 | OMAP_HSMMC_READ(host->base, CON)); | 1609 | OMAP_HSMMC_READ(host->base, CON)); |
| 1666 | seq_printf(s, "HCTL:\t\t0x%08x\n", | 1610 | seq_printf(s, "HCTL:\t\t0x%08x\n", |
| @@ -2105,8 +2049,7 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
| 2105 | if (ret) { | 2049 | if (ret) { |
| 2106 | host->suspended = 0; | 2050 | host->suspended = 0; |
| 2107 | if (host->pdata->resume) { | 2051 | if (host->pdata->resume) { |
| 2108 | ret = host->pdata->resume(dev, host->slot_id); | 2052 | if (host->pdata->resume(dev, host->slot_id)) |
| 2109 | if (ret) | ||
| 2110 | dev_dbg(dev, "Unmask interrupt failed\n"); | 2053 | dev_dbg(dev, "Unmask interrupt failed\n"); |
| 2111 | } | 2054 | } |
| 2112 | goto err; | 2055 | goto err; |
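The omap_hsmmc hunks above collapse the separate command/data timeout and CRC branches into a single hsmmc_command_incomplete() helper driven by one errno value. Below is a minimal, self-contained sketch of that status-to-errno classification; the bit positions are placeholders, not the controller's real STAT register layout.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the controller's STAT bits (not the real values). */
#define CMD_TIMEOUT  (1u << 16)
#define CMD_CRC      (1u << 17)
#define DATA_TIMEOUT (1u << 20)
#define DATA_CRC     (1u << 21)

/* Map an error status word to a single errno, as the consolidated helper does:
 * any timeout becomes -ETIMEDOUT, any CRC failure becomes -EILSEQ. */
static int classify_error(unsigned int status)
{
	if (status & (CMD_TIMEOUT | DATA_TIMEOUT))
		return -ETIMEDOUT;
	if (status & (CMD_CRC | DATA_CRC))
		return -EILSEQ;
	return 0;
}

int main(void)
{
	printf("timeout  -> %d\n", classify_error(CMD_TIMEOUT));
	printf("data crc -> %d\n", classify_error(DATA_CRC));
	printf("clean    -> %d\n", classify_error(0));
	return 0;
}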
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index ca3915dac03d..3f9d6d577a91 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
| @@ -30,6 +30,9 @@ | |||
| 30 | #include <linux/regulator/consumer.h> | 30 | #include <linux/regulator/consumer.h> |
| 31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
| 32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
| 33 | #include <linux/of.h> | ||
| 34 | #include <linux/of_gpio.h> | ||
| 35 | #include <linux/of_device.h> | ||
| 33 | 36 | ||
| 34 | #include <asm/sizes.h> | 37 | #include <asm/sizes.h> |
| 35 | 38 | ||
| @@ -573,6 +576,50 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid) | |||
| 573 | return IRQ_HANDLED; | 576 | return IRQ_HANDLED; |
| 574 | } | 577 | } |
| 575 | 578 | ||
| 579 | #ifdef CONFIG_OF | ||
| 580 | static const struct of_device_id pxa_mmc_dt_ids[] = { | ||
| 581 | { .compatible = "marvell,pxa-mmc" }, | ||
| 582 | { } | ||
| 583 | }; | ||
| 584 | |||
| 585 | MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); | ||
| 586 | |||
| 587 | static int __devinit pxamci_of_init(struct platform_device *pdev) | ||
| 588 | { | ||
| 589 | struct device_node *np = pdev->dev.of_node; | ||
| 590 | struct pxamci_platform_data *pdata; | ||
| 591 | u32 tmp; | ||
| 592 | |||
| 593 | if (!np) | ||
| 594 | return 0; | ||
| 595 | |||
| 596 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
| 597 | if (!pdata) | ||
| 598 | return -ENOMEM; | ||
| 599 | |||
| 600 | pdata->gpio_card_detect = | ||
| 601 | of_get_named_gpio(np, "cd-gpios", 0); | ||
| 602 | pdata->gpio_card_ro = | ||
| 603 | of_get_named_gpio(np, "wp-gpios", 0); | ||
| 604 | |||
| 605 | /* pxa-mmc specific */ | ||
| 606 | pdata->gpio_power = | ||
| 607 | of_get_named_gpio(np, "pxa-mmc,gpio-power", 0); | ||
| 608 | |||
| 609 | if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) | ||
| 610 | pdata->detect_delay_ms = tmp; | ||
| 611 | |||
| 612 | pdev->dev.platform_data = pdata; | ||
| 613 | |||
| 614 | return 0; | ||
| 615 | } | ||
| 616 | #else | ||
| 617 | static int __devinit pxamci_of_init(struct platform_device *pdev) | ||
| 618 | { | ||
| 619 | return 0; | ||
| 620 | } | ||
| 621 | #endif | ||
| 622 | |||
| 576 | static int pxamci_probe(struct platform_device *pdev) | 623 | static int pxamci_probe(struct platform_device *pdev) |
| 577 | { | 624 | { |
| 578 | struct mmc_host *mmc; | 625 | struct mmc_host *mmc; |
| @@ -580,6 +627,10 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 580 | struct resource *r, *dmarx, *dmatx; | 627 | struct resource *r, *dmarx, *dmatx; |
| 581 | int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; | 628 | int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; |
| 582 | 629 | ||
| 630 | ret = pxamci_of_init(pdev); | ||
| 631 | if (ret) | ||
| 632 | return ret; | ||
| 633 | |||
| 583 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 634 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 584 | irq = platform_get_irq(pdev, 0); | 635 | irq = platform_get_irq(pdev, 0); |
| 585 | if (!r || irq < 0) | 636 | if (!r || irq < 0) |
| @@ -866,6 +917,7 @@ static struct platform_driver pxamci_driver = { | |||
| 866 | .driver = { | 917 | .driver = { |
| 867 | .name = DRIVER_NAME, | 918 | .name = DRIVER_NAME, |
| 868 | .owner = THIS_MODULE, | 919 | .owner = THIS_MODULE, |
| 920 | .of_match_table = of_match_ptr(pxa_mmc_dt_ids), | ||
| 869 | #ifdef CONFIG_PM | 921 | #ifdef CONFIG_PM |
| 870 | .pm = &pxamci_pm_ops, | 922 | .pm = &pxamci_pm_ops, |
| 871 | #endif | 923 | #endif |
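The pxamci change synthesizes a pxamci_platform_data from the device tree when board code supplies none, with pxa-mmc,detect-delay-ms treated as optional. A small stand-alone sketch of the read-or-keep-default pattern follows; read_u32_prop() is a stub standing in for of_property_read_u32(), and the values are purely illustrative.

#include <stdio.h>

/* Stand-in for of_property_read_u32(): returns 0 and fills *out when the
 * property is present, non-zero otherwise. "Presence" is just a flag here. */
static int read_u32_prop(int present, unsigned int value, unsigned int *out)
{
	if (!present)
		return -1;
	*out = value;
	return 0;
}

int main(void)
{
	unsigned int detect_delay_ms = 0;	/* driver default */
	unsigned int tmp;

	/* Override the default only when the DT property actually exists,
	 * mirroring the pxa-mmc,detect-delay-ms handling above. */
	if (read_u32_prop(1, 200, &tmp) == 0)
		detect_delay_ms = tmp;

	printf("detect_delay_ms = %u\n", detect_delay_ms);
	return 0;
}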
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c index a6e53a1ebb08..90140eb03e36 100644 --- a/drivers/mmc/host/sdhci-dove.c +++ b/drivers/mmc/host/sdhci-dove.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
| 27 | #include <linux/of.h> | ||
| 27 | 28 | ||
| 28 | #include "sdhci-pltfm.h" | 29 | #include "sdhci-pltfm.h" |
| 29 | 30 | ||
| @@ -126,11 +127,18 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev) | |||
| 126 | return sdhci_pltfm_unregister(pdev); | 127 | return sdhci_pltfm_unregister(pdev); |
| 127 | } | 128 | } |
| 128 | 129 | ||
| 130 | static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = { | ||
| 131 | { .compatible = "marvell,dove-sdhci", }, | ||
| 132 | {} | ||
| 133 | }; | ||
| 134 | MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); | ||
| 135 | |||
| 129 | static struct platform_driver sdhci_dove_driver = { | 136 | static struct platform_driver sdhci_dove_driver = { |
| 130 | .driver = { | 137 | .driver = { |
| 131 | .name = "sdhci-dove", | 138 | .name = "sdhci-dove", |
| 132 | .owner = THIS_MODULE, | 139 | .owner = THIS_MODULE, |
| 133 | .pm = SDHCI_PLTFM_PMOPS, | 140 | .pm = SDHCI_PLTFM_PMOPS, |
| 141 | .of_match_table = of_match_ptr(sdhci_dove_of_match_table), | ||
| 134 | }, | 142 | }, |
| 135 | .probe = sdhci_dove_probe, | 143 | .probe = sdhci_dove_probe, |
| 136 | .remove = __devexit_p(sdhci_dove_remove), | 144 | .remove = __devexit_p(sdhci_dove_remove), |
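sdhci-dove gains an of_match_table wired through of_match_ptr(), which degrades to NULL when OF support is compiled out. Below is a toy model of that convention; the struct definitions only mimic the kernel's and CONFIG_OF is defined locally so the example builds on its own.

#include <stdio.h>

#define CONFIG_OF 1	/* comment out to model a build without OF support */

/* Toy model of the of_match_ptr() convention: without OF support the driver
 * structure simply carries a NULL match table. */
#ifdef CONFIG_OF
#define of_match_ptr(tbl) (tbl)
#else
#define of_match_ptr(tbl) NULL
#endif

struct of_device_id { const char *compatible; };

static const struct of_device_id dove_ids[] = {
	{ .compatible = "marvell,dove-sdhci" },
	{ }
};

struct driver {
	const char *name;
	const struct of_device_id *of_match_table;
};

static struct driver sdhci_dove_driver = {
	.name = "sdhci-dove",
	.of_match_table = of_match_ptr(dove_ids),
};

int main(void)
{
	printf("%s matches: %s\n", sdhci_dove_driver.name,
	       sdhci_dove_driver.of_match_table ?
	       sdhci_dove_driver.of_match_table[0].compatible : "(none)");
	return 0;
}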
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index f8eb1fb0c921..ae5fcbfa1eef 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
| @@ -21,6 +21,32 @@ | |||
| 21 | #include "sdhci-pltfm.h" | 21 | #include "sdhci-pltfm.h" |
| 22 | #include "sdhci-esdhc.h" | 22 | #include "sdhci-esdhc.h" |
| 23 | 23 | ||
| 24 | #define VENDOR_V_22 0x12 | ||
| 25 | static u32 esdhc_readl(struct sdhci_host *host, int reg) | ||
| 26 | { | ||
| 27 | u32 ret; | ||
| 28 | |||
| 29 | ret = in_be32(host->ioaddr + reg); | ||
| 30 | /* | ||
| 31 | * The ADMA flag bit in eSDHC is not compatible with the standard | ||
| 32 | * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA | ||
| 33 | * is supported by eSDHC. | ||
| 34 | * Also, on many FSL eSDHC controllers the reset value of the | ||
| 35 | * SDHCI_CAN_DO_ADMA1 field is one, yet some of them cannot do ADMA; | ||
| 36 | * only those whose vendor version is greater than 2.2/0x12 support ADMA. | ||
| 37 | * FSL eSDHC accesses must be 4-byte aligned, so use 0xFC to read the | ||
| 38 | * vendor version number; 0xFE is SDHCI_HOST_VERSION. | ||
| 39 | */ | ||
| 40 | if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { | ||
| 41 | u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); | ||
| 42 | tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; | ||
| 43 | if (tmp > VENDOR_V_22) | ||
| 44 | ret |= SDHCI_CAN_DO_ADMA2; | ||
| 45 | } | ||
| 46 | |||
| 47 | return ret; | ||
| 48 | } | ||
| 49 | |||
| 24 | static u16 esdhc_readw(struct sdhci_host *host, int reg) | 50 | static u16 esdhc_readw(struct sdhci_host *host, int reg) |
| 25 | { | 51 | { |
| 26 | u16 ret; | 52 | u16 ret; |
| @@ -144,7 +170,7 @@ static void esdhc_of_resume(struct sdhci_host *host) | |||
| 144 | #endif | 170 | #endif |
| 145 | 171 | ||
| 146 | static struct sdhci_ops sdhci_esdhc_ops = { | 172 | static struct sdhci_ops sdhci_esdhc_ops = { |
| 147 | .read_l = sdhci_be32bs_readl, | 173 | .read_l = esdhc_readl, |
| 148 | .read_w = esdhc_readw, | 174 | .read_w = esdhc_readw, |
| 149 | .read_b = esdhc_readb, | 175 | .read_b = esdhc_readb, |
| 150 | .write_l = sdhci_be32bs_writel, | 176 | .write_l = sdhci_be32bs_writel, |
| @@ -161,9 +187,13 @@ static struct sdhci_ops sdhci_esdhc_ops = { | |||
| 161 | }; | 187 | }; |
| 162 | 188 | ||
| 163 | static struct sdhci_pltfm_data sdhci_esdhc_pdata = { | 189 | static struct sdhci_pltfm_data sdhci_esdhc_pdata = { |
| 164 | /* card detection could be handled via GPIO */ | 190 | /* |
| 191 | * card detection could be handled via GPIO | ||
| 192 | * eSDHC cannot support End Attribute in NOP ADMA descriptor | ||
| 193 | */ | ||
| 165 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION | 194 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
| 166 | | SDHCI_QUIRK_NO_CARD_NO_RESET, | 195 | | SDHCI_QUIRK_NO_CARD_NO_RESET |
| 196 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | ||
| 167 | .ops = &sdhci_esdhc_ops, | 197 | .ops = &sdhci_esdhc_ops, |
| 168 | }; | 198 | }; |
| 169 | 199 | ||
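The new esdhc_readl() only advertises ADMA2 when the vendor version read at offset 0xFC is newer than 2.2. A stand-alone sketch of that capability fix-up follows; the capability bit values are placeholders, only the vendor-version field layout follows the SDHCI host-version register.

#include <stdio.h>
#include <stdint.h>

/* Vendor version lives in bits 15:8 of the host-version register. The
 * capability masks below are placeholders, not the real SDHCI_CAN_DO_* bits. */
#define VENDOR_VER_MASK   0xff00u
#define VENDOR_VER_SHIFT  8
#define VENDOR_V_22       0x12

#define CAN_DO_ADMA1      (1u << 20)	/* placeholder */
#define CAN_DO_ADMA2      (1u << 19)	/* placeholder */

/* Mirror the esdhc_readl() fixup: if the capabilities claim ADMA1 but the
 * vendor version is 2.2 or older, do not advertise ADMA2. */
static uint32_t fixup_caps(uint32_t caps, uint32_t host_version)
{
	uint32_t vendor = (host_version & VENDOR_VER_MASK) >> VENDOR_VER_SHIFT;

	if ((caps & CAN_DO_ADMA1) && vendor > VENDOR_V_22)
		caps |= CAN_DO_ADMA2;
	return caps;
}

int main(void)
{
	uint32_t caps = CAN_DO_ADMA1;

	printf("vendor 0x13 -> ADMA2 %s\n",
	       fixup_caps(caps, 0x13 << VENDOR_VER_SHIFT) & CAN_DO_ADMA2 ? "yes" : "no");
	printf("vendor 0x12 -> ADMA2 %s\n",
	       fixup_caps(caps, 0x12 << VENDOR_VER_SHIFT) & CAN_DO_ADMA2 ? "yes" : "no");
	return 0;
}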
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 9722d43d6140..4bb74b042a06 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
| @@ -1476,24 +1476,7 @@ static struct pci_driver sdhci_driver = { | |||
| 1476 | }, | 1476 | }, |
| 1477 | }; | 1477 | }; |
| 1478 | 1478 | ||
| 1479 | /*****************************************************************************\ | 1479 | module_pci_driver(sdhci_driver); |
| 1480 | * * | ||
| 1481 | * Driver init/exit * | ||
| 1482 | * * | ||
| 1483 | \*****************************************************************************/ | ||
| 1484 | |||
| 1485 | static int __init sdhci_drv_init(void) | ||
| 1486 | { | ||
| 1487 | return pci_register_driver(&sdhci_driver); | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | static void __exit sdhci_drv_exit(void) | ||
| 1491 | { | ||
| 1492 | pci_unregister_driver(&sdhci_driver); | ||
| 1493 | } | ||
| 1494 | |||
| 1495 | module_init(sdhci_drv_init); | ||
| 1496 | module_exit(sdhci_drv_exit); | ||
| 1497 | 1480 | ||
| 1498 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); | 1481 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
| 1499 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); | 1482 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); |
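sdhci-pci drops its hand-written init/exit functions in favour of module_pci_driver(). Below is a toy model of what such a helper macro buys, using illustrative names and stubs rather than the kernel's macro machinery.

#include <stdio.h>

struct pci_driver { const char *name; };

static int register_driver(struct pci_driver *drv)
{
	printf("registered %s\n", drv->name);
	return 0;
}

static void unregister_driver(struct pci_driver *drv)
{
	printf("unregistered %s\n", drv->name);
}

/* One macro generates the registration and unregistration entry points that
 * used to be written out by hand for every driver. */
#define MODULE_PCI_DRIVER(drv)				\
	static int drv##_init(void)			\
	{ return register_driver(&drv); }		\
	static void drv##_exit(void)			\
	{ unregister_driver(&drv); }

static struct pci_driver sdhci_driver = { .name = "sdhci-pci" };
MODULE_PCI_DRIVER(sdhci_driver)

int main(void)
{
	/* Stand-ins for module load/unload. */
	sdhci_driver_init();
	sdhci_driver_exit();
	return 0;
}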
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index d9a4ef4f1ed0..65551a9709cc 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c | |||
| @@ -75,6 +75,9 @@ void sdhci_get_of_property(struct platform_device *pdev) | |||
| 75 | if (sdhci_of_wp_inverted(np)) | 75 | if (sdhci_of_wp_inverted(np)) |
| 76 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; | 76 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; |
| 77 | 77 | ||
| 78 | if (of_get_property(np, "broken-cd", NULL)) | ||
| 79 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
| 80 | |||
| 78 | if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) | 81 | if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) |
| 79 | host->quirks |= SDHCI_QUIRK_BROKEN_DMA; | 82 | host->quirks |= SDHCI_QUIRK_BROKEN_DMA; |
| 80 | 83 | ||
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index b6ee8857e226..8e63a9c04e31 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c | |||
| @@ -197,7 +197,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) | |||
| 197 | goto err_clk_get; | 197 | goto err_clk_get; |
| 198 | } | 198 | } |
| 199 | pltfm_host->clk = clk; | 199 | pltfm_host->clk = clk; |
| 200 | clk_enable(clk); | 200 | clk_prepare_enable(clk); |
| 201 | 201 | ||
| 202 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA | 202 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA |
| 203 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 203 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
| @@ -239,7 +239,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) | |||
| 239 | return 0; | 239 | return 0; |
| 240 | 240 | ||
| 241 | err_add_host: | 241 | err_add_host: |
| 242 | clk_disable(clk); | 242 | clk_disable_unprepare(clk); |
| 243 | clk_put(clk); | 243 | clk_put(clk); |
| 244 | err_clk_get: | 244 | err_clk_get: |
| 245 | sdhci_pltfm_free(pdev); | 245 | sdhci_pltfm_free(pdev); |
| @@ -255,7 +255,7 @@ static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) | |||
| 255 | 255 | ||
| 256 | sdhci_remove_host(host, 1); | 256 | sdhci_remove_host(host, 1); |
| 257 | 257 | ||
| 258 | clk_disable(pltfm_host->clk); | 258 | clk_disable_unprepare(pltfm_host->clk); |
| 259 | clk_put(pltfm_host->clk); | 259 | clk_put(pltfm_host->clk); |
| 260 | sdhci_pltfm_free(pdev); | 260 | sdhci_pltfm_free(pdev); |
| 261 | kfree(pxa); | 261 | kfree(pxa); |
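sdhci-pxav2 (and several hosts below) move from clk_enable()/clk_disable() to the prepared variants required by the common clock framework. A compact, self-contained model of the prepare/enable contract those bundled helpers encode:

#include <assert.h>
#include <stdio.h>

/* A clock must be prepared before it can be enabled, and both steps are
 * reference counted; the bundled helpers simply pair the operations. */
struct clk {
	int prepare_count;
	int enable_count;
};

static int clk_prepare(struct clk *c)
{
	c->prepare_count++;
	return 0;
}

static void clk_unprepare(struct clk *c)
{
	assert(c->prepare_count > 0);
	c->prepare_count--;
}

static int clk_enable(struct clk *c)
{
	assert(c->prepare_count > 0);	/* enabling an unprepared clock is a bug */
	c->enable_count++;
	return 0;
}

static void clk_disable(struct clk *c)
{
	assert(c->enable_count > 0);
	c->enable_count--;
}

static int clk_prepare_enable(struct clk *c)
{
	int ret = clk_prepare(c);

	if (ret)
		return ret;
	ret = clk_enable(c);
	if (ret)
		clk_unprepare(c);
	return ret;
}

static void clk_disable_unprepare(struct clk *c)
{
	clk_disable(c);
	clk_unprepare(c);
}

int main(void)
{
	struct clk io = { 0, 0 };

	clk_prepare_enable(&io);	/* probe path */
	clk_disable_unprepare(&io);	/* error/remove path */
	printf("balanced: prepare=%d enable=%d\n", io.prepare_count, io.enable_count);
	return 0;
}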
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 07fe3834fe0b..e918a2bb3af1 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
| @@ -24,12 +24,14 @@ | |||
| 24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
| 25 | #include <linux/mmc/card.h> | 25 | #include <linux/mmc/card.h> |
| 26 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
| 27 | #include <linux/mmc/slot-gpio.h> | ||
| 27 | #include <linux/platform_data/pxa_sdhci.h> | 28 | #include <linux/platform_data/pxa_sdhci.h> |
| 28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 29 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
| 30 | #include <linux/module.h> | 31 | #include <linux/module.h> |
| 31 | #include <linux/of.h> | 32 | #include <linux/of.h> |
| 32 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
| 34 | #include <linux/of_gpio.h> | ||
| 33 | 35 | ||
| 34 | #include "sdhci.h" | 36 | #include "sdhci.h" |
| 35 | #include "sdhci-pltfm.h" | 37 | #include "sdhci-pltfm.h" |
| @@ -182,6 +184,7 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) | |||
| 182 | struct device_node *np = dev->of_node; | 184 | struct device_node *np = dev->of_node; |
| 183 | u32 bus_width; | 185 | u32 bus_width; |
| 184 | u32 clk_delay_cycles; | 186 | u32 clk_delay_cycles; |
| 187 | enum of_gpio_flags gpio_flags; | ||
| 185 | 188 | ||
| 186 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 189 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
| 187 | if (!pdata) | 190 | if (!pdata) |
| @@ -198,6 +201,10 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) | |||
| 198 | if (clk_delay_cycles > 0) | 201 | if (clk_delay_cycles > 0) |
| 199 | pdata->clk_delay_cycles = clk_delay_cycles; | 202 | pdata->clk_delay_cycles = clk_delay_cycles; |
| 200 | 203 | ||
| 204 | pdata->ext_cd_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &gpio_flags); | ||
| 205 | if (gpio_flags != OF_GPIO_ACTIVE_LOW) | ||
| 206 | pdata->host_caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; | ||
| 207 | |||
| 201 | return pdata; | 208 | return pdata; |
| 202 | } | 209 | } |
| 203 | #else | 210 | #else |
| @@ -231,14 +238,14 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | |||
| 231 | pltfm_host = sdhci_priv(host); | 238 | pltfm_host = sdhci_priv(host); |
| 232 | pltfm_host->priv = pxa; | 239 | pltfm_host->priv = pxa; |
| 233 | 240 | ||
| 234 | clk = clk_get(dev, "PXA-SDHCLK"); | 241 | clk = clk_get(dev, NULL); |
| 235 | if (IS_ERR(clk)) { | 242 | if (IS_ERR(clk)) { |
| 236 | dev_err(dev, "failed to get io clock\n"); | 243 | dev_err(dev, "failed to get io clock\n"); |
| 237 | ret = PTR_ERR(clk); | 244 | ret = PTR_ERR(clk); |
| 238 | goto err_clk_get; | 245 | goto err_clk_get; |
| 239 | } | 246 | } |
| 240 | pltfm_host->clk = clk; | 247 | pltfm_host->clk = clk; |
| 241 | clk_enable(clk); | 248 | clk_prepare_enable(clk); |
| 242 | 249 | ||
| 243 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 250 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
| 244 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 251 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
| @@ -266,12 +273,25 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | |||
| 266 | host->quirks |= pdata->quirks; | 273 | host->quirks |= pdata->quirks; |
| 267 | if (pdata->host_caps) | 274 | if (pdata->host_caps) |
| 268 | host->mmc->caps |= pdata->host_caps; | 275 | host->mmc->caps |= pdata->host_caps; |
| 276 | if (pdata->host_caps2) | ||
| 277 | host->mmc->caps2 |= pdata->host_caps2; | ||
| 269 | if (pdata->pm_caps) | 278 | if (pdata->pm_caps) |
| 270 | host->mmc->pm_caps |= pdata->pm_caps; | 279 | host->mmc->pm_caps |= pdata->pm_caps; |
| 280 | |||
| 281 | if (gpio_is_valid(pdata->ext_cd_gpio)) { | ||
| 282 | ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio); | ||
| 283 | if (ret) { | ||
| 284 | dev_err(mmc_dev(host->mmc), | ||
| 285 | "failed to allocate card detect gpio\n"); | ||
| 286 | goto err_cd_req; | ||
| 287 | } | ||
| 288 | } | ||
| 271 | } | 289 | } |
| 272 | 290 | ||
| 273 | host->ops = &pxav3_sdhci_ops; | 291 | host->ops = &pxav3_sdhci_ops; |
| 274 | 292 | ||
| 293 | sdhci_get_of_property(pdev); | ||
| 294 | |||
| 275 | ret = sdhci_add_host(host); | 295 | ret = sdhci_add_host(host); |
| 276 | if (ret) { | 296 | if (ret) { |
| 277 | dev_err(&pdev->dev, "failed to add host\n"); | 297 | dev_err(&pdev->dev, "failed to add host\n"); |
| @@ -283,8 +303,10 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | |||
| 283 | return 0; | 303 | return 0; |
| 284 | 304 | ||
| 285 | err_add_host: | 305 | err_add_host: |
| 286 | clk_disable(clk); | 306 | clk_disable_unprepare(clk); |
| 287 | clk_put(clk); | 307 | clk_put(clk); |
| 308 | mmc_gpio_free_cd(host->mmc); | ||
| 309 | err_cd_req: | ||
| 288 | err_clk_get: | 310 | err_clk_get: |
| 289 | sdhci_pltfm_free(pdev); | 311 | sdhci_pltfm_free(pdev); |
| 290 | kfree(pxa); | 312 | kfree(pxa); |
| @@ -296,11 +318,16 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev) | |||
| 296 | struct sdhci_host *host = platform_get_drvdata(pdev); | 318 | struct sdhci_host *host = platform_get_drvdata(pdev); |
| 297 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 319 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| 298 | struct sdhci_pxa *pxa = pltfm_host->priv; | 320 | struct sdhci_pxa *pxa = pltfm_host->priv; |
| 321 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
| 299 | 322 | ||
| 300 | sdhci_remove_host(host, 1); | 323 | sdhci_remove_host(host, 1); |
| 301 | 324 | ||
| 302 | clk_disable(pltfm_host->clk); | 325 | clk_disable_unprepare(pltfm_host->clk); |
| 303 | clk_put(pltfm_host->clk); | 326 | clk_put(pltfm_host->clk); |
| 327 | |||
| 328 | if (gpio_is_valid(pdata->ext_cd_gpio)) | ||
| 329 | mmc_gpio_free_cd(host->mmc); | ||
| 330 | |||
| 304 | sdhci_pltfm_free(pdev); | 331 | sdhci_pltfm_free(pdev); |
| 305 | kfree(pxa); | 332 | kfree(pxa); |
| 306 | 333 | ||
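sdhci-pxav3 now reads the cd-gpios specifier flags and, when the line is not active-low, marks card detect as active-high via caps2. A tiny sketch of that polarity decision, with placeholder names for the flag and capability values:

#include <stdio.h>

enum gpio_flags { GPIO_ACTIVE_HIGH = 0, GPIO_ACTIVE_LOW = 1 };

#define CAP2_CD_ACTIVE_HIGH (1u << 10)	/* placeholder bit */

/* If the card-detect specifier does not ask for active-low, advertise an
 * active-high card-detect line, as the hunk above does. */
static unsigned int cd_caps_from_flags(enum gpio_flags flags)
{
	unsigned int caps2 = 0;

	if (flags != GPIO_ACTIVE_LOW)
		caps2 |= CAP2_CD_ACTIVE_HIGH;
	return caps2;
}

int main(void)
{
	printf("active-low  specifier -> caps2=0x%x\n", cd_caps_from_flags(GPIO_ACTIVE_LOW));
	printf("active-high specifier -> caps2=0x%x\n", cd_caps_from_flags(GPIO_ACTIVE_HIGH));
	return 0;
}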
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index a50c205ea208..2903949594c6 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
| @@ -34,6 +34,9 @@ | |||
| 34 | 34 | ||
| 35 | #define MAX_BUS_CLK (4) | 35 | #define MAX_BUS_CLK (4) |
| 36 | 36 | ||
| 37 | /* Number of GPIOs used is max data bus width + command and clock lines */ | ||
| 38 | #define NUM_GPIOS(x) (x + 2) | ||
| 39 | |||
| 37 | /** | 40 | /** |
| 38 | * struct sdhci_s3c - S3C SDHCI instance | 41 | * struct sdhci_s3c - S3C SDHCI instance |
| 39 | * @host: The SDHCI host created | 42 | * @host: The SDHCI host created |
| @@ -41,6 +44,7 @@ | |||
| 41 | * @ioarea: The resource created when we claimed the IO area. | 44 | * @ioarea: The resource created when we claimed the IO area. |
| 42 | * @pdata: The platform data for this controller. | 45 | * @pdata: The platform data for this controller. |
| 43 | * @cur_clk: The index of the current bus clock. | 46 | * @cur_clk: The index of the current bus clock. |
| 47 | * @gpios: List of gpio numbers parsed from device tree. | ||
| 44 | * @clk_io: The clock for the internal bus interface. | 48 | * @clk_io: The clock for the internal bus interface. |
| 45 | * @clk_bus: The clocks that are available for the SD/MMC bus clock. | 49 | * @clk_bus: The clocks that are available for the SD/MMC bus clock. |
| 46 | */ | 50 | */ |
| @@ -52,6 +56,7 @@ struct sdhci_s3c { | |||
| 52 | unsigned int cur_clk; | 56 | unsigned int cur_clk; |
| 53 | int ext_cd_irq; | 57 | int ext_cd_irq; |
| 54 | int ext_cd_gpio; | 58 | int ext_cd_gpio; |
| 59 | int *gpios; | ||
| 55 | 60 | ||
| 56 | struct clk *clk_io; | 61 | struct clk *clk_io; |
| 57 | struct clk *clk_bus[MAX_BUS_CLK]; | 62 | struct clk *clk_bus[MAX_BUS_CLK]; |
| @@ -166,7 +171,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost, | |||
| 166 | dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", | 171 | dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", |
| 167 | src, rate, wanted, rate / div); | 172 | src, rate, wanted, rate / div); |
| 168 | 173 | ||
| 169 | return (wanted - (rate / div)); | 174 | return wanted - (rate / div); |
| 170 | } | 175 | } |
| 171 | 176 | ||
| 172 | /** | 177 | /** |
| @@ -203,10 +208,12 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) | |||
| 203 | best_src, clock, best); | 208 | best_src, clock, best); |
| 204 | 209 | ||
| 205 | /* select the new clock source */ | 210 | /* select the new clock source */ |
| 206 | |||
| 207 | if (ourhost->cur_clk != best_src) { | 211 | if (ourhost->cur_clk != best_src) { |
| 208 | struct clk *clk = ourhost->clk_bus[best_src]; | 212 | struct clk *clk = ourhost->clk_bus[best_src]; |
| 209 | 213 | ||
| 214 | clk_enable(clk); | ||
| 215 | clk_disable(ourhost->clk_bus[ourhost->cur_clk]); | ||
| 216 | |||
| 210 | /* turn clock off to card before changing clock source */ | 217 | /* turn clock off to card before changing clock source */ |
| 211 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); | 218 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); |
| 212 | 219 | ||
| @@ -288,6 +295,7 @@ static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) | |||
| 288 | static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) | 295 | static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) |
| 289 | { | 296 | { |
| 290 | struct sdhci_s3c *ourhost = to_s3c(host); | 297 | struct sdhci_s3c *ourhost = to_s3c(host); |
| 298 | struct device *dev = &ourhost->pdev->dev; | ||
| 291 | unsigned long timeout; | 299 | unsigned long timeout; |
| 292 | u16 clk = 0; | 300 | u16 clk = 0; |
| 293 | 301 | ||
| @@ -309,8 +317,8 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) | |||
| 309 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) | 317 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) |
| 310 | & SDHCI_CLOCK_INT_STABLE)) { | 318 | & SDHCI_CLOCK_INT_STABLE)) { |
| 311 | if (timeout == 0) { | 319 | if (timeout == 0) { |
| 312 | printk(KERN_ERR "%s: Internal clock never " | 320 | dev_err(dev, "%s: Internal clock never stabilised.\n", |
| 313 | "stabilised.\n", mmc_hostname(host->mmc)); | 321 | mmc_hostname(host->mmc)); |
| 314 | return; | 322 | return; |
| 315 | } | 323 | } |
| 316 | timeout--; | 324 | timeout--; |
| @@ -404,7 +412,9 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) | |||
| 404 | if (sc->ext_cd_irq && | 412 | if (sc->ext_cd_irq && |
| 405 | request_threaded_irq(sc->ext_cd_irq, NULL, | 413 | request_threaded_irq(sc->ext_cd_irq, NULL, |
| 406 | sdhci_s3c_gpio_card_detect_thread, | 414 | sdhci_s3c_gpio_card_detect_thread, |
| 407 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 415 | IRQF_TRIGGER_RISING | |
| 416 | IRQF_TRIGGER_FALLING | | ||
| 417 | IRQF_ONESHOT, | ||
| 408 | dev_name(dev), sc) == 0) { | 418 | dev_name(dev), sc) == 0) { |
| 409 | int status = gpio_get_value(sc->ext_cd_gpio); | 419 | int status = gpio_get_value(sc->ext_cd_gpio); |
| 410 | if (pdata->ext_cd_gpio_invert) | 420 | if (pdata->ext_cd_gpio_invert) |
| @@ -419,9 +429,121 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) | |||
| 419 | } | 429 | } |
| 420 | } | 430 | } |
| 421 | 431 | ||
| 432 | #ifdef CONFIG_OF | ||
| 433 | static int __devinit sdhci_s3c_parse_dt(struct device *dev, | ||
| 434 | struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) | ||
| 435 | { | ||
| 436 | struct device_node *node = dev->of_node; | ||
| 437 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
| 438 | u32 max_width; | ||
| 439 | int gpio, cnt, ret; | ||
| 440 | |||
| 441 | /* if the bus-width property is not specified, assume width as 1 */ | ||
| 442 | if (of_property_read_u32(node, "bus-width", &max_width)) | ||
| 443 | max_width = 1; | ||
| 444 | pdata->max_width = max_width; | ||
| 445 | |||
| 446 | ourhost->gpios = devm_kzalloc(dev, NUM_GPIOS(pdata->max_width) * | ||
| 447 | sizeof(int), GFP_KERNEL); | ||
| 448 | if (!ourhost->gpios) | ||
| 449 | return -ENOMEM; | ||
| 450 | |||
| 451 | /* get the card detection method */ | ||
| 452 | if (of_get_property(node, "broken-cd", 0)) { | ||
| 453 | pdata->cd_type = S3C_SDHCI_CD_NONE; | ||
| 454 | goto setup_bus; | ||
| 455 | } | ||
| 456 | |||
| 457 | if (of_get_property(node, "non-removable", 0)) { | ||
| 458 | pdata->cd_type = S3C_SDHCI_CD_PERMANENT; | ||
| 459 | goto setup_bus; | ||
| 460 | } | ||
| 461 | |||
| 462 | gpio = of_get_named_gpio(node, "cd-gpios", 0); | ||
| 463 | if (gpio_is_valid(gpio)) { | ||
| 464 | pdata->cd_type = S3C_SDHCI_CD_GPIO; | ||
| 465 | goto found_cd; | ||
| 466 | } else if (gpio != -ENOENT) { | ||
| 467 | dev_err(dev, "invalid card detect gpio specified\n"); | ||
| 468 | return -EINVAL; | ||
| 469 | } | ||
| 470 | |||
| 471 | gpio = of_get_named_gpio(node, "samsung,cd-pinmux-gpio", 0); | ||
| 472 | if (gpio_is_valid(gpio)) { | ||
| 473 | pdata->cd_type = S3C_SDHCI_CD_INTERNAL; | ||
| 474 | goto found_cd; | ||
| 475 | } else if (gpio != -ENOENT) { | ||
| 476 | dev_err(dev, "invalid card detect gpio specified\n"); | ||
| 477 | return -EINVAL; | ||
| 478 | } | ||
| 479 | |||
| 480 | dev_info(dev, "assuming no card detect line available\n"); | ||
| 481 | pdata->cd_type = S3C_SDHCI_CD_NONE; | ||
| 482 | |||
| 483 | found_cd: | ||
| 484 | if (pdata->cd_type == S3C_SDHCI_CD_GPIO) { | ||
| 485 | pdata->ext_cd_gpio = gpio; | ||
| 486 | ourhost->ext_cd_gpio = -1; | ||
| 487 | if (of_get_property(node, "cd-inverted", NULL)) | ||
| 488 | pdata->ext_cd_gpio_invert = 1; | ||
| 489 | } else if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) { | ||
| 490 | ret = gpio_request(gpio, "sdhci-cd"); | ||
| 491 | if (ret) { | ||
| 492 | dev_err(dev, "card detect gpio request failed\n"); | ||
| 493 | return -EINVAL; | ||
| 494 | } | ||
| 495 | ourhost->ext_cd_gpio = gpio; | ||
| 496 | } | ||
| 497 | |||
| 498 | setup_bus: | ||
| 499 | /* get the gpios for command, clock and data lines */ | ||
| 500 | for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) { | ||
| 501 | gpio = of_get_gpio(node, cnt); | ||
| 502 | if (!gpio_is_valid(gpio)) { | ||
| 503 | dev_err(dev, "invalid gpio[%d]\n", cnt); | ||
| 504 | goto err_free_dt_cd_gpio; | ||
| 505 | } | ||
| 506 | ourhost->gpios[cnt] = gpio; | ||
| 507 | } | ||
| 508 | |||
| 509 | for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) { | ||
| 510 | ret = gpio_request(ourhost->gpios[cnt], "sdhci-gpio"); | ||
| 511 | if (ret) { | ||
| 512 | dev_err(dev, "gpio[%d] request failed\n", cnt); | ||
| 513 | goto err_free_dt_gpios; | ||
| 514 | } | ||
| 515 | } | ||
| 516 | |||
| 517 | return 0; | ||
| 518 | |||
| 519 | err_free_dt_gpios: | ||
| 520 | while (--cnt >= 0) | ||
| 521 | gpio_free(ourhost->gpios[cnt]); | ||
| 522 | err_free_dt_cd_gpio: | ||
| 523 | if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) | ||
| 524 | gpio_free(ourhost->ext_cd_gpio); | ||
| 525 | return -EINVAL; | ||
| 526 | } | ||
| 527 | #else | ||
| 528 | static int __devinit sdhci_s3c_parse_dt(struct device *dev, | ||
| 529 | struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) | ||
| 530 | { | ||
| 531 | return -EINVAL; | ||
| 532 | } | ||
| 533 | #endif | ||
| 534 | |||
| 535 | static const struct of_device_id sdhci_s3c_dt_match[]; | ||
| 536 | |||
| 422 | static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( | 537 | static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( |
| 423 | struct platform_device *pdev) | 538 | struct platform_device *pdev) |
| 424 | { | 539 | { |
| 540 | #ifdef CONFIG_OF | ||
| 541 | if (pdev->dev.of_node) { | ||
| 542 | const struct of_device_id *match; | ||
| 543 | match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node); | ||
| 544 | return (struct sdhci_s3c_drv_data *)match->data; | ||
| 545 | } | ||
| 546 | #endif | ||
| 425 | return (struct sdhci_s3c_drv_data *) | 547 | return (struct sdhci_s3c_drv_data *) |
| 426 | platform_get_device_id(pdev)->driver_data; | 548 | platform_get_device_id(pdev)->driver_data; |
| 427 | } | 549 | } |
| @@ -436,7 +558,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 436 | struct resource *res; | 558 | struct resource *res; |
| 437 | int ret, irq, ptr, clks; | 559 | int ret, irq, ptr, clks; |
| 438 | 560 | ||
| 439 | if (!pdev->dev.platform_data) { | 561 | if (!pdev->dev.platform_data && !pdev->dev.of_node) { |
| 440 | dev_err(dev, "no device data specified\n"); | 562 | dev_err(dev, "no device data specified\n"); |
| 441 | return -ENOENT; | 563 | return -ENOENT; |
| 442 | } | 564 | } |
| @@ -452,21 +574,28 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 452 | dev_err(dev, "sdhci_alloc_host() failed\n"); | 574 | dev_err(dev, "sdhci_alloc_host() failed\n"); |
| 453 | return PTR_ERR(host); | 575 | return PTR_ERR(host); |
| 454 | } | 576 | } |
| 577 | sc = sdhci_priv(host); | ||
| 455 | 578 | ||
| 456 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 579 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
| 457 | if (!pdata) { | 580 | if (!pdata) { |
| 458 | ret = -ENOMEM; | 581 | ret = -ENOMEM; |
| 459 | goto err_io_clk; | 582 | goto err_pdata; |
| 583 | } | ||
| 584 | |||
| 585 | if (pdev->dev.of_node) { | ||
| 586 | ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); | ||
| 587 | if (ret) | ||
| 588 | goto err_pdata; | ||
| 589 | } else { | ||
| 590 | memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); | ||
| 591 | sc->ext_cd_gpio = -1; /* invalid gpio number */ | ||
| 460 | } | 592 | } |
| 461 | memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); | ||
| 462 | 593 | ||
| 463 | drv_data = sdhci_s3c_get_driver_data(pdev); | 594 | drv_data = sdhci_s3c_get_driver_data(pdev); |
| 464 | sc = sdhci_priv(host); | ||
| 465 | 595 | ||
| 466 | sc->host = host; | 596 | sc->host = host; |
| 467 | sc->pdev = pdev; | 597 | sc->pdev = pdev; |
| 468 | sc->pdata = pdata; | 598 | sc->pdata = pdata; |
| 469 | sc->ext_cd_gpio = -1; /* invalid gpio number */ | ||
| 470 | 599 | ||
| 471 | platform_set_drvdata(pdev, host); | 600 | platform_set_drvdata(pdev, host); |
| 472 | 601 | ||
| @@ -486,9 +615,8 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 486 | 615 | ||
| 487 | snprintf(name, 14, "mmc_busclk.%d", ptr); | 616 | snprintf(name, 14, "mmc_busclk.%d", ptr); |
| 488 | clk = clk_get(dev, name); | 617 | clk = clk_get(dev, name); |
| 489 | if (IS_ERR(clk)) { | 618 | if (IS_ERR(clk)) |
| 490 | continue; | 619 | continue; |
| 491 | } | ||
| 492 | 620 | ||
| 493 | clks++; | 621 | clks++; |
| 494 | sc->clk_bus[ptr] = clk; | 622 | sc->clk_bus[ptr] = clk; |
| @@ -499,8 +627,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 499 | */ | 627 | */ |
| 500 | sc->cur_clk = ptr; | 628 | sc->cur_clk = ptr; |
| 501 | 629 | ||
| 502 | clk_enable(clk); | ||
| 503 | |||
| 504 | dev_info(dev, "clock source %d: %s (%ld Hz)\n", | 630 | dev_info(dev, "clock source %d: %s (%ld Hz)\n", |
| 505 | ptr, name, clk_get_rate(clk)); | 631 | ptr, name, clk_get_rate(clk)); |
| 506 | } | 632 | } |
| @@ -511,6 +637,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 511 | goto err_no_busclks; | 637 | goto err_no_busclks; |
| 512 | } | 638 | } |
| 513 | 639 | ||
| 640 | #ifndef CONFIG_PM_RUNTIME | ||
| 641 | clk_enable(sc->clk_bus[sc->cur_clk]); | ||
| 642 | #endif | ||
| 643 | |||
| 514 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 644 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 515 | host->ioaddr = devm_request_and_ioremap(&pdev->dev, res); | 645 | host->ioaddr = devm_request_and_ioremap(&pdev->dev, res); |
| 516 | if (!host->ioaddr) { | 646 | if (!host->ioaddr) { |
| @@ -616,12 +746,17 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 616 | gpio_is_valid(pdata->ext_cd_gpio)) | 746 | gpio_is_valid(pdata->ext_cd_gpio)) |
| 617 | sdhci_s3c_setup_card_detect_gpio(sc); | 747 | sdhci_s3c_setup_card_detect_gpio(sc); |
| 618 | 748 | ||
| 749 | #ifdef CONFIG_PM_RUNTIME | ||
| 750 | clk_disable(sc->clk_io); | ||
| 751 | #endif | ||
| 619 | return 0; | 752 | return 0; |
| 620 | 753 | ||
| 621 | err_req_regs: | 754 | err_req_regs: |
| 755 | #ifndef CONFIG_PM_RUNTIME | ||
| 756 | clk_disable(sc->clk_bus[sc->cur_clk]); | ||
| 757 | #endif | ||
| 622 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { | 758 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { |
| 623 | if (sc->clk_bus[ptr]) { | 759 | if (sc->clk_bus[ptr]) { |
| 624 | clk_disable(sc->clk_bus[ptr]); | ||
| 625 | clk_put(sc->clk_bus[ptr]); | 760 | clk_put(sc->clk_bus[ptr]); |
| 626 | } | 761 | } |
| 627 | } | 762 | } |
| @@ -631,6 +766,12 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 631 | clk_put(sc->clk_io); | 766 | clk_put(sc->clk_io); |
| 632 | 767 | ||
| 633 | err_io_clk: | 768 | err_io_clk: |
| 769 | for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++) | ||
| 770 | gpio_free(sc->gpios[ptr]); | ||
| 771 | if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) | ||
| 772 | gpio_free(sc->ext_cd_gpio); | ||
| 773 | |||
| 774 | err_pdata: | ||
| 634 | sdhci_free_host(host); | 775 | sdhci_free_host(host); |
| 635 | 776 | ||
| 636 | return ret; | 777 | return ret; |
| @@ -638,9 +779,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
| 638 | 779 | ||
| 639 | static int __devexit sdhci_s3c_remove(struct platform_device *pdev) | 780 | static int __devexit sdhci_s3c_remove(struct platform_device *pdev) |
| 640 | { | 781 | { |
| 641 | struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; | ||
| 642 | struct sdhci_host *host = platform_get_drvdata(pdev); | 782 | struct sdhci_host *host = platform_get_drvdata(pdev); |
| 643 | struct sdhci_s3c *sc = sdhci_priv(host); | 783 | struct sdhci_s3c *sc = sdhci_priv(host); |
| 784 | struct s3c_sdhci_platdata *pdata = sc->pdata; | ||
| 644 | int ptr; | 785 | int ptr; |
| 645 | 786 | ||
| 646 | if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) | 787 | if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) |
| @@ -652,19 +793,30 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) | |||
| 652 | if (gpio_is_valid(sc->ext_cd_gpio)) | 793 | if (gpio_is_valid(sc->ext_cd_gpio)) |
| 653 | gpio_free(sc->ext_cd_gpio); | 794 | gpio_free(sc->ext_cd_gpio); |
| 654 | 795 | ||
| 796 | #ifdef CONFIG_PM_RUNTIME | ||
| 797 | clk_enable(sc->clk_io); | ||
| 798 | #endif | ||
| 655 | sdhci_remove_host(host, 1); | 799 | sdhci_remove_host(host, 1); |
| 656 | 800 | ||
| 801 | pm_runtime_dont_use_autosuspend(&pdev->dev); | ||
| 657 | pm_runtime_disable(&pdev->dev); | 802 | pm_runtime_disable(&pdev->dev); |
| 658 | 803 | ||
| 659 | for (ptr = 0; ptr < 3; ptr++) { | 804 | #ifndef CONFIG_PM_RUNTIME |
| 805 | clk_disable(sc->clk_bus[sc->cur_clk]); | ||
| 806 | #endif | ||
| 807 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { | ||
| 660 | if (sc->clk_bus[ptr]) { | 808 | if (sc->clk_bus[ptr]) { |
| 661 | clk_disable(sc->clk_bus[ptr]); | ||
| 662 | clk_put(sc->clk_bus[ptr]); | 809 | clk_put(sc->clk_bus[ptr]); |
| 663 | } | 810 | } |
| 664 | } | 811 | } |
| 665 | clk_disable(sc->clk_io); | 812 | clk_disable(sc->clk_io); |
| 666 | clk_put(sc->clk_io); | 813 | clk_put(sc->clk_io); |
| 667 | 814 | ||
| 815 | if (pdev->dev.of_node) { | ||
| 816 | for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++) | ||
| 817 | gpio_free(sc->gpios[ptr]); | ||
| 818 | } | ||
| 819 | |||
| 668 | sdhci_free_host(host); | 820 | sdhci_free_host(host); |
| 669 | platform_set_drvdata(pdev, NULL); | 821 | platform_set_drvdata(pdev, NULL); |
| 670 | 822 | ||
| @@ -691,15 +843,28 @@ static int sdhci_s3c_resume(struct device *dev) | |||
| 691 | static int sdhci_s3c_runtime_suspend(struct device *dev) | 843 | static int sdhci_s3c_runtime_suspend(struct device *dev) |
| 692 | { | 844 | { |
| 693 | struct sdhci_host *host = dev_get_drvdata(dev); | 845 | struct sdhci_host *host = dev_get_drvdata(dev); |
| 846 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
| 847 | struct clk *busclk = ourhost->clk_io; | ||
| 848 | int ret; | ||
| 849 | |||
| 850 | ret = sdhci_runtime_suspend_host(host); | ||
| 694 | 851 | ||
| 695 | return sdhci_runtime_suspend_host(host); | 852 | clk_disable(ourhost->clk_bus[ourhost->cur_clk]); |
| 853 | clk_disable(busclk); | ||
| 854 | return ret; | ||
| 696 | } | 855 | } |
| 697 | 856 | ||
| 698 | static int sdhci_s3c_runtime_resume(struct device *dev) | 857 | static int sdhci_s3c_runtime_resume(struct device *dev) |
| 699 | { | 858 | { |
| 700 | struct sdhci_host *host = dev_get_drvdata(dev); | 859 | struct sdhci_host *host = dev_get_drvdata(dev); |
| 860 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
| 861 | struct clk *busclk = ourhost->clk_io; | ||
| 862 | int ret; | ||
| 701 | 863 | ||
| 702 | return sdhci_runtime_resume_host(host); | 864 | clk_enable(busclk); |
| 865 | clk_enable(ourhost->clk_bus[ourhost->cur_clk]); | ||
| 866 | ret = sdhci_runtime_resume_host(host); | ||
| 867 | return ret; | ||
| 703 | } | 868 | } |
| 704 | #endif | 869 | #endif |
| 705 | 870 | ||
| @@ -737,6 +902,16 @@ static struct platform_device_id sdhci_s3c_driver_ids[] = { | |||
| 737 | }; | 902 | }; |
| 738 | MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); | 903 | MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); |
| 739 | 904 | ||
| 905 | #ifdef CONFIG_OF | ||
| 906 | static const struct of_device_id sdhci_s3c_dt_match[] = { | ||
| 907 | { .compatible = "samsung,s3c6410-sdhci", }, | ||
| 908 | { .compatible = "samsung,exynos4210-sdhci", | ||
| 909 | .data = (void *)EXYNOS4_SDHCI_DRV_DATA }, | ||
| 910 | {}, | ||
| 911 | }; | ||
| 912 | MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); | ||
| 913 | #endif | ||
| 914 | |||
| 740 | static struct platform_driver sdhci_s3c_driver = { | 915 | static struct platform_driver sdhci_s3c_driver = { |
| 741 | .probe = sdhci_s3c_probe, | 916 | .probe = sdhci_s3c_probe, |
| 742 | .remove = __devexit_p(sdhci_s3c_remove), | 917 | .remove = __devexit_p(sdhci_s3c_remove), |
| @@ -744,6 +919,7 @@ static struct platform_driver sdhci_s3c_driver = { | |||
| 744 | .driver = { | 919 | .driver = { |
| 745 | .owner = THIS_MODULE, | 920 | .owner = THIS_MODULE, |
| 746 | .name = "s3c-sdhci", | 921 | .name = "s3c-sdhci", |
| 922 | .of_match_table = of_match_ptr(sdhci_s3c_dt_match), | ||
| 747 | .pm = SDHCI_S3C_PMOPS, | 923 | .pm = SDHCI_S3C_PMOPS, |
| 748 | }, | 924 | }, |
| 749 | }; | 925 | }; |
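The sdhci-s3c DT parsing requests one GPIO per data line plus command and clock, releasing everything already claimed when one request fails. A self-contained sketch of that acquire-or-unwind loop, with stubbed request/release helpers:

#include <stdio.h>

#define NUM_LINES 6	/* e.g. 4-bit bus + command + clock */

static int request_line(int idx)
{
	if (idx == 4)		/* pretend line 4 is already claimed elsewhere */
		return -1;
	printf("claimed line %d\n", idx);
	return 0;
}

static void release_line(int idx)
{
	printf("released line %d\n", idx);
}

/* Claim all lines; on the first failure, release the ones already obtained
 * in reverse order, mirroring the err_free_dt_gpios unwind above. */
static int claim_all(void)
{
	int i;

	for (i = 0; i < NUM_LINES; i++) {
		if (request_line(i)) {
			printf("line %d failed, unwinding\n", i);
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	while (--i >= 0)
		release_line(i);
	return -1;
}

int main(void)
{
	return claim_all() ? 1 : 0;
}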
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 423da8194cd8..6be89c032deb 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
| 23 | #include <linux/of.h> | ||
| 24 | #include <linux/of_gpio.h> | ||
| 23 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 24 | #include <linux/pm.h> | 26 | #include <linux/pm.h> |
| 25 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| @@ -68,8 +70,42 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) | |||
| 68 | return IRQ_HANDLED; | 70 | return IRQ_HANDLED; |
| 69 | } | 71 | } |
| 70 | 72 | ||
| 73 | #ifdef CONFIG_OF | ||
| 74 | static struct sdhci_plat_data * __devinit | ||
| 75 | sdhci_probe_config_dt(struct platform_device *pdev) | ||
| 76 | { | ||
| 77 | struct device_node *np = pdev->dev.of_node; | ||
| 78 | struct sdhci_plat_data *pdata = NULL; | ||
| 79 | int cd_gpio; | ||
| 80 | |||
| 81 | cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); | ||
| 82 | if (!gpio_is_valid(cd_gpio)) | ||
| 83 | cd_gpio = -1; | ||
| 84 | |||
| 85 | /* If pdata is required */ | ||
| 86 | if (cd_gpio != -1) { | ||
| 87 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
| 88 | if (!pdata) { | ||
| 89 | dev_err(&pdev->dev, "DT: kzalloc failed\n"); | ||
| 90 | return ERR_PTR(-ENOMEM); | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | pdata->card_int_gpio = cd_gpio; | ||
| 95 | |||
| 96 | return pdata; | ||
| 97 | } | ||
| 98 | #else | ||
| 99 | static struct sdhci_plat_data * __devinit | ||
| 100 | sdhci_probe_config_dt(struct platform_device *pdev) | ||
| 101 | { | ||
| 102 | return ERR_PTR(-ENOSYS); | ||
| 103 | } | ||
| 104 | #endif | ||
| 105 | |||
| 71 | static int __devinit sdhci_probe(struct platform_device *pdev) | 106 | static int __devinit sdhci_probe(struct platform_device *pdev) |
| 72 | { | 107 | { |
| 108 | struct device_node *np = pdev->dev.of_node; | ||
| 73 | struct sdhci_host *host; | 109 | struct sdhci_host *host; |
| 74 | struct resource *iomem; | 110 | struct resource *iomem; |
| 75 | struct spear_sdhci *sdhci; | 111 | struct spear_sdhci *sdhci; |
| @@ -104,14 +140,22 @@ static int __devinit sdhci_probe(struct platform_device *pdev) | |||
| 104 | goto err; | 140 | goto err; |
| 105 | } | 141 | } |
| 106 | 142 | ||
| 107 | ret = clk_enable(sdhci->clk); | 143 | ret = clk_prepare_enable(sdhci->clk); |
| 108 | if (ret) { | 144 | if (ret) { |
| 109 | dev_dbg(&pdev->dev, "Error enabling clock\n"); | 145 | dev_dbg(&pdev->dev, "Error enabling clock\n"); |
| 110 | goto put_clk; | 146 | goto put_clk; |
| 111 | } | 147 | } |
| 112 | 148 | ||
| 113 | /* overwrite platform_data */ | 149 | if (np) { |
| 114 | sdhci->data = dev_get_platdata(&pdev->dev); | 150 | sdhci->data = sdhci_probe_config_dt(pdev); |
| 151 | if (IS_ERR(sdhci->data)) { | ||
| 152 | dev_err(&pdev->dev, "DT: Failed to get pdata\n"); | ||
| 153 | return -ENODEV; | ||
| 154 | } | ||
| 155 | } else { | ||
| 156 | sdhci->data = dev_get_platdata(&pdev->dev); | ||
| 157 | } | ||
| 158 | |||
| 115 | pdev->dev.platform_data = sdhci; | 159 | pdev->dev.platform_data = sdhci; |
| 116 | 160 | ||
| 117 | if (pdev->dev.parent) | 161 | if (pdev->dev.parent) |
| @@ -216,7 +260,7 @@ set_drvdata: | |||
| 216 | free_host: | 260 | free_host: |
| 217 | sdhci_free_host(host); | 261 | sdhci_free_host(host); |
| 218 | disable_clk: | 262 | disable_clk: |
| 219 | clk_disable(sdhci->clk); | 263 | clk_disable_unprepare(sdhci->clk); |
| 220 | put_clk: | 264 | put_clk: |
| 221 | clk_put(sdhci->clk); | 265 | clk_put(sdhci->clk); |
| 222 | err: | 266 | err: |
| @@ -238,7 +282,7 @@ static int __devexit sdhci_remove(struct platform_device *pdev) | |||
| 238 | 282 | ||
| 239 | sdhci_remove_host(host, dead); | 283 | sdhci_remove_host(host, dead); |
| 240 | sdhci_free_host(host); | 284 | sdhci_free_host(host); |
| 241 | clk_disable(sdhci->clk); | 285 | clk_disable_unprepare(sdhci->clk); |
| 242 | clk_put(sdhci->clk); | 286 | clk_put(sdhci->clk); |
| 243 | 287 | ||
| 244 | return 0; | 288 | return 0; |
| @@ -253,7 +297,7 @@ static int sdhci_suspend(struct device *dev) | |||
| 253 | 297 | ||
| 254 | ret = sdhci_suspend_host(host); | 298 | ret = sdhci_suspend_host(host); |
| 255 | if (!ret) | 299 | if (!ret) |
| 256 | clk_disable(sdhci->clk); | 300 | clk_disable_unprepare(sdhci->clk); |
| 257 | 301 | ||
| 258 | return ret; | 302 | return ret; |
| 259 | } | 303 | } |
| @@ -264,7 +308,7 @@ static int sdhci_resume(struct device *dev) | |||
| 264 | struct spear_sdhci *sdhci = dev_get_platdata(dev); | 308 | struct spear_sdhci *sdhci = dev_get_platdata(dev); |
| 265 | int ret; | 309 | int ret; |
| 266 | 310 | ||
| 267 | ret = clk_enable(sdhci->clk); | 311 | ret = clk_prepare_enable(sdhci->clk); |
| 268 | if (ret) { | 312 | if (ret) { |
| 269 | dev_dbg(dev, "Resume: Error enabling clock\n"); | 313 | dev_dbg(dev, "Resume: Error enabling clock\n"); |
| 270 | return ret; | 314 | return ret; |
| @@ -276,11 +320,20 @@ static int sdhci_resume(struct device *dev) | |||
| 276 | 320 | ||
| 277 | static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); | 321 | static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); |
| 278 | 322 | ||
| 323 | #ifdef CONFIG_OF | ||
| 324 | static const struct of_device_id sdhci_spear_id_table[] = { | ||
| 325 | { .compatible = "st,spear300-sdhci" }, | ||
| 326 | {} | ||
| 327 | }; | ||
| 328 | MODULE_DEVICE_TABLE(of, sdhci_spear_id_table); | ||
| 329 | #endif | ||
| 330 | |||
| 279 | static struct platform_driver sdhci_driver = { | 331 | static struct platform_driver sdhci_driver = { |
| 280 | .driver = { | 332 | .driver = { |
| 281 | .name = "sdhci", | 333 | .name = "sdhci", |
| 282 | .owner = THIS_MODULE, | 334 | .owner = THIS_MODULE, |
| 283 | .pm = &sdhci_pm_ops, | 335 | .pm = &sdhci_pm_ops, |
| 336 | .of_match_table = of_match_ptr(sdhci_spear_id_table), | ||
| 284 | }, | 337 | }, |
| 285 | .probe = sdhci_probe, | 338 | .probe = sdhci_probe, |
| 286 | .remove = __devexit_p(sdhci_remove), | 339 | .remove = __devexit_p(sdhci_remove), |
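sdhci-spear's new sdhci_probe_config_dt() returns either a populated platform-data pointer or an encoded error via ERR_PTR(). Below is a toy re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() convention plus a simplified probe decision; all names here are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Small negative errno values are encoded at the top of the pointer range so
 * one return value can carry either a valid pointer or an error code. */
static void *err_ptr(intptr_t error)      { return (void *)error; }
static intptr_t ptr_err(const void *ptr)  { return (intptr_t)ptr; }
static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct plat_data { int card_int_gpio; };

/* Sketch of the probe-time decision: no device node means "no pdata",
 * allocation failure is reported as an encoded errno, success returns a
 * populated structure. have_node/have_memory stand in for the real checks. */
static void *probe_config(int have_node, int have_memory)
{
	static struct plat_data pdata;

	if (!have_node)
		return NULL;
	if (!have_memory)
		return err_ptr(-12);	/* -ENOMEM */
	pdata.card_int_gpio = 6;	/* pretend cd-gpios resolved to GPIO 6 */
	return &pdata;
}

int main(void)
{
	void *p = probe_config(1, 0);

	if (is_err(p))
		printf("probe failed: %ld\n", (long)ptr_err(p));

	p = probe_config(1, 1);
	if (p && !is_err(p))
		printf("card_int_gpio = %d\n", ((struct plat_data *)p)->card_int_gpio);
	return 0;
}

As a side note, the hunk above appears to assign pdata->card_int_gpio even on the path where no cd-gpios property was found and pdata was never allocated, so a NULL guard along the lines of the sketch may be worth considering.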
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index d43e7462941f..84e8d0c59ee5 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c | |||
| @@ -257,10 +257,9 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev) | |||
| 257 | int rc; | 257 | int rc; |
| 258 | 258 | ||
| 259 | match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); | 259 | match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); |
| 260 | if (match) | 260 | if (!match) |
| 261 | soc_data = match->data; | 261 | return -EINVAL; |
| 262 | else | 262 | soc_data = match->data; |
| 263 | soc_data = &soc_data_tegra20; | ||
| 264 | 263 | ||
| 265 | host = sdhci_pltfm_init(pdev, soc_data->pdata); | 264 | host = sdhci_pltfm_init(pdev, soc_data->pdata); |
| 266 | if (IS_ERR(host)) | 265 | if (IS_ERR(host)) |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 9a11dc39921c..7922adb42386 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/mmc/mmc.h> | 28 | #include <linux/mmc/mmc.h> |
| 29 | #include <linux/mmc/host.h> | 29 | #include <linux/mmc/host.h> |
| 30 | #include <linux/mmc/card.h> | 30 | #include <linux/mmc/card.h> |
| 31 | #include <linux/mmc/slot-gpio.h> | ||
| 31 | 32 | ||
| 32 | #include "sdhci.h" | 33 | #include "sdhci.h" |
| 33 | 34 | ||
| @@ -1293,6 +1294,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
| 1293 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | 1294 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & |
| 1294 | SDHCI_CARD_PRESENT; | 1295 | SDHCI_CARD_PRESENT; |
| 1295 | 1296 | ||
| 1297 | /* If we're using a cd-gpio, testing the presence bit might fail. */ | ||
| 1298 | if (!present) { | ||
| 1299 | int ret = mmc_gpio_get_cd(host->mmc); | ||
| 1300 | if (ret > 0) | ||
| 1301 | present = true; | ||
| 1302 | } | ||
| 1303 | |||
| 1296 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { | 1304 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { |
| 1297 | host->mrq->cmd->error = -ENOMEDIUM; | 1305 | host->mrq->cmd->error = -ENOMEDIUM; |
| 1298 | tasklet_schedule(&host->finish_tasklet); | 1306 | tasklet_schedule(&host->finish_tasklet); |
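The sdhci_request() hunk above lets a positive mmc_gpio_get_cd() reading override a PRESENT_STATE bit that reports no card, since the presence bit can be unreliable when a cd-gpio is in use. A small sketch of that fallback decision, with the GPIO helper stubbed out:

#include <stdio.h>

/* Stub for mmc_gpio_get_cd(): a negative value means no CD GPIO is
 * configured, 0 means no card, a positive value means a card is present. */
static int gpio_get_cd_stub(int wired, int inserted)
{
	if (!wired)
		return -38;	/* -ENOSYS: no GPIO available */
	return inserted ? 1 : 0;
}

/* Mirror the request-path check: trust PRESENT_STATE first, then let a
 * positive GPIO reading override a "not present" answer. */
static int card_present(int present_state, int wired, int inserted)
{
	int present = present_state;

	if (!present && gpio_get_cd_stub(wired, inserted) > 0)
		present = 1;
	return present;
}

int main(void)
{
	printf("no bit, gpio says inserted -> %d\n", card_present(0, 1, 1));
	printf("no bit, no gpio            -> %d\n", card_present(0, 0, 0));
	return 0;
}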
| @@ -1597,57 +1605,65 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
| 1597 | spin_unlock_irqrestore(&host->lock, flags); | 1605 | spin_unlock_irqrestore(&host->lock, flags); |
| 1598 | } | 1606 | } |
| 1599 | 1607 | ||
| 1600 | static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | 1608 | static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host, |
| 1601 | struct mmc_ios *ios) | 1609 | u16 ctrl) |
| 1602 | { | 1610 | { |
| 1603 | u8 pwr; | 1611 | int ret; |
| 1604 | u16 clk, ctrl; | ||
| 1605 | u32 present_state; | ||
| 1606 | 1612 | ||
| 1607 | /* | 1613 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ |
| 1608 | * Signal Voltage Switching is only applicable for Host Controllers | 1614 | ctrl &= ~SDHCI_CTRL_VDD_180; |
| 1609 | * v3.00 and above. | 1615 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
| 1610 | */ | ||
| 1611 | if (host->version < SDHCI_SPEC_300) | ||
| 1612 | return 0; | ||
| 1613 | 1616 | ||
| 1614 | /* | 1617 | if (host->vqmmc) { |
| 1615 | * We first check whether the request is to set signalling voltage | 1618 | ret = regulator_set_voltage(host->vqmmc, 3300000, 3300000); |
| 1616 | * to 3.3V. If so, we change the voltage to 3.3V and return quickly. | 1619 | if (ret) { |
| 1617 | */ | 1620 | pr_warning("%s: Switching to 3.3V signalling voltage " |
| 1621 | " failed\n", mmc_hostname(host->mmc)); | ||
| 1622 | return -EIO; | ||
| 1623 | } | ||
| 1624 | } | ||
| 1625 | /* Wait for 5ms */ | ||
| 1626 | usleep_range(5000, 5500); | ||
| 1627 | |||
| 1628 | /* 3.3V regulator output should be stable within 5 ms */ | ||
| 1618 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1629 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
| 1619 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { | 1630 | if (!(ctrl & SDHCI_CTRL_VDD_180)) |
| 1620 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ | 1631 | return 0; |
| 1621 | ctrl &= ~SDHCI_CTRL_VDD_180; | ||
| 1622 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | ||
| 1623 | 1632 | ||
| 1624 | /* Wait for 5ms */ | 1633 | pr_warning("%s: 3.3V regulator output did not became stable\n", |
| 1625 | usleep_range(5000, 5500); | 1634 | mmc_hostname(host->mmc)); |
| 1626 | 1635 | ||
| 1627 | /* 3.3V regulator output should be stable within 5 ms */ | 1636 | return -EIO; |
| 1628 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1637 | } |
| 1629 | if (!(ctrl & SDHCI_CTRL_VDD_180)) | ||
| 1630 | return 0; | ||
| 1631 | else { | ||
| 1632 | pr_info(DRIVER_NAME ": Switching to 3.3V " | ||
| 1633 | "signalling voltage failed\n"); | ||
| 1634 | return -EIO; | ||
| 1635 | } | ||
| 1636 | } else if (!(ctrl & SDHCI_CTRL_VDD_180) && | ||
| 1637 | (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { | ||
| 1638 | /* Stop SDCLK */ | ||
| 1639 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
| 1640 | clk &= ~SDHCI_CLOCK_CARD_EN; | ||
| 1641 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
| 1642 | 1638 | ||
| 1643 | /* Check whether DAT[3:0] is 0000 */ | 1639 | static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host, |
| 1644 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | 1640 | u16 ctrl) |
| 1645 | if (!((present_state & SDHCI_DATA_LVL_MASK) >> | 1641 | { |
| 1646 | SDHCI_DATA_LVL_SHIFT)) { | 1642 | u8 pwr; |
| 1647 | /* | 1643 | u16 clk; |
| 1648 | * Enable 1.8V Signal Enable in the Host Control2 | 1644 | u32 present_state; |
| 1649 | * register | 1645 | int ret; |
| 1650 | */ | 1646 | |
| 1647 | /* Stop SDCLK */ | ||
| 1648 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
| 1649 | clk &= ~SDHCI_CLOCK_CARD_EN; | ||
| 1650 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
| 1651 | |||
| 1652 | /* Check whether DAT[3:0] is 0000 */ | ||
| 1653 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | ||
| 1654 | if (!((present_state & SDHCI_DATA_LVL_MASK) >> | ||
| 1655 | SDHCI_DATA_LVL_SHIFT)) { | ||
| 1656 | /* | ||
| 1657 | * Enable 1.8V Signal Enable in the Host Control2 | ||
| 1658 | * register | ||
| 1659 | */ | ||
| 1660 | if (host->vqmmc) | ||
| 1661 | ret = regulator_set_voltage(host->vqmmc, | ||
| 1662 | 1800000, 1800000); | ||
| 1663 | else | ||
| 1664 | ret = 0; | ||
| 1665 | |||
| 1666 | if (!ret) { | ||
| 1651 | ctrl |= SDHCI_CTRL_VDD_180; | 1667 | ctrl |= SDHCI_CTRL_VDD_180; |
| 1652 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1668 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
| 1653 | 1669 | ||
| @@ -1656,7 +1672,7 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | |||
| 1656 | 1672 | ||
| 1657 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1673 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
| 1658 | if (ctrl & SDHCI_CTRL_VDD_180) { | 1674 | if (ctrl & SDHCI_CTRL_VDD_180) { |
| 1659 | /* Provide SDCLK again and wait for 1ms*/ | 1675 | /* Provide SDCLK again and wait for 1ms */ |
| 1660 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | 1676 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); |
| 1661 | clk |= SDHCI_CLOCK_CARD_EN; | 1677 | clk |= SDHCI_CLOCK_CARD_EN; |
| 1662 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 1678 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); |
| @@ -1673,29 +1689,55 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | |||
| 1673 | return 0; | 1689 | return 0; |
| 1674 | } | 1690 | } |
| 1675 | } | 1691 | } |
| 1692 | } | ||
| 1676 | 1693 | ||
| 1677 | /* | 1694 | /* |
| 1678 | * If we are here, that means the switch to 1.8V signaling | 1695 | * If we are here, that means the switch to 1.8V signaling |
| 1679 | * failed. We power cycle the card, and retry initialization | 1696 | * failed. We power cycle the card, and retry initialization |
| 1680 | * sequence by setting S18R to 0. | 1697 | * sequence by setting S18R to 0. |
| 1681 | */ | 1698 | */ |
| 1682 | pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); | 1699 | pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); |
| 1683 | pwr &= ~SDHCI_POWER_ON; | 1700 | pwr &= ~SDHCI_POWER_ON; |
| 1684 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | 1701 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); |
| 1685 | if (host->vmmc) | 1702 | if (host->vmmc) |
| 1686 | regulator_disable(host->vmmc); | 1703 | regulator_disable(host->vmmc); |
| 1687 | 1704 | ||
| 1688 | /* Wait for 1ms as per the spec */ | 1705 | /* Wait for 1ms as per the spec */ |
| 1689 | usleep_range(1000, 1500); | 1706 | usleep_range(1000, 1500); |
| 1690 | pwr |= SDHCI_POWER_ON; | 1707 | pwr |= SDHCI_POWER_ON; |
| 1691 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | 1708 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); |
| 1692 | if (host->vmmc) | 1709 | if (host->vmmc) |
| 1693 | regulator_enable(host->vmmc); | 1710 | regulator_enable(host->vmmc); |
| 1694 | 1711 | ||
| 1695 | pr_info(DRIVER_NAME ": Switching to 1.8V signalling " | 1712 | pr_warning("%s: Switching to 1.8V signalling voltage failed, " |
| 1696 | "voltage failed, retrying with S18R set to 0\n"); | 1713 | "retrying with S18R set to 0\n", mmc_hostname(host->mmc)); |
| 1697 | return -EAGAIN; | 1714 | |
| 1698 | } else | 1715 | return -EAGAIN; |
| 1716 | } | ||
| 1717 | |||
| 1718 | static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | ||
| 1719 | struct mmc_ios *ios) | ||
| 1720 | { | ||
| 1721 | u16 ctrl; | ||
| 1722 | |||
| 1723 | /* | ||
| 1724 | * Signal Voltage Switching is only applicable for Host Controllers | ||
| 1725 | * v3.00 and above. | ||
| 1726 | */ | ||
| 1727 | if (host->version < SDHCI_SPEC_300) | ||
| 1728 | return 0; | ||
| 1729 | |||
| 1730 | /* | ||
| 1731 | * We first check whether the request is to set signalling voltage | ||
| 1732 | * to 3.3V. If so, we change the voltage to 3.3V and return quickly. | ||
| 1733 | */ | ||
| 1734 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
| 1735 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) | ||
| 1736 | return sdhci_do_3_3v_signal_voltage_switch(host, ctrl); | ||
| 1737 | else if (!(ctrl & SDHCI_CTRL_VDD_180) && | ||
| 1738 | (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) | ||
| 1739 | return sdhci_do_1_8v_signal_voltage_switch(host, ctrl); | ||
| 1740 | else | ||
| 1699 | /* No signal voltage switch required */ | 1741 | /* No signal voltage switch required */ |
| 1700 | return 0; | 1742 | return 0; |
| 1701 | } | 1743 | } |
| @@ -2802,6 +2844,18 @@ int sdhci_add_host(struct sdhci_host *host) | |||
| 2802 | !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) | 2844 | !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) |
| 2803 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 2845 | mmc->caps |= MMC_CAP_NEEDS_POLL; |
| 2804 | 2846 | ||
| 2847 | /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ | ||
| 2848 | host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc"); | ||
| 2849 | if (IS_ERR(host->vqmmc)) { | ||
| 2850 | pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc)); | ||
| 2851 | host->vqmmc = NULL; | ||
| 2852 | } | ||
| 2853 | else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000)) | ||
| 2854 | regulator_enable(host->vqmmc); | ||
| 2855 | else | ||
| 2856 | caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | | ||
| 2857 | SDHCI_SUPPORT_DDR50); | ||
| 2858 | |||
| 2805 | /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ | 2859 | /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ |
| 2806 | if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | | 2860 | if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | |
| 2807 | SDHCI_SUPPORT_DDR50)) | 2861 | SDHCI_SUPPORT_DDR50)) |
| @@ -2832,15 +2886,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
| 2832 | if (caps[1] & SDHCI_DRIVER_TYPE_D) | 2886 | if (caps[1] & SDHCI_DRIVER_TYPE_D) |
| 2833 | mmc->caps |= MMC_CAP_DRIVER_TYPE_D; | 2887 | mmc->caps |= MMC_CAP_DRIVER_TYPE_D; |
| 2834 | 2888 | ||
| 2835 | /* | ||
| 2836 | * If Power Off Notify capability is enabled by the host, | ||
| 2837 | * set notify to short power off notify timeout value. | ||
| 2838 | */ | ||
| 2839 | if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) | ||
| 2840 | mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; | ||
| 2841 | else | ||
| 2842 | mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; | ||
| 2843 | |||
| 2844 | /* Initial value for re-tuning timer count */ | 2889 | /* Initial value for re-tuning timer count */ |
| 2845 | host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> | 2890 | host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> |
| 2846 | SDHCI_RETUNING_TIMER_COUNT_SHIFT; | 2891 | SDHCI_RETUNING_TIMER_COUNT_SHIFT; |
| @@ -2862,7 +2907,8 @@ int sdhci_add_host(struct sdhci_host *host) | |||
| 2862 | if (IS_ERR(host->vmmc)) { | 2907 | if (IS_ERR(host->vmmc)) { |
| 2863 | pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); | 2908 | pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); |
| 2864 | host->vmmc = NULL; | 2909 | host->vmmc = NULL; |
| 2865 | } | 2910 | } else |
| 2911 | regulator_enable(host->vmmc); | ||
| 2866 | 2912 | ||
| 2867 | #ifdef CONFIG_REGULATOR | 2913 | #ifdef CONFIG_REGULATOR |
| 2868 | if (host->vmmc) { | 2914 | if (host->vmmc) { |
| @@ -3119,8 +3165,15 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) | |||
| 3119 | tasklet_kill(&host->card_tasklet); | 3165 | tasklet_kill(&host->card_tasklet); |
| 3120 | tasklet_kill(&host->finish_tasklet); | 3166 | tasklet_kill(&host->finish_tasklet); |
| 3121 | 3167 | ||
| 3122 | if (host->vmmc) | 3168 | if (host->vmmc) { |
| 3169 | regulator_disable(host->vmmc); | ||
| 3123 | regulator_put(host->vmmc); | 3170 | regulator_put(host->vmmc); |
| 3171 | } | ||
| 3172 | |||
| 3173 | if (host->vqmmc) { | ||
| 3174 | regulator_disable(host->vqmmc); | ||
| 3175 | regulator_put(host->vqmmc); | ||
| 3176 | } | ||
| 3124 | 3177 | ||
| 3125 | kfree(host->adma_desc); | 3178 | kfree(host->adma_desc); |
| 3126 | kfree(host->align_buffer); | 3179 | kfree(host->align_buffer); |
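The sdhci.c hunks above do two related things: they split the old monolithic signal-voltage switch into dedicated 3.3V and 1.8V helpers, and they add an optional "vqmmc" supply whose lack of 1.8V support strips the UHS modes from the capability mask. The fragment below is a hedged sketch of that optional-supply pattern only; struct sketch_host, uhs_mask and the function name are stand-ins, not the driver's real code.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* "vqmmc" is the supply name used by the patch; everything else here is
 * illustrative. */
struct sketch_host {
        struct regulator *vqmmc;
};

static void sketch_get_optional_vqmmc(struct device *dev,
                                      struct sketch_host *host,
                                      u32 *caps, u32 uhs_mask)
{
        host->vqmmc = regulator_get(dev, "vqmmc");
        if (IS_ERR(host->vqmmc)) {
                host->vqmmc = NULL;     /* missing supply is not fatal */
        } else if (regulator_is_supported_voltage(host->vqmmc,
                                                  1800000, 1800000)) {
                if (regulator_enable(host->vqmmc))
                        pr_warn("sketch: could not enable vqmmc\n");
        } else {
                *caps &= ~uhs_mask;     /* no 1.8V -> drop the UHS modes */
        }
}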
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 5d8142773fac..11d2bc3b51d5 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
| @@ -1213,7 +1213,9 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
| 1213 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); | 1213 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); |
| 1214 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); | 1214 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); |
| 1215 | } else if (state & INT_DTRANE) { | 1215 | } else if (state & INT_DTRANE) { |
| 1216 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); | 1216 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, |
| 1217 | ~(INT_CMD12DRE | INT_CMD12RBE | | ||
| 1218 | INT_CMD12CRE | INT_DTRANE)); | ||
| 1217 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); | 1219 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); |
| 1218 | } else if (state & INT_CMD12RBE) { | 1220 | } else if (state & INT_CMD12RBE) { |
| 1219 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | 1221 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, |
| @@ -1229,6 +1231,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
| 1229 | host->sd_error = true; | 1231 | host->sd_error = true; |
| 1230 | dev_dbg(&host->pd->dev, "int err state = %08x\n", state); | 1232 | dev_dbg(&host->pd->dev, "int err state = %08x\n", state); |
| 1231 | } | 1233 | } |
| 1234 | if (host->state == STATE_IDLE) { | ||
| 1235 | dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state); | ||
| 1236 | return IRQ_HANDLED; | ||
| 1237 | } | ||
| 1232 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { | 1238 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { |
| 1233 | if (!host->dma_active) | 1239 | if (!host->dma_active) |
| 1234 | return IRQ_WAKE_THREAD; | 1240 | return IRQ_WAKE_THREAD; |
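The second sh_mmcif.c hunk adds an early-out for interrupts that arrive while the host state machine is idle. The sketch below shows only the shape of that guard in a threaded-IRQ handler; the structure, state constant and status value are illustrative stand-ins for the driver's own.

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/types.h>

#define SKETCH_STATE_IDLE 0

/* Illustrative host structure; the real driver keeps much more state. */
struct sketch_mmcif {
        int state;
};

static irqreturn_t sketch_intr(int irq, void *dev_id)
{
        struct sketch_mmcif *host = dev_id;
        u32 status = 0;         /* would be read back from MMCIF_CE_INT */

        if (host->state == SKETCH_STATE_IDLE) {
                pr_info("spurious IRQ, status 0x%x\n", status);
                return IRQ_HANDLED;     /* ack it, nothing is in flight */
        }

        return IRQ_WAKE_THREAD;         /* defer real work to the thread */
}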
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 4b83c43f950d..f18becef156d 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
| @@ -1337,21 +1337,7 @@ static struct pci_driver via_sd_driver = { | |||
| 1337 | .resume = via_sd_resume, | 1337 | .resume = via_sd_resume, |
| 1338 | }; | 1338 | }; |
| 1339 | 1339 | ||
| 1340 | static int __init via_sd_drv_init(void) | 1340 | module_pci_driver(via_sd_driver); |
| 1341 | { | ||
| 1342 | pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver " | ||
| 1343 | "(C) 2008 VIA Technologies, Inc.\n"); | ||
| 1344 | |||
| 1345 | return pci_register_driver(&via_sd_driver); | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | static void __exit via_sd_drv_exit(void) | ||
| 1349 | { | ||
| 1350 | pci_unregister_driver(&via_sd_driver); | ||
| 1351 | } | ||
| 1352 | |||
| 1353 | module_init(via_sd_drv_init); | ||
| 1354 | module_exit(via_sd_drv_exit); | ||
| 1355 | 1341 | ||
| 1356 | MODULE_LICENSE("GPL"); | 1342 | MODULE_LICENSE("GPL"); |
| 1357 | MODULE_AUTHOR("VIA Technologies Inc."); | 1343 | MODULE_AUTHOR("VIA Technologies Inc."); |
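The via-sdmmc.c hunk replaces a hand-written init/exit pair with module_pci_driver(). A minimal sketch of the macro's use follows; "example" is a placeholder driver name, and the commented expansion is the rough equivalent of what the macro generates.

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder driver; .id_table/.probe/.remove omitted for brevity. */
static struct pci_driver example_pci_driver = {
        .name = "example",
};

module_pci_driver(example_pci_driver);

/*
 * The macro generates roughly the boilerplate it replaces above:
 *
 *      static int __init example_pci_driver_init(void)
 *      {
 *              return pci_register_driver(&example_pci_driver);
 *      }
 *      module_init(example_pci_driver_init);
 *
 *      static void __exit example_pci_driver_exit(void)
 *      {
 *              pci_unregister_driver(&example_pci_driver);
 *      }
 *      module_exit(example_pci_driver_exit);
 */

MODULE_LICENSE("GPL");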
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 58eab9ac1d01..d5655a63eda4 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c | |||
| @@ -2358,9 +2358,9 @@ error5: | |||
| 2358 | * which is contained at the end of struct mmc | 2358 | * which is contained at the end of struct mmc |
| 2359 | */ | 2359 | */ |
| 2360 | error4: | 2360 | error4: |
| 2361 | usb_free_urb(command_out_urb); | ||
| 2362 | error1: | ||
| 2363 | usb_free_urb(command_res_urb); | 2361 | usb_free_urb(command_res_urb); |
| 2362 | error1: | ||
| 2363 | usb_free_urb(command_out_urb); | ||
| 2364 | error0: | 2364 | error0: |
| 2365 | return retval; | 2365 | return retval; |
| 2366 | } | 2366 | } |
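The vub300.c hunk swaps the bodies of the error4 and error1 labels so each label frees only what has already been allocated when it is reached. Below is a hedged sketch of that reverse-order unwind; the resource names and the label names (kept close to the patch's) are illustrative.

#include <linux/slab.h>
#include <linux/usb.h>

static int sketch_alloc_urbs(struct urb **out, struct urb **res, u8 **cmd)
{
        int retval = -ENOMEM;

        *out = usb_alloc_urb(0, GFP_KERNEL);
        if (!*out)
                goto error0;

        *res = usb_alloc_urb(0, GFP_KERNEL);
        if (!*res)
                goto error1;

        *cmd = kzalloc(64, GFP_KERNEL);
        if (!*cmd)
                goto error4;

        return 0;

error4:
        usb_free_urb(*res);     /* allocated second, freed first */
error1:
        usb_free_urb(*out);     /* allocated first, freed last */
error0:
        return retval;
}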
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 27143e042af5..73fcbbeb78d0 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
| @@ -148,6 +148,13 @@ config MTD_BCM63XX_PARTS | |||
| 148 | This provides partions parsing for BCM63xx devices with CFE | 148 | This provides partions parsing for BCM63xx devices with CFE |
| 149 | bootloaders. | 149 | bootloaders. |
| 150 | 150 | ||
| 151 | config MTD_BCM47XX_PARTS | ||
| 152 | tristate "BCM47XX partitioning support" | ||
| 153 | depends on BCM47XX | ||
| 154 | help | ||
| 155 | This provides a partition parser for devices based on BCM47xx | ||
| 156 | boards. | ||
| 157 | |||
| 151 | comment "User Modules And Translation Layers" | 158 | comment "User Modules And Translation Layers" |
| 152 | 159 | ||
| 153 | config MTD_CHAR | 160 | config MTD_CHAR |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index f90135429dc7..18a38e55b2f0 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
| @@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o | |||
| 12 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o | 12 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
| 13 | obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o | 13 | obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o |
| 14 | obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o | 14 | obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o |
| 15 | obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o | ||
| 15 | 16 | ||
| 16 | # 'Users' - code which presents functionality to userspace. | 17 | # 'Users' - code which presents functionality to userspace. |
| 17 | obj-$(CONFIG_MTD_CHAR) += mtdchar.o | 18 | obj-$(CONFIG_MTD_CHAR) += mtdchar.o |
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c new file mode 100644 index 000000000000..e06d782489a6 --- /dev/null +++ b/drivers/mtd/bcm47xxpart.c | |||
| @@ -0,0 +1,202 @@ | |||
| 1 | /* | ||
| 2 | * BCM47XX MTD partitioning | ||
| 3 | * | ||
| 4 | * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/slab.h> | ||
| 15 | #include <linux/mtd/mtd.h> | ||
| 16 | #include <linux/mtd/partitions.h> | ||
| 17 | #include <asm/mach-bcm47xx/nvram.h> | ||
| 18 | |||
| 19 | /* 10 parts were found on sflash on Netgear WNDR4500 */ | ||
| 20 | #define BCM47XXPART_MAX_PARTS 12 | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Amount of bytes we read when analyzing each block of flash memory. | ||
| 24 | * Set it big enough to allow detecting partition and reading important data. | ||
| 25 | */ | ||
| 26 | #define BCM47XXPART_BYTES_TO_READ 0x404 | ||
| 27 | |||
| 28 | /* Magics */ | ||
| 29 | #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ | ||
| 30 | #define POT_MAGIC1 0x54544f50 /* POTT */ | ||
| 31 | #define POT_MAGIC2 0x504f /* OP */ | ||
| 32 | #define ML_MAGIC1 0x39685a42 | ||
| 33 | #define ML_MAGIC2 0x26594131 | ||
| 34 | #define TRX_MAGIC 0x30524448 | ||
| 35 | |||
| 36 | struct trx_header { | ||
| 37 | uint32_t magic; | ||
| 38 | uint32_t length; | ||
| 39 | uint32_t crc32; | ||
| 40 | uint16_t flags; | ||
| 41 | uint16_t version; | ||
| 42 | uint32_t offset[3]; | ||
| 43 | } __packed; | ||
| 44 | |||
| 45 | static void bcm47xxpart_add_part(struct mtd_partition *part, char *name, | ||
| 46 | u64 offset, uint32_t mask_flags) | ||
| 47 | { | ||
| 48 | part->name = name; | ||
| 49 | part->offset = offset; | ||
| 50 | part->mask_flags = mask_flags; | ||
| 51 | } | ||
| 52 | |||
| 53 | static int bcm47xxpart_parse(struct mtd_info *master, | ||
| 54 | struct mtd_partition **pparts, | ||
| 55 | struct mtd_part_parser_data *data) | ||
| 56 | { | ||
| 57 | struct mtd_partition *parts; | ||
| 58 | uint8_t i, curr_part = 0; | ||
| 59 | uint32_t *buf; | ||
| 60 | size_t bytes_read; | ||
| 61 | uint32_t offset; | ||
| 62 | uint32_t blocksize = 0x10000; | ||
| 63 | struct trx_header *trx; | ||
| 64 | |||
| 65 | /* Alloc */ | ||
| 66 | parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, | ||
| 67 | GFP_KERNEL); | ||
| 68 | buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); | ||
| 69 | |||
| 70 | /* Parse block by block looking for magics */ | ||
| 71 | for (offset = 0; offset <= master->size - blocksize; | ||
| 72 | offset += blocksize) { | ||
| 73 | /* Nothing more in higher memory */ | ||
| 74 | if (offset >= 0x2000000) | ||
| 75 | break; | ||
| 76 | |||
| 77 | if (curr_part > BCM47XXPART_MAX_PARTS) { | ||
| 78 | pr_warn("Reached maximum number of partitions, scanning stopped!\n"); | ||
| 79 | break; | ||
| 80 | } | ||
| 81 | |||
| 82 | /* Read beginning of the block */ | ||
| 83 | if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, | ||
| 84 | &bytes_read, (uint8_t *)buf) < 0) { | ||
| 85 | pr_err("mtd_read error while parsing (offset: 0x%X)!\n", | ||
| 86 | offset); | ||
| 87 | continue; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* CFE has small NVRAM at 0x400 */ | ||
| 91 | if (buf[0x400 / 4] == NVRAM_HEADER) { | ||
| 92 | bcm47xxpart_add_part(&parts[curr_part++], "boot", | ||
| 93 | offset, MTD_WRITEABLE); | ||
| 94 | continue; | ||
| 95 | } | ||
| 96 | |||
| 97 | /* Standard NVRAM */ | ||
| 98 | if (buf[0x000 / 4] == NVRAM_HEADER) { | ||
| 99 | bcm47xxpart_add_part(&parts[curr_part++], "nvram", | ||
| 100 | offset, 0); | ||
| 101 | continue; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * board_data starts with board_id which differs across boards, | ||
| 106 | * but we can use 'MPFR' (hopefully) magic at 0x100 | ||
| 107 | */ | ||
| 108 | if (buf[0x100 / 4] == BOARD_DATA_MAGIC) { | ||
| 109 | bcm47xxpart_add_part(&parts[curr_part++], "board_data", | ||
| 110 | offset, MTD_WRITEABLE); | ||
| 111 | continue; | ||
| 112 | } | ||
| 113 | |||
| 114 | /* POT(TOP) */ | ||
| 115 | if (buf[0x000 / 4] == POT_MAGIC1 && | ||
| 116 | (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) { | ||
| 117 | bcm47xxpart_add_part(&parts[curr_part++], "POT", offset, | ||
| 118 | MTD_WRITEABLE); | ||
| 119 | continue; | ||
| 120 | } | ||
| 121 | |||
| 122 | /* ML */ | ||
| 123 | if (buf[0x010 / 4] == ML_MAGIC1 && | ||
| 124 | buf[0x014 / 4] == ML_MAGIC2) { | ||
| 125 | bcm47xxpart_add_part(&parts[curr_part++], "ML", offset, | ||
| 126 | MTD_WRITEABLE); | ||
| 127 | continue; | ||
| 128 | } | ||
| 129 | |||
| 130 | /* TRX */ | ||
| 131 | if (buf[0x000 / 4] == TRX_MAGIC) { | ||
| 132 | trx = (struct trx_header *)buf; | ||
| 133 | |||
| 134 | i = 0; | ||
| 135 | /* We have LZMA loader if offset[2] points to sth */ | ||
| 136 | if (trx->offset[2]) { | ||
| 137 | bcm47xxpart_add_part(&parts[curr_part++], | ||
| 138 | "loader", | ||
| 139 | offset + trx->offset[i], | ||
| 140 | 0); | ||
| 141 | i++; | ||
| 142 | } | ||
| 143 | |||
| 144 | bcm47xxpart_add_part(&parts[curr_part++], "linux", | ||
| 145 | offset + trx->offset[i], 0); | ||
| 146 | i++; | ||
| 147 | |||
| 148 | /* | ||
| 149 | * Pure rootfs size is known and can be calculated as: | ||
| 150 | * trx->length - trx->offset[i]. We don't fill it as | ||
| 151 | * we want to have jffs2 (overlay) in the same mtd. | ||
| 152 | */ | ||
| 153 | bcm47xxpart_add_part(&parts[curr_part++], "rootfs", | ||
| 154 | offset + trx->offset[i], 0); | ||
| 155 | i++; | ||
| 156 | |||
| 157 | /* | ||
| 158 | * We have whole TRX scanned, skip to the next part. Use | ||
| 159 | * rounddown (not roundup), as the loop will increase | ||
| 160 | * offset in next step. | ||
| 161 | */ | ||
| 162 | offset = rounddown(offset + trx->length, blocksize); | ||
| 163 | continue; | ||
| 164 | } | ||
| 165 | } | ||
| 166 | kfree(buf); | ||
| 167 | |||
| 168 | /* | ||
| 169 | * Assume that partitions end at the beginning of the one they are | ||
| 170 | * followed by. | ||
| 171 | */ | ||
| 172 | for (i = 0; i < curr_part - 1; i++) | ||
| 173 | parts[i].size = parts[i + 1].offset - parts[i].offset; | ||
| 174 | if (curr_part > 0) | ||
| 175 | parts[curr_part - 1].size = | ||
| 176 | master->size - parts[curr_part - 1].offset; | ||
| 177 | |||
| 178 | *pparts = parts; | ||
| 179 | return curr_part; | ||
| 180 | }; | ||
| 181 | |||
| 182 | static struct mtd_part_parser bcm47xxpart_mtd_parser = { | ||
| 183 | .owner = THIS_MODULE, | ||
| 184 | .parse_fn = bcm47xxpart_parse, | ||
| 185 | .name = "bcm47xxpart", | ||
| 186 | }; | ||
| 187 | |||
| 188 | static int __init bcm47xxpart_init(void) | ||
| 189 | { | ||
| 190 | return register_mtd_parser(&bcm47xxpart_mtd_parser); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void __exit bcm47xxpart_exit(void) | ||
| 194 | { | ||
| 195 | deregister_mtd_parser(&bcm47xxpart_mtd_parser); | ||
| 196 | } | ||
| 197 | |||
| 198 | module_init(bcm47xxpart_init); | ||
| 199 | module_exit(bcm47xxpart_exit); | ||
| 200 | |||
| 201 | MODULE_LICENSE("GPL"); | ||
| 202 | MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories"); | ||
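The new bcm47xxpart parser only builds a struct mtd_partition array; a flash driver opts into it by naming it in the probe list handed to mtd_device_parse_register(), as the bcm47xxsflash driver further down does. A minimal usage sketch, with illustrative function and variable names:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *sketch_probes[] = { "bcm47xxpart", NULL };

/* Called from a flash driver once its struct mtd_info is filled in. */
static int sketch_register_with_parser(struct mtd_info *mtd)
{
        /* Each parser named in sketch_probes is tried in order; the first
         * one that returns partitions wins, otherwise the device is
         * registered unpartitioned. */
        return mtd_device_parse_register(mtd, sketch_probes, NULL, NULL, 0);
}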
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index b1e3c26edd6d..e469b01d40d2 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig | |||
| @@ -43,9 +43,6 @@ choice | |||
| 43 | prompt "Flash cmd/query data swapping" | 43 | prompt "Flash cmd/query data swapping" |
| 44 | depends on MTD_CFI_ADV_OPTIONS | 44 | depends on MTD_CFI_ADV_OPTIONS |
| 45 | default MTD_CFI_NOSWAP | 45 | default MTD_CFI_NOSWAP |
| 46 | |||
| 47 | config MTD_CFI_NOSWAP | ||
| 48 | bool "NO" | ||
| 49 | ---help--- | 46 | ---help--- |
| 50 | This option defines the way in which the CPU attempts to arrange | 47 | This option defines the way in which the CPU attempts to arrange |
| 51 | data bits when writing the 'magic' commands to the chips. Saying | 48 | data bits when writing the 'magic' commands to the chips. Saying |
| @@ -55,12 +52,8 @@ config MTD_CFI_NOSWAP | |||
| 55 | Specific arrangements are possible with the BIG_ENDIAN_BYTE and | 52 | Specific arrangements are possible with the BIG_ENDIAN_BYTE and |
| 56 | LITTLE_ENDIAN_BYTE, if the bytes are reversed. | 53 | LITTLE_ENDIAN_BYTE, if the bytes are reversed. |
| 57 | 54 | ||
| 58 | If you have a LART, on which the data (and address) lines were | 55 | config MTD_CFI_NOSWAP |
| 59 | connected in a fashion which ensured that the nets were as short | 56 | bool "NO" |
| 60 | as possible, resulting in a bit-shuffling which seems utterly | ||
| 61 | random to the untrained eye, you need the LART_ENDIAN_BYTE option. | ||
| 62 | |||
| 63 | Yes, there really exists something sicker than PDP-endian :) | ||
| 64 | 57 | ||
| 65 | config MTD_CFI_BE_BYTE_SWAP | 58 | config MTD_CFI_BE_BYTE_SWAP |
| 66 | bool "BIG_ENDIAN_BYTE" | 59 | bool "BIG_ENDIAN_BYTE" |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index dbbd2edfb812..77514430f1fe 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
| @@ -2043,7 +2043,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
| 2043 | { | 2043 | { |
| 2044 | struct cfi_private *cfi = map->fldrv_priv; | 2044 | struct cfi_private *cfi = map->fldrv_priv; |
| 2045 | struct cfi_pri_intelext *extp = cfi->cmdset_priv; | 2045 | struct cfi_pri_intelext *extp = cfi->cmdset_priv; |
| 2046 | int udelay; | 2046 | int mdelay; |
| 2047 | int ret; | 2047 | int ret; |
| 2048 | 2048 | ||
| 2049 | adr += chip->start; | 2049 | adr += chip->start; |
| @@ -2072,9 +2072,17 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
| 2072 | * If Instant Individual Block Locking supported then no need | 2072 | * If Instant Individual Block Locking supported then no need |
| 2073 | * to delay. | 2073 | * to delay. |
| 2074 | */ | 2074 | */ |
| 2075 | udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0; | 2075 | /* |
| 2076 | * Unlocking may take up to 1.4 seconds on some Intel flashes. So | ||
| 2077 | * lets use a max of 1.5 seconds (1500ms) as timeout. | ||
| 2078 | * | ||
| 2079 | * See "Clear Block Lock-Bits Time" on page 40 in | ||
| 2080 | * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual | ||
| 2081 | * from February 2003 | ||
| 2082 | */ | ||
| 2083 | mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0; | ||
| 2076 | 2084 | ||
| 2077 | ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100); | 2085 | ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000); |
| 2078 | if (ret) { | 2086 | if (ret) { |
| 2079 | map_write(map, CMD(0x70), adr); | 2087 | map_write(map, CMD(0x70), adr); |
| 2080 | chip->state = FL_STATUS; | 2088 | chip->state = FL_STATUS; |
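The cfi_cmdset_0001.c hunk replaces a one-jiffy unlock wait with a millisecond-based timeout sized from the StrataFlash data sheet. The helper below is a hedged restatement of that selection logic only; the function name is illustrative.

#include <linux/mtd/cfi.h>

/* Parts without Instant Individual Block Locking (FeatureSupport bit 5)
 * may need up to ~1.4 s to clear lock bits, so allow 1500 ms; parts with
 * it need no wait at all. */
static int sketch_unlock_timeout_ms(struct cfi_pri_intelext *extp)
{
        return (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
}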
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 22d0493a026f..5ff5c4a16943 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
| @@ -431,6 +431,68 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi, | |||
| 431 | } | 431 | } |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | static int is_m29ew(struct cfi_private *cfi) | ||
| 435 | { | ||
| 436 | if (cfi->mfr == CFI_MFR_INTEL && | ||
| 437 | ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || | ||
| 438 | (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) | ||
| 439 | return 1; | ||
| 440 | return 0; | ||
| 441 | } | ||
| 442 | |||
| 443 | /* | ||
| 444 | * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: | ||
| 445 | * Some revisions of the M29EW suffer from erase suspend hang ups. In | ||
| 446 | * particular, it can occur when the sequence | ||
| 447 | * Erase Confirm -> Suspend -> Program -> Resume | ||
| 448 | * causes a lockup due to internal timing issues. The consequence is that the | ||
| 449 | * erase cannot be resumed without inserting a dummy command after programming | ||
| 450 | * and prior to resuming. [...] The work-around is to issue a dummy write cycle | ||
| 451 | * that writes an F0 command code before the RESUME command. | ||
| 452 | */ | ||
| 453 | static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, | ||
| 454 | unsigned long adr) | ||
| 455 | { | ||
| 456 | struct cfi_private *cfi = map->fldrv_priv; | ||
| 457 | /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ | ||
| 458 | if (is_m29ew(cfi)) | ||
| 459 | map_write(map, CMD(0xF0), adr); | ||
| 460 | } | ||
| 461 | |||
| 462 | /* | ||
| 463 | * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: | ||
| 464 | * | ||
| 465 | * Some revisions of the M29EW (for example, A1 and A2 step revisions) | ||
| 466 | * are affected by a problem that could cause a hang up when an ERASE SUSPEND | ||
| 467 | * command is issued after an ERASE RESUME operation without waiting for a | ||
| 468 | * minimum delay. The result is that once the ERASE seems to be completed | ||
| 469 | * (no bits are toggling), the contents of the Flash memory block on which | ||
| 470 | * the erase was ongoing could be inconsistent with the expected values | ||
| 471 | * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 | ||
| 472 | * values), causing a consequent failure of the ERASE operation. | ||
| 473 | * The occurrence of this issue could be high, especially when file system | ||
| 474 | * operations on the Flash are intensive. As a result, it is recommended | ||
| 475 | * that a patch be applied. Intensive file system operations can cause many | ||
| 476 | * calls to the garbage routine to free Flash space (also by erasing physical | ||
| 477 | * Flash blocks) and as a result, many consecutive SUSPEND and RESUME | ||
| 478 | * commands can occur. The problem disappears when a delay is inserted after | ||
| 479 | * the RESUME command by using the udelay() function available in Linux. | ||
| 480 | * The DELAY value must be tuned based on the customer's platform. | ||
| 481 | * The maximum value that fixes the problem in all cases is 500us. | ||
| 482 | * But, in our experience, a delay of 30 µs to 50 µs is sufficient | ||
| 483 | * in most cases. | ||
| 484 | * We have chosen 500µs because this latency is acceptable. | ||
| 485 | */ | ||
| 486 | static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) | ||
| 487 | { | ||
| 488 | /* | ||
| 489 | * Resolving the Delay After Resume Issue see Micron TN-13-07 | ||
| 490 | * Worst case delay must be 500µs but 30-50µs should be ok as well | ||
| 491 | */ | ||
| 492 | if (is_m29ew(cfi)) | ||
| 493 | cfi_udelay(500); | ||
| 494 | } | ||
| 495 | |||
| 434 | struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | 496 | struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) |
| 435 | { | 497 | { |
| 436 | struct cfi_private *cfi = map->fldrv_priv; | 498 | struct cfi_private *cfi = map->fldrv_priv; |
| @@ -776,7 +838,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad | |||
| 776 | 838 | ||
| 777 | switch(chip->oldstate) { | 839 | switch(chip->oldstate) { |
| 778 | case FL_ERASING: | 840 | case FL_ERASING: |
| 841 | cfi_fixup_m29ew_erase_suspend(map, | ||
| 842 | chip->in_progress_block_addr); | ||
| 779 | map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); | 843 | map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); |
| 844 | cfi_fixup_m29ew_delay_after_resume(cfi); | ||
| 780 | chip->oldstate = FL_READY; | 845 | chip->oldstate = FL_READY; |
| 781 | chip->state = FL_ERASING; | 846 | chip->state = FL_ERASING; |
| 782 | break; | 847 | break; |
| @@ -916,6 +981,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
| 916 | /* Disallow XIP again */ | 981 | /* Disallow XIP again */ |
| 917 | local_irq_disable(); | 982 | local_irq_disable(); |
| 918 | 983 | ||
| 984 | /* Correct Erase Suspend Hangups for M29EW */ | ||
| 985 | cfi_fixup_m29ew_erase_suspend(map, adr); | ||
| 919 | /* Resume the write or erase operation */ | 986 | /* Resume the write or erase operation */ |
| 920 | map_write(map, cfi->sector_erase_cmd, adr); | 987 | map_write(map, cfi->sector_erase_cmd, adr); |
| 921 | chip->state = oldstate; | 988 | chip->state = oldstate; |
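Both M29EW fixups key off is_m29ew() and bracket the erase-resume write, as the put_chip() and xip_udelay() hunks above show. The sketch below captures just that ordering, with the two fixups inlined via public CFI helpers; it is an illustration of the sequence, not a drop-in replacement for either call site.

#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>

static void sketch_resume_suspended_erase(struct map_info *map,
                                          struct flchip *chip,
                                          struct cfi_private *cfi,
                                          int chip_is_m29ew)
{
        unsigned long adr = chip->in_progress_block_addr;

        /* 1. dummy 0xF0 cycle so the suspended erase can really resume */
        if (chip_is_m29ew)
                map_write(map, cfi_build_cmd(0xF0, map, cfi), adr);

        /* 2. the normal erase-resume command */
        map_write(map, cfi->sector_erase_cmd, adr);

        /* 3. settle delay before any further ERASE SUSPEND is issued */
        if (chip_is_m29ew)
                cfi_udelay(500);
}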
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 4558e0f4d07f..aed1b8a63c9f 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c | |||
| @@ -39,11 +39,10 @@ | |||
| 39 | 39 | ||
| 40 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
| 41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
| 42 | |||
| 43 | #include <linux/mtd/mtd.h> | 42 | #include <linux/mtd/mtd.h> |
| 44 | #include <linux/mtd/partitions.h> | 43 | #include <linux/mtd/partitions.h> |
| 45 | #include <linux/bootmem.h> | ||
| 46 | #include <linux/module.h> | 44 | #include <linux/module.h> |
| 45 | #include <linux/err.h> | ||
| 47 | 46 | ||
| 48 | /* error message prefix */ | 47 | /* error message prefix */ |
| 49 | #define ERRP "mtd: " | 48 | #define ERRP "mtd: " |
| @@ -72,7 +71,7 @@ static struct cmdline_mtd_partition *partitions; | |||
| 72 | 71 | ||
| 73 | /* the command line passed to mtdpart_setup() */ | 72 | /* the command line passed to mtdpart_setup() */ |
| 74 | static char *cmdline; | 73 | static char *cmdline; |
| 75 | static int cmdline_parsed = 0; | 74 | static int cmdline_parsed; |
| 76 | 75 | ||
| 77 | /* | 76 | /* |
| 78 | * Parse one partition definition for an MTD. Since there can be many | 77 | * Parse one partition definition for an MTD. Since there can be many |
| @@ -83,15 +82,14 @@ static int cmdline_parsed = 0; | |||
| 83 | * syntax has been verified ok. | 82 | * syntax has been verified ok. |
| 84 | */ | 83 | */ |
| 85 | static struct mtd_partition * newpart(char *s, | 84 | static struct mtd_partition * newpart(char *s, |
| 86 | char **retptr, | 85 | char **retptr, |
| 87 | int *num_parts, | 86 | int *num_parts, |
| 88 | int this_part, | 87 | int this_part, |
| 89 | unsigned char **extra_mem_ptr, | 88 | unsigned char **extra_mem_ptr, |
| 90 | int extra_mem_size) | 89 | int extra_mem_size) |
| 91 | { | 90 | { |
| 92 | struct mtd_partition *parts; | 91 | struct mtd_partition *parts; |
| 93 | unsigned long size; | 92 | unsigned long size, offset = OFFSET_CONTINUOUS; |
| 94 | unsigned long offset = OFFSET_CONTINUOUS; | ||
| 95 | char *name; | 93 | char *name; |
| 96 | int name_len; | 94 | int name_len; |
| 97 | unsigned char *extra_mem; | 95 | unsigned char *extra_mem; |
| @@ -99,124 +97,106 @@ static struct mtd_partition * newpart(char *s, | |||
| 99 | unsigned int mask_flags; | 97 | unsigned int mask_flags; |
| 100 | 98 | ||
| 101 | /* fetch the partition size */ | 99 | /* fetch the partition size */ |
| 102 | if (*s == '-') | 100 | if (*s == '-') { |
| 103 | { /* assign all remaining space to this partition */ | 101 | /* assign all remaining space to this partition */ |
| 104 | size = SIZE_REMAINING; | 102 | size = SIZE_REMAINING; |
| 105 | s++; | 103 | s++; |
| 106 | } | 104 | } else { |
| 107 | else | ||
| 108 | { | ||
| 109 | size = memparse(s, &s); | 105 | size = memparse(s, &s); |
| 110 | if (size < PAGE_SIZE) | 106 | if (size < PAGE_SIZE) { |
| 111 | { | ||
| 112 | printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); | 107 | printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); |
| 113 | return NULL; | 108 | return ERR_PTR(-EINVAL); |
| 114 | } | 109 | } |
| 115 | } | 110 | } |
| 116 | 111 | ||
| 117 | /* fetch partition name and flags */ | 112 | /* fetch partition name and flags */ |
| 118 | mask_flags = 0; /* this is going to be a regular partition */ | 113 | mask_flags = 0; /* this is going to be a regular partition */ |
| 119 | delim = 0; | 114 | delim = 0; |
| 120 | /* check for offset */ | 115 | |
| 121 | if (*s == '@') | 116 | /* check for offset */ |
| 122 | { | 117 | if (*s == '@') { |
| 123 | s++; | 118 | s++; |
| 124 | offset = memparse(s, &s); | 119 | offset = memparse(s, &s); |
| 125 | } | 120 | } |
| 126 | /* now look for name */ | 121 | |
| 122 | /* now look for name */ | ||
| 127 | if (*s == '(') | 123 | if (*s == '(') |
| 128 | { | ||
| 129 | delim = ')'; | 124 | delim = ')'; |
| 130 | } | ||
| 131 | 125 | ||
| 132 | if (delim) | 126 | if (delim) { |
| 133 | { | ||
| 134 | char *p; | 127 | char *p; |
| 135 | 128 | ||
| 136 | name = ++s; | 129 | name = ++s; |
| 137 | p = strchr(name, delim); | 130 | p = strchr(name, delim); |
| 138 | if (!p) | 131 | if (!p) { |
| 139 | { | ||
| 140 | printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); | 132 | printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); |
| 141 | return NULL; | 133 | return ERR_PTR(-EINVAL); |
| 142 | } | 134 | } |
| 143 | name_len = p - name; | 135 | name_len = p - name; |
| 144 | s = p + 1; | 136 | s = p + 1; |
| 145 | } | 137 | } else { |
| 146 | else | 138 | name = NULL; |
| 147 | { | ||
| 148 | name = NULL; | ||
| 149 | name_len = 13; /* Partition_000 */ | 139 | name_len = 13; /* Partition_000 */ |
| 150 | } | 140 | } |
| 151 | 141 | ||
| 152 | /* record name length for memory allocation later */ | 142 | /* record name length for memory allocation later */ |
| 153 | extra_mem_size += name_len + 1; | 143 | extra_mem_size += name_len + 1; |
| 154 | 144 | ||
| 155 | /* test for options */ | 145 | /* test for options */ |
| 156 | if (strncmp(s, "ro", 2) == 0) | 146 | if (strncmp(s, "ro", 2) == 0) { |
| 157 | { | ||
| 158 | mask_flags |= MTD_WRITEABLE; | 147 | mask_flags |= MTD_WRITEABLE; |
| 159 | s += 2; | 148 | s += 2; |
| 160 | } | 149 | } |
| 161 | 150 | ||
| 162 | /* if lk is found do NOT unlock the MTD partition*/ | 151 | /* if lk is found do NOT unlock the MTD partition*/ |
| 163 | if (strncmp(s, "lk", 2) == 0) | 152 | if (strncmp(s, "lk", 2) == 0) { |
| 164 | { | ||
| 165 | mask_flags |= MTD_POWERUP_LOCK; | 153 | mask_flags |= MTD_POWERUP_LOCK; |
| 166 | s += 2; | 154 | s += 2; |
| 167 | } | 155 | } |
| 168 | 156 | ||
| 169 | /* test if more partitions are following */ | 157 | /* test if more partitions are following */ |
| 170 | if (*s == ',') | 158 | if (*s == ',') { |
| 171 | { | 159 | if (size == SIZE_REMAINING) { |
| 172 | if (size == SIZE_REMAINING) | ||
| 173 | { | ||
| 174 | printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n"); | 160 | printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n"); |
| 175 | return NULL; | 161 | return ERR_PTR(-EINVAL); |
| 176 | } | 162 | } |
| 177 | /* more partitions follow, parse them */ | 163 | /* more partitions follow, parse them */ |
| 178 | parts = newpart(s + 1, &s, num_parts, this_part + 1, | 164 | parts = newpart(s + 1, &s, num_parts, this_part + 1, |
| 179 | &extra_mem, extra_mem_size); | 165 | &extra_mem, extra_mem_size); |
| 180 | if (!parts) | 166 | if (IS_ERR(parts)) |
| 181 | return NULL; | 167 | return parts; |
| 182 | } | 168 | } else { |
| 183 | else | 169 | /* this is the last partition: allocate space for all */ |
| 184 | { /* this is the last partition: allocate space for all */ | ||
| 185 | int alloc_size; | 170 | int alloc_size; |
| 186 | 171 | ||
| 187 | *num_parts = this_part + 1; | 172 | *num_parts = this_part + 1; |
| 188 | alloc_size = *num_parts * sizeof(struct mtd_partition) + | 173 | alloc_size = *num_parts * sizeof(struct mtd_partition) + |
| 189 | extra_mem_size; | 174 | extra_mem_size; |
| 175 | |||
| 190 | parts = kzalloc(alloc_size, GFP_KERNEL); | 176 | parts = kzalloc(alloc_size, GFP_KERNEL); |
| 191 | if (!parts) | 177 | if (!parts) |
| 192 | return NULL; | 178 | return ERR_PTR(-ENOMEM); |
| 193 | extra_mem = (unsigned char *)(parts + *num_parts); | 179 | extra_mem = (unsigned char *)(parts + *num_parts); |
| 194 | } | 180 | } |
| 181 | |||
| 195 | /* enter this partition (offset will be calculated later if it is zero at this point) */ | 182 | /* enter this partition (offset will be calculated later if it is zero at this point) */ |
| 196 | parts[this_part].size = size; | 183 | parts[this_part].size = size; |
| 197 | parts[this_part].offset = offset; | 184 | parts[this_part].offset = offset; |
| 198 | parts[this_part].mask_flags = mask_flags; | 185 | parts[this_part].mask_flags = mask_flags; |
| 199 | if (name) | 186 | if (name) |
| 200 | { | ||
| 201 | strlcpy(extra_mem, name, name_len + 1); | 187 | strlcpy(extra_mem, name, name_len + 1); |
| 202 | } | ||
| 203 | else | 188 | else |
| 204 | { | ||
| 205 | sprintf(extra_mem, "Partition_%03d", this_part); | 189 | sprintf(extra_mem, "Partition_%03d", this_part); |
| 206 | } | ||
| 207 | parts[this_part].name = extra_mem; | 190 | parts[this_part].name = extra_mem; |
| 208 | extra_mem += name_len + 1; | 191 | extra_mem += name_len + 1; |
| 209 | 192 | ||
| 210 | dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", | 193 | dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", |
| 211 | this_part, | 194 | this_part, parts[this_part].name, parts[this_part].offset, |
| 212 | parts[this_part].name, | 195 | parts[this_part].size, parts[this_part].mask_flags)); |
| 213 | parts[this_part].offset, | ||
| 214 | parts[this_part].size, | ||
| 215 | parts[this_part].mask_flags)); | ||
| 216 | 196 | ||
| 217 | /* return (updated) pointer to extra_mem memory */ | 197 | /* return (updated) pointer to extra_mem memory */ |
| 218 | if (extra_mem_ptr) | 198 | if (extra_mem_ptr) |
| 219 | *extra_mem_ptr = extra_mem; | 199 | *extra_mem_ptr = extra_mem; |
| 220 | 200 | ||
| 221 | /* return (updated) pointer command line string */ | 201 | /* return (updated) pointer command line string */ |
| 222 | *retptr = s; | 202 | *retptr = s; |
| @@ -236,16 +216,16 @@ static int mtdpart_setup_real(char *s) | |||
| 236 | { | 216 | { |
| 237 | struct cmdline_mtd_partition *this_mtd; | 217 | struct cmdline_mtd_partition *this_mtd; |
| 238 | struct mtd_partition *parts; | 218 | struct mtd_partition *parts; |
| 239 | int mtd_id_len; | 219 | int mtd_id_len, num_parts; |
| 240 | int num_parts; | ||
| 241 | char *p, *mtd_id; | 220 | char *p, *mtd_id; |
| 242 | 221 | ||
| 243 | mtd_id = s; | 222 | mtd_id = s; |
| 223 | |||
| 244 | /* fetch <mtd-id> */ | 224 | /* fetch <mtd-id> */ |
| 245 | if (!(p = strchr(s, ':'))) | 225 | p = strchr(s, ':'); |
| 246 | { | 226 | if (!p) { |
| 247 | printk(KERN_ERR ERRP "no mtd-id\n"); | 227 | printk(KERN_ERR ERRP "no mtd-id\n"); |
| 248 | return 0; | 228 | return -EINVAL; |
| 249 | } | 229 | } |
| 250 | mtd_id_len = p - mtd_id; | 230 | mtd_id_len = p - mtd_id; |
| 251 | 231 | ||
| @@ -262,8 +242,7 @@ static int mtdpart_setup_real(char *s) | |||
| 262 | (unsigned char**)&this_mtd, /* out: extra mem */ | 242 | (unsigned char**)&this_mtd, /* out: extra mem */ |
| 263 | mtd_id_len + 1 + sizeof(*this_mtd) + | 243 | mtd_id_len + 1 + sizeof(*this_mtd) + |
| 264 | sizeof(void*)-1 /*alignment*/); | 244 | sizeof(void*)-1 /*alignment*/); |
| 265 | if(!parts) | 245 | if (IS_ERR(parts)) { |
| 266 | { | ||
| 267 | /* | 246 | /* |
| 268 | * An error occurred. We're either: | 247 | * An error occurred. We're either: |
| 269 | * a) out of memory, or | 248 | * a) out of memory, or |
| @@ -271,12 +250,12 @@ static int mtdpart_setup_real(char *s) | |||
| 271 | * Either way, this mtd is hosed and we're | 250 | * Either way, this mtd is hosed and we're |
| 272 | * unlikely to succeed in parsing any more | 251 | * unlikely to succeed in parsing any more |
| 273 | */ | 252 | */ |
| 274 | return 0; | 253 | return PTR_ERR(parts); |
| 275 | } | 254 | } |
| 276 | 255 | ||
| 277 | /* align this_mtd */ | 256 | /* align this_mtd */ |
| 278 | this_mtd = (struct cmdline_mtd_partition *) | 257 | this_mtd = (struct cmdline_mtd_partition *) |
| 279 | ALIGN((unsigned long)this_mtd, sizeof(void*)); | 258 | ALIGN((unsigned long)this_mtd, sizeof(void *)); |
| 280 | /* enter results */ | 259 | /* enter results */ |
| 281 | this_mtd->parts = parts; | 260 | this_mtd->parts = parts; |
| 282 | this_mtd->num_parts = num_parts; | 261 | this_mtd->num_parts = num_parts; |
| @@ -296,14 +275,14 @@ static int mtdpart_setup_real(char *s) | |||
| 296 | break; | 275 | break; |
| 297 | 276 | ||
| 298 | /* does another spec follow? */ | 277 | /* does another spec follow? */ |
| 299 | if (*s != ';') | 278 | if (*s != ';') { |
| 300 | { | ||
| 301 | printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s); | 279 | printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s); |
| 302 | return 0; | 280 | return -EINVAL; |
| 303 | } | 281 | } |
| 304 | s++; | 282 | s++; |
| 305 | } | 283 | } |
| 306 | return 1; | 284 | |
| 285 | return 0; | ||
| 307 | } | 286 | } |
| 308 | 287 | ||
| 309 | /* | 288 | /* |
| @@ -318,44 +297,58 @@ static int parse_cmdline_partitions(struct mtd_info *master, | |||
| 318 | struct mtd_part_parser_data *data) | 297 | struct mtd_part_parser_data *data) |
| 319 | { | 298 | { |
| 320 | unsigned long offset; | 299 | unsigned long offset; |
| 321 | int i; | 300 | int i, err; |
| 322 | struct cmdline_mtd_partition *part; | 301 | struct cmdline_mtd_partition *part; |
| 323 | const char *mtd_id = master->name; | 302 | const char *mtd_id = master->name; |
| 324 | 303 | ||
| 325 | /* parse command line */ | 304 | /* parse command line */ |
| 326 | if (!cmdline_parsed) | 305 | if (!cmdline_parsed) { |
| 327 | mtdpart_setup_real(cmdline); | 306 | err = mtdpart_setup_real(cmdline); |
| 307 | if (err) | ||
| 308 | return err; | ||
| 309 | } | ||
| 328 | 310 | ||
| 329 | for(part = partitions; part; part = part->next) | 311 | for (part = partitions; part; part = part->next) { |
| 330 | { | 312 | if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) { |
| 331 | if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) | 313 | for (i = 0, offset = 0; i < part->num_parts; i++) { |
| 332 | { | ||
| 333 | for(i = 0, offset = 0; i < part->num_parts; i++) | ||
| 334 | { | ||
| 335 | if (part->parts[i].offset == OFFSET_CONTINUOUS) | 314 | if (part->parts[i].offset == OFFSET_CONTINUOUS) |
| 336 | part->parts[i].offset = offset; | 315 | part->parts[i].offset = offset; |
| 337 | else | 316 | else |
| 338 | offset = part->parts[i].offset; | 317 | offset = part->parts[i].offset; |
| 318 | |||
| 339 | if (part->parts[i].size == SIZE_REMAINING) | 319 | if (part->parts[i].size == SIZE_REMAINING) |
| 340 | part->parts[i].size = master->size - offset; | 320 | part->parts[i].size = master->size - offset; |
| 341 | if (offset + part->parts[i].size > master->size) | 321 | |
| 342 | { | 322 | if (part->parts[i].size == 0) { |
| 323 | printk(KERN_WARNING ERRP | ||
| 324 | "%s: skipping zero sized partition\n", | ||
| 325 | part->mtd_id); | ||
| 326 | part->num_parts--; | ||
| 327 | memmove(&part->parts[i], | ||
| 328 | &part->parts[i + 1], | ||
| 329 | sizeof(*part->parts) * (part->num_parts - i)); | ||
| 330 | continue; | ||
| 331 | } | ||
| 332 | |||
| 333 | if (offset + part->parts[i].size > master->size) { | ||
| 343 | printk(KERN_WARNING ERRP | 334 | printk(KERN_WARNING ERRP |
| 344 | "%s: partitioning exceeds flash size, truncating\n", | 335 | "%s: partitioning exceeds flash size, truncating\n", |
| 345 | part->mtd_id); | 336 | part->mtd_id); |
| 346 | part->parts[i].size = master->size - offset; | 337 | part->parts[i].size = master->size - offset; |
| 347 | part->num_parts = i; | ||
| 348 | } | 338 | } |
| 349 | offset += part->parts[i].size; | 339 | offset += part->parts[i].size; |
| 350 | } | 340 | } |
| 341 | |||
| 351 | *pparts = kmemdup(part->parts, | 342 | *pparts = kmemdup(part->parts, |
| 352 | sizeof(*part->parts) * part->num_parts, | 343 | sizeof(*part->parts) * part->num_parts, |
| 353 | GFP_KERNEL); | 344 | GFP_KERNEL); |
| 354 | if (!*pparts) | 345 | if (!*pparts) |
| 355 | return -ENOMEM; | 346 | return -ENOMEM; |
| 347 | |||
| 356 | return part->num_parts; | 348 | return part->num_parts; |
| 357 | } | 349 | } |
| 358 | } | 350 | } |
| 351 | |||
| 359 | return 0; | 352 | return 0; |
| 360 | } | 353 | } |
| 361 | 354 | ||
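The cmdlinepart.c rework makes newpart() report failures through ERR_PTR() instead of a bare NULL, so callers can propagate -EINVAL versus -ENOMEM. A self-contained sketch of that pattern, with illustrative names:

#include <linux/err.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>

static struct mtd_partition *sketch_alloc_parts(int nparts)
{
        struct mtd_partition *p;

        if (nparts <= 0)
                return ERR_PTR(-EINVAL);        /* bad input from caller */

        p = kzalloc(nparts * sizeof(*p), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);        /* allocation failed */

        return p;
}

/* Caller side: */
static int sketch_use_parts(void)
{
        struct mtd_partition *parts = sketch_alloc_parts(4);

        if (IS_ERR(parts))
                return PTR_ERR(parts);  /* propagate the encoded errno */
        /* ... use parts ... */
        kfree(parts);
        return 0;
}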
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 4cdb2af7bf44..27f80cd8aef3 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig | |||
| @@ -97,7 +97,7 @@ config MTD_M25P80 | |||
| 97 | doesn't support the JEDEC ID instruction. | 97 | doesn't support the JEDEC ID instruction. |
| 98 | 98 | ||
| 99 | config M25PXX_USE_FAST_READ | 99 | config M25PXX_USE_FAST_READ |
| 100 | bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz" | 100 | bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz" |
| 101 | depends on MTD_M25P80 | 101 | depends on MTD_M25P80 |
| 102 | default y | 102 | default y |
| 103 | help | 103 | help |
| @@ -120,6 +120,14 @@ config MTD_SST25L | |||
| 120 | Set up your spi devices with the right board-specific platform data, | 120 | Set up your spi devices with the right board-specific platform data, |
| 121 | if you want to specify device partitioning. | 121 | if you want to specify device partitioning. |
| 122 | 122 | ||
| 123 | config MTD_BCM47XXSFLASH | ||
| 124 | tristate "R/O support for serial flash on BCMA bus" | ||
| 125 | depends on BCMA_SFLASH | ||
| 126 | help | ||
| 127 | BCMA bus can have various flash memories attached, they are | ||
| 128 | registered by bcma as platform devices. This enables driver for | ||
| 129 | serial flash memories (only read-only mode is implemented). | ||
| 130 | |||
| 123 | config MTD_SLRAM | 131 | config MTD_SLRAM |
| 124 | tristate "Uncached system RAM" | 132 | tristate "Uncached system RAM" |
| 125 | help | 133 | help |
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index a4dd1d822b6c..395733a30ef4 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
| @@ -19,5 +19,6 @@ obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o | |||
| 19 | obj-$(CONFIG_MTD_M25P80) += m25p80.o | 19 | obj-$(CONFIG_MTD_M25P80) += m25p80.o |
| 20 | obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o | 20 | obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o |
| 21 | obj-$(CONFIG_MTD_SST25L) += sst25l.o | 21 | obj-$(CONFIG_MTD_SST25L) += sst25l.o |
| 22 | obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o | ||
| 22 | 23 | ||
| 23 | CFLAGS_docg3.o += -I$(src) \ No newline at end of file | 24 | CFLAGS_docg3.o += -I$(src) \ No newline at end of file |
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c new file mode 100644 index 000000000000..2dc5a6f3fd57 --- /dev/null +++ b/drivers/mtd/devices/bcm47xxsflash.c | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/slab.h> | ||
| 4 | #include <linux/mtd/mtd.h> | ||
| 5 | #include <linux/platform_device.h> | ||
| 6 | #include <linux/bcma/bcma.h> | ||
| 7 | |||
| 8 | MODULE_LICENSE("GPL"); | ||
| 9 | MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); | ||
| 10 | |||
| 11 | static const char *probes[] = { "bcm47xxpart", NULL }; | ||
| 12 | |||
| 13 | static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
| 14 | size_t *retlen, u_char *buf) | ||
| 15 | { | ||
| 16 | struct bcma_sflash *sflash = mtd->priv; | ||
| 17 | |||
| 18 | /* Check address range */ | ||
| 19 | if ((from + len) > mtd->size) | ||
| 20 | return -EINVAL; | ||
| 21 | |||
| 22 | memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from), | ||
| 23 | len); | ||
| 24 | |||
| 25 | return len; | ||
| 26 | } | ||
| 27 | |||
| 28 | static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash, | ||
| 29 | struct mtd_info *mtd) | ||
| 30 | { | ||
| 31 | mtd->priv = sflash; | ||
| 32 | mtd->name = "bcm47xxsflash"; | ||
| 33 | mtd->owner = THIS_MODULE; | ||
| 34 | mtd->type = MTD_ROM; | ||
| 35 | mtd->size = sflash->size; | ||
| 36 | mtd->_read = bcm47xxsflash_read; | ||
| 37 | |||
| 38 | /* TODO: implement writing support and verify/change following code */ | ||
| 39 | mtd->flags = MTD_CAP_ROM; | ||
| 40 | mtd->writebufsize = mtd->writesize = 1; | ||
| 41 | } | ||
| 42 | |||
| 43 | static int bcm47xxsflash_probe(struct platform_device *pdev) | ||
| 44 | { | ||
| 45 | struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); | ||
| 46 | int err; | ||
| 47 | |||
| 48 | sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); | ||
| 49 | if (!sflash->mtd) { | ||
| 50 | err = -ENOMEM; | ||
| 51 | goto out; | ||
| 52 | } | ||
| 53 | bcm47xxsflash_fill_mtd(sflash, sflash->mtd); | ||
| 54 | |||
| 55 | err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0); | ||
| 56 | if (err) { | ||
| 57 | pr_err("Failed to register MTD device: %d\n", err); | ||
| 58 | goto err_dev_reg; | ||
| 59 | } | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | |||
| 63 | err_dev_reg: | ||
| 64 | kfree(sflash->mtd); | ||
| 65 | out: | ||
| 66 | return err; | ||
| 67 | } | ||
| 68 | |||
| 69 | static int __devexit bcm47xxsflash_remove(struct platform_device *pdev) | ||
| 70 | { | ||
| 71 | struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); | ||
| 72 | |||
| 73 | mtd_device_unregister(sflash->mtd); | ||
| 74 | kfree(sflash->mtd); | ||
| 75 | |||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct platform_driver bcma_sflash_driver = { | ||
| 80 | .remove = __devexit_p(bcm47xxsflash_remove), | ||
| 81 | .driver = { | ||
| 82 | .name = "bcma_sflash", | ||
| 83 | .owner = THIS_MODULE, | ||
| 84 | }, | ||
| 85 | }; | ||
| 86 | |||
| 87 | static int __init bcm47xxsflash_init(void) | ||
| 88 | { | ||
| 89 | int err; | ||
| 90 | |||
| 91 | err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe); | ||
| 92 | if (err) | ||
| 93 | pr_err("Failed to register BCMA serial flash driver: %d\n", | ||
| 94 | err); | ||
| 95 | |||
| 96 | return err; | ||
| 97 | } | ||
| 98 | |||
| 99 | static void __exit bcm47xxsflash_exit(void) | ||
| 100 | { | ||
| 101 | platform_driver_unregister(&bcma_sflash_driver); | ||
| 102 | } | ||
| 103 | |||
| 104 | module_init(bcm47xxsflash_init); | ||
| 105 | module_exit(bcm47xxsflash_exit); | ||
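bcm47xxsflash registers with platform_driver_probe() rather than platform_driver_register(): the bcma_sflash platform device either already exists when the module loads or never will, so no probe path needs to be kept around for hotplug. A hedged sketch of that registration style, where all names except the "bcma_sflash" device name are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>

static int sketch_sflash_probe(struct platform_device *pdev)
{
        return 0;       /* the real probe allocates and registers the MTD */
}

static struct platform_driver sketch_sflash_driver = {
        .driver = {
                .name   = "bcma_sflash",
                .owner  = THIS_MODULE,
        },
};

static int __init sketch_sflash_init(void)
{
        /* Binds once to already-registered matching devices; if none is
         * found the call fails and later hotplugged devices are ignored. */
        return platform_driver_probe(&sketch_sflash_driver,
                                     sketch_sflash_probe);
}
module_init(sketch_sflash_init);

static void __exit sketch_sflash_exit(void)
{
        platform_driver_unregister(&sketch_sflash_driver);
}
module_exit(sketch_sflash_exit);

MODULE_LICENSE("GPL");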
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c index 04eb2e4aa50f..4f2220ad8924 100644 --- a/drivers/mtd/devices/doc2001plus.c +++ b/drivers/mtd/devices/doc2001plus.c | |||
| @@ -659,23 +659,15 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
| 659 | #ifdef ECC_DEBUG | 659 | #ifdef ECC_DEBUG |
| 660 | printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", | 660 | printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", |
| 661 | __FILE__, __LINE__, (int)from); | 661 | __FILE__, __LINE__, (int)from); |
| 662 | printk(" syndrome= %02x:%02x:%02x:%02x:%02x:" | 662 | printk(" syndrome= %*phC\n", 6, syndrome); |
| 663 | "%02x\n", | 663 | printk(" eccbuf= %*phC\n", 6, eccbuf); |
| 664 | syndrome[0], syndrome[1], syndrome[2], | ||
| 665 | syndrome[3], syndrome[4], syndrome[5]); | ||
| 666 | printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:" | ||
| 667 | "%02x\n", | ||
| 668 | eccbuf[0], eccbuf[1], eccbuf[2], | ||
| 669 | eccbuf[3], eccbuf[4], eccbuf[5]); | ||
| 670 | #endif | 664 | #endif |
| 671 | ret = -EIO; | 665 | ret = -EIO; |
| 672 | } | 666 | } |
| 673 | } | 667 | } |
| 674 | 668 | ||
| 675 | #ifdef PSYCHO_DEBUG | 669 | #ifdef PSYCHO_DEBUG |
| 676 | printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", | 670 | printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf); |
| 677 | (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], | ||
| 678 | eccbuf[4], eccbuf[5]); | ||
| 679 | #endif | 671 | #endif |
| 680 | /* disable the ECC engine */ | 672 | /* disable the ECC engine */ |
| 681 | WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); | 673 | WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); |
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index f70854d728fe..d34d83b8f9c2 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c | |||
| @@ -919,19 +919,13 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, | |||
| 919 | eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); | 919 | eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); |
| 920 | 920 | ||
| 921 | if (nboob >= DOC_LAYOUT_OOB_SIZE) { | 921 | if (nboob >= DOC_LAYOUT_OOB_SIZE) { |
| 922 | doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | 922 | doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf); |
| 923 | oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3], | ||
| 924 | oobbuf[4], oobbuf[5], oobbuf[6]); | ||
| 925 | doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]); | 923 | doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]); |
| 926 | doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | 924 | doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8); |
| 927 | oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11], | ||
| 928 | oobbuf[12], oobbuf[13], oobbuf[14]); | ||
| 929 | doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]); | 925 | doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]); |
| 930 | } | 926 | } |
| 931 | doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); | 927 | doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); |
| 932 | doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | 928 | doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc); |
| 933 | hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4], | ||
| 934 | hwecc[5], hwecc[6]); | ||
| 935 | 929 | ||
| 936 | ret = -EIO; | 930 | ret = -EIO; |
| 937 | if (is_prot_seq_error(docg3)) | 931 | if (is_prot_seq_error(docg3)) |
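The doc2001plus.c and docg3.c hunks replace hand-rolled hex dumps with the kernel's %*ph printk extension, where the field width carries the byte count and the buffer pointer follows; the C variant separates bytes with colons. A small illustrative sketch with a made-up buffer:

#include <linux/kernel.h>
#include <linux/printk.h>

static void sketch_dump_ecc(void)
{
        u8 ecc[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x42 };

        /* width = number of bytes to print, pointer comes next */
        printk(KERN_DEBUG "ecc= %*ph\n", 6, ecc);   /* de ad be ef 00 42 */
        printk(KERN_DEBUG "ecc= %*phC\n", 6, ecc);  /* de:ad:be:ef:00:42 */
}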
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 5d0d68c3fe27..03838bab1f59 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
| @@ -633,11 +633,14 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 633 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, | 633 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
| 634 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, | 634 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
| 635 | 635 | ||
| 636 | { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, | ||
| 637 | |||
| 636 | /* EON -- en25xxx */ | 638 | /* EON -- en25xxx */ |
| 637 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, | 639 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, |
| 638 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, | 640 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
| 639 | { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, | 641 | { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, |
| 640 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, | 642 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
| 643 | { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, | ||
| 641 | 644 | ||
| 642 | /* Everspin */ | 645 | /* Everspin */ |
| 643 | { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, | 646 | { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, |
| @@ -646,6 +649,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 646 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, | 649 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, |
| 647 | { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, | 650 | { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, |
| 648 | { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, | 651 | { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, |
| 652 | { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, | ||
| 649 | 653 | ||
| 650 | /* Macronix */ | 654 | /* Macronix */ |
| 651 | { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, | 655 | { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, |
| @@ -659,15 +663,15 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 659 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, | 663 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, |
| 660 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, | 664 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, |
| 661 | 665 | ||
| 666 | /* Micron */ | ||
| 667 | { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, | ||
| 668 | { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, | ||
| 669 | |||
| 662 | /* Spansion -- single (large) sector size only, at least | 670 | /* Spansion -- single (large) sector size only, at least |
| 663 | * for the chips listed here (without boot sectors). | 671 | * for the chips listed here (without boot sectors). |
| 664 | */ | 672 | */ |
| 665 | { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, | 673 | { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) }, |
| 666 | { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, | 674 | { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) }, |
| 667 | { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, | ||
| 668 | { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, | ||
| 669 | { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) }, | ||
| 670 | { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, | ||
| 671 | { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, | 675 | { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, |
| 672 | { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) }, | 676 | { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) }, |
| 673 | { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) }, | 677 | { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) }, |
| @@ -676,6 +680,11 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 676 | { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, | 680 | { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, |
| 677 | { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, | 681 | { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, |
| 678 | { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, | 682 | { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, |
| 683 | { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, | ||
| 684 | { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, | ||
| 685 | { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, | ||
| 686 | { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, | ||
| 687 | { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, | ||
| 679 | { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, | 688 | { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, |
| 680 | { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, | 689 | { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
| 681 | 690 | ||
| @@ -699,6 +708,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 699 | { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, | 708 | { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, |
| 700 | { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, | 709 | { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, |
| 701 | { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, | 710 | { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, |
| 711 | { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) }, | ||
| 702 | 712 | ||
| 703 | { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, | 713 | { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, |
| 704 | { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, | 714 | { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, |
| @@ -714,6 +724,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 714 | { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, | 724 | { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, |
| 715 | { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, | 725 | { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, |
| 716 | 726 | ||
| 727 | { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) }, | ||
| 717 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, | 728 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
| 718 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, | 729 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
| 719 | 730 | ||
| @@ -730,6 +741,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
| 730 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, | 741 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, |
| 731 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, | 742 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, |
| 732 | { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, | 743 | { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, |
| 744 | { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) }, | ||
| 733 | { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, | 745 | { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, |
| 734 | { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, | 746 | { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
| 735 | { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, | 747 | { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, |
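The m25p80 table rows above all go through the file's INFO() helper; as far as I recall its fields are (jedec_id, ext_id, sector_size, n_sectors, flags), so adding a part is a one-line entry. A hedged, hypothetical example entry (the part name and JEDEC ID are made up, not a real chip):

	/* 128 sectors x 64 KiB = 8 MiB part that also supports 4 KiB erase */
	{ "example-8m", INFO(0x123456, 0, 64 * 1024, 128, SECT_4K) },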
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 67960362681e..dcc3c9511530 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 27 | #include <linux/param.h> | 27 | #include <linux/param.h> |
| 28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
| 29 | #include <linux/pm.h> | ||
| 29 | #include <linux/mtd/mtd.h> | 30 | #include <linux/mtd/mtd.h> |
| 30 | #include <linux/mtd/partitions.h> | 31 | #include <linux/mtd/partitions.h> |
| 31 | #include <linux/mtd/spear_smi.h> | 32 | #include <linux/mtd/spear_smi.h> |
| @@ -240,8 +241,8 @@ static int spear_smi_read_sr(struct spear_smi *dev, u32 bank) | |||
| 240 | /* copy dev->status (lower 16 bits) in order to release lock */ | 241 | /* copy dev->status (lower 16 bits) in order to release lock */ |
| 241 | if (ret > 0) | 242 | if (ret > 0) |
| 242 | ret = dev->status & 0xffff; | 243 | ret = dev->status & 0xffff; |
| 243 | else | 244 | else if (ret == 0) |
| 244 | ret = -EIO; | 245 | ret = -ETIMEDOUT; |
| 245 | 246 | ||
| 246 | /* restore the ctrl regs state */ | 247 | /* restore the ctrl regs state */ |
| 247 | writel(ctrlreg1, dev->io_base + SMI_CR1); | 248 | writel(ctrlreg1, dev->io_base + SMI_CR1); |
| @@ -269,16 +270,19 @@ static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank, | |||
| 269 | finish = jiffies + timeout; | 270 | finish = jiffies + timeout; |
| 270 | do { | 271 | do { |
| 271 | status = spear_smi_read_sr(dev, bank); | 272 | status = spear_smi_read_sr(dev, bank); |
| 272 | if (status < 0) | 273 | if (status < 0) { |
| 273 | continue; /* try till timeout */ | 274 | if (status == -ETIMEDOUT) |
| 274 | else if (!(status & SR_WIP)) | 275 | continue; /* try till finish */ |
| 276 | return status; | ||
| 277 | } else if (!(status & SR_WIP)) { | ||
| 275 | return 0; | 278 | return 0; |
| 279 | } | ||
| 276 | 280 | ||
| 277 | cond_resched(); | 281 | cond_resched(); |
| 278 | } while (!time_after_eq(jiffies, finish)); | 282 | } while (!time_after_eq(jiffies, finish)); |
| 279 | 283 | ||
| 280 | dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n"); | 284 | dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n"); |
| 281 | return status; | 285 | return -EBUSY; |
| 282 | } | 286 | } |
| 283 | 287 | ||
| 284 | /** | 288 | /** |
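The error-path changes in the two spear_smi hunks above follow from wait_event_interruptible_timeout()'s three-way return value: 0 when the timeout expired, a negative value (-ERESTARTSYS) when a signal interrupted the wait, and the remaining jiffies (> 0) when the condition became true. A short sketch of the pattern the driver now applies (names follow the driver; the error code chosen for the timeout case varies per call site):

	ret = wait_event_interruptible_timeout(dev->cmd_complete,
					       dev->status & TFF,
					       SMI_CMD_TIMEOUT);
	if (ret == 0)		/* timed out: condition never became true */
		ret = -ETIMEDOUT;
	else if (ret > 0)	/* condition met before the timeout */
		ret = 0;
	/* ret < 0: interrupted by a signal, propagate unchanged */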
| @@ -335,6 +339,9 @@ static void spear_smi_hw_init(struct spear_smi *dev) | |||
| 335 | val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8); | 339 | val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8); |
| 336 | 340 | ||
| 337 | mutex_lock(&dev->lock); | 341 | mutex_lock(&dev->lock); |
| 342 | /* clear all interrupt conditions */ | ||
| 343 | writel(0, dev->io_base + SMI_SR); | ||
| 344 | |||
| 338 | writel(val, dev->io_base + SMI_CR1); | 345 | writel(val, dev->io_base + SMI_CR1); |
| 339 | mutex_unlock(&dev->lock); | 346 | mutex_unlock(&dev->lock); |
| 340 | } | 347 | } |
| @@ -391,11 +398,11 @@ static int spear_smi_write_enable(struct spear_smi *dev, u32 bank) | |||
| 391 | writel(ctrlreg1, dev->io_base + SMI_CR1); | 398 | writel(ctrlreg1, dev->io_base + SMI_CR1); |
| 392 | writel(0, dev->io_base + SMI_CR2); | 399 | writel(0, dev->io_base + SMI_CR2); |
| 393 | 400 | ||
| 394 | if (ret <= 0) { | 401 | if (ret == 0) { |
| 395 | ret = -EIO; | 402 | ret = -EIO; |
| 396 | dev_err(&dev->pdev->dev, | 403 | dev_err(&dev->pdev->dev, |
| 397 | "smi controller failed on write enable\n"); | 404 | "smi controller failed on write enable\n"); |
| 398 | } else { | 405 | } else if (ret > 0) { |
| 399 | /* check whether write mode status is set for required bank */ | 406 | /* check whether write mode status is set for required bank */ |
| 400 | if (dev->status & (1 << (bank + WM_SHIFT))) | 407 | if (dev->status & (1 << (bank + WM_SHIFT))) |
| 401 | ret = 0; | 408 | ret = 0; |
| @@ -462,10 +469,10 @@ static int spear_smi_erase_sector(struct spear_smi *dev, | |||
| 462 | ret = wait_event_interruptible_timeout(dev->cmd_complete, | 469 | ret = wait_event_interruptible_timeout(dev->cmd_complete, |
| 463 | dev->status & TFF, SMI_CMD_TIMEOUT); | 470 | dev->status & TFF, SMI_CMD_TIMEOUT); |
| 464 | 471 | ||
| 465 | if (ret <= 0) { | 472 | if (ret == 0) { |
| 466 | ret = -EIO; | 473 | ret = -EIO; |
| 467 | dev_err(&dev->pdev->dev, "sector erase failed\n"); | 474 | dev_err(&dev->pdev->dev, "sector erase failed\n"); |
| 468 | } else | 475 | } else if (ret > 0) |
| 469 | ret = 0; /* success */ | 476 | ret = 0; /* success */ |
| 470 | 477 | ||
| 471 | /* restore ctrl regs */ | 478 | /* restore ctrl regs */ |
| @@ -820,7 +827,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev, | |||
| 820 | if (!flash_info) | 827 | if (!flash_info) |
| 821 | return -ENODEV; | 828 | return -ENODEV; |
| 822 | 829 | ||
| 823 | flash = kzalloc(sizeof(*flash), GFP_ATOMIC); | 830 | flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC); |
| 824 | if (!flash) | 831 | if (!flash) |
| 825 | return -ENOMEM; | 832 | return -ENOMEM; |
| 826 | flash->bank = bank; | 833 | flash->bank = bank; |
| @@ -831,15 +838,13 @@ static int spear_smi_setup_banks(struct platform_device *pdev, | |||
| 831 | flash_index = spear_smi_probe_flash(dev, bank); | 838 | flash_index = spear_smi_probe_flash(dev, bank); |
| 832 | if (flash_index < 0) { | 839 | if (flash_index < 0) { |
| 833 | dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank); | 840 | dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank); |
| 834 | ret = flash_index; | 841 | return flash_index; |
| 835 | goto err_probe; | ||
| 836 | } | 842 | } |
| 837 | /* map the memory for nor flash chip */ | 843 | /* map the memory for nor flash chip */ |
| 838 | flash->base_addr = ioremap(flash_info->mem_base, flash_info->size); | 844 | flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base, |
| 839 | if (!flash->base_addr) { | 845 | flash_info->size); |
| 840 | ret = -EIO; | 846 | if (!flash->base_addr) |
| 841 | goto err_probe; | 847 | return -EIO; |
| 842 | } | ||
| 843 | 848 | ||
| 844 | dev->flash[bank] = flash; | 849 | dev->flash[bank] = flash; |
| 845 | flash->mtd.priv = dev; | 850 | flash->mtd.priv = dev; |
| @@ -881,17 +886,10 @@ static int spear_smi_setup_banks(struct platform_device *pdev, | |||
| 881 | count); | 886 | count); |
| 882 | if (ret) { | 887 | if (ret) { |
| 883 | dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret); | 888 | dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret); |
| 884 | goto err_map; | 889 | return ret; |
| 885 | } | 890 | } |
| 886 | 891 | ||
| 887 | return 0; | 892 | return 0; |
| 888 | |||
| 889 | err_map: | ||
| 890 | iounmap(flash->base_addr); | ||
| 891 | |||
| 892 | err_probe: | ||
| 893 | kfree(flash); | ||
| 894 | return ret; | ||
| 895 | } | 893 | } |
| 896 | 894 | ||
| 897 | /** | 895 | /** |
| @@ -928,20 +926,13 @@ static int __devinit spear_smi_probe(struct platform_device *pdev) | |||
| 928 | } | 926 | } |
| 929 | } else { | 927 | } else { |
| 930 | pdata = dev_get_platdata(&pdev->dev); | 928 | pdata = dev_get_platdata(&pdev->dev); |
| 931 | if (pdata < 0) { | 929 | if (!pdata) { |
| 932 | ret = -ENODEV; | 930 | ret = -ENODEV; |
| 933 | dev_err(&pdev->dev, "no platform data\n"); | 931 | dev_err(&pdev->dev, "no platform data\n"); |
| 934 | goto err; | 932 | goto err; |
| 935 | } | 933 | } |
| 936 | } | 934 | } |
| 937 | 935 | ||
| 938 | smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 939 | if (!smi_base) { | ||
| 940 | ret = -ENODEV; | ||
| 941 | dev_err(&pdev->dev, "invalid smi base address\n"); | ||
| 942 | goto err; | ||
| 943 | } | ||
| 944 | |||
| 945 | irq = platform_get_irq(pdev, 0); | 936 | irq = platform_get_irq(pdev, 0); |
| 946 | if (irq < 0) { | 937 | if (irq < 0) { |
| 947 | ret = -ENODEV; | 938 | ret = -ENODEV; |
| @@ -949,32 +940,26 @@ static int __devinit spear_smi_probe(struct platform_device *pdev) | |||
| 949 | goto err; | 940 | goto err; |
| 950 | } | 941 | } |
| 951 | 942 | ||
| 952 | dev = kzalloc(sizeof(*dev), GFP_ATOMIC); | 943 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC); |
| 953 | if (!dev) { | 944 | if (!dev) { |
| 954 | ret = -ENOMEM; | 945 | ret = -ENOMEM; |
| 955 | dev_err(&pdev->dev, "mem alloc fail\n"); | 946 | dev_err(&pdev->dev, "mem alloc fail\n"); |
| 956 | goto err; | 947 | goto err; |
| 957 | } | 948 | } |
| 958 | 949 | ||
| 959 | smi_base = request_mem_region(smi_base->start, resource_size(smi_base), | 950 | smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 960 | pdev->name); | ||
| 961 | if (!smi_base) { | ||
| 962 | ret = -EBUSY; | ||
| 963 | dev_err(&pdev->dev, "request mem region fail\n"); | ||
| 964 | goto err_mem; | ||
| 965 | } | ||
| 966 | 951 | ||
| 967 | dev->io_base = ioremap(smi_base->start, resource_size(smi_base)); | 952 | dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base); |
| 968 | if (!dev->io_base) { | 953 | if (!dev->io_base) { |
| 969 | ret = -EIO; | 954 | ret = -EIO; |
| 970 | dev_err(&pdev->dev, "ioremap fail\n"); | 955 | dev_err(&pdev->dev, "devm_request_and_ioremap fail\n"); |
| 971 | goto err_ioremap; | 956 | goto err; |
| 972 | } | 957 | } |
| 973 | 958 | ||
| 974 | dev->pdev = pdev; | 959 | dev->pdev = pdev; |
| 975 | dev->clk_rate = pdata->clk_rate; | 960 | dev->clk_rate = pdata->clk_rate; |
| 976 | 961 | ||
| 977 | if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ) | 962 | if (dev->clk_rate > SMI_MAX_CLOCK_FREQ) |
| 978 | dev->clk_rate = SMI_MAX_CLOCK_FREQ; | 963 | dev->clk_rate = SMI_MAX_CLOCK_FREQ; |
| 979 | 964 | ||
| 980 | dev->num_flashes = pdata->num_flashes; | 965 | dev->num_flashes = pdata->num_flashes; |
| @@ -984,17 +969,18 @@ static int __devinit spear_smi_probe(struct platform_device *pdev) | |||
| 984 | dev->num_flashes = MAX_NUM_FLASH_CHIP; | 969 | dev->num_flashes = MAX_NUM_FLASH_CHIP; |
| 985 | } | 970 | } |
| 986 | 971 | ||
| 987 | dev->clk = clk_get(&pdev->dev, NULL); | 972 | dev->clk = devm_clk_get(&pdev->dev, NULL); |
| 988 | if (IS_ERR(dev->clk)) { | 973 | if (IS_ERR(dev->clk)) { |
| 989 | ret = PTR_ERR(dev->clk); | 974 | ret = PTR_ERR(dev->clk); |
| 990 | goto err_clk; | 975 | goto err; |
| 991 | } | 976 | } |
| 992 | 977 | ||
| 993 | ret = clk_prepare_enable(dev->clk); | 978 | ret = clk_prepare_enable(dev->clk); |
| 994 | if (ret) | 979 | if (ret) |
| 995 | goto err_clk_prepare_enable; | 980 | goto err; |
| 996 | 981 | ||
| 997 | ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); | 982 | ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0, |
| 983 | pdev->name, dev); | ||
| 998 | if (ret) { | 984 | if (ret) { |
| 999 | dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n"); | 985 | dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n"); |
| 1000 | goto err_irq; | 986 | goto err_irq; |
| @@ -1017,18 +1003,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev) | |||
| 1017 | return 0; | 1003 | return 0; |
| 1018 | 1004 | ||
| 1019 | err_bank_setup: | 1005 | err_bank_setup: |
| 1020 | free_irq(irq, dev); | ||
| 1021 | platform_set_drvdata(pdev, NULL); | 1006 | platform_set_drvdata(pdev, NULL); |
| 1022 | err_irq: | 1007 | err_irq: |
| 1023 | clk_disable_unprepare(dev->clk); | 1008 | clk_disable_unprepare(dev->clk); |
| 1024 | err_clk_prepare_enable: | ||
| 1025 | clk_put(dev->clk); | ||
| 1026 | err_clk: | ||
| 1027 | iounmap(dev->io_base); | ||
| 1028 | err_ioremap: | ||
| 1029 | release_mem_region(smi_base->start, resource_size(smi_base)); | ||
| 1030 | err_mem: | ||
| 1031 | kfree(dev); | ||
| 1032 | err: | 1009 | err: |
| 1033 | return ret; | 1010 | return ret; |
| 1034 | } | 1011 | } |
| @@ -1042,11 +1019,8 @@ err: | |||
| 1042 | static int __devexit spear_smi_remove(struct platform_device *pdev) | 1019 | static int __devexit spear_smi_remove(struct platform_device *pdev) |
| 1043 | { | 1020 | { |
| 1044 | struct spear_smi *dev; | 1021 | struct spear_smi *dev; |
| 1045 | struct spear_smi_plat_data *pdata; | ||
| 1046 | struct spear_snor_flash *flash; | 1022 | struct spear_snor_flash *flash; |
| 1047 | struct resource *smi_base; | 1023 | int ret, i; |
| 1048 | int ret; | ||
| 1049 | int i, irq; | ||
| 1050 | 1024 | ||
| 1051 | dev = platform_get_drvdata(pdev); | 1025 | dev = platform_get_drvdata(pdev); |
| 1052 | if (!dev) { | 1026 | if (!dev) { |
| @@ -1054,8 +1028,6 @@ static int __devexit spear_smi_remove(struct platform_device *pdev) | |||
| 1054 | return -ENODEV; | 1028 | return -ENODEV; |
| 1055 | } | 1029 | } |
| 1056 | 1030 | ||
| 1057 | pdata = dev_get_platdata(&pdev->dev); | ||
| 1058 | |||
| 1059 | /* clean up for all nor flash */ | 1031 | /* clean up for all nor flash */ |
| 1060 | for (i = 0; i < dev->num_flashes; i++) { | 1032 | for (i = 0; i < dev->num_flashes; i++) { |
| 1061 | flash = dev->flash[i]; | 1033 | flash = dev->flash[i]; |
| @@ -1066,49 +1038,41 @@ static int __devexit spear_smi_remove(struct platform_device *pdev) | |||
| 1066 | ret = mtd_device_unregister(&flash->mtd); | 1038 | ret = mtd_device_unregister(&flash->mtd); |
| 1067 | if (ret) | 1039 | if (ret) |
| 1068 | dev_err(&pdev->dev, "error removing mtd\n"); | 1040 | dev_err(&pdev->dev, "error removing mtd\n"); |
| 1069 | |||
| 1070 | iounmap(flash->base_addr); | ||
| 1071 | kfree(flash); | ||
| 1072 | } | 1041 | } |
| 1073 | 1042 | ||
| 1074 | irq = platform_get_irq(pdev, 0); | ||
| 1075 | free_irq(irq, dev); | ||
| 1076 | |||
| 1077 | clk_disable_unprepare(dev->clk); | 1043 | clk_disable_unprepare(dev->clk); |
| 1078 | clk_put(dev->clk); | ||
| 1079 | iounmap(dev->io_base); | ||
| 1080 | kfree(dev); | ||
| 1081 | |||
| 1082 | smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1083 | release_mem_region(smi_base->start, resource_size(smi_base)); | ||
| 1084 | platform_set_drvdata(pdev, NULL); | 1044 | platform_set_drvdata(pdev, NULL); |
| 1085 | 1045 | ||
| 1086 | return 0; | 1046 | return 0; |
| 1087 | } | 1047 | } |
| 1088 | 1048 | ||
| 1089 | int spear_smi_suspend(struct platform_device *pdev, pm_message_t state) | 1049 | #ifdef CONFIG_PM |
| 1050 | static int spear_smi_suspend(struct device *dev) | ||
| 1090 | { | 1051 | { |
| 1091 | struct spear_smi *dev = platform_get_drvdata(pdev); | 1052 | struct spear_smi *sdev = dev_get_drvdata(dev); |
| 1092 | 1053 | ||
| 1093 | if (dev && dev->clk) | 1054 | if (sdev && sdev->clk) |
| 1094 | clk_disable_unprepare(dev->clk); | 1055 | clk_disable_unprepare(sdev->clk); |
| 1095 | 1056 | ||
| 1096 | return 0; | 1057 | return 0; |
| 1097 | } | 1058 | } |
| 1098 | 1059 | ||
| 1099 | int spear_smi_resume(struct platform_device *pdev) | 1060 | static int spear_smi_resume(struct device *dev) |
| 1100 | { | 1061 | { |
| 1101 | struct spear_smi *dev = platform_get_drvdata(pdev); | 1062 | struct spear_smi *sdev = dev_get_drvdata(dev); |
| 1102 | int ret = -EPERM; | 1063 | int ret = -EPERM; |
| 1103 | 1064 | ||
| 1104 | if (dev && dev->clk) | 1065 | if (sdev && sdev->clk) |
| 1105 | ret = clk_prepare_enable(dev->clk); | 1066 | ret = clk_prepare_enable(sdev->clk); |
| 1106 | 1067 | ||
| 1107 | if (!ret) | 1068 | if (!ret) |
| 1108 | spear_smi_hw_init(dev); | 1069 | spear_smi_hw_init(sdev); |
| 1109 | return ret; | 1070 | return ret; |
| 1110 | } | 1071 | } |
| 1111 | 1072 | ||
| 1073 | static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume); | ||
| 1074 | #endif | ||
| 1075 | |||
| 1112 | #ifdef CONFIG_OF | 1076 | #ifdef CONFIG_OF |
| 1113 | static const struct of_device_id spear_smi_id_table[] = { | 1077 | static const struct of_device_id spear_smi_id_table[] = { |
| 1114 | { .compatible = "st,spear600-smi" }, | 1078 | { .compatible = "st,spear600-smi" }, |
| @@ -1123,11 +1087,12 @@ static struct platform_driver spear_smi_driver = { | |||
| 1123 | .bus = &platform_bus_type, | 1087 | .bus = &platform_bus_type, |
| 1124 | .owner = THIS_MODULE, | 1088 | .owner = THIS_MODULE, |
| 1125 | .of_match_table = of_match_ptr(spear_smi_id_table), | 1089 | .of_match_table = of_match_ptr(spear_smi_id_table), |
| 1090 | #ifdef CONFIG_PM | ||
| 1091 | .pm = &spear_smi_pm_ops, | ||
| 1092 | #endif | ||
| 1126 | }, | 1093 | }, |
| 1127 | .probe = spear_smi_probe, | 1094 | .probe = spear_smi_probe, |
| 1128 | .remove = __devexit_p(spear_smi_remove), | 1095 | .remove = __devexit_p(spear_smi_remove), |
| 1129 | .suspend = spear_smi_suspend, | ||
| 1130 | .resume = spear_smi_resume, | ||
| 1131 | }; | 1096 | }; |
| 1132 | 1097 | ||
| 1133 | static int spear_smi_init(void) | 1098 | static int spear_smi_init(void) |
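Most of the spear_smi.c diff is a conversion from manually managed resources (kzalloc/ioremap/request_irq/clk_get plus a ladder of error labels) to device-managed devm_* allocations, which the driver core frees automatically on probe failure or device removal, and from the legacy platform suspend/resume hooks to dev_pm_ops built with SIMPLE_DEV_PM_OPS. A condensed sketch of the resulting probe shape, with error handling trimmed (function and struct names follow the driver; this is not the literal probe from the patch):

static int spear_smi_probe(struct platform_device *pdev)
{
	struct spear_smi *dev;
	struct resource *res;
	int irq, ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->io_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!dev->io_base)
		return -EIO;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk))
		return PTR_ERR(dev->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
			       pdev->name, dev);
	if (ret)
		return ret;

	/* ...clock enable, hw init, bank setup; nothing above needs
	 * explicit teardown, so the old error labels disappear... */
	return 0;
}

The remove path shrinks for the same reason: only the MTD unregister, the clock disable and the drvdata reset remain.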
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5ba2458e799a..2e47c2ed0a2d 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
| @@ -373,7 +373,7 @@ config MTD_FORTUNET | |||
| 373 | have such a board, say 'Y'. | 373 | have such a board, say 'Y'. |
| 374 | 374 | ||
| 375 | config MTD_AUTCPU12 | 375 | config MTD_AUTCPU12 |
| 376 | tristate "NV-RAM mapping AUTCPU12 board" | 376 | bool "NV-RAM mapping AUTCPU12 board" |
| 377 | depends on ARCH_AUTCPU12 | 377 | depends on ARCH_AUTCPU12 |
| 378 | help | 378 | help |
| 379 | This enables access to the NV-RAM on autronix autcpu12 board. | 379 | This enables access to the NV-RAM on autronix autcpu12 board. |
| @@ -443,22 +443,10 @@ config MTD_GPIO_ADDR | |||
| 443 | 443 | ||
| 444 | config MTD_UCLINUX | 444 | config MTD_UCLINUX |
| 445 | bool "Generic uClinux RAM/ROM filesystem support" | 445 | bool "Generic uClinux RAM/ROM filesystem support" |
| 446 | depends on MTD_RAM=y && !MMU | 446 | depends on MTD_RAM=y && (!MMU || COLDFIRE) |
| 447 | help | 447 | help |
| 448 | Map driver to support image based filesystems for uClinux. | 448 | Map driver to support image based filesystems for uClinux. |
| 449 | 449 | ||
| 450 | config MTD_WRSBC8260 | ||
| 451 | tristate "Map driver for WindRiver PowerQUICC II MPC82xx board" | ||
| 452 | depends on (SBC82xx || SBC8560) | ||
| 453 | select MTD_MAP_BANK_WIDTH_4 | ||
| 454 | select MTD_MAP_BANK_WIDTH_1 | ||
| 455 | select MTD_CFI_I1 | ||
| 456 | select MTD_CFI_I4 | ||
| 457 | help | ||
| 458 | Map driver for WindRiver PowerQUICC II MPC82xx board. Drives | ||
| 459 | all three flash regions on CS0, CS1 and CS6 if they are configured | ||
| 460 | correctly by the boot loader. | ||
| 461 | |||
| 462 | config MTD_DMV182 | 450 | config MTD_DMV182 |
| 463 | tristate "Map driver for Dy-4 SVME/DMV-182 board." | 451 | tristate "Map driver for Dy-4 SVME/DMV-182 board." |
| 464 | depends on DMV182 | 452 | depends on DMV182 |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 68a9a91d344f..deb43e9a1e7f 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
| @@ -47,7 +47,6 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o | |||
| 47 | obj-$(CONFIG_MTD_H720X) += h720x-flash.o | 47 | obj-$(CONFIG_MTD_H720X) += h720x-flash.o |
| 48 | obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o | 48 | obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o |
| 49 | obj-$(CONFIG_MTD_IXP2000) += ixp2000.o | 49 | obj-$(CONFIG_MTD_IXP2000) += ixp2000.o |
| 50 | obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o | ||
| 51 | obj-$(CONFIG_MTD_DMV182) += dmv182.o | 50 | obj-$(CONFIG_MTD_DMV182) += dmv182.o |
| 52 | obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o | 51 | obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o |
| 53 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o | 52 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o |
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c index e5bfd0e093bb..76fb594bb1d9 100644 --- a/drivers/mtd/maps/autcpu12-nvram.c +++ b/drivers/mtd/maps/autcpu12-nvram.c | |||
| @@ -15,43 +15,54 @@ | |||
| 15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 18 | * | ||
| 19 | */ | 18 | */ |
| 19 | #include <linux/sizes.h> | ||
| 20 | 20 | ||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 23 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 24 | #include <linux/ioport.h> | ||
| 25 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 26 | #include <asm/io.h> | 24 | #include <linux/device.h> |
| 27 | #include <asm/sizes.h> | 25 | #include <linux/module.h> |
| 28 | #include <mach/hardware.h> | 26 | #include <linux/platform_device.h> |
| 29 | #include <mach/autcpu12.h> | 27 | |
| 30 | #include <linux/mtd/mtd.h> | 28 | #include <linux/mtd/mtd.h> |
| 31 | #include <linux/mtd/map.h> | 29 | #include <linux/mtd/map.h> |
| 32 | #include <linux/mtd/partitions.h> | ||
| 33 | |||
| 34 | |||
| 35 | static struct mtd_info *sram_mtd; | ||
| 36 | 30 | ||
| 37 | struct map_info autcpu12_sram_map = { | 31 | struct autcpu12_nvram_priv { |
| 38 | .name = "SRAM", | 32 | struct mtd_info *mtd; |
| 39 | .size = 32768, | 33 | struct map_info map; |
| 40 | .bankwidth = 4, | ||
| 41 | .phys = 0x12000000, | ||
| 42 | }; | 34 | }; |
| 43 | 35 | ||
| 44 | static int __init init_autcpu12_sram (void) | 36 | static int __devinit autcpu12_nvram_probe(struct platform_device *pdev) |
| 45 | { | 37 | { |
| 46 | int err, save0, save1; | 38 | map_word tmp, save0, save1; |
| 39 | struct resource *res; | ||
| 40 | struct autcpu12_nvram_priv *priv; | ||
| 47 | 41 | ||
| 48 | autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K); | 42 | priv = devm_kzalloc(&pdev->dev, |
| 49 | if (!autcpu12_sram_map.virt) { | 43 | sizeof(struct autcpu12_nvram_priv), GFP_KERNEL); |
| 50 | printk("Failed to ioremap autcpu12 NV-RAM space\n"); | 44 | if (!priv) |
| 51 | err = -EIO; | 45 | return -ENOMEM; |
| 52 | goto out; | 46 | |
| 47 | platform_set_drvdata(pdev, priv); | ||
| 48 | |||
| 49 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 50 | if (!res) { | ||
| 51 | dev_err(&pdev->dev, "failed to get memory resource\n"); | ||
| 52 | return -ENOENT; | ||
| 53 | } | ||
| 54 | |||
| 55 | priv->map.bankwidth = 4; | ||
| 56 | priv->map.phys = res->start; | ||
| 57 | priv->map.size = resource_size(res); | ||
| 58 | priv->map.virt = devm_request_and_ioremap(&pdev->dev, res); | ||
| 59 | strcpy((char *)priv->map.name, res->name); | ||
| 60 | if (!priv->map.virt) { | ||
| 61 | dev_err(&pdev->dev, "failed to remap mem resource\n"); | ||
| 62 | return -EBUSY; | ||
| 53 | } | 63 | } |
| 54 | simple_map_init(&autcpu_sram_map); | 64 | |
| 65 | simple_map_init(&priv->map); | ||
| 55 | 66 | ||
| 56 | /* | 67 | /* |
| 57 | * Check for 32K/128K | 68 | * Check for 32K/128K |
| @@ -61,65 +72,59 @@ static int __init init_autcpu12_sram (void) | |||
| 61 | * Read and check result on ofs 0x0 | 72 | * Read and check result on ofs 0x0 |
| 62 | * Restore contents | 73 | * Restore contents |
| 63 | */ | 74 | */ |
| 64 | save0 = map_read32(&autcpu12_sram_map,0); | 75 | save0 = map_read(&priv->map, 0); |
| 65 | save1 = map_read32(&autcpu12_sram_map,0x10000); | 76 | save1 = map_read(&priv->map, 0x10000); |
| 66 | map_write32(&autcpu12_sram_map,~save0,0x10000); | 77 | tmp.x[0] = ~save0.x[0]; |
| 67 | /* if we find this pattern on 0x0, we have 32K size | 78 | map_write(&priv->map, tmp, 0x10000); |
| 68 | * restore contents and exit | 79 | tmp = map_read(&priv->map, 0); |
| 69 | */ | 80 | /* if we find this pattern on 0x0, we have 32K size */ |
| 70 | if ( map_read32(&autcpu12_sram_map,0) != save0) { | 81 | if (!map_word_equal(&priv->map, tmp, save0)) { |
| 71 | map_write32(&autcpu12_sram_map,save0,0x0); | 82 | map_write(&priv->map, save0, 0x0); |
| 72 | goto map; | 83 | priv->map.size = SZ_32K; |
| 84 | } else | ||
| 85 | map_write(&priv->map, save1, 0x10000); | ||
| 86 | |||
| 87 | priv->mtd = do_map_probe("map_ram", &priv->map); | ||
| 88 | if (!priv->mtd) { | ||
| 89 | dev_err(&pdev->dev, "probing failed\n"); | ||
| 90 | return -ENXIO; | ||
| 73 | } | 91 | } |
| 74 | /* We have a 128K found, restore 0x10000 and set size | ||
| 75 | * to 128K | ||
| 76 | */ | ||
| 77 | map_write32(&autcpu12_sram_map,save1,0x10000); | ||
| 78 | autcpu12_sram_map.size = SZ_128K; | ||
| 79 | |||
| 80 | map: | ||
| 81 | sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map); | ||
| 82 | if (!sram_mtd) { | ||
| 83 | printk("NV-RAM probe failed\n"); | ||
| 84 | err = -ENXIO; | ||
| 85 | goto out_ioremap; | ||
| 86 | } | ||
| 87 | |||
| 88 | sram_mtd->owner = THIS_MODULE; | ||
| 89 | sram_mtd->erasesize = 16; | ||
| 90 | 92 | ||
| 91 | if (mtd_device_register(sram_mtd, NULL, 0)) { | 93 | priv->mtd->owner = THIS_MODULE; |
| 92 | printk("NV-RAM device addition failed\n"); | 94 | priv->mtd->erasesize = 16; |
| 93 | err = -ENOMEM; | 95 | priv->mtd->dev.parent = &pdev->dev; |
| 94 | goto out_probe; | 96 | if (!mtd_device_register(priv->mtd, NULL, 0)) { |
| 97 | dev_info(&pdev->dev, | ||
| 98 | "NV-RAM device size %ldKiB registered on AUTCPU12\n", | ||
| 99 | priv->map.size / SZ_1K); | ||
| 100 | return 0; | ||
| 95 | } | 101 | } |
| 96 | 102 | ||
| 97 | printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K); | 103 | map_destroy(priv->mtd); |
| 98 | 104 | dev_err(&pdev->dev, "NV-RAM device addition failed\n"); | |
| 99 | return 0; | 105 | return -ENOMEM; |
| 100 | |||
| 101 | out_probe: | ||
| 102 | map_destroy(sram_mtd); | ||
| 103 | sram_mtd = 0; | ||
| 104 | |||
| 105 | out_ioremap: | ||
| 106 | iounmap((void *)autcpu12_sram_map.virt); | ||
| 107 | out: | ||
| 108 | return err; | ||
| 109 | } | 106 | } |
| 110 | 107 | ||
| 111 | static void __exit cleanup_autcpu12_maps(void) | 108 | static int __devexit autcpu12_nvram_remove(struct platform_device *pdev) |
| 112 | { | 109 | { |
| 113 | if (sram_mtd) { | 110 | struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev); |
| 114 | mtd_device_unregister(sram_mtd); | 111 | |
| 115 | map_destroy(sram_mtd); | 112 | mtd_device_unregister(priv->mtd); |
| 116 | iounmap((void *)autcpu12_sram_map.virt); | 113 | map_destroy(priv->mtd); |
| 117 | } | 114 | |
| 115 | return 0; | ||
| 118 | } | 116 | } |
| 119 | 117 | ||
| 120 | module_init(init_autcpu12_sram); | 118 | static struct platform_driver autcpu12_nvram_driver = { |
| 121 | module_exit(cleanup_autcpu12_maps); | 119 | .driver = { |
| 120 | .name = "autcpu12_nvram", | ||
| 121 | .owner = THIS_MODULE, | ||
| 122 | }, | ||
| 123 | .probe = autcpu12_nvram_probe, | ||
| 124 | .remove = __devexit_p(autcpu12_nvram_remove), | ||
| 125 | }; | ||
| 126 | module_platform_driver(autcpu12_nvram_driver); | ||
| 122 | 127 | ||
| 123 | MODULE_AUTHOR("Thomas Gleixner"); | 128 | MODULE_AUTHOR("Thomas Gleixner"); |
| 124 | MODULE_DESCRIPTION("autcpu12 NV-RAM map driver"); | 129 | MODULE_DESCRIPTION("autcpu12 NVRAM map driver"); |
| 125 | MODULE_LICENSE("GPL"); | 130 | MODULE_LICENSE("GPL"); |
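The rewritten autcpu12 probe keeps the original 32K/128K detection trick: write the bitwise complement of the word stored at offset 0x10000 and see whether offset 0 changed, i.e. whether the smaller part aliases back onto itself, then restore the saved contents. A generic sketch of that aliasing test (assuming a map_info that has already been through simple_map_init(); SZ_* come from linux/sizes.h):

static unsigned long detect_sram_size(struct map_info *map)
{
	map_word save0 = map_read(map, 0);
	map_word save1 = map_read(map, 0x10000);
	map_word tmp;

	tmp.x[0] = ~save0.x[0];
	map_write(map, tmp, 0x10000);		/* poke the would-be mirror */
	tmp = map_read(map, 0);

	if (!map_word_equal(map, tmp, save0)) {
		/* offset 0 changed, so 0x10000 aliases it: 32K part */
		map_write(map, save0, 0);
		return SZ_32K;
	}
	map_write(map, save1, 0x10000);		/* distinct cell: 128K part */
	return SZ_128K;
}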
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index f14ce0af763f..1c30c1a307f4 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c | |||
| @@ -43,26 +43,14 @@ static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs) | |||
| 43 | struct map_pci_info *map = (struct map_pci_info *)_map; | 43 | struct map_pci_info *map = (struct map_pci_info *)_map; |
| 44 | map_word val; | 44 | map_word val; |
| 45 | val.x[0]= readb(map->base + map->translate(map, ofs)); | 45 | val.x[0]= readb(map->base + map->translate(map, ofs)); |
| 46 | // printk("read8 : %08lx => %02x\n", ofs, val.x[0]); | ||
| 47 | return val; | 46 | return val; |
| 48 | } | 47 | } |
| 49 | 48 | ||
| 50 | #if 0 | ||
| 51 | static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs) | ||
| 52 | { | ||
| 53 | struct map_pci_info *map = (struct map_pci_info *)_map; | ||
| 54 | map_word val; | ||
| 55 | val.x[0] = readw(map->base + map->translate(map, ofs)); | ||
| 56 | // printk("read16: %08lx => %04x\n", ofs, val.x[0]); | ||
| 57 | return val; | ||
| 58 | } | ||
| 59 | #endif | ||
| 60 | static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs) | 49 | static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs) |
| 61 | { | 50 | { |
| 62 | struct map_pci_info *map = (struct map_pci_info *)_map; | 51 | struct map_pci_info *map = (struct map_pci_info *)_map; |
| 63 | map_word val; | 52 | map_word val; |
| 64 | val.x[0] = readl(map->base + map->translate(map, ofs)); | 53 | val.x[0] = readl(map->base + map->translate(map, ofs)); |
| 65 | // printk("read32: %08lx => %08x\n", ofs, val.x[0]); | ||
| 66 | return val; | 54 | return val; |
| 67 | } | 55 | } |
| 68 | 56 | ||
| @@ -75,22 +63,12 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from | |||
| 75 | static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs) | 63 | static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs) |
| 76 | { | 64 | { |
| 77 | struct map_pci_info *map = (struct map_pci_info *)_map; | 65 | struct map_pci_info *map = (struct map_pci_info *)_map; |
| 78 | // printk("write8 : %08lx <= %02x\n", ofs, val.x[0]); | ||
| 79 | writeb(val.x[0], map->base + map->translate(map, ofs)); | 66 | writeb(val.x[0], map->base + map->translate(map, ofs)); |
| 80 | } | 67 | } |
| 81 | 68 | ||
| 82 | #if 0 | ||
| 83 | static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs) | ||
| 84 | { | ||
| 85 | struct map_pci_info *map = (struct map_pci_info *)_map; | ||
| 86 | // printk("write16: %08lx <= %04x\n", ofs, val.x[0]); | ||
| 87 | writew(val.x[0], map->base + map->translate(map, ofs)); | ||
| 88 | } | ||
| 89 | #endif | ||
| 90 | static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs) | 69 | static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs) |
| 91 | { | 70 | { |
| 92 | struct map_pci_info *map = (struct map_pci_info *)_map; | 71 | struct map_pci_info *map = (struct map_pci_info *)_map; |
| 93 | // printk("write32: %08lx <= %08x\n", ofs, val.x[0]); | ||
| 94 | writel(val.x[0], map->base + map->translate(map, ofs)); | 72 | writel(val.x[0], map->base + map->translate(map, ofs)); |
| 95 | } | 73 | } |
| 96 | 74 | ||
| @@ -358,4 +336,3 @@ MODULE_LICENSE("GPL"); | |||
| 358 | MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); | 336 | MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); |
| 359 | MODULE_DESCRIPTION("Generic PCI map driver"); | 337 | MODULE_DESCRIPTION("Generic PCI map driver"); |
| 360 | MODULE_DEVICE_TABLE(pci, mtd_pci_ids); | 338 | MODULE_DEVICE_TABLE(pci, mtd_pci_ids); |
| 361 | |||
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 2e6fb6831d55..6f19acadb06c 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
| @@ -169,6 +169,7 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
| 169 | struct mtd_info **mtd_list = NULL; | 169 | struct mtd_info **mtd_list = NULL; |
| 170 | resource_size_t res_size; | 170 | resource_size_t res_size; |
| 171 | struct mtd_part_parser_data ppdata; | 171 | struct mtd_part_parser_data ppdata; |
| 172 | bool map_indirect; | ||
| 172 | 173 | ||
| 173 | match = of_match_device(of_flash_match, &dev->dev); | 174 | match = of_match_device(of_flash_match, &dev->dev); |
| 174 | if (!match) | 175 | if (!match) |
| @@ -192,6 +193,8 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
| 192 | } | 193 | } |
| 193 | count /= reg_tuple_size; | 194 | count /= reg_tuple_size; |
| 194 | 195 | ||
| 196 | map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access"); | ||
| 197 | |||
| 195 | err = -ENOMEM; | 198 | err = -ENOMEM; |
| 196 | info = kzalloc(sizeof(struct of_flash) + | 199 | info = kzalloc(sizeof(struct of_flash) + |
| 197 | sizeof(struct of_flash_list) * count, GFP_KERNEL); | 200 | sizeof(struct of_flash_list) * count, GFP_KERNEL); |
| @@ -247,6 +250,17 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
| 247 | 250 | ||
| 248 | simple_map_init(&info->list[i].map); | 251 | simple_map_init(&info->list[i].map); |
| 249 | 252 | ||
| 253 | /* | ||
| 254 | * On some platforms (e.g. MPC5200) a direct 1:1 mapping | ||
| 255 | * may cause problems with JFFS2 usage, as the local bus (LPB) | ||
| 256 | * doesn't support unaligned accesses as implemented in the | ||
| 257 | * JFFS2 code via memcpy(). By setting NO_XIP, the | ||
| 258 | * flash will not be exposed directly to the MTD users | ||
| 259 | * (e.g. JFFS2) any more. | ||
| 260 | */ | ||
| 261 | if (map_indirect) | ||
| 262 | info->list[i].map.phys = NO_XIP; | ||
| 263 | |||
| 250 | if (probe_type) { | 264 | if (probe_type) { |
| 251 | info->list[i].mtd = do_map_probe(probe_type, | 265 | info->list[i].mtd = do_map_probe(probe_type, |
| 252 | &info->list[i].map); | 266 | &info->list[i].map); |
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 6f52e1f288b6..49c3fe715eee 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c | |||
| @@ -100,8 +100,6 @@ static int rbtx4939_flash_probe(struct platform_device *dev) | |||
| 100 | goto err_out; | 100 | goto err_out; |
| 101 | } | 101 | } |
| 102 | info->mtd->owner = THIS_MODULE; | 102 | info->mtd->owner = THIS_MODULE; |
| 103 | if (err) | ||
| 104 | goto err_out; | ||
| 105 | err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts, | 103 | err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts, |
| 106 | pdata->nr_parts); | 104 | pdata->nr_parts); |
| 107 | 105 | ||
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index c3bb304eca07..299bf88a6f41 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c | |||
| @@ -67,10 +67,16 @@ static int __init uclinux_mtd_init(void) | |||
| 67 | printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n", | 67 | printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n", |
| 68 | (int) mapp->phys, (int) mapp->size); | 68 | (int) mapp->phys, (int) mapp->size); |
| 69 | 69 | ||
| 70 | mapp->virt = ioremap_nocache(mapp->phys, mapp->size); | 70 | /* |
| 71 | * The filesystem is guaranteed to be in direct mapped memory. It is | ||
| 72 | * directly following the kernel's own bss region. Following the same | ||
| 73 | * mechanism used by architectures setting up traditional initrds we | ||
| 74 | * use phys_to_virt to get the virtual address of its start. | ||
| 75 | */ | ||
| 76 | mapp->virt = phys_to_virt(mapp->phys); | ||
| 71 | 77 | ||
| 72 | if (mapp->virt == 0) { | 78 | if (mapp->virt == 0) { |
| 73 | printk("uclinux[mtd]: ioremap_nocache() failed\n"); | 79 | printk("uclinux[mtd]: no virtual mapping?\n"); |
| 74 | return(-EIO); | 80 | return(-EIO); |
| 75 | } | 81 | } |
| 76 | 82 | ||
| @@ -79,7 +85,6 @@ static int __init uclinux_mtd_init(void) | |||
| 79 | mtd = do_map_probe("map_ram", mapp); | 85 | mtd = do_map_probe("map_ram", mapp); |
| 80 | if (!mtd) { | 86 | if (!mtd) { |
| 81 | printk("uclinux[mtd]: failed to find a mapping?\n"); | 87 | printk("uclinux[mtd]: failed to find a mapping?\n"); |
| 82 | iounmap(mapp->virt); | ||
| 83 | return(-ENXIO); | 88 | return(-ENXIO); |
| 84 | } | 89 | } |
| 85 | 90 | ||
| @@ -102,10 +107,8 @@ static void __exit uclinux_mtd_cleanup(void) | |||
| 102 | map_destroy(uclinux_ram_mtdinfo); | 107 | map_destroy(uclinux_ram_mtdinfo); |
| 103 | uclinux_ram_mtdinfo = NULL; | 108 | uclinux_ram_mtdinfo = NULL; |
| 104 | } | 109 | } |
| 105 | if (uclinux_ram_map.virt) { | 110 | if (uclinux_ram_map.virt) |
| 106 | iounmap((void *) uclinux_ram_map.virt); | ||
| 107 | uclinux_ram_map.virt = 0; | 111 | uclinux_ram_map.virt = 0; |
| 108 | } | ||
| 109 | } | 112 | } |
| 110 | 113 | ||
| 111 | /****************************************************************************/ | 114 | /****************************************************************************/ |
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c deleted file mode 100644 index e7534c82f93a..000000000000 --- a/drivers/mtd/maps/wr_sbc82xx_flash.c +++ /dev/null | |||
| @@ -1,174 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Map for flash chips on Wind River PowerQUICC II SBC82xx board. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2004 Red Hat, Inc. | ||
| 5 | * | ||
| 6 | * Author: David Woodhouse <dwmw2@infradead.org> | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/types.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/slab.h> | ||
| 15 | #include <asm/io.h> | ||
| 16 | #include <linux/mtd/mtd.h> | ||
| 17 | #include <linux/mtd/map.h> | ||
| 18 | #include <linux/mtd/partitions.h> | ||
| 19 | |||
| 20 | #include <asm/immap_cpm2.h> | ||
| 21 | |||
| 22 | static struct mtd_info *sbcmtd[3]; | ||
| 23 | |||
| 24 | struct map_info sbc82xx_flash_map[3] = { | ||
| 25 | {.name = "Boot flash"}, | ||
| 26 | {.name = "Alternate boot flash"}, | ||
| 27 | {.name = "User flash"} | ||
| 28 | }; | ||
| 29 | |||
| 30 | static struct mtd_partition smallflash_parts[] = { | ||
| 31 | { | ||
| 32 | .name = "space", | ||
| 33 | .size = 0x100000, | ||
| 34 | .offset = 0, | ||
| 35 | }, { | ||
| 36 | .name = "bootloader", | ||
| 37 | .size = MTDPART_SIZ_FULL, | ||
| 38 | .offset = MTDPART_OFS_APPEND, | ||
| 39 | } | ||
| 40 | }; | ||
| 41 | |||
| 42 | static struct mtd_partition bigflash_parts[] = { | ||
| 43 | { | ||
| 44 | .name = "bootloader", | ||
| 45 | .size = 0x00100000, | ||
| 46 | .offset = 0, | ||
| 47 | }, { | ||
| 48 | .name = "file system", | ||
| 49 | .size = 0x01f00000, | ||
| 50 | .offset = MTDPART_OFS_APPEND, | ||
| 51 | }, { | ||
| 52 | .name = "boot config", | ||
| 53 | .size = 0x00100000, | ||
| 54 | .offset = MTDPART_OFS_APPEND, | ||
| 55 | }, { | ||
| 56 | .name = "space", | ||
| 57 | .size = 0x01f00000, | ||
| 58 | .offset = MTDPART_OFS_APPEND, | ||
| 59 | } | ||
| 60 | }; | ||
| 61 | |||
| 62 | static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL}; | ||
| 63 | |||
| 64 | #define init_sbc82xx_one_flash(map, br, or) \ | ||
| 65 | do { \ | ||
| 66 | (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \ | ||
| 67 | (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \ | ||
| 68 | switch (br & 0x00001800) { \ | ||
| 69 | case 0x00000000: \ | ||
| 70 | case 0x00000800: (map).bankwidth = 1; break; \ | ||
| 71 | case 0x00001000: (map).bankwidth = 2; break; \ | ||
| 72 | case 0x00001800: (map).bankwidth = 4; break; \ | ||
| 73 | } \ | ||
| 74 | } while (0); | ||
| 75 | |||
| 76 | static int __init init_sbc82xx_flash(void) | ||
| 77 | { | ||
| 78 | volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl; | ||
| 79 | int bigflash; | ||
| 80 | int i; | ||
| 81 | |||
| 82 | #ifdef CONFIG_SBC8560 | ||
| 83 | mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t)); | ||
| 84 | #else | ||
| 85 | mc = &cpm2_immr->im_memctl; | ||
| 86 | #endif | ||
| 87 | |||
| 88 | bigflash = 1; | ||
| 89 | if ((mc->memc_br0 & 0x00001800) == 0x00001800) | ||
| 90 | bigflash = 0; | ||
| 91 | |||
| 92 | init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0); | ||
| 93 | init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6); | ||
| 94 | init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1); | ||
| 95 | |||
| 96 | #ifdef CONFIG_SBC8560 | ||
| 97 | iounmap((void *) mc); | ||
| 98 | #endif | ||
| 99 | |||
| 100 | for (i=0; i<3; i++) { | ||
| 101 | int8_t flashcs[3] = { 0, 6, 1 }; | ||
| 102 | int nr_parts; | ||
| 103 | struct mtd_partition *defparts; | ||
| 104 | |||
| 105 | printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", | ||
| 106 | sbc82xx_flash_map[i].name, | ||
| 107 | (sbc82xx_flash_map[i].size >> 20), | ||
| 108 | flashcs[i]); | ||
| 109 | if (!sbc82xx_flash_map[i].phys) { | ||
| 110 | /* We know it can't be at zero. */ | ||
| 111 | printk("): disabled by bootloader.\n"); | ||
| 112 | continue; | ||
| 113 | } | ||
| 114 | printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); | ||
| 115 | |||
| 116 | sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, | ||
| 117 | sbc82xx_flash_map[i].size); | ||
| 118 | |||
| 119 | if (!sbc82xx_flash_map[i].virt) { | ||
| 120 | printk("Failed to ioremap\n"); | ||
| 121 | continue; | ||
| 122 | } | ||
| 123 | |||
| 124 | simple_map_init(&sbc82xx_flash_map[i]); | ||
| 125 | |||
| 126 | sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]); | ||
| 127 | |||
| 128 | if (!sbcmtd[i]) | ||
| 129 | continue; | ||
| 130 | |||
| 131 | sbcmtd[i]->owner = THIS_MODULE; | ||
| 132 | |||
| 133 | /* No partitioning detected. Use default */ | ||
| 134 | if (i == 2) { | ||
| 135 | defparts = NULL; | ||
| 136 | nr_parts = 0; | ||
| 137 | } else if (i == bigflash) { | ||
| 138 | defparts = bigflash_parts; | ||
| 139 | nr_parts = ARRAY_SIZE(bigflash_parts); | ||
| 140 | } else { | ||
| 141 | defparts = smallflash_parts; | ||
| 142 | nr_parts = ARRAY_SIZE(smallflash_parts); | ||
| 143 | } | ||
| 144 | |||
| 145 | mtd_device_parse_register(sbcmtd[i], part_probes, NULL, | ||
| 146 | defparts, nr_parts); | ||
| 147 | } | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | static void __exit cleanup_sbc82xx_flash(void) | ||
| 152 | { | ||
| 153 | int i; | ||
| 154 | |||
| 155 | for (i=0; i<3; i++) { | ||
| 156 | if (!sbcmtd[i]) | ||
| 157 | continue; | ||
| 158 | |||
| 159 | mtd_device_unregister(sbcmtd[i]); | ||
| 160 | |||
| 161 | map_destroy(sbcmtd[i]); | ||
| 162 | |||
| 163 | iounmap((void *)sbc82xx_flash_map[i].virt); | ||
| 164 | sbc82xx_flash_map[i].virt = 0; | ||
| 165 | } | ||
| 166 | } | ||
| 167 | |||
| 168 | module_init(init_sbc82xx_flash); | ||
| 169 | module_exit(cleanup_sbc82xx_flash); | ||
| 170 | |||
| 171 | |||
| 172 | MODULE_LICENSE("GPL"); | ||
| 173 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); | ||
| 174 | MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II"); | ||
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 73ae81a629f2..82c06165d3d2 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
| @@ -1162,7 +1162,11 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 1162 | resource_size_t start, off; | 1162 | resource_size_t start, off; |
| 1163 | unsigned long len, vma_len; | 1163 | unsigned long len, vma_len; |
| 1164 | 1164 | ||
| 1165 | if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { | 1165 | /* This is broken because it assumes the MTD device is map-based |
| 1166 | and that mtd->priv is a valid struct map_info. It should be | ||
| 1167 | replaced with something that uses the mtd_get_unmapped_area() | ||
| 1168 | operation properly. */ | ||
| 1169 | if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { | ||
| 1166 | off = get_vm_offset(vma); | 1170 | off = get_vm_offset(vma); |
| 1167 | start = map->phys; | 1171 | start = map->phys; |
| 1168 | len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); | 1172 | len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index b9adff543f5f..374c46dff7dd 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
| @@ -858,6 +858,27 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | |||
| 858 | } | 858 | } |
| 859 | EXPORT_SYMBOL_GPL(mtd_panic_write); | 859 | EXPORT_SYMBOL_GPL(mtd_panic_write); |
| 860 | 860 | ||
| 861 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | ||
| 862 | { | ||
| 863 | int ret_code; | ||
| 864 | ops->retlen = ops->oobretlen = 0; | ||
| 865 | if (!mtd->_read_oob) | ||
| 866 | return -EOPNOTSUPP; | ||
| 867 | /* | ||
| 868 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics | ||
| 869 | * similar to mtd->_read(), returning a non-negative integer | ||
| 870 | * representing max bitflips. In other cases, mtd->_read_oob() may | ||
| 871 | * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). | ||
| 872 | */ | ||
| 873 | ret_code = mtd->_read_oob(mtd, from, ops); | ||
| 874 | if (unlikely(ret_code < 0)) | ||
| 875 | return ret_code; | ||
| 876 | if (mtd->ecc_strength == 0) | ||
| 877 | return 0; /* device lacks ecc */ | ||
| 878 | return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; | ||
| 879 | } | ||
| 880 | EXPORT_SYMBOL_GPL(mtd_read_oob); | ||
| 881 | |||
| 861 | /* | 882 | /* |
| 862 | * Method to access the protection register area, present in some flash | 883 | * Method to access the protection register area, present in some flash |
| 863 | * devices. The user data is one time programmable but the factory data is read | 884 | * devices. The user data is one time programmable but the factory data is read |
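The new mtd_read_oob() wrapper centralises the bitflip accounting that the in-code comment describes: when data is read, the driver's _read_oob() reports the maximum number of corrected bitflips per ECC step, and the wrapper maps that onto 0 (clean), -EUCLEAN (data recovered but bitflips reached mtd->bitflip_threshold, so the caller should consider scrubbing the block), or a negative error code. A hedged sketch of how a caller might interpret the result (the ops fields shown are illustrative):

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,
		.ooblen = mtd->oobavail,
		.oobbuf = oob_buf,
	};
	int err = mtd_read_oob(mtd, offset, &ops);

	if (err == -EUCLEAN)
		pr_warn("read OK, but bitflips hit the threshold\n");
	else if (err < 0)
		return err;		/* hard read failure */
	/* err == 0: clean read, ops.oobretlen OOB bytes are valid */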
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 438737a1f59a..f5b3f91fa1cc 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
| @@ -169,14 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work) | |||
| 169 | cxt->nextpage = 0; | 169 | cxt->nextpage = 0; |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | while (1) { | 172 | while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) { |
| 173 | ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); | ||
| 174 | if (!ret) | ||
| 175 | break; | ||
| 176 | if (ret < 0) { | ||
| 177 | printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n"); | ||
| 178 | return; | ||
| 179 | } | ||
| 180 | badblock: | 173 | badblock: |
| 181 | printk(KERN_WARNING "mtdoops: bad block at %08lx\n", | 174 | printk(KERN_WARNING "mtdoops: bad block at %08lx\n", |
| 182 | cxt->nextpage * record_size); | 175 | cxt->nextpage * record_size); |
| @@ -190,6 +183,11 @@ badblock: | |||
| 190 | } | 183 | } |
| 191 | } | 184 | } |
| 192 | 185 | ||
| 186 | if (ret < 0) { | ||
| 187 | printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n"); | ||
| 188 | return; | ||
| 189 | } | ||
| 190 | |||
| 193 | for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) | 191 | for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) |
| 194 | ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); | 192 | ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); |
| 195 | 193 | ||
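The mtdoops rework above folds the bad-block scan into the loop condition, relying on mtd_block_isbad() returning a positive value for a bad block, 0 for a good one and a negative errno on failure, so the error case only needs one check after the loop. A simplified sketch of the scan inside the erase work function (record_size, cxt->nextpage and cxt->oops_pages are the driver's own; the retry/mark logic is omitted):

	int ret;

	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
		pr_warn("mtdoops: bad block at %08lx\n",
			cxt->nextpage * record_size);
		cxt->nextpage++;			/* skip to the next block */
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;		/* wrap around */
	}
	if (ret < 0) {
		pr_err("mtdoops: mtd_block_isbad failed, aborting\n");
		return;
	}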
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 3a49e6de5e60..70fa70a8318f 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
| @@ -711,6 +711,8 @@ static const char *default_mtd_part_types[] = { | |||
| 711 | * partition parsers, specified in @types. However, if @types is %NULL, then | 711 | * partition parsers, specified in @types. However, if @types is %NULL, then |
| 712 | * the default list of parsers is used. The default list contains only the | 712 | * the default list of parsers is used. The default list contains only the |
| 713 | * "cmdlinepart" and "ofpart" parsers ATM. | 713 | * "cmdlinepart" and "ofpart" parsers ATM. |
| 714 | * Note: If there is more than one parser in @types, the kernel only takes the | ||
| 715 | * partitions parsed out by the first parser. | ||
| 714 | * | 716 | * |
| 715 | * This function may return: | 717 | * This function may return: |
| 716 | * o a negative error code in case of failure | 718 | * o a negative error code in case of failure |
| @@ -735,11 +737,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types, | |||
| 735 | if (!parser) | 737 | if (!parser) |
| 736 | continue; | 738 | continue; |
| 737 | ret = (*parser->parse_fn)(master, pparts, data); | 739 | ret = (*parser->parse_fn)(master, pparts, data); |
| 740 | put_partition_parser(parser); | ||
| 738 | if (ret > 0) { | 741 | if (ret > 0) { |
| 739 | printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", | 742 | printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", |
| 740 | ret, parser->name, master->name); | 743 | ret, parser->name, master->name); |
| 744 | break; | ||
| 741 | } | 745 | } |
| 742 | put_partition_parser(parser); | ||
| 743 | } | 746 | } |
| 744 | return ret; | 747 | return ret; |
| 745 | } | 748 | } |
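With this change parse_mtd_partitions() drops its reference to each parser as soon as it has run and stops at the first parser that reports partitions, which is exactly what the new kernel-doc note states. From a driver's perspective the usual entry point is mtd_device_parse_register(); a hedged usage sketch (the probe-order array and fallback table are illustrative):

	static const char *probes[] = { "cmdlinepart", "ofpart", NULL };
	struct mtd_part_parser_data ppdata = { .of_node = pdev->dev.of_node };

	/* Parsers are tried in order; the first one returning > 0 partitions
	 * wins. If none succeed, the static fallback table is used. */
	ret = mtd_device_parse_register(mtd, probes, &ppdata,
					fallback_parts,
					ARRAY_SIZE(fallback_parts));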
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 598cd0a3adee..4883139460be 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
| @@ -22,15 +22,6 @@ menuconfig MTD_NAND | |||
| 22 | 22 | ||
| 23 | if MTD_NAND | 23 | if MTD_NAND |
| 24 | 24 | ||
| 25 | config MTD_NAND_VERIFY_WRITE | ||
| 26 | bool "Verify NAND page writes" | ||
| 27 | help | ||
| 28 | This adds an extra check when data is written to the flash. The | ||
| 29 | NAND flash device internally checks only bits transitioning | ||
| 30 | from 1 to 0. There is a rare possibility that even though the | ||
| 31 | device thinks the write was successful, a bit could have been | ||
| 32 | flipped accidentally due to device wear or something else. | ||
| 33 | |||
| 34 | config MTD_NAND_BCH | 25 | config MTD_NAND_BCH |
| 35 | tristate | 26 | tristate |
| 36 | select BCH | 27 | select BCH |
| @@ -267,22 +258,6 @@ config MTD_NAND_S3C2410_CLKSTOP | |||
| 267 | when the NAND chip is selected or released, but will save | 258 | when the NAND chip is selected or released, but will save |
| 268 | approximately 5mA of power when there is nothing happening. | 259 | approximately 5mA of power when there is nothing happening. |
| 269 | 260 | ||
| 270 | config MTD_NAND_BCM_UMI | ||
| 271 | tristate "NAND Flash support for BCM Reference Boards" | ||
| 272 | depends on ARCH_BCMRING | ||
| 273 | help | ||
| 274 | This enables the NAND flash controller on the BCM UMI block. | ||
| 275 | |||
| 276 | No board specific support is done by this driver, each board | ||
| 277 | must advertise a platform_device for the driver to attach. | ||
| 278 | |||
| 279 | config MTD_NAND_BCM_UMI_HWCS | ||
| 280 | bool "BCM UMI NAND Hardware CS" | ||
| 281 | depends on MTD_NAND_BCM_UMI | ||
| 282 | help | ||
| 283 | Enable the use of the BCM UMI block's internal CS using NAND. | ||
| 284 | This should only be used if you know the external NAND CS can toggle. | ||
| 285 | |||
| 286 | config MTD_NAND_DISKONCHIP | 261 | config MTD_NAND_DISKONCHIP |
| 287 | tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" | 262 | tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" |
| 288 | depends on EXPERIMENTAL | 263 | depends on EXPERIMENTAL |
| @@ -356,7 +331,7 @@ config MTD_NAND_DISKONCHIP_BBTWRITE | |||
| 356 | 331 | ||
| 357 | config MTD_NAND_DOCG4 | 332 | config MTD_NAND_DOCG4 |
| 358 | tristate "Support for DiskOnChip G4 (EXPERIMENTAL)" | 333 | tristate "Support for DiskOnChip G4 (EXPERIMENTAL)" |
| 359 | depends on EXPERIMENTAL | 334 | depends on EXPERIMENTAL && HAS_IOMEM |
| 360 | select BCH | 335 | select BCH |
| 361 | select BITREVERSE | 336 | select BITREVERSE |
| 362 | help | 337 | help |
| @@ -414,6 +389,28 @@ config MTD_NAND_PXA3xx | |||
| 414 | This enables the driver for the NAND flash device found on | 389 | This enables the driver for the NAND flash device found on |
| 415 | PXA3xx processors | 390 | PXA3xx processors |
| 416 | 391 | ||
| 392 | config MTD_NAND_SLC_LPC32XX | ||
| 393 | tristate "NXP LPC32xx SLC Controller" | ||
| 394 | depends on ARCH_LPC32XX | ||
| 395 | help | ||
| 396 | Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell | ||
| 397 | chips) NAND controller. This is the default for the PHYTEC 3250 | ||
| 398 | reference board which contains a NAND256R3A2CZA6 chip. | ||
| 399 | |||
| 400 | Please check the actual NAND chip connected and its support | ||
| 401 | by the SLC NAND controller. | ||
| 402 | |||
| 403 | config MTD_NAND_MLC_LPC32XX | ||
| 404 | tristate "NXP LPC32xx MLC Controller" | ||
| 405 | depends on ARCH_LPC32XX | ||
| 406 | help | ||
| 407 | Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND | ||
| 408 | controller. This is the default for the WORK92105 controller | ||
| 409 | board. | ||
| 410 | |||
| 411 | Please check that the connected NAND chip is supported | ||
| 412 | by the MLC NAND controller. | ||
| 413 | |||
| 417 | config MTD_NAND_CM_X270 | 414 | config MTD_NAND_CM_X270 |
| 418 | tristate "Support for NAND Flash on CM-X270 modules" | 415 | tristate "Support for NAND Flash on CM-X270 modules" |
| 419 | depends on MACH_ARMCORE | 416 | depends on MACH_ARMCORE |
| @@ -439,10 +436,10 @@ config MTD_NAND_NANDSIM | |||
| 439 | MTD nand layer. | 436 | MTD nand layer. |
| 440 | 437 | ||
| 441 | config MTD_NAND_GPMI_NAND | 438 | config MTD_NAND_GPMI_NAND |
| 442 | bool "GPMI NAND Flash Controller driver" | 439 | tristate "GPMI NAND Flash Controller driver" |
| 443 | depends on MTD_NAND && MXS_DMA | 440 | depends on MTD_NAND && MXS_DMA |
| 444 | help | 441 | help |
| 445 | Enables NAND Flash support for IMX23 or IMX28. | 442 | Enables NAND Flash support for IMX23, IMX28 or IMX6. |
| 446 | The GPMI controller is very powerful, with the help of BCH | 443 | The GPMI controller is very powerful, with the help of BCH |
| 447 | module, it can do the hardware ECC. The GPMI supports several | 444 | module, it can do the hardware ECC. The GPMI supports several |
| 448 | NAND flashes at the same time. The GPMI may conflict with other | 445 | NAND flashes at the same time. The GPMI may conflict with other |
| @@ -510,7 +507,7 @@ config MTD_NAND_MPC5121_NFC | |||
| 510 | 507 | ||
| 511 | config MTD_NAND_MXC | 508 | config MTD_NAND_MXC |
| 512 | tristate "MXC NAND support" | 509 | tristate "MXC NAND support" |
| 513 | depends on IMX_HAVE_PLATFORM_MXC_NAND | 510 | depends on ARCH_MXC |
| 514 | help | 511 | help |
| 515 | This enables the driver for the NAND flash controller on the | 512 | This enables the driver for the NAND flash controller on the |
| 516 | MXC processors. | 513 | MXC processors. |
| @@ -567,4 +564,12 @@ config MTD_NAND_FSMC | |||
| 567 | Enables support for NAND Flash chips on the ST Microelectronics | 564 | Enables support for NAND Flash chips on the ST Microelectronics |
| 568 | Flexible Static Memory Controller (FSMC) | 565 | Flexible Static Memory Controller (FSMC) |
| 569 | 566 | ||
| 567 | config MTD_NAND_XWAY | ||
| 568 | tristate "Support for NAND on Lantiq XWAY SoC" | ||
| 569 | depends on LANTIQ && SOC_TYPE_XWAY | ||
| 570 | select MTD_NAND_PLATFORM | ||
| 571 | help | ||
| 572 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached | ||
| 573 | to the External Bus Unit (EBU). | ||
| 574 | |||
| 570 | endif # MTD_NAND | 575 | endif # MTD_NAND |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index d4b4d8739bd8..2cbd0916b733 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
| @@ -40,16 +40,18 @@ obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o | |||
| 40 | obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o | 40 | obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o |
| 41 | obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o | 41 | obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o |
| 42 | obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o | 42 | obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o |
| 43 | obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o | ||
| 44 | obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o | ||
| 43 | obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o | 45 | obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o |
| 44 | obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o | 46 | obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o |
| 45 | obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o | 47 | obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o |
| 46 | obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o | 48 | obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o |
| 47 | obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o | 49 | obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o |
| 48 | obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o | 50 | obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o |
| 49 | obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o | ||
| 50 | obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o | 51 | obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o |
| 51 | obj-$(CONFIG_MTD_NAND_RICOH) += r852.o | 52 | obj-$(CONFIG_MTD_NAND_RICOH) += r852.o |
| 52 | obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o | 53 | obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o |
| 53 | obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ | 54 | obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ |
| 55 | obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o | ||
| 54 | 56 | ||
| 55 | nand-objs := nand_base.o nand_bbt.o | 57 | nand-objs := nand_base.o nand_bbt.o |
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index a7040af08536..9e7723aa7acc 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c | |||
| @@ -107,18 +107,6 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 107 | buf[i] = ams_delta_read_byte(mtd); | 107 | buf[i] = ams_delta_read_byte(mtd); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf, | ||
| 111 | int len) | ||
| 112 | { | ||
| 113 | int i; | ||
| 114 | |||
| 115 | for (i=0; i<len; i++) | ||
| 116 | if (buf[i] != ams_delta_read_byte(mtd)) | ||
| 117 | return -EFAULT; | ||
| 118 | |||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | /* | 110 | /* |
| 123 | * Command control function | 111 | * Command control function |
| 124 | * | 112 | * |
| @@ -237,7 +225,6 @@ static int __devinit ams_delta_init(struct platform_device *pdev) | |||
| 237 | this->read_byte = ams_delta_read_byte; | 225 | this->read_byte = ams_delta_read_byte; |
| 238 | this->write_buf = ams_delta_write_buf; | 226 | this->write_buf = ams_delta_write_buf; |
| 239 | this->read_buf = ams_delta_read_buf; | 227 | this->read_buf = ams_delta_read_buf; |
| 240 | this->verify_buf = ams_delta_verify_buf; | ||
| 241 | this->cmd_ctrl = ams_delta_hwcontrol; | 228 | this->cmd_ctrl = ams_delta_hwcontrol; |
| 242 | if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) { | 229 | if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) { |
| 243 | this->dev_ready = ams_delta_nand_ready; | 230 | this->dev_ready = ams_delta_nand_ready; |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 97ac6712bb19..914455783302 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
| @@ -1,20 +1,22 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2003 Rick Bronson | 2 | * Copyright © 2003 Rick Bronson |
| 3 | * | 3 | * |
| 4 | * Derived from drivers/mtd/nand/autcpu12.c | 4 | * Derived from drivers/mtd/nand/autcpu12.c |
| 5 | * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) | 5 | * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de) |
| 6 | * | 6 | * |
| 7 | * Derived from drivers/mtd/spia.c | 7 | * Derived from drivers/mtd/spia.c |
| 8 | * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com) | 8 | * Copyright © 2000 Steven J. Hill (sjhill@cotw.com) |
| 9 | * | 9 | * |
| 10 | * | 10 | * |
| 11 | * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 | 11 | * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 |
| 12 | * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007 | 12 | * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007 |
| 13 | * | 13 | * |
| 14 | * Derived from Das U-Boot source code | 14 | * Derived from Das U-Boot source code |
| 15 | * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) | 15 | * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) |
| 16 | * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas | 16 | * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas |
| 17 | * | 17 | * |
| 18 | * Add Programmable Multibit ECC support for various AT91 SoC | ||
| 19 | * © Copyright 2012 ATMEL, Hong Xu | ||
| 18 | * | 20 | * |
| 19 | * This program is free software; you can redistribute it and/or modify | 21 | * This program is free software; you can redistribute it and/or modify |
| 20 | * it under the terms of the GNU General Public License version 2 as | 22 | * it under the terms of the GNU General Public License version 2 as |
| @@ -93,8 +95,36 @@ struct atmel_nand_host { | |||
| 93 | 95 | ||
| 94 | struct completion comp; | 96 | struct completion comp; |
| 95 | struct dma_chan *dma_chan; | 97 | struct dma_chan *dma_chan; |
| 98 | |||
| 99 | bool has_pmecc; | ||
| 100 | u8 pmecc_corr_cap; | ||
| 101 | u16 pmecc_sector_size; | ||
| 102 | u32 pmecc_lookup_table_offset; | ||
| 103 | |||
| 104 | int pmecc_bytes_per_sector; | ||
| 105 | int pmecc_sector_number; | ||
| 106 | int pmecc_degree; /* Degree of remainders */ | ||
| 107 | int pmecc_cw_len; /* Length of codeword */ | ||
| 108 | |||
| 109 | void __iomem *pmerrloc_base; | ||
| 110 | void __iomem *pmecc_rom_base; | ||
| 111 | |||
| 112 | /* lookup table for alpha_to and index_of */ | ||
| 113 | void __iomem *pmecc_alpha_to; | ||
| 114 | void __iomem *pmecc_index_of; | ||
| 115 | |||
| 116 | /* data for pmecc computation */ | ||
| 117 | int16_t *pmecc_partial_syn; | ||
| 118 | int16_t *pmecc_si; | ||
| 119 | int16_t *pmecc_smu; /* Sigma table */ | ||
| 120 | int16_t *pmecc_lmu; /* polynomial order */ | ||
| 121 | int *pmecc_mu; | ||
| 122 | int *pmecc_dmu; | ||
| 123 | int *pmecc_delta; | ||
| 96 | }; | 124 | }; |
| 97 | 125 | ||
| 126 | static struct nand_ecclayout atmel_pmecc_oobinfo; | ||
| 127 | |||
| 98 | static int cpu_has_dma(void) | 128 | static int cpu_has_dma(void) |
| 99 | { | 129 | { |
| 100 | return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); | 130 | return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); |
| @@ -288,6 +318,703 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | |||
| 288 | } | 318 | } |
| 289 | 319 | ||
| 290 | /* | 320 | /* |
| 321 | * Return number of ecc bytes per sector according to sector size and | ||
| 322 | * correction capability | ||
| 323 | * | ||
| 324 | * The following table shows what the at91 PMECC supports: | ||
| 325 | * Correction Capability Sector_512_bytes Sector_1024_bytes | ||
| 326 | * ===================== ================ ================= | ||
| 327 | * 2-bits 4-bytes 4-bytes | ||
| 328 | * 4-bits 7-bytes 7-bytes | ||
| 329 | * 8-bits 13-bytes 14-bytes | ||
| 330 | * 12-bits 20-bytes 21-bytes | ||
| 331 | * 24-bits 39-bytes 42-bytes | ||
| 332 | */ | ||
| 333 | static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size) | ||
| 334 | { | ||
| 335 | int m = 12 + sector_size / 512; | ||
| 336 | return (m * cap + 7) / 8; | ||
| 337 | } | ||
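The helper above simply rounds m * cap bits up to whole bytes, with m = 13 for 512-byte sectors and m = 14 for 1024-byte sectors. A small user-space sketch of the same arithmetic (names illustrative) reproduces the table in the comment:

    #include <stdio.h>

    /* Same arithmetic as pmecc_get_ecc_bytes(): m is the Galois field
     * dimension and the result is ceil(m * cap / 8) parity bytes/sector. */
    static int ecc_bytes(int cap, int sector_size)
    {
            int m = 12 + sector_size / 512;
            return (m * cap + 7) / 8;
    }

    int main(void)
    {
            int caps[] = { 2, 4, 8, 12, 24 };
            int i;

            for (i = 0; i < 5; i++)
                    printf("cap %2d: %2d / %2d bytes\n", caps[i],
                           ecc_bytes(caps[i], 512), ecc_bytes(caps[i], 1024));
            return 0;   /* prints 4/4, 7/7, 13/14, 20/21, 39/42 */
    }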
| 338 | |||
| 339 | static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout, | ||
| 340 | int oobsize, int ecc_len) | ||
| 341 | { | ||
| 342 | int i; | ||
| 343 | |||
| 344 | layout->eccbytes = ecc_len; | ||
| 345 | |||
| 346 | /* ECC will occupy the last ecc_len bytes continuously */ | ||
| 347 | for (i = 0; i < ecc_len; i++) | ||
| 348 | layout->eccpos[i] = oobsize - ecc_len + i; | ||
| 349 | |||
| 350 | layout->oobfree[0].offset = 2; | ||
| 351 | layout->oobfree[0].length = | ||
| 352 | oobsize - ecc_len - layout->oobfree[0].offset; | ||
| 353 | } | ||
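As an illustration (values chosen for the example, not mandated by the driver): with a 64-byte OOB and 28 ECC bytes (4 sectors of 7 bytes each, i.e. cap = 4 with 512-byte sectors on a 2048-byte page), eccpos[] ends up covering offsets 36..63, while oobfree[0] starts at offset 2 with length 34.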
| 354 | |||
| 355 | static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) | ||
| 356 | { | ||
| 357 | int table_size; | ||
| 358 | |||
| 359 | table_size = host->pmecc_sector_size == 512 ? | ||
| 360 | PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024; | ||
| 361 | |||
| 362 | return host->pmecc_rom_base + host->pmecc_lookup_table_offset + | ||
| 363 | table_size * sizeof(int16_t); | ||
| 364 | } | ||
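In other words, the ROM holds the index_of table at the configured lookup-table offset and the alpha_to table immediately after it: with 512-byte sectors the table has PMECC_LOOKUP_TABLE_SIZE_512 = 0x2000 16-bit entries, so alpha_to starts 0x4000 bytes past that offset (0x8000 bytes for 1024-byte sectors).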
| 365 | |||
| 366 | static void pmecc_data_free(struct atmel_nand_host *host) | ||
| 367 | { | ||
| 368 | kfree(host->pmecc_partial_syn); | ||
| 369 | kfree(host->pmecc_si); | ||
| 370 | kfree(host->pmecc_lmu); | ||
| 371 | kfree(host->pmecc_smu); | ||
| 372 | kfree(host->pmecc_mu); | ||
| 373 | kfree(host->pmecc_dmu); | ||
| 374 | kfree(host->pmecc_delta); | ||
| 375 | } | ||
| 376 | |||
| 377 | static int __devinit pmecc_data_alloc(struct atmel_nand_host *host) | ||
| 378 | { | ||
| 379 | const int cap = host->pmecc_corr_cap; | ||
| 380 | |||
| 381 | host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t), | ||
| 382 | GFP_KERNEL); | ||
| 383 | host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL); | ||
| 384 | host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL); | ||
| 385 | host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t), | ||
| 386 | GFP_KERNEL); | ||
| 387 | host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL); | ||
| 388 | host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL); | ||
| 389 | host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL); | ||
| 390 | |||
| 391 | if (host->pmecc_partial_syn && | ||
| 392 | host->pmecc_si && | ||
| 393 | host->pmecc_lmu && | ||
| 394 | host->pmecc_smu && | ||
| 395 | host->pmecc_mu && | ||
| 396 | host->pmecc_dmu && | ||
| 397 | host->pmecc_delta) | ||
| 398 | return 0; | ||
| 399 | |||
| 400 | /* error happened */ | ||
| 401 | pmecc_data_free(host); | ||
| 402 | return -ENOMEM; | ||
| 403 | } | ||
| 404 | |||
| 405 | static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector) | ||
| 406 | { | ||
| 407 | struct nand_chip *nand_chip = mtd->priv; | ||
| 408 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 409 | int i; | ||
| 410 | uint32_t value; | ||
| 411 | |||
| 412 | /* Fill odd syndromes */ | ||
| 413 | for (i = 0; i < host->pmecc_corr_cap; i++) { | ||
| 414 | value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2); | ||
| 415 | if (i & 1) | ||
| 416 | value >>= 16; | ||
| 417 | value &= 0xffff; | ||
| 418 | host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value; | ||
| 419 | } | ||
| 420 | } | ||
| 421 | |||
| 422 | static void pmecc_substitute(struct mtd_info *mtd) | ||
| 423 | { | ||
| 424 | struct nand_chip *nand_chip = mtd->priv; | ||
| 425 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 426 | int16_t __iomem *alpha_to = host->pmecc_alpha_to; | ||
| 427 | int16_t __iomem *index_of = host->pmecc_index_of; | ||
| 428 | int16_t *partial_syn = host->pmecc_partial_syn; | ||
| 429 | const int cap = host->pmecc_corr_cap; | ||
| 430 | int16_t *si; | ||
| 431 | int i, j; | ||
| 432 | |||
| 433 | /* si[] is a table that holds the current syndrome value, | ||
| 434 | * an element of that table belongs to the field | ||
| 435 | */ | ||
| 436 | si = host->pmecc_si; | ||
| 437 | |||
| 438 | memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1)); | ||
| 439 | |||
| 440 | /* Compute the 2t syndromes based on S(x) */ | ||
| 441 | /* Odd syndromes */ | ||
| 442 | for (i = 1; i < 2 * cap; i += 2) { | ||
| 443 | for (j = 0; j < host->pmecc_degree; j++) { | ||
| 444 | if (partial_syn[i] & ((unsigned short)0x1 << j)) | ||
| 445 | si[i] = readw_relaxed(alpha_to + i * j) ^ si[i]; | ||
| 446 | } | ||
| 447 | } | ||
| 448 | /* Even syndrome = (Odd syndrome) ** 2 */ | ||
| 449 | for (i = 2, j = 1; j <= cap; i = ++j << 1) { | ||
| 450 | if (si[j] == 0) { | ||
| 451 | si[i] = 0; | ||
| 452 | } else { | ||
| 453 | int16_t tmp; | ||
| 454 | |||
| 455 | tmp = readw_relaxed(index_of + si[j]); | ||
| 456 | tmp = (tmp * 2) % host->pmecc_cw_len; | ||
| 457 | si[i] = readw_relaxed(alpha_to + tmp); | ||
| 458 | } | ||
| 459 | } | ||
| 460 | |||
| 461 | return; | ||
| 462 | } | ||
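The even-syndrome step relies on S(2j) = S(j)^2 in GF(2^m); with log/antilog tables a square is just the logarithm doubled modulo the codeword length, which is the (tmp * 2) % cw_len computation above. A standalone sketch over the toy field GF(2^4) (primitive polynomial x^4 + x + 1, chosen only for illustration; the driver's GF(2^13)/GF(2^14) tables come from ROM):

    #include <stdio.h>

    #define GF_M    4
    #define GF_SIZE (1 << GF_M)     /* 16 elements      */
    #define CW_LEN  (GF_SIZE - 1)   /* 15, like cw_len  */

    int main(void)
    {
            int alpha_to[GF_SIZE], index_of[GF_SIZE];
            int i, x = 1;

            index_of[0] = -1;                       /* log(0) is undefined */
            for (i = 0; i < CW_LEN; i++) {          /* build the two tables */
                    alpha_to[i] = x;
                    index_of[x] = i;
                    x <<= 1;
                    if (x & GF_SIZE)
                            x ^= 0x13;              /* reduce by x^4 + x + 1 */
            }

            for (i = 1; i < GF_SIZE; i++)           /* square via doubled log */
                    printf("square(%2d) = %2d\n", i,
                           alpha_to[(2 * index_of[i]) % CW_LEN]);
            return 0;
    }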
| 463 | |||
| 464 | static void pmecc_get_sigma(struct mtd_info *mtd) | ||
| 465 | { | ||
| 466 | struct nand_chip *nand_chip = mtd->priv; | ||
| 467 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 468 | |||
| 469 | int16_t *lmu = host->pmecc_lmu; | ||
| 470 | int16_t *si = host->pmecc_si; | ||
| 471 | int *mu = host->pmecc_mu; | ||
| 472 | int *dmu = host->pmecc_dmu; /* Discrepancy */ | ||
| 473 | int *delta = host->pmecc_delta; /* Delta order */ | ||
| 474 | int cw_len = host->pmecc_cw_len; | ||
| 475 | const int16_t cap = host->pmecc_corr_cap; | ||
| 476 | const int num = 2 * cap + 1; | ||
| 477 | int16_t __iomem *index_of = host->pmecc_index_of; | ||
| 478 | int16_t __iomem *alpha_to = host->pmecc_alpha_to; | ||
| 479 | int i, j, k; | ||
| 480 | uint32_t dmu_0_count, tmp; | ||
| 481 | int16_t *smu = host->pmecc_smu; | ||
| 482 | |||
| 483 | /* index of largest delta */ | ||
| 484 | int ro; | ||
| 485 | int largest; | ||
| 486 | int diff; | ||
| 487 | |||
| 488 | dmu_0_count = 0; | ||
| 489 | |||
| 490 | /* First Row */ | ||
| 491 | |||
| 492 | /* Mu */ | ||
| 493 | mu[0] = -1; | ||
| 494 | |||
| 495 | memset(smu, 0, sizeof(int16_t) * num); | ||
| 496 | smu[0] = 1; | ||
| 497 | |||
| 498 | /* discrepancy set to 1 */ | ||
| 499 | dmu[0] = 1; | ||
| 500 | /* polynomial order set to 0 */ | ||
| 501 | lmu[0] = 0; | ||
| 502 | delta[0] = (mu[0] * 2 - lmu[0]) >> 1; | ||
| 503 | |||
| 504 | /* Second Row */ | ||
| 505 | |||
| 506 | /* Mu */ | ||
| 507 | mu[1] = 0; | ||
| 508 | /* Sigma(x) set to 1 */ | ||
| 509 | memset(&smu[num], 0, sizeof(int16_t) * num); | ||
| 510 | smu[num] = 1; | ||
| 511 | |||
| 512 | /* discrepancy set to S1 */ | ||
| 513 | dmu[1] = si[1]; | ||
| 514 | |||
| 515 | /* polynomial order set to 0 */ | ||
| 516 | lmu[1] = 0; | ||
| 517 | |||
| 518 | delta[1] = (mu[1] * 2 - lmu[1]) >> 1; | ||
| 519 | |||
| 520 | /* Init the Sigma(x) last row */ | ||
| 521 | memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num); | ||
| 522 | |||
| 523 | for (i = 1; i <= cap; i++) { | ||
| 524 | mu[i + 1] = i << 1; | ||
| 525 | /* Begin Computing Sigma (Mu+1) and L(mu) */ | ||
| 526 | /* check if discrepancy is set to 0 */ | ||
| 527 | if (dmu[i] == 0) { | ||
| 528 | dmu_0_count++; | ||
| 529 | |||
| 530 | tmp = ((cap - (lmu[i] >> 1) - 1) / 2); | ||
| 531 | if ((cap - (lmu[i] >> 1) - 1) & 0x1) | ||
| 532 | tmp += 2; | ||
| 533 | else | ||
| 534 | tmp += 1; | ||
| 535 | |||
| 536 | if (dmu_0_count == tmp) { | ||
| 537 | for (j = 0; j <= (lmu[i] >> 1) + 1; j++) | ||
| 538 | smu[(cap + 1) * num + j] = | ||
| 539 | smu[i * num + j]; | ||
| 540 | |||
| 541 | lmu[cap + 1] = lmu[i]; | ||
| 542 | return; | ||
| 543 | } | ||
| 544 | |||
| 545 | /* copy polynomial */ | ||
| 546 | for (j = 0; j <= lmu[i] >> 1; j++) | ||
| 547 | smu[(i + 1) * num + j] = smu[i * num + j]; | ||
| 548 | |||
| 549 | /* copy previous polynomial order to the next */ | ||
| 550 | lmu[i + 1] = lmu[i]; | ||
| 551 | } else { | ||
| 552 | ro = 0; | ||
| 553 | largest = -1; | ||
| 554 | /* find largest delta with dmu != 0 */ | ||
| 555 | for (j = 0; j < i; j++) { | ||
| 556 | if ((dmu[j]) && (delta[j] > largest)) { | ||
| 557 | largest = delta[j]; | ||
| 558 | ro = j; | ||
| 559 | } | ||
| 560 | } | ||
| 561 | |||
| 562 | /* compute difference */ | ||
| 563 | diff = (mu[i] - mu[ro]); | ||
| 564 | |||
| 565 | /* Compute degree of the new smu polynomial */ | ||
| 566 | if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff)) | ||
| 567 | lmu[i + 1] = lmu[i]; | ||
| 568 | else | ||
| 569 | lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2; | ||
| 570 | |||
| 571 | /* Init smu[i+1] with 0 */ | ||
| 572 | for (k = 0; k < num; k++) | ||
| 573 | smu[(i + 1) * num + k] = 0; | ||
| 574 | |||
| 575 | /* Compute smu[i+1] */ | ||
| 576 | for (k = 0; k <= lmu[ro] >> 1; k++) { | ||
| 577 | int16_t a, b, c; | ||
| 578 | |||
| 579 | if (!(smu[ro * num + k] && dmu[i])) | ||
| 580 | continue; | ||
| 581 | a = readw_relaxed(index_of + dmu[i]); | ||
| 582 | b = readw_relaxed(index_of + dmu[ro]); | ||
| 583 | c = readw_relaxed(index_of + smu[ro * num + k]); | ||
| 584 | tmp = a + (cw_len - b) + c; | ||
| 585 | a = readw_relaxed(alpha_to + tmp % cw_len); | ||
| 586 | smu[(i + 1) * num + (k + diff)] = a; | ||
| 587 | } | ||
| 588 | |||
| 589 | for (k = 0; k <= lmu[i] >> 1; k++) | ||
| 590 | smu[(i + 1) * num + k] ^= smu[i * num + k]; | ||
| 591 | } | ||
| 592 | |||
| 593 | /* End Computing Sigma (Mu+1) and L(mu) */ | ||
| 594 | /* In either case compute delta */ | ||
| 595 | delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1; | ||
| 596 | |||
| 597 | /* Do not compute discrepancy for the last iteration */ | ||
| 598 | if (i >= cap) | ||
| 599 | continue; | ||
| 600 | |||
| 601 | for (k = 0; k <= (lmu[i + 1] >> 1); k++) { | ||
| 602 | tmp = 2 * (i - 1); | ||
| 603 | if (k == 0) { | ||
| 604 | dmu[i + 1] = si[tmp + 3]; | ||
| 605 | } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) { | ||
| 606 | int16_t a, b, c; | ||
| 607 | a = readw_relaxed(index_of + | ||
| 608 | smu[(i + 1) * num + k]); | ||
| 609 | b = si[2 * (i - 1) + 3 - k]; | ||
| 610 | c = readw_relaxed(index_of + b); | ||
| 611 | tmp = a + c; | ||
| 612 | tmp %= cw_len; | ||
| 613 | dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^ | ||
| 614 | dmu[i + 1]; | ||
| 615 | } | ||
| 616 | } | ||
| 617 | } | ||
| 618 | |||
| 619 | return; | ||
| 620 | } | ||
| 621 | |||
| 622 | static int pmecc_err_location(struct mtd_info *mtd) | ||
| 623 | { | ||
| 624 | struct nand_chip *nand_chip = mtd->priv; | ||
| 625 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 626 | unsigned long end_time; | ||
| 627 | const int cap = host->pmecc_corr_cap; | ||
| 628 | const int num = 2 * cap + 1; | ||
| 629 | int sector_size = host->pmecc_sector_size; | ||
| 630 | int err_nbr = 0; /* number of errors */ | ||
| 631 | int roots_nbr; /* number of roots */ | ||
| 632 | int i; | ||
| 633 | uint32_t val; | ||
| 634 | int16_t *smu = host->pmecc_smu; | ||
| 635 | |||
| 636 | pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE); | ||
| 637 | |||
| 638 | for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) { | ||
| 639 | pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i, | ||
| 640 | smu[(cap + 1) * num + i]); | ||
| 641 | err_nbr++; | ||
| 642 | } | ||
| 643 | |||
| 644 | val = (err_nbr - 1) << 16; | ||
| 645 | if (sector_size == 1024) | ||
| 646 | val |= 1; | ||
| 647 | |||
| 648 | pmerrloc_writel(host->pmerrloc_base, ELCFG, val); | ||
| 649 | pmerrloc_writel(host->pmerrloc_base, ELEN, | ||
| 650 | sector_size * 8 + host->pmecc_degree * cap); | ||
| 651 | |||
| 652 | end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); | ||
| 653 | while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) | ||
| 654 | & PMERRLOC_CALC_DONE)) { | ||
| 655 | if (unlikely(time_after(jiffies, end_time))) { | ||
| 656 | dev_err(host->dev, "PMECC: Timeout to calculate error location.\n"); | ||
| 657 | return -1; | ||
| 658 | } | ||
| 659 | cpu_relax(); | ||
| 660 | } | ||
| 661 | |||
| 662 | roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) | ||
| 663 | & PMERRLOC_ERR_NUM_MASK) >> 8; | ||
| 664 | /* Number of roots == degree of smu hence <= cap */ | ||
| 665 | if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1) | ||
| 666 | return err_nbr - 1; | ||
| 667 | |||
| 668 | /* Number of roots does not match the degree of smu, | ||
| 669 | * unable to correct error */ | ||
| 670 | return -1; | ||
| 671 | } | ||
| 672 | |||
| 673 | static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, | ||
| 674 | int sector_num, int extra_bytes, int err_nbr) | ||
| 675 | { | ||
| 676 | struct nand_chip *nand_chip = mtd->priv; | ||
| 677 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 678 | int i = 0; | ||
| 679 | int byte_pos, bit_pos, sector_size, pos; | ||
| 680 | uint32_t tmp; | ||
| 681 | uint8_t err_byte; | ||
| 682 | |||
| 683 | sector_size = host->pmecc_sector_size; | ||
| 684 | |||
| 685 | while (err_nbr) { | ||
| 686 | tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1; | ||
| 687 | byte_pos = tmp / 8; | ||
| 688 | bit_pos = tmp % 8; | ||
| 689 | |||
| 690 | if (byte_pos >= (sector_size + extra_bytes)) | ||
| 691 | BUG(); /* should never happen */ | ||
| 692 | |||
| 693 | if (byte_pos < sector_size) { | ||
| 694 | err_byte = *(buf + byte_pos); | ||
| 695 | *(buf + byte_pos) ^= (1 << bit_pos); | ||
| 696 | |||
| 697 | pos = sector_num * host->pmecc_sector_size + byte_pos; | ||
| 698 | dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", | ||
| 699 | pos, bit_pos, err_byte, *(buf + byte_pos)); | ||
| 700 | } else { | ||
| 701 | /* Bit flip in OOB area */ | ||
| 702 | tmp = sector_num * host->pmecc_bytes_per_sector | ||
| 703 | + (byte_pos - sector_size); | ||
| 704 | err_byte = ecc[tmp]; | ||
| 705 | ecc[tmp] ^= (1 << bit_pos); | ||
| 706 | |||
| 707 | pos = tmp + nand_chip->ecc.layout->eccpos[0]; | ||
| 708 | dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", | ||
| 709 | pos, bit_pos, err_byte, ecc[tmp]); | ||
| 710 | } | ||
| 711 | |||
| 712 | i++; | ||
| 713 | err_nbr--; | ||
| 714 | } | ||
| 715 | |||
| 716 | return; | ||
| 717 | } | ||
| 718 | |||
| 719 | static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, | ||
| 720 | u8 *ecc) | ||
| 721 | { | ||
| 722 | struct nand_chip *nand_chip = mtd->priv; | ||
| 723 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 724 | int i, err_nbr, eccbytes; | ||
| 725 | uint8_t *buf_pos; | ||
| 726 | |||
| 727 | eccbytes = nand_chip->ecc.bytes; | ||
| 728 | for (i = 0; i < eccbytes; i++) | ||
| 729 | if (ecc[i] != 0xff) | ||
| 730 | goto normal_check; | ||
| 731 | /* Erased page, return OK */ | ||
| 732 | return 0; | ||
| 733 | |||
| 734 | normal_check: | ||
| 735 | for (i = 0; i < host->pmecc_sector_number; i++) { | ||
| 736 | err_nbr = 0; | ||
| 737 | if (pmecc_stat & 0x1) { | ||
| 738 | buf_pos = buf + i * host->pmecc_sector_size; | ||
| 739 | |||
| 740 | pmecc_gen_syndrome(mtd, i); | ||
| 741 | pmecc_substitute(mtd); | ||
| 742 | pmecc_get_sigma(mtd); | ||
| 743 | |||
| 744 | err_nbr = pmecc_err_location(mtd); | ||
| 745 | if (err_nbr == -1) { | ||
| 746 | dev_err(host->dev, "PMECC: Too many errors\n"); | ||
| 747 | mtd->ecc_stats.failed++; | ||
| 748 | return -EIO; | ||
| 749 | } else { | ||
| 750 | pmecc_correct_data(mtd, buf_pos, ecc, i, | ||
| 751 | host->pmecc_bytes_per_sector, err_nbr); | ||
| 752 | mtd->ecc_stats.corrected += err_nbr; | ||
| 753 | } | ||
| 754 | } | ||
| 755 | pmecc_stat >>= 1; | ||
| 756 | } | ||
| 757 | |||
| 758 | return 0; | ||
| 759 | } | ||
| 760 | |||
| 761 | static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, | ||
| 762 | struct nand_chip *chip, uint8_t *buf, int oob_required, int page) | ||
| 763 | { | ||
| 764 | struct atmel_nand_host *host = chip->priv; | ||
| 765 | int eccsize = chip->ecc.size; | ||
| 766 | uint8_t *oob = chip->oob_poi; | ||
| 767 | uint32_t *eccpos = chip->ecc.layout->eccpos; | ||
| 768 | uint32_t stat; | ||
| 769 | unsigned long end_time; | ||
| 770 | |||
| 771 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); | ||
| 772 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); | ||
| 773 | pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) | ||
| 774 | & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE); | ||
| 775 | |||
| 776 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); | ||
| 777 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA); | ||
| 778 | |||
| 779 | chip->read_buf(mtd, buf, eccsize); | ||
| 780 | chip->read_buf(mtd, oob, mtd->oobsize); | ||
| 781 | |||
| 782 | end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); | ||
| 783 | while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { | ||
| 784 | if (unlikely(time_after(jiffies, end_time))) { | ||
| 785 | dev_err(host->dev, "PMECC: Timeout to get error status.\n"); | ||
| 786 | return -EIO; | ||
| 787 | } | ||
| 788 | cpu_relax(); | ||
| 789 | } | ||
| 790 | |||
| 791 | stat = pmecc_readl_relaxed(host->ecc, ISR); | ||
| 792 | if (stat != 0) | ||
| 793 | if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0) | ||
| 794 | return -EIO; | ||
| 795 | |||
| 796 | return 0; | ||
| 797 | } | ||
| 798 | |||
| 799 | static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, | ||
| 800 | struct nand_chip *chip, const uint8_t *buf, int oob_required) | ||
| 801 | { | ||
| 802 | struct atmel_nand_host *host = chip->priv; | ||
| 803 | uint32_t *eccpos = chip->ecc.layout->eccpos; | ||
| 804 | int i, j; | ||
| 805 | unsigned long end_time; | ||
| 806 | |||
| 807 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); | ||
| 808 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); | ||
| 809 | |||
| 810 | pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) | | ||
| 811 | PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE); | ||
| 812 | |||
| 813 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); | ||
| 814 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA); | ||
| 815 | |||
| 816 | chip->write_buf(mtd, (u8 *)buf, mtd->writesize); | ||
| 817 | |||
| 818 | end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS); | ||
| 819 | while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { | ||
| 820 | if (unlikely(time_after(jiffies, end_time))) { | ||
| 821 | dev_err(host->dev, "PMECC: Timeout to get ECC value.\n"); | ||
| 822 | return -EIO; | ||
| 823 | } | ||
| 824 | cpu_relax(); | ||
| 825 | } | ||
| 826 | |||
| 827 | for (i = 0; i < host->pmecc_sector_number; i++) { | ||
| 828 | for (j = 0; j < host->pmecc_bytes_per_sector; j++) { | ||
| 829 | int pos; | ||
| 830 | |||
| 831 | pos = i * host->pmecc_bytes_per_sector + j; | ||
| 832 | chip->oob_poi[eccpos[pos]] = | ||
| 833 | pmecc_readb_ecc_relaxed(host->ecc, i, j); | ||
| 834 | } | ||
| 835 | } | ||
| 836 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 837 | |||
| 838 | return 0; | ||
| 839 | } | ||
| 840 | |||
| 841 | static void atmel_pmecc_core_init(struct mtd_info *mtd) | ||
| 842 | { | ||
| 843 | struct nand_chip *nand_chip = mtd->priv; | ||
| 844 | struct atmel_nand_host *host = nand_chip->priv; | ||
| 845 | uint32_t val = 0; | ||
| 846 | struct nand_ecclayout *ecc_layout; | ||
| 847 | |||
| 848 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); | ||
| 849 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); | ||
| 850 | |||
| 851 | switch (host->pmecc_corr_cap) { | ||
| 852 | case 2: | ||
| 853 | val = PMECC_CFG_BCH_ERR2; | ||
| 854 | break; | ||
| 855 | case 4: | ||
| 856 | val = PMECC_CFG_BCH_ERR4; | ||
| 857 | break; | ||
| 858 | case 8: | ||
| 859 | val = PMECC_CFG_BCH_ERR8; | ||
| 860 | break; | ||
| 861 | case 12: | ||
| 862 | val = PMECC_CFG_BCH_ERR12; | ||
| 863 | break; | ||
| 864 | case 24: | ||
| 865 | val = PMECC_CFG_BCH_ERR24; | ||
| 866 | break; | ||
| 867 | } | ||
| 868 | |||
| 869 | if (host->pmecc_sector_size == 512) | ||
| 870 | val |= PMECC_CFG_SECTOR512; | ||
| 871 | else if (host->pmecc_sector_size == 1024) | ||
| 872 | val |= PMECC_CFG_SECTOR1024; | ||
| 873 | |||
| 874 | switch (host->pmecc_sector_number) { | ||
| 875 | case 1: | ||
| 876 | val |= PMECC_CFG_PAGE_1SECTOR; | ||
| 877 | break; | ||
| 878 | case 2: | ||
| 879 | val |= PMECC_CFG_PAGE_2SECTORS; | ||
| 880 | break; | ||
| 881 | case 4: | ||
| 882 | val |= PMECC_CFG_PAGE_4SECTORS; | ||
| 883 | break; | ||
| 884 | case 8: | ||
| 885 | val |= PMECC_CFG_PAGE_8SECTORS; | ||
| 886 | break; | ||
| 887 | } | ||
| 888 | |||
| 889 | val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE | ||
| 890 | | PMECC_CFG_AUTO_DISABLE); | ||
| 891 | pmecc_writel(host->ecc, CFG, val); | ||
| 892 | |||
| 893 | ecc_layout = nand_chip->ecc.layout; | ||
| 894 | pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1); | ||
| 895 | pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]); | ||
| 896 | pmecc_writel(host->ecc, EADDR, | ||
| 897 | ecc_layout->eccpos[ecc_layout->eccbytes - 1]); | ||
| 898 | /* See datasheet about PMECC Clock Control Register */ | ||
| 899 | pmecc_writel(host->ecc, CLK, 2); | ||
| 900 | pmecc_writel(host->ecc, IDR, 0xff); | ||
| 901 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); | ||
| 902 | } | ||
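For a concrete (purely illustrative) configuration of 512-byte sectors, cap = 4 and four sectors per page, the CFG value assembled above is PMECC_CFG_BCH_ERR4 | PMECC_CFG_SECTOR512 | PMECC_CFG_PAGE_4SECTORS | PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE | PMECC_CFG_AUTO_DISABLE = 0x1 | 0x0 | 0x200 | 0x0 | 0x0 | 0x0 = 0x00000201, per the bit definitions added to atmel_nand_ecc.h below.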
| 903 | |||
| 904 | static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev, | ||
| 905 | struct atmel_nand_host *host) | ||
| 906 | { | ||
| 907 | struct mtd_info *mtd = &host->mtd; | ||
| 908 | struct nand_chip *nand_chip = &host->nand_chip; | ||
| 909 | struct resource *regs, *regs_pmerr, *regs_rom; | ||
| 910 | int cap, sector_size, err_no; | ||
| 911 | |||
| 912 | cap = host->pmecc_corr_cap; | ||
| 913 | sector_size = host->pmecc_sector_size; | ||
| 914 | dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n", | ||
| 915 | cap, sector_size); | ||
| 916 | |||
| 917 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 918 | if (!regs) { | ||
| 919 | dev_warn(host->dev, | ||
| 920 | "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n"); | ||
| 921 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 922 | return 0; | ||
| 923 | } | ||
| 924 | |||
| 925 | host->ecc = ioremap(regs->start, resource_size(regs)); | ||
| 926 | if (host->ecc == NULL) { | ||
| 927 | dev_err(host->dev, "ioremap failed\n"); | ||
| 928 | err_no = -EIO; | ||
| 929 | goto err_pmecc_ioremap; | ||
| 930 | } | ||
| 931 | |||
| 932 | regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 933 | regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3); | ||
| 934 | if (regs_pmerr && regs_rom) { | ||
| 935 | host->pmerrloc_base = ioremap(regs_pmerr->start, | ||
| 936 | resource_size(regs_pmerr)); | ||
| 937 | host->pmecc_rom_base = ioremap(regs_rom->start, | ||
| 938 | resource_size(regs_rom)); | ||
| 939 | } | ||
| 940 | |||
| 941 | if (!host->pmerrloc_base || !host->pmecc_rom_base) { | ||
| 942 | dev_err(host->dev, | ||
| 943 | "Can not get I/O resource for PMECC ERRLOC controller or ROM!\n"); | ||
| 944 | err_no = -EIO; | ||
| 945 | goto err_pmloc_ioremap; | ||
| 946 | } | ||
| 947 | |||
| 948 | /* ECC is calculated for the whole page (1 step) */ | ||
| 949 | nand_chip->ecc.size = mtd->writesize; | ||
| 950 | |||
| 951 | /* set ECC page size and oob layout */ | ||
| 952 | switch (mtd->writesize) { | ||
| 953 | case 2048: | ||
| 954 | host->pmecc_degree = PMECC_GF_DIMENSION_13; | ||
| 955 | host->pmecc_cw_len = (1 << host->pmecc_degree) - 1; | ||
| 956 | host->pmecc_sector_number = mtd->writesize / sector_size; | ||
| 957 | host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes( | ||
| 958 | cap, sector_size); | ||
| 959 | host->pmecc_alpha_to = pmecc_get_alpha_to(host); | ||
| 960 | host->pmecc_index_of = host->pmecc_rom_base + | ||
| 961 | host->pmecc_lookup_table_offset; | ||
| 962 | |||
| 963 | nand_chip->ecc.steps = 1; | ||
| 964 | nand_chip->ecc.strength = cap; | ||
| 965 | nand_chip->ecc.bytes = host->pmecc_bytes_per_sector * | ||
| 966 | host->pmecc_sector_number; | ||
| 967 | if (nand_chip->ecc.bytes > mtd->oobsize - 2) { | ||
| 968 | dev_err(host->dev, "No room for ECC bytes\n"); | ||
| 969 | err_no = -EINVAL; | ||
| 970 | goto err_no_ecc_room; | ||
| 971 | } | ||
| 972 | pmecc_config_ecc_layout(&atmel_pmecc_oobinfo, | ||
| 973 | mtd->oobsize, | ||
| 974 | nand_chip->ecc.bytes); | ||
| 975 | nand_chip->ecc.layout = &atmel_pmecc_oobinfo; | ||
| 976 | break; | ||
| 977 | case 512: | ||
| 978 | case 1024: | ||
| 979 | case 4096: | ||
| 980 | /* TODO */ | ||
| 981 | dev_warn(host->dev, | ||
| 982 | "Unsupported page size for PMECC, use Software ECC\n"); | ||
| 983 | default: | ||
| 984 | /* page size not handled by HW ECC */ | ||
| 985 | /* switching back to soft ECC */ | ||
| 986 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 987 | return 0; | ||
| 988 | } | ||
| 989 | |||
| 990 | /* Allocate data for PMECC computation */ | ||
| 991 | err_no = pmecc_data_alloc(host); | ||
| 992 | if (err_no) { | ||
| 993 | dev_err(host->dev, | ||
| 994 | "Cannot allocate memory for PMECC computation!\n"); | ||
| 995 | goto err_pmecc_data_alloc; | ||
| 996 | } | ||
| 997 | |||
| 998 | nand_chip->ecc.read_page = atmel_nand_pmecc_read_page; | ||
| 999 | nand_chip->ecc.write_page = atmel_nand_pmecc_write_page; | ||
| 1000 | |||
| 1001 | atmel_pmecc_core_init(mtd); | ||
| 1002 | |||
| 1003 | return 0; | ||
| 1004 | |||
| 1005 | err_pmecc_data_alloc: | ||
| 1006 | err_no_ecc_room: | ||
| 1007 | err_pmloc_ioremap: | ||
| 1008 | iounmap(host->ecc); | ||
| 1009 | if (host->pmerrloc_base) | ||
| 1010 | iounmap(host->pmerrloc_base); | ||
| 1011 | if (host->pmecc_rom_base) | ||
| 1012 | iounmap(host->pmecc_rom_base); | ||
| 1013 | err_pmecc_ioremap: | ||
| 1014 | return err_no; | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | /* | ||
| 291 | * Calculate HW ECC | 1018 | * Calculate HW ECC |
| 292 | * | 1019 | * |
| 293 | * function called after a write | 1020 | * function called after a write |
| @@ -481,7 +1208,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) | |||
| 481 | static int __devinit atmel_of_init_port(struct atmel_nand_host *host, | 1208 | static int __devinit atmel_of_init_port(struct atmel_nand_host *host, |
| 482 | struct device_node *np) | 1209 | struct device_node *np) |
| 483 | { | 1210 | { |
| 484 | u32 val; | 1211 | u32 val, table_offset; |
| 1212 | u32 offset[2]; | ||
| 485 | int ecc_mode; | 1213 | int ecc_mode; |
| 486 | struct atmel_nand_data *board = &host->board; | 1214 | struct atmel_nand_data *board = &host->board; |
| 487 | enum of_gpio_flags flags; | 1215 | enum of_gpio_flags flags; |
| @@ -517,6 +1245,50 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host, | |||
| 517 | board->enable_pin = of_get_gpio(np, 1); | 1245 | board->enable_pin = of_get_gpio(np, 1); |
| 518 | board->det_pin = of_get_gpio(np, 2); | 1246 | board->det_pin = of_get_gpio(np, 2); |
| 519 | 1247 | ||
| 1248 | host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc"); | ||
| 1249 | |||
| 1250 | if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc) | ||
| 1251 | return 0; /* Not using PMECC */ | ||
| 1252 | |||
| 1253 | /* use PMECC, get correction capability, sector size and lookup | ||
| 1254 | * table offset. | ||
| 1255 | */ | ||
| 1256 | if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) { | ||
| 1257 | dev_err(host->dev, "Cannot decide PMECC Capability\n"); | ||
| 1258 | return -EINVAL; | ||
| 1259 | } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && | ||
| 1260 | (val != 24)) { | ||
| 1261 | dev_err(host->dev, | ||
| 1262 | "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", | ||
| 1263 | val); | ||
| 1264 | return -EINVAL; | ||
| 1265 | } | ||
| 1266 | host->pmecc_corr_cap = (u8)val; | ||
| 1267 | |||
| 1268 | if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) { | ||
| 1269 | dev_err(host->dev, "Cannot decide PMECC Sector Size\n"); | ||
| 1270 | return -EINVAL; | ||
| 1271 | } else if ((val != 512) && (val != 1024)) { | ||
| 1272 | dev_err(host->dev, | ||
| 1273 | "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", | ||
| 1274 | val); | ||
| 1275 | return -EINVAL; | ||
| 1276 | } | ||
| 1277 | host->pmecc_sector_size = (u16)val; | ||
| 1278 | |||
| 1279 | if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset", | ||
| 1280 | offset, 2) != 0) { | ||
| 1281 | dev_err(host->dev, "Cannot get PMECC lookup table offset\n"); | ||
| 1282 | return -EINVAL; | ||
| 1283 | } | ||
| 1284 | table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1]; | ||
| 1285 | |||
| 1286 | if (!table_offset) { | ||
| 1287 | dev_err(host->dev, "Invalid PMECC lookup table offset\n"); | ||
| 1288 | return -EINVAL; | ||
| 1289 | } | ||
| 1290 | host->pmecc_lookup_table_offset = table_offset; | ||
| 1291 | |||
| 520 | return 0; | 1292 | return 0; |
| 521 | } | 1293 | } |
| 522 | #else | 1294 | #else |
| @@ -527,6 +1299,66 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host, | |||
| 527 | } | 1299 | } |
| 528 | #endif | 1300 | #endif |
| 529 | 1301 | ||
| 1302 | static int __init atmel_hw_nand_init_params(struct platform_device *pdev, | ||
| 1303 | struct atmel_nand_host *host) | ||
| 1304 | { | ||
| 1305 | struct mtd_info *mtd = &host->mtd; | ||
| 1306 | struct nand_chip *nand_chip = &host->nand_chip; | ||
| 1307 | struct resource *regs; | ||
| 1308 | |||
| 1309 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1310 | if (!regs) { | ||
| 1311 | dev_err(host->dev, | ||
| 1312 | "Can't get I/O resource regs, use software ECC\n"); | ||
| 1313 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 1314 | return 0; | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | host->ecc = ioremap(regs->start, resource_size(regs)); | ||
| 1318 | if (host->ecc == NULL) { | ||
| 1319 | dev_err(host->dev, "ioremap failed\n"); | ||
| 1320 | return -EIO; | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | /* ECC is calculated for the whole page (1 step) */ | ||
| 1324 | nand_chip->ecc.size = mtd->writesize; | ||
| 1325 | |||
| 1326 | /* set ECC page size and oob layout */ | ||
| 1327 | switch (mtd->writesize) { | ||
| 1328 | case 512: | ||
| 1329 | nand_chip->ecc.layout = &atmel_oobinfo_small; | ||
| 1330 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); | ||
| 1331 | break; | ||
| 1332 | case 1024: | ||
| 1333 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 1334 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); | ||
| 1335 | break; | ||
| 1336 | case 2048: | ||
| 1337 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 1338 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); | ||
| 1339 | break; | ||
| 1340 | case 4096: | ||
| 1341 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 1342 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); | ||
| 1343 | break; | ||
| 1344 | default: | ||
| 1345 | /* page size not handled by HW ECC */ | ||
| 1346 | /* switching back to soft ECC */ | ||
| 1347 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 1348 | return 0; | ||
| 1349 | } | ||
| 1350 | |||
| 1351 | /* set up for HW ECC */ | ||
| 1352 | nand_chip->ecc.calculate = atmel_nand_calculate; | ||
| 1353 | nand_chip->ecc.correct = atmel_nand_correct; | ||
| 1354 | nand_chip->ecc.hwctl = atmel_nand_hwctl; | ||
| 1355 | nand_chip->ecc.read_page = atmel_nand_read_page; | ||
| 1356 | nand_chip->ecc.bytes = 4; | ||
| 1357 | nand_chip->ecc.strength = 1; | ||
| 1358 | |||
| 1359 | return 0; | ||
| 1360 | } | ||
| 1361 | |||
| 530 | /* | 1362 | /* |
| 531 | * Probe for the NAND device. | 1363 | * Probe for the NAND device. |
| 532 | */ | 1364 | */ |
| @@ -535,7 +1367,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 535 | struct atmel_nand_host *host; | 1367 | struct atmel_nand_host *host; |
| 536 | struct mtd_info *mtd; | 1368 | struct mtd_info *mtd; |
| 537 | struct nand_chip *nand_chip; | 1369 | struct nand_chip *nand_chip; |
| 538 | struct resource *regs; | ||
| 539 | struct resource *mem; | 1370 | struct resource *mem; |
| 540 | struct mtd_part_parser_data ppdata = {}; | 1371 | struct mtd_part_parser_data ppdata = {}; |
| 541 | int res; | 1372 | int res; |
| @@ -568,7 +1399,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 568 | if (pdev->dev.of_node) { | 1399 | if (pdev->dev.of_node) { |
| 569 | res = atmel_of_init_port(host, pdev->dev.of_node); | 1400 | res = atmel_of_init_port(host, pdev->dev.of_node); |
| 570 | if (res) | 1401 | if (res) |
| 571 | goto err_nand_ioremap; | 1402 | goto err_ecc_ioremap; |
| 572 | } else { | 1403 | } else { |
| 573 | memcpy(&host->board, pdev->dev.platform_data, | 1404 | memcpy(&host->board, pdev->dev.platform_data, |
| 574 | sizeof(struct atmel_nand_data)); | 1405 | sizeof(struct atmel_nand_data)); |
| @@ -583,33 +1414,45 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 583 | nand_chip->IO_ADDR_W = host->io_base; | 1414 | nand_chip->IO_ADDR_W = host->io_base; |
| 584 | nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; | 1415 | nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; |
| 585 | 1416 | ||
| 586 | if (gpio_is_valid(host->board.rdy_pin)) | 1417 | if (gpio_is_valid(host->board.rdy_pin)) { |
| 587 | nand_chip->dev_ready = atmel_nand_device_ready; | 1418 | res = gpio_request(host->board.rdy_pin, "nand_rdy"); |
| 1419 | if (res < 0) { | ||
| 1420 | dev_err(&pdev->dev, | ||
| 1421 | "can't request rdy gpio %d\n", | ||
| 1422 | host->board.rdy_pin); | ||
| 1423 | goto err_ecc_ioremap; | ||
| 1424 | } | ||
| 588 | 1425 | ||
| 589 | nand_chip->ecc.mode = host->board.ecc_mode; | 1426 | res = gpio_direction_input(host->board.rdy_pin); |
| 1427 | if (res < 0) { | ||
| 1428 | dev_err(&pdev->dev, | ||
| 1429 | "can't request input direction rdy gpio %d\n", | ||
| 1430 | host->board.rdy_pin); | ||
| 1431 | goto err_ecc_ioremap; | ||
| 1432 | } | ||
| 590 | 1433 | ||
| 591 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1434 | nand_chip->dev_ready = atmel_nand_device_ready; |
| 592 | if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) { | ||
| 593 | printk(KERN_ERR "atmel_nand: can't get I/O resource " | ||
| 594 | "regs\nFalling back on software ECC\n"); | ||
| 595 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 596 | } | 1435 | } |
| 597 | 1436 | ||
| 598 | if (nand_chip->ecc.mode == NAND_ECC_HW) { | 1437 | if (gpio_is_valid(host->board.enable_pin)) { |
| 599 | host->ecc = ioremap(regs->start, resource_size(regs)); | 1438 | res = gpio_request(host->board.enable_pin, "nand_enable"); |
| 600 | if (host->ecc == NULL) { | 1439 | if (res < 0) { |
| 601 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); | 1440 | dev_err(&pdev->dev, |
| 602 | res = -EIO; | 1441 | "can't request enable gpio %d\n", |
| 1442 | host->board.enable_pin); | ||
| 1443 | goto err_ecc_ioremap; | ||
| 1444 | } | ||
| 1445 | |||
| 1446 | res = gpio_direction_output(host->board.enable_pin, 1); | ||
| 1447 | if (res < 0) { | ||
| 1448 | dev_err(&pdev->dev, | ||
| 1449 | "can't request output direction enable gpio %d\n", | ||
| 1450 | host->board.enable_pin); | ||
| 603 | goto err_ecc_ioremap; | 1451 | goto err_ecc_ioremap; |
| 604 | } | 1452 | } |
| 605 | nand_chip->ecc.calculate = atmel_nand_calculate; | ||
| 606 | nand_chip->ecc.correct = atmel_nand_correct; | ||
| 607 | nand_chip->ecc.hwctl = atmel_nand_hwctl; | ||
| 608 | nand_chip->ecc.read_page = atmel_nand_read_page; | ||
| 609 | nand_chip->ecc.bytes = 4; | ||
| 610 | nand_chip->ecc.strength = 1; | ||
| 611 | } | 1453 | } |
| 612 | 1454 | ||
| 1455 | nand_chip->ecc.mode = host->board.ecc_mode; | ||
| 613 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 1456 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
| 614 | 1457 | ||
| 615 | if (host->board.bus_width_16) /* 16-bit bus width */ | 1458 | if (host->board.bus_width_16) /* 16-bit bus width */ |
| @@ -622,6 +1465,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 622 | atmel_nand_enable(host); | 1465 | atmel_nand_enable(host); |
| 623 | 1466 | ||
| 624 | if (gpio_is_valid(host->board.det_pin)) { | 1467 | if (gpio_is_valid(host->board.det_pin)) { |
| 1468 | res = gpio_request(host->board.det_pin, "nand_det"); | ||
| 1469 | if (res < 0) { | ||
| 1470 | dev_err(&pdev->dev, | ||
| 1471 | "can't request det gpio %d\n", | ||
| 1472 | host->board.det_pin); | ||
| 1473 | goto err_no_card; | ||
| 1474 | } | ||
| 1475 | |||
| 1476 | res = gpio_direction_input(host->board.det_pin); | ||
| 1477 | if (res < 0) { | ||
| 1478 | dev_err(&pdev->dev, | ||
| 1479 | "can't request input direction det gpio %d\n", | ||
| 1480 | host->board.det_pin); | ||
| 1481 | goto err_no_card; | ||
| 1482 | } | ||
| 1483 | |||
| 625 | if (gpio_get_value(host->board.det_pin)) { | 1484 | if (gpio_get_value(host->board.det_pin)) { |
| 626 | printk(KERN_INFO "No SmartMedia card inserted.\n"); | 1485 | printk(KERN_INFO "No SmartMedia card inserted.\n"); |
| 627 | res = -ENXIO; | 1486 | res = -ENXIO; |
| @@ -661,40 +1520,13 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 661 | } | 1520 | } |
| 662 | 1521 | ||
| 663 | if (nand_chip->ecc.mode == NAND_ECC_HW) { | 1522 | if (nand_chip->ecc.mode == NAND_ECC_HW) { |
| 664 | /* ECC is calculated for the whole page (1 step) */ | 1523 | if (host->has_pmecc) |
| 665 | nand_chip->ecc.size = mtd->writesize; | 1524 | res = atmel_pmecc_nand_init_params(pdev, host); |
| 666 | 1525 | else | |
| 667 | /* set ECC page size and oob layout */ | 1526 | res = atmel_hw_nand_init_params(pdev, host); |
| 668 | switch (mtd->writesize) { | 1527 | |
| 669 | case 512: | 1528 | if (res != 0) |
| 670 | nand_chip->ecc.layout = &atmel_oobinfo_small; | 1529 | goto err_hw_ecc; |
| 671 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); | ||
| 672 | break; | ||
| 673 | case 1024: | ||
| 674 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 675 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); | ||
| 676 | break; | ||
| 677 | case 2048: | ||
| 678 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 679 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); | ||
| 680 | break; | ||
| 681 | case 4096: | ||
| 682 | nand_chip->ecc.layout = &atmel_oobinfo_large; | ||
| 683 | ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); | ||
| 684 | break; | ||
| 685 | default: | ||
| 686 | /* page size not handled by HW ECC */ | ||
| 687 | /* switching back to soft ECC */ | ||
| 688 | nand_chip->ecc.mode = NAND_ECC_SOFT; | ||
| 689 | nand_chip->ecc.calculate = NULL; | ||
| 690 | nand_chip->ecc.correct = NULL; | ||
| 691 | nand_chip->ecc.hwctl = NULL; | ||
| 692 | nand_chip->ecc.read_page = NULL; | ||
| 693 | nand_chip->ecc.postpad = 0; | ||
| 694 | nand_chip->ecc.prepad = 0; | ||
| 695 | nand_chip->ecc.bytes = 0; | ||
| 696 | break; | ||
| 697 | } | ||
| 698 | } | 1530 | } |
| 699 | 1531 | ||
| 700 | /* second phase scan */ | 1532 | /* second phase scan */ |
| @@ -711,14 +1543,23 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 711 | return res; | 1543 | return res; |
| 712 | 1544 | ||
| 713 | err_scan_tail: | 1545 | err_scan_tail: |
| 1546 | if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) { | ||
| 1547 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); | ||
| 1548 | pmecc_data_free(host); | ||
| 1549 | } | ||
| 1550 | if (host->ecc) | ||
| 1551 | iounmap(host->ecc); | ||
| 1552 | if (host->pmerrloc_base) | ||
| 1553 | iounmap(host->pmerrloc_base); | ||
| 1554 | if (host->pmecc_rom_base) | ||
| 1555 | iounmap(host->pmecc_rom_base); | ||
| 1556 | err_hw_ecc: | ||
| 714 | err_scan_ident: | 1557 | err_scan_ident: |
| 715 | err_no_card: | 1558 | err_no_card: |
| 716 | atmel_nand_disable(host); | 1559 | atmel_nand_disable(host); |
| 717 | platform_set_drvdata(pdev, NULL); | 1560 | platform_set_drvdata(pdev, NULL); |
| 718 | if (host->dma_chan) | 1561 | if (host->dma_chan) |
| 719 | dma_release_channel(host->dma_chan); | 1562 | dma_release_channel(host->dma_chan); |
| 720 | if (host->ecc) | ||
| 721 | iounmap(host->ecc); | ||
| 722 | err_ecc_ioremap: | 1563 | err_ecc_ioremap: |
| 723 | iounmap(host->io_base); | 1564 | iounmap(host->io_base); |
| 724 | err_nand_ioremap: | 1565 | err_nand_ioremap: |
| @@ -738,8 +1579,28 @@ static int __exit atmel_nand_remove(struct platform_device *pdev) | |||
| 738 | 1579 | ||
| 739 | atmel_nand_disable(host); | 1580 | atmel_nand_disable(host); |
| 740 | 1581 | ||
| 1582 | if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) { | ||
| 1583 | pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); | ||
| 1584 | pmerrloc_writel(host->pmerrloc_base, ELDIS, | ||
| 1585 | PMERRLOC_DISABLE); | ||
| 1586 | pmecc_data_free(host); | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | if (gpio_is_valid(host->board.det_pin)) | ||
| 1590 | gpio_free(host->board.det_pin); | ||
| 1591 | |||
| 1592 | if (gpio_is_valid(host->board.enable_pin)) | ||
| 1593 | gpio_free(host->board.enable_pin); | ||
| 1594 | |||
| 1595 | if (gpio_is_valid(host->board.rdy_pin)) | ||
| 1596 | gpio_free(host->board.rdy_pin); | ||
| 1597 | |||
| 741 | if (host->ecc) | 1598 | if (host->ecc) |
| 742 | iounmap(host->ecc); | 1599 | iounmap(host->ecc); |
| 1600 | if (host->pmecc_rom_base) | ||
| 1601 | iounmap(host->pmecc_rom_base); | ||
| 1602 | if (host->pmerrloc_base) | ||
| 1603 | iounmap(host->pmerrloc_base); | ||
| 743 | 1604 | ||
| 744 | if (host->dma_chan) | 1605 | if (host->dma_chan) |
| 745 | dma_release_channel(host->dma_chan); | 1606 | dma_release_channel(host->dma_chan); |
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h index 578c776e1356..8a1e9a686759 100644 --- a/drivers/mtd/nand/atmel_nand_ecc.h +++ b/drivers/mtd/nand/atmel_nand_ecc.h | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * Based on AT91SAM9260 datasheet revision B. | 3 | * Based on AT91SAM9260 datasheet revision B. |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2007 Andrew Victor | 5 | * Copyright (C) 2007 Andrew Victor |
| 6 | * Copyright (C) 2007 Atmel Corporation. | 6 | * Copyright (C) 2007 - 2012 Atmel Corporation. |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
| @@ -36,4 +36,116 @@ | |||
| 36 | #define ATMEL_ECC_NPR 0x10 /* NParity register */ | 36 | #define ATMEL_ECC_NPR 0x10 /* NParity register */ |
| 37 | #define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */ | 37 | #define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */ |
| 38 | 38 | ||
| 39 | /* PMECC Register Definitions */ | ||
| 40 | #define ATMEL_PMECC_CFG 0x000 /* Configuration Register */ | ||
| 41 | #define PMECC_CFG_BCH_ERR2 (0 << 0) | ||
| 42 | #define PMECC_CFG_BCH_ERR4 (1 << 0) | ||
| 43 | #define PMECC_CFG_BCH_ERR8 (2 << 0) | ||
| 44 | #define PMECC_CFG_BCH_ERR12 (3 << 0) | ||
| 45 | #define PMECC_CFG_BCH_ERR24 (4 << 0) | ||
| 46 | |||
| 47 | #define PMECC_CFG_SECTOR512 (0 << 4) | ||
| 48 | #define PMECC_CFG_SECTOR1024 (1 << 4) | ||
| 49 | |||
| 50 | #define PMECC_CFG_PAGE_1SECTOR (0 << 8) | ||
| 51 | #define PMECC_CFG_PAGE_2SECTORS (1 << 8) | ||
| 52 | #define PMECC_CFG_PAGE_4SECTORS (2 << 8) | ||
| 53 | #define PMECC_CFG_PAGE_8SECTORS (3 << 8) | ||
| 54 | |||
| 55 | #define PMECC_CFG_READ_OP (0 << 12) | ||
| 56 | #define PMECC_CFG_WRITE_OP (1 << 12) | ||
| 57 | |||
| 58 | #define PMECC_CFG_SPARE_ENABLE (1 << 16) | ||
| 59 | #define PMECC_CFG_SPARE_DISABLE (0 << 16) | ||
| 60 | |||
| 61 | #define PMECC_CFG_AUTO_ENABLE (1 << 20) | ||
| 62 | #define PMECC_CFG_AUTO_DISABLE (0 << 20) | ||
| 63 | |||
| 64 | #define ATMEL_PMECC_SAREA 0x004 /* Spare area size */ | ||
| 65 | #define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */ | ||
| 66 | #define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */ | ||
| 67 | #define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */ | ||
| 68 | #define PMECC_CLK_133MHZ (2 << 0) | ||
| 69 | |||
| 70 | #define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */ | ||
| 71 | #define PMECC_CTRL_RST (1 << 0) | ||
| 72 | #define PMECC_CTRL_DATA (1 << 1) | ||
| 73 | #define PMECC_CTRL_USER (1 << 2) | ||
| 74 | #define PMECC_CTRL_ENABLE (1 << 4) | ||
| 75 | #define PMECC_CTRL_DISABLE (1 << 5) | ||
| 76 | |||
| 77 | #define ATMEL_PMECC_SR 0x018 /* PMECC status register */ | ||
| 78 | #define PMECC_SR_BUSY (1 << 0) | ||
| 79 | #define PMECC_SR_ENABLE (1 << 4) | ||
| 80 | |||
| 81 | #define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */ | ||
| 82 | #define PMECC_IER_ENABLE (1 << 0) | ||
| 83 | #define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */ | ||
| 84 | #define PMECC_IER_DISABLE (1 << 0) | ||
| 85 | #define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */ | ||
| 86 | #define PMECC_IER_MASK (1 << 0) | ||
| 87 | #define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */ | ||
| 88 | #define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */ | ||
| 89 | #define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */ | ||
| 90 | |||
| 91 | /* PMERRLOC Register Definitions */ | ||
| 92 | #define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */ | ||
| 93 | #define PMERRLOC_ELCFG_SECTOR_512 (0 << 0) | ||
| 94 | #define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0) | ||
| 95 | #define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16) | ||
| 96 | |||
| 97 | #define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */ | ||
| 98 | #define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */ | ||
| 99 | #define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */ | ||
| 100 | #define PMERRLOC_DISABLE (1 << 0) | ||
| 101 | |||
| 102 | #define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */ | ||
| 103 | #define PMERRLOC_ELSR_BUSY (1 << 0) | ||
| 104 | #define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */ | ||
| 105 | #define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */ | ||
| 106 | #define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */ | ||
| 107 | #define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */ | ||
| 108 | #define PMERRLOC_ERR_NUM_MASK (0x1f << 8) | ||
| 109 | #define PMERRLOC_CALC_DONE (1 << 0) | ||
| 110 | #define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */ | ||
| 111 | #define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */ | ||
| 112 | |||
| 113 | /* Register access macros for PMECC */ | ||
| 114 | #define pmecc_readl_relaxed(addr, reg) \ | ||
| 115 | readl_relaxed((addr) + ATMEL_PMECC_##reg) | ||
| 116 | |||
| 117 | #define pmecc_writel(addr, reg, value) \ | ||
| 118 | writel((value), (addr) + ATMEL_PMECC_##reg) | ||
| 119 | |||
| 120 | #define pmecc_readb_ecc_relaxed(addr, sector, n) \ | ||
| 121 | readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n)) | ||
| 122 | |||
| 123 | #define pmecc_readl_rem_relaxed(addr, sector, n) \ | ||
| 124 | readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4)) | ||
| 125 | |||
| 126 | #define pmerrloc_readl_relaxed(addr, reg) \ | ||
| 127 | readl_relaxed((addr) + ATMEL_PMERRLOC_##reg) | ||
| 128 | |||
| 129 | #define pmerrloc_writel(addr, reg, value) \ | ||
| 130 | writel((value), (addr) + ATMEL_PMERRLOC_##reg) | ||
| 131 | |||
| 132 | #define pmerrloc_writel_sigma_relaxed(addr, n, value) \ | ||
| 133 | writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) | ||
| 134 | |||
| 135 | #define pmerrloc_readl_sigma_relaxed(addr, n) \ | ||
| 136 | readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) | ||
| 137 | |||
| 138 | #define pmerrloc_readl_el_relaxed(addr, n) \ | ||
| 139 | readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4)) | ||
| 140 | |||
| 141 | /* Galois field dimension */ | ||
| 142 | #define PMECC_GF_DIMENSION_13 13 | ||
| 143 | #define PMECC_GF_DIMENSION_14 14 | ||
| 144 | |||
| 145 | #define PMECC_LOOKUP_TABLE_SIZE_512 0x2000 | ||
| 146 | #define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000 | ||
| 147 | |||
| 148 | /* Time out value for reading PMECC status register */ | ||
| 149 | #define PMECC_MAX_TIMEOUT_MS 100 | ||
| 150 | |||
| 39 | #endif | 151 | #endif |
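The header above only provides register offsets, bit definitions and access helpers; as a rough usage sketch (not part of this patch), a driver could reset the PMECC engine and poll it to idle through those helpers roughly as follows. The function name, the pmecc_base cookie and the exact control sequencing are illustrative assumptions, not the driver's actual code.

/* Sketch only: assumes the usual kernel headers (<linux/io.h>, <linux/jiffies.h>)
 * are already pulled in by the driver that includes atmel_nand_ecc.h. */
static int pmecc_enable_and_wait(void __iomem *pmecc_base)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);

	/* Put the engine into a known state: assert reset, then clear it. */
	pmecc_writel(pmecc_base, CTRL, PMECC_CTRL_RST);
	pmecc_writel(pmecc_base, CTRL, PMECC_CTRL_DISABLE);

	/* Re-enable the block and start ECC computation for the next data phase. */
	pmecc_writel(pmecc_base, CTRL, PMECC_CTRL_ENABLE);
	pmecc_writel(pmecc_base, CTRL, PMECC_CTRL_DATA);

	/* Busy-wait, bounded by PMECC_MAX_TIMEOUT_MS, for the engine to go idle. */
	while (pmecc_readl_relaxed(pmecc_base, SR) & PMECC_SR_BUSY) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}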
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 9f609d2dcf62..5c47b200045a 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
| @@ -141,28 +141,6 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | /** | 143 | /** |
| 144 | * au_verify_buf - Verify chip data against buffer | ||
| 145 | * @mtd: MTD device structure | ||
| 146 | * @buf: buffer containing the data to compare | ||
| 147 | * @len: number of bytes to compare | ||
| 148 | * | ||
| 149 | * verify function for 8bit buswidth | ||
| 150 | */ | ||
| 151 | static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 152 | { | ||
| 153 | int i; | ||
| 154 | struct nand_chip *this = mtd->priv; | ||
| 155 | |||
| 156 | for (i = 0; i < len; i++) { | ||
| 157 | if (buf[i] != readb(this->IO_ADDR_R)) | ||
| 158 | return -EFAULT; | ||
| 159 | au_sync(); | ||
| 160 | } | ||
| 161 | |||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | /** | ||
| 166 | * au_write_buf16 - write buffer to chip | 144 | * au_write_buf16 - write buffer to chip |
| 167 | * @mtd: MTD device structure | 145 | * @mtd: MTD device structure |
| 168 | * @buf: data buffer | 146 | * @buf: data buffer |
| @@ -205,29 +183,6 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) | |||
| 205 | } | 183 | } |
| 206 | } | 184 | } |
| 207 | 185 | ||
| 208 | /** | ||
| 209 | * au_verify_buf16 - Verify chip data against buffer | ||
| 210 | * @mtd: MTD device structure | ||
| 211 | * @buf: buffer containing the data to compare | ||
| 212 | * @len: number of bytes to compare | ||
| 213 | * | ||
| 214 | * verify function for 16bit buswidth | ||
| 215 | */ | ||
| 216 | static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 217 | { | ||
| 218 | int i; | ||
| 219 | struct nand_chip *this = mtd->priv; | ||
| 220 | u16 *p = (u16 *) buf; | ||
| 221 | len >>= 1; | ||
| 222 | |||
| 223 | for (i = 0; i < len; i++) { | ||
| 224 | if (p[i] != readw(this->IO_ADDR_R)) | ||
| 225 | return -EFAULT; | ||
| 226 | au_sync(); | ||
| 227 | } | ||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | /* Select the chip by setting nCE to low */ | 186 | /* Select the chip by setting nCE to low */ |
| 232 | #define NAND_CTL_SETNCE 1 | 187 | #define NAND_CTL_SETNCE 1 |
| 233 | /* Deselect the chip by setting nCE to high */ | 188 | /* Deselect the chip by setting nCE to high */ |
| @@ -516,7 +471,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev) | |||
| 516 | this->read_word = au_read_word; | 471 | this->read_word = au_read_word; |
| 517 | this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; | 472 | this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; |
| 518 | this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; | 473 | this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; |
| 519 | this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf; | ||
| 520 | 474 | ||
| 521 | ret = nand_scan(&ctx->info, 1); | 475 | ret = nand_scan(&ctx->info, 1); |
| 522 | if (ret) { | 476 | if (ret) { |
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c deleted file mode 100644 index 5914bb32e001..000000000000 --- a/drivers/mtd/nand/bcm_umi_bch.c +++ /dev/null | |||
| @@ -1,217 +0,0 @@ | |||
| 1 | /***************************************************************************** | ||
| 2 | * Copyright 2004 - 2009 Broadcom Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Unless you and Broadcom execute a separate written software license | ||
| 5 | * agreement governing use of this software, this software is licensed to you | ||
| 6 | * under the terms of the GNU General Public License version 2, available at | ||
| 7 | * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). | ||
| 8 | * | ||
| 9 | * Notwithstanding the above, under no circumstances may you combine this | ||
| 10 | * software in any way with any other Broadcom software provided under a | ||
| 11 | * license other than the GPL, without Broadcom's express prior written | ||
| 12 | * consent. | ||
| 13 | *****************************************************************************/ | ||
| 14 | |||
| 15 | /* ---- Include Files ---------------------------------------------------- */ | ||
| 16 | #include "nand_bcm_umi.h" | ||
| 17 | |||
| 18 | /* ---- External Variable Declarations ----------------------------------- */ | ||
| 19 | /* ---- External Function Prototypes ------------------------------------- */ | ||
| 20 | /* ---- Public Variables ------------------------------------------------- */ | ||
| 21 | /* ---- Private Constants and Types -------------------------------------- */ | ||
| 22 | |||
| 23 | /* ---- Private Function Prototypes -------------------------------------- */ | ||
| 24 | static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, | ||
| 25 | struct nand_chip *chip, uint8_t *buf, int oob_required, int page); | ||
| 26 | static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, | ||
| 27 | struct nand_chip *chip, const uint8_t *buf, int oob_required); | ||
| 28 | |||
| 29 | /* ---- Private Variables ------------------------------------------------ */ | ||
| 30 | |||
| 31 | /* | ||
| 32 | ** nand_hw_eccoob | ||
| 33 | ** New oob placement block for use with hardware ecc generation. | ||
| 34 | */ | ||
| 35 | static struct nand_ecclayout nand_hw_eccoob_512 = { | ||
| 36 | /* Reserve 5 for BI indicator */ | ||
| 37 | .oobfree = { | ||
| 38 | #if (NAND_ECC_NUM_BYTES > 3) | ||
| 39 | {.offset = 0, .length = 2} | ||
| 40 | #else | ||
| 41 | {.offset = 0, .length = 5}, | ||
| 42 | {.offset = 6, .length = 7} | ||
| 43 | #endif | ||
| 44 | } | ||
| 45 | }; | ||
| 46 | |||
| 47 | /* | ||
| 48 | ** We treat the OOB for a 2K page as if it were 4 512 byte oobs, | ||
| 49 | ** except the BI is at byte 0. | ||
| 50 | */ | ||
| 51 | static struct nand_ecclayout nand_hw_eccoob_2048 = { | ||
| 52 | /* Reserve 0 as BI indicator */ | ||
| 53 | .oobfree = { | ||
| 54 | #if (NAND_ECC_NUM_BYTES > 10) | ||
| 55 | {.offset = 1, .length = 2}, | ||
| 56 | #elif (NAND_ECC_NUM_BYTES > 7) | ||
| 57 | {.offset = 1, .length = 5}, | ||
| 58 | {.offset = 16, .length = 6}, | ||
| 59 | {.offset = 32, .length = 6}, | ||
| 60 | {.offset = 48, .length = 6} | ||
| 61 | #else | ||
| 62 | {.offset = 1, .length = 8}, | ||
| 63 | {.offset = 16, .length = 9}, | ||
| 64 | {.offset = 32, .length = 9}, | ||
| 65 | {.offset = 48, .length = 9} | ||
| 66 | #endif | ||
| 67 | } | ||
| 68 | }; | ||
| 69 | |||
| 70 | /* We treat the OOB for a 4K page as if it were 8 512 byte oobs, | ||
| 71 | * except the BI is at byte 0. */ | ||
| 72 | static struct nand_ecclayout nand_hw_eccoob_4096 = { | ||
| 73 | /* Reserve 0 as BI indicator */ | ||
| 74 | .oobfree = { | ||
| 75 | #if (NAND_ECC_NUM_BYTES > 10) | ||
| 76 | {.offset = 1, .length = 2}, | ||
| 77 | {.offset = 16, .length = 3}, | ||
| 78 | {.offset = 32, .length = 3}, | ||
| 79 | {.offset = 48, .length = 3}, | ||
| 80 | {.offset = 64, .length = 3}, | ||
| 81 | {.offset = 80, .length = 3}, | ||
| 82 | {.offset = 96, .length = 3}, | ||
| 83 | {.offset = 112, .length = 3} | ||
| 84 | #else | ||
| 85 | {.offset = 1, .length = 5}, | ||
| 86 | {.offset = 16, .length = 6}, | ||
| 87 | {.offset = 32, .length = 6}, | ||
| 88 | {.offset = 48, .length = 6}, | ||
| 89 | {.offset = 64, .length = 6}, | ||
| 90 | {.offset = 80, .length = 6}, | ||
| 91 | {.offset = 96, .length = 6}, | ||
| 92 | {.offset = 112, .length = 6} | ||
| 93 | #endif | ||
| 94 | } | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* ---- Private Functions ------------------------------------------------ */ | ||
| 98 | /* ==== Public Functions ================================================= */ | ||
| 99 | |||
| 100 | /**************************************************************************** | ||
| 101 | * | ||
| 102 | * bcm_umi_bch_read_page_hwecc - hardware ecc based page read function | ||
| 103 | * @mtd: mtd info structure | ||
| 104 | * @chip: nand chip info structure | ||
| 105 | * @buf: buffer to store read data | ||
| 106 | * @oob_required: caller expects OOB data read to chip->oob_poi | ||
| 107 | * | ||
| 108 | ***************************************************************************/ | ||
| 109 | static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, | ||
| 110 | struct nand_chip *chip, uint8_t * buf, | ||
| 111 | int oob_required, int page) | ||
| 112 | { | ||
| 113 | int sectorIdx = 0; | ||
| 114 | int eccsize = chip->ecc.size; | ||
| 115 | int eccsteps = chip->ecc.steps; | ||
| 116 | uint8_t *datap = buf; | ||
| 117 | uint8_t eccCalc[NAND_ECC_NUM_BYTES]; | ||
| 118 | int sectorOobSize = mtd->oobsize / eccsteps; | ||
| 119 | int stat; | ||
| 120 | unsigned int max_bitflips = 0; | ||
| 121 | |||
| 122 | for (sectorIdx = 0; sectorIdx < eccsteps; | ||
| 123 | sectorIdx++, datap += eccsize) { | ||
| 124 | if (sectorIdx > 0) { | ||
| 125 | /* Seek to page location within sector */ | ||
| 126 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize, | ||
| 127 | -1); | ||
| 128 | } | ||
| 129 | |||
| 130 | /* Enable hardware ECC before reading the buf */ | ||
| 131 | nand_bcm_umi_bch_enable_read_hwecc(); | ||
| 132 | |||
| 133 | /* Read in data */ | ||
| 134 | bcm_umi_nand_read_buf(mtd, datap, eccsize); | ||
| 135 | |||
| 136 | /* Pause hardware ECC after reading the buf */ | ||
| 137 | nand_bcm_umi_bch_pause_read_ecc_calc(); | ||
| 138 | |||
| 139 | /* Read the OOB ECC */ | ||
| 140 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
| 141 | mtd->writesize + sectorIdx * sectorOobSize, -1); | ||
| 142 | nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc, | ||
| 143 | NAND_ECC_NUM_BYTES, | ||
| 144 | chip->oob_poi + | ||
| 145 | sectorIdx * sectorOobSize); | ||
| 146 | |||
| 147 | /* Correct any ECC detected errors */ | ||
| 148 | stat = | ||
| 149 | nand_bcm_umi_bch_correct_page(datap, eccCalc, | ||
| 150 | NAND_ECC_NUM_BYTES); | ||
| 151 | |||
| 152 | /* Update Stats */ | ||
| 153 | if (stat < 0) { | ||
| 154 | #if defined(NAND_BCM_UMI_DEBUG) | ||
| 155 | printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n", | ||
| 156 | __func__, sectorIdx); | ||
| 157 | printk(KERN_WARNING | ||
| 158 | "%s data %02x %02x %02x %02x " | ||
| 159 | "%02x %02x %02x %02x\n", | ||
| 160 | __func__, datap[0], datap[1], datap[2], datap[3], | ||
| 161 | datap[4], datap[5], datap[6], datap[7]); | ||
| 162 | printk(KERN_WARNING | ||
| 163 | "%s ecc %02x %02x %02x %02x " | ||
| 164 | "%02x %02x %02x %02x %02x %02x " | ||
| 165 | "%02x %02x %02x\n", | ||
| 166 | __func__, eccCalc[0], eccCalc[1], eccCalc[2], | ||
| 167 | eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6], | ||
| 168 | eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10], | ||
| 169 | eccCalc[11], eccCalc[12]); | ||
| 170 | BUG(); | ||
| 171 | #endif | ||
| 172 | mtd->ecc_stats.failed++; | ||
| 173 | } else { | ||
| 174 | #if defined(NAND_BCM_UMI_DEBUG) | ||
| 175 | if (stat > 0) { | ||
| 176 | printk(KERN_INFO | ||
| 177 | "%s %d correctable_errors detected\n", | ||
| 178 | __func__, stat); | ||
| 179 | } | ||
| 180 | #endif | ||
| 181 | mtd->ecc_stats.corrected += stat; | ||
| 182 | max_bitflips = max_t(unsigned int, max_bitflips, stat); | ||
| 183 | } | ||
| 184 | } | ||
| 185 | return max_bitflips; | ||
| 186 | } | ||
| 187 | |||
| 188 | /**************************************************************************** | ||
| 189 | * | ||
| 190 | * bcm_umi_bch_write_page_hwecc - hardware ecc based page write function | ||
| 191 | * @mtd: mtd info structure | ||
| 192 | * @chip: nand chip info structure | ||
| 193 | * @buf: data buffer | ||
| 194 | * @oob_required: must write chip->oob_poi to OOB | ||
| 195 | * | ||
| 196 | ***************************************************************************/ | ||
| 197 | static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, | ||
| 198 | struct nand_chip *chip, const uint8_t *buf, int oob_required) | ||
| 199 | { | ||
| 200 | int sectorIdx = 0; | ||
| 201 | int eccsize = chip->ecc.size; | ||
| 202 | int eccsteps = chip->ecc.steps; | ||
| 203 | const uint8_t *datap = buf; | ||
| 204 | uint8_t *oobp = chip->oob_poi; | ||
| 205 | int sectorOobSize = mtd->oobsize / eccsteps; | ||
| 206 | |||
| 207 | for (sectorIdx = 0; sectorIdx < eccsteps; | ||
| 208 | sectorIdx++, datap += eccsize, oobp += sectorOobSize) { | ||
| 209 | /* Enable hardware ECC before writing the buf */ | ||
| 210 | nand_bcm_umi_bch_enable_write_hwecc(); | ||
| 211 | bcm_umi_nand_write_buf(mtd, datap, eccsize); | ||
| 212 | nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp, | ||
| 213 | NAND_ECC_NUM_BYTES); | ||
| 214 | } | ||
| 215 | |||
| 216 | bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 217 | } | ||
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c deleted file mode 100644 index d0d1bd4d0e7d..000000000000 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ /dev/null | |||
| @@ -1,555 +0,0 @@ | |||
| 1 | /***************************************************************************** | ||
| 2 | * Copyright 2004 - 2009 Broadcom Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Unless you and Broadcom execute a separate written software license | ||
| 5 | * agreement governing use of this software, this software is licensed to you | ||
| 6 | * under the terms of the GNU General Public License version 2, available at | ||
| 7 | * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). | ||
| 8 | * | ||
| 9 | * Notwithstanding the above, under no circumstances may you combine this | ||
| 10 | * software in any way with any other Broadcom software provided under a | ||
| 11 | * license other than the GPL, without Broadcom's express prior written | ||
| 12 | * consent. | ||
| 13 | *****************************************************************************/ | ||
| 14 | |||
| 15 | /* ---- Include Files ---------------------------------------------------- */ | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/kernel.h> | ||
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/string.h> | ||
| 22 | #include <linux/ioport.h> | ||
| 23 | #include <linux/device.h> | ||
| 24 | #include <linux/delay.h> | ||
| 25 | #include <linux/err.h> | ||
| 26 | #include <linux/io.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 28 | #include <linux/mtd/mtd.h> | ||
| 29 | #include <linux/mtd/nand.h> | ||
| 30 | #include <linux/mtd/nand_ecc.h> | ||
| 31 | #include <linux/mtd/partitions.h> | ||
| 32 | |||
| 33 | #include <asm/mach-types.h> | ||
| 34 | |||
| 35 | #include <mach/reg_nand.h> | ||
| 36 | #include <mach/reg_umi.h> | ||
| 37 | |||
| 38 | #include "nand_bcm_umi.h" | ||
| 39 | |||
| 40 | #include <mach/memory_settings.h> | ||
| 41 | |||
| 42 | #define USE_DMA 1 | ||
| 43 | #include <mach/dma.h> | ||
| 44 | #include <linux/dma-mapping.h> | ||
| 45 | #include <linux/completion.h> | ||
| 46 | |||
| 47 | /* ---- External Variable Declarations ----------------------------------- */ | ||
| 48 | /* ---- External Function Prototypes ------------------------------------- */ | ||
| 49 | /* ---- Public Variables ------------------------------------------------- */ | ||
| 50 | /* ---- Private Constants and Types -------------------------------------- */ | ||
| 51 | static const __devinitconst char gBanner[] = KERN_INFO \ | ||
| 52 | "BCM UMI MTD NAND Driver: 1.00\n"; | ||
| 53 | |||
| 54 | #if NAND_ECC_BCH | ||
| 55 | static uint8_t scan_ff_pattern[] = { 0xff }; | ||
| 56 | |||
| 57 | static struct nand_bbt_descr largepage_bbt = { | ||
| 58 | .options = 0, | ||
| 59 | .offs = 0, | ||
| 60 | .len = 1, | ||
| 61 | .pattern = scan_ff_pattern | ||
| 62 | }; | ||
| 63 | #endif | ||
| 64 | |||
| 65 | /* | ||
| 66 | ** Preallocate a buffer to avoid having to do this every dma operation. | ||
| 67 | ** This is the size of the preallocated coherent DMA buffer. | ||
| 68 | */ | ||
| 69 | #if USE_DMA | ||
| 70 | #define DMA_MIN_BUFLEN 512 | ||
| 71 | #define DMA_MAX_BUFLEN PAGE_SIZE | ||
| 72 | #define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \ | ||
| 73 | ((len) > DMA_MAX_BUFLEN)) | ||
| 74 | |||
| 75 | /* | ||
| 76 | * The current NAND data space goes from 0x80001900 to 0x80001FFF, | ||
| 77 | * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page | ||
| 78 | * size NAND flash. Need to break the DMA down to multiple 1Ks. | ||
| 79 | * | ||
| 80 | * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000 | ||
| 81 | */ | ||
| 82 | #define DMA_MAX_LEN 1024 | ||
| 83 | |||
| 84 | #else /* !USE_DMA */ | ||
| 85 | #define DMA_MIN_BUFLEN 0 | ||
| 86 | #define DMA_MAX_BUFLEN 0 | ||
| 87 | #define USE_DIRECT_IO(len) 1 | ||
| 88 | #endif | ||
| 89 | /* ---- Private Function Prototypes -------------------------------------- */ | ||
| 90 | static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len); | ||
| 91 | static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf, | ||
| 92 | int len); | ||
| 93 | |||
| 94 | /* ---- Private Variables ------------------------------------------------ */ | ||
| 95 | static struct mtd_info *board_mtd; | ||
| 96 | static void __iomem *bcm_umi_io_base; | ||
| 97 | static void *virtPtr; | ||
| 98 | static dma_addr_t physPtr; | ||
| 99 | static struct completion nand_comp; | ||
| 100 | |||
| 101 | /* ---- Private Functions ------------------------------------------------ */ | ||
| 102 | #if NAND_ECC_BCH | ||
| 103 | #include "bcm_umi_bch.c" | ||
| 104 | #else | ||
| 105 | #include "bcm_umi_hamming.c" | ||
| 106 | #endif | ||
| 107 | |||
| 108 | #if USE_DMA | ||
| 109 | |||
| 110 | /* Handler called when the DMA finishes. */ | ||
| 111 | static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData) | ||
| 112 | { | ||
| 113 | complete(&nand_comp); | ||
| 114 | } | ||
| 115 | |||
| 116 | static int nand_dma_init(void) | ||
| 117 | { | ||
| 118 | int rc; | ||
| 119 | |||
| 120 | rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM, | ||
| 121 | nand_dma_handler, NULL); | ||
| 122 | if (rc != 0) { | ||
| 123 | printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc); | ||
| 124 | return rc; | ||
| 125 | } | ||
| 126 | |||
| 127 | virtPtr = | ||
| 128 | dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL); | ||
| 129 | if (virtPtr == NULL) { | ||
| 130 | printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n"); | ||
| 131 | return -ENOMEM; | ||
| 132 | } | ||
| 133 | |||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | static void nand_dma_term(void) | ||
| 138 | { | ||
| 139 | if (virtPtr != NULL) | ||
| 140 | dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void nand_dma_read(void *buf, int len) | ||
| 144 | { | ||
| 145 | int offset = 0; | ||
| 146 | int tmp_len = 0; | ||
| 147 | int len_left = len; | ||
| 148 | DMA_Handle_t hndl; | ||
| 149 | |||
| 150 | if (virtPtr == NULL) | ||
| 151 | panic("nand_dma_read: virtPtr == NULL\n"); | ||
| 152 | |||
| 153 | if ((void *)physPtr == NULL) | ||
| 154 | panic("nand_dma_read: physPtr == NULL\n"); | ||
| 155 | |||
| 156 | hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM); | ||
| 157 | if (hndl < 0) { | ||
| 158 | printk(KERN_ERR | ||
| 159 | "nand_dma_read: unable to allocate dma channel: %d\n", | ||
| 160 | (int)hndl); | ||
| 161 | panic("\n"); | ||
| 162 | } | ||
| 163 | |||
| 164 | while (len_left > 0) { | ||
| 165 | if (len_left > DMA_MAX_LEN) { | ||
| 166 | tmp_len = DMA_MAX_LEN; | ||
| 167 | len_left -= DMA_MAX_LEN; | ||
| 168 | } else { | ||
| 169 | tmp_len = len_left; | ||
| 170 | len_left = 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | init_completion(&nand_comp); | ||
| 174 | dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR, | ||
| 175 | physPtr + offset, tmp_len); | ||
| 176 | wait_for_completion(&nand_comp); | ||
| 177 | |||
| 178 | offset += tmp_len; | ||
| 179 | } | ||
| 180 | |||
| 181 | dma_free_channel(hndl); | ||
| 182 | |||
| 183 | if (buf != NULL) | ||
| 184 | memcpy(buf, virtPtr, len); | ||
| 185 | } | ||
| 186 | |||
| 187 | static void nand_dma_write(const void *buf, int len) | ||
| 188 | { | ||
| 189 | int offset = 0; | ||
| 190 | int tmp_len = 0; | ||
| 191 | int len_left = len; | ||
| 192 | DMA_Handle_t hndl; | ||
| 193 | |||
| 194 | if (buf == NULL) | ||
| 195 | panic("nand_dma_write: buf == NULL\n"); | ||
| 196 | |||
| 197 | if (virtPtr == NULL) | ||
| 198 | panic("nand_dma_write: virtPtr == NULL\n"); | ||
| 199 | |||
| 200 | if ((void *)physPtr == NULL) | ||
| 201 | panic("nand_dma_write: physPtr == NULL\n"); | ||
| 202 | |||
| 203 | memcpy(virtPtr, buf, len); | ||
| 204 | |||
| 205 | |||
| 206 | hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM); | ||
| 207 | if (hndl < 0) { | ||
| 208 | printk(KERN_ERR | ||
| 209 | "nand_dma_write: unable to allocate dma channel: %d\n", | ||
| 210 | (int)hndl); | ||
| 211 | panic("\n"); | ||
| 212 | } | ||
| 213 | |||
| 214 | while (len_left > 0) { | ||
| 215 | if (len_left > DMA_MAX_LEN) { | ||
| 216 | tmp_len = DMA_MAX_LEN; | ||
| 217 | len_left -= DMA_MAX_LEN; | ||
| 218 | } else { | ||
| 219 | tmp_len = len_left; | ||
| 220 | len_left = 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | init_completion(&nand_comp); | ||
| 224 | dma_transfer_mem_to_mem(hndl, physPtr + offset, | ||
| 225 | REG_NAND_DATA_PADDR, tmp_len); | ||
| 226 | wait_for_completion(&nand_comp); | ||
| 227 | |||
| 228 | offset += tmp_len; | ||
| 229 | } | ||
| 230 | |||
| 231 | dma_free_channel(hndl); | ||
| 232 | } | ||
| 233 | |||
| 234 | #endif | ||
| 235 | |||
| 236 | static int nand_dev_ready(struct mtd_info *mtd) | ||
| 237 | { | ||
| 238 | return nand_bcm_umi_dev_ready(); | ||
| 239 | } | ||
| 240 | |||
| 241 | /**************************************************************************** | ||
| 242 | * | ||
| 243 | * bcm_umi_nand_inithw | ||
| 244 | * | ||
| 245 | * This routine does the necessary hardware (board-specific) | ||
| 246 | * initializations. This includes setting up the timings, etc. | ||
| 247 | * | ||
| 248 | ***************************************************************************/ | ||
| 249 | int bcm_umi_nand_inithw(void) | ||
| 250 | { | ||
| 251 | /* Configure nand timing parameters */ | ||
| 252 | writel(readl(®_UMI_NAND_TCR) & ~0x7ffff, ®_UMI_NAND_TCR); | ||
| 253 | writel(readl(®_UMI_NAND_TCR) | HW_CFG_NAND_TCR, ®_UMI_NAND_TCR); | ||
| 254 | |||
| 255 | #if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS) | ||
| 256 | /* enable software control of CS */ | ||
| 257 | writel(readl(®_UMI_NAND_TCR) | REG_UMI_NAND_TCR_CS_SWCTRL, ®_UMI_NAND_TCR); | ||
| 258 | #endif | ||
| 259 | |||
| 260 | /* keep NAND chip select asserted */ | ||
| 261 | writel(readl(®_UMI_NAND_RCSR) | REG_UMI_NAND_RCSR_CS_ASSERTED, ®_UMI_NAND_RCSR); | ||
| 262 | |||
| 263 | writel(readl(®_UMI_NAND_TCR) & ~REG_UMI_NAND_TCR_WORD16, ®_UMI_NAND_TCR); | ||
| 264 | /* enable writes to flash */ | ||
| 265 | writel(readl(®_UMI_MMD_ICR) | REG_UMI_MMD_ICR_FLASH_WP, ®_UMI_MMD_ICR); | ||
| 266 | |||
| 267 | writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET); | ||
| 268 | nand_bcm_umi_wait_till_ready(); | ||
| 269 | |||
| 270 | #if NAND_ECC_BCH | ||
| 271 | nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES); | ||
| 272 | #endif | ||
| 273 | |||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | |||
| 277 | /* Used to turn latch the proper register for access. */ | ||
| 278 | static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd, | ||
| 279 | unsigned int ctrl) | ||
| 280 | { | ||
| 281 | /* send command to hardware */ | ||
| 282 | struct nand_chip *chip = mtd->priv; | ||
| 283 | if (ctrl & NAND_CTRL_CHANGE) { | ||
| 284 | if (ctrl & NAND_CLE) { | ||
| 285 | chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET; | ||
| 286 | goto CMD; | ||
| 287 | } | ||
| 288 | if (ctrl & NAND_ALE) { | ||
| 289 | chip->IO_ADDR_W = | ||
| 290 | bcm_umi_io_base + REG_NAND_ADDR_OFFSET; | ||
| 291 | goto CMD; | ||
| 292 | } | ||
| 293 | chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET; | ||
| 294 | } | ||
| 295 | |||
| 296 | CMD: | ||
| 297 | /* Send command to chip directly */ | ||
| 298 | if (cmd != NAND_CMD_NONE) | ||
| 299 | writeb(cmd, chip->IO_ADDR_W); | ||
| 300 | } | ||
| 301 | |||
| 302 | static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf, | ||
| 303 | int len) | ||
| 304 | { | ||
| 305 | if (USE_DIRECT_IO(len)) { | ||
| 306 | /* Do it the old way if the buffer is small or too large. | ||
| 307 | * Probably quicker than starting and checking dma. */ | ||
| 308 | int i; | ||
| 309 | struct nand_chip *this = mtd->priv; | ||
| 310 | |||
| 311 | for (i = 0; i < len; i++) | ||
| 312 | writeb(buf[i], this->IO_ADDR_W); | ||
| 313 | } | ||
| 314 | #if USE_DMA | ||
| 315 | else | ||
| 316 | nand_dma_write(buf, len); | ||
| 317 | #endif | ||
| 318 | } | ||
| 319 | |||
| 320 | static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len) | ||
| 321 | { | ||
| 322 | if (USE_DIRECT_IO(len)) { | ||
| 323 | int i; | ||
| 324 | struct nand_chip *this = mtd->priv; | ||
| 325 | |||
| 326 | for (i = 0; i < len; i++) | ||
| 327 | buf[i] = readb(this->IO_ADDR_R); | ||
| 328 | } | ||
| 329 | #if USE_DMA | ||
| 330 | else | ||
| 331 | nand_dma_read(buf, len); | ||
| 332 | #endif | ||
| 333 | } | ||
| 334 | |||
| 335 | static uint8_t readbackbuf[NAND_MAX_PAGESIZE]; | ||
| 336 | static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf, | ||
| 337 | int len) | ||
| 338 | { | ||
| 339 | /* | ||
| 340 | * Try to readback page with ECC correction. This is necessary | ||
| 341 | * for MLC parts which may have permanently stuck bits. | ||
| 342 | */ | ||
| 343 | struct nand_chip *chip = mtd->priv; | ||
| 344 | int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0); | ||
| 345 | if (ret < 0) | ||
| 346 | return -EFAULT; | ||
| 347 | else { | ||
| 348 | if (memcmp(readbackbuf, buf, len) == 0) | ||
| 349 | return 0; | ||
| 350 | |||
| 351 | return -EFAULT; | ||
| 352 | } | ||
| 353 | return 0; | ||
| 354 | } | ||
| 355 | |||
| 356 | static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | ||
| 357 | { | ||
| 358 | struct nand_chip *this; | ||
| 359 | struct resource *r; | ||
| 360 | int err = 0; | ||
| 361 | |||
| 362 | printk(gBanner); | ||
| 363 | |||
| 364 | /* Allocate memory for MTD device structure and private data */ | ||
| 365 | board_mtd = | ||
| 366 | kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), | ||
| 367 | GFP_KERNEL); | ||
| 368 | if (!board_mtd) { | ||
| 369 | printk(KERN_WARNING | ||
| 370 | "Unable to allocate NAND MTD device structure.\n"); | ||
| 371 | return -ENOMEM; | ||
| 372 | } | ||
| 373 | |||
| 374 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 375 | |||
| 376 | if (!r) { | ||
| 377 | err = -ENXIO; | ||
| 378 | goto out_free; | ||
| 379 | } | ||
| 380 | |||
| 381 | /* map physical address */ | ||
| 382 | bcm_umi_io_base = ioremap(r->start, resource_size(r)); | ||
| 383 | |||
| 384 | if (!bcm_umi_io_base) { | ||
| 385 | printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); | ||
| 386 | err = -EIO; | ||
| 387 | goto out_free; | ||
| 388 | } | ||
| 389 | |||
| 390 | /* Get pointer to private data */ | ||
| 391 | this = (struct nand_chip *)(&board_mtd[1]); | ||
| 392 | |||
| 393 | /* Initialize structures */ | ||
| 394 | memset((char *)board_mtd, 0, sizeof(struct mtd_info)); | ||
| 395 | memset((char *)this, 0, sizeof(struct nand_chip)); | ||
| 396 | |||
| 397 | /* Link the private data with the MTD structure */ | ||
| 398 | board_mtd->priv = this; | ||
| 399 | |||
| 400 | /* Initialize the NAND hardware. */ | ||
| 401 | if (bcm_umi_nand_inithw() < 0) { | ||
| 402 | printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); | ||
| 403 | err = -EIO; | ||
| 404 | goto out_unmap; | ||
| 405 | } | ||
| 406 | |||
| 407 | /* Set address of NAND IO lines */ | ||
| 408 | this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET; | ||
| 409 | this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET; | ||
| 410 | |||
| 411 | /* Set command delay time, see datasheet for correct value */ | ||
| 412 | this->chip_delay = 0; | ||
| 413 | /* Assign the device ready function, if available */ | ||
| 414 | this->dev_ready = nand_dev_ready; | ||
| 415 | this->options = 0; | ||
| 416 | |||
| 417 | this->write_buf = bcm_umi_nand_write_buf; | ||
| 418 | this->read_buf = bcm_umi_nand_read_buf; | ||
| 419 | this->verify_buf = bcm_umi_nand_verify_buf; | ||
| 420 | |||
| 421 | this->cmd_ctrl = bcm_umi_nand_hwcontrol; | ||
| 422 | this->ecc.mode = NAND_ECC_HW; | ||
| 423 | this->ecc.size = 512; | ||
| 424 | this->ecc.bytes = NAND_ECC_NUM_BYTES; | ||
| 425 | #if NAND_ECC_BCH | ||
| 426 | this->ecc.read_page = bcm_umi_bch_read_page_hwecc; | ||
| 427 | this->ecc.write_page = bcm_umi_bch_write_page_hwecc; | ||
| 428 | #else | ||
| 429 | this->ecc.correct = nand_correct_data512; | ||
| 430 | this->ecc.calculate = bcm_umi_hamming_get_hw_ecc; | ||
| 431 | this->ecc.hwctl = bcm_umi_hamming_enable_hwecc; | ||
| 432 | #endif | ||
| 433 | |||
| 434 | #if USE_DMA | ||
| 435 | err = nand_dma_init(); | ||
| 436 | if (err != 0) | ||
| 437 | goto out_unmap; | ||
| 438 | #endif | ||
| 439 | |||
| 440 | /* Figure out the size of the device that we have. | ||
| 441 | * We need to do this to figure out which ECC | ||
| 442 | * layout we'll be using. | ||
| 443 | */ | ||
| 444 | |||
| 445 | err = nand_scan_ident(board_mtd, 1, NULL); | ||
| 446 | if (err) { | ||
| 447 | printk(KERN_ERR "nand_scan failed: %d\n", err); | ||
| 448 | goto out_unmap; | ||
| 449 | } | ||
| 450 | |||
| 451 | /* Now that we know the nand size, we can setup the ECC layout */ | ||
| 452 | |||
| 453 | switch (board_mtd->writesize) { /* writesize is the pagesize */ | ||
| 454 | case 4096: | ||
| 455 | this->ecc.layout = &nand_hw_eccoob_4096; | ||
| 456 | break; | ||
| 457 | case 2048: | ||
| 458 | this->ecc.layout = &nand_hw_eccoob_2048; | ||
| 459 | break; | ||
| 460 | case 512: | ||
| 461 | this->ecc.layout = &nand_hw_eccoob_512; | ||
| 462 | break; | ||
| 463 | default: | ||
| 464 | { | ||
| 465 | printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", | ||
| 466 | board_mtd->writesize); | ||
| 467 | err = -EINVAL; | ||
| 468 | goto out_unmap; | ||
| 469 | } | ||
| 470 | } | ||
| 471 | |||
| 472 | #if NAND_ECC_BCH | ||
| 473 | if (board_mtd->writesize > 512) { | ||
| 474 | if (this->bbt_options & NAND_BBT_USE_FLASH) | ||
| 475 | largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; | ||
| 476 | this->badblock_pattern = &largepage_bbt; | ||
| 477 | } | ||
| 478 | |||
| 479 | this->ecc.strength = 8; | ||
| 480 | |||
| 481 | #endif | ||
| 482 | |||
| 483 | /* Now finish off the scan, now that ecc.layout has been initialized. */ | ||
| 484 | |||
| 485 | err = nand_scan_tail(board_mtd); | ||
| 486 | if (err) { | ||
| 487 | printk(KERN_ERR "nand_scan failed: %d\n", err); | ||
| 488 | goto out_unmap; | ||
| 489 | } | ||
| 490 | |||
| 491 | /* Register the partitions */ | ||
| 492 | board_mtd->name = "bcm_umi-nand"; | ||
| 493 | mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0); | ||
| 494 | |||
| 495 | /* Return happy */ | ||
| 496 | return 0; | ||
| 497 | out_unmap: | ||
| 498 | iounmap(bcm_umi_io_base); | ||
| 499 | out_free: | ||
| 500 | kfree(board_mtd); | ||
| 501 | return err; | ||
| 502 | } | ||
| 503 | |||
| 504 | static int bcm_umi_nand_remove(struct platform_device *pdev) | ||
| 505 | { | ||
| 506 | #if USE_DMA | ||
| 507 | nand_dma_term(); | ||
| 508 | #endif | ||
| 509 | |||
| 510 | /* Release resources, unregister device */ | ||
| 511 | nand_release(board_mtd); | ||
| 512 | |||
| 513 | /* unmap physical address */ | ||
| 514 | iounmap(bcm_umi_io_base); | ||
| 515 | |||
| 516 | /* Free the MTD device structure */ | ||
| 517 | kfree(board_mtd); | ||
| 518 | |||
| 519 | return 0; | ||
| 520 | } | ||
| 521 | |||
| 522 | #ifdef CONFIG_PM | ||
| 523 | static int bcm_umi_nand_suspend(struct platform_device *pdev, | ||
| 524 | pm_message_t state) | ||
| 525 | { | ||
| 526 | printk(KERN_ERR "MTD NAND suspend is being called\n"); | ||
| 527 | return 0; | ||
| 528 | } | ||
| 529 | |||
| 530 | static int bcm_umi_nand_resume(struct platform_device *pdev) | ||
| 531 | { | ||
| 532 | printk(KERN_ERR "MTD NAND resume is being called\n"); | ||
| 533 | return 0; | ||
| 534 | } | ||
| 535 | #else | ||
| 536 | #define bcm_umi_nand_suspend NULL | ||
| 537 | #define bcm_umi_nand_resume NULL | ||
| 538 | #endif | ||
| 539 | |||
| 540 | static struct platform_driver nand_driver = { | ||
| 541 | .driver = { | ||
| 542 | .name = "bcm-nand", | ||
| 543 | .owner = THIS_MODULE, | ||
| 544 | }, | ||
| 545 | .probe = bcm_umi_nand_probe, | ||
| 546 | .remove = bcm_umi_nand_remove, | ||
| 547 | .suspend = bcm_umi_nand_suspend, | ||
| 548 | .resume = bcm_umi_nand_resume, | ||
| 549 | }; | ||
| 550 | |||
| 551 | module_platform_driver(nand_driver); | ||
| 552 | |||
| 553 | MODULE_LICENSE("GPL"); | ||
| 554 | MODULE_AUTHOR("Broadcom"); | ||
| 555 | MODULE_DESCRIPTION("BCM UMI MTD NAND driver"); | ||
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 3f1c18599cbd..ab0caa74eb43 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c | |||
| @@ -566,11 +566,13 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip | |||
| 566 | return 0; | 566 | return 0; |
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | 569 | static int bf5xx_nand_write_page_raw(struct mtd_info *mtd, |
| 570 | const uint8_t *buf, int oob_required) | 570 | struct nand_chip *chip, const uint8_t *buf, int oob_required) |
| 571 | { | 571 | { |
| 572 | bf5xx_nand_write_buf(mtd, buf, mtd->writesize); | 572 | bf5xx_nand_write_buf(mtd, buf, mtd->writesize); |
| 573 | bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); | 573 | bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 574 | |||
| 575 | return 0; | ||
| 574 | } | 576 | } |
| 575 | 577 | ||
| 576 | /* | 578 | /* |
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index f3f6cfedd69e..2bb7170502c2 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c | |||
| @@ -377,7 +377,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 377 | * @buf: buffer to store read data | 377 | * @buf: buffer to store read data |
| 378 | * @oob_required: caller expects OOB data read to chip->oob_poi | 378 | * @oob_required: caller expects OOB data read to chip->oob_poi |
| 379 | * | 379 | * |
| 380 | * The hw generator calculates the error syndrome automatically. Therefor | 380 | * The hw generator calculates the error syndrome automatically. Therefore |
| 381 | * we need a special oob layout and handling. | 381 | * we need a special oob layout and handling. |
| 382 | */ | 382 | */ |
| 383 | static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, | 383 | static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
| @@ -520,7 +520,7 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = { | |||
| 520 | }; | 520 | }; |
| 521 | 521 | ||
| 522 | 522 | ||
| 523 | static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, | 523 | static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd, |
| 524 | struct nand_chip *chip, | 524 | struct nand_chip *chip, |
| 525 | const uint8_t *buf, int oob_required) | 525 | const uint8_t *buf, int oob_required) |
| 526 | { | 526 | { |
| @@ -531,6 +531,8 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, | |||
| 531 | 531 | ||
| 532 | /* Set up ECC autogeneration */ | 532 | /* Set up ECC autogeneration */ |
| 533 | cafe->ctl2 |= (1<<30); | 533 | cafe->ctl2 |= (1<<30); |
| 534 | |||
| 535 | return 0; | ||
| 534 | } | 536 | } |
| 535 | 537 | ||
| 536 | static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 538 | static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| @@ -542,9 +544,12 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 542 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | 544 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); |
| 543 | 545 | ||
| 544 | if (unlikely(raw)) | 546 | if (unlikely(raw)) |
| 545 | chip->ecc.write_page_raw(mtd, chip, buf, oob_required); | 547 | status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); |
| 546 | else | 548 | else |
| 547 | chip->ecc.write_page(mtd, chip, buf, oob_required); | 549 | status = chip->ecc.write_page(mtd, chip, buf, oob_required); |
| 550 | |||
| 551 | if (status < 0) | ||
| 552 | return status; | ||
| 548 | 553 | ||
| 549 | /* | 554 | /* |
| 550 | * Cached programming disabled for now, Not sure if it's worth the | 555 | * Cached programming disabled for now, Not sure if it's worth the |
| @@ -571,13 +576,6 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 571 | status = chip->waitfunc(mtd, chip); | 576 | status = chip->waitfunc(mtd, chip); |
| 572 | } | 577 | } |
| 573 | 578 | ||
| 574 | #ifdef CONFIG_MTD_NAND_VERIFY_WRITE | ||
| 575 | /* Send command to read back the data */ | ||
| 576 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
| 577 | |||
| 578 | if (chip->verify_buf(mtd, buf, mtd->writesize)) | ||
| 579 | return -EIO; | ||
| 580 | #endif | ||
| 581 | return 0; | 579 | return 0; |
| 582 | } | 580 | } |
| 583 | 581 | ||
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 1024bfc05c86..39b2ef848811 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c | |||
| @@ -76,18 +76,6 @@ static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 76 | *buf++ = readl(this->IO_ADDR_R) >> 16; | 76 | *buf++ = readl(this->IO_ADDR_R) >> 16; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 80 | { | ||
| 81 | int i; | ||
| 82 | struct nand_chip *this = mtd->priv; | ||
| 83 | |||
| 84 | for (i=0; i<len; i++) | ||
| 85 | if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16)) | ||
| 86 | return -EFAULT; | ||
| 87 | |||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline void nand_cs_on(void) | 79 | static inline void nand_cs_on(void) |
| 92 | { | 80 | { |
| 93 | gpio_set_value(GPIO_NAND_CS, 0); | 81 | gpio_set_value(GPIO_NAND_CS, 0); |
| @@ -209,7 +197,6 @@ static int __init cmx270_init(void) | |||
| 209 | this->read_byte = cmx270_read_byte; | 197 | this->read_byte = cmx270_read_byte; |
| 210 | this->read_buf = cmx270_read_buf; | 198 | this->read_buf = cmx270_read_buf; |
| 211 | this->write_buf = cmx270_write_buf; | 199 | this->write_buf = cmx270_write_buf; |
| 212 | this->verify_buf = cmx270_verify_buf; | ||
| 213 | 200 | ||
| 214 | /* Scan to find existence of the device */ | 201 | /* Scan to find existence of the device */ |
| 215 | if (nand_scan (cmx270_nand_mtd, 1)) { | 202 | if (nand_scan (cmx270_nand_mtd, 1)) { |
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index f1deb1ee2c95..945047ad0952 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/mtd/nand.h> | 33 | #include <linux/mtd/nand.h> |
| 34 | #include <linux/mtd/partitions.h> | 34 | #include <linux/mtd/partitions.h> |
| 35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
| 36 | #include <linux/of_device.h> | ||
| 36 | 37 | ||
| 37 | #include <linux/platform_data/mtd-davinci.h> | 38 | #include <linux/platform_data/mtd-davinci.h> |
| 38 | #include <linux/platform_data/mtd-davinci-aemif.h> | 39 | #include <linux/platform_data/mtd-davinci-aemif.h> |
| @@ -518,9 +519,75 @@ static struct nand_ecclayout hwecc4_2048 __initconst = { | |||
| 518 | }, | 519 | }, |
| 519 | }; | 520 | }; |
| 520 | 521 | ||
| 522 | #if defined(CONFIG_OF) | ||
| 523 | static const struct of_device_id davinci_nand_of_match[] = { | ||
| 524 | {.compatible = "ti,davinci-nand", }, | ||
| 525 | {}, | ||
| 526 | } | ||
| 527 | MODULE_DEVICE_TABLE(of, davinci_nand_of_match); | ||
| 528 | |||
| 529 | static struct davinci_nand_pdata | ||
| 530 | *nand_davinci_get_pdata(struct platform_device *pdev) | ||
| 531 | { | ||
| 532 | if (!pdev->dev.platform_data && pdev->dev.of_node) { | ||
| 533 | struct davinci_nand_pdata *pdata; | ||
| 534 | const char *mode; | ||
| 535 | u32 prop; | ||
| 536 | int len; | ||
| 537 | |||
| 538 | pdata = devm_kzalloc(&pdev->dev, | ||
| 539 | sizeof(struct davinci_nand_pdata), | ||
| 540 | GFP_KERNEL); | ||
| 541 | pdev->dev.platform_data = pdata; | ||
| 542 | if (!pdata) | ||
| 543 | return NULL; | ||
| 544 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 545 | "ti,davinci-chipselect", &prop)) | ||
| 546 | pdev->id = prop; | ||
| 547 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 548 | "ti,davinci-mask-ale", &prop)) | ||
| 549 | pdata->mask_ale = prop; | ||
| 550 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 551 | "ti,davinci-mask-cle", &prop)) | ||
| 552 | pdata->mask_cle = prop; | ||
| 553 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 554 | "ti,davinci-mask-chipsel", &prop)) | ||
| 555 | pdata->mask_chipsel = prop; | ||
| 556 | if (!of_property_read_string(pdev->dev.of_node, | ||
| 557 | "ti,davinci-ecc-mode", &mode)) { | ||
| 558 | if (!strncmp("none", mode, 4)) | ||
| 559 | pdata->ecc_mode = NAND_ECC_NONE; | ||
| 560 | if (!strncmp("soft", mode, 4)) | ||
| 561 | pdata->ecc_mode = NAND_ECC_SOFT; | ||
| 562 | if (!strncmp("hw", mode, 2)) | ||
| 563 | pdata->ecc_mode = NAND_ECC_HW; | ||
| 564 | } | ||
| 565 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 566 | "ti,davinci-ecc-bits", &prop)) | ||
| 567 | pdata->ecc_bits = prop; | ||
| 568 | if (!of_property_read_u32(pdev->dev.of_node, | ||
| 569 | "ti,davinci-nand-buswidth", &prop)) | ||
| 570 | if (prop == 16) | ||
| 571 | pdata->options |= NAND_BUSWIDTH_16; | ||
| 572 | if (of_find_property(pdev->dev.of_node, | ||
| 573 | "ti,davinci-nand-use-bbt", &len)) | ||
| 574 | pdata->bbt_options = NAND_BBT_USE_FLASH; | ||
| 575 | } | ||
| 576 | |||
| 577 | return pdev->dev.platform_data; | ||
| 578 | } | ||
| 579 | #else | ||
| 580 | #define davinci_nand_of_match NULL | ||
| 581 | static struct davinci_nand_pdata | ||
| 582 | *nand_davinci_get_pdata(struct platform_device *pdev) | ||
| 583 | { | ||
| 584 | return pdev->dev.platform_data; | ||
| 585 | } | ||
| 586 | #endif | ||
| 587 | |||
| 521 | static int __init nand_davinci_probe(struct platform_device *pdev) | 588 | static int __init nand_davinci_probe(struct platform_device *pdev) |
| 522 | { | 589 | { |
| 523 | struct davinci_nand_pdata *pdata = pdev->dev.platform_data; | 590 | struct davinci_nand_pdata *pdata; |
| 524 | struct davinci_nand_info *info; | 591 | struct davinci_nand_info *info; |
| 525 | struct resource *res1; | 592 | struct resource *res1; |
| 526 | struct resource *res2; | 593 | struct resource *res2; |
| @@ -530,6 +597,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
| 530 | uint32_t val; | 597 | uint32_t val; |
| 531 | nand_ecc_modes_t ecc_mode; | 598 | nand_ecc_modes_t ecc_mode; |
| 532 | 599 | ||
| 600 | pdata = nand_davinci_get_pdata(pdev); | ||
| 533 | /* insist on board-specific configuration */ | 601 | /* insist on board-specific configuration */ |
| 534 | if (!pdata) | 602 | if (!pdata) |
| 535 | return -ENODEV; | 603 | return -ENODEV; |
| @@ -656,7 +724,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
| 656 | goto err_clk; | 724 | goto err_clk; |
| 657 | } | 725 | } |
| 658 | 726 | ||
| 659 | ret = clk_enable(info->clk); | 727 | ret = clk_prepare_enable(info->clk); |
| 660 | if (ret < 0) { | 728 | if (ret < 0) { |
| 661 | dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", | 729 | dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", |
| 662 | ret); | 730 | ret); |
| @@ -767,7 +835,7 @@ syndrome_done: | |||
| 767 | 835 | ||
| 768 | err_scan: | 836 | err_scan: |
| 769 | err_timing: | 837 | err_timing: |
| 770 | clk_disable(info->clk); | 838 | clk_disable_unprepare(info->clk); |
| 771 | 839 | ||
| 772 | err_clk_enable: | 840 | err_clk_enable: |
| 773 | clk_put(info->clk); | 841 | clk_put(info->clk); |
| @@ -804,7 +872,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev) | |||
| 804 | 872 | ||
| 805 | nand_release(&info->mtd); | 873 | nand_release(&info->mtd); |
| 806 | 874 | ||
| 807 | clk_disable(info->clk); | 875 | clk_disable_unprepare(info->clk); |
| 808 | clk_put(info->clk); | 876 | clk_put(info->clk); |
| 809 | 877 | ||
| 810 | kfree(info); | 878 | kfree(info); |
| @@ -816,6 +884,8 @@ static struct platform_driver nand_davinci_driver = { | |||
| 816 | .remove = __exit_p(nand_davinci_remove), | 884 | .remove = __exit_p(nand_davinci_remove), |
| 817 | .driver = { | 885 | .driver = { |
| 818 | .name = "davinci_nand", | 886 | .name = "davinci_nand", |
| 887 | .owner = THIS_MODULE, | ||
| 888 | .of_match_table = davinci_nand_of_match, | ||
| 819 | }, | 889 | }, |
| 820 | }; | 890 | }; |
| 821 | MODULE_ALIAS("platform:davinci_nand"); | 891 | MODULE_ALIAS("platform:davinci_nand"); |
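For boards that keep using platform data instead of DT, the fields the new nand_davinci_get_pdata() fills in can equally be supplied from a board file. The initializer below is a sketch with placeholder values; the field names follow the assignments in the parser above, while the chipselect value and the omitted memory resources are illustrative, not taken from any real board.

static struct davinci_nand_pdata board_nand_pdata = {
	.mask_ale	= 0,
	.mask_cle	= 0,
	.mask_chipsel	= 0,
	.ecc_mode	= NAND_ECC_HW,
	.ecc_bits	= 4,
	.bbt_options	= NAND_BBT_USE_FLASH,
	/* set .options = NAND_BUSWIDTH_16 for a 16-bit wide device */
};

static struct platform_device board_nand_device = {
	.name	= "davinci_nand",
	.id	= 1,	/* chipselect, the non-DT counterpart of ti,davinci-chipselect */
	.dev	= { .platform_data = &board_nand_pdata },
	/* the two IORESOURCE_MEM entries (NAND window, AEMIF control) omitted here */
};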
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 0650aafa0dd2..e706a237170f 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c | |||
| @@ -1028,7 +1028,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op) | |||
| 1028 | 1028 | ||
| 1029 | /* writes a page. user specifies type, and this function handles the | 1029 | /* writes a page. user specifies type, and this function handles the |
| 1030 | * configuration details. */ | 1030 | * configuration details. */ |
| 1031 | static void write_page(struct mtd_info *mtd, struct nand_chip *chip, | 1031 | static int write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 1032 | const uint8_t *buf, bool raw_xfer) | 1032 | const uint8_t *buf, bool raw_xfer) |
| 1033 | { | 1033 | { |
| 1034 | struct denali_nand_info *denali = mtd_to_denali(mtd); | 1034 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| @@ -1078,6 +1078,8 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 1078 | 1078 | ||
| 1079 | denali_enable_dma(denali, false); | 1079 | denali_enable_dma(denali, false); |
| 1080 | dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE); | 1080 | dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE); |
| 1081 | |||
| 1082 | return 0; | ||
| 1081 | } | 1083 | } |
| 1082 | 1084 | ||
| 1083 | /* NAND core entry points */ | 1085 | /* NAND core entry points */ |
| @@ -1086,24 +1088,24 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 1086 | * writing a page with ECC or without is similar, all the work is done | 1088 | * writing a page with ECC or without is similar, all the work is done |
| 1087 | * by write_page above. | 1089 | * by write_page above. |
| 1088 | * */ | 1090 | * */ |
| 1089 | static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 1091 | static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 1090 | const uint8_t *buf, int oob_required) | 1092 | const uint8_t *buf, int oob_required) |
| 1091 | { | 1093 | { |
| 1092 | /* for regular page writes, we let HW handle all the ECC | 1094 | /* for regular page writes, we let HW handle all the ECC |
| 1093 | * data written to the device. */ | 1095 | * data written to the device. */ |
| 1094 | write_page(mtd, chip, buf, false); | 1096 | return write_page(mtd, chip, buf, false); |
| 1095 | } | 1097 | } |
| 1096 | 1098 | ||
| 1097 | /* This is the callback that the NAND core calls to write a page without ECC. | 1099 | /* This is the callback that the NAND core calls to write a page without ECC. |
| 1098 | * raw access is similar to ECC page writes, so all the work is done in the | 1100 | * raw access is similar to ECC page writes, so all the work is done in the |
| 1099 | * write_page() function above. | 1101 | * write_page() function above. |
| 1100 | */ | 1102 | */ |
| 1101 | static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | 1103 | static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, |
| 1102 | const uint8_t *buf, int oob_required) | 1104 | const uint8_t *buf, int oob_required) |
| 1103 | { | 1105 | { |
| 1104 | /* for raw page writes, we want to disable ECC and simply write | 1106 | /* for raw page writes, we want to disable ECC and simply write |
| 1105 | whatever data is in the buffer. */ | 1107 | whatever data is in the buffer. */ |
| 1106 | write_page(mtd, chip, buf, true); | 1108 | return write_page(mtd, chip, buf, true); |
| 1107 | } | 1109 | } |
| 1108 | 1110 | ||
| 1109 | static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, | 1111 | static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, |
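The denali changes above follow the same pattern applied to bf5xx and cafe_nand earlier in this diff: ecc.write_page and ecc.write_page_raw now return int, so low-level write setup failures can propagate instead of being dropped. A minimal skeleton of the new callback shape is sketched below; the helper it calls is hypothetical and exists only to show where an error would now be returned.

static int example_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const uint8_t *buf, int oob_required)
{
	int ret;

	ret = example_start_hwecc_write(mtd, buf);	/* hypothetical helper */
	if (ret < 0)
		return ret;	/* with a void callback this error was silently lost */

	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}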
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index e2ca067631cf..256eb30f6180 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c | |||
| @@ -376,19 +376,6 @@ static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len) | |||
| 376 | } | 376 | } |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 380 | { | ||
| 381 | struct nand_chip *this = mtd->priv; | ||
| 382 | struct doc_priv *doc = this->priv; | ||
| 383 | void __iomem *docptr = doc->virtadr; | ||
| 384 | int i; | ||
| 385 | |||
| 386 | for (i = 0; i < len; i++) | ||
| 387 | if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) | ||
| 388 | return -EFAULT; | ||
| 389 | return 0; | ||
| 390 | } | ||
| 391 | |||
| 392 | static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) | 379 | static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) |
| 393 | { | 380 | { |
| 394 | struct nand_chip *this = mtd->priv; | 381 | struct nand_chip *this = mtd->priv; |
| @@ -526,26 +513,6 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 526 | buf[i] = ReadDOC(docptr, LastDataRead); | 513 | buf[i] = ReadDOC(docptr, LastDataRead); |
| 527 | } | 514 | } |
| 528 | 515 | ||
| 529 | static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 530 | { | ||
| 531 | struct nand_chip *this = mtd->priv; | ||
| 532 | struct doc_priv *doc = this->priv; | ||
| 533 | void __iomem *docptr = doc->virtadr; | ||
| 534 | int i; | ||
| 535 | |||
| 536 | /* Start read pipeline */ | ||
| 537 | ReadDOC(docptr, ReadPipeInit); | ||
| 538 | |||
| 539 | for (i = 0; i < len - 1; i++) | ||
| 540 | if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { | ||
| 541 | ReadDOC(docptr, LastDataRead); | ||
| 542 | return i; | ||
| 543 | } | ||
| 544 | if (buf[i] != ReadDOC(docptr, LastDataRead)) | ||
| 545 | return i; | ||
| 546 | return 0; | ||
| 547 | } | ||
| 548 | |||
| 549 | static u_char doc2001plus_read_byte(struct mtd_info *mtd) | 516 | static u_char doc2001plus_read_byte(struct mtd_info *mtd) |
| 550 | { | 517 | { |
| 551 | struct nand_chip *this = mtd->priv; | 518 | struct nand_chip *this = mtd->priv; |
| @@ -610,33 +577,6 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 610 | printk("\n"); | 577 | printk("\n"); |
| 611 | } | 578 | } |
| 612 | 579 | ||
| 613 | static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 614 | { | ||
| 615 | struct nand_chip *this = mtd->priv; | ||
| 616 | struct doc_priv *doc = this->priv; | ||
| 617 | void __iomem *docptr = doc->virtadr; | ||
| 618 | int i; | ||
| 619 | |||
| 620 | if (debug) | ||
| 621 | printk("verifybuf of %d bytes: ", len); | ||
| 622 | |||
| 623 | /* Start read pipeline */ | ||
| 624 | ReadDOC(docptr, Mplus_ReadPipeInit); | ||
| 625 | ReadDOC(docptr, Mplus_ReadPipeInit); | ||
| 626 | |||
| 627 | for (i = 0; i < len - 2; i++) | ||
| 628 | if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { | ||
| 629 | ReadDOC(docptr, Mplus_LastDataRead); | ||
| 630 | ReadDOC(docptr, Mplus_LastDataRead); | ||
| 631 | return i; | ||
| 632 | } | ||
| 633 | if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead)) | ||
| 634 | return len - 2; | ||
| 635 | if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead)) | ||
| 636 | return len - 1; | ||
| 637 | return 0; | ||
| 638 | } | ||
| 639 | |||
| 640 | static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) | 580 | static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) |
| 641 | { | 581 | { |
| 642 | struct nand_chip *this = mtd->priv; | 582 | struct nand_chip *this = mtd->priv; |
| @@ -1432,7 +1372,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd) | |||
| 1432 | this->read_byte = doc2000_read_byte; | 1372 | this->read_byte = doc2000_read_byte; |
| 1433 | this->write_buf = doc2000_writebuf; | 1373 | this->write_buf = doc2000_writebuf; |
| 1434 | this->read_buf = doc2000_readbuf; | 1374 | this->read_buf = doc2000_readbuf; |
| 1435 | this->verify_buf = doc2000_verifybuf; | ||
| 1436 | this->scan_bbt = nftl_scan_bbt; | 1375 | this->scan_bbt = nftl_scan_bbt; |
| 1437 | 1376 | ||
| 1438 | doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; | 1377 | doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; |
| @@ -1449,7 +1388,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd) | |||
| 1449 | this->read_byte = doc2001_read_byte; | 1388 | this->read_byte = doc2001_read_byte; |
| 1450 | this->write_buf = doc2001_writebuf; | 1389 | this->write_buf = doc2001_writebuf; |
| 1451 | this->read_buf = doc2001_readbuf; | 1390 | this->read_buf = doc2001_readbuf; |
| 1452 | this->verify_buf = doc2001_verifybuf; | ||
| 1453 | 1391 | ||
| 1454 | ReadDOC(doc->virtadr, ChipID); | 1392 | ReadDOC(doc->virtadr, ChipID); |
| 1455 | ReadDOC(doc->virtadr, ChipID); | 1393 | ReadDOC(doc->virtadr, ChipID); |
| @@ -1480,7 +1418,6 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd) | |||
| 1480 | this->read_byte = doc2001plus_read_byte; | 1418 | this->read_byte = doc2001plus_read_byte; |
| 1481 | this->write_buf = doc2001plus_writebuf; | 1419 | this->write_buf = doc2001plus_writebuf; |
| 1482 | this->read_buf = doc2001plus_readbuf; | 1420 | this->read_buf = doc2001plus_readbuf; |
| 1483 | this->verify_buf = doc2001plus_verifybuf; | ||
| 1484 | this->scan_bbt = inftl_scan_bbt; | 1421 | this->scan_bbt = inftl_scan_bbt; |
| 1485 | this->cmd_ctrl = NULL; | 1422 | this->cmd_ctrl = NULL; |
| 1486 | this->select_chip = doc2001plus_select_chip; | 1423 | this->select_chip = doc2001plus_select_chip; |
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index a225e49a5623..799da5d1c857 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c | |||
| @@ -378,9 +378,9 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page) | |||
| 378 | * bit flips(s) are not reported in stats. | 378 | * bit flips(s) are not reported in stats. |
| 379 | */ | 379 | */ |
| 380 | 380 | ||
| 381 | if (doc->oob_buf[15]) { | 381 | if (nand->oob_poi[15]) { |
| 382 | int bit, numsetbits = 0; | 382 | int bit, numsetbits = 0; |
| 383 | unsigned long written_flag = doc->oob_buf[15]; | 383 | unsigned long written_flag = nand->oob_poi[15]; |
| 384 | for_each_set_bit(bit, &written_flag, 8) | 384 | for_each_set_bit(bit, &written_flag, 8) |
| 385 | numsetbits++; | 385 | numsetbits++; |
| 386 | if (numsetbits > 4) { /* assume blank */ | 386 | if (numsetbits > 4) { /* assume blank */ |
| @@ -428,7 +428,7 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page) | |||
| 428 | /* if error within oob area preceding ecc bytes... */ | 428 | /* if error within oob area preceding ecc bytes... */ |
| 429 | if (errpos[i] > DOCG4_PAGE_SIZE * 8) | 429 | if (errpos[i] > DOCG4_PAGE_SIZE * 8) |
| 430 | change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8, | 430 | change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8, |
| 431 | (unsigned long *)doc->oob_buf); | 431 | (unsigned long *)nand->oob_poi); |
| 432 | 432 | ||
| 433 | else /* error in page data */ | 433 | else /* error in page data */ |
| 434 | change_bit(errpos[i], (unsigned long *)buf); | 434 | change_bit(errpos[i], (unsigned long *)buf); |
| @@ -748,18 +748,12 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand, | |||
| 748 | 748 | ||
| 749 | docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */ | 749 | docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */ |
| 750 | 750 | ||
| 751 | /* | 751 | /* this device always reads oob after page data */ |
| 752 | * Diskonchips read oob immediately after a page read. Mtd | ||
| 753 | * infrastructure issues a separate command for reading oob after the | ||
| 754 | * page is read. So we save the oob bytes in a local buffer and just | ||
| 755 | * copy it if the next command reads oob from the same page. | ||
| 756 | */ | ||
| 757 | |||
| 758 | /* first 14 oob bytes read from I/O reg */ | 752 | /* first 14 oob bytes read from I/O reg */ |
| 759 | docg4_read_buf(mtd, doc->oob_buf, 14); | 753 | docg4_read_buf(mtd, nand->oob_poi, 14); |
| 760 | 754 | ||
| 761 | /* last 2 read from another reg */ | 755 | /* last 2 read from another reg */ |
| 762 | buf16 = (uint16_t *)(doc->oob_buf + 14); | 756 | buf16 = (uint16_t *)(nand->oob_poi + 14); |
| 763 | *buf16 = readw(docptr + DOCG4_MYSTERY_REG); | 757 | *buf16 = readw(docptr + DOCG4_MYSTERY_REG); |
| 764 | 758 | ||
| 765 | write_nop(docptr); | 759 | write_nop(docptr); |
| @@ -782,6 +776,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand, | |||
| 782 | } | 776 | } |
| 783 | 777 | ||
| 784 | writew(0, docptr + DOC_DATAEND); | 778 | writew(0, docptr + DOC_DATAEND); |
| 779 | if (bits_corrected == -EBADMSG) /* uncorrectable errors */ | ||
| 780 | return 0; | ||
| 785 | return bits_corrected; | 781 | return bits_corrected; |
| 786 | } | 782 | } |
| 787 | 783 | ||
| @@ -807,21 +803,6 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, | |||
| 807 | 803 | ||
| 808 | dev_dbg(doc->dev, "%s: page %x\n", __func__, page); | 804 | dev_dbg(doc->dev, "%s: page %x\n", __func__, page); |
| 809 | 805 | ||
| 810 | /* | ||
| 811 | * Oob bytes are read as part of a normal page read. If the previous | ||
| 812 | * nand command was a read of the page whose oob is now being read, just | ||
| 813 | * copy the oob bytes that we saved in a local buffer and avoid a | ||
| 814 | * separate oob read. | ||
| 815 | */ | ||
| 816 | if (doc->last_command.command == NAND_CMD_READ0 && | ||
| 817 | doc->last_command.page == page) { | ||
| 818 | memcpy(nand->oob_poi, doc->oob_buf, 16); | ||
| 819 | return 0; | ||
| 820 | } | ||
| 821 | |||
| 822 | /* | ||
| 823 | * Separate read of oob data only. | ||
| 824 | */ | ||
| 825 | docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); | 806 | docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); |
| 826 | 807 | ||
| 827 | writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); | 808 | writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); |
| @@ -898,7 +879,7 @@ static void docg4_erase_block(struct mtd_info *mtd, int page) | |||
| 898 | write_nop(docptr); | 879 | write_nop(docptr); |
| 899 | } | 880 | } |
| 900 | 881 | ||
| 901 | static void write_page(struct mtd_info *mtd, struct nand_chip *nand, | 882 | static int write_page(struct mtd_info *mtd, struct nand_chip *nand, |
| 902 | const uint8_t *buf, bool use_ecc) | 883 | const uint8_t *buf, bool use_ecc) |
| 903 | { | 884 | { |
| 904 | struct docg4_priv *doc = nand->priv; | 885 | struct docg4_priv *doc = nand->priv; |
| @@ -950,15 +931,17 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand, | |||
| 950 | write_nop(docptr); | 931 | write_nop(docptr); |
| 951 | writew(0, docptr + DOC_DATAEND); | 932 | writew(0, docptr + DOC_DATAEND); |
| 952 | write_nop(docptr); | 933 | write_nop(docptr); |
| 934 | |||
| 935 | return 0; | ||
| 953 | } | 936 | } |
| 954 | 937 | ||
| 955 | static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, | 938 | static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, |
| 956 | const uint8_t *buf, int oob_required) | 939 | const uint8_t *buf, int oob_required) |
| 957 | { | 940 | { |
| 958 | return write_page(mtd, nand, buf, false); | 941 | return write_page(mtd, nand, buf, false); |
| 959 | } | 942 | } |
| 960 | 943 | ||
| 961 | static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, | 944 | static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, |
| 962 | const uint8_t *buf, int oob_required) | 945 | const uint8_t *buf, int oob_required) |
| 963 | { | 946 | { |
| 964 | return write_page(mtd, nand, buf, true); | 947 | return write_page(mtd, nand, buf, true); |
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 784293806110..cc1480a5e4c1 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c | |||
| @@ -614,41 +614,6 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len) | |||
| 614 | len, avail); | 614 | len, avail); |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | /* | ||
| 618 | * Verify buffer against the FCM Controller Data Buffer | ||
| 619 | */ | ||
| 620 | static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 621 | { | ||
| 622 | struct nand_chip *chip = mtd->priv; | ||
| 623 | struct fsl_elbc_mtd *priv = chip->priv; | ||
| 624 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; | ||
| 625 | int i; | ||
| 626 | |||
| 627 | if (len < 0) { | ||
| 628 | dev_err(priv->dev, "write_buf of %d bytes", len); | ||
| 629 | return -EINVAL; | ||
| 630 | } | ||
| 631 | |||
| 632 | if ((unsigned int)len > | ||
| 633 | elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) { | ||
| 634 | dev_err(priv->dev, | ||
| 635 | "verify_buf beyond end of buffer " | ||
| 636 | "(%d requested, %u available)\n", | ||
| 637 | len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index); | ||
| 638 | |||
| 639 | elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes; | ||
| 640 | return -EINVAL; | ||
| 641 | } | ||
| 642 | |||
| 643 | for (i = 0; i < len; i++) | ||
| 644 | if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i]) | ||
| 645 | != buf[i]) | ||
| 646 | break; | ||
| 647 | |||
| 648 | elbc_fcm_ctrl->index += len; | ||
| 649 | return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO; | ||
| 650 | } | ||
| 651 | |||
| 652 | /* This function is called after Program and Erase Operations to | 617 | /* This function is called after Program and Erase Operations to |
| 653 | * check for success or failure. | 618 | * check for success or failure. |
| 654 | */ | 619 | */ |
| @@ -766,11 +731,13 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 766 | /* ECC will be calculated automatically, and errors will be detected in | 731 | /* ECC will be calculated automatically, and errors will be detected in |
| 767 | * waitfunc. | 732 | * waitfunc. |
| 768 | */ | 733 | */ |
| 769 | static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 734 | static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 770 | const uint8_t *buf, int oob_required) | 735 | const uint8_t *buf, int oob_required) |
| 771 | { | 736 | { |
| 772 | fsl_elbc_write_buf(mtd, buf, mtd->writesize); | 737 | fsl_elbc_write_buf(mtd, buf, mtd->writesize); |
| 773 | fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); | 738 | fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 739 | |||
| 740 | return 0; | ||
| 774 | } | 741 | } |
| 775 | 742 | ||
| 776 | static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | 743 | static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) |
| @@ -796,7 +763,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | |||
| 796 | chip->read_byte = fsl_elbc_read_byte; | 763 | chip->read_byte = fsl_elbc_read_byte; |
| 797 | chip->write_buf = fsl_elbc_write_buf; | 764 | chip->write_buf = fsl_elbc_write_buf; |
| 798 | chip->read_buf = fsl_elbc_read_buf; | 765 | chip->read_buf = fsl_elbc_read_buf; |
| 799 | chip->verify_buf = fsl_elbc_verify_buf; | ||
| 800 | chip->select_chip = fsl_elbc_select_chip; | 766 | chip->select_chip = fsl_elbc_select_chip; |
| 801 | chip->cmdfunc = fsl_elbc_cmdfunc; | 767 | chip->cmdfunc = fsl_elbc_cmdfunc; |
| 802 | chip->waitfunc = fsl_elbc_wait; | 768 | chip->waitfunc = fsl_elbc_wait; |
| @@ -805,7 +771,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | |||
| 805 | chip->bbt_md = &bbt_mirror_descr; | 771 | chip->bbt_md = &bbt_mirror_descr; |
| 806 | 772 | ||
| 807 | /* set up nand options */ | 773 | /* set up nand options */ |
| 808 | chip->options = NAND_NO_READRDY; | ||
| 809 | chip->bbt_options = NAND_BBT_USE_FLASH; | 774 | chip->bbt_options = NAND_BBT_USE_FLASH; |
| 810 | 775 | ||
| 811 | chip->controller = &elbc_fcm_ctrl->controller; | 776 | chip->controller = &elbc_fcm_ctrl->controller; |
| @@ -916,7 +881,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev) | |||
| 916 | elbc_fcm_ctrl->chips[bank] = priv; | 881 | elbc_fcm_ctrl->chips[bank] = priv; |
| 917 | priv->bank = bank; | 882 | priv->bank = bank; |
| 918 | priv->ctrl = fsl_lbc_ctrl_dev; | 883 | priv->ctrl = fsl_lbc_ctrl_dev; |
| 919 | priv->dev = dev; | 884 | priv->dev = &pdev->dev; |
| 885 | dev_set_drvdata(priv->dev, priv); | ||
| 920 | 886 | ||
| 921 | priv->vbase = ioremap(res.start, resource_size(&res)); | 887 | priv->vbase = ioremap(res.start, resource_size(&res)); |
| 922 | if (!priv->vbase) { | 888 | if (!priv->vbase) { |
| @@ -963,11 +929,10 @@ err: | |||
| 963 | 929 | ||
| 964 | static int fsl_elbc_nand_remove(struct platform_device *pdev) | 930 | static int fsl_elbc_nand_remove(struct platform_device *pdev) |
| 965 | { | 931 | { |
| 966 | int i; | ||
| 967 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; | 932 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; |
| 968 | for (i = 0; i < MAX_BANKS; i++) | 933 | struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev); |
| 969 | if (elbc_fcm_ctrl->chips[i]) | 934 | |
| 970 | fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]); | 935 | fsl_elbc_chip_remove(priv); |
| 971 | 936 | ||
| 972 | mutex_lock(&fsl_elbc_nand_mutex); | 937 | mutex_lock(&fsl_elbc_nand_mutex); |
| 973 | elbc_fcm_ctrl->counter--; | 938 | elbc_fcm_ctrl->counter--; |
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 01e2f2e87d8c..3551a99076ba 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c | |||
| @@ -194,7 +194,7 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum) | |||
| 194 | struct nand_chip *chip = mtd->priv; | 194 | struct nand_chip *chip = mtd->priv; |
| 195 | struct fsl_ifc_mtd *priv = chip->priv; | 195 | struct fsl_ifc_mtd *priv = chip->priv; |
| 196 | u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); | 196 | u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); |
| 197 | u32 __iomem *mainarea = (u32 *)addr; | 197 | u32 __iomem *mainarea = (u32 __iomem *)addr; |
| 198 | u8 __iomem *oob = addr + mtd->writesize; | 198 | u8 __iomem *oob = addr + mtd->writesize; |
| 199 | int i; | 199 | int i; |
| 200 | 200 | ||
| @@ -592,8 +592,8 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) | |||
| 592 | * next byte. | 592 | * next byte. |
| 593 | */ | 593 | */ |
| 594 | if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { | 594 | if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { |
| 595 | data = in_be16((uint16_t *)&ifc_nand_ctrl-> | 595 | data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl-> |
| 596 | addr[ifc_nand_ctrl->index]); | 596 | addr[ifc_nand_ctrl->index]); |
| 597 | ifc_nand_ctrl->index += 2; | 597 | ifc_nand_ctrl->index += 2; |
| 598 | return (uint8_t) data; | 598 | return (uint8_t) data; |
| 599 | } | 599 | } |
| @@ -628,46 +628,6 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len) | |||
| 628 | } | 628 | } |
| 629 | 629 | ||
| 630 | /* | 630 | /* |
| 631 | * Verify buffer against the IFC Controller Data Buffer | ||
| 632 | */ | ||
| 633 | static int fsl_ifc_verify_buf(struct mtd_info *mtd, | ||
| 634 | const u_char *buf, int len) | ||
| 635 | { | ||
| 636 | struct nand_chip *chip = mtd->priv; | ||
| 637 | struct fsl_ifc_mtd *priv = chip->priv; | ||
| 638 | struct fsl_ifc_ctrl *ctrl = priv->ctrl; | ||
| 639 | struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; | ||
| 640 | int i; | ||
| 641 | |||
| 642 | if (len < 0) { | ||
| 643 | dev_err(priv->dev, "%s: write_buf of %d bytes", __func__, len); | ||
| 644 | return -EINVAL; | ||
| 645 | } | ||
| 646 | |||
| 647 | if ((unsigned int)len > nctrl->read_bytes - nctrl->index) { | ||
| 648 | dev_err(priv->dev, | ||
| 649 | "%s: beyond end of buffer (%d requested, %u available)\n", | ||
| 650 | __func__, len, nctrl->read_bytes - nctrl->index); | ||
| 651 | |||
| 652 | nctrl->index = nctrl->read_bytes; | ||
| 653 | return -EINVAL; | ||
| 654 | } | ||
| 655 | |||
| 656 | for (i = 0; i < len; i++) | ||
| 657 | if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i]) | ||
| 658 | break; | ||
| 659 | |||
| 660 | nctrl->index += len; | ||
| 661 | |||
| 662 | if (i != len) | ||
| 663 | return -EIO; | ||
| 664 | if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) | ||
| 665 | return -EIO; | ||
| 666 | |||
| 667 | return 0; | ||
| 668 | } | ||
| 669 | |||
| 670 | /* | ||
| 671 | * This function is called after Program and Erase Operations to | 631 | * This function is called after Program and Erase Operations to |
| 672 | * check for success or failure. | 632 | * check for success or failure. |
| 673 | */ | 633 | */ |
| @@ -722,11 +682,13 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 722 | /* ECC will be calculated automatically, and errors will be detected in | 682 | /* ECC will be calculated automatically, and errors will be detected in |
| 723 | * waitfunc. | 683 | * waitfunc. |
| 724 | */ | 684 | */ |
| 725 | static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 685 | static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 726 | const uint8_t *buf, int oob_required) | 686 | const uint8_t *buf, int oob_required) |
| 727 | { | 687 | { |
| 728 | fsl_ifc_write_buf(mtd, buf, mtd->writesize); | 688 | fsl_ifc_write_buf(mtd, buf, mtd->writesize); |
| 729 | fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); | 689 | fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 690 | |||
| 691 | return 0; | ||
| 730 | } | 692 | } |
| 731 | 693 | ||
| 732 | static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) | 694 | static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) |
| @@ -844,7 +806,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) | |||
| 844 | 806 | ||
| 845 | chip->write_buf = fsl_ifc_write_buf; | 807 | chip->write_buf = fsl_ifc_write_buf; |
| 846 | chip->read_buf = fsl_ifc_read_buf; | 808 | chip->read_buf = fsl_ifc_read_buf; |
| 847 | chip->verify_buf = fsl_ifc_verify_buf; | ||
| 848 | chip->select_chip = fsl_ifc_select_chip; | 809 | chip->select_chip = fsl_ifc_select_chip; |
| 849 | chip->cmdfunc = fsl_ifc_cmdfunc; | 810 | chip->cmdfunc = fsl_ifc_cmdfunc; |
| 850 | chip->waitfunc = fsl_ifc_wait; | 811 | chip->waitfunc = fsl_ifc_wait; |
| @@ -855,7 +816,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) | |||
| 855 | out_be32(&ifc->ifc_nand.ncfgr, 0x0); | 816 | out_be32(&ifc->ifc_nand.ncfgr, 0x0); |
| 856 | 817 | ||
| 857 | /* set up nand options */ | 818 | /* set up nand options */ |
| 858 | chip->options = NAND_NO_READRDY; | ||
| 859 | chip->bbt_options = NAND_BBT_USE_FLASH; | 819 | chip->bbt_options = NAND_BBT_USE_FLASH; |
| 860 | 820 | ||
| 861 | 821 | ||
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index 27000a5f5f47..bc73bc5f2713 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c | |||
| @@ -100,23 +100,6 @@ static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 100 | readsb(this->IO_ADDR_R, buf, len); | 100 | readsb(this->IO_ADDR_R, buf, len); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 104 | { | ||
| 105 | struct nand_chip *this = mtd->priv; | ||
| 106 | unsigned char read, *p = (unsigned char *) buf; | ||
| 107 | int i, err = 0; | ||
| 108 | |||
| 109 | for (i = 0; i < len; i++) { | ||
| 110 | read = readb(this->IO_ADDR_R); | ||
| 111 | if (read != p[i]) { | ||
| 112 | pr_debug("%s: err at %d (read %04x vs %04x)\n", | ||
| 113 | __func__, i, read, p[i]); | ||
| 114 | err = -EFAULT; | ||
| 115 | } | ||
| 116 | } | ||
| 117 | return err; | ||
| 118 | } | ||
| 119 | |||
| 120 | static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, | 103 | static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, |
| 121 | int len) | 104 | int len) |
| 122 | { | 105 | { |
| @@ -148,26 +131,6 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len) | |||
| 148 | } | 131 | } |
| 149 | } | 132 | } |
| 150 | 133 | ||
| 151 | static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf, | ||
| 152 | int len) | ||
| 153 | { | ||
| 154 | struct nand_chip *this = mtd->priv; | ||
| 155 | unsigned short read, *p = (unsigned short *) buf; | ||
| 156 | int i, err = 0; | ||
| 157 | len >>= 1; | ||
| 158 | |||
| 159 | for (i = 0; i < len; i++) { | ||
| 160 | read = readw(this->IO_ADDR_R); | ||
| 161 | if (read != p[i]) { | ||
| 162 | pr_debug("%s: err at %d (read %04x vs %04x)\n", | ||
| 163 | __func__, i, read, p[i]); | ||
| 164 | err = -EFAULT; | ||
| 165 | } | ||
| 166 | } | ||
| 167 | return err; | ||
| 168 | } | ||
| 169 | |||
| 170 | |||
| 171 | static int gpio_nand_devready(struct mtd_info *mtd) | 134 | static int gpio_nand_devready(struct mtd_info *mtd) |
| 172 | { | 135 | { |
| 173 | struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); | 136 | struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); |
| @@ -391,11 +354,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev) | |||
| 391 | if (this->options & NAND_BUSWIDTH_16) { | 354 | if (this->options & NAND_BUSWIDTH_16) { |
| 392 | this->read_buf = gpio_nand_readbuf16; | 355 | this->read_buf = gpio_nand_readbuf16; |
| 393 | this->write_buf = gpio_nand_writebuf16; | 356 | this->write_buf = gpio_nand_writebuf16; |
| 394 | this->verify_buf = gpio_nand_verifybuf16; | ||
| 395 | } else { | 357 | } else { |
| 396 | this->read_buf = gpio_nand_readbuf; | 358 | this->read_buf = gpio_nand_readbuf; |
| 397 | this->write_buf = gpio_nand_writebuf; | 359 | this->write_buf = gpio_nand_writebuf; |
| 398 | this->verify_buf = gpio_nand_verifybuf; | ||
| 399 | } | 360 | } |
| 400 | 361 | ||
| 401 | /* set the mtd private data for the nand driver */ | 362 | /* set the mtd private data for the nand driver */ |
| @@ -456,20 +417,7 @@ static struct platform_driver gpio_nand_driver = { | |||
| 456 | }, | 417 | }, |
| 457 | }; | 418 | }; |
| 458 | 419 | ||
| 459 | static int __init gpio_nand_init(void) | 420 | module_platform_driver(gpio_nand_driver); |
| 460 | { | ||
| 461 | printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n"); | ||
| 462 | |||
| 463 | return platform_driver_register(&gpio_nand_driver); | ||
| 464 | } | ||
| 465 | |||
| 466 | static void __exit gpio_nand_exit(void) | ||
| 467 | { | ||
| 468 | platform_driver_unregister(&gpio_nand_driver); | ||
| 469 | } | ||
| 470 | |||
| 471 | module_init(gpio_nand_init); | ||
| 472 | module_exit(gpio_nand_exit); | ||
| 473 | 421 | ||
| 474 | MODULE_LICENSE("GPL"); | 422 | MODULE_LICENSE("GPL"); |
| 475 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); | 423 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); |
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index a1f43329ad43..3502accd4bc3 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #include "gpmi-regs.h" | 26 | #include "gpmi-regs.h" |
| 27 | #include "bch-regs.h" | 27 | #include "bch-regs.h" |
| 28 | 28 | ||
| 29 | struct timing_threshod timing_default_threshold = { | 29 | static struct timing_threshod timing_default_threshold = { |
| 30 | .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >> | 30 | .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >> |
| 31 | BP_GPMI_TIMING0_DATA_SETUP), | 31 | BP_GPMI_TIMING0_DATA_SETUP), |
| 32 | .internal_data_setup_in_ns = 0, | 32 | .internal_data_setup_in_ns = 0, |
| @@ -124,12 +124,42 @@ error: | |||
| 124 | return -ETIMEDOUT; | 124 | return -ETIMEDOUT; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v) | ||
| 128 | { | ||
| 129 | struct clk *clk; | ||
| 130 | int ret; | ||
| 131 | int i; | ||
| 132 | |||
| 133 | for (i = 0; i < GPMI_CLK_MAX; i++) { | ||
| 134 | clk = this->resources.clock[i]; | ||
| 135 | if (!clk) | ||
| 136 | break; | ||
| 137 | |||
| 138 | if (v) { | ||
| 139 | ret = clk_prepare_enable(clk); | ||
| 140 | if (ret) | ||
| 141 | goto err_clk; | ||
| 142 | } else { | ||
| 143 | clk_disable_unprepare(clk); | ||
| 144 | } | ||
| 145 | } | ||
| 146 | return 0; | ||
| 147 | |||
| 148 | err_clk: | ||
| 149 | for (; i > 0; i--) | ||
| 150 | clk_disable_unprepare(this->resources.clock[i - 1]); | ||
| 151 | return ret; | ||
| 152 | } | ||
| 153 | |||
| 154 | #define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) | ||
| 155 | #define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) | ||
| 156 | |||
| 127 | int gpmi_init(struct gpmi_nand_data *this) | 157 | int gpmi_init(struct gpmi_nand_data *this) |
| 128 | { | 158 | { |
| 129 | struct resources *r = &this->resources; | 159 | struct resources *r = &this->resources; |
| 130 | int ret; | 160 | int ret; |
| 131 | 161 | ||
| 132 | ret = clk_prepare_enable(r->clock); | 162 | ret = gpmi_enable_clk(this); |
| 133 | if (ret) | 163 | if (ret) |
| 134 | goto err_out; | 164 | goto err_out; |
| 135 | ret = gpmi_reset_block(r->gpmi_regs, false); | 165 | ret = gpmi_reset_block(r->gpmi_regs, false); |
| @@ -149,7 +179,7 @@ int gpmi_init(struct gpmi_nand_data *this) | |||
| 149 | /* Select BCH ECC. */ | 179 | /* Select BCH ECC. */ |
| 150 | writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); | 180 | writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); |
| 151 | 181 | ||
| 152 | clk_disable_unprepare(r->clock); | 182 | gpmi_disable_clk(this); |
| 153 | return 0; | 183 | return 0; |
| 154 | err_out: | 184 | err_out: |
| 155 | return ret; | 185 | return ret; |
| @@ -205,7 +235,7 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
| 205 | ecc_strength = bch_geo->ecc_strength >> 1; | 235 | ecc_strength = bch_geo->ecc_strength >> 1; |
| 206 | page_size = bch_geo->page_size; | 236 | page_size = bch_geo->page_size; |
| 207 | 237 | ||
| 208 | ret = clk_prepare_enable(r->clock); | 238 | ret = gpmi_enable_clk(this); |
| 209 | if (ret) | 239 | if (ret) |
| 210 | goto err_out; | 240 | goto err_out; |
| 211 | 241 | ||
| @@ -240,7 +270,7 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
| 240 | writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, | 270 | writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, |
| 241 | r->bch_regs + HW_BCH_CTRL_SET); | 271 | r->bch_regs + HW_BCH_CTRL_SET); |
| 242 | 272 | ||
| 243 | clk_disable_unprepare(r->clock); | 273 | gpmi_disable_clk(this); |
| 244 | return 0; | 274 | return 0; |
| 245 | err_out: | 275 | err_out: |
| 246 | return ret; | 276 | return ret; |
| @@ -263,6 +293,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, | |||
| 263 | struct gpmi_nfc_hardware_timing *hw) | 293 | struct gpmi_nfc_hardware_timing *hw) |
| 264 | { | 294 | { |
| 265 | struct timing_threshod *nfc = &timing_default_threshold; | 295 | struct timing_threshod *nfc = &timing_default_threshold; |
| 296 | struct resources *r = &this->resources; | ||
| 266 | struct nand_chip *nand = &this->nand; | 297 | struct nand_chip *nand = &this->nand; |
| 267 | struct nand_timing target = this->timing; | 298 | struct nand_timing target = this->timing; |
| 268 | bool improved_timing_is_available; | 299 | bool improved_timing_is_available; |
| @@ -302,8 +333,9 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, | |||
| 302 | (target.tRHOH_in_ns >= 0) ; | 333 | (target.tRHOH_in_ns >= 0) ; |
| 303 | 334 | ||
| 304 | /* Inspect the clock. */ | 335 | /* Inspect the clock. */ |
| 336 | nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]); | ||
| 305 | clock_frequency_in_hz = nfc->clock_frequency_in_hz; | 337 | clock_frequency_in_hz = nfc->clock_frequency_in_hz; |
| 306 | clock_period_in_ns = 1000000000 / clock_frequency_in_hz; | 338 | clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz; |
| 307 | 339 | ||
| 308 | /* | 340 | /* |
| 309 | * The NFC quantizes setup and hold parameters in terms of clock cycles. | 341 | * The NFC quantizes setup and hold parameters in terms of clock cycles. |
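The quantization mentioned in the comment above amounts to rounding a nanosecond target up to whole clock periods and clamping to a minimum. A minimal, self-contained sketch of that idea follows (plain userspace C; the helper name and clamp behaviour are illustrative assumptions, not the driver's exact code):

/* A minimal sketch of nanosecond-to-cycle quantization, assuming a
 * round-up-and-clamp policy; names are illustrative, not the driver's. */
#include <stdio.h>

static unsigned int ns_to_cycles(unsigned int time_ns, unsigned int period_ns,
                                 unsigned int min_cycles)
{
        /* Round up so the programmed time is never shorter than requested. */
        unsigned int cycles = (time_ns + period_ns - 1) / period_ns;

        return cycles > min_cycles ? cycles : min_cycles;
}

int main(void)
{
        unsigned int period_ns = 1000000000U / 100000000U; /* 100MHz -> 10ns */

        /* A 25ns data-setup target needs 3 cycles of a 10ns clock. */
        printf("data setup: %u cycles\n", ns_to_cycles(25, period_ns, 1));
        return 0;
}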
| @@ -698,17 +730,230 @@ return_results: | |||
| 698 | hw->address_setup_in_cycles = address_setup_in_cycles; | 730 | hw->address_setup_in_cycles = address_setup_in_cycles; |
| 699 | hw->use_half_periods = dll_use_half_periods; | 731 | hw->use_half_periods = dll_use_half_periods; |
| 700 | hw->sample_delay_factor = sample_delay_factor; | 732 | hw->sample_delay_factor = sample_delay_factor; |
| 733 | hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT; | ||
| 734 | hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; | ||
| 701 | 735 | ||
| 702 | /* Return success. */ | 736 | /* Return success. */ |
| 703 | return 0; | 737 | return 0; |
| 704 | } | 738 | } |
| 705 | 739 | ||
| 740 | /* | ||
| 741 | * <1> First, we should know what the GPMI-clock means. | ||
| 742 | * The GPMI-clock is the internal clock in the gpmi nand controller. | ||
| 743 | * If you set the gpmi nand controller to 100MHz, the GPMI-clock's period | ||
| 744 | * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period. | ||
| 745 | * | ||
| 746 | * <2> Second, we should know the frequency on the nand chip pins. | ||
| 747 | * The frequency on the nand chip pins is derived from the GPMI-clock. | ||
| 748 | * We can get it from the following equation: | ||
| 749 | * | ||
| 750 | * F = G / (DS + DH) | ||
| 751 | * | ||
| 752 | * F : the frequency on the nand chip pins. | ||
| 753 | * G : the GPMI clock, such as 100MHz. | ||
| 754 | * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP | ||
| 755 | * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD | ||
| 756 | * | ||
| 757 | * <3> Third, when the frequency on the nand chip pins is above 33MHz, | ||
| 758 | * the nand EDO (Extended Data Out) timing can be applied. | ||
| 759 | * The GPMI implements a feedback read strobe to sample the read data. | ||
| 760 | * The feedback read strobe can be delayed to support the nand EDO timing | ||
| 761 | * where the read strobe may deassert before the read data is valid, and | ||
| 762 | * read data is valid for some time after read strobe. | ||
| 763 | * | ||
| 764 | * The following figure illustrates some aspects of a NAND Flash read: | ||
| 765 | * | ||
| 766 | * |<---tREA---->| | ||
| 767 | * | | | ||
| 768 | * | | | | ||
| 769 | * |<--tRP-->| | | ||
| 770 | * | | | | ||
| 771 | * __ ___|__________________________________ | ||
| 772 | * RDN \________/ | | ||
| 773 | * | | ||
| 774 | * /---------\ | ||
| 775 | * Read Data --------------< >--------- | ||
| 776 | * \---------/ | ||
| 777 | * | | | ||
| 778 | * |<-D->| | ||
| 779 | * FeedbackRDN ________ ____________ | ||
| 780 | * \___________/ | ||
| 781 | * | ||
| 782 | * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. | ||
| 783 | * | ||
| 784 | * | ||
| 785 | * <4> Now, we begin to describe how to compute the right RDN_DELAY. | ||
| 786 | * | ||
| 787 | * 4.1) From the aspect of the nand chip pins: | ||
| 788 | * Delay = (tREA + C - tRP) {1} | ||
| 789 | * | ||
| 790 | * tREA : the maximum read access time. From the ONFI nand standards, | ||
| 791 | * we know that tREA is 16ns in mode 5 and 20ns in mode 4. | ||
| 792 | * Please check it at: www.onfi.org | ||
| 793 | * C : a constant to adjust the delay; the default is 4. | ||
| 794 | * tRP : the read pulse width. | ||
| 795 | * Specified by the HW_GPMI_TIMING0:DATA_SETUP: | ||
| 796 | * tRP = (GPMI-clock-period) * DATA_SETUP | ||
| 797 | * | ||
| 798 | * 4.2) From the aspect of the GPMI nand controller: | ||
| 799 | * Delay = RDN_DELAY * 0.125 * RP {2} | ||
| 800 | * | ||
| 801 | * RP : the DLL reference period. | ||
| 802 | * if (GPMI-clock-period > DLL_THRESHOLD) | ||
| 803 | * RP = GPMI-clock-period / 2; | ||
| 804 | * else | ||
| 805 | * RP = GPMI-clock-period; | ||
| 806 | * | ||
| 807 | * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period | ||
| 808 | * is greater than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD | ||
| 809 | * is 16ns, but in mx6q, we use 12ns. | ||
| 810 | * | ||
| 811 | * 4.3) since {1} equals {2}, we get: | ||
| 812 | * | ||
| 813 | * (tREA + 4 - tRP) * 8 | ||
| 814 | * RDN_DELAY = --------------------- {3} | ||
| 815 | * RP | ||
| 816 | * | ||
| 817 | * 4.4) We only support the fastest asynchronous mode of ONFI nand. | ||
| 818 | * For some ONFI nand, the mode 4 is the fastest mode; | ||
| 819 | * while for some ONFI nand, the mode 5 is the fastest mode. | ||
| 820 | * So we only support mode 4 and mode 5. There is no need to | ||
| 821 | * support other modes. | ||
| 822 | */ | ||
| 823 | static void gpmi_compute_edo_timing(struct gpmi_nand_data *this, | ||
| 824 | struct gpmi_nfc_hardware_timing *hw) | ||
| 825 | { | ||
| 826 | struct resources *r = &this->resources; | ||
| 827 | unsigned long rate = clk_get_rate(r->clock[0]); | ||
| 828 | int mode = this->timing_mode; | ||
| 829 | int dll_threshold = 16; /* in ns */ | ||
| 830 | unsigned long delay; | ||
| 831 | unsigned long clk_period; | ||
| 832 | int t_rea; | ||
| 833 | int c = 4; | ||
| 834 | int t_rp; | ||
| 835 | int rp; | ||
| 836 | |||
| 837 | /* | ||
| 838 | * [1] for GPMI_HW_GPMI_TIMING0: | ||
| 839 | * The async mode requires 40MHz for mode 4, 50MHz for mode 5. | ||
| 840 | * The GPMI can support 100MHz at most. So if we want to | ||
| 841 | * get the 40MHz or 50MHz, we have to set DS=1, DH=1. | ||
| 842 | * Set the ADDRESS_SETUP to 0 in mode 4. | ||
| 843 | */ | ||
| 844 | hw->data_setup_in_cycles = 1; | ||
| 845 | hw->data_hold_in_cycles = 1; | ||
| 846 | hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0); | ||
| 847 | |||
| 848 | /* [2] for GPMI_HW_GPMI_TIMING1 */ | ||
| 849 | hw->device_busy_timeout = 0x9000; | ||
| 850 | |||
| 851 | /* [3] for GPMI_HW_GPMI_CTRL1 */ | ||
| 852 | hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; | ||
| 853 | |||
| 854 | if (GPMI_IS_MX6Q(this)) | ||
| 855 | dll_threshold = 12; | ||
| 856 | |||
| 857 | /* | ||
| 858 | * Scale the numerator and denominator in {3} by 10. | ||
| 859 | * This gives us a more accurate result. | ||
| 860 | */ | ||
| 861 | clk_period = NSEC_PER_SEC / (rate / 10); | ||
| 862 | dll_threshold *= 10; | ||
| 863 | t_rea = ((mode == 5) ? 16 : 20) * 10; | ||
| 864 | c *= 10; | ||
| 865 | |||
| 866 | t_rp = clk_period * 1; /* DATA_SETUP is 1 */ | ||
| 867 | |||
| 868 | if (clk_period > dll_threshold) { | ||
| 869 | hw->use_half_periods = 1; | ||
| 870 | rp = clk_period / 2; | ||
| 871 | } else { | ||
| 872 | hw->use_half_periods = 0; | ||
| 873 | rp = clk_period; | ||
| 874 | } | ||
| 875 | |||
| 876 | /* | ||
| 877 | * Multiply the numerator by 10 so we can round off: | ||
| 878 | * 7.8 rounds up to 8; 7.4 rounds down to 7. | ||
| 879 | */ | ||
| 880 | delay = (((t_rea + c - t_rp) * 8) * 10) / rp; | ||
| 881 | delay = (delay + 5) / 10; | ||
| 882 | |||
| 883 | hw->sample_delay_factor = delay; | ||
| 884 | } | ||
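To make formula {3} above concrete, the following standalone sketch reproduces the same scale-by-10-then-round computation for RDN_DELAY; the helper name and parameters are hypothetical, and it assumes DATA_SETUP = 1 as in the function above.

/* Sketch of formula {3}: RDN_DELAY = (tREA + C - tRP) * 8 / RP, worked in
 * tenths of a nanosecond so the final value can be rounded to an integer.
 * Hypothetical helper for illustration only. */
#include <stdio.h>

static unsigned long rdn_delay(unsigned long period_ns10, /* clock period * 10 */
                               unsigned long t_rea_ns10,  /* tREA * 10 */
                               int use_half_period)
{
        unsigned long c = 40;                 /* adjustment constant, 4ns * 10 */
        unsigned long t_rp = period_ns10;     /* DATA_SETUP is 1 clock cycle */
        unsigned long rp = use_half_period ? period_ns10 / 2 : period_ns10;
        unsigned long delay = ((t_rea_ns10 + c - t_rp) * 8 * 10) / rp;

        return (delay + 5) / 10;              /* 7.8 -> 8, 7.4 -> 7 */
}

int main(void)
{
        /* Mode 5: 100MHz GPMI clock (10ns period), tREA = 16ns. */
        printf("RDN_DELAY = %lu\n", rdn_delay(100, 160, 0));
        return 0;
}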
| 885 | |||
| 886 | static int enable_edo_mode(struct gpmi_nand_data *this, int mode) | ||
| 887 | { | ||
| 888 | struct resources *r = &this->resources; | ||
| 889 | struct nand_chip *nand = &this->nand; | ||
| 890 | struct mtd_info *mtd = &this->mtd; | ||
| 891 | uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {}; | ||
| 892 | unsigned long rate; | ||
| 893 | int ret; | ||
| 894 | |||
| 895 | nand->select_chip(mtd, 0); | ||
| 896 | |||
| 897 | /* [1] send SET FEATURE command to NAND */ | ||
| 898 | feature[0] = mode; | ||
| 899 | ret = nand->onfi_set_features(mtd, nand, | ||
| 900 | ONFI_FEATURE_ADDR_TIMING_MODE, feature); | ||
| 901 | if (ret) | ||
| 902 | goto err_out; | ||
| 903 | |||
| 904 | /* [2] send GET FEATURE command to double-check the timing mode */ | ||
| 905 | memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN); | ||
| 906 | ret = nand->onfi_get_features(mtd, nand, | ||
| 907 | ONFI_FEATURE_ADDR_TIMING_MODE, feature); | ||
| 908 | if (ret || feature[0] != mode) | ||
| 909 | goto err_out; | ||
| 910 | |||
| 911 | nand->select_chip(mtd, -1); | ||
| 912 | |||
| 913 | /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */ | ||
| 914 | rate = (mode == 5) ? 100000000 : 80000000; | ||
| 915 | clk_set_rate(r->clock[0], rate); | ||
| 916 | |||
| 917 | /* Let the gpmi_begin() re-compute the timing again. */ | ||
| 918 | this->flags &= ~GPMI_TIMING_INIT_OK; | ||
| 919 | |||
| 920 | this->flags |= GPMI_ASYNC_EDO_ENABLED; | ||
| 921 | this->timing_mode = mode; | ||
| 922 | dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode); | ||
| 923 | return 0; | ||
| 924 | |||
| 925 | err_out: | ||
| 926 | nand->select_chip(mtd, -1); | ||
| 927 | dev_err(this->dev, "mode:%d, failed to set feature.\n", mode); | ||
| 928 | return -EINVAL; | ||
| 929 | } | ||
| 930 | |||
| 931 | int gpmi_extra_init(struct gpmi_nand_data *this) | ||
| 932 | { | ||
| 933 | struct nand_chip *chip = &this->nand; | ||
| 934 | |||
| 935 | /* Enable the asynchronous EDO feature. */ | ||
| 936 | if (GPMI_IS_MX6Q(this) && chip->onfi_version) { | ||
| 937 | int mode = onfi_get_async_timing_mode(chip); | ||
| 938 | |||
| 939 | /* We only support the timing mode 4 and mode 5. */ | ||
| 940 | if (mode & ONFI_TIMING_MODE_5) | ||
| 941 | mode = 5; | ||
| 942 | else if (mode & ONFI_TIMING_MODE_4) | ||
| 943 | mode = 4; | ||
| 944 | else | ||
| 945 | return 0; | ||
| 946 | |||
| 947 | return enable_edo_mode(this, mode); | ||
| 948 | } | ||
| 949 | return 0; | ||
| 950 | } | ||
| 951 | |||
| 706 | /* Begin the I/O */ | 952 | /* Begin the I/O */ |
| 707 | void gpmi_begin(struct gpmi_nand_data *this) | 953 | void gpmi_begin(struct gpmi_nand_data *this) |
| 708 | { | 954 | { |
| 709 | struct resources *r = &this->resources; | 955 | struct resources *r = &this->resources; |
| 710 | struct timing_threshod *nfc = &timing_default_threshold; | 956 | void __iomem *gpmi_regs = r->gpmi_regs; |
| 711 | unsigned char *gpmi_regs = r->gpmi_regs; | ||
| 712 | unsigned int clock_period_in_ns; | 957 | unsigned int clock_period_in_ns; |
| 713 | uint32_t reg; | 958 | uint32_t reg; |
| 714 | unsigned int dll_wait_time_in_us; | 959 | unsigned int dll_wait_time_in_us; |
| @@ -716,60 +961,66 @@ void gpmi_begin(struct gpmi_nand_data *this) | |||
| 716 | int ret; | 961 | int ret; |
| 717 | 962 | ||
| 718 | /* Enable the clock. */ | 963 | /* Enable the clock. */ |
| 719 | ret = clk_prepare_enable(r->clock); | 964 | ret = gpmi_enable_clk(this); |
| 720 | if (ret) { | 965 | if (ret) { |
| 721 | pr_err("We failed in enable the clk\n"); | 966 | pr_err("We failed in enable the clk\n"); |
| 722 | goto err_out; | 967 | goto err_out; |
| 723 | } | 968 | } |
| 724 | 969 | ||
| 725 | /* set ready/busy timeout */ | 970 | /* Only initialize the timing once */ |
| 726 | writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT, | 971 | if (this->flags & GPMI_TIMING_INIT_OK) |
| 727 | gpmi_regs + HW_GPMI_TIMING1); | 972 | return; |
| 728 | 973 | this->flags |= GPMI_TIMING_INIT_OK; | |
| 729 | /* Get the timing information we need. */ | ||
| 730 | nfc->clock_frequency_in_hz = clk_get_rate(r->clock); | ||
| 731 | clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz; | ||
| 732 | 974 | ||
| 733 | gpmi_nfc_compute_hardware_timing(this, &hw); | 975 | if (this->flags & GPMI_ASYNC_EDO_ENABLED) |
| 976 | gpmi_compute_edo_timing(this, &hw); | ||
| 977 | else | ||
| 978 | gpmi_nfc_compute_hardware_timing(this, &hw); | ||
| 734 | 979 | ||
| 735 | /* Set up all the simple timing parameters. */ | 980 | /* [1] Set HW_GPMI_TIMING0 */ |
| 736 | reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) | | 981 | reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) | |
| 737 | BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) | | 982 | BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) | |
| 738 | BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ; | 983 | BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ; |
| 739 | 984 | ||
| 740 | writel(reg, gpmi_regs + HW_GPMI_TIMING0); | 985 | writel(reg, gpmi_regs + HW_GPMI_TIMING0); |
| 741 | 986 | ||
| 742 | /* | 987 | /* [2] Set HW_GPMI_TIMING1 */ |
| 743 | * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. | 988 | writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout), |
| 744 | */ | 989 | gpmi_regs + HW_GPMI_TIMING1); |
| 990 | |||
| 991 | /* [3] The following code is to set the HW_GPMI_CTRL1. */ | ||
| 992 | |||
| 993 | /* Set the WRN_DLY_SEL */ | ||
| 994 | writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR); | ||
| 995 | writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel), | ||
| 996 | gpmi_regs + HW_GPMI_CTRL1_SET); | ||
| 997 | |||
| 998 | /* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */ | ||
| 745 | writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR); | 999 | writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR); |
| 746 | 1000 | ||
| 747 | /* Clear out the DLL control fields. */ | 1001 | /* Clear out the DLL control fields. */ |
| 748 | writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR); | 1002 | reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD; |
| 749 | writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR); | 1003 | writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR); |
| 750 | 1004 | ||
| 751 | /* If no sample delay is called for, return immediately. */ | 1005 | /* If no sample delay is called for, return immediately. */ |
| 752 | if (!hw.sample_delay_factor) | 1006 | if (!hw.sample_delay_factor) |
| 753 | return; | 1007 | return; |
| 754 | 1008 | ||
| 755 | /* Configure the HALF_PERIOD flag. */ | 1009 | /* Set RDN_DELAY or HALF_PERIOD. */ |
| 756 | if (hw.use_half_periods) | 1010 | reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0) |
| 757 | writel(BM_GPMI_CTRL1_HALF_PERIOD, | 1011 | | BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor); |
| 758 | gpmi_regs + HW_GPMI_CTRL1_SET); | ||
| 759 | 1012 | ||
| 760 | /* Set the delay factor. */ | 1013 | writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET); |
| 761 | writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor), | ||
| 762 | gpmi_regs + HW_GPMI_CTRL1_SET); | ||
| 763 | 1014 | ||
| 764 | /* Enable the DLL. */ | 1015 | /* At last, we enable the DLL. */ |
| 765 | writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET); | 1016 | writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET); |
| 766 | 1017 | ||
| 767 | /* | 1018 | /* |
| 768 | * After we enable the GPMI DLL, we have to wait 64 clock cycles before | 1019 | * After we enable the GPMI DLL, we have to wait 64 clock cycles before |
| 769 | * we can use the GPMI. | 1020 | * we can use the GPMI. Calculate the amount of time we need to wait, |
| 770 | * | 1021 | * in microseconds. |
| 771 | * Calculate the amount of time we need to wait, in microseconds. | ||
| 772 | */ | 1022 | */ |
| 1023 | clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]); | ||
| 773 | dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000; | 1024 | dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000; |
| 774 | 1025 | ||
| 775 | if (!dll_wait_time_in_us) | 1026 | if (!dll_wait_time_in_us) |
| @@ -784,8 +1035,7 @@ err_out: | |||
| 784 | 1035 | ||
| 785 | void gpmi_end(struct gpmi_nand_data *this) | 1036 | void gpmi_end(struct gpmi_nand_data *this) |
| 786 | { | 1037 | { |
| 787 | struct resources *r = &this->resources; | 1038 | gpmi_disable_clk(this); |
| 788 | clk_disable_unprepare(r->clock); | ||
| 789 | } | 1039 | } |
| 790 | 1040 | ||
| 791 | /* Clears a BCH interrupt. */ | 1041 | /* Clears a BCH interrupt. */ |
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index a6cad5caba78..d79696b2f19b 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c | |||
| @@ -18,6 +18,9 @@ | |||
| 18 | * with this program; if not, write to the Free Software Foundation, Inc., | 18 | * with this program; if not, write to the Free Software Foundation, Inc., |
| 19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | 19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
| 20 | */ | 20 | */ |
| 21 | |||
| 22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 23 | |||
| 21 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
| 22 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 23 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| @@ -27,6 +30,7 @@ | |||
| 27 | #include <linux/pinctrl/consumer.h> | 30 | #include <linux/pinctrl/consumer.h> |
| 28 | #include <linux/of.h> | 31 | #include <linux/of.h> |
| 29 | #include <linux/of_device.h> | 32 | #include <linux/of_device.h> |
| 33 | #include <linux/of_mtd.h> | ||
| 30 | #include "gpmi-nand.h" | 34 | #include "gpmi-nand.h" |
| 31 | 35 | ||
| 32 | /* add our own bbt descriptor */ | 36 | /* add our own bbt descriptor */ |
| @@ -113,7 +117,7 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this) | |||
| 113 | /* We use the same ECC strength for all chunks. */ | 117 | /* We use the same ECC strength for all chunks. */ |
| 114 | geo->ecc_strength = get_ecc_strength(this); | 118 | geo->ecc_strength = get_ecc_strength(this); |
| 115 | if (!geo->ecc_strength) { | 119 | if (!geo->ecc_strength) { |
| 116 | pr_err("We get a wrong ECC strength.\n"); | 120 | pr_err("wrong ECC strength.\n"); |
| 117 | return -EINVAL; | 121 | return -EINVAL; |
| 118 | } | 122 | } |
| 119 | 123 | ||
| @@ -316,7 +320,7 @@ acquire_register_block(struct gpmi_nand_data *this, const char *res_name) | |||
| 316 | struct platform_device *pdev = this->pdev; | 320 | struct platform_device *pdev = this->pdev; |
| 317 | struct resources *res = &this->resources; | 321 | struct resources *res = &this->resources; |
| 318 | struct resource *r; | 322 | struct resource *r; |
| 319 | void *p; | 323 | void __iomem *p; |
| 320 | 324 | ||
| 321 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); | 325 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); |
| 322 | if (!r) { | 326 | if (!r) { |
| @@ -423,8 +427,8 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) | |||
| 423 | struct platform_device *pdev = this->pdev; | 427 | struct platform_device *pdev = this->pdev; |
| 424 | struct resource *r_dma; | 428 | struct resource *r_dma; |
| 425 | struct device_node *dn; | 429 | struct device_node *dn; |
| 426 | int dma_channel; | 430 | u32 dma_channel; |
| 427 | unsigned int ret; | 431 | int ret; |
| 428 | struct dma_chan *dma_chan; | 432 | struct dma_chan *dma_chan; |
| 429 | dma_cap_mask_t mask; | 433 | dma_cap_mask_t mask; |
| 430 | 434 | ||
| @@ -464,9 +468,73 @@ acquire_err: | |||
| 464 | return -EINVAL; | 468 | return -EINVAL; |
| 465 | } | 469 | } |
| 466 | 470 | ||
| 471 | static void gpmi_put_clks(struct gpmi_nand_data *this) | ||
| 472 | { | ||
| 473 | struct resources *r = &this->resources; | ||
| 474 | struct clk *clk; | ||
| 475 | int i; | ||
| 476 | |||
| 477 | for (i = 0; i < GPMI_CLK_MAX; i++) { | ||
| 478 | clk = r->clock[i]; | ||
| 479 | if (clk) { | ||
| 480 | clk_put(clk); | ||
| 481 | r->clock[i] = NULL; | ||
| 482 | } | ||
| 483 | } | ||
| 484 | } | ||
| 485 | |||
| 486 | static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = { | ||
| 487 | "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", | ||
| 488 | }; | ||
| 489 | |||
| 490 | static int __devinit gpmi_get_clks(struct gpmi_nand_data *this) | ||
| 491 | { | ||
| 492 | struct resources *r = &this->resources; | ||
| 493 | char **extra_clks = NULL; | ||
| 494 | struct clk *clk; | ||
| 495 | int i; | ||
| 496 | |||
| 497 | /* The main clock is stored in the first. */ | ||
| 498 | r->clock[0] = clk_get(this->dev, "gpmi_io"); | ||
| 499 | if (IS_ERR(r->clock[0])) | ||
| 500 | goto err_clock; | ||
| 501 | |||
| 502 | /* Get extra clocks */ | ||
| 503 | if (GPMI_IS_MX6Q(this)) | ||
| 504 | extra_clks = extra_clks_for_mx6q; | ||
| 505 | if (!extra_clks) | ||
| 506 | return 0; | ||
| 507 | |||
| 508 | for (i = 1; i < GPMI_CLK_MAX; i++) { | ||
| 509 | if (extra_clks[i - 1] == NULL) | ||
| 510 | break; | ||
| 511 | |||
| 512 | clk = clk_get(this->dev, extra_clks[i - 1]); | ||
| 513 | if (IS_ERR(clk)) | ||
| 514 | goto err_clock; | ||
| 515 | |||
| 516 | r->clock[i] = clk; | ||
| 517 | } | ||
| 518 | |||
| 519 | if (GPMI_IS_MX6Q(this)) | ||
| 520 | /* | ||
| 521 | * Set the default value for the gpmi clock in mx6q: | ||
| 522 | * | ||
| 523 | * If you want to use an ONFI nand that runs in | ||
| 524 | * Synchronous Mode, you should change the clock as needed. | ||
| 525 | */ | ||
| 526 | clk_set_rate(r->clock[0], 22000000); | ||
| 527 | |||
| 528 | return 0; | ||
| 529 | |||
| 530 | err_clock: | ||
| 531 | dev_dbg(this->dev, "failed in finding the clocks.\n"); | ||
| 532 | gpmi_put_clks(this); | ||
| 533 | return -ENOMEM; | ||
| 534 | } | ||
| 535 | |||
| 467 | static int __devinit acquire_resources(struct gpmi_nand_data *this) | 536 | static int __devinit acquire_resources(struct gpmi_nand_data *this) |
| 468 | { | 537 | { |
| 469 | struct resources *res = &this->resources; | ||
| 470 | struct pinctrl *pinctrl; | 538 | struct pinctrl *pinctrl; |
| 471 | int ret; | 539 | int ret; |
| 472 | 540 | ||
| @@ -492,12 +560,9 @@ static int __devinit acquire_resources(struct gpmi_nand_data *this) | |||
| 492 | goto exit_pin; | 560 | goto exit_pin; |
| 493 | } | 561 | } |
| 494 | 562 | ||
| 495 | res->clock = clk_get(&this->pdev->dev, NULL); | 563 | ret = gpmi_get_clks(this); |
| 496 | if (IS_ERR(res->clock)) { | 564 | if (ret) |
| 497 | pr_err("can not get the clock\n"); | ||
| 498 | ret = -ENOENT; | ||
| 499 | goto exit_clock; | 565 | goto exit_clock; |
| 500 | } | ||
| 501 | return 0; | 566 | return 0; |
| 502 | 567 | ||
| 503 | exit_clock: | 568 | exit_clock: |
| @@ -512,9 +577,7 @@ exit_regs: | |||
| 512 | 577 | ||
| 513 | static void release_resources(struct gpmi_nand_data *this) | 578 | static void release_resources(struct gpmi_nand_data *this) |
| 514 | { | 579 | { |
| 515 | struct resources *r = &this->resources; | 580 | gpmi_put_clks(this); |
| 516 | |||
| 517 | clk_put(r->clock); | ||
| 518 | release_register_block(this); | 581 | release_register_block(this); |
| 519 | release_bch_irq(this); | 582 | release_bch_irq(this); |
| 520 | release_dma_channels(this); | 583 | release_dma_channels(this); |
| @@ -667,12 +730,12 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) | |||
| 667 | struct device *dev = this->dev; | 730 | struct device *dev = this->dev; |
| 668 | 731 | ||
| 669 | /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ | 732 | /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ |
| 670 | this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA); | 733 | this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); |
| 671 | if (this->cmd_buffer == NULL) | 734 | if (this->cmd_buffer == NULL) |
| 672 | goto error_alloc; | 735 | goto error_alloc; |
| 673 | 736 | ||
| 674 | /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ | 737 | /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ |
| 675 | this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA); | 738 | this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); |
| 676 | if (this->data_buffer_dma == NULL) | 739 | if (this->data_buffer_dma == NULL) |
| 677 | goto error_alloc; | 740 | goto error_alloc; |
| 678 | 741 | ||
| @@ -930,7 +993,7 @@ exit_nfc: | |||
| 930 | return ret; | 993 | return ret; |
| 931 | } | 994 | } |
| 932 | 995 | ||
| 933 | static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 996 | static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 934 | const uint8_t *buf, int oob_required) | 997 | const uint8_t *buf, int oob_required) |
| 935 | { | 998 | { |
| 936 | struct gpmi_nand_data *this = chip->priv; | 999 | struct gpmi_nand_data *this = chip->priv; |
| @@ -972,7 +1035,7 @@ static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 972 | &payload_virt, &payload_phys); | 1035 | &payload_virt, &payload_phys); |
| 973 | if (ret) { | 1036 | if (ret) { |
| 974 | pr_err("Inadequate payload DMA buffer\n"); | 1037 | pr_err("Inadequate payload DMA buffer\n"); |
| 975 | return; | 1038 | return 0; |
| 976 | } | 1039 | } |
| 977 | 1040 | ||
| 978 | ret = send_page_prepare(this, | 1041 | ret = send_page_prepare(this, |
| @@ -1002,6 +1065,8 @@ exit_auxiliary: | |||
| 1002 | nfc_geo->payload_size, | 1065 | nfc_geo->payload_size, |
| 1003 | payload_virt, payload_phys); | 1066 | payload_virt, payload_phys); |
| 1004 | } | 1067 | } |
| 1068 | |||
| 1069 | return 0; | ||
| 1005 | } | 1070 | } |
| 1006 | 1071 | ||
| 1007 | /* | 1072 | /* |
| @@ -1064,6 +1129,9 @@ exit_auxiliary: | |||
| 1064 | * ECC-based or raw view of the page is implicit in which function it calls | 1129 | * ECC-based or raw view of the page is implicit in which function it calls |
| 1065 | * (there is a similar pair of ECC-based/raw functions for writing). | 1130 | * (there is a similar pair of ECC-based/raw functions for writing). |
| 1066 | * | 1131 | * |
| 1132 | * FIXME: The following paragraph is incorrect, now that there exist | ||
| 1133 | * ecc.read_oob_raw and ecc.write_oob_raw functions. | ||
| 1134 | * | ||
| 1067 | * Since MTD assumes the OOB is not covered by ECC, there is no pair of | 1135 | * Since MTD assumes the OOB is not covered by ECC, there is no pair of |
| 1068 | * ECC-based/raw functions for reading or or writing the OOB. The fact that the | 1136 | * ECC-based/raw functions for reading or or writing the OOB. The fact that the |
| 1069 | * caller wants an ECC-based or raw view of the page is not propagated down to | 1137 | * caller wants an ECC-based or raw view of the page is not propagated down to |
| @@ -1190,7 +1258,6 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) | |||
| 1190 | unsigned int search_area_size_in_strides; | 1258 | unsigned int search_area_size_in_strides; |
| 1191 | unsigned int stride; | 1259 | unsigned int stride; |
| 1192 | unsigned int page; | 1260 | unsigned int page; |
| 1193 | loff_t byte; | ||
| 1194 | uint8_t *buffer = chip->buffers->databuf; | 1261 | uint8_t *buffer = chip->buffers->databuf; |
| 1195 | int saved_chip_number; | 1262 | int saved_chip_number; |
| 1196 | int found_an_ncb_fingerprint = false; | 1263 | int found_an_ncb_fingerprint = false; |
| @@ -1207,9 +1274,8 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) | |||
| 1207 | dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); | 1274 | dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); |
| 1208 | 1275 | ||
| 1209 | for (stride = 0; stride < search_area_size_in_strides; stride++) { | 1276 | for (stride = 0; stride < search_area_size_in_strides; stride++) { |
| 1210 | /* Compute the page and byte addresses. */ | 1277 | /* Compute the page addresses. */ |
| 1211 | page = stride * rom_geo->stride_size_in_pages; | 1278 | page = stride * rom_geo->stride_size_in_pages; |
| 1212 | byte = page * mtd->writesize; | ||
| 1213 | 1279 | ||
| 1214 | dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); | 1280 | dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); |
| 1215 | 1281 | ||
| @@ -1251,7 +1317,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) | |||
| 1251 | unsigned int block; | 1317 | unsigned int block; |
| 1252 | unsigned int stride; | 1318 | unsigned int stride; |
| 1253 | unsigned int page; | 1319 | unsigned int page; |
| 1254 | loff_t byte; | ||
| 1255 | uint8_t *buffer = chip->buffers->databuf; | 1320 | uint8_t *buffer = chip->buffers->databuf; |
| 1256 | int saved_chip_number; | 1321 | int saved_chip_number; |
| 1257 | int status; | 1322 | int status; |
| @@ -1300,9 +1365,8 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) | |||
| 1300 | /* Loop through the first search area, writing NCB fingerprints. */ | 1365 | /* Loop through the first search area, writing NCB fingerprints. */ |
| 1301 | dev_dbg(dev, "Writing NCB fingerprints...\n"); | 1366 | dev_dbg(dev, "Writing NCB fingerprints...\n"); |
| 1302 | for (stride = 0; stride < search_area_size_in_strides; stride++) { | 1367 | for (stride = 0; stride < search_area_size_in_strides; stride++) { |
| 1303 | /* Compute the page and byte addresses. */ | 1368 | /* Compute the page addresses. */ |
| 1304 | page = stride * rom_geo->stride_size_in_pages; | 1369 | page = stride * rom_geo->stride_size_in_pages; |
| 1305 | byte = page * mtd->writesize; | ||
| 1306 | 1370 | ||
| 1307 | /* Write the first page of the current stride. */ | 1371 | /* Write the first page of the current stride. */ |
| 1308 | dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); | 1372 | dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); |
| @@ -1436,6 +1500,7 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) | |||
| 1436 | /* Adjust the ECC strength according to the chip. */ | 1500 | /* Adjust the ECC strength according to the chip. */ |
| 1437 | this->nand.ecc.strength = this->bch_geometry.ecc_strength; | 1501 | this->nand.ecc.strength = this->bch_geometry.ecc_strength; |
| 1438 | this->mtd.ecc_strength = this->bch_geometry.ecc_strength; | 1502 | this->mtd.ecc_strength = this->bch_geometry.ecc_strength; |
| 1503 | this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength; | ||
| 1439 | 1504 | ||
| 1440 | /* NAND boot init, depends on the gpmi_set_geometry(). */ | 1505 | /* NAND boot init, depends on the gpmi_set_geometry(). */ |
| 1441 | return nand_boot_init(this); | 1506 | return nand_boot_init(this); |
| @@ -1452,11 +1517,19 @@ static int gpmi_scan_bbt(struct mtd_info *mtd) | |||
| 1452 | if (ret) | 1517 | if (ret) |
| 1453 | return ret; | 1518 | return ret; |
| 1454 | 1519 | ||
| 1520 | /* | ||
| 1521 | * Can we enable the extra features, such as EDO or Sync mode? | ||
| 1522 | * | ||
| 1523 | * We do not check the return value here. That means if we fail to | ||
| 1524 | * enable the extra features, we can still run in the normal way. | ||
| 1525 | */ | ||
| 1526 | gpmi_extra_init(this); | ||
| 1527 | |||
| 1455 | /* use the default BBT implementation */ | 1528 | /* use the default BBT implementation */ |
| 1456 | return nand_default_bbt(mtd); | 1529 | return nand_default_bbt(mtd); |
| 1457 | } | 1530 | } |
| 1458 | 1531 | ||
| 1459 | void gpmi_nfc_exit(struct gpmi_nand_data *this) | 1532 | static void gpmi_nfc_exit(struct gpmi_nand_data *this) |
| 1460 | { | 1533 | { |
| 1461 | nand_release(&this->mtd); | 1534 | nand_release(&this->mtd); |
| 1462 | gpmi_free_dma_buffer(this); | 1535 | gpmi_free_dma_buffer(this); |
| @@ -1497,6 +1570,8 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) | |||
| 1497 | chip->ecc.size = 1; | 1570 | chip->ecc.size = 1; |
| 1498 | chip->ecc.strength = 8; | 1571 | chip->ecc.strength = 8; |
| 1499 | chip->ecc.layout = &gpmi_hw_ecclayout; | 1572 | chip->ecc.layout = &gpmi_hw_ecclayout; |
| 1573 | if (of_get_nand_on_flash_bbt(this->dev->of_node)) | ||
| 1574 | chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; | ||
| 1500 | 1575 | ||
| 1501 | /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ | 1576 | /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ |
| 1502 | this->bch_geometry.payload_size = 1024; | 1577 | this->bch_geometry.payload_size = 1024; |
| @@ -1579,6 +1654,8 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev) | |||
| 1579 | if (ret) | 1654 | if (ret) |
| 1580 | goto exit_nfc_init; | 1655 | goto exit_nfc_init; |
| 1581 | 1656 | ||
| 1657 | dev_info(this->dev, "driver registered.\n"); | ||
| 1658 | |||
| 1582 | return 0; | 1659 | return 0; |
| 1583 | 1660 | ||
| 1584 | exit_nfc_init: | 1661 | exit_nfc_init: |
| @@ -1586,10 +1663,12 @@ exit_nfc_init: | |||
| 1586 | exit_acquire_resources: | 1663 | exit_acquire_resources: |
| 1587 | platform_set_drvdata(pdev, NULL); | 1664 | platform_set_drvdata(pdev, NULL); |
| 1588 | kfree(this); | 1665 | kfree(this); |
| 1666 | dev_err(&pdev->dev, "driver registration failed: %d\n", ret); | ||
| 1667 | |||
| 1589 | return ret; | 1668 | return ret; |
| 1590 | } | 1669 | } |
| 1591 | 1670 | ||
| 1592 | static int __exit gpmi_nand_remove(struct platform_device *pdev) | 1671 | static int __devexit gpmi_nand_remove(struct platform_device *pdev) |
| 1593 | { | 1672 | { |
| 1594 | struct gpmi_nand_data *this = platform_get_drvdata(pdev); | 1673 | struct gpmi_nand_data *this = platform_get_drvdata(pdev); |
| 1595 | 1674 | ||
| @@ -1606,29 +1685,10 @@ static struct platform_driver gpmi_nand_driver = { | |||
| 1606 | .of_match_table = gpmi_nand_id_table, | 1685 | .of_match_table = gpmi_nand_id_table, |
| 1607 | }, | 1686 | }, |
| 1608 | .probe = gpmi_nand_probe, | 1687 | .probe = gpmi_nand_probe, |
| 1609 | .remove = __exit_p(gpmi_nand_remove), | 1688 | .remove = __devexit_p(gpmi_nand_remove), |
| 1610 | .id_table = gpmi_ids, | 1689 | .id_table = gpmi_ids, |
| 1611 | }; | 1690 | }; |
| 1612 | 1691 | module_platform_driver(gpmi_nand_driver); | |
| 1613 | static int __init gpmi_nand_init(void) | ||
| 1614 | { | ||
| 1615 | int err; | ||
| 1616 | |||
| 1617 | err = platform_driver_register(&gpmi_nand_driver); | ||
| 1618 | if (err == 0) | ||
| 1619 | printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n"); | ||
| 1620 | else | ||
| 1621 | pr_err("i.MX GPMI NAND driver registration failed\n"); | ||
| 1622 | return err; | ||
| 1623 | } | ||
| 1624 | |||
| 1625 | static void __exit gpmi_nand_exit(void) | ||
| 1626 | { | ||
| 1627 | platform_driver_unregister(&gpmi_nand_driver); | ||
| 1628 | } | ||
| 1629 | |||
| 1630 | module_init(gpmi_nand_init); | ||
| 1631 | module_exit(gpmi_nand_exit); | ||
| 1632 | 1692 | ||
| 1633 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | 1693 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); |
| 1634 | MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); | 1694 | MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); |
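The hunk above replaces the hand-rolled init/exit pair with module_platform_driver(). For orientation, that macro generates roughly the following boilerplate (a simplified sketch of the expansion, not the literal kernel macro text):

static int __init gpmi_nand_driver_init(void)
{
        return platform_driver_register(&gpmi_nand_driver);
}
module_init(gpmi_nand_driver_init);

static void __exit gpmi_nand_driver_exit(void)
{
        platform_driver_unregister(&gpmi_nand_driver);
}
module_exit(gpmi_nand_driver_exit);

The net effect is the same registration as before, minus the custom printk messages, which the patch moves into the probe path instead.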
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index ce5daa160920..7ac25c1e58f9 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h | |||
| @@ -22,14 +22,15 @@ | |||
| 22 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
| 23 | #include <linux/fsl/mxs-dma.h> | 23 | #include <linux/fsl/mxs-dma.h> |
| 24 | 24 | ||
| 25 | #define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */ | ||
| 25 | struct resources { | 26 | struct resources { |
| 26 | void *gpmi_regs; | 27 | void __iomem *gpmi_regs; |
| 27 | void *bch_regs; | 28 | void __iomem *bch_regs; |
| 28 | unsigned int bch_low_interrupt; | 29 | unsigned int bch_low_interrupt; |
| 29 | unsigned int bch_high_interrupt; | 30 | unsigned int bch_high_interrupt; |
| 30 | unsigned int dma_low_channel; | 31 | unsigned int dma_low_channel; |
| 31 | unsigned int dma_high_channel; | 32 | unsigned int dma_high_channel; |
| 32 | struct clk *clock; | 33 | struct clk *clock[GPMI_CLK_MAX]; |
| 33 | }; | 34 | }; |
| 34 | 35 | ||
| 35 | /** | 36 | /** |
| @@ -121,6 +122,11 @@ struct nand_timing { | |||
| 121 | }; | 122 | }; |
| 122 | 123 | ||
| 123 | struct gpmi_nand_data { | 124 | struct gpmi_nand_data { |
| 125 | /* flags */ | ||
| 126 | #define GPMI_ASYNC_EDO_ENABLED (1 << 0) | ||
| 127 | #define GPMI_TIMING_INIT_OK (1 << 1) | ||
| 128 | int flags; | ||
| 129 | |||
| 124 | /* System Interface */ | 130 | /* System Interface */ |
| 125 | struct device *dev; | 131 | struct device *dev; |
| 126 | struct platform_device *pdev; | 132 | struct platform_device *pdev; |
| @@ -131,6 +137,7 @@ struct gpmi_nand_data { | |||
| 131 | 137 | ||
| 132 | /* Flash Hardware */ | 138 | /* Flash Hardware */ |
| 133 | struct nand_timing timing; | 139 | struct nand_timing timing; |
| 140 | int timing_mode; | ||
| 134 | 141 | ||
| 135 | /* BCH */ | 142 | /* BCH */ |
| 136 | struct bch_geometry bch_geometry; | 143 | struct bch_geometry bch_geometry; |
| @@ -188,16 +195,28 @@ struct gpmi_nand_data { | |||
| 188 | * @data_setup_in_cycles: The data setup time, in cycles. | 195 | * @data_setup_in_cycles: The data setup time, in cycles. |
| 189 | * @data_hold_in_cycles: The data hold time, in cycles. | 196 | * @data_hold_in_cycles: The data hold time, in cycles. |
| 190 | * @address_setup_in_cycles: The address setup time, in cycles. | 197 | * @address_setup_in_cycles: The address setup time, in cycles. |
| 198 | * @device_busy_timeout: The timeout for waiting on NAND Ready/Busy; | ||
| 199 | * the programmed value is in units of 4096 | ||
| 200 | * GPMI clock cycles. | ||
| 191 | * @use_half_periods: Indicates the clock is running slowly, so the | 201 | * @use_half_periods: Indicates the clock is running slowly, so the |
| 192 | * NFC DLL should use half-periods. | 202 | * NFC DLL should use half-periods. |
| 193 | * @sample_delay_factor: The sample delay factor. | 203 | * @sample_delay_factor: The sample delay factor. |
| 204 | * @wrn_dly_sel: The delay on the GPMI write strobe. | ||
| 194 | */ | 205 | */ |
| 195 | struct gpmi_nfc_hardware_timing { | 206 | struct gpmi_nfc_hardware_timing { |
| 207 | /* for HW_GPMI_TIMING0 */ | ||
| 196 | uint8_t data_setup_in_cycles; | 208 | uint8_t data_setup_in_cycles; |
| 197 | uint8_t data_hold_in_cycles; | 209 | uint8_t data_hold_in_cycles; |
| 198 | uint8_t address_setup_in_cycles; | 210 | uint8_t address_setup_in_cycles; |
| 211 | |||
| 212 | /* for HW_GPMI_TIMING1 */ | ||
| 213 | uint16_t device_busy_timeout; | ||
| 214 | #define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* default busy timeout value.*/ | ||
| 215 | |||
| 216 | /* for HW_GPMI_CTRL1 */ | ||
| 199 | bool use_half_periods; | 217 | bool use_half_periods; |
| 200 | uint8_t sample_delay_factor; | 218 | uint8_t sample_delay_factor; |
| 219 | uint8_t wrn_dly_sel; | ||
| 201 | }; | 220 | }; |
| 202 | 221 | ||
| 203 | /** | 222 | /** |
| @@ -246,6 +265,7 @@ extern int start_dma_with_bch_irq(struct gpmi_nand_data *, | |||
| 246 | 265 | ||
| 247 | /* GPMI-NAND helper function library */ | 266 | /* GPMI-NAND helper function library */ |
| 248 | extern int gpmi_init(struct gpmi_nand_data *); | 267 | extern int gpmi_init(struct gpmi_nand_data *); |
| 268 | extern int gpmi_extra_init(struct gpmi_nand_data *); | ||
| 249 | extern void gpmi_clear_bch(struct gpmi_nand_data *); | 269 | extern void gpmi_clear_bch(struct gpmi_nand_data *); |
| 250 | extern void gpmi_dump_info(struct gpmi_nand_data *); | 270 | extern void gpmi_dump_info(struct gpmi_nand_data *); |
| 251 | extern int bch_set_geometry(struct gpmi_nand_data *); | 271 | extern int bch_set_geometry(struct gpmi_nand_data *); |
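As a rough worked example of the new device_busy_timeout field (a standalone sketch; the 100 MHz GPMI clock rate is an assumption for illustration only):

#include <stdio.h>

#define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* value programmed into HW_GPMI_TIMING1 */

int main(void)
{
        unsigned long cycles = GPMI_DEFAULT_BUSY_TIMEOUT * 4096UL; /* field unit: 4096 cycles */
        double clk_hz = 100e6; /* assumed GPMI clock rate, illustrative only */

        printf("busy timeout: %lu cycles (~%.1f ms at %.0f MHz)\n",
               cycles, cycles / clk_hz * 1000.0, clk_hz / 1e6);
        return 0;
}

With the default value of 0x500 this comes to 5,242,880 cycles, i.e. on the order of tens of milliseconds at typical GPMI clock rates.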
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h index 83431240e2f2..53397cc290fc 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h | |||
| @@ -108,6 +108,15 @@ | |||
| 108 | #define HW_GPMI_CTRL1_CLR 0x00000068 | 108 | #define HW_GPMI_CTRL1_CLR 0x00000068 |
| 109 | #define HW_GPMI_CTRL1_TOG 0x0000006c | 109 | #define HW_GPMI_CTRL1_TOG 0x0000006c |
| 110 | 110 | ||
| 111 | #define BP_GPMI_CTRL1_WRN_DLY_SEL 22 | ||
| 112 | #define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL) | ||
| 113 | #define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \ | ||
| 114 | (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL) | ||
| 115 | #define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0 | ||
| 116 | #define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1 | ||
| 117 | #define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2 | ||
| 118 | #define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3 | ||
| 119 | |||
| 111 | #define BM_GPMI_CTRL1_BCH_MODE (1 << 18) | 120 | #define BM_GPMI_CTRL1_BCH_MODE (1 << 18) |
| 112 | 121 | ||
| 113 | #define BP_GPMI_CTRL1_DLL_ENABLE 17 | 122 | #define BP_GPMI_CTRL1_DLL_ENABLE 17 |
| @@ -154,6 +163,9 @@ | |||
| 154 | 163 | ||
| 155 | #define HW_GPMI_TIMING1 0x00000080 | 164 | #define HW_GPMI_TIMING1 0x00000080 |
| 156 | #define BP_GPMI_TIMING1_BUSY_TIMEOUT 16 | 165 | #define BP_GPMI_TIMING1_BUSY_TIMEOUT 16 |
| 166 | #define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT) | ||
| 167 | #define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \ | ||
| 168 | (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT) | ||
| 157 | 169 | ||
| 158 | #define HW_GPMI_TIMING2 0x00000090 | 170 | #define HW_GPMI_TIMING2 0x00000090 |
| 159 | #define HW_GPMI_DATA 0x000000a0 | 171 | #define HW_GPMI_DATA 0x000000a0 |
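A minimal sketch of how these BP_/BM_/BF_ helpers are typically combined when programming the registers (an illustrative fragment only, not code from this patch; gpmi_regs stands for the ioremapped base held in struct resources, and hw for a struct gpmi_nfc_hardware_timing):

/* Program the busy timeout into HW_GPMI_TIMING1. */
writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw->device_busy_timeout),
       gpmi_regs + HW_GPMI_TIMING1);

/* Update only the WRN_DLY_SEL field of HW_GPMI_CTRL1, leaving other bits alone. */
reg = readl(gpmi_regs + HW_GPMI_CTRL1);
reg &= ~BM_GPMI_CTRL1_WRN_DLY_SEL;
reg |= BF_GPMI_CTRL1_WRN_DLY_SEL(hw->wrn_dly_sel);
writel(reg, gpmi_regs + HW_GPMI_CTRL1);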
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c new file mode 100644 index 000000000000..c29b7ac1f6af --- /dev/null +++ b/drivers/mtd/nand/lpc32xx_mlc.c | |||
| @@ -0,0 +1,924 @@ | |||
| 1 | /* | ||
| 2 | * Driver for NAND MLC Controller in LPC32xx | ||
| 3 | * | ||
| 4 | * Author: Roland Stigge <stigge@antcom.de> | ||
| 5 | * | ||
| 6 | * Copyright © 2011 WORK Microwave GmbH | ||
| 7 | * Copyright © 2011, 2012 Roland Stigge | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * | ||
| 20 | * NAND Flash Controller Operation: | ||
| 21 | * - Read: Auto Decode | ||
| 22 | * - Write: Auto Encode | ||
| 23 | * - Tested Page Sizes: 2048, 4096 | ||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/mtd/mtd.h> | ||
| 30 | #include <linux/mtd/nand.h> | ||
| 31 | #include <linux/mtd/partitions.h> | ||
| 32 | #include <linux/clk.h> | ||
| 33 | #include <linux/err.h> | ||
| 34 | #include <linux/delay.h> | ||
| 35 | #include <linux/completion.h> | ||
| 36 | #include <linux/interrupt.h> | ||
| 37 | #include <linux/of.h> | ||
| 38 | #include <linux/of_mtd.h> | ||
| 39 | #include <linux/of_gpio.h> | ||
| 40 | #include <linux/mtd/lpc32xx_mlc.h> | ||
| 41 | #include <linux/io.h> | ||
| 42 | #include <linux/mm.h> | ||
| 43 | #include <linux/dma-mapping.h> | ||
| 44 | #include <linux/dmaengine.h> | ||
| 45 | #include <linux/mtd/nand_ecc.h> | ||
| 46 | |||
| 47 | #define DRV_NAME "lpc32xx_mlc" | ||
| 48 | |||
| 49 | /********************************************************************** | ||
| 50 | * MLC NAND controller register offsets | ||
| 51 | **********************************************************************/ | ||
| 52 | |||
| 53 | #define MLC_BUFF(x) (x + 0x00000) | ||
| 54 | #define MLC_DATA(x) (x + 0x08000) | ||
| 55 | #define MLC_CMD(x) (x + 0x10000) | ||
| 56 | #define MLC_ADDR(x) (x + 0x10004) | ||
| 57 | #define MLC_ECC_ENC_REG(x) (x + 0x10008) | ||
| 58 | #define MLC_ECC_DEC_REG(x) (x + 0x1000C) | ||
| 59 | #define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010) | ||
| 60 | #define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014) | ||
| 61 | #define MLC_RPR(x) (x + 0x10018) | ||
| 62 | #define MLC_WPR(x) (x + 0x1001C) | ||
| 63 | #define MLC_RUBP(x) (x + 0x10020) | ||
| 64 | #define MLC_ROBP(x) (x + 0x10024) | ||
| 65 | #define MLC_SW_WP_ADD_LOW(x) (x + 0x10028) | ||
| 66 | #define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C) | ||
| 67 | #define MLC_ICR(x) (x + 0x10030) | ||
| 68 | #define MLC_TIME_REG(x) (x + 0x10034) | ||
| 69 | #define MLC_IRQ_MR(x) (x + 0x10038) | ||
| 70 | #define MLC_IRQ_SR(x) (x + 0x1003C) | ||
| 71 | #define MLC_LOCK_PR(x) (x + 0x10044) | ||
| 72 | #define MLC_ISR(x) (x + 0x10048) | ||
| 73 | #define MLC_CEH(x) (x + 0x1004C) | ||
| 74 | |||
| 75 | /********************************************************************** | ||
| 76 | * MLC_CMD bit definitions | ||
| 77 | **********************************************************************/ | ||
| 78 | #define MLCCMD_RESET 0xFF | ||
| 79 | |||
| 80 | /********************************************************************** | ||
| 81 | * MLC_ICR bit definitions | ||
| 82 | **********************************************************************/ | ||
| 83 | #define MLCICR_WPROT (1 << 3) | ||
| 84 | #define MLCICR_LARGEBLOCK (1 << 2) | ||
| 85 | #define MLCICR_LONGADDR (1 << 1) | ||
| 86 | #define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */ | ||
| 87 | |||
| 88 | /********************************************************************** | ||
| 89 | * MLC_TIME_REG bit definitions | ||
| 90 | **********************************************************************/ | ||
| 91 | #define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24) | ||
| 92 | #define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19) | ||
| 93 | #define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16) | ||
| 94 | #define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12) | ||
| 95 | #define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8) | ||
| 96 | #define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4) | ||
| 97 | #define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0) | ||
| 98 | |||
| 99 | /********************************************************************** | ||
| 100 | * MLC_IRQ_MR and MLC_IRQ_SR bit definitions | ||
| 101 | **********************************************************************/ | ||
| 102 | #define MLCIRQ_NAND_READY (1 << 5) | ||
| 103 | #define MLCIRQ_CONTROLLER_READY (1 << 4) | ||
| 104 | #define MLCIRQ_DECODE_FAILURE (1 << 3) | ||
| 105 | #define MLCIRQ_DECODE_ERROR (1 << 2) | ||
| 106 | #define MLCIRQ_ECC_READY (1 << 1) | ||
| 107 | #define MLCIRQ_WRPROT_FAULT (1 << 0) | ||
| 108 | |||
| 109 | /********************************************************************** | ||
| 110 | * MLC_LOCK_PR bit definitions | ||
| 111 | **********************************************************************/ | ||
| 112 | #define MLCLOCKPR_MAGIC 0xA25E | ||
| 113 | |||
| 114 | /********************************************************************** | ||
| 115 | * MLC_ISR bit definitions | ||
| 116 | **********************************************************************/ | ||
| 117 | #define MLCISR_DECODER_FAILURE (1 << 6) | ||
| 118 | #define MLCISR_ERRORS ((1 << 4) | (1 << 5)) | ||
| 119 | #define MLCISR_ERRORS_DETECTED (1 << 3) | ||
| 120 | #define MLCISR_ECC_READY (1 << 2) | ||
| 121 | #define MLCISR_CONTROLLER_READY (1 << 1) | ||
| 122 | #define MLCISR_NAND_READY (1 << 0) | ||
| 123 | |||
| 124 | /********************************************************************** | ||
| 125 | * MLC_CEH bit definitions | ||
| 126 | **********************************************************************/ | ||
| 127 | #define MLCCEH_NORMAL (1 << 0) | ||
| 128 | |||
| 129 | struct lpc32xx_nand_cfg_mlc { | ||
| 130 | uint32_t tcea_delay; | ||
| 131 | uint32_t busy_delay; | ||
| 132 | uint32_t nand_ta; | ||
| 133 | uint32_t rd_high; | ||
| 134 | uint32_t rd_low; | ||
| 135 | uint32_t wr_high; | ||
| 136 | uint32_t wr_low; | ||
| 137 | int wp_gpio; | ||
| 138 | struct mtd_partition *parts; | ||
| 139 | unsigned num_parts; | ||
| 140 | }; | ||
| 141 | |||
| 142 | static struct nand_ecclayout lpc32xx_nand_oob = { | ||
| 143 | .eccbytes = 40, | ||
| 144 | .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, | ||
| 145 | 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, | ||
| 146 | 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, | ||
| 147 | 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, | ||
| 148 | .oobfree = { | ||
| 149 | { .offset = 0, | ||
| 150 | .length = 6, }, | ||
| 151 | { .offset = 16, | ||
| 152 | .length = 6, }, | ||
| 153 | { .offset = 32, | ||
| 154 | .length = 6, }, | ||
| 155 | { .offset = 48, | ||
| 156 | .length = 6, }, | ||
| 157 | }, | ||
| 158 | }; | ||
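The layout above carves the 64-byte OOB area of a 2048-byte page into four 16-byte chunks, each with 6 free bytes followed by 10 ECC bytes. A quick standalone sanity check of those counts (not driver code):

#include <stdio.h>

int main(void)
{
        int chunks = 4, free_per_chunk = 6, ecc_per_chunk = 10;

        printf("ECC bytes:  %d\n", chunks * ecc_per_chunk);   /* 40, matches .eccbytes */
        printf("free bytes: %d\n", chunks * free_per_chunk);  /* 24, matches .oobfree  */
        printf("total OOB:  %d\n", chunks * (ecc_per_chunk + free_per_chunk)); /* 64 */
        return 0;
}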
| 159 | |||
| 160 | static struct nand_bbt_descr lpc32xx_nand_bbt = { | ||
| 161 | .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB | | ||
| 162 | NAND_BBT_WRITE, | ||
| 163 | .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 }, | ||
| 164 | }; | ||
| 165 | |||
| 166 | static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = { | ||
| 167 | .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB | | ||
| 168 | NAND_BBT_WRITE, | ||
| 169 | .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 }, | ||
| 170 | }; | ||
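The absolute BBT page numbers above presumably address the last two blocks of a 1 GiB device with 2048-byte pages and 64-page blocks; that geometry is an assumption for illustration, and other devices would need different pages:

#include <stdio.h>

int main(void)
{
        unsigned long total_pages = 1UL << 19;  /* 1 GiB / 2048-byte pages */
        unsigned long pages_per_block = 64;     /* 128 KiB erase blocks    */

        printf("main BBT page:   %lu\n", total_pages - 1 * pages_per_block); /* 524224 */
        printf("mirror BBT page: %lu\n", total_pages - 2 * pages_per_block); /* 524160 */
        return 0;
}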
| 171 | |||
| 172 | struct lpc32xx_nand_host { | ||
| 173 | struct nand_chip nand_chip; | ||
| 174 | struct lpc32xx_mlc_platform_data *pdata; | ||
| 175 | struct clk *clk; | ||
| 176 | struct mtd_info mtd; | ||
| 177 | void __iomem *io_base; | ||
| 178 | int irq; | ||
| 179 | struct lpc32xx_nand_cfg_mlc *ncfg; | ||
| 180 | struct completion comp_nand; | ||
| 181 | struct completion comp_controller; | ||
| 182 | uint32_t llptr; | ||
| 183 | /* | ||
| 184 | * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer | ||
| 185 | */ | ||
| 186 | dma_addr_t oob_buf_phy; | ||
| 187 | /* | ||
| 188 | * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer | ||
| 189 | */ | ||
| 190 | uint8_t *oob_buf; | ||
| 191 | /* Physical address of DMA base address */ | ||
| 192 | dma_addr_t io_base_phy; | ||
| 193 | |||
| 194 | struct completion comp_dma; | ||
| 195 | struct dma_chan *dma_chan; | ||
| 196 | struct dma_slave_config dma_slave_config; | ||
| 197 | struct scatterlist sgl; | ||
| 198 | uint8_t *dma_buf; | ||
| 199 | uint8_t *dummy_buf; | ||
| 200 | int mlcsubpages; /* number of 512bytes-subpages */ | ||
| 201 | }; | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Activate/Deactivate DMA Operation: | ||
| 205 | * | ||
| 206 | * Using the PL080 DMA Controller for transferring the 512-byte subpages | ||
| 207 | * instead of doing readl() / writel() in a loop slows it down significantly. | ||
| 208 | * Measurements via getnstimeofday() upon 512-byte subpage reads reveal: | ||
| 209 | * | ||
| 210 | * - readl() of 128 x 32 bits in a loop: ~20us | ||
| 211 | * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us | ||
| 212 | * - DMA read of 512 bytes (32 bit, no bursts): ~100us | ||
| 213 | * | ||
| 214 | * This applies to the transfer itself. In the DMA case it covers only | ||
| 215 | * the wait_for_completion() (DMA setup _not_ included). | ||
| 216 | * | ||
| 217 | * Note that the 512-byte subpage transfer is done directly from/to a | ||
| 218 | * FIFO/buffer inside the NAND controller. Most of the time (~400-800us | ||
| 219 | * for a 2048-byte page) is spent waiting for the NAND IRQ anyway (the | ||
| 220 | * NAND controller transferring data between its internal buffer and the | ||
| 221 | * NAND chip). | ||
| 222 | * | ||
| 223 | * Therefore, the PL080 DMA is disabled by default for now. | ||
| 224 | * | ||
| 225 | */ | ||
| 226 | static int use_dma; | ||
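use_dma is compiled in as 0 here. One conceivable way to make the switch tunable at load time would be a module parameter, for example (a hypothetical addition, not part of the driver as posted):

/* Hypothetical: expose the flag as a read-only module parameter. */
module_param(use_dma, int, 0444);
MODULE_PARM_DESC(use_dma, "Use PL080 DMA for 512-byte subpage transfers (default 0)");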
| 227 | |||
| 228 | static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host) | ||
| 229 | { | ||
| 230 | uint32_t clkrate, tmp; | ||
| 231 | |||
| 232 | /* Reset MLC controller */ | ||
| 233 | writel(MLCCMD_RESET, MLC_CMD(host->io_base)); | ||
| 234 | udelay(1000); | ||
| 235 | |||
| 236 | /* Get base clock for MLC block */ | ||
| 237 | clkrate = clk_get_rate(host->clk); | ||
| 238 | if (clkrate == 0) | ||
| 239 | clkrate = 104000000; | ||
| 240 | |||
| 241 | /* Unlock MLC_ICR | ||
| 242 | * (among others, will be locked again automatically) */ | ||
| 243 | writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); | ||
| 244 | |||
| 245 | /* Configure MLC Controller: Large Block, 5 Byte Address */ | ||
| 246 | tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR; | ||
| 247 | writel(tmp, MLC_ICR(host->io_base)); | ||
| 248 | |||
| 249 | /* Unlock MLC_TIME_REG | ||
| 250 | * (among others, will be locked again automatically) */ | ||
| 251 | writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); | ||
| 252 | |||
| 253 | /* Compute clock setup values, see LPC and NAND manual */ | ||
| 254 | tmp = 0; | ||
| 255 | tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1); | ||
| 256 | tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1); | ||
| 257 | tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1); | ||
| 258 | tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1); | ||
| 259 | tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low); | ||
| 260 | tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1); | ||
| 261 | tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low); | ||
| 262 | writel(tmp, MLC_TIME_REG(host->io_base)); | ||
| 263 | |||
| 264 | /* Enable IRQ for CONTROLLER_READY and NAND_READY */ | ||
| 265 | writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY, | ||
| 266 | MLC_IRQ_MR(host->io_base)); | ||
| 267 | |||
| 268 | /* Normal nCE operation: nCE controlled by controller */ | ||
| 269 | writel(MLCCEH_NORMAL, MLC_CEH(host->io_base)); | ||
| 270 | } | ||
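The timing fields above are derived by dividing the MLC base clock by the per-parameter value from the device tree and adding one cycle. Treating those parameters as rates in Hz, as the division suggests, a standalone sketch of the arithmetic looks like this (the 104 MHz clock and 40 MHz parameter are illustrative assumptions):

#include <stdio.h>

int main(void)
{
        unsigned long clkrate = 104000000;      /* fallback MLC base clock      */
        unsigned long tcea_delay = 40000000;    /* example nxp,tcea-delay value */

        /* Same formula as lpc32xx_nand_setup(): cycles = clk / rate + 1 */
        unsigned long cycles = clkrate / tcea_delay + 1;

        printf("tCEA delay: %lu clock cycles (field is masked to 2 bits)\n", cycles);
        return 0;
}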
| 271 | |||
| 272 | /* | ||
| 273 | * Hardware specific access to control lines | ||
| 274 | */ | ||
| 275 | static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, | ||
| 276 | unsigned int ctrl) | ||
| 277 | { | ||
| 278 | struct nand_chip *nand_chip = mtd->priv; | ||
| 279 | struct lpc32xx_nand_host *host = nand_chip->priv; | ||
| 280 | |||
| 281 | if (cmd != NAND_CMD_NONE) { | ||
| 282 | if (ctrl & NAND_CLE) | ||
| 283 | writel(cmd, MLC_CMD(host->io_base)); | ||
| 284 | else | ||
| 285 | writel(cmd, MLC_ADDR(host->io_base)); | ||
| 286 | } | ||
| 287 | } | ||
| 288 | |||
| 289 | /* | ||
| 290 | * Read Device Ready (NAND device _and_ controller ready) | ||
| 291 | */ | ||
| 292 | static int lpc32xx_nand_device_ready(struct mtd_info *mtd) | ||
| 293 | { | ||
| 294 | struct nand_chip *nand_chip = mtd->priv; | ||
| 295 | struct lpc32xx_nand_host *host = nand_chip->priv; | ||
| 296 | |||
| 297 | if ((readb(MLC_ISR(host->io_base)) & | ||
| 298 | (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) == | ||
| 299 | (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) | ||
| 300 | return 1; | ||
| 301 | |||
| 302 | return 0; | ||
| 303 | } | ||
| 304 | |||
| 305 | static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host) | ||
| 306 | { | ||
| 307 | uint8_t sr; | ||
| 308 | |||
| 309 | /* Clear interrupt flag by reading status */ | ||
| 310 | sr = readb(MLC_IRQ_SR(host->io_base)); | ||
| 311 | if (sr & MLCIRQ_NAND_READY) | ||
| 312 | complete(&host->comp_nand); | ||
| 313 | if (sr & MLCIRQ_CONTROLLER_READY) | ||
| 314 | complete(&host->comp_controller); | ||
| 315 | |||
| 316 | return IRQ_HANDLED; | ||
| 317 | } | ||
| 318 | |||
| 319 | static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip) | ||
| 320 | { | ||
| 321 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 322 | |||
| 323 | if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY) | ||
| 324 | goto exit; | ||
| 325 | |||
| 326 | wait_for_completion(&host->comp_nand); | ||
| 327 | |||
| 328 | while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) { | ||
| 329 | /* Seems to be delayed sometimes by controller */ | ||
| 330 | dev_dbg(&mtd->dev, "Warning: NAND not ready.\n"); | ||
| 331 | cpu_relax(); | ||
| 332 | } | ||
| 333 | |||
| 334 | exit: | ||
| 335 | return NAND_STATUS_READY; | ||
| 336 | } | ||
| 337 | |||
| 338 | static int lpc32xx_waitfunc_controller(struct mtd_info *mtd, | ||
| 339 | struct nand_chip *chip) | ||
| 340 | { | ||
| 341 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 342 | |||
| 343 | if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY) | ||
| 344 | goto exit; | ||
| 345 | |||
| 346 | wait_for_completion(&host->comp_controller); | ||
| 347 | |||
| 348 | while (!(readb(MLC_ISR(host->io_base)) & | ||
| 349 | MLCISR_CONTROLLER_READY)) { | ||
| 350 | dev_dbg(&mtd->dev, "Warning: Controller not ready.\n"); | ||
| 351 | cpu_relax(); | ||
| 352 | } | ||
| 353 | |||
| 354 | exit: | ||
| 355 | return NAND_STATUS_READY; | ||
| 356 | } | ||
| 357 | |||
| 358 | static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) | ||
| 359 | { | ||
| 360 | lpc32xx_waitfunc_nand(mtd, chip); | ||
| 361 | lpc32xx_waitfunc_controller(mtd, chip); | ||
| 362 | |||
| 363 | return NAND_STATUS_READY; | ||
| 364 | } | ||
| 365 | |||
| 366 | /* | ||
| 367 | * Enable NAND write protect | ||
| 368 | */ | ||
| 369 | static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host) | ||
| 370 | { | ||
| 371 | if (gpio_is_valid(host->ncfg->wp_gpio)) | ||
| 372 | gpio_set_value(host->ncfg->wp_gpio, 0); | ||
| 373 | } | ||
| 374 | |||
| 375 | /* | ||
| 376 | * Disable NAND write protect | ||
| 377 | */ | ||
| 378 | static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host) | ||
| 379 | { | ||
| 380 | if (gpio_is_valid(host->ncfg->wp_gpio)) | ||
| 381 | gpio_set_value(host->ncfg->wp_gpio, 1); | ||
| 382 | } | ||
| 383 | |||
| 384 | static void lpc32xx_dma_complete_func(void *completion) | ||
| 385 | { | ||
| 386 | complete(completion); | ||
| 387 | } | ||
| 388 | |||
| 389 | static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len, | ||
| 390 | enum dma_transfer_direction dir) | ||
| 391 | { | ||
| 392 | struct nand_chip *chip = mtd->priv; | ||
| 393 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 394 | struct dma_async_tx_descriptor *desc; | ||
| 395 | int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; | ||
| 396 | int res; | ||
| 397 | |||
| 398 | sg_init_one(&host->sgl, mem, len); | ||
| 399 | |||
| 400 | res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 401 | DMA_BIDIRECTIONAL); | ||
| 402 | if (res != 1) { | ||
| 403 | dev_err(mtd->dev.parent, "Failed to map sg list\n"); | ||
| 404 | return -ENXIO; | ||
| 405 | } | ||
| 406 | desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, | ||
| 407 | flags); | ||
| 408 | if (!desc) { | ||
| 409 | dev_err(mtd->dev.parent, "Failed to prepare slave sg\n"); | ||
| 410 | goto out1; | ||
| 411 | } | ||
| 412 | |||
| 413 | init_completion(&host->comp_dma); | ||
| 414 | desc->callback = lpc32xx_dma_complete_func; | ||
| 415 | desc->callback_param = &host->comp_dma; | ||
| 416 | |||
| 417 | dmaengine_submit(desc); | ||
| 418 | dma_async_issue_pending(host->dma_chan); | ||
| 419 | |||
| 420 | wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000)); | ||
| 421 | |||
| 422 | dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 423 | DMA_BIDIRECTIONAL); | ||
| 424 | return 0; | ||
| 425 | out1: | ||
| 426 | dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 427 | DMA_BIDIRECTIONAL); | ||
| 428 | return -ENXIO; | ||
| 429 | } | ||
| 430 | |||
| 431 | static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 432 | uint8_t *buf, int oob_required, int page) | ||
| 433 | { | ||
| 434 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 435 | int i, j; | ||
| 436 | uint8_t *oobbuf = chip->oob_poi; | ||
| 437 | uint32_t mlc_isr; | ||
| 438 | int res; | ||
| 439 | uint8_t *dma_buf; | ||
| 440 | bool dma_mapped; | ||
| 441 | |||
| 442 | if ((void *)buf <= high_memory) { | ||
| 443 | dma_buf = buf; | ||
| 444 | dma_mapped = true; | ||
| 445 | } else { | ||
| 446 | dma_buf = host->dma_buf; | ||
| 447 | dma_mapped = false; | ||
| 448 | } | ||
| 449 | |||
| 450 | /* Writing Command and Address */ | ||
| 451 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
| 452 | |||
| 453 | /* For all sub-pages */ | ||
| 454 | for (i = 0; i < host->mlcsubpages; i++) { | ||
| 455 | /* Start Auto Decode Command */ | ||
| 456 | writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base)); | ||
| 457 | |||
| 458 | /* Wait for Controller Ready */ | ||
| 459 | lpc32xx_waitfunc_controller(mtd, chip); | ||
| 460 | |||
| 461 | /* Check ECC Error status */ | ||
| 462 | mlc_isr = readl(MLC_ISR(host->io_base)); | ||
| 463 | if (mlc_isr & MLCISR_DECODER_FAILURE) { | ||
| 464 | mtd->ecc_stats.failed++; | ||
| 465 | dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__); | ||
| 466 | } else if (mlc_isr & MLCISR_ERRORS_DETECTED) { | ||
| 467 | mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1; | ||
| 468 | } | ||
| 469 | |||
| 470 | /* Read 512 + 16 Bytes */ | ||
| 471 | if (use_dma) { | ||
| 472 | res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, | ||
| 473 | DMA_DEV_TO_MEM); | ||
| 474 | if (res) | ||
| 475 | return res; | ||
| 476 | } else { | ||
| 477 | for (j = 0; j < (512 >> 2); j++) { | ||
| 478 | *((uint32_t *)(buf)) = | ||
| 479 | readl(MLC_BUFF(host->io_base)); | ||
| 480 | buf += 4; | ||
| 481 | } | ||
| 482 | } | ||
| 483 | for (j = 0; j < (16 >> 2); j++) { | ||
| 484 | *((uint32_t *)(oobbuf)) = | ||
| 485 | readl(MLC_BUFF(host->io_base)); | ||
| 486 | oobbuf += 4; | ||
| 487 | } | ||
| 488 | } | ||
| 489 | |||
| 490 | if (use_dma && !dma_mapped) | ||
| 491 | memcpy(buf, dma_buf, mtd->writesize); | ||
| 492 | |||
| 493 | return 0; | ||
| 494 | } | ||
| 495 | |||
| 496 | static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd, | ||
| 497 | struct nand_chip *chip, | ||
| 498 | const uint8_t *buf, int oob_required) | ||
| 499 | { | ||
| 500 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 501 | const uint8_t *oobbuf = chip->oob_poi; | ||
| 502 | uint8_t *dma_buf = (uint8_t *)buf; | ||
| 503 | int res; | ||
| 504 | int i, j; | ||
| 505 | |||
| 506 | if (use_dma && (void *)buf >= high_memory) { | ||
| 507 | dma_buf = host->dma_buf; | ||
| 508 | memcpy(dma_buf, buf, mtd->writesize); | ||
| 509 | } | ||
| 510 | |||
| 511 | for (i = 0; i < host->mlcsubpages; i++) { | ||
| 512 | /* Start Encode */ | ||
| 513 | writeb(0x00, MLC_ECC_ENC_REG(host->io_base)); | ||
| 514 | |||
| 515 | /* Write 512 + 6 Bytes to Buffer */ | ||
| 516 | if (use_dma) { | ||
| 517 | res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, | ||
| 518 | DMA_MEM_TO_DEV); | ||
| 519 | if (res) | ||
| 520 | return res; | ||
| 521 | } else { | ||
| 522 | for (j = 0; j < (512 >> 2); j++) { | ||
| 523 | writel(*((uint32_t *)(buf)), | ||
| 524 | MLC_BUFF(host->io_base)); | ||
| 525 | buf += 4; | ||
| 526 | } | ||
| 527 | } | ||
| 528 | writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base)); | ||
| 529 | oobbuf += 4; | ||
| 530 | writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base)); | ||
| 531 | oobbuf += 12; | ||
| 532 | |||
| 533 | /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */ | ||
| 534 | writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base)); | ||
| 535 | |||
| 536 | /* Wait for Controller Ready */ | ||
| 537 | lpc32xx_waitfunc_controller(mtd, chip); | ||
| 538 | } | ||
| 539 | return 0; | ||
| 540 | } | ||
| 541 | |||
| 542 | static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 543 | const uint8_t *buf, int oob_required, int page, | ||
| 544 | int cached, int raw) | ||
| 545 | { | ||
| 546 | int res; | ||
| 547 | |||
| 548 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | ||
| 549 | res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required); | ||
| 550 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); | ||
| 551 | lpc32xx_waitfunc(mtd, chip); | ||
| 552 | |||
| 553 | return res; | ||
| 554 | } | ||
| 555 | |||
| 556 | static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 557 | int page) | ||
| 558 | { | ||
| 559 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 560 | |||
| 561 | /* Read whole page - necessary with MLC controller! */ | ||
| 562 | lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page); | ||
| 563 | |||
| 564 | return 0; | ||
| 565 | } | ||
| 566 | |||
| 567 | static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 568 | int page) | ||
| 569 | { | ||
| 570 | /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */ | ||
| 571 | return 0; | ||
| 572 | } | ||
| 573 | |||
| 574 | /* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */ | ||
| 575 | static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode) | ||
| 576 | { | ||
| 577 | /* Always enabled! */ | ||
| 578 | } | ||
| 579 | |||
| 580 | static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host) | ||
| 581 | { | ||
| 582 | struct mtd_info *mtd = &host->mtd; | ||
| 583 | dma_cap_mask_t mask; | ||
| 584 | |||
| 585 | if (!host->pdata || !host->pdata->dma_filter) { | ||
| 586 | dev_err(mtd->dev.parent, "no DMA platform data\n"); | ||
| 587 | return -ENOENT; | ||
| 588 | } | ||
| 589 | |||
| 590 | dma_cap_zero(mask); | ||
| 591 | dma_cap_set(DMA_SLAVE, mask); | ||
| 592 | host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, | ||
| 593 | "nand-mlc"); | ||
| 594 | if (!host->dma_chan) { | ||
| 595 | dev_err(mtd->dev.parent, "Failed to request DMA channel\n"); | ||
| 596 | return -EBUSY; | ||
| 597 | } | ||
| 598 | |||
| 599 | /* | ||
| 600 | * Set the direction to a sensible value even though the dmaengine | ||
| 601 | * driver should ignore it. With the default (DMA_MEM_TO_MEM), the | ||
| 602 | * amba-pl08x driver complains about an "alien transfer direction". | ||
| 603 | */ | ||
| 604 | host->dma_slave_config.direction = DMA_DEV_TO_MEM; | ||
| 605 | host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 606 | host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 607 | host->dma_slave_config.src_maxburst = 128; | ||
| 608 | host->dma_slave_config.dst_maxburst = 128; | ||
| 609 | /* DMA controller does flow control: */ | ||
| 610 | host->dma_slave_config.device_fc = false; | ||
| 611 | host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy); | ||
| 612 | host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy); | ||
| 613 | if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { | ||
| 614 | dev_err(mtd->dev.parent, "Failed to setup DMA slave\n"); | ||
| 615 | goto out1; | ||
| 616 | } | ||
| 617 | |||
| 618 | return 0; | ||
| 619 | out1: | ||
| 620 | dma_release_channel(host->dma_chan); | ||
| 621 | return -ENXIO; | ||
| 622 | } | ||
| 623 | |||
| 624 | static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev) | ||
| 625 | { | ||
| 626 | struct lpc32xx_nand_cfg_mlc *ncfg; | ||
| 627 | struct device_node *np = dev->of_node; | ||
| 628 | |||
| 629 | ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL); | ||
| 630 | if (!ncfg) { | ||
| 631 | dev_err(dev, "could not allocate memory for platform data\n"); | ||
| 632 | return NULL; | ||
| 633 | } | ||
| 634 | |||
| 635 | of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay); | ||
| 636 | of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay); | ||
| 637 | of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta); | ||
| 638 | of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high); | ||
| 639 | of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low); | ||
| 640 | of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high); | ||
| 641 | of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low); | ||
| 642 | |||
| 643 | if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta || | ||
| 644 | !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high || | ||
| 645 | !ncfg->wr_low) { | ||
| 646 | dev_err(dev, "chip parameters not specified correctly\n"); | ||
| 647 | return NULL; | ||
| 648 | } | ||
| 649 | |||
| 650 | ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0); | ||
| 651 | |||
| 652 | return ncfg; | ||
| 653 | } | ||
| 654 | |||
| 655 | /* | ||
| 656 | * Probe for NAND controller | ||
| 657 | */ | ||
| 658 | static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) | ||
| 659 | { | ||
| 660 | struct lpc32xx_nand_host *host; | ||
| 661 | struct mtd_info *mtd; | ||
| 662 | struct nand_chip *nand_chip; | ||
| 663 | struct resource *rc; | ||
| 664 | int res; | ||
| 665 | struct mtd_part_parser_data ppdata = {}; | ||
| 666 | |||
| 667 | /* Allocate memory for the device structure (and zero it) */ | ||
| 668 | host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); | ||
| 669 | if (!host) { | ||
| 670 | dev_err(&pdev->dev, "failed to allocate device structure.\n"); | ||
| 671 | return -ENOMEM; | ||
| 672 | } | ||
| 673 | |||
| 674 | rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 675 | if (rc == NULL) { | ||
| 676 | dev_err(&pdev->dev, "No memory resource found for device!\r\n"); | ||
| 677 | return -ENXIO; | ||
| 678 | } | ||
| 679 | |||
| 680 | host->io_base = devm_request_and_ioremap(&pdev->dev, rc); | ||
| 681 | if (host->io_base == NULL) { | ||
| 682 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
| 683 | return -EIO; | ||
| 684 | } | ||
| 685 | host->io_base_phy = rc->start; | ||
| 686 | |||
| 687 | mtd = &host->mtd; | ||
| 688 | nand_chip = &host->nand_chip; | ||
| 689 | if (pdev->dev.of_node) | ||
| 690 | host->ncfg = lpc32xx_parse_dt(&pdev->dev); | ||
| 691 | if (!host->ncfg) { | ||
| 692 | dev_err(&pdev->dev, | ||
| 693 | "Missing or bad NAND config from device tree\n"); | ||
| 694 | return -ENOENT; | ||
| 695 | } | ||
| 696 | if (host->ncfg->wp_gpio == -EPROBE_DEFER) | ||
| 697 | return -EPROBE_DEFER; | ||
| 698 | if (gpio_is_valid(host->ncfg->wp_gpio) && | ||
| 699 | gpio_request(host->ncfg->wp_gpio, "NAND WP")) { | ||
| 700 | dev_err(&pdev->dev, "GPIO not available\n"); | ||
| 701 | return -EBUSY; | ||
| 702 | } | ||
| 703 | lpc32xx_wp_disable(host); | ||
| 704 | |||
| 705 | host->pdata = pdev->dev.platform_data; | ||
| 706 | |||
| 707 | nand_chip->priv = host; /* link the private data structures */ | ||
| 708 | mtd->priv = nand_chip; | ||
| 709 | mtd->owner = THIS_MODULE; | ||
| 710 | mtd->dev.parent = &pdev->dev; | ||
| 711 | |||
| 712 | /* Get NAND clock */ | ||
| 713 | host->clk = clk_get(&pdev->dev, NULL); | ||
| 714 | if (IS_ERR(host->clk)) { | ||
| 715 | dev_err(&pdev->dev, "Clock initialization failure\n"); | ||
| 716 | res = -ENOENT; | ||
| 717 | goto err_exit1; | ||
| 718 | } | ||
| 719 | clk_enable(host->clk); | ||
| 720 | |||
| 721 | nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; | ||
| 722 | nand_chip->dev_ready = lpc32xx_nand_device_ready; | ||
| 723 | nand_chip->chip_delay = 25; /* us */ | ||
| 724 | nand_chip->IO_ADDR_R = MLC_DATA(host->io_base); | ||
| 725 | nand_chip->IO_ADDR_W = MLC_DATA(host->io_base); | ||
| 726 | |||
| 727 | /* Init NAND controller */ | ||
| 728 | lpc32xx_nand_setup(host); | ||
| 729 | |||
| 730 | platform_set_drvdata(pdev, host); | ||
| 731 | |||
| 732 | /* Initialize function pointers */ | ||
| 733 | nand_chip->ecc.hwctl = lpc32xx_ecc_enable; | ||
| 734 | nand_chip->ecc.read_page_raw = lpc32xx_read_page; | ||
| 735 | nand_chip->ecc.read_page = lpc32xx_read_page; | ||
| 736 | nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel; | ||
| 737 | nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel; | ||
| 738 | nand_chip->ecc.write_oob = lpc32xx_write_oob; | ||
| 739 | nand_chip->ecc.read_oob = lpc32xx_read_oob; | ||
| 740 | nand_chip->ecc.strength = 4; | ||
| 741 | nand_chip->write_page = lpc32xx_write_page; | ||
| 742 | nand_chip->waitfunc = lpc32xx_waitfunc; | ||
| 743 | |||
| 744 | nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; | ||
| 745 | nand_chip->bbt_td = &lpc32xx_nand_bbt; | ||
| 746 | nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror; | ||
| 747 | |||
| 748 | /* bitflip_threshold defaults to ecc_strength anyway, but it is only | ||
| 749 | * set that way later, in add_mtd_device(). Until then it is 0, which | ||
| 750 | * causes bad block table scanning errors in nand_scan_tail(), so set | ||
| 751 | * it here already. */ | ||
| 752 | mtd->bitflip_threshold = nand_chip->ecc.strength; | ||
| 753 | |||
| 754 | if (use_dma) { | ||
| 755 | res = lpc32xx_dma_setup(host); | ||
| 756 | if (res) { | ||
| 757 | res = -EIO; | ||
| 758 | goto err_exit2; | ||
| 759 | } | ||
| 760 | } | ||
| 761 | |||
| 762 | /* | ||
| 763 | * Scan to find the existence of the device and determine | ||
| 764 | * whether it is a small-block or large-block NAND device. | ||
| 765 | */ | ||
| 766 | if (nand_scan_ident(mtd, 1, NULL)) { | ||
| 767 | res = -ENXIO; | ||
| 768 | goto err_exit3; | ||
| 769 | } | ||
| 770 | |||
| 771 | host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); | ||
| 772 | if (!host->dma_buf) { | ||
| 773 | dev_err(&pdev->dev, "Error allocating dma_buf memory\n"); | ||
| 774 | res = -ENOMEM; | ||
| 775 | goto err_exit3; | ||
| 776 | } | ||
| 777 | |||
| 778 | host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); | ||
| 779 | if (!host->dummy_buf) { | ||
| 780 | dev_err(&pdev->dev, "Error allocating dummy_buf memory\n"); | ||
| 781 | res = -ENOMEM; | ||
| 782 | goto err_exit3; | ||
| 783 | } | ||
| 784 | |||
| 785 | nand_chip->ecc.mode = NAND_ECC_HW; | ||
| 786 | nand_chip->ecc.size = mtd->writesize; | ||
| 787 | nand_chip->ecc.layout = &lpc32xx_nand_oob; | ||
| 788 | host->mlcsubpages = mtd->writesize / 512; | ||
| 789 | |||
| 790 | /* initially clear interrupt status */ | ||
| 791 | readb(MLC_IRQ_SR(host->io_base)); | ||
| 792 | |||
| 793 | init_completion(&host->comp_nand); | ||
| 794 | init_completion(&host->comp_controller); | ||
| 795 | |||
| 796 | host->irq = platform_get_irq(pdev, 0); | ||
| 797 | if ((host->irq < 0) || (host->irq >= NR_IRQS)) { | ||
| 798 | dev_err(&pdev->dev, "failed to get platform irq\n"); | ||
| 799 | res = -EINVAL; | ||
| 800 | goto err_exit3; | ||
| 801 | } | ||
| 802 | |||
| 803 | if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, | ||
| 804 | IRQF_TRIGGER_HIGH, DRV_NAME, host)) { | ||
| 805 | dev_err(&pdev->dev, "Error requesting NAND IRQ\n"); | ||
| 806 | res = -ENXIO; | ||
| 807 | goto err_exit3; | ||
| 808 | } | ||
| 809 | |||
| 810 | /* | ||
| 811 | * Fill out all the uninitialized function pointers with the defaults | ||
| 812 | * and scan for a bad block table if appropriate. | ||
| 813 | */ | ||
| 814 | if (nand_scan_tail(mtd)) { | ||
| 815 | res = -ENXIO; | ||
| 816 | goto err_exit4; | ||
| 817 | } | ||
| 818 | |||
| 819 | mtd->name = DRV_NAME; | ||
| 820 | |||
| 821 | ppdata.of_node = pdev->dev.of_node; | ||
| 822 | res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, | ||
| 823 | host->ncfg->num_parts); | ||
| 824 | if (!res) | ||
| 825 | return res; | ||
| 826 | |||
| 827 | nand_release(mtd); | ||
| 828 | |||
| 829 | err_exit4: | ||
| 830 | free_irq(host->irq, host); | ||
| 831 | err_exit3: | ||
| 832 | if (use_dma) | ||
| 833 | dma_release_channel(host->dma_chan); | ||
| 834 | err_exit2: | ||
| 835 | clk_disable(host->clk); | ||
| 836 | clk_put(host->clk); | ||
| 837 | platform_set_drvdata(pdev, NULL); | ||
| 838 | err_exit1: | ||
| 839 | lpc32xx_wp_enable(host); | ||
| 840 | gpio_free(host->ncfg->wp_gpio); | ||
| 841 | |||
| 842 | return res; | ||
| 843 | } | ||
| 844 | |||
| 845 | /* | ||
| 846 | * Remove NAND device | ||
| 847 | */ | ||
| 848 | static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) | ||
| 849 | { | ||
| 850 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 851 | struct mtd_info *mtd = &host->mtd; | ||
| 852 | |||
| 853 | nand_release(mtd); | ||
| 854 | free_irq(host->irq, host); | ||
| 855 | if (use_dma) | ||
| 856 | dma_release_channel(host->dma_chan); | ||
| 857 | |||
| 858 | clk_disable(host->clk); | ||
| 859 | clk_put(host->clk); | ||
| 860 | platform_set_drvdata(pdev, NULL); | ||
| 861 | |||
| 862 | lpc32xx_wp_enable(host); | ||
| 863 | gpio_free(host->ncfg->wp_gpio); | ||
| 864 | |||
| 865 | return 0; | ||
| 866 | } | ||
| 867 | |||
| 868 | #ifdef CONFIG_PM | ||
| 869 | static int lpc32xx_nand_resume(struct platform_device *pdev) | ||
| 870 | { | ||
| 871 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 872 | |||
| 873 | /* Re-enable NAND clock */ | ||
| 874 | clk_enable(host->clk); | ||
| 875 | |||
| 876 | /* Fresh init of NAND controller */ | ||
| 877 | lpc32xx_nand_setup(host); | ||
| 878 | |||
| 879 | /* Disable write protect */ | ||
| 880 | lpc32xx_wp_disable(host); | ||
| 881 | |||
| 882 | return 0; | ||
| 883 | } | ||
| 884 | |||
| 885 | static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm) | ||
| 886 | { | ||
| 887 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 888 | |||
| 889 | /* Enable write protect for safety */ | ||
| 890 | lpc32xx_wp_enable(host); | ||
| 891 | |||
| 892 | /* Disable clock */ | ||
| 893 | clk_disable(host->clk); | ||
| 894 | return 0; | ||
| 895 | } | ||
| 896 | |||
| 897 | #else | ||
| 898 | #define lpc32xx_nand_resume NULL | ||
| 899 | #define lpc32xx_nand_suspend NULL | ||
| 900 | #endif | ||
| 901 | |||
| 902 | static const struct of_device_id lpc32xx_nand_match[] = { | ||
| 903 | { .compatible = "nxp,lpc3220-mlc" }, | ||
| 904 | { /* sentinel */ }, | ||
| 905 | }; | ||
| 906 | MODULE_DEVICE_TABLE(of, lpc32xx_nand_match); | ||
| 907 | |||
| 908 | static struct platform_driver lpc32xx_nand_driver = { | ||
| 909 | .probe = lpc32xx_nand_probe, | ||
| 910 | .remove = __devexit_p(lpc32xx_nand_remove), | ||
| 911 | .resume = lpc32xx_nand_resume, | ||
| 912 | .suspend = lpc32xx_nand_suspend, | ||
| 913 | .driver = { | ||
| 914 | .name = DRV_NAME, | ||
| 915 | .owner = THIS_MODULE, | ||
| 916 | .of_match_table = of_match_ptr(lpc32xx_nand_match), | ||
| 917 | }, | ||
| 918 | }; | ||
| 919 | |||
| 920 | module_platform_driver(lpc32xx_nand_driver); | ||
| 921 | |||
| 922 | MODULE_LICENSE("GPL"); | ||
| 923 | MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); | ||
| 924 | MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller"); | ||
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c new file mode 100644 index 000000000000..32409c45d479 --- /dev/null +++ b/drivers/mtd/nand/lpc32xx_slc.c | |||
| @@ -0,0 +1,1039 @@ | |||
| 1 | /* | ||
| 2 | * NXP LPC32XX NAND SLC driver | ||
| 3 | * | ||
| 4 | * Authors: | ||
| 5 | * Kevin Wells <kevin.wells@nxp.com> | ||
| 6 | * Roland Stigge <stigge@antcom.de> | ||
| 7 | * | ||
| 8 | * Copyright © 2011 NXP Semiconductors | ||
| 9 | * Copyright © 2012 Roland Stigge | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License as published by | ||
| 13 | * the Free Software Foundation; either version 2 of the License, or | ||
| 14 | * (at your option) any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/slab.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/mtd/mtd.h> | ||
| 26 | #include <linux/mtd/nand.h> | ||
| 27 | #include <linux/mtd/partitions.h> | ||
| 28 | #include <linux/clk.h> | ||
| 29 | #include <linux/err.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | #include <linux/io.h> | ||
| 32 | #include <linux/mm.h> | ||
| 33 | #include <linux/dma-mapping.h> | ||
| 34 | #include <linux/dmaengine.h> | ||
| 35 | #include <linux/mtd/nand_ecc.h> | ||
| 36 | #include <linux/gpio.h> | ||
| 37 | #include <linux/of.h> | ||
| 38 | #include <linux/of_mtd.h> | ||
| 39 | #include <linux/of_gpio.h> | ||
| 40 | #include <linux/mtd/lpc32xx_slc.h> | ||
| 41 | |||
| 42 | #define LPC32XX_MODNAME "lpc32xx-nand" | ||
| 43 | |||
| 44 | /********************************************************************** | ||
| 45 | * SLC NAND controller register offsets | ||
| 46 | **********************************************************************/ | ||
| 47 | |||
| 48 | #define SLC_DATA(x) (x + 0x000) | ||
| 49 | #define SLC_ADDR(x) (x + 0x004) | ||
| 50 | #define SLC_CMD(x) (x + 0x008) | ||
| 51 | #define SLC_STOP(x) (x + 0x00C) | ||
| 52 | #define SLC_CTRL(x) (x + 0x010) | ||
| 53 | #define SLC_CFG(x) (x + 0x014) | ||
| 54 | #define SLC_STAT(x) (x + 0x018) | ||
| 55 | #define SLC_INT_STAT(x) (x + 0x01C) | ||
| 56 | #define SLC_IEN(x) (x + 0x020) | ||
| 57 | #define SLC_ISR(x) (x + 0x024) | ||
| 58 | #define SLC_ICR(x) (x + 0x028) | ||
| 59 | #define SLC_TAC(x) (x + 0x02C) | ||
| 60 | #define SLC_TC(x) (x + 0x030) | ||
| 61 | #define SLC_ECC(x) (x + 0x034) | ||
| 62 | #define SLC_DMA_DATA(x) (x + 0x038) | ||
| 63 | |||
| 64 | /********************************************************************** | ||
| 65 | * slc_ctrl register definitions | ||
| 66 | **********************************************************************/ | ||
| 67 | #define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */ | ||
| 68 | #define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */ | ||
| 69 | #define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */ | ||
| 70 | |||
| 71 | /********************************************************************** | ||
| 72 | * slc_cfg register definitions | ||
| 73 | **********************************************************************/ | ||
| 74 | #define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */ | ||
| 75 | #define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */ | ||
| 76 | #define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */ | ||
| 77 | #define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */ | ||
| 78 | #define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */ | ||
| 79 | #define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */ | ||
| 80 | |||
| 81 | /********************************************************************** | ||
| 82 | * slc_stat register definitions | ||
| 83 | **********************************************************************/ | ||
| 84 | #define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */ | ||
| 85 | #define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */ | ||
| 86 | #define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */ | ||
| 87 | |||
| 88 | /********************************************************************** | ||
| 89 | * slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions | ||
| 90 | **********************************************************************/ | ||
| 91 | #define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */ | ||
| 92 | #define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */ | ||
| 93 | |||
| 94 | /********************************************************************** | ||
| 95 | * slc_tac register definitions | ||
| 96 | **********************************************************************/ | ||
| 97 | /* Clock setting for RDY write sample wait time in 2*n clocks */ | ||
| 98 | #define SLCTAC_WDR(n) (((n) & 0xF) << 28) | ||
| 99 | /* Write pulse width in clock cycles, 1 to 16 clocks */ | ||
| 100 | #define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24) | ||
| 101 | /* Write hold time of control and data signals, 1 to 16 clocks */ | ||
| 102 | #define SLCTAC_WHOLD(n) (((n) & 0xF) << 20) | ||
| 103 | /* Write setup time of control and data signals, 1 to 16 clocks */ | ||
| 104 | #define SLCTAC_WSETUP(n) (((n) & 0xF) << 16) | ||
| 105 | /* Clock setting for RDY read sample wait time in 2*n clocks */ | ||
| 106 | #define SLCTAC_RDR(n) (((n) & 0xF) << 12) | ||
| 107 | /* Read pulse width in clock cycles, 1 to 16 clocks */ | ||
| 108 | #define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8) | ||
| 109 | /* Read hold time of control and data signals, 1 to 16 clocks */ | ||
| 110 | #define SLCTAC_RHOLD(n) (((n) & 0xF) << 4) | ||
| 111 | /* Read setup time of control and data signals, 1 to 16 clocks */ | ||
| 112 | #define SLCTAC_RSETUP(n) (((n) & 0xF) << 0) | ||
| 113 | |||
| 114 | /********************************************************************** | ||
| 115 | * slc_ecc register definitions | ||
| 116 | **********************************************************************/ | ||
| 117 | /* ECC line parity fetch macros */ | ||
| 118 | #define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF) | ||
| 119 | #define SLCECC_TO_COLPAR(n) ((n) & 0x3F) | ||
| 120 | |||
| 121 | /* | ||
| 122 | * DMA requires storage space for the DMA local buffer and the hardware ECC | ||
| 123 | * storage area. The DMA local buffer is only used if DMA mapping fails | ||
| 124 | * during runtime. | ||
| 125 | */ | ||
| 126 | #define LPC32XX_DMA_DATA_SIZE 4096 | ||
| 127 | #define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4) | ||
| 128 | |||
| 129 | /* Number of bytes used for ECC stored in NAND per 256 bytes */ | ||
| 130 | #define LPC32XX_SLC_DEV_ECC_BYTES 3 | ||
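Spelled out, the buffer sizing above works out as follows (a standalone arithmetic sketch, not driver code):

#include <stdio.h>

int main(void)
{
        int page = 4096, chunk = 256;
        int chunks = page / chunk;      /* 16 ECC chunks per worst-case page */

        printf("ECC save area: %d bytes\n", chunks * 4); /* one 32-bit word per chunk    */
        printf("ECC in OOB:    %d bytes\n", chunks * 3); /* 3 device ECC bytes per chunk */
        return 0;
}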
| 131 | |||
| 132 | /* | ||
| 133 | * If the NAND base clock frequency can't be fetched, this frequency will be | ||
| 134 | * used instead as the base. This rate is used to setup the timing registers | ||
| 135 | * used for NAND accesses. | ||
| 136 | */ | ||
| 137 | #define LPC32XX_DEF_BUS_RATE 133250000 | ||
| 138 | |||
| 139 | /* Milliseconds for DMA FIFO timeout (unlikely anyway) */ | ||
| 140 | #define LPC32XX_DMA_TIMEOUT 100 | ||
| 141 | |||
| 142 | /* | ||
| 143 | * NAND ECC Layout for small page NAND devices | ||
| 144 | * Note: For large and huge page devices, the default layouts are used | ||
| 145 | */ | ||
| 146 | static struct nand_ecclayout lpc32xx_nand_oob_16 = { | ||
| 147 | .eccbytes = 6, | ||
| 148 | .eccpos = {10, 11, 12, 13, 14, 15}, | ||
| 149 | .oobfree = { | ||
| 150 | { .offset = 0, .length = 4 }, | ||
| 151 | { .offset = 6, .length = 4 }, | ||
| 152 | }, | ||
| 153 | }; | ||
| 154 | |||
| 155 | static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; | ||
| 156 | static u8 mirror_pattern[] = {'1', 't', 'b', 'B' }; | ||
| 157 | |||
| 158 | /* | ||
| 159 | * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6 | ||
| 160 | * Note: Large page devices use the default layout | ||
| 161 | */ | ||
| 162 | static struct nand_bbt_descr bbt_smallpage_main_descr = { | ||
| 163 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | ||
| 164 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, | ||
| 165 | .offs = 0, | ||
| 166 | .len = 4, | ||
| 167 | .veroffs = 6, | ||
| 168 | .maxblocks = 4, | ||
| 169 | .pattern = bbt_pattern | ||
| 170 | }; | ||
| 171 | |||
| 172 | static struct nand_bbt_descr bbt_smallpage_mirror_descr = { | ||
| 173 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | ||
| 174 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, | ||
| 175 | .offs = 0, | ||
| 176 | .len = 4, | ||
| 177 | .veroffs = 6, | ||
| 178 | .maxblocks = 4, | ||
| 179 | .pattern = mirror_pattern | ||
| 180 | }; | ||
| 181 | |||
| 182 | /* | ||
| 183 | * NAND platform configuration structure | ||
| 184 | */ | ||
| 185 | struct lpc32xx_nand_cfg_slc { | ||
| 186 | uint32_t wdr_clks; | ||
| 187 | uint32_t wwidth; | ||
| 188 | uint32_t whold; | ||
| 189 | uint32_t wsetup; | ||
| 190 | uint32_t rdr_clks; | ||
| 191 | uint32_t rwidth; | ||
| 192 | uint32_t rhold; | ||
| 193 | uint32_t rsetup; | ||
| 194 | bool use_bbt; | ||
| 195 | int wp_gpio; | ||
| 196 | struct mtd_partition *parts; | ||
| 197 | unsigned num_parts; | ||
| 198 | }; | ||
| 199 | |||
| 200 | struct lpc32xx_nand_host { | ||
| 201 | struct nand_chip nand_chip; | ||
| 202 | struct lpc32xx_slc_platform_data *pdata; | ||
| 203 | struct clk *clk; | ||
| 204 | struct mtd_info mtd; | ||
| 205 | void __iomem *io_base; | ||
| 206 | struct lpc32xx_nand_cfg_slc *ncfg; | ||
| 207 | |||
| 208 | struct completion comp; | ||
| 209 | struct dma_chan *dma_chan; | ||
| 210 | uint32_t dma_buf_len; | ||
| 211 | struct dma_slave_config dma_slave_config; | ||
| 212 | struct scatterlist sgl; | ||
| 213 | |||
| 214 | /* | ||
| 215 | * DMA and CPU addresses of ECC work area and data buffer | ||
| 216 | */ | ||
| 217 | uint32_t *ecc_buf; | ||
| 218 | uint8_t *data_buf; | ||
| 219 | dma_addr_t io_base_dma; | ||
| 220 | }; | ||
| 221 | |||
| 222 | static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host) | ||
| 223 | { | ||
| 224 | uint32_t clkrate, tmp; | ||
| 225 | |||
| 226 | /* Reset SLC controller */ | ||
| 227 | writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base)); | ||
| 228 | udelay(1000); | ||
| 229 | |||
| 230 | /* Basic setup */ | ||
| 231 | writel(0, SLC_CFG(host->io_base)); | ||
| 232 | writel(0, SLC_IEN(host->io_base)); | ||
| 233 | writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN), | ||
| 234 | SLC_ICR(host->io_base)); | ||
| 235 | |||
| 236 | /* Get base clock for SLC block */ | ||
| 237 | clkrate = clk_get_rate(host->clk); | ||
| 238 | if (clkrate == 0) | ||
| 239 | clkrate = LPC32XX_DEF_BUS_RATE; | ||
| 240 | |||
| 241 | /* Compute clock setup values */ | ||
| 242 | tmp = SLCTAC_WDR(host->ncfg->wdr_clks) | | ||
| 243 | SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) | | ||
| 244 | SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) | | ||
| 245 | SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) | | ||
| 246 | SLCTAC_RDR(host->ncfg->rdr_clks) | | ||
| 247 | SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) | | ||
| 248 | SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) | | ||
| 249 | SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup)); | ||
| 250 | writel(tmp, SLC_TAC(host->io_base)); | ||
| 251 | } | ||
| 252 | |||
| 253 | /* | ||
| 254 | * Hardware specific access to control lines | ||
| 255 | */ | ||
| 256 | static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, | ||
| 257 | unsigned int ctrl) | ||
| 258 | { | ||
| 259 | uint32_t tmp; | ||
| 260 | struct nand_chip *chip = mtd->priv; | ||
| 261 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 262 | |||
| 263 | /* Does CE state need to be changed? */ | ||
| 264 | tmp = readl(SLC_CFG(host->io_base)); | ||
| 265 | if (ctrl & NAND_NCE) | ||
| 266 | tmp |= SLCCFG_CE_LOW; | ||
| 267 | else | ||
| 268 | tmp &= ~SLCCFG_CE_LOW; | ||
| 269 | writel(tmp, SLC_CFG(host->io_base)); | ||
| 270 | |||
| 271 | if (cmd != NAND_CMD_NONE) { | ||
| 272 | if (ctrl & NAND_CLE) | ||
| 273 | writel(cmd, SLC_CMD(host->io_base)); | ||
| 274 | else | ||
| 275 | writel(cmd, SLC_ADDR(host->io_base)); | ||
| 276 | } | ||
| 277 | } | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Read the Device Ready pin | ||
| 281 | */ | ||
| 282 | static int lpc32xx_nand_device_ready(struct mtd_info *mtd) | ||
| 283 | { | ||
| 284 | struct nand_chip *chip = mtd->priv; | ||
| 285 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 286 | int rdy = 0; | ||
| 287 | |||
| 288 | if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0) | ||
| 289 | rdy = 1; | ||
| 290 | |||
| 291 | return rdy; | ||
| 292 | } | ||
| 293 | |||
| 294 | /* | ||
| 295 | * Enable NAND write protect | ||
| 296 | */ | ||
| 297 | static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host) | ||
| 298 | { | ||
| 299 | if (gpio_is_valid(host->ncfg->wp_gpio)) | ||
| 300 | gpio_set_value(host->ncfg->wp_gpio, 0); | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Disable NAND write protect | ||
| 305 | */ | ||
| 306 | static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host) | ||
| 307 | { | ||
| 308 | if (gpio_is_valid(host->ncfg->wp_gpio)) | ||
| 309 | gpio_set_value(host->ncfg->wp_gpio, 1); | ||
| 310 | } | ||
| 311 | |||
| 312 | /* | ||
| 313 | * Prepares SLC for transfers with H/W ECC enabled | ||
| 314 | */ | ||
| 315 | static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode) | ||
| 316 | { | ||
| 317 | /* Hardware ECC is enabled automatically by the controller as needed */ | ||
| 318 | } | ||
| 319 | |||
| 320 | /* | ||
| 321 | * Calculates the ECC for the data | ||
| 322 | */ | ||
| 323 | static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd, | ||
| 324 | const unsigned char *buf, | ||
| 325 | unsigned char *code) | ||
| 326 | { | ||
| 327 | /* | ||
| 328 | * ECC is calculated automatically in hardware during syndrome read | ||
| 329 | * and write operations, so it doesn't need to be calculated here. | ||
| 330 | */ | ||
| 331 | return 0; | ||
| 332 | } | ||
| 333 | |||
| 334 | /* | ||
| 335 | * Read a single byte from NAND device | ||
| 336 | */ | ||
| 337 | static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd) | ||
| 338 | { | ||
| 339 | struct nand_chip *chip = mtd->priv; | ||
| 340 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 341 | |||
| 342 | return (uint8_t)readl(SLC_DATA(host->io_base)); | ||
| 343 | } | ||
| 344 | |||
| 345 | /* | ||
| 346 | * Simple device read without ECC | ||
| 347 | */ | ||
| 348 | static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | ||
| 349 | { | ||
| 350 | struct nand_chip *chip = mtd->priv; | ||
| 351 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 352 | |||
| 353 | /* Direct device read with no ECC */ | ||
| 354 | while (len-- > 0) | ||
| 355 | *buf++ = (uint8_t)readl(SLC_DATA(host->io_base)); | ||
| 356 | } | ||
| 357 | |||
| 358 | /* | ||
| 359 | * Simple device write without ECC | ||
| 360 | */ | ||
| 361 | static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
| 362 | { | ||
| 363 | struct nand_chip *chip = mtd->priv; | ||
| 364 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 365 | |||
| 366 | /* Direct device write with no ECC */ | ||
| 367 | while (len-- > 0) | ||
| 368 | writel((uint32_t)*buf++, SLC_DATA(host->io_base)); | ||
| 369 | } | ||
| 370 | |||
| 371 | /* | ||
| 372 | * Read the OOB data from the device without ECC using the FIFO method | ||
| 373 | */ | ||
| 374 | static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd, | ||
| 375 | struct nand_chip *chip, int page) | ||
| 376 | { | ||
| 377 | chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); | ||
| 378 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 379 | |||
| 380 | return 0; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * Write the OOB data to the device without ECC using the FIFO method | ||
| 385 | */ | ||
| 386 | static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd, | ||
| 387 | struct nand_chip *chip, int page) | ||
| 388 | { | ||
| 389 | int status; | ||
| 390 | |||
| 391 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); | ||
| 392 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 393 | |||
| 394 | /* Send command to program the OOB data */ | ||
| 395 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); | ||
| 396 | |||
| 397 | status = chip->waitfunc(mtd, chip); | ||
| 398 | |||
| 399 | return status & NAND_STATUS_FAIL ? -EIO : 0; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Fills in the ECC fields in the OOB buffer with the hardware generated ECC | ||
| 404 | */ | ||
| 405 | static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count) | ||
| 406 | { | ||
| 407 | int i; | ||
| 408 | |||
| 409 | for (i = 0; i < (count * 3); i += 3) { | ||
| 410 | uint32_t ce = ecc[i / 3]; | ||
| 411 | ce = ~(ce << 2) & 0xFFFFFF; | ||
| 412 | spare[i + 2] = (uint8_t)(ce & 0xFF); | ||
| 413 | ce >>= 8; | ||
| 414 | spare[i + 1] = (uint8_t)(ce & 0xFF); | ||
| 415 | ce >>= 8; | ||
| 416 | spare[i] = (uint8_t)(ce & 0xFF); | ||
| 417 | } | ||
| 418 | } | ||
| 419 | |||
| 420 | static void lpc32xx_dma_complete_func(void *completion) | ||
| 421 | { | ||
| 422 | complete(completion); | ||
| 423 | } | ||
| 424 | |||
| 425 | static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma, | ||
| 426 | void *mem, int len, enum dma_transfer_direction dir) | ||
| 427 | { | ||
| 428 | struct nand_chip *chip = mtd->priv; | ||
| 429 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 430 | struct dma_async_tx_descriptor *desc; | ||
| 431 | int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; | ||
| 432 | int res; | ||
| 433 | |||
| 434 | host->dma_slave_config.direction = dir; | ||
| 435 | host->dma_slave_config.src_addr = dma; | ||
| 436 | host->dma_slave_config.dst_addr = dma; | ||
| 437 | host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 438 | host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 439 | host->dma_slave_config.src_maxburst = 4; | ||
| 440 | host->dma_slave_config.dst_maxburst = 4; | ||
| 441 | /* DMA controller does flow control: */ | ||
| 442 | host->dma_slave_config.device_fc = false; | ||
| 443 | if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { | ||
| 444 | dev_err(mtd->dev.parent, "Failed to setup DMA slave\n"); | ||
| 445 | return -ENXIO; | ||
| 446 | } | ||
| 447 | |||
| 448 | sg_init_one(&host->sgl, mem, len); | ||
| 449 | |||
| 450 | res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 451 | DMA_BIDIRECTIONAL); | ||
| 452 | if (res != 1) { | ||
| 453 | dev_err(mtd->dev.parent, "Failed to map sg list\n"); | ||
| 454 | return -ENXIO; | ||
| 455 | } | ||
| 456 | desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, | ||
| 457 | flags); | ||
| 458 | if (!desc) { | ||
| 459 | dev_err(mtd->dev.parent, "Failed to prepare slave sg\n"); | ||
| 460 | goto out1; | ||
| 461 | } | ||
| 462 | |||
| 463 | init_completion(&host->comp); | ||
| 464 | desc->callback = lpc32xx_dma_complete_func; | ||
| 465 | desc->callback_param = &host->comp; | ||
| 466 | |||
| 467 | dmaengine_submit(desc); | ||
| 468 | dma_async_issue_pending(host->dma_chan); | ||
| 469 | |||
| 470 | wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000)); | ||
| 471 | |||
| 472 | dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 473 | DMA_BIDIRECTIONAL); | ||
| 474 | |||
| 475 | return 0; | ||
| 476 | out1: | ||
| 477 | dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, | ||
| 478 | DMA_BIDIRECTIONAL); | ||
| 479 | return -ENXIO; | ||
| 480 | } | ||
| 481 | |||
| 482 | /* | ||
| 483 | * DMA read/write transfers with ECC support | ||
| 484 | */ | ||
| 485 | static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages, | ||
| 486 | int read) | ||
| 487 | { | ||
| 488 | struct nand_chip *chip = mtd->priv; | ||
| 489 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 490 | int i, status = 0; | ||
| 491 | unsigned long timeout; | ||
| 492 | int res; | ||
| 493 | enum dma_transfer_direction dir = | ||
| 494 | read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; | ||
| 495 | uint8_t *dma_buf; | ||
| 496 | bool dma_mapped; | ||
| 497 | |||
| 498 | if ((void *)buf <= high_memory) { | ||
| 499 | dma_buf = buf; | ||
| 500 | dma_mapped = true; | ||
| 501 | } else { | ||
| 502 | dma_buf = host->data_buf; | ||
| 503 | dma_mapped = false; | ||
| 504 | if (!read) | ||
| 505 | memcpy(host->data_buf, buf, mtd->writesize); | ||
| 506 | } | ||
| 507 | |||
| 508 | if (read) { | ||
| 509 | writel(readl(SLC_CFG(host->io_base)) | | ||
| 510 | SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC | | ||
| 511 | SLCCFG_DMA_BURST, SLC_CFG(host->io_base)); | ||
| 512 | } else { | ||
| 513 | writel((readl(SLC_CFG(host->io_base)) | | ||
| 514 | SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) & | ||
| 515 | ~SLCCFG_DMA_DIR, | ||
| 516 | SLC_CFG(host->io_base)); | ||
| 517 | } | ||
| 518 | |||
| 519 | /* Clear initial ECC */ | ||
| 520 | writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base)); | ||
| 521 | |||
| 522 | /* Transfer size is data area only */ | ||
| 523 | writel(mtd->writesize, SLC_TC(host->io_base)); | ||
| 524 | |||
| 525 | /* Start transfer in the NAND controller */ | ||
| 526 | writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START, | ||
| 527 | SLC_CTRL(host->io_base)); | ||
| 528 | |||
| 529 | for (i = 0; i < chip->ecc.steps; i++) { | ||
| 530 | /* Data */ | ||
| 531 | res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma), | ||
| 532 | dma_buf + i * chip->ecc.size, | ||
| 533 | mtd->writesize / chip->ecc.steps, dir); | ||
| 534 | if (res) | ||
| 535 | return res; | ||
| 536 | |||
| 537 | /* Always _read_ ECC */ | ||
| 538 | if (i == chip->ecc.steps - 1) | ||
| 539 | break; | ||
| 540 | if (!read) /* ECC availability delayed on write */ | ||
| 541 | udelay(10); | ||
| 542 | res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma), | ||
| 543 | &host->ecc_buf[i], 4, DMA_DEV_TO_MEM); | ||
| 544 | if (res) | ||
| 545 | return res; | ||
| 546 | } | ||
| 547 | |||
| 548 | /* | ||
| 549 | * According to NXP, the DMA can be finished here, but the NAND | ||
| 550 | * controller may still have buffered data. After porting to the | ||
| 551 | * dmaengine driver (amba-pl080), the condition (DMA_FIFO empty) | ||
| 552 | * appears to always be true according to tests. Keeping the check for | ||
| 553 | * safety reasons for now. | ||
| 554 | */ | ||
| 555 | if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) { | ||
| 556 | dev_warn(mtd->dev.parent, "FIFO not empty!\n"); | ||
| 557 | timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT); | ||
| 558 | while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) && | ||
| 559 | time_before(jiffies, timeout)) | ||
| 560 | cpu_relax(); | ||
| 561 | if (!time_before(jiffies, timeout)) { | ||
| 562 | dev_err(mtd->dev.parent, "FIFO held data too long\n"); | ||
| 563 | status = -EIO; | ||
| 564 | } | ||
| 565 | } | ||
| 566 | |||
| 567 | /* Read last calculated ECC value */ | ||
| 568 | if (!read) | ||
| 569 | udelay(10); | ||
| 570 | host->ecc_buf[chip->ecc.steps - 1] = | ||
| 571 | readl(SLC_ECC(host->io_base)); | ||
| 572 | |||
| 573 | /* Flush DMA */ | ||
| 574 | dmaengine_terminate_all(host->dma_chan); | ||
| 575 | |||
| 576 | if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO || | ||
| 577 | readl(SLC_TC(host->io_base))) { | ||
| 578 | /* Something is left in the FIFO, something is wrong */ | ||
| 579 | dev_err(mtd->dev.parent, "DMA FIFO failure\n"); | ||
| 580 | status = -EIO; | ||
| 581 | } | ||
| 582 | |||
| 583 | /* Stop DMA & HW ECC */ | ||
| 584 | writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START, | ||
| 585 | SLC_CTRL(host->io_base)); | ||
| 586 | writel(readl(SLC_CFG(host->io_base)) & | ||
| 587 | ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC | | ||
| 588 | SLCCFG_DMA_BURST), SLC_CFG(host->io_base)); | ||
| 589 | |||
| 590 | if (!dma_mapped && read) | ||
| 591 | memcpy(buf, host->data_buf, mtd->writesize); | ||
| 592 | |||
| 593 | return status; | ||
| 594 | } | ||
| 595 | |||
| 596 | /* | ||
| 597 | * Read the data and OOB data from the device, use ECC correction with the | ||
| 598 | * data, disable ECC for the OOB data | ||
| 599 | */ | ||
| 600 | static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd, | ||
| 601 | struct nand_chip *chip, uint8_t *buf, | ||
| 602 | int oob_required, int page) | ||
| 603 | { | ||
| 604 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 605 | int stat, i, status; | ||
| 606 | uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE]; | ||
| 607 | |||
| 608 | /* Issue read command */ | ||
| 609 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
| 610 | |||
| 611 | /* Read data and oob, calculate ECC */ | ||
| 612 | status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1); | ||
| 613 | |||
| 614 | /* Get OOB data */ | ||
| 615 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 616 | |||
| 617 | /* Convert to stored ECC format */ | ||
| 618 | lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps); | ||
| 619 | |||
| 620 | /* Pointer to ECC data retrieved from NAND spare area */ | ||
| 621 | oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0]; | ||
| 622 | |||
| 623 | for (i = 0; i < chip->ecc.steps; i++) { | ||
| 624 | stat = chip->ecc.correct(mtd, buf, oobecc, | ||
| 625 | &tmpecc[i * chip->ecc.bytes]); | ||
| 626 | if (stat < 0) | ||
| 627 | mtd->ecc_stats.failed++; | ||
| 628 | else | ||
| 629 | mtd->ecc_stats.corrected += stat; | ||
| 630 | |||
| 631 | buf += chip->ecc.size; | ||
| 632 | oobecc += chip->ecc.bytes; | ||
| 633 | } | ||
| 634 | |||
| 635 | return status; | ||
| 636 | } | ||
| 637 | |||
| 638 | /* | ||
| 639 | * Read the data and OOB data from the device, no ECC correction with the | ||
| 640 | * data or OOB data | ||
| 641 | */ | ||
| 642 | static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd, | ||
| 643 | struct nand_chip *chip, | ||
| 644 | uint8_t *buf, int oob_required, | ||
| 645 | int page) | ||
| 646 | { | ||
| 647 | /* Issue read command */ | ||
| 648 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
| 649 | |||
| 650 | /* Raw reads can just use the FIFO interface */ | ||
| 651 | chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps); | ||
| 652 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 653 | |||
| 654 | return 0; | ||
| 655 | } | ||
| 656 | |||
| 657 | /* | ||
| 658 | * Write the data and OOB data to the device, use ECC with the data, | ||
| 659 | * disable ECC for the OOB data | ||
| 660 | */ | ||
| 661 | static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd, | ||
| 662 | struct nand_chip *chip, | ||
| 663 | const uint8_t *buf, int oob_required) | ||
| 664 | { | ||
| 665 | struct lpc32xx_nand_host *host = chip->priv; | ||
| 666 | uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0]; | ||
| 667 | int error; | ||
| 668 | |||
| 669 | /* Write data, calculate ECC on outbound data */ | ||
| 670 | error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0); | ||
| 671 | if (error) | ||
| 672 | return error; | ||
| 673 | |||
| 674 | /* | ||
| 675 | * The calculated ECC needs some manual work done to it before | ||
| 676 | * committing it to NAND. Process the calculated ECC and place | ||
| 677 | * the resultant values directly into the OOB buffer. */ | ||
| 678 | lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps); | ||
| 679 | |||
| 680 | /* Write ECC data to device */ | ||
| 681 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 682 | return 0; | ||
| 683 | } | ||
| 684 | |||
| 685 | /* | ||
| 686 | * Write the data and OOB data to the device, no ECC correction with the | ||
| 687 | * data or OOB data | ||
| 688 | */ | ||
| 689 | static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd, | ||
| 690 | struct nand_chip *chip, | ||
| 691 | const uint8_t *buf, | ||
| 692 | int oob_required) | ||
| 693 | { | ||
| 694 | /* Raw writes can just use the FIFO interface */ | ||
| 695 | chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps); | ||
| 696 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
| 697 | return 0; | ||
| 698 | } | ||
| 699 | |||
| 700 | static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host) | ||
| 701 | { | ||
| 702 | struct mtd_info *mtd = &host->mtd; | ||
| 703 | dma_cap_mask_t mask; | ||
| 704 | |||
| 705 | if (!host->pdata || !host->pdata->dma_filter) { | ||
| 706 | dev_err(mtd->dev.parent, "no DMA platform data\n"); | ||
| 707 | return -ENOENT; | ||
| 708 | } | ||
| 709 | |||
| 710 | dma_cap_zero(mask); | ||
| 711 | dma_cap_set(DMA_SLAVE, mask); | ||
| 712 | host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, | ||
| 713 | "nand-slc"); | ||
| 714 | if (!host->dma_chan) { | ||
| 715 | dev_err(mtd->dev.parent, "Failed to request DMA channel\n"); | ||
| 716 | return -EBUSY; | ||
| 717 | } | ||
| 718 | |||
| 719 | return 0; | ||
| 720 | } | ||
| 721 | |||
| 722 | static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev) | ||
| 723 | { | ||
| 724 | struct lpc32xx_nand_cfg_slc *ncfg; | ||
| 725 | struct device_node *np = dev->of_node; | ||
| 726 | |||
| 727 | ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL); | ||
| 728 | if (!ncfg) { | ||
| 729 | dev_err(dev, "could not allocate memory for NAND config\n"); | ||
| 730 | return NULL; | ||
| 731 | } | ||
| 732 | |||
| 733 | of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks); | ||
| 734 | of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth); | ||
| 735 | of_property_read_u32(np, "nxp,whold", &ncfg->whold); | ||
| 736 | of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup); | ||
| 737 | of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks); | ||
| 738 | of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth); | ||
| 739 | of_property_read_u32(np, "nxp,rhold", &ncfg->rhold); | ||
| 740 | of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup); | ||
| 741 | |||
| 742 | if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold || | ||
| 743 | !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth || | ||
| 744 | !ncfg->rhold || !ncfg->rsetup) { | ||
| 745 | dev_err(dev, "chip parameters not specified correctly\n"); | ||
| 746 | return NULL; | ||
| 747 | } | ||
| 748 | |||
| 749 | ncfg->use_bbt = of_get_nand_on_flash_bbt(np); | ||
| 750 | ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0); | ||
| 751 | |||
| 752 | return ncfg; | ||
| 753 | } | ||
| 754 | |||
| 755 | /* | ||
| 756 | * Probe for NAND controller | ||
| 757 | */ | ||
| 758 | static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) | ||
| 759 | { | ||
| 760 | struct lpc32xx_nand_host *host; | ||
| 761 | struct mtd_info *mtd; | ||
| 762 | struct nand_chip *chip; | ||
| 763 | struct resource *rc; | ||
| 764 | struct mtd_part_parser_data ppdata = {}; | ||
| 765 | int res; | ||
| 766 | |||
| 767 | rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 768 | if (rc == NULL) { | ||
| 769 | dev_err(&pdev->dev, "No memory resource found for device\n"); | ||
| 770 | return -EBUSY; | ||
| 771 | } | ||
| 772 | |||
| 773 | /* Allocate memory for the device structure (and zero it) */ | ||
| 774 | host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); | ||
| 775 | if (!host) { | ||
| 776 | dev_err(&pdev->dev, "failed to allocate device structure\n"); | ||
| 777 | return -ENOMEM; | ||
| 778 | } | ||
| 779 | host->io_base_dma = rc->start; | ||
| 780 | |||
| 781 | host->io_base = devm_request_and_ioremap(&pdev->dev, rc); | ||
| 782 | if (host->io_base == NULL) { | ||
| 783 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
| 784 | return -ENOMEM; | ||
| 785 | } | ||
| 786 | |||
| 787 | if (pdev->dev.of_node) | ||
| 788 | host->ncfg = lpc32xx_parse_dt(&pdev->dev); | ||
| 789 | if (!host->ncfg) { | ||
| 790 | dev_err(&pdev->dev, | ||
| 791 | "Missing or bad NAND config from device tree\n"); | ||
| 792 | return -ENOENT; | ||
| 793 | } | ||
| 794 | if (host->ncfg->wp_gpio == -EPROBE_DEFER) | ||
| 795 | return -EPROBE_DEFER; | ||
| 796 | if (gpio_is_valid(host->ncfg->wp_gpio) && | ||
| 797 | gpio_request(host->ncfg->wp_gpio, "NAND WP")) { | ||
| 798 | dev_err(&pdev->dev, "GPIO not available\n"); | ||
| 799 | return -EBUSY; | ||
| 800 | } | ||
| 801 | lpc32xx_wp_disable(host); | ||
| 802 | |||
| 803 | host->pdata = pdev->dev.platform_data; | ||
| 804 | |||
| 805 | mtd = &host->mtd; | ||
| 806 | chip = &host->nand_chip; | ||
| 807 | chip->priv = host; | ||
| 808 | mtd->priv = chip; | ||
| 809 | mtd->owner = THIS_MODULE; | ||
| 810 | mtd->dev.parent = &pdev->dev; | ||
| 811 | |||
| 812 | /* Get NAND clock */ | ||
| 813 | host->clk = clk_get(&pdev->dev, NULL); | ||
| 814 | if (IS_ERR(host->clk)) { | ||
| 815 | dev_err(&pdev->dev, "Clock failure\n"); | ||
| 816 | res = -ENOENT; | ||
| 817 | goto err_exit1; | ||
| 818 | } | ||
| 819 | clk_enable(host->clk); | ||
| 820 | |||
| 821 | /* Set NAND IO addresses and command/ready functions */ | ||
| 822 | chip->IO_ADDR_R = SLC_DATA(host->io_base); | ||
| 823 | chip->IO_ADDR_W = SLC_DATA(host->io_base); | ||
| 824 | chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; | ||
| 825 | chip->dev_ready = lpc32xx_nand_device_ready; | ||
| 826 | chip->chip_delay = 20; /* 20us command delay time */ | ||
| 827 | |||
| 828 | /* Init NAND controller */ | ||
| 829 | lpc32xx_nand_setup(host); | ||
| 830 | |||
| 831 | platform_set_drvdata(pdev, host); | ||
| 832 | |||
| 833 | /* NAND callbacks for LPC32xx SLC hardware */ | ||
| 834 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; | ||
| 835 | chip->read_byte = lpc32xx_nand_read_byte; | ||
| 836 | chip->read_buf = lpc32xx_nand_read_buf; | ||
| 837 | chip->write_buf = lpc32xx_nand_write_buf; | ||
| 838 | chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome; | ||
| 839 | chip->ecc.read_page = lpc32xx_nand_read_page_syndrome; | ||
| 840 | chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome; | ||
| 841 | chip->ecc.write_page = lpc32xx_nand_write_page_syndrome; | ||
| 842 | chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome; | ||
| 843 | chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome; | ||
| 844 | chip->ecc.calculate = lpc32xx_nand_ecc_calculate; | ||
| 845 | chip->ecc.correct = nand_correct_data; | ||
| 846 | chip->ecc.strength = 1; | ||
| 847 | chip->ecc.hwctl = lpc32xx_nand_ecc_enable; | ||
| 848 | |||
| 849 | /* bitflip_threshold's default is defined as ecc_strength anyway. | ||
| 850 | * Unfortunately, it is set only later at add_mtd_device(). While it | ||
| 851 | * is still 0, it causes bad block table scanning errors in | ||
| 852 | * nand_scan_tail(), so set it here already. */ | ||
| 853 | mtd->bitflip_threshold = chip->ecc.strength; | ||
| 854 | |||
| 855 | /* | ||
| 856 | * Allocate a large enough buffer for a single huge page plus | ||
| 857 | * extra space for the spare area and ECC storage area | ||
| 858 | */ | ||
| 859 | host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE; | ||
| 860 | host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len, | ||
| 861 | GFP_KERNEL); | ||
| 862 | if (host->data_buf == NULL) { | ||
| 863 | dev_err(&pdev->dev, "Error allocating memory\n"); | ||
| 864 | res = -ENOMEM; | ||
| 865 | goto err_exit2; | ||
| 866 | } | ||
| 867 | |||
| 868 | res = lpc32xx_nand_dma_setup(host); | ||
| 869 | if (res) { | ||
| 870 | res = -EIO; | ||
| 871 | goto err_exit2; | ||
| 872 | } | ||
| 873 | |||
| 874 | /* Find NAND device */ | ||
| 875 | if (nand_scan_ident(mtd, 1, NULL)) { | ||
| 876 | res = -ENXIO; | ||
| 877 | goto err_exit3; | ||
| 878 | } | ||
| 879 | |||
| 880 | /* OOB and ECC CPU and DMA work areas */ | ||
| 881 | host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE); | ||
| 882 | |||
| 883 | /* | ||
| 884 | * Small page FLASH has a unique OOB layout, but large and huge | ||
| 885 | * page FLASH use the standard layout. Small page FLASH uses a | ||
| 886 | * custom BBT marker layout. | ||
| 887 | */ | ||
| 888 | if (mtd->writesize <= 512) | ||
| 889 | chip->ecc.layout = &lpc32xx_nand_oob_16; | ||
| 890 | |||
| 891 | /* These sizes remain the same regardless of page size */ | ||
| 892 | chip->ecc.size = 256; | ||
| 893 | chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES; | ||
| 894 | chip->ecc.prepad = chip->ecc.postpad = 0; | ||
| 895 | |||
| 896 | /* Avoid extra scan if using BBT, setup BBT support */ | ||
| 897 | if (host->ncfg->use_bbt) { | ||
| 898 | chip->options |= NAND_SKIP_BBTSCAN; | ||
| 899 | chip->bbt_options |= NAND_BBT_USE_FLASH; | ||
| 900 | |||
| 901 | /* | ||
| 902 | * Use a custom BBT marker setup for small page FLASH that | ||
| 903 | * won't interfere with the ECC layout. Large and huge page | ||
| 904 | * FLASH use the standard layout. | ||
| 905 | */ | ||
| 906 | if (mtd->writesize <= 512) { | ||
| 907 | chip->bbt_td = &bbt_smallpage_main_descr; | ||
| 908 | chip->bbt_md = &bbt_smallpage_mirror_descr; | ||
| 909 | } | ||
| 910 | } | ||
| 911 | |||
| 912 | /* | ||
| 913 | * Fills out all the uninitialized function pointers with the defaults | ||
| 914 | */ | ||
| 915 | if (nand_scan_tail(mtd)) { | ||
| 916 | res = -ENXIO; | ||
| 917 | goto err_exit3; | ||
| 918 | } | ||
| 919 | |||
| 920 | /* Standard layout in FLASH for bad block tables */ | ||
| 921 | if (host->ncfg->use_bbt) { | ||
| 922 | if (nand_default_bbt(mtd) < 0) | ||
| 923 | dev_err(&pdev->dev, | ||
| 924 | "Error initializing default bad block tables\n"); | ||
| 925 | } | ||
| 926 | |||
| 927 | mtd->name = "nxp_lpc3220_slc"; | ||
| 928 | ppdata.of_node = pdev->dev.of_node; | ||
| 929 | res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, | ||
| 930 | host->ncfg->num_parts); | ||
| 931 | if (!res) | ||
| 932 | return res; | ||
| 933 | |||
| 934 | nand_release(mtd); | ||
| 935 | |||
| 936 | err_exit3: | ||
| 937 | dma_release_channel(host->dma_chan); | ||
| 938 | err_exit2: | ||
| 939 | clk_disable(host->clk); | ||
| 940 | clk_put(host->clk); | ||
| 941 | platform_set_drvdata(pdev, NULL); | ||
| 942 | err_exit1: | ||
| 943 | lpc32xx_wp_enable(host); | ||
| 944 | gpio_free(host->ncfg->wp_gpio); | ||
| 945 | |||
| 946 | return res; | ||
| 947 | } | ||
| 948 | |||
| 949 | /* | ||
| 950 | * Remove NAND device. | ||
| 951 | */ | ||
| 952 | static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) | ||
| 953 | { | ||
| 954 | uint32_t tmp; | ||
| 955 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 956 | struct mtd_info *mtd = &host->mtd; | ||
| 957 | |||
| 958 | nand_release(mtd); | ||
| 959 | dma_release_channel(host->dma_chan); | ||
| 960 | |||
| 961 | /* Force CE high */ | ||
| 962 | tmp = readl(SLC_CTRL(host->io_base)); | ||
| 963 | tmp &= ~SLCCFG_CE_LOW; | ||
| 964 | writel(tmp, SLC_CTRL(host->io_base)); | ||
| 965 | |||
| 966 | clk_disable(host->clk); | ||
| 967 | clk_put(host->clk); | ||
| 968 | platform_set_drvdata(pdev, NULL); | ||
| 969 | lpc32xx_wp_enable(host); | ||
| 970 | gpio_free(host->ncfg->wp_gpio); | ||
| 971 | |||
| 972 | return 0; | ||
| 973 | } | ||
| 974 | |||
| 975 | #ifdef CONFIG_PM | ||
| 976 | static int lpc32xx_nand_resume(struct platform_device *pdev) | ||
| 977 | { | ||
| 978 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 979 | |||
| 980 | /* Re-enable NAND clock */ | ||
| 981 | clk_enable(host->clk); | ||
| 982 | |||
| 983 | /* Fresh init of NAND controller */ | ||
| 984 | lpc32xx_nand_setup(host); | ||
| 985 | |||
| 986 | /* Disable write protect */ | ||
| 987 | lpc32xx_wp_disable(host); | ||
| 988 | |||
| 989 | return 0; | ||
| 990 | } | ||
| 991 | |||
| 992 | static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm) | ||
| 993 | { | ||
| 994 | uint32_t tmp; | ||
| 995 | struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); | ||
| 996 | |||
| 997 | /* Force CE high */ | ||
| 998 | tmp = readl(SLC_CTRL(host->io_base)); | ||
| 999 | tmp &= ~SLCCFG_CE_LOW; | ||
| 1000 | writel(tmp, SLC_CTRL(host->io_base)); | ||
| 1001 | |||
| 1002 | /* Enable write protect for safety */ | ||
| 1003 | lpc32xx_wp_enable(host); | ||
| 1004 | |||
| 1005 | /* Disable clock */ | ||
| 1006 | clk_disable(host->clk); | ||
| 1007 | |||
| 1008 | return 0; | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | #else | ||
| 1012 | #define lpc32xx_nand_resume NULL | ||
| 1013 | #define lpc32xx_nand_suspend NULL | ||
| 1014 | #endif | ||
| 1015 | |||
| 1016 | static const struct of_device_id lpc32xx_nand_match[] = { | ||
| 1017 | { .compatible = "nxp,lpc3220-slc" }, | ||
| 1018 | { /* sentinel */ }, | ||
| 1019 | }; | ||
| 1020 | MODULE_DEVICE_TABLE(of, lpc32xx_nand_match); | ||
| 1021 | |||
| 1022 | static struct platform_driver lpc32xx_nand_driver = { | ||
| 1023 | .probe = lpc32xx_nand_probe, | ||
| 1024 | .remove = __devexit_p(lpc32xx_nand_remove), | ||
| 1025 | .resume = lpc32xx_nand_resume, | ||
| 1026 | .suspend = lpc32xx_nand_suspend, | ||
| 1027 | .driver = { | ||
| 1028 | .name = LPC32XX_MODNAME, | ||
| 1029 | .owner = THIS_MODULE, | ||
| 1030 | .of_match_table = of_match_ptr(lpc32xx_nand_match), | ||
| 1031 | }, | ||
| 1032 | }; | ||
| 1033 | |||
| 1034 | module_platform_driver(lpc32xx_nand_driver); | ||
| 1035 | |||
| 1036 | MODULE_LICENSE("GPL"); | ||
| 1037 | MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>"); | ||
| 1038 | MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); | ||
| 1039 | MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller"); | ||
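lpc32xx_slc_ecc_copy() above takes the ECC word read back from SLC_ECC for each 256-byte ECC step, shifts it left by two, inverts it, masks it to 24 bits and stores it most-significant byte first in the spare area. A minimal standalone sketch of that packing follows; the helper name slc_ecc_pack() and the sample input value are illustrative only, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the byte packing done by lpc32xx_slc_ecc_copy() above:
     * each 32-bit word read from SLC_ECC is shifted left by two,
     * inverted, masked to 24 bits and stored MSB-first as three
     * spare-area bytes.
     */
    static void slc_ecc_pack(uint8_t *spare, const uint32_t *ecc, int count)
    {
            int i;

            for (i = 0; i < count * 3; i += 3) {
                    uint32_t ce = ecc[i / 3];

                    ce = ~(ce << 2) & 0xFFFFFF;
                    spare[i]     = (uint8_t)((ce >> 16) & 0xFF);
                    spare[i + 1] = (uint8_t)((ce >> 8) & 0xFF);
                    spare[i + 2] = (uint8_t)(ce & 0xFF);
            }
    }

    int main(void)
    {
            uint32_t ecc[1] = { 0x155AA0 };  /* hypothetical raw ECC word */
            uint8_t spare[3];

            slc_ecc_pack(spare, ecc, 1);
            printf("%02x %02x %02x\n", spare[0], spare[1], spare[2]);
            return 0;
    }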
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index c259c24d7986..f776c8577b8c 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
| @@ -506,27 +506,6 @@ static void mpc5121_nfc_write_buf(struct mtd_info *mtd, | |||
| 506 | mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); | 506 | mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); |
| 507 | } | 507 | } |
| 508 | 508 | ||
| 509 | /* Compare buffer with NAND flash */ | ||
| 510 | static int mpc5121_nfc_verify_buf(struct mtd_info *mtd, | ||
| 511 | const u_char *buf, int len) | ||
| 512 | { | ||
| 513 | u_char tmp[256]; | ||
| 514 | uint bsize; | ||
| 515 | |||
| 516 | while (len) { | ||
| 517 | bsize = min(len, 256); | ||
| 518 | mpc5121_nfc_read_buf(mtd, tmp, bsize); | ||
| 519 | |||
| 520 | if (memcmp(buf, tmp, bsize)) | ||
| 521 | return 1; | ||
| 522 | |||
| 523 | buf += bsize; | ||
| 524 | len -= bsize; | ||
| 525 | } | ||
| 526 | |||
| 527 | return 0; | ||
| 528 | } | ||
| 529 | |||
| 530 | /* Read byte from NFC buffers */ | 509 | /* Read byte from NFC buffers */ |
| 531 | static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) | 510 | static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) |
| 532 | { | 511 | { |
| @@ -732,7 +711,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
| 732 | chip->read_word = mpc5121_nfc_read_word; | 711 | chip->read_word = mpc5121_nfc_read_word; |
| 733 | chip->read_buf = mpc5121_nfc_read_buf; | 712 | chip->read_buf = mpc5121_nfc_read_buf; |
| 734 | chip->write_buf = mpc5121_nfc_write_buf; | 713 | chip->write_buf = mpc5121_nfc_write_buf; |
| 735 | chip->verify_buf = mpc5121_nfc_verify_buf; | ||
| 736 | chip->select_chip = mpc5121_nfc_select_chip; | 714 | chip->select_chip = mpc5121_nfc_select_chip; |
| 737 | chip->bbt_options = NAND_BBT_USE_FLASH; | 715 | chip->bbt_options = NAND_BBT_USE_FLASH; |
| 738 | chip->ecc.mode = NAND_ECC_SOFT; | 716 | chip->ecc.mode = NAND_ECC_SOFT; |
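The hunks above drop mpc5121_nfc_verify_buf(), and the mxc_nand.c and nand_base.c hunks below remove their verify_buf implementations as well. Where a read-back check is still wanted, it can be done through the regular MTD read path instead of a chip-level hook. A minimal sketch follows, assuming a registered struct mtd_info and a caller-supplied scratch buffer; mtd_readback_verify() is a hypothetical helper, not an existing kernel API.

    #include <linux/mtd/mtd.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /*
     * Hypothetical helper: verify previously written data by reading it
     * back through the regular MTD read path and comparing buffers.
     * Shown only to illustrate a read-back check; not part of the patch.
     */
    static int mtd_readback_verify(struct mtd_info *mtd, loff_t ofs,
                                   const u_char *written, u_char *scratch,
                                   size_t len)
    {
            size_t retlen = 0;
            int ret;

            ret = mtd_read(mtd, ofs, len, &retlen, scratch);
            if (ret && ret != -EUCLEAN)     /* -EUCLEAN: corrected bitflips */
                    return ret;
            if (retlen != len)
                    return -EIO;

            return memcmp(written, scratch, len) ? -EIO : 0;
    }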
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 5683604967d7..72e31d86030d 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
| @@ -43,8 +43,8 @@ | |||
| 43 | 43 | ||
| 44 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) | 44 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) |
| 45 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) | 45 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) |
| 46 | #define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53()) | 46 | #define nfc_is_v3_2a() cpu_is_mx51() |
| 47 | #define nfc_is_v3() nfc_is_v3_2() | 47 | #define nfc_is_v3_2b() cpu_is_mx53() |
| 48 | 48 | ||
| 49 | /* Addresses for NFC registers */ | 49 | /* Addresses for NFC registers */ |
| 50 | #define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) | 50 | #define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) |
| @@ -122,7 +122,7 @@ | |||
| 122 | #define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4) | 122 | #define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4) |
| 123 | #define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5) | 123 | #define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5) |
| 124 | #define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6) | 124 | #define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6) |
| 125 | #define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7) | 125 | #define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift) |
| 126 | #define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12) | 126 | #define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12) |
| 127 | #define NFC_V3_CONFIG2_INT_MSK (1 << 15) | 127 | #define NFC_V3_CONFIG2_INT_MSK (1 << 15) |
| 128 | #define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24) | 128 | #define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24) |
| @@ -174,6 +174,7 @@ struct mxc_nand_devtype_data { | |||
| 174 | int spare_len; | 174 | int spare_len; |
| 175 | int eccbytes; | 175 | int eccbytes; |
| 176 | int eccsize; | 176 | int eccsize; |
| 177 | int ppb_shift; | ||
| 177 | }; | 178 | }; |
| 178 | 179 | ||
| 179 | struct mxc_nand_host { | 180 | struct mxc_nand_host { |
| @@ -745,14 +746,6 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 745 | host->buf_start += n; | 746 | host->buf_start += n; |
| 746 | } | 747 | } |
| 747 | 748 | ||
| 748 | /* Used by the upper layer to verify the data in NAND Flash | ||
| 749 | * with the data in the buf. */ | ||
| 750 | static int mxc_nand_verify_buf(struct mtd_info *mtd, | ||
| 751 | const u_char *buf, int len) | ||
| 752 | { | ||
| 753 | return -EFAULT; | ||
| 754 | } | ||
| 755 | |||
| 756 | /* This function is used by upper layer for select and | 749 | /* This function is used by upper layer for select and |
| 757 | * deselect of the NAND chip */ | 750 | * deselect of the NAND chip */ |
| 758 | static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip) | 751 | static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip) |
| @@ -784,7 +777,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip) | |||
| 784 | if (chip == -1) { | 777 | if (chip == -1) { |
| 785 | /* Disable the NFC clock */ | 778 | /* Disable the NFC clock */ |
| 786 | if (host->clk_act) { | 779 | if (host->clk_act) { |
| 787 | clk_disable(host->clk); | 780 | clk_disable_unprepare(host->clk); |
| 788 | host->clk_act = 0; | 781 | host->clk_act = 0; |
| 789 | } | 782 | } |
| 790 | return; | 783 | return; |
| @@ -792,7 +785,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip) | |||
| 792 | 785 | ||
| 793 | if (!host->clk_act) { | 786 | if (!host->clk_act) { |
| 794 | /* Enable the NFC clock */ | 787 | /* Enable the NFC clock */ |
| 795 | clk_enable(host->clk); | 788 | clk_prepare_enable(host->clk); |
| 796 | host->clk_act = 1; | 789 | host->clk_act = 1; |
| 797 | } | 790 | } |
| 798 | 791 | ||
| @@ -1021,7 +1014,9 @@ static void preset_v3(struct mtd_info *mtd) | |||
| 1021 | } | 1014 | } |
| 1022 | 1015 | ||
| 1023 | if (mtd->writesize) { | 1016 | if (mtd->writesize) { |
| 1024 | config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6); | 1017 | config2 |= NFC_V3_CONFIG2_PPB( |
| 1018 | ffs(mtd->erasesize / mtd->writesize) - 6, | ||
| 1019 | host->devtype_data->ppb_shift); | ||
| 1025 | host->eccsize = get_eccsize(mtd); | 1020 | host->eccsize = get_eccsize(mtd); |
| 1026 | if (host->eccsize == 8) | 1021 | if (host->eccsize == 8) |
| 1027 | config2 |= NFC_V3_CONFIG2_ECC_MODE_8; | 1022 | config2 |= NFC_V3_CONFIG2_ECC_MODE_8; |
| @@ -1234,7 +1229,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { | |||
| 1234 | .eccsize = 0, | 1229 | .eccsize = 0, |
| 1235 | }; | 1230 | }; |
| 1236 | 1231 | ||
| 1237 | /* v3: i.MX51, i.MX53 */ | 1232 | /* v3.2a: i.MX51 */ |
| 1238 | static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { | 1233 | static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { |
| 1239 | .preset = preset_v3, | 1234 | .preset = preset_v3, |
| 1240 | .send_cmd = send_cmd_v3, | 1235 | .send_cmd = send_cmd_v3, |
| @@ -1258,6 +1253,34 @@ static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { | |||
| 1258 | .spare_len = 64, | 1253 | .spare_len = 64, |
| 1259 | .eccbytes = 0, | 1254 | .eccbytes = 0, |
| 1260 | .eccsize = 0, | 1255 | .eccsize = 0, |
| 1256 | .ppb_shift = 7, | ||
| 1257 | }; | ||
| 1258 | |||
| 1259 | /* v3.2b: i.MX53 */ | ||
| 1260 | static const struct mxc_nand_devtype_data imx53_nand_devtype_data = { | ||
| 1261 | .preset = preset_v3, | ||
| 1262 | .send_cmd = send_cmd_v3, | ||
| 1263 | .send_addr = send_addr_v3, | ||
| 1264 | .send_page = send_page_v3, | ||
| 1265 | .send_read_id = send_read_id_v3, | ||
| 1266 | .get_dev_status = get_dev_status_v3, | ||
| 1267 | .check_int = check_int_v3, | ||
| 1268 | .irq_control = irq_control_v3, | ||
| 1269 | .get_ecc_status = get_ecc_status_v3, | ||
| 1270 | .ecclayout_512 = &nandv2_hw_eccoob_smallpage, | ||
| 1271 | .ecclayout_2k = &nandv2_hw_eccoob_largepage, | ||
| 1272 | .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ | ||
| 1273 | .select_chip = mxc_nand_select_chip_v1_v3, | ||
| 1274 | .correct_data = mxc_nand_correct_data_v2_v3, | ||
| 1275 | .irqpending_quirk = 0, | ||
| 1276 | .needs_ip = 1, | ||
| 1277 | .regs_offset = 0, | ||
| 1278 | .spare0_offset = 0x1000, | ||
| 1279 | .axi_offset = 0x1e00, | ||
| 1280 | .spare_len = 64, | ||
| 1281 | .eccbytes = 0, | ||
| 1282 | .eccsize = 0, | ||
| 1283 | .ppb_shift = 8, | ||
| 1261 | }; | 1284 | }; |
| 1262 | 1285 | ||
| 1263 | #ifdef CONFIG_OF_MTD | 1286 | #ifdef CONFIG_OF_MTD |
| @@ -1274,6 +1297,9 @@ static const struct of_device_id mxcnd_dt_ids[] = { | |||
| 1274 | }, { | 1297 | }, { |
| 1275 | .compatible = "fsl,imx51-nand", | 1298 | .compatible = "fsl,imx51-nand", |
| 1276 | .data = &imx51_nand_devtype_data, | 1299 | .data = &imx51_nand_devtype_data, |
| 1300 | }, { | ||
| 1301 | .compatible = "fsl,imx53-nand", | ||
| 1302 | .data = &imx53_nand_devtype_data, | ||
| 1277 | }, | 1303 | }, |
| 1278 | { /* sentinel */ } | 1304 | { /* sentinel */ } |
| 1279 | }; | 1305 | }; |
| @@ -1327,15 +1353,17 @@ static int __init mxcnd_probe_pdata(struct mxc_nand_host *host) | |||
| 1327 | host->devtype_data = &imx27_nand_devtype_data; | 1353 | host->devtype_data = &imx27_nand_devtype_data; |
| 1328 | } else if (nfc_is_v21()) { | 1354 | } else if (nfc_is_v21()) { |
| 1329 | host->devtype_data = &imx25_nand_devtype_data; | 1355 | host->devtype_data = &imx25_nand_devtype_data; |
| 1330 | } else if (nfc_is_v3_2()) { | 1356 | } else if (nfc_is_v3_2a()) { |
| 1331 | host->devtype_data = &imx51_nand_devtype_data; | 1357 | host->devtype_data = &imx51_nand_devtype_data; |
| 1358 | } else if (nfc_is_v3_2b()) { | ||
| 1359 | host->devtype_data = &imx53_nand_devtype_data; | ||
| 1332 | } else | 1360 | } else |
| 1333 | BUG(); | 1361 | BUG(); |
| 1334 | 1362 | ||
| 1335 | return 0; | 1363 | return 0; |
| 1336 | } | 1364 | } |
| 1337 | 1365 | ||
| 1338 | static int __init mxcnd_probe(struct platform_device *pdev) | 1366 | static int __devinit mxcnd_probe(struct platform_device *pdev) |
| 1339 | { | 1367 | { |
| 1340 | struct nand_chip *this; | 1368 | struct nand_chip *this; |
| 1341 | struct mtd_info *mtd; | 1369 | struct mtd_info *mtd; |
| @@ -1344,8 +1372,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1344 | int err = 0; | 1372 | int err = 0; |
| 1345 | 1373 | ||
| 1346 | /* Allocate memory for MTD device structure and private data */ | 1374 | /* Allocate memory for MTD device structure and private data */ |
| 1347 | host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE + | 1375 | host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) + |
| 1348 | NAND_MAX_OOBSIZE, GFP_KERNEL); | 1376 | NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL); |
| 1349 | if (!host) | 1377 | if (!host) |
| 1350 | return -ENOMEM; | 1378 | return -ENOMEM; |
| 1351 | 1379 | ||
| @@ -1370,36 +1398,38 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1370 | this->read_word = mxc_nand_read_word; | 1398 | this->read_word = mxc_nand_read_word; |
| 1371 | this->write_buf = mxc_nand_write_buf; | 1399 | this->write_buf = mxc_nand_write_buf; |
| 1372 | this->read_buf = mxc_nand_read_buf; | 1400 | this->read_buf = mxc_nand_read_buf; |
| 1373 | this->verify_buf = mxc_nand_verify_buf; | ||
| 1374 | 1401 | ||
| 1375 | host->clk = clk_get(&pdev->dev, "nfc"); | 1402 | host->clk = devm_clk_get(&pdev->dev, NULL); |
| 1376 | if (IS_ERR(host->clk)) { | 1403 | if (IS_ERR(host->clk)) |
| 1377 | err = PTR_ERR(host->clk); | 1404 | return PTR_ERR(host->clk); |
| 1378 | goto eclk; | ||
| 1379 | } | ||
| 1380 | 1405 | ||
| 1381 | clk_prepare_enable(host->clk); | 1406 | err = mxcnd_probe_dt(host); |
| 1382 | host->clk_act = 1; | 1407 | if (err > 0) |
| 1408 | err = mxcnd_probe_pdata(host); | ||
| 1409 | if (err < 0) | ||
| 1410 | return err; | ||
| 1383 | 1411 | ||
| 1384 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1412 | if (host->devtype_data->needs_ip) { |
| 1385 | if (!res) { | 1413 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1386 | err = -ENODEV; | 1414 | if (!res) |
| 1387 | goto eres; | 1415 | return -ENODEV; |
| 1388 | } | 1416 | host->regs_ip = devm_request_and_ioremap(&pdev->dev, res); |
| 1417 | if (!host->regs_ip) | ||
| 1418 | return -ENOMEM; | ||
| 1389 | 1419 | ||
| 1390 | host->base = ioremap(res->start, resource_size(res)); | 1420 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 1391 | if (!host->base) { | 1421 | } else { |
| 1392 | err = -ENOMEM; | 1422 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1393 | goto eres; | ||
| 1394 | } | 1423 | } |
| 1395 | 1424 | ||
| 1396 | host->main_area0 = host->base; | 1425 | if (!res) |
| 1426 | return -ENODEV; | ||
| 1397 | 1427 | ||
| 1398 | err = mxcnd_probe_dt(host); | 1428 | host->base = devm_request_and_ioremap(&pdev->dev, res); |
| 1399 | if (err > 0) | 1429 | if (!host->base) |
| 1400 | err = mxcnd_probe_pdata(host); | 1430 | return -ENOMEM; |
| 1401 | if (err < 0) | 1431 | |
| 1402 | goto eirq; | 1432 | host->main_area0 = host->base; |
| 1403 | 1433 | ||
| 1404 | if (host->devtype_data->regs_offset) | 1434 | if (host->devtype_data->regs_offset) |
| 1405 | host->regs = host->base + host->devtype_data->regs_offset; | 1435 | host->regs = host->base + host->devtype_data->regs_offset; |
| @@ -1414,19 +1444,6 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1414 | this->ecc.size = 512; | 1444 | this->ecc.size = 512; |
| 1415 | this->ecc.layout = host->devtype_data->ecclayout_512; | 1445 | this->ecc.layout = host->devtype_data->ecclayout_512; |
| 1416 | 1446 | ||
| 1417 | if (host->devtype_data->needs_ip) { | ||
| 1418 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1419 | if (!res) { | ||
| 1420 | err = -ENODEV; | ||
| 1421 | goto eirq; | ||
| 1422 | } | ||
| 1423 | host->regs_ip = ioremap(res->start, resource_size(res)); | ||
| 1424 | if (!host->regs_ip) { | ||
| 1425 | err = -ENOMEM; | ||
| 1426 | goto eirq; | ||
| 1427 | } | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | if (host->pdata.hw_ecc) { | 1447 | if (host->pdata.hw_ecc) { |
| 1431 | this->ecc.calculate = mxc_nand_calculate_ecc; | 1448 | this->ecc.calculate = mxc_nand_calculate_ecc; |
| 1432 | this->ecc.hwctl = mxc_nand_enable_hwecc; | 1449 | this->ecc.hwctl = mxc_nand_enable_hwecc; |
| @@ -1458,9 +1475,13 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1458 | */ | 1475 | */ |
| 1459 | host->devtype_data->irq_control(host, 0); | 1476 | host->devtype_data->irq_control(host, 0); |
| 1460 | 1477 | ||
| 1461 | err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); | 1478 | err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq, |
| 1479 | IRQF_DISABLED, DRIVER_NAME, host); | ||
| 1462 | if (err) | 1480 | if (err) |
| 1463 | goto eirq; | 1481 | return err; |
| 1482 | |||
| 1483 | clk_prepare_enable(host->clk); | ||
| 1484 | host->clk_act = 1; | ||
| 1464 | 1485 | ||
| 1465 | /* | 1486 | /* |
| 1466 | * Now that we "own" the interrupt make sure the interrupt mask bit is | 1487 | * Now that we "own" the interrupt make sure the interrupt mask bit is |
| @@ -1512,15 +1533,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1512 | return 0; | 1533 | return 0; |
| 1513 | 1534 | ||
| 1514 | escan: | 1535 | escan: |
| 1515 | free_irq(host->irq, host); | 1536 | clk_disable_unprepare(host->clk); |
| 1516 | eirq: | ||
| 1517 | if (host->regs_ip) | ||
| 1518 | iounmap(host->regs_ip); | ||
| 1519 | iounmap(host->base); | ||
| 1520 | eres: | ||
| 1521 | clk_put(host->clk); | ||
| 1522 | eclk: | ||
| 1523 | kfree(host); | ||
| 1524 | 1537 | ||
| 1525 | return err; | 1538 | return err; |
| 1526 | } | 1539 | } |
| @@ -1529,16 +1542,9 @@ static int __devexit mxcnd_remove(struct platform_device *pdev) | |||
| 1529 | { | 1542 | { |
| 1530 | struct mxc_nand_host *host = platform_get_drvdata(pdev); | 1543 | struct mxc_nand_host *host = platform_get_drvdata(pdev); |
| 1531 | 1544 | ||
| 1532 | clk_put(host->clk); | ||
| 1533 | |||
| 1534 | platform_set_drvdata(pdev, NULL); | 1545 | platform_set_drvdata(pdev, NULL); |
| 1535 | 1546 | ||
| 1536 | nand_release(&host->mtd); | 1547 | nand_release(&host->mtd); |
| 1537 | free_irq(host->irq, host); | ||
| 1538 | if (host->regs_ip) | ||
| 1539 | iounmap(host->regs_ip); | ||
| 1540 | iounmap(host->base); | ||
| 1541 | kfree(host); | ||
| 1542 | 1548 | ||
| 1543 | return 0; | 1549 | return 0; |
| 1544 | } | 1550 | } |
| @@ -1549,22 +1555,10 @@ static struct platform_driver mxcnd_driver = { | |||
| 1549 | .owner = THIS_MODULE, | 1555 | .owner = THIS_MODULE, |
| 1550 | .of_match_table = of_match_ptr(mxcnd_dt_ids), | 1556 | .of_match_table = of_match_ptr(mxcnd_dt_ids), |
| 1551 | }, | 1557 | }, |
| 1558 | .probe = mxcnd_probe, | ||
| 1552 | .remove = __devexit_p(mxcnd_remove), | 1559 | .remove = __devexit_p(mxcnd_remove), |
| 1553 | }; | 1560 | }; |
| 1554 | 1561 | module_platform_driver(mxcnd_driver); | |
| 1555 | static int __init mxc_nd_init(void) | ||
| 1556 | { | ||
| 1557 | return platform_driver_probe(&mxcnd_driver, mxcnd_probe); | ||
| 1558 | } | ||
| 1559 | |||
| 1560 | static void __exit mxc_nd_cleanup(void) | ||
| 1561 | { | ||
| 1562 | /* Unregister the device structure */ | ||
| 1563 | platform_driver_unregister(&mxcnd_driver); | ||
| 1564 | } | ||
| 1565 | |||
| 1566 | module_init(mxc_nd_init); | ||
| 1567 | module_exit(mxc_nd_cleanup); | ||
| 1568 | 1562 | ||
| 1569 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | 1563 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); |
| 1570 | MODULE_DESCRIPTION("MXC NAND MTD driver"); | 1564 | MODULE_DESCRIPTION("MXC NAND MTD driver"); |
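The mxc_nand.c changes above split the v3 controller support into v3.2a (i.MX51) and v3.2b (i.MX53) because the pages-per-block field sits at a different position in CONFIG2, so NFC_V3_CONFIG2_PPB() now takes the shift as a parameter: 7 for i.MX51, 8 for i.MX53. A standalone sketch of the field computation used by preset_v3() is below; the page and block sizes are illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* Mirrors NFC_V3_CONFIG2_PPB(x, shift) from the patch above. */
    static uint32_t nfc_v3_config2_ppb(uint32_t x, int shift)
    {
            return (x & 0x3) << shift;
    }

    int main(void)
    {
            uint32_t erasesize = 128 * 1024; /* example 128 KiB block */
            uint32_t writesize = 2048;       /* example 2 KiB page    */
            /* ffs(64) - 6 = 1: the encoding for 64 pages per block */
            uint32_t ppb_code = ffs(erasesize / writesize) - 6;

            printf("i.MX51 CONFIG2 PPB bits: 0x%08x\n",
                   nfc_v3_config2_ppb(ppb_code, 7)); /* v3.2a shift */
            printf("i.MX53 CONFIG2 PPB bits: 0x%08x\n",
                   nfc_v3_config2_ppb(ppb_code, 8)); /* v3.2b shift */
            return 0;
    }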
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a11253a0fcab..ec6841d8e956 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -243,25 +243,6 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | /** | 245 | /** |
| 246 | * nand_verify_buf - [DEFAULT] Verify chip data against buffer | ||
| 247 | * @mtd: MTD device structure | ||
| 248 | * @buf: buffer containing the data to compare | ||
| 249 | * @len: number of bytes to compare | ||
| 250 | * | ||
| 251 | * Default verify function for 8bit buswidth. | ||
| 252 | */ | ||
| 253 | static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
| 254 | { | ||
| 255 | int i; | ||
| 256 | struct nand_chip *chip = mtd->priv; | ||
| 257 | |||
| 258 | for (i = 0; i < len; i++) | ||
| 259 | if (buf[i] != readb(chip->IO_ADDR_R)) | ||
| 260 | return -EFAULT; | ||
| 261 | return 0; | ||
| 262 | } | ||
| 263 | |||
| 264 | /** | ||
| 265 | * nand_write_buf16 - [DEFAULT] write buffer to chip | 246 | * nand_write_buf16 - [DEFAULT] write buffer to chip |
| 266 | * @mtd: MTD device structure | 247 | * @mtd: MTD device structure |
| 267 | * @buf: data buffer | 248 | * @buf: data buffer |
| @@ -301,28 +282,6 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) | |||
| 301 | } | 282 | } |
| 302 | 283 | ||
| 303 | /** | 284 | /** |
| 304 | * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer | ||
| 305 | * @mtd: MTD device structure | ||
| 306 | * @buf: buffer containing the data to compare | ||
| 307 | * @len: number of bytes to compare | ||
| 308 | * | ||
| 309 | * Default verify function for 16bit buswidth. | ||
| 310 | */ | ||
| 311 | static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
| 312 | { | ||
| 313 | int i; | ||
| 314 | struct nand_chip *chip = mtd->priv; | ||
| 315 | u16 *p = (u16 *) buf; | ||
| 316 | len >>= 1; | ||
| 317 | |||
| 318 | for (i = 0; i < len; i++) | ||
| 319 | if (p[i] != readw(chip->IO_ADDR_R)) | ||
| 320 | return -EFAULT; | ||
| 321 | |||
| 322 | return 0; | ||
| 323 | } | ||
| 324 | |||
| 325 | /** | ||
| 326 | * nand_block_bad - [DEFAULT] Read bad block marker from the chip | 285 | * nand_block_bad - [DEFAULT] Read bad block marker from the chip |
| 327 | * @mtd: MTD device structure | 286 | * @mtd: MTD device structure |
| 328 | * @ofs: offset from device start | 287 | * @ofs: offset from device start |
| @@ -1525,7 +1484,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
| 1525 | ret = chip->ecc.read_page_raw(mtd, chip, bufpoi, | 1484 | ret = chip->ecc.read_page_raw(mtd, chip, bufpoi, |
| 1526 | oob_required, | 1485 | oob_required, |
| 1527 | page); | 1486 | page); |
| 1528 | else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) | 1487 | else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) && |
| 1488 | !oob) | ||
| 1529 | ret = chip->ecc.read_subpage(mtd, chip, | 1489 | ret = chip->ecc.read_subpage(mtd, chip, |
| 1530 | col, bytes, bufpoi); | 1490 | col, bytes, bufpoi); |
| 1531 | else | 1491 | else |
| @@ -1542,7 +1502,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
| 1542 | 1502 | ||
| 1543 | /* Transfer not aligned data */ | 1503 | /* Transfer not aligned data */ |
| 1544 | if (!aligned) { | 1504 | if (!aligned) { |
| 1545 | if (!NAND_SUBPAGE_READ(chip) && !oob && | 1505 | if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && |
| 1546 | !(mtd->ecc_stats.failed - stats.failed) && | 1506 | !(mtd->ecc_stats.failed - stats.failed) && |
| 1547 | (ops->mode != MTD_OPS_RAW)) { | 1507 | (ops->mode != MTD_OPS_RAW)) { |
| 1548 | chip->pagebuf = realpage; | 1508 | chip->pagebuf = realpage; |
| @@ -1565,14 +1525,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
| 1565 | oobreadlen -= toread; | 1525 | oobreadlen -= toread; |
| 1566 | } | 1526 | } |
| 1567 | } | 1527 | } |
| 1568 | |||
| 1569 | if (!(chip->options & NAND_NO_READRDY)) { | ||
| 1570 | /* Apply delay or wait for ready/busy pin */ | ||
| 1571 | if (!chip->dev_ready) | ||
| 1572 | udelay(chip->chip_delay); | ||
| 1573 | else | ||
| 1574 | nand_wait_ready(mtd); | ||
| 1575 | } | ||
| 1576 | } else { | 1528 | } else { |
| 1577 | memcpy(buf, chip->buffers->databuf + col, bytes); | 1529 | memcpy(buf, chip->buffers->databuf + col, bytes); |
| 1578 | buf += bytes; | 1530 | buf += bytes; |
| @@ -1633,7 +1585,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
| 1633 | ops.len = len; | 1585 | ops.len = len; |
| 1634 | ops.datbuf = buf; | 1586 | ops.datbuf = buf; |
| 1635 | ops.oobbuf = NULL; | 1587 | ops.oobbuf = NULL; |
| 1636 | ops.mode = 0; | 1588 | ops.mode = MTD_OPS_PLACE_OOB; |
| 1637 | ret = nand_do_read_ops(mtd, from, &ops); | 1589 | ret = nand_do_read_ops(mtd, from, &ops); |
| 1638 | *retlen = ops.retlen; | 1590 | *retlen = ops.retlen; |
| 1639 | nand_release_device(mtd); | 1591 | nand_release_device(mtd); |
| @@ -1837,14 +1789,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
| 1837 | len = min(len, readlen); | 1789 | len = min(len, readlen); |
| 1838 | buf = nand_transfer_oob(chip, buf, ops, len); | 1790 | buf = nand_transfer_oob(chip, buf, ops, len); |
| 1839 | 1791 | ||
| 1840 | if (!(chip->options & NAND_NO_READRDY)) { | ||
| 1841 | /* Apply delay or wait for ready/busy pin */ | ||
| 1842 | if (!chip->dev_ready) | ||
| 1843 | udelay(chip->chip_delay); | ||
| 1844 | else | ||
| 1845 | nand_wait_ready(mtd); | ||
| 1846 | } | ||
| 1847 | |||
| 1848 | readlen -= len; | 1792 | readlen -= len; |
| 1849 | if (!readlen) | 1793 | if (!readlen) |
| 1850 | break; | 1794 | break; |
| @@ -1927,12 +1871,14 @@ out: | |||
| 1927 | * | 1871 | * |
| 1928 | * Not for syndrome calculating ECC controllers, which use a special oob layout. | 1872 | * Not for syndrome calculating ECC controllers, which use a special oob layout. |
| 1929 | */ | 1873 | */ |
| 1930 | static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | 1874 | static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, |
| 1931 | const uint8_t *buf, int oob_required) | 1875 | const uint8_t *buf, int oob_required) |
| 1932 | { | 1876 | { |
| 1933 | chip->write_buf(mtd, buf, mtd->writesize); | 1877 | chip->write_buf(mtd, buf, mtd->writesize); |
| 1934 | if (oob_required) | 1878 | if (oob_required) |
| 1935 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | 1879 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 1880 | |||
| 1881 | return 0; | ||
| 1936 | } | 1882 | } |
| 1937 | 1883 | ||
| 1938 | /** | 1884 | /** |
| @@ -1944,7 +1890,7 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 1944 | * | 1890 | * |
| 1945 | * We need a special oob layout and handling even when ECC isn't checked. | 1891 | * We need a special oob layout and handling even when ECC isn't checked. |
| 1946 | */ | 1892 | */ |
| 1947 | static void nand_write_page_raw_syndrome(struct mtd_info *mtd, | 1893 | static int nand_write_page_raw_syndrome(struct mtd_info *mtd, |
| 1948 | struct nand_chip *chip, | 1894 | struct nand_chip *chip, |
| 1949 | const uint8_t *buf, int oob_required) | 1895 | const uint8_t *buf, int oob_required) |
| 1950 | { | 1896 | { |
| @@ -1974,6 +1920,8 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd, | |||
| 1974 | size = mtd->oobsize - (oob - chip->oob_poi); | 1920 | size = mtd->oobsize - (oob - chip->oob_poi); |
| 1975 | if (size) | 1921 | if (size) |
| 1976 | chip->write_buf(mtd, oob, size); | 1922 | chip->write_buf(mtd, oob, size); |
| 1923 | |||
| 1924 | return 0; | ||
| 1977 | } | 1925 | } |
| 1978 | /** | 1926 | /** |
| 1979 | * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function | 1927 | * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function |
| @@ -1982,7 +1930,7 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd, | |||
| 1982 | * @buf: data buffer | 1930 | * @buf: data buffer |
| 1983 | * @oob_required: must write chip->oob_poi to OOB | 1931 | * @oob_required: must write chip->oob_poi to OOB |
| 1984 | */ | 1932 | */ |
| 1985 | static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | 1933 | static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, |
| 1986 | const uint8_t *buf, int oob_required) | 1934 | const uint8_t *buf, int oob_required) |
| 1987 | { | 1935 | { |
| 1988 | int i, eccsize = chip->ecc.size; | 1936 | int i, eccsize = chip->ecc.size; |
| @@ -1999,7 +1947,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 1999 | for (i = 0; i < chip->ecc.total; i++) | 1947 | for (i = 0; i < chip->ecc.total; i++) |
| 2000 | chip->oob_poi[eccpos[i]] = ecc_calc[i]; | 1948 | chip->oob_poi[eccpos[i]] = ecc_calc[i]; |
| 2001 | 1949 | ||
| 2002 | chip->ecc.write_page_raw(mtd, chip, buf, 1); | 1950 | return chip->ecc.write_page_raw(mtd, chip, buf, 1); |
| 2003 | } | 1951 | } |
| 2004 | 1952 | ||
| 2005 | /** | 1953 | /** |
| @@ -2009,7 +1957,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2009 | * @buf: data buffer | 1957 | * @buf: data buffer |
| 2010 | * @oob_required: must write chip->oob_poi to OOB | 1958 | * @oob_required: must write chip->oob_poi to OOB |
| 2011 | */ | 1959 | */ |
| 2012 | static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | 1960 | static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, |
| 2013 | const uint8_t *buf, int oob_required) | 1961 | const uint8_t *buf, int oob_required) |
| 2014 | { | 1962 | { |
| 2015 | int i, eccsize = chip->ecc.size; | 1963 | int i, eccsize = chip->ecc.size; |
| @@ -2029,6 +1977,8 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2029 | chip->oob_poi[eccpos[i]] = ecc_calc[i]; | 1977 | chip->oob_poi[eccpos[i]] = ecc_calc[i]; |
| 2030 | 1978 | ||
| 2031 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | 1979 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 1980 | |||
| 1981 | return 0; | ||
| 2032 | } | 1982 | } |
| 2033 | 1983 | ||
| 2034 | /** | 1984 | /** |
| @@ -2041,7 +1991,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2041 | * The hw generator calculates the error syndrome automatically. Therefore we | 1991 | * The hw generator calculates the error syndrome automatically. Therefore we |
| 2042 | * need a special oob layout and handling. | 1992 | * need a special oob layout and handling. |
| 2043 | */ | 1993 | */ |
| 2044 | static void nand_write_page_syndrome(struct mtd_info *mtd, | 1994 | static int nand_write_page_syndrome(struct mtd_info *mtd, |
| 2045 | struct nand_chip *chip, | 1995 | struct nand_chip *chip, |
| 2046 | const uint8_t *buf, int oob_required) | 1996 | const uint8_t *buf, int oob_required) |
| 2047 | { | 1997 | { |
| @@ -2075,6 +2025,8 @@ static void nand_write_page_syndrome(struct mtd_info *mtd, | |||
| 2075 | i = mtd->oobsize - (oob - chip->oob_poi); | 2025 | i = mtd->oobsize - (oob - chip->oob_poi); |
| 2076 | if (i) | 2026 | if (i) |
| 2077 | chip->write_buf(mtd, oob, i); | 2027 | chip->write_buf(mtd, oob, i); |
| 2028 | |||
| 2029 | return 0; | ||
| 2078 | } | 2030 | } |
| 2079 | 2031 | ||
| 2080 | /** | 2032 | /** |
| @@ -2096,9 +2048,12 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2096 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | 2048 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); |
| 2097 | 2049 | ||
| 2098 | if (unlikely(raw)) | 2050 | if (unlikely(raw)) |
| 2099 | chip->ecc.write_page_raw(mtd, chip, buf, oob_required); | 2051 | status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); |
| 2100 | else | 2052 | else |
| 2101 | chip->ecc.write_page(mtd, chip, buf, oob_required); | 2053 | status = chip->ecc.write_page(mtd, chip, buf, oob_required); |
| 2054 | |||
| 2055 | if (status < 0) | ||
| 2056 | return status; | ||
| 2102 | 2057 | ||
| 2103 | /* | 2058 | /* |
| 2104 | * Cached programming disabled for now. Not sure if it's worth the | 2059 | * Cached programming disabled for now. Not sure if it's worth the |
| @@ -2125,16 +2080,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2125 | status = chip->waitfunc(mtd, chip); | 2080 | status = chip->waitfunc(mtd, chip); |
| 2126 | } | 2081 | } |
| 2127 | 2082 | ||
| 2128 | #ifdef CONFIG_MTD_NAND_VERIFY_WRITE | ||
| 2129 | /* Send command to read back the data */ | ||
| 2130 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
| 2131 | |||
| 2132 | if (chip->verify_buf(mtd, buf, mtd->writesize)) | ||
| 2133 | return -EIO; | ||
| 2134 | |||
| 2135 | /* Make sure the next page prog is preceded by a status read */ | ||
| 2136 | chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); | ||
| 2137 | #endif | ||
| 2138 | return 0; | 2083 | return 0; |
| 2139 | } | 2084 | } |
| 2140 | 2085 | ||
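With the CONFIG_MTD_NAND_VERIFY_WRITE read-back removed from nand_write_page() (and the verify_buf default dropped in nand_set_defaults() below), any remaining paranoia check has to live above the NAND core. A hedged sketch using only the generic MTD API — the helper and its scratch-buffer convention are invented for illustration:

    #include <linux/mtd/mtd.h>
    #include <linux/string.h>

    /* Write one page worth of data, read it back and compare. */
    static int write_and_verify(struct mtd_info *mtd, loff_t to,
                                const uint8_t *buf, uint8_t *scratch)
    {
            size_t retlen;
            int ret;

            ret = mtd_write(mtd, to, mtd->writesize, &retlen, buf);
            if (ret || retlen != mtd->writesize)
                    return ret ? ret : -EIO;

            ret = mtd_read(mtd, to, mtd->writesize, &retlen, scratch);
            if (ret && !mtd_is_bitflip(ret))        /* corrected bitflips are fine */
                    return ret;

            return memcmp(buf, scratch, mtd->writesize) ? -EIO : 0;
    }
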
| @@ -2336,7 +2281,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
| 2336 | ops.len = len; | 2281 | ops.len = len; |
| 2337 | ops.datbuf = (uint8_t *)buf; | 2282 | ops.datbuf = (uint8_t *)buf; |
| 2338 | ops.oobbuf = NULL; | 2283 | ops.oobbuf = NULL; |
| 2339 | ops.mode = 0; | 2284 | ops.mode = MTD_OPS_PLACE_OOB; |
| 2340 | 2285 | ||
| 2341 | ret = nand_do_write_ops(mtd, to, &ops); | 2286 | ret = nand_do_write_ops(mtd, to, &ops); |
| 2342 | 2287 | ||
| @@ -2365,7 +2310,7 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
| 2365 | ops.len = len; | 2310 | ops.len = len; |
| 2366 | ops.datbuf = (uint8_t *)buf; | 2311 | ops.datbuf = (uint8_t *)buf; |
| 2367 | ops.oobbuf = NULL; | 2312 | ops.oobbuf = NULL; |
| 2368 | ops.mode = 0; | 2313 | ops.mode = MTD_OPS_PLACE_OOB; |
| 2369 | ret = nand_do_write_ops(mtd, to, &ops); | 2314 | ret = nand_do_write_ops(mtd, to, &ops); |
| 2370 | *retlen = ops.retlen; | 2315 | *retlen = ops.retlen; |
| 2371 | nand_release_device(mtd); | 2316 | nand_release_device(mtd); |
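The two hunks above spell out MTD_OPS_PLACE_OOB instead of relying on it happening to be 0. For reference, the OOB operation modes in <linux/mtd/mtd.h> are MTD_OPS_PLACE_OOB (use raw byte offsets in the OOB area), MTD_OPS_AUTO_OOB (let the core place data in the ECC-free positions of the layout) and MTD_OPS_RAW (no ECC at all). A small sketch of an explicit OOB write — the helper name is illustrative:

    #include <linux/mtd/mtd.h>
    #include <linux/string.h>

    /* Write caller-supplied OOB bytes, letting the core place them into the
     * free positions described by the ECC layout. */
    static int write_user_oob(struct mtd_info *mtd, loff_t to,
                              const uint8_t *oob, size_t ooblen)
    {
            struct mtd_oob_ops ops;

            memset(&ops, 0, sizeof(ops));
            ops.mode   = MTD_OPS_AUTO_OOB;
            ops.ooblen = ooblen;
            ops.oobbuf = (uint8_t *)oob;

            return mtd_write_oob(mtd, to, &ops);
    }
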
| @@ -2755,6 +2700,50 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
| 2755 | } | 2700 | } |
| 2756 | 2701 | ||
| 2757 | /** | 2702 | /** |
| 2703 | * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand | ||
| 2704 | * @mtd: MTD device structure | ||
| 2705 | * @chip: nand chip info structure | ||
| 2706 | * @addr: feature address. | ||
| 2707 | * @subfeature_param: the subfeature parameters, a four bytes array. | ||
| 2708 | */ | ||
| 2709 | static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 2710 | int addr, uint8_t *subfeature_param) | ||
| 2711 | { | ||
| 2712 | int status; | ||
| 2713 | |||
| 2714 | if (!chip->onfi_version) | ||
| 2715 | return -EINVAL; | ||
| 2716 | |||
| 2717 | chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1); | ||
| 2718 | chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN); | ||
| 2719 | status = chip->waitfunc(mtd, chip); | ||
| 2720 | if (status & NAND_STATUS_FAIL) | ||
| 2721 | return -EIO; | ||
| 2722 | return 0; | ||
| 2723 | } | ||
| 2724 | |||
| 2725 | /** | ||
| 2726 | * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand | ||
| 2727 | * @mtd: MTD device structure | ||
| 2728 | * @chip: nand chip info structure | ||
| 2729 | * @addr: feature address. | ||
| 2730 | * @subfeature_param: the subfeature parameters, a four bytes array. | ||
| 2731 | */ | ||
| 2732 | static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 2733 | int addr, uint8_t *subfeature_param) | ||
| 2734 | { | ||
| 2735 | if (!chip->onfi_version) | ||
| 2736 | return -EINVAL; | ||
| 2737 | |||
| 2738 | /* clear the sub feature parameters */ | ||
| 2739 | memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN); | ||
| 2740 | |||
| 2741 | chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1); | ||
| 2742 | chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN); | ||
| 2743 | return 0; | ||
| 2744 | } | ||
| 2745 | |||
| 2746 | /** | ||
| 2758 | * nand_suspend - [MTD Interface] Suspend the NAND flash | 2747 | * nand_suspend - [MTD Interface] Suspend the NAND flash |
| 2759 | * @mtd: MTD device structure | 2748 | * @mtd: MTD device structure |
| 2760 | */ | 2749 | */ |
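nand_onfi_set_features() and nand_onfi_get_features() become the default implementations behind the new chip->onfi_set_features/onfi_get_features hooks (wired up in nand_scan_tail() further down). A hedged usage sketch — feature address 0x01 is the ONFI "timing mode" feature, and the surrounding helper is invented for illustration:

    #include <linux/mtd/nand.h>

    /* Ask an ONFI chip to switch to timing mode 3, then read the feature back.
     * Subfeature parameters are always ONFI_SUBFEATURE_PARAM_LEN (4) bytes. */
    static int select_timing_mode_3(struct mtd_info *mtd, struct nand_chip *chip)
    {
            uint8_t param[ONFI_SUBFEATURE_PARAM_LEN] = { 3, 0, 0, 0 };
            int ret;

            ret = chip->onfi_set_features(mtd, chip, 0x01, param);
            if (ret)
                    return ret;     /* -EINVAL for non-ONFI chips, -EIO on failure */

            ret = chip->onfi_get_features(mtd, chip, 0x01, param);
            if (ret)
                    return ret;

            return param[0] == 3 ? 0 : -EIO;
    }
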
| @@ -2809,8 +2798,6 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) | |||
| 2809 | chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; | 2798 | chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; |
| 2810 | if (!chip->read_buf) | 2799 | if (!chip->read_buf) |
| 2811 | chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; | 2800 | chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; |
| 2812 | if (!chip->verify_buf) | ||
| 2813 | chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf; | ||
| 2814 | if (!chip->scan_bbt) | 2801 | if (!chip->scan_bbt) |
| 2815 | chip->scan_bbt = nand_default_bbt; | 2802 | chip->scan_bbt = nand_default_bbt; |
| 2816 | 2803 | ||
| @@ -2914,14 +2901,250 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 2914 | if (le16_to_cpu(p->features) & 1) | 2901 | if (le16_to_cpu(p->features) & 1) |
| 2915 | *busw = NAND_BUSWIDTH_16; | 2902 | *busw = NAND_BUSWIDTH_16; |
| 2916 | 2903 | ||
| 2917 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | ||
| 2918 | chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK; | ||
| 2919 | |||
| 2920 | pr_info("ONFI flash detected\n"); | 2904 | pr_info("ONFI flash detected\n"); |
| 2921 | return 1; | 2905 | return 1; |
| 2922 | } | 2906 | } |
| 2923 | 2907 | ||
| 2924 | /* | 2908 | /* |
| 2909 | * nand_id_has_period - Check if an ID string has a given wraparound period | ||
| 2910 | * @id_data: the ID string | ||
| 2911 | * @arrlen: the length of the @id_data array | ||
| 2912 | * @period: the period of repetition | ||
| 2913 | * | ||
| 2914 | * Check if an ID string is repeated within a given sequence of bytes at | ||
| 2915 | * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a | ||
| 2916 | * period of 3). This is a helper function for nand_id_len(). Returns non-zero | ||
| 2917 | * if the repetition has a period of @period; otherwise, returns zero. | ||
| 2918 | */ | ||
| 2919 | static int nand_id_has_period(u8 *id_data, int arrlen, int period) | ||
| 2920 | { | ||
| 2921 | int i, j; | ||
| 2922 | for (i = 0; i < period; i++) | ||
| 2923 | for (j = i + period; j < arrlen; j += period) | ||
| 2924 | if (id_data[i] != id_data[j]) | ||
| 2925 | return 0; | ||
| 2926 | return 1; | ||
| 2927 | } | ||
| 2928 | |||
| 2929 | /* | ||
| 2930 | * nand_id_len - Get the length of an ID string returned by CMD_READID | ||
| 2931 | * @id_data: the ID string | ||
| 2932 | * @arrlen: the length of the @id_data array | ||
| 2933 | |||
| 2934 | * Returns the length of the ID string, according to known wraparound/trailing | ||
| 2935 | * zero patterns. If no pattern exists, returns the length of the array. | ||
| 2936 | */ | ||
| 2937 | static int nand_id_len(u8 *id_data, int arrlen) | ||
| 2938 | { | ||
| 2939 | int last_nonzero, period; | ||
| 2940 | |||
| 2941 | /* Find last non-zero byte */ | ||
| 2942 | for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) | ||
| 2943 | if (id_data[last_nonzero]) | ||
| 2944 | break; | ||
| 2945 | |||
| 2946 | /* All zeros */ | ||
| 2947 | if (last_nonzero < 0) | ||
| 2948 | return 0; | ||
| 2949 | |||
| 2950 | /* Calculate wraparound period */ | ||
| 2951 | for (period = 1; period < arrlen; period++) | ||
| 2952 | if (nand_id_has_period(id_data, arrlen, period)) | ||
| 2953 | break; | ||
| 2954 | |||
| 2955 | /* There's a repeated pattern */ | ||
| 2956 | if (period < arrlen) | ||
| 2957 | return period; | ||
| 2958 | |||
| 2959 | /* There are trailing zeros */ | ||
| 2960 | if (last_nonzero < arrlen - 1) | ||
| 2961 | return last_nonzero + 1; | ||
| 2962 | |||
| 2963 | /* No pattern detected */ | ||
| 2964 | return arrlen; | ||
| 2965 | } | ||
| 2966 | |||
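A quick illustration of the two patterns nand_id_len() distinguishes, using made-up ID bytes rather than real chip IDs:

    /*
     * Wraparound: the chip keeps re-sending the same bytes, so the string
     * repeats with some period and only that many bytes are meaningful.
     *
     *     u8 a[8] = { 0x2c, 0xda, 0x2c, 0xda, 0x2c, 0xda, 0x2c, 0xda };
     *     nand_id_len(a, 8)  ->  2   (period-2 repetition)
     *
     * Trailing zeros: the chip stops driving the bus, further reads return
     * 0x00, and everything after the last non-zero byte is padding.
     *
     *     u8 b[8] = { 0xec, 0xd3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00 };
     *     nand_id_len(b, 8)  ->  5   (last non-zero byte is b[4])
     */
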
| 2967 | /* | ||
| 2968 | * Many new NAND share similar device ID codes, which represent the size of the | ||
| 2969 | * chip. The rest of the parameters must be decoded according to generic or | ||
| 2970 | * manufacturer-specific "extended ID" decoding patterns. | ||
| 2971 | */ | ||
| 2972 | static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 2973 | u8 id_data[8], int *busw) | ||
| 2974 | { | ||
| 2975 | int extid, id_len; | ||
| 2976 | /* The 3rd id byte holds MLC / multichip data */ | ||
| 2977 | chip->cellinfo = id_data[2]; | ||
| 2978 | /* The 4th id byte is the important one */ | ||
| 2979 | extid = id_data[3]; | ||
| 2980 | |||
| 2981 | id_len = nand_id_len(id_data, 8); | ||
| 2982 | |||
| 2983 | /* | ||
| 2984 | * Field definitions are in the following datasheets: | ||
| 2985 | * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) | ||
| 2986 | * New style (6 byte ID): Samsung K9GAG08U0F (p.44) | ||
| 2987 | * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) | ||
| 2988 | * | ||
| 2989 | * Check for ID length, cell type, and Hynix/Samsung ID to decide what | ||
| 2990 | * to do. | ||
| 2991 | */ | ||
| 2992 | if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) { | ||
| 2993 | /* Calc pagesize */ | ||
| 2994 | mtd->writesize = 2048 << (extid & 0x03); | ||
| 2995 | extid >>= 2; | ||
| 2996 | /* Calc oobsize */ | ||
| 2997 | switch (((extid >> 2) & 0x04) | (extid & 0x03)) { | ||
| 2998 | case 1: | ||
| 2999 | mtd->oobsize = 128; | ||
| 3000 | break; | ||
| 3001 | case 2: | ||
| 3002 | mtd->oobsize = 218; | ||
| 3003 | break; | ||
| 3004 | case 3: | ||
| 3005 | mtd->oobsize = 400; | ||
| 3006 | break; | ||
| 3007 | case 4: | ||
| 3008 | mtd->oobsize = 436; | ||
| 3009 | break; | ||
| 3010 | case 5: | ||
| 3011 | mtd->oobsize = 512; | ||
| 3012 | break; | ||
| 3013 | case 6: | ||
| 3014 | default: /* Other cases are "reserved" (unknown) */ | ||
| 3015 | mtd->oobsize = 640; | ||
| 3016 | break; | ||
| 3017 | } | ||
| 3018 | extid >>= 2; | ||
| 3019 | /* Calc blocksize */ | ||
| 3020 | mtd->erasesize = (128 * 1024) << | ||
| 3021 | (((extid >> 1) & 0x04) | (extid & 0x03)); | ||
| 3022 | *busw = 0; | ||
| 3023 | } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && | ||
| 3024 | (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { | ||
| 3025 | unsigned int tmp; | ||
| 3026 | |||
| 3027 | /* Calc pagesize */ | ||
| 3028 | mtd->writesize = 2048 << (extid & 0x03); | ||
| 3029 | extid >>= 2; | ||
| 3030 | /* Calc oobsize */ | ||
| 3031 | switch (((extid >> 2) & 0x04) | (extid & 0x03)) { | ||
| 3032 | case 0: | ||
| 3033 | mtd->oobsize = 128; | ||
| 3034 | break; | ||
| 3035 | case 1: | ||
| 3036 | mtd->oobsize = 224; | ||
| 3037 | break; | ||
| 3038 | case 2: | ||
| 3039 | mtd->oobsize = 448; | ||
| 3040 | break; | ||
| 3041 | case 3: | ||
| 3042 | mtd->oobsize = 64; | ||
| 3043 | break; | ||
| 3044 | case 4: | ||
| 3045 | mtd->oobsize = 32; | ||
| 3046 | break; | ||
| 3047 | case 5: | ||
| 3048 | mtd->oobsize = 16; | ||
| 3049 | break; | ||
| 3050 | default: | ||
| 3051 | mtd->oobsize = 640; | ||
| 3052 | break; | ||
| 3053 | } | ||
| 3054 | extid >>= 2; | ||
| 3055 | /* Calc blocksize */ | ||
| 3056 | tmp = ((extid >> 1) & 0x04) | (extid & 0x03); | ||
| 3057 | if (tmp < 0x03) | ||
| 3058 | mtd->erasesize = (128 * 1024) << tmp; | ||
| 3059 | else if (tmp == 0x03) | ||
| 3060 | mtd->erasesize = 768 * 1024; | ||
| 3061 | else | ||
| 3062 | mtd->erasesize = (64 * 1024) << tmp; | ||
| 3063 | *busw = 0; | ||
| 3064 | } else { | ||
| 3065 | /* Calc pagesize */ | ||
| 3066 | mtd->writesize = 1024 << (extid & 0x03); | ||
| 3067 | extid >>= 2; | ||
| 3068 | /* Calc oobsize */ | ||
| 3069 | mtd->oobsize = (8 << (extid & 0x01)) * | ||
| 3070 | (mtd->writesize >> 9); | ||
| 3071 | extid >>= 2; | ||
| 3072 | /* Calc blocksize. Blocksize is multiples of 64KiB */ | ||
| 3073 | mtd->erasesize = (64 * 1024) << (extid & 0x03); | ||
| 3074 | extid >>= 2; | ||
| 3075 | /* Get buswidth information */ | ||
| 3076 | *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; | ||
| 3077 | } | ||
| 3078 | } | ||
| 3079 | |||
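For the plain (non-Samsung/Hynix 6-byte) case the 4th ID byte is still decoded field by field, exactly as the old inline code did. Working through a hypothetical extid of 0x95, chosen only for illustration:

    /*
     * extid = 0x95 = 1001 0101b, decoded by the generic branch above:
     *
     *   bits 1:0 = 01 -> writesize = 1024 << 1              = 2048 bytes
     *   bit  2   = 1  -> oobsize   = (8 << 1) * (2048 >> 9) = 64 bytes
     *   bits 5:4 = 01 -> erasesize = (64 * 1024) << 1       = 128 KiB
     *   bit  6   = 0  -> 8-bit bus width
     */
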
| 3080 | /* | ||
| 3081 | * Old devices have chip data hardcoded in the device ID table. nand_decode_id | ||
| 3082 | * decodes a matching ID table entry and assigns the MTD size parameters for | ||
| 3083 | * the chip. | ||
| 3084 | */ | ||
| 3085 | static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 3086 | struct nand_flash_dev *type, u8 id_data[8], | ||
| 3087 | int *busw) | ||
| 3088 | { | ||
| 3089 | int maf_id = id_data[0]; | ||
| 3090 | |||
| 3091 | mtd->erasesize = type->erasesize; | ||
| 3092 | mtd->writesize = type->pagesize; | ||
| 3093 | mtd->oobsize = mtd->writesize / 32; | ||
| 3094 | *busw = type->options & NAND_BUSWIDTH_16; | ||
| 3095 | |||
| 3096 | /* | ||
| 3097 | * Check for Spansion/AMD ID + repeating 5th, 6th byte since | ||
| 3098 | * some Spansion chips have erasesize that conflicts with size | ||
| 3099 | * listed in nand_ids table. | ||
| 3100 | * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) | ||
| 3101 | */ | ||
| 3102 | if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00 | ||
| 3103 | && id_data[6] == 0x00 && id_data[7] == 0x00 | ||
| 3104 | && mtd->writesize == 512) { | ||
| 3105 | mtd->erasesize = 128 * 1024; | ||
| 3106 | mtd->erasesize <<= ((id_data[3] & 0x03) << 1); | ||
| 3107 | } | ||
| 3108 | } | ||
| 3109 | |||
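The Spansion/AMD quirk only triggers for small-page parts whose ID has a non-zero 5th byte followed by zeros; the block size then comes from ID byte 4 instead of the nand_ids table entry. A worked example with made-up ID bytes:

    /*
     * Hypothetical ID dump: 01 xx xx 0d 1d 00 00 00
     * (maf_id 0x01 = AMD/Spansion, writesize 512 from the table entry)
     *
     *   id_data[3] & 0x03 = 0x01
     *   erasesize = (128 * 1024) << (0x01 << 1) = 128 KiB << 2 = 512 KiB
     */
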
| 3110 | /* | ||
| 3111 | * Set the bad block marker/indicator (BBM/BBI) patterns according to some | ||
| 3112 | * heuristic patterns using various detected parameters (e.g., manufacturer, | ||
| 3113 | * page size, cell-type information). | ||
| 3114 | */ | ||
| 3115 | static void nand_decode_bbm_options(struct mtd_info *mtd, | ||
| 3116 | struct nand_chip *chip, u8 id_data[8]) | ||
| 3117 | { | ||
| 3118 | int maf_id = id_data[0]; | ||
| 3119 | |||
| 3120 | /* Set the bad block position */ | ||
| 3121 | if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) | ||
| 3122 | chip->badblockpos = NAND_LARGE_BADBLOCK_POS; | ||
| 3123 | else | ||
| 3124 | chip->badblockpos = NAND_SMALL_BADBLOCK_POS; | ||
| 3125 | |||
| 3126 | /* | ||
| 3127 | * Bad block marker is stored in the last page of each block on Samsung | ||
| 3128 | * and Hynix MLC devices; stored in first two pages of each block on | ||
| 3129 | * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, | ||
| 3130 | * AMD/Spansion, and Macronix. All others scan only the first page. | ||
| 3131 | */ | ||
| 3132 | if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
| 3133 | (maf_id == NAND_MFR_SAMSUNG || | ||
| 3134 | maf_id == NAND_MFR_HYNIX)) | ||
| 3135 | chip->bbt_options |= NAND_BBT_SCANLASTPAGE; | ||
| 3136 | else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
| 3137 | (maf_id == NAND_MFR_SAMSUNG || | ||
| 3138 | maf_id == NAND_MFR_HYNIX || | ||
| 3139 | maf_id == NAND_MFR_TOSHIBA || | ||
| 3140 | maf_id == NAND_MFR_AMD || | ||
| 3141 | maf_id == NAND_MFR_MACRONIX)) || | ||
| 3142 | (mtd->writesize == 2048 && | ||
| 3143 | maf_id == NAND_MFR_MICRON)) | ||
| 3144 | chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; | ||
| 3145 | } | ||
| 3146 | |||
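These heuristics only pick the factory bad-block-marker scanning policy from the ID bytes; a board or controller driver can still adjust chip->bbt_options before nand_scan_tail() runs. A purely illustrative sketch:

    #include <linux/mtd/nand.h>

    /* Hypothetical board-level tweak applied between nand_scan_ident() and
     * nand_scan_tail(): keep the bad block table in flash and also scan the
     * second page of each block, on top of what nand_decode_bbm_options()
     * selected. */
    static void board_adjust_bbt_options(struct nand_chip *chip)
    {
            chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_SCAN2NDPAGE;
    }
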
| 3147 | /* | ||
| 2925 | * Get the flash and manufacturer id and lookup if the type is supported. | 3148 | * Get the flash and manufacturer id and lookup if the type is supported. |
| 2926 | */ | 3149 | */ |
| 2927 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | 3150 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, |
| @@ -2932,7 +3155,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
| 2932 | { | 3155 | { |
| 2933 | int i, maf_idx; | 3156 | int i, maf_idx; |
| 2934 | u8 id_data[8]; | 3157 | u8 id_data[8]; |
| 2935 | int ret; | ||
| 2936 | 3158 | ||
| 2937 | /* Select the device */ | 3159 | /* Select the device */ |
| 2938 | chip->select_chip(mtd, 0); | 3160 | chip->select_chip(mtd, 0); |
| @@ -2959,7 +3181,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
| 2959 | 3181 | ||
| 2960 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); | 3182 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); |
| 2961 | 3183 | ||
| 2962 | for (i = 0; i < 2; i++) | 3184 | /* Read entire ID string */ |
| 3185 | for (i = 0; i < 8; i++) | ||
| 2963 | id_data[i] = chip->read_byte(mtd); | 3186 | id_data[i] = chip->read_byte(mtd); |
| 2964 | 3187 | ||
| 2965 | if (id_data[0] != *maf_id || id_data[1] != *dev_id) { | 3188 | if (id_data[0] != *maf_id || id_data[1] != *dev_id) { |
| @@ -2979,18 +3202,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
| 2979 | chip->onfi_version = 0; | 3202 | chip->onfi_version = 0; |
| 2980 | if (!type->name || !type->pagesize) { | 3203 | if (!type->name || !type->pagesize) { |
| 2981 | /* Check if chip is ONFI compliant */ | 3204 | /* Check if chip is ONFI compliant */ |
| 2982 | ret = nand_flash_detect_onfi(mtd, chip, &busw); | 3205 | if (nand_flash_detect_onfi(mtd, chip, &busw)) |
| 2983 | if (ret) | ||
| 2984 | goto ident_done; | 3206 | goto ident_done; |
| 2985 | } | 3207 | } |
| 2986 | 3208 | ||
| 2987 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); | ||
| 2988 | |||
| 2989 | /* Read entire ID string */ | ||
| 2990 | |||
| 2991 | for (i = 0; i < 8; i++) | ||
| 2992 | id_data[i] = chip->read_byte(mtd); | ||
| 2993 | |||
| 2994 | if (!type->name) | 3209 | if (!type->name) |
| 2995 | return ERR_PTR(-ENODEV); | 3210 | return ERR_PTR(-ENODEV); |
| 2996 | 3211 | ||
| @@ -3003,86 +3218,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
| 3003 | /* Set the pagesize, oobsize, erasesize by the driver */ | 3218 | /* Set the pagesize, oobsize, erasesize by the driver */ |
| 3004 | busw = chip->init_size(mtd, chip, id_data); | 3219 | busw = chip->init_size(mtd, chip, id_data); |
| 3005 | } else if (!type->pagesize) { | 3220 | } else if (!type->pagesize) { |
| 3006 | int extid; | 3221 | /* Decode parameters from extended ID */ |
| 3007 | /* The 3rd id byte holds MLC / multichip data */ | 3222 | nand_decode_ext_id(mtd, chip, id_data, &busw); |
| 3008 | chip->cellinfo = id_data[2]; | ||
| 3009 | /* The 4th id byte is the important one */ | ||
| 3010 | extid = id_data[3]; | ||
| 3011 | |||
| 3012 | /* | ||
| 3013 | * Field definitions are in the following datasheets: | ||
| 3014 | * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) | ||
| 3015 | * New style (6 byte ID): Samsung K9GBG08U0M (p.40) | ||
| 3016 | * | ||
| 3017 | * Check for wraparound + Samsung ID + nonzero 6th byte | ||
| 3018 | * to decide what to do. | ||
| 3019 | */ | ||
| 3020 | if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && | ||
| 3021 | id_data[0] == NAND_MFR_SAMSUNG && | ||
| 3022 | (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
| 3023 | id_data[5] != 0x00) { | ||
| 3024 | /* Calc pagesize */ | ||
| 3025 | mtd->writesize = 2048 << (extid & 0x03); | ||
| 3026 | extid >>= 2; | ||
| 3027 | /* Calc oobsize */ | ||
| 3028 | switch (extid & 0x03) { | ||
| 3029 | case 1: | ||
| 3030 | mtd->oobsize = 128; | ||
| 3031 | break; | ||
| 3032 | case 2: | ||
| 3033 | mtd->oobsize = 218; | ||
| 3034 | break; | ||
| 3035 | case 3: | ||
| 3036 | mtd->oobsize = 400; | ||
| 3037 | break; | ||
| 3038 | default: | ||
| 3039 | mtd->oobsize = 436; | ||
| 3040 | break; | ||
| 3041 | } | ||
| 3042 | extid >>= 2; | ||
| 3043 | /* Calc blocksize */ | ||
| 3044 | mtd->erasesize = (128 * 1024) << | ||
| 3045 | (((extid >> 1) & 0x04) | (extid & 0x03)); | ||
| 3046 | busw = 0; | ||
| 3047 | } else { | ||
| 3048 | /* Calc pagesize */ | ||
| 3049 | mtd->writesize = 1024 << (extid & 0x03); | ||
| 3050 | extid >>= 2; | ||
| 3051 | /* Calc oobsize */ | ||
| 3052 | mtd->oobsize = (8 << (extid & 0x01)) * | ||
| 3053 | (mtd->writesize >> 9); | ||
| 3054 | extid >>= 2; | ||
| 3055 | /* Calc blocksize. Blocksize is multiples of 64KiB */ | ||
| 3056 | mtd->erasesize = (64 * 1024) << (extid & 0x03); | ||
| 3057 | extid >>= 2; | ||
| 3058 | /* Get buswidth information */ | ||
| 3059 | busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; | ||
| 3060 | } | ||
| 3061 | } else { | 3223 | } else { |
| 3062 | /* | 3224 | nand_decode_id(mtd, chip, type, id_data, &busw); |
| 3063 | * Old devices have chip data hardcoded in the device id table. | ||
| 3064 | */ | ||
| 3065 | mtd->erasesize = type->erasesize; | ||
| 3066 | mtd->writesize = type->pagesize; | ||
| 3067 | mtd->oobsize = mtd->writesize / 32; | ||
| 3068 | busw = type->options & NAND_BUSWIDTH_16; | ||
| 3069 | |||
| 3070 | /* | ||
| 3071 | * Check for Spansion/AMD ID + repeating 5th, 6th byte since | ||
| 3072 | * some Spansion chips have erasesize that conflicts with size | ||
| 3073 | * listed in nand_ids table. | ||
| 3074 | * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) | ||
| 3075 | */ | ||
| 3076 | if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && | ||
| 3077 | id_data[5] == 0x00 && id_data[6] == 0x00 && | ||
| 3078 | id_data[7] == 0x00 && mtd->writesize == 512) { | ||
| 3079 | mtd->erasesize = 128 * 1024; | ||
| 3080 | mtd->erasesize <<= ((id_data[3] & 0x03) << 1); | ||
| 3081 | } | ||
| 3082 | } | 3225 | } |
| 3083 | /* Get chip options, preserve non chip based options */ | 3226 | /* Get chip options */ |
| 3084 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | 3227 | chip->options |= type->options; |
| 3085 | chip->options |= type->options & NAND_CHIPOPTIONS_MSK; | ||
| 3086 | 3228 | ||
| 3087 | /* | 3229 | /* |
| 3088 | * Check if chip is not a Samsung device. Do not clear the | 3230 | * Check if chip is not a Samsung device. Do not clear the |
| @@ -3112,6 +3254,8 @@ ident_done: | |||
| 3112 | return ERR_PTR(-EINVAL); | 3254 | return ERR_PTR(-EINVAL); |
| 3113 | } | 3255 | } |
| 3114 | 3256 | ||
| 3257 | nand_decode_bbm_options(mtd, chip, id_data); | ||
| 3258 | |||
| 3115 | /* Calculate the address shift from the page size */ | 3259 | /* Calculate the address shift from the page size */ |
| 3116 | chip->page_shift = ffs(mtd->writesize) - 1; | 3260 | chip->page_shift = ffs(mtd->writesize) - 1; |
| 3117 | /* Convert chipsize to number of pages per chip -1 */ | 3261 | /* Convert chipsize to number of pages per chip -1 */ |
| @@ -3128,33 +3272,6 @@ ident_done: | |||
| 3128 | 3272 | ||
| 3129 | chip->badblockbits = 8; | 3273 | chip->badblockbits = 8; |
| 3130 | 3274 | ||
| 3131 | /* Set the bad block position */ | ||
| 3132 | if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) | ||
| 3133 | chip->badblockpos = NAND_LARGE_BADBLOCK_POS; | ||
| 3134 | else | ||
| 3135 | chip->badblockpos = NAND_SMALL_BADBLOCK_POS; | ||
| 3136 | |||
| 3137 | /* | ||
| 3138 | * Bad block marker is stored in the last page of each block | ||
| 3139 | * on Samsung and Hynix MLC devices; stored in first two pages | ||
| 3140 | * of each block on Micron devices with 2KiB pages and on | ||
| 3141 | * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix. | ||
| 3142 | * All others scan only the first page. | ||
| 3143 | */ | ||
| 3144 | if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
| 3145 | (*maf_id == NAND_MFR_SAMSUNG || | ||
| 3146 | *maf_id == NAND_MFR_HYNIX)) | ||
| 3147 | chip->bbt_options |= NAND_BBT_SCANLASTPAGE; | ||
| 3148 | else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
| 3149 | (*maf_id == NAND_MFR_SAMSUNG || | ||
| 3150 | *maf_id == NAND_MFR_HYNIX || | ||
| 3151 | *maf_id == NAND_MFR_TOSHIBA || | ||
| 3152 | *maf_id == NAND_MFR_AMD || | ||
| 3153 | *maf_id == NAND_MFR_MACRONIX)) || | ||
| 3154 | (mtd->writesize == 2048 && | ||
| 3155 | *maf_id == NAND_MFR_MICRON)) | ||
| 3156 | chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; | ||
| 3157 | |||
| 3158 | /* Check for AND chips with 4 page planes */ | 3275 | /* Check for AND chips with 4 page planes */ |
| 3159 | if (chip->options & NAND_4PAGE_ARRAY) | 3276 | if (chip->options & NAND_4PAGE_ARRAY) |
| 3160 | chip->erase_cmd = multi_erase_cmd; | 3277 | chip->erase_cmd = multi_erase_cmd; |
| @@ -3284,6 +3401,12 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
| 3284 | if (!chip->write_page) | 3401 | if (!chip->write_page) |
| 3285 | chip->write_page = nand_write_page; | 3402 | chip->write_page = nand_write_page; |
| 3286 | 3403 | ||
| 3404 | /* set for ONFI nand */ | ||
| 3405 | if (!chip->onfi_set_features) | ||
| 3406 | chip->onfi_set_features = nand_onfi_set_features; | ||
| 3407 | if (!chip->onfi_get_features) | ||
| 3408 | chip->onfi_get_features = nand_onfi_get_features; | ||
| 3409 | |||
| 3287 | /* | 3410 | /* |
| 3288 | * Check ECC mode, default to software if 3byte/512byte hardware ECC is | 3411 | * Check ECC mode, default to software if 3byte/512byte hardware ECC is |
| 3289 | * selected and we have 256 byte pagesize fallback to software ECC | 3412 | * selected and we have 256 byte pagesize fallback to software ECC |
| @@ -3477,6 +3600,10 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
| 3477 | /* Invalidate the pagebuffer reference */ | 3600 | /* Invalidate the pagebuffer reference */ |
| 3478 | chip->pagebuf = -1; | 3601 | chip->pagebuf = -1; |
| 3479 | 3602 | ||
| 3603 | /* Large page NAND with SOFT_ECC should support subpage reads */ | ||
| 3604 | if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9)) | ||
| 3605 | chip->options |= NAND_SUBPAGE_READ; | ||
| 3606 | |||
| 3480 | /* Fill in remaining MTD driver data */ | 3607 | /* Fill in remaining MTD driver data */ |
| 3481 | mtd->type = MTD_NANDFLASH; | 3608 | mtd->type = MTD_NANDFLASH; |
| 3482 | mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : | 3609 | mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : |
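The new NAND_SUBPAGE_READ default lets large-page chips running soft ECC serve partial-page reads one ECC step at a time instead of always pulling in the whole page. Nothing changes for MTD users except efficiency; a partial read is still an ordinary mtd_read() (sketch, helper name invented):

    #include <linux/mtd/mtd.h>

    /* Read a single 512-byte ECC step out of a larger page; with
     * NAND_SUBPAGE_READ set the core can restrict the transfer and the
     * software ECC correction to that step. */
    static int read_one_step(struct mtd_info *mtd, loff_t from, uint8_t *buf)
    {
            size_t retlen;
            int ret = mtd_read(mtd, from, 512, &retlen, buf);

            if (ret && !mtd_is_bitflip(ret))
                    return ret;
            return retlen == 512 ? 0 : -EIO;
    }
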
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 30d1319ff065..916d6e9c0ab1 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Overview: | 4 | * Overview: |
| 5 | * Bad block table support for the NAND driver | 5 | * Bad block table support for the NAND driver |
| 6 | * | 6 | * |
| 7 | * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) | 7 | * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de) |
| 8 | * | 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -22,7 +22,7 @@ | |||
| 22 | * BBT on flash. If a BBT is found then the contents are read and the memory | 22 | * BBT on flash. If a BBT is found then the contents are read and the memory |
| 23 | * based BBT is created. If a mirrored BBT is selected then the mirror is | 23 | * based BBT is created. If a mirrored BBT is selected then the mirror is |
| 24 | * searched too and the versions are compared. If the mirror has a greater | 24 | * searched too and the versions are compared. If the mirror has a greater |
| 25 | * version number than the mirror BBT is used to build the memory based BBT. | 25 | * version number, then the mirror BBT is used to build the memory based BBT. |
| 26 | * If the tables are not versioned, then we "or" the bad block information. | 26 | * If the tables are not versioned, then we "or" the bad block information. |
| 27 | * If one of the BBTs is out of date or does not exist it is (re)created. | 27 | * If one of the BBTs is out of date or does not exist it is (re)created. |
| 28 | * If no BBT exists at all then the device is scanned for factory marked | 28 | * If no BBT exists at all then the device is scanned for factory marked |
| @@ -62,21 +62,20 @@ | |||
| 62 | #include <linux/slab.h> | 62 | #include <linux/slab.h> |
| 63 | #include <linux/types.h> | 63 | #include <linux/types.h> |
| 64 | #include <linux/mtd/mtd.h> | 64 | #include <linux/mtd/mtd.h> |
| 65 | #include <linux/mtd/bbm.h> | ||
| 65 | #include <linux/mtd/nand.h> | 66 | #include <linux/mtd/nand.h> |
| 66 | #include <linux/mtd/nand_ecc.h> | 67 | #include <linux/mtd/nand_ecc.h> |
| 67 | #include <linux/bitops.h> | 68 | #include <linux/bitops.h> |
| 68 | #include <linux/delay.h> | 69 | #include <linux/delay.h> |
| 69 | #include <linux/vmalloc.h> | 70 | #include <linux/vmalloc.h> |
| 70 | #include <linux/export.h> | 71 | #include <linux/export.h> |
| 72 | #include <linux/string.h> | ||
| 71 | 73 | ||
| 72 | static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) | 74 | static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) |
| 73 | { | 75 | { |
| 74 | int ret; | 76 | if (memcmp(buf, td->pattern, td->len)) |
| 75 | 77 | return -1; | |
| 76 | ret = memcmp(buf, td->pattern, td->len); | 78 | return 0; |
| 77 | if (!ret) | ||
| 78 | return ret; | ||
| 79 | return -1; | ||
| 80 | } | 79 | } |
| 81 | 80 | ||
| 82 | /** | 81 | /** |
| @@ -92,19 +91,16 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) | |||
| 92 | */ | 91 | */ |
| 93 | static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) | 92 | static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) |
| 94 | { | 93 | { |
| 95 | int i, end = 0; | 94 | int end = 0; |
| 96 | uint8_t *p = buf; | 95 | uint8_t *p = buf; |
| 97 | 96 | ||
| 98 | if (td->options & NAND_BBT_NO_OOB) | 97 | if (td->options & NAND_BBT_NO_OOB) |
| 99 | return check_pattern_no_oob(buf, td); | 98 | return check_pattern_no_oob(buf, td); |
| 100 | 99 | ||
| 101 | end = paglen + td->offs; | 100 | end = paglen + td->offs; |
| 102 | if (td->options & NAND_BBT_SCANEMPTY) { | 101 | if (td->options & NAND_BBT_SCANEMPTY) |
| 103 | for (i = 0; i < end; i++) { | 102 | if (memchr_inv(p, 0xff, end)) |
| 104 | if (p[i] != 0xff) | 103 | return -1; |
| 105 | return -1; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | p += end; | 104 | p += end; |
| 109 | 105 | ||
| 110 | /* Compare the pattern */ | 106 | /* Compare the pattern */ |
| @@ -114,10 +110,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc | |||
| 114 | if (td->options & NAND_BBT_SCANEMPTY) { | 110 | if (td->options & NAND_BBT_SCANEMPTY) { |
| 115 | p += td->len; | 111 | p += td->len; |
| 116 | end += td->len; | 112 | end += td->len; |
| 117 | for (i = end; i < len; i++) { | 113 | if (memchr_inv(p, 0xff, len - end)) |
| 118 | if (*p++ != 0xff) | 114 | return -1; |
| 119 | return -1; | ||
| 120 | } | ||
| 121 | } | 115 | } |
| 122 | return 0; | 116 | return 0; |
| 123 | } | 117 | } |
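memchr_inv() — the reason for the new <linux/string.h> include above — returns a pointer to the first byte that differs from the given value, or NULL when the whole region matches, which is exactly the "is this still erased 0xff?" test the open-coded loops performed. A tiny sketch:

    #include <linux/string.h>
    #include <linux/types.h>

    /* True if every byte in the region still reads as erased flash (0xff). */
    static bool region_is_erased(const uint8_t *buf, size_t len)
    {
            return memchr_inv(buf, 0xff, len) == NULL;
    }
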
| @@ -133,14 +127,9 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc | |||
| 133 | */ | 127 | */ |
| 134 | static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) | 128 | static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) |
| 135 | { | 129 | { |
| 136 | int i; | ||
| 137 | uint8_t *p = buf; | ||
| 138 | |||
| 139 | /* Compare the pattern */ | 130 | /* Compare the pattern */ |
| 140 | for (i = 0; i < td->len; i++) { | 131 | if (memcmp(buf + td->offs, td->pattern, td->len)) |
| 141 | if (p[td->offs + i] != td->pattern[i]) | 132 | return -1; |
| 142 | return -1; | ||
| 143 | } | ||
| 144 | return 0; | 133 | return 0; |
| 145 | } | 134 | } |
| 146 | 135 | ||
| @@ -288,7 +277,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc | |||
| 288 | } | 277 | } |
| 289 | 278 | ||
| 290 | /* BBT marker is in the first page, no OOB */ | 279 | /* BBT marker is in the first page, no OOB */ |
| 291 | static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | 280 | static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, |
| 292 | struct nand_bbt_descr *td) | 281 | struct nand_bbt_descr *td) |
| 293 | { | 282 | { |
| 294 | size_t retlen; | 283 | size_t retlen; |
| @@ -301,14 +290,24 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | |||
| 301 | return mtd_read(mtd, offs, len, &retlen, buf); | 290 | return mtd_read(mtd, offs, len, &retlen, buf); |
| 302 | } | 291 | } |
| 303 | 292 | ||
| 304 | /* Scan read raw data from flash */ | 293 | /** |
| 305 | static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | 294 | * scan_read_oob - [GENERIC] Scan data+OOB region to buffer |
| 295 | * @mtd: MTD device structure | ||
| 296 | * @buf: temporary buffer | ||
| 297 | * @offs: offset at which to scan | ||
| 298 | * @len: length of data region to read | ||
| 299 | * | ||
| 300 | * Scan read data from data+OOB. May traverse multiple pages, interleaving | ||
| 301 | * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest" | ||
| 302 | * ECC condition (error or bitflip). May quit on the first (non-ECC) error. | ||
| 303 | */ | ||
| 304 | static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | ||
| 306 | size_t len) | 305 | size_t len) |
| 307 | { | 306 | { |
| 308 | struct mtd_oob_ops ops; | 307 | struct mtd_oob_ops ops; |
| 309 | int res; | 308 | int res, ret = 0; |
| 310 | 309 | ||
| 311 | ops.mode = MTD_OPS_RAW; | 310 | ops.mode = MTD_OPS_PLACE_OOB; |
| 312 | ops.ooboffs = 0; | 311 | ops.ooboffs = 0; |
| 313 | ops.ooblen = mtd->oobsize; | 312 | ops.ooblen = mtd->oobsize; |
| 314 | 313 | ||
| @@ -318,24 +317,27 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | |||
| 318 | ops.oobbuf = buf + ops.len; | 317 | ops.oobbuf = buf + ops.len; |
| 319 | 318 | ||
| 320 | res = mtd_read_oob(mtd, offs, &ops); | 319 | res = mtd_read_oob(mtd, offs, &ops); |
| 321 | 320 | if (res) { | |
| 322 | if (res) | 321 | if (!mtd_is_bitflip_or_eccerr(res)) |
| 323 | return res; | 322 | return res; |
| 323 | else if (mtd_is_eccerr(res) || !ret) | ||
| 324 | ret = res; | ||
| 325 | } | ||
| 324 | 326 | ||
| 325 | buf += mtd->oobsize + mtd->writesize; | 327 | buf += mtd->oobsize + mtd->writesize; |
| 326 | len -= mtd->writesize; | 328 | len -= mtd->writesize; |
| 327 | offs += mtd->writesize; | 329 | offs += mtd->writesize; |
| 328 | } | 330 | } |
| 329 | return 0; | 331 | return ret; |
| 330 | } | 332 | } |
| 331 | 333 | ||
| 332 | static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | 334 | static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs, |
| 333 | size_t len, struct nand_bbt_descr *td) | 335 | size_t len, struct nand_bbt_descr *td) |
| 334 | { | 336 | { |
| 335 | if (td->options & NAND_BBT_NO_OOB) | 337 | if (td->options & NAND_BBT_NO_OOB) |
| 336 | return scan_read_raw_data(mtd, buf, offs, td); | 338 | return scan_read_data(mtd, buf, offs, td); |
| 337 | else | 339 | else |
| 338 | return scan_read_raw_oob(mtd, buf, offs, len); | 340 | return scan_read_oob(mtd, buf, offs, len); |
| 339 | } | 341 | } |
| 340 | 342 | ||
| 341 | /* Scan write data with oob to flash */ | 343 | /* Scan write data with oob to flash */ |
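The helpers used in scan_read_oob() come from <linux/mtd/mtd.h>: mtd_is_bitflip() matches -EUCLEAN (corrected bitflips), mtd_is_eccerr() matches -EBADMSG (uncorrectable data) and mtd_is_bitflip_or_eccerr() matches either. The loop now remembers the worst ECC result across pages, so an uncorrectable error is not masked by a later clean read. The same aggregation in isolation, as a sketch:

    #include <linux/mtd/mtd.h>

    /* Fold one per-page read result into a running "worst ECC status":
     * a hard (non-ECC) error is reported as-is, an ECC failure outranks a
     * bitflip, and a bitflip is only recorded if nothing worse was seen. */
    static int fold_ecc_status(int running, int res)
    {
            if (res && !mtd_is_bitflip_or_eccerr(res))
                    return res;
            if (mtd_is_eccerr(res) || !running)
                    return res ? res : running;
            return running;
    }
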
| @@ -373,14 +375,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
| 373 | * Read the bad block table(s) for all chips starting at a given page. We | 375 | * Read the bad block table(s) for all chips starting at a given page. We |
| 374 | * assume that the bbt bits are in consecutive order. | 376 | * assume that the bbt bits are in consecutive order. |
| 375 | */ | 377 | */ |
| 376 | static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | 378 | static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, |
| 377 | struct nand_bbt_descr *td, struct nand_bbt_descr *md) | 379 | struct nand_bbt_descr *td, struct nand_bbt_descr *md) |
| 378 | { | 380 | { |
| 379 | struct nand_chip *this = mtd->priv; | 381 | struct nand_chip *this = mtd->priv; |
| 380 | 382 | ||
| 381 | /* Read the primary version, if available */ | 383 | /* Read the primary version, if available */ |
| 382 | if (td->options & NAND_BBT_VERSION) { | 384 | if (td->options & NAND_BBT_VERSION) { |
| 383 | scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, | 385 | scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift, |
| 384 | mtd->writesize, td); | 386 | mtd->writesize, td); |
| 385 | td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; | 387 | td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; |
| 386 | pr_info("Bad block table at page %d, version 0x%02X\n", | 388 | pr_info("Bad block table at page %d, version 0x%02X\n", |
| @@ -389,28 +391,27 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | |||
| 389 | 391 | ||
| 390 | /* Read the mirror version, if available */ | 392 | /* Read the mirror version, if available */ |
| 391 | if (md && (md->options & NAND_BBT_VERSION)) { | 393 | if (md && (md->options & NAND_BBT_VERSION)) { |
| 392 | scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, | 394 | scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, |
| 393 | mtd->writesize, td); | 395 | mtd->writesize, md); |
| 394 | md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; | 396 | md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; |
| 395 | pr_info("Bad block table at page %d, version 0x%02X\n", | 397 | pr_info("Bad block table at page %d, version 0x%02X\n", |
| 396 | md->pages[0], md->version[0]); | 398 | md->pages[0], md->version[0]); |
| 397 | } | 399 | } |
| 398 | return 1; | ||
| 399 | } | 400 | } |
| 400 | 401 | ||
| 401 | /* Scan a given block full */ | 402 | /* Scan a given block full */ |
| 402 | static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, | 403 | static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, |
| 403 | loff_t offs, uint8_t *buf, size_t readlen, | 404 | loff_t offs, uint8_t *buf, size_t readlen, |
| 404 | int scanlen, int len) | 405 | int scanlen, int numpages) |
| 405 | { | 406 | { |
| 406 | int ret, j; | 407 | int ret, j; |
| 407 | 408 | ||
| 408 | ret = scan_read_raw_oob(mtd, buf, offs, readlen); | 409 | ret = scan_read_oob(mtd, buf, offs, readlen); |
| 409 | /* Ignore ECC errors when checking for BBM */ | 410 | /* Ignore ECC errors when checking for BBM */ |
| 410 | if (ret && !mtd_is_bitflip_or_eccerr(ret)) | 411 | if (ret && !mtd_is_bitflip_or_eccerr(ret)) |
| 411 | return ret; | 412 | return ret; |
| 412 | 413 | ||
| 413 | for (j = 0; j < len; j++, buf += scanlen) { | 414 | for (j = 0; j < numpages; j++, buf += scanlen) { |
| 414 | if (check_pattern(buf, scanlen, mtd->writesize, bd)) | 415 | if (check_pattern(buf, scanlen, mtd->writesize, bd)) |
| 415 | return 1; | 416 | return 1; |
| 416 | } | 417 | } |
| @@ -419,7 +420,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
| 419 | 420 | ||
| 420 | /* Scan a given block partially */ | 421 | /* Scan a given block partially */ |
| 421 | static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, | 422 | static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, |
| 422 | loff_t offs, uint8_t *buf, int len) | 423 | loff_t offs, uint8_t *buf, int numpages) |
| 423 | { | 424 | { |
| 424 | struct mtd_oob_ops ops; | 425 | struct mtd_oob_ops ops; |
| 425 | int j, ret; | 426 | int j, ret; |
| @@ -430,7 +431,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
| 430 | ops.datbuf = NULL; | 431 | ops.datbuf = NULL; |
| 431 | ops.mode = MTD_OPS_PLACE_OOB; | 432 | ops.mode = MTD_OPS_PLACE_OOB; |
| 432 | 433 | ||
| 433 | for (j = 0; j < len; j++) { | 434 | for (j = 0; j < numpages; j++) { |
| 434 | /* | 435 | /* |
| 435 | * Read the full oob until read_oob is fixed to handle single | 436 | * Read the full oob until read_oob is fixed to handle single |
| 436 | * byte reads for 16 bit buswidth. | 437 | * byte reads for 16 bit buswidth. |
| @@ -463,7 +464,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
| 463 | struct nand_bbt_descr *bd, int chip) | 464 | struct nand_bbt_descr *bd, int chip) |
| 464 | { | 465 | { |
| 465 | struct nand_chip *this = mtd->priv; | 466 | struct nand_chip *this = mtd->priv; |
| 466 | int i, numblocks, len, scanlen; | 467 | int i, numblocks, numpages, scanlen; |
| 467 | int startblock; | 468 | int startblock; |
| 468 | loff_t from; | 469 | loff_t from; |
| 469 | size_t readlen; | 470 | size_t readlen; |
| @@ -471,11 +472,11 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
| 471 | pr_info("Scanning device for bad blocks\n"); | 472 | pr_info("Scanning device for bad blocks\n"); |
| 472 | 473 | ||
| 473 | if (bd->options & NAND_BBT_SCANALLPAGES) | 474 | if (bd->options & NAND_BBT_SCANALLPAGES) |
| 474 | len = 1 << (this->bbt_erase_shift - this->page_shift); | 475 | numpages = 1 << (this->bbt_erase_shift - this->page_shift); |
| 475 | else if (bd->options & NAND_BBT_SCAN2NDPAGE) | 476 | else if (bd->options & NAND_BBT_SCAN2NDPAGE) |
| 476 | len = 2; | 477 | numpages = 2; |
| 477 | else | 478 | else |
| 478 | len = 1; | 479 | numpages = 1; |
| 479 | 480 | ||
| 480 | if (!(bd->options & NAND_BBT_SCANEMPTY)) { | 481 | if (!(bd->options & NAND_BBT_SCANEMPTY)) { |
| 481 | /* We need only read few bytes from the OOB area */ | 482 | /* We need only read few bytes from the OOB area */ |
| @@ -484,7 +485,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
| 484 | } else { | 485 | } else { |
| 485 | /* Full page content should be read */ | 486 | /* Full page content should be read */ |
| 486 | scanlen = mtd->writesize + mtd->oobsize; | 487 | scanlen = mtd->writesize + mtd->oobsize; |
| 487 | readlen = len * mtd->writesize; | 488 | readlen = numpages * mtd->writesize; |
| 488 | } | 489 | } |
| 489 | 490 | ||
| 490 | if (chip == -1) { | 491 | if (chip == -1) { |
| @@ -508,7 +509,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
| 508 | } | 509 | } |
| 509 | 510 | ||
| 510 | if (this->bbt_options & NAND_BBT_SCANLASTPAGE) | 511 | if (this->bbt_options & NAND_BBT_SCANLASTPAGE) |
| 511 | from += mtd->erasesize - (mtd->writesize * len); | 512 | from += mtd->erasesize - (mtd->writesize * numpages); |
| 512 | 513 | ||
| 513 | for (i = startblock; i < numblocks;) { | 514 | for (i = startblock; i < numblocks;) { |
| 514 | int ret; | 515 | int ret; |
| @@ -517,9 +518,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
| 517 | 518 | ||
| 518 | if (bd->options & NAND_BBT_SCANALLPAGES) | 519 | if (bd->options & NAND_BBT_SCANALLPAGES) |
| 519 | ret = scan_block_full(mtd, bd, from, buf, readlen, | 520 | ret = scan_block_full(mtd, bd, from, buf, readlen, |
| 520 | scanlen, len); | 521 | scanlen, numpages); |
| 521 | else | 522 | else |
| 522 | ret = scan_block_fast(mtd, bd, from, buf, len); | 523 | ret = scan_block_fast(mtd, bd, from, buf, numpages); |
| 523 | 524 | ||
| 524 | if (ret < 0) | 525 | if (ret < 0) |
| 525 | return ret; | 526 | return ret; |
| @@ -594,7 +595,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
| 594 | loff_t offs = (loff_t)actblock << this->bbt_erase_shift; | 595 | loff_t offs = (loff_t)actblock << this->bbt_erase_shift; |
| 595 | 596 | ||
| 596 | /* Read first page */ | 597 | /* Read first page */ |
| 597 | scan_read_raw(mtd, buf, offs, mtd->writesize, td); | 598 | scan_read(mtd, buf, offs, mtd->writesize, td); |
| 598 | if (!check_pattern(buf, scanlen, mtd->writesize, td)) { | 599 | if (!check_pattern(buf, scanlen, mtd->writesize, td)) { |
| 599 | td->pages[i] = actblock << blocktopage; | 600 | td->pages[i] = actblock << blocktopage; |
| 600 | if (td->options & NAND_BBT_VERSION) { | 601 | if (td->options & NAND_BBT_VERSION) { |
| @@ -626,7 +627,9 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
| 626 | * | 627 | * |
| 627 | * Search and read the bad block table(s). | 628 | * Search and read the bad block table(s). |
| 628 | */ | 629 | */ |
| 629 | static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) | 630 | static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf, |
| 631 | struct nand_bbt_descr *td, | ||
| 632 | struct nand_bbt_descr *md) | ||
| 630 | { | 633 | { |
| 631 | /* Search the primary table */ | 634 | /* Search the primary table */ |
| 632 | search_bbt(mtd, buf, td); | 635 | search_bbt(mtd, buf, td); |
| @@ -634,9 +637,6 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt | |||
| 634 | /* Search the mirror table */ | 637 | /* Search the mirror table */ |
| 635 | if (md) | 638 | if (md) |
| 636 | search_bbt(mtd, buf, md); | 639 | search_bbt(mtd, buf, md); |
| 637 | |||
| 638 | /* Force result check */ | ||
| 639 | return 1; | ||
| 640 | } | 640 | } |
| 641 | 641 | ||
| 642 | /** | 642 | /** |
| @@ -1162,14 +1162,13 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
| 1162 | 1162 | ||
| 1163 | /* Is the bbt at a given page? */ | 1163 | /* Is the bbt at a given page? */ |
| 1164 | if (td->options & NAND_BBT_ABSPAGE) { | 1164 | if (td->options & NAND_BBT_ABSPAGE) { |
| 1165 | res = read_abs_bbts(mtd, buf, td, md); | 1165 | read_abs_bbts(mtd, buf, td, md); |
| 1166 | } else { | 1166 | } else { |
| 1167 | /* Search the bad block table using a pattern in oob */ | 1167 | /* Search the bad block table using a pattern in oob */ |
| 1168 | res = search_read_bbts(mtd, buf, td, md); | 1168 | search_read_bbts(mtd, buf, td, md); |
| 1169 | } | 1169 | } |
| 1170 | 1170 | ||
| 1171 | if (res) | 1171 | res = check_create(mtd, buf, bd); |
| 1172 | res = check_create(mtd, buf, bd); | ||
| 1173 | 1172 | ||
| 1174 | /* Prevent the bbt regions from erasing / writing */ | 1173 | /* Prevent the bbt regions from erasing / writing */ |
| 1175 | mark_bbt_region(mtd, td); | 1174 | mark_bbt_region(mtd, td); |
| @@ -1260,7 +1259,7 @@ static struct nand_bbt_descr bbt_main_descr = { | |||
| 1260 | .offs = 8, | 1259 | .offs = 8, |
| 1261 | .len = 4, | 1260 | .len = 4, |
| 1262 | .veroffs = 12, | 1261 | .veroffs = 12, |
| 1263 | .maxblocks = 4, | 1262 | .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, |
| 1264 | .pattern = bbt_pattern | 1263 | .pattern = bbt_pattern |
| 1265 | }; | 1264 | }; |
| 1266 | 1265 | ||
| @@ -1270,27 +1269,27 @@ static struct nand_bbt_descr bbt_mirror_descr = { | |||
| 1270 | .offs = 8, | 1269 | .offs = 8, |
| 1271 | .len = 4, | 1270 | .len = 4, |
| 1272 | .veroffs = 12, | 1271 | .veroffs = 12, |
| 1273 | .maxblocks = 4, | 1272 | .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, |
| 1274 | .pattern = mirror_pattern | 1273 | .pattern = mirror_pattern |
| 1275 | }; | 1274 | }; |
| 1276 | 1275 | ||
| 1277 | static struct nand_bbt_descr bbt_main_no_bbt_descr = { | 1276 | static struct nand_bbt_descr bbt_main_no_oob_descr = { |
| 1278 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | 1277 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
| 1279 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP | 1278 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP |
| 1280 | | NAND_BBT_NO_OOB, | 1279 | | NAND_BBT_NO_OOB, |
| 1281 | .len = 4, | 1280 | .len = 4, |
| 1282 | .veroffs = 4, | 1281 | .veroffs = 4, |
| 1283 | .maxblocks = 4, | 1282 | .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, |
| 1284 | .pattern = bbt_pattern | 1283 | .pattern = bbt_pattern |
| 1285 | }; | 1284 | }; |
| 1286 | 1285 | ||
| 1287 | static struct nand_bbt_descr bbt_mirror_no_bbt_descr = { | 1286 | static struct nand_bbt_descr bbt_mirror_no_oob_descr = { |
| 1288 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | 1287 | .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
| 1289 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP | 1288 | | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP |
| 1290 | | NAND_BBT_NO_OOB, | 1289 | | NAND_BBT_NO_OOB, |
| 1291 | .len = 4, | 1290 | .len = 4, |
| 1292 | .veroffs = 4, | 1291 | .veroffs = 4, |
| 1293 | .maxblocks = 4, | 1292 | .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, |
| 1294 | .pattern = mirror_pattern | 1293 | .pattern = mirror_pattern |
| 1295 | }; | 1294 | }; |
| 1296 | 1295 | ||
| @@ -1355,8 +1354,8 @@ int nand_default_bbt(struct mtd_info *mtd) | |||
| 1355 | /* Use the default pattern descriptors */ | 1354 | /* Use the default pattern descriptors */ |
| 1356 | if (!this->bbt_td) { | 1355 | if (!this->bbt_td) { |
| 1357 | if (this->bbt_options & NAND_BBT_NO_OOB) { | 1356 | if (this->bbt_options & NAND_BBT_NO_OOB) { |
| 1358 | this->bbt_td = &bbt_main_no_bbt_descr; | 1357 | this->bbt_td = &bbt_main_no_oob_descr; |
| 1359 | this->bbt_md = &bbt_mirror_no_bbt_descr; | 1358 | this->bbt_md = &bbt_mirror_no_oob_descr; |
| 1360 | } else { | 1359 | } else { |
| 1361 | this->bbt_td = &bbt_main_descr; | 1360 | this->bbt_td = &bbt_main_descr; |
| 1362 | this->bbt_md = &bbt_mirror_descr; | 1361 | this->bbt_md = &bbt_mirror_descr; |
| @@ -1406,3 +1405,4 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) | |||
| 1406 | 1405 | ||
| 1407 | EXPORT_SYMBOL(nand_scan_bbt); | 1406 | EXPORT_SYMBOL(nand_scan_bbt); |
| 1408 | EXPORT_SYMBOL(nand_default_bbt); | 1407 | EXPORT_SYMBOL(nand_default_bbt); |
| 1408 | EXPORT_SYMBOL_GPL(nand_update_bbt); | ||
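nand_update_bbt() is now exported (GPL-only), so controller drivers built as modules can push an updated in-memory BBT back out to the flash-based table(s) themselves. A hedged sketch of the call — the helper around it is invented:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    /* After a driver has updated its in-memory chip->bbt entry for the block
     * containing @offs, one call rewrites the affected table (and mirror). */
    static int mydrv_flush_bbt(struct mtd_info *mtd, loff_t offs)
    {
            return nand_update_bbt(mtd, offs);
    }
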
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c deleted file mode 100644 index 46a6bc9c4b74..000000000000 --- a/drivers/mtd/nand/nand_bcm_umi.c +++ /dev/null | |||
| @@ -1,149 +0,0 @@ | |||
| 1 | /***************************************************************************** | ||
| 2 | * Copyright 2004 - 2009 Broadcom Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Unless you and Broadcom execute a separate written software license | ||
| 5 | * agreement governing use of this software, this software is licensed to you | ||
| 6 | * under the terms of the GNU General Public License version 2, available at | ||
| 7 | * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). | ||
| 8 | * | ||
| 9 | * Notwithstanding the above, under no circumstances may you combine this | ||
| 10 | * software in any way with any other Broadcom software provided under a | ||
| 11 | * license other than the GPL, without Broadcom's express prior written | ||
| 12 | * consent. | ||
| 13 | *****************************************************************************/ | ||
| 14 | |||
| 15 | /* ---- Include Files ---------------------------------------------------- */ | ||
| 16 | #include <mach/reg_umi.h> | ||
| 17 | #include "nand_bcm_umi.h" | ||
| 18 | #ifdef BOOT0_BUILD | ||
| 19 | #include <uart.h> | ||
| 20 | #endif | ||
| 21 | |||
| 22 | /* ---- External Variable Declarations ----------------------------------- */ | ||
| 23 | /* ---- External Function Prototypes ------------------------------------- */ | ||
| 24 | /* ---- Public Variables ------------------------------------------------- */ | ||
| 25 | /* ---- Private Constants and Types -------------------------------------- */ | ||
| 26 | /* ---- Private Function Prototypes -------------------------------------- */ | ||
| 27 | /* ---- Private Variables ------------------------------------------------ */ | ||
| 28 | /* ---- Private Functions ------------------------------------------------ */ | ||
| 29 | |||
| 30 | #if NAND_ECC_BCH | ||
| 31 | /**************************************************************************** | ||
| 32 | * nand_bch_ecc_flip_bit - Routine to flip an errored bit | ||
| 33 | * | ||
| 34 | * PURPOSE: | ||
| 35 | * This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the | ||
| 36 | * errored bit specified | ||
| 37 | * | ||
| 38 | * PARAMETERS: | ||
| 39 | * datap - Container that holds the 512 byte data | ||
| 40 | * errorLocation - Location of the bit that needs to be flipped | ||
| 41 | * | ||
| 42 | * RETURNS: | ||
| 43 | * None | ||
| 44 | ****************************************************************************/ | ||
| 45 | static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation) | ||
| 46 | { | ||
| 47 | int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0; | ||
| 48 | int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3; | ||
| 49 | int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5; | ||
| 50 | |||
| 51 | uint8_t errorByte = 0; | ||
| 52 | uint8_t byteMask = 1 << locWithinAByte; | ||
| 53 | |||
| 54 | /* BCH uses big endian, need to change the location | ||
| 55 | * bits to little endian */ | ||
| 56 | locWithinAWord = 3 - locWithinAWord; | ||
| 57 | |||
| 58 | errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord]; | ||
| 59 | |||
| 60 | #ifdef BOOT0_BUILD | ||
| 61 | puthexs("\nECC Correct Offset: ", | ||
| 62 | locWithinAPage * sizeof(uint32_t) + locWithinAWord); | ||
| 63 | puthexs(" errorByte:", errorByte); | ||
| 64 | puthex8(" Bit: ", locWithinAByte); | ||
| 65 | #endif | ||
| 66 | |||
| 67 | if (errorByte & byteMask) { | ||
| 68 | /* bit needs to be cleared */ | ||
| 69 | errorByte &= ~byteMask; | ||
| 70 | } else { | ||
| 71 | /* bit needs to be set */ | ||
| 72 | errorByte |= byteMask; | ||
| 73 | } | ||
| 74 | |||
| 75 | /* write back the value with the fixed bit */ | ||
| 76 | datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte; | ||
| 77 | } | ||
| 78 | |||
| 79 | /**************************************************************************** | ||
| 80 | * nand_correct_page_bch - Routine to correct bit errors when reading NAND | ||
| 81 | * | ||
| 82 | * PURPOSE: | ||
| 83 | * This routine reads the BCH registers to determine if there are any bit | ||
| 84 | * errors during the read of the last 512 bytes of data + ECC bytes. If | ||
| 85 | * errors exists, the routine fixes it. | ||
| 86 | * | ||
| 87 | * PARAMETERS: | ||
| 88 | * datap - Container that holds the 512 byte data | ||
| 89 | * | ||
| 90 | * RETURNS: | ||
| 91 | * 0 or greater = Number of errors corrected | ||
| 92 | * (No errors are found or errors have been fixed) | ||
| 93 | * -1 = Error(s) cannot be fixed | ||
| 94 | ****************************************************************************/ | ||
| 95 | int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData, | ||
| 96 | int numEccBytes) | ||
| 97 | { | ||
| 98 | int numErrors; | ||
| 99 | int errorLocation; | ||
| 100 | int idx; | ||
| 101 | uint32_t regValue; | ||
| 102 | |||
| 103 | /* wait for read ECC to be valid */ | ||
| 104 | regValue = nand_bcm_umi_bch_poll_read_ecc_calc(); | ||
| 105 | |||
| 106 | /* | ||
| 107 | * read the control status register to determine if there | ||
| 108 | * are any errored bits, and | ||
| 109 | * whether those errors are correctable | ||
| 110 | */ | ||
| 111 | if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) { | ||
| 112 | int i; | ||
| 113 | |||
| 114 | for (i = 0; i < numEccBytes; i++) { | ||
| 115 | if (readEccData[i] != 0xff) { | ||
| 116 | /* errors cannot be fixed, return -1 */ | ||
| 117 | return -1; | ||
| 118 | } | ||
| 119 | } | ||
| 120 | /* If ECC is unprogrammed then we can't correct, | ||
| 121 | * assume everything OK */ | ||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) { | ||
| 126 | /* no errors */ | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Fix errored bits by doing the following: | ||
| 132 | * 1. Read the number of errors in the control and status register | ||
| 133 | * 2. Read the error location registers that corresponds to the number | ||
| 134 | * of errors reported | ||
| 135 | * 3. Invert the bit in the data | ||
| 136 | */ | ||
| 137 | numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20; | ||
| 138 | |||
| 139 | for (idx = 0; idx < numErrors; idx++) { | ||
| 140 | errorLocation = | ||
| 141 | REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK; | ||
| 142 | |||
| 143 | /* Flip bit */ | ||
| 144 | nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation); | ||
| 145 | } | ||
| 146 | /* Errors corrected */ | ||
| 147 | return numErrors; | ||
| 148 | } | ||
| 149 | #endif | ||
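The correction flow above is: poll until the read-ECC-valid bit is set, treat the uncorrectable flag as fatal unless every stored ECC byte is 0xff (an erased page), and otherwise walk the reported error locations, flipping one bit per location. A minimal sketch of how a read path could consume a corrector with this contract follows; the wrapper name and error policy are illustrative, not part of the driver.

/* Illustrative only: a sector read path using a corrector with the same
 * contract as nand_bcm_umi_bch_correct_page() -- a return value >= 0 is
 * "number of bits fixed", -1 is "uncorrectable". */
static int read_sector_and_correct(uint8_t *data, uint8_t *ecc, int num_ecc)
{
	int fixed = nand_bcm_umi_bch_correct_page(data, ecc, num_ecc);

	if (fixed < 0)
		return -EBADMSG;	/* surface an uncorrectable ECC error */

	return fixed;			/* caller can fold this into ECC stats */
}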
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h deleted file mode 100644 index d90186684db8..000000000000 --- a/drivers/mtd/nand/nand_bcm_umi.h +++ /dev/null | |||
| @@ -1,336 +0,0 @@ | |||
| 1 | /***************************************************************************** | ||
| 2 | * Copyright 2003 - 2009 Broadcom Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Unless you and Broadcom execute a separate written software license | ||
| 5 | * agreement governing use of this software, this software is licensed to you | ||
| 6 | * under the terms of the GNU General Public License version 2, available at | ||
| 7 | * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). | ||
| 8 | * | ||
| 9 | * Notwithstanding the above, under no circumstances may you combine this | ||
| 10 | * software in any way with any other Broadcom software provided under a | ||
| 11 | * license other than the GPL, without Broadcom's express prior written | ||
| 12 | * consent. | ||
| 13 | *****************************************************************************/ | ||
| 14 | #ifndef NAND_BCM_UMI_H | ||
| 15 | #define NAND_BCM_UMI_H | ||
| 16 | |||
| 17 | /* ---- Include Files ---------------------------------------------------- */ | ||
| 18 | #include <mach/reg_umi.h> | ||
| 19 | #include <mach/reg_nand.h> | ||
| 20 | #include <mach/cfg_global.h> | ||
| 21 | |||
| 22 | /* ---- Constants and Types ---------------------------------------------- */ | ||
| 23 | #if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING) | ||
| 24 | #define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0) | ||
| 25 | #else | ||
| 26 | #define NAND_ECC_BCH 0 | ||
| 27 | #endif | ||
| 28 | |||
| 29 | #define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13 | ||
| 30 | |||
| 31 | #if NAND_ECC_BCH | ||
| 32 | #ifdef BOOT0_BUILD | ||
| 33 | #define NAND_ECC_NUM_BYTES 13 | ||
| 34 | #else | ||
| 35 | #define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES | ||
| 36 | #endif | ||
| 37 | #else | ||
| 38 | #define NAND_ECC_NUM_BYTES 3 | ||
| 39 | #endif | ||
| 40 | |||
| 41 | #define NAND_DATA_ACCESS_SIZE 512 | ||
| 42 | |||
| 43 | /* ---- Variable Externs ------------------------------------------ */ | ||
| 44 | /* ---- Function Prototypes --------------------------------------- */ | ||
| 45 | int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData, | ||
| 46 | int numEccBytes); | ||
| 47 | |||
| 48 | /* Check if the device is ready */ | ||
| 49 | static inline int nand_bcm_umi_dev_ready(void) | ||
| 50 | { | ||
| 51 | return readl(®_UMI_NAND_RCSR) & REG_UMI_NAND_RCSR_RDY; | ||
| 52 | } | ||
| 53 | |||
| 54 | /* Wait until device is ready */ | ||
| 55 | static inline void nand_bcm_umi_wait_till_ready(void) | ||
| 56 | { | ||
| 57 | while (nand_bcm_umi_dev_ready() == 0) | ||
| 58 | ; | ||
| 59 | } | ||
| 60 | |||
| 61 | /* Enable Hamming ECC */ | ||
| 62 | static inline void nand_bcm_umi_hamming_enable_hwecc(void) | ||
| 63 | { | ||
| 64 | /* disable and reset ECC, 512 byte page */ | ||
| 65 | writel(readl(®_UMI_NAND_ECC_CSR) & ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE | | ||
| 66 | REG_UMI_NAND_ECC_CSR_256BYTE), ®_UMI_NAND_ECC_CSR); | ||
| 67 | /* enable ECC */ | ||
| 68 | writel(readl(®_UMI_NAND_ECC_CSR) | REG_UMI_NAND_ECC_CSR_ECC_ENABLE, | ||
| 69 | ®_UMI_NAND_ECC_CSR); | ||
| 70 | } | ||
| 71 | |||
| 72 | #if NAND_ECC_BCH | ||
| 73 | /* BCH ECC specifics */ | ||
| 74 | #define ECC_BITS_PER_CORRECTABLE_BIT 13 | ||
| 75 | |||
| 76 | /* Enable BCH Read ECC */ | ||
| 77 | static inline void nand_bcm_umi_bch_enable_read_hwecc(void) | ||
| 78 | { | ||
| 79 | /* disable and reset ECC */ | ||
| 80 | writel(REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, ®_UMI_BCH_CTRL_STATUS); | ||
| 81 | /* Turn on ECC */ | ||
| 82 | writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, ®_UMI_BCH_CTRL_STATUS); | ||
| 83 | } | ||
| 84 | |||
| 85 | /* Enable BCH Write ECC */ | ||
| 86 | static inline void nand_bcm_umi_bch_enable_write_hwecc(void) | ||
| 87 | { | ||
| 88 | /* disable and reset ECC */ | ||
| 89 | writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID, ®_UMI_BCH_CTRL_STATUS); | ||
| 90 | /* Turn on ECC */ | ||
| 91 | writel(REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN, ®_UMI_BCH_CTRL_STATUS); | ||
| 92 | } | ||
| 93 | |||
| 94 | /* Config number of BCH ECC bytes */ | ||
| 95 | static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes) | ||
| 96 | { | ||
| 97 | uint32_t nValue; | ||
| 98 | uint32_t tValue; | ||
| 99 | uint32_t kValue; | ||
| 100 | uint32_t numBits = numEccBytes * 8; | ||
| 101 | |||
| 102 | /* disable and reset ECC */ | ||
| 103 | writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID | | ||
| 104 | REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, | ||
| 105 | ®_UMI_BCH_CTRL_STATUS); | ||
| 106 | |||
| 107 | /* Every correctable bit requires 13 ECC bits */ | ||
| 108 | tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT); | ||
| 109 | |||
| 110 | /* Total data in number of bits for generating and computing BCH ECC */ | ||
| 111 | nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8; | ||
| 112 | |||
| 113 | /* K parameter is used internally. K = N - (T * 13) */ | ||
| 114 | kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT); | ||
| 115 | |||
| 116 | /* Write the settings */ | ||
| 117 | writel(nValue, ®_UMI_BCH_N); | ||
| 118 | writel(tValue, ®_UMI_BCH_T); | ||
| 119 | writel(kValue, ®_UMI_BCH_K); | ||
| 120 | } | ||
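For concreteness, here is the arithmetic the routine above performs when the 13-byte BCH setting from this header is used; the helper below is a throwaway sketch, assuming ECC_BITS_PER_CORRECTABLE_BIT is 13 and the sector size is 512 bytes.

/* Sketch: register values nand_bcm_umi_bch_config_ecc() derives for
 * numEccBytes = 13. */
static void bch_params_for_13_ecc_bytes(void)
{
	unsigned int num_bits = 13 * 8;		/* 104 ECC bits            */
	unsigned int t = num_bits / 13;		/* T = 8 correctable bits  */
	unsigned int n = (512 + 13) * 8;	/* N = 4200 codeword bits  */
	unsigned int k = n - t * 13;		/* K = 4096 data bits      */

	(void)k;				/* illustration only       */
}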
| 121 | |||
| 122 | /* Pause during ECC read calculation to skip bytes in OOB */ | ||
| 123 | static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void) | ||
| 124 | { | ||
| 125 | writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN | REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC, ®_UMI_BCH_CTRL_STATUS); | ||
| 126 | } | ||
| 127 | |||
| 128 | /* Resume during ECC read calculation after skipping bytes in OOB */ | ||
| 129 | static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void) | ||
| 130 | { | ||
| 131 | writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, ®_UMI_BCH_CTRL_STATUS); | ||
| 132 | } | ||
| 133 | |||
| 134 | /* Poll read ECC calc to check when hardware completes */ | ||
| 135 | static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void) | ||
| 136 | { | ||
| 137 | uint32_t regVal; | ||
| 138 | |||
| 139 | do { | ||
| 140 | /* wait for ECC to be valid */ | ||
| 141 | regVal = readl(®_UMI_BCH_CTRL_STATUS); | ||
| 142 | } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0); | ||
| 143 | |||
| 144 | return regVal; | ||
| 145 | } | ||
| 146 | |||
| 147 | /* Poll write ECC calc to check when hardware completes */ | ||
| 148 | static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void) | ||
| 149 | { | ||
| 150 | /* wait for ECC to be valid */ | ||
| 151 | while ((readl(®_UMI_BCH_CTRL_STATUS) & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID) | ||
| 152 | == 0) | ||
| 153 | ; | ||
| 154 | } | ||
| 155 | |||
| 156 | /* Read the OOB and ECC; for kernel builds, also copy the OOB into a buffer */ | ||
| 157 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 158 | static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, | ||
| 159 | uint8_t *eccCalc, int numEccBytes, uint8_t *oobp) | ||
| 160 | #else | ||
| 161 | static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, | ||
| 162 | uint8_t *eccCalc, int numEccBytes) | ||
| 163 | #endif | ||
| 164 | { | ||
| 165 | int eccPos = 0; | ||
| 166 | int numToRead = 16; /* There are 16 bytes per sector in the OOB */ | ||
| 167 | |||
| 168 | /* ECC is already paused when this function is called */ | ||
| 169 | if (pageSize != NAND_DATA_ACCESS_SIZE) { | ||
| 170 | /* skip BI */ | ||
| 171 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 172 | *oobp++ = readb(®_NAND_DATA8); | ||
| 173 | #else | ||
| 174 | readb(®_NAND_DATA8); | ||
| 175 | #endif | ||
| 176 | numToRead--; | ||
| 177 | } | ||
| 178 | |||
| 179 | while (numToRead > numEccBytes) { | ||
| 180 | /* skip free oob region */ | ||
| 181 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 182 | *oobp++ = readb(®_NAND_DATA8); | ||
| 183 | #else | ||
| 184 | readb(®_NAND_DATA8); | ||
| 185 | #endif | ||
| 186 | numToRead--; | ||
| 187 | } | ||
| 188 | |||
| 189 | if (pageSize == NAND_DATA_ACCESS_SIZE) { | ||
| 190 | /* read ECC bytes before BI */ | ||
| 191 | nand_bcm_umi_bch_resume_read_ecc_calc(); | ||
| 192 | |||
| 193 | while (numToRead > 11) { | ||
| 194 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 195 | *oobp = readb(®_NAND_DATA8); | ||
| 196 | eccCalc[eccPos++] = *oobp; | ||
| 197 | oobp++; | ||
| 198 | #else | ||
| 199 | eccCalc[eccPos++] = readb(®_NAND_DATA8); | ||
| 200 | #endif | ||
| 201 | numToRead--; | ||
| 202 | } | ||
| 203 | |||
| 204 | nand_bcm_umi_bch_pause_read_ecc_calc(); | ||
| 205 | |||
| 206 | if (numToRead == 11) { | ||
| 207 | /* read BI */ | ||
| 208 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 209 | *oobp++ = readb(®_NAND_DATA8); | ||
| 210 | #else | ||
| 211 | readb(®_NAND_DATA8); | ||
| 212 | #endif | ||
| 213 | numToRead--; | ||
| 214 | } | ||
| 215 | |||
| 216 | } | ||
| 217 | /* read ECC bytes */ | ||
| 218 | nand_bcm_umi_bch_resume_read_ecc_calc(); | ||
| 219 | while (numToRead) { | ||
| 220 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
| 221 | *oobp = readb(®_NAND_DATA8); | ||
| 222 | eccCalc[eccPos++] = *oobp; | ||
| 223 | oobp++; | ||
| 224 | #else | ||
| 225 | eccCalc[eccPos++] = readb(®_NAND_DATA8); | ||
| 226 | #endif | ||
| 227 | numToRead--; | ||
| 228 | } | ||
| 229 | } | ||
| 230 | |||
| 231 | /* Helper function to write ECC */ | ||
| 232 | static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos, | ||
| 233 | uint8_t *oobp, uint8_t eccVal) | ||
| 234 | { | ||
| 235 | if (eccBytePos <= numEccBytes) | ||
| 236 | *oobp = eccVal; | ||
| 237 | } | ||
| 238 | |||
| 239 | /* Write OOB with ECC */ | ||
| 240 | static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize, | ||
| 241 | uint8_t *oobp, int numEccBytes) | ||
| 242 | { | ||
| 243 | uint32_t eccVal = 0xffffffff; | ||
| 244 | |||
| 245 | /* wait for write ECC to be valid */ | ||
| 246 | nand_bcm_umi_bch_poll_write_ecc_calc(); | ||
| 247 | |||
| 248 | /* | ||
| 249 | ** Get the hardware ecc from the 32-bit result registers. | ||
| 250 | ** Read after 512 byte accesses. Format B3B2B1B0 | ||
| 251 | ** where B3 = ecc3, etc. | ||
| 252 | */ | ||
| 253 | |||
| 254 | if (pageSize == NAND_DATA_ACCESS_SIZE) { | ||
| 255 | /* Now fill in the ECC bytes */ | ||
| 256 | if (numEccBytes >= 13) | ||
| 257 | eccVal = readl(®_UMI_BCH_WR_ECC_3); | ||
| 258 | |||
| 259 | /* Usually we skip CM in oob[0,1] */ | ||
| 260 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0], | ||
| 261 | (eccVal >> 16) & 0xff); | ||
| 262 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1], | ||
| 263 | (eccVal >> 8) & 0xff); | ||
| 264 | |||
| 265 | /* Write ECC in oob[2,3,4] */ | ||
| 266 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2], | ||
| 267 | eccVal & 0xff); /* ECC 12 */ | ||
| 268 | |||
| 269 | if (numEccBytes >= 9) | ||
| 270 | eccVal = readl(®_UMI_BCH_WR_ECC_2); | ||
| 271 | |||
| 272 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3], | ||
| 273 | (eccVal >> 24) & 0xff); /* ECC11 */ | ||
| 274 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4], | ||
| 275 | (eccVal >> 16) & 0xff); /* ECC10 */ | ||
| 276 | |||
| 277 | /* Always Skip BI in oob[5] */ | ||
| 278 | } else { | ||
| 279 | /* Always Skip BI in oob[0] */ | ||
| 280 | |||
| 281 | /* Now fill in the ECC bytes */ | ||
| 282 | if (numEccBytes >= 13) | ||
| 283 | eccVal = readl(®_UMI_BCH_WR_ECC_3); | ||
| 284 | |||
| 285 | /* Usually skip CM in oob[1,2] */ | ||
| 286 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1], | ||
| 287 | (eccVal >> 16) & 0xff); | ||
| 288 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2], | ||
| 289 | (eccVal >> 8) & 0xff); | ||
| 290 | |||
| 291 | /* Write ECC in oob[3-15] */ | ||
| 292 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3], | ||
| 293 | eccVal & 0xff); /* ECC12 */ | ||
| 294 | |||
| 295 | if (numEccBytes >= 9) | ||
| 296 | eccVal = readl(®_UMI_BCH_WR_ECC_2); | ||
| 297 | |||
| 298 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4], | ||
| 299 | (eccVal >> 24) & 0xff); /* ECC11 */ | ||
| 300 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5], | ||
| 301 | (eccVal >> 16) & 0xff); /* ECC10 */ | ||
| 302 | } | ||
| 303 | |||
| 304 | /* Fill in the remainder of ECC locations */ | ||
| 305 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6], | ||
| 306 | (eccVal >> 8) & 0xff); /* ECC9 */ | ||
| 307 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7], | ||
| 308 | eccVal & 0xff); /* ECC8 */ | ||
| 309 | |||
| 310 | if (numEccBytes >= 5) | ||
| 311 | eccVal = readl(®_UMI_BCH_WR_ECC_1); | ||
| 312 | |||
| 313 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8], | ||
| 314 | (eccVal >> 24) & 0xff); /* ECC7 */ | ||
| 315 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9], | ||
| 316 | (eccVal >> 16) & 0xff); /* ECC6 */ | ||
| 317 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10], | ||
| 318 | (eccVal >> 8) & 0xff); /* ECC5 */ | ||
| 319 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11], | ||
| 320 | eccVal & 0xff); /* ECC4 */ | ||
| 321 | |||
| 322 | if (numEccBytes >= 1) | ||
| 323 | eccVal = readl(®_UMI_BCH_WR_ECC_0); | ||
| 324 | |||
| 325 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12], | ||
| 326 | (eccVal >> 24) & 0xff); /* ECC3 */ | ||
| 327 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13], | ||
| 328 | (eccVal >> 16) & 0xff); /* ECC2 */ | ||
| 329 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14], | ||
| 330 | (eccVal >> 8) & 0xff); /* ECC1 */ | ||
| 331 | NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15], | ||
| 332 | eccVal & 0xff); /* ECC0 */ | ||
| 333 | } | ||
| 334 | #endif | ||
| 335 | |||
| 336 | #endif /* NAND_BCM_UMI_H */ | ||
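Reading nand_bcm_umi_bch_write_oobEcc() above for the 512-byte-page case with 13 ECC bytes: positions 15 and 14 fail the eccBytePos <= numEccBytes guard, oob[5] is always reserved for the bad-block indicator, and ECC12..ECC0 land in the remaining slots. The table below is an editorial summary of that mapping, not a structure from the driver.

/* Which ECC byte ends up in each OOB byte for a 512-byte page with 13
 * ECC bytes, as implied by the write routine above (-1 = not written). */
static const int ecc_byte_for_oob[16] = {
	-1, -1,			/* oob[0..1]: CM slots, skipped     */
	12, 11, 10,		/* oob[2..4]:  ECC12..ECC10         */
	-1,			/* oob[5]:     bad-block indicator  */
	 9,  8,  7,  6,		/* oob[6..9]:  ECC9..ECC6           */
	 5,  4,  3,  2,		/* oob[10..13]: ECC5..ECC2          */
	 1,  0,			/* oob[14..15]: ECC1, ECC0          */
};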
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 621b70b7a159..e3aa2748a6e7 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
| @@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = { | |||
| 70 | * These are the new chips with large page size. The pagesize and the | 70 | * These are the new chips with large page size. The pagesize and the |
| 71 | * erasesize is determined from the extended id bytes | 71 | * erasesize is determined from the extended id bytes |
| 72 | */ | 72 | */ |
| 73 | #define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY) | 73 | #define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS |
| 74 | #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) | 74 | #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) |
| 75 | 75 | ||
| 76 | /* 512 Megabit */ | 76 | /* 512 Megabit */ |
| @@ -157,7 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = { | |||
| 157 | * writes possible, but not implemented now | 157 | * writes possible, but not implemented now |
| 158 | */ | 158 | */ |
| 159 | {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, | 159 | {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, |
| 160 | NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, | 160 | NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, |
| 161 | 161 | ||
| 162 | {NULL,} | 162 | {NULL,} |
| 163 | }; | 163 | }; |
| @@ -174,8 +174,9 @@ struct nand_manufacturers nand_manuf_ids[] = { | |||
| 174 | {NAND_MFR_STMICRO, "ST Micro"}, | 174 | {NAND_MFR_STMICRO, "ST Micro"}, |
| 175 | {NAND_MFR_HYNIX, "Hynix"}, | 175 | {NAND_MFR_HYNIX, "Hynix"}, |
| 176 | {NAND_MFR_MICRON, "Micron"}, | 176 | {NAND_MFR_MICRON, "Micron"}, |
| 177 | {NAND_MFR_AMD, "AMD"}, | 177 | {NAND_MFR_AMD, "AMD/Spansion"}, |
| 178 | {NAND_MFR_MACRONIX, "Macronix"}, | 178 | {NAND_MFR_MACRONIX, "Macronix"}, |
| 179 | {NAND_MFR_EON, "Eon"}, | ||
| 179 | {0x0, "Unknown"} | 180 | {0x0, "Unknown"} |
| 180 | }; | 181 | }; |
| 181 | 182 | ||
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index cf0cd3146817..a932c485eb04 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
| @@ -447,8 +447,6 @@ static unsigned int rptwear_cnt = 0; | |||
| 447 | /* MTD structure for NAND controller */ | 447 | /* MTD structure for NAND controller */ |
| 448 | static struct mtd_info *nsmtd; | 448 | static struct mtd_info *nsmtd; |
| 449 | 449 | ||
| 450 | static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE]; | ||
| 451 | |||
| 452 | /* | 450 | /* |
| 453 | * Allocate array of page pointers, create slab allocation for an array | 451 | * Allocate array of page pointers, create slab allocation for an array |
| 454 | * and initialize the array by NULL pointers. | 452 | * and initialize the array by NULL pointers. |
| @@ -2189,19 +2187,6 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 2189 | return; | 2187 | return; |
| 2190 | } | 2188 | } |
| 2191 | 2189 | ||
| 2192 | static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 2193 | { | ||
| 2194 | ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len); | ||
| 2195 | |||
| 2196 | if (!memcmp(buf, &ns_verify_buf[0], len)) { | ||
| 2197 | NS_DBG("verify_buf: the buffer is OK\n"); | ||
| 2198 | return 0; | ||
| 2199 | } else { | ||
| 2200 | NS_DBG("verify_buf: the buffer is wrong\n"); | ||
| 2201 | return -EFAULT; | ||
| 2202 | } | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | /* | 2190 | /* |
| 2206 | * Module initialization function | 2191 | * Module initialization function |
| 2207 | */ | 2192 | */ |
| @@ -2236,7 +2221,6 @@ static int __init ns_init_module(void) | |||
| 2236 | chip->dev_ready = ns_device_ready; | 2221 | chip->dev_ready = ns_device_ready; |
| 2237 | chip->write_buf = ns_nand_write_buf; | 2222 | chip->write_buf = ns_nand_write_buf; |
| 2238 | chip->read_buf = ns_nand_read_buf; | 2223 | chip->read_buf = ns_nand_read_buf; |
| 2239 | chip->verify_buf = ns_nand_verify_buf; | ||
| 2240 | chip->read_word = ns_nand_read_word; | 2224 | chip->read_word = ns_nand_read_word; |
| 2241 | chip->ecc.mode = NAND_ECC_SOFT; | 2225 | chip->ecc.mode = NAND_ECC_SOFT; |
| 2242 | /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ | 2226 | /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ |
| @@ -2333,6 +2317,7 @@ static int __init ns_init_module(void) | |||
| 2333 | uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; | 2317 | uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; |
| 2334 | if (new_size >> overridesize != nsmtd->erasesize) { | 2318 | if (new_size >> overridesize != nsmtd->erasesize) { |
| 2335 | NS_ERR("overridesize is too big\n"); | 2319 | NS_ERR("overridesize is too big\n"); |
| 2320 | retval = -EINVAL; | ||
| 2336 | goto err_exit; | 2321 | goto err_exit; |
| 2337 | } | 2322 | } |
| 2338 | /* N.B. This relies on nand_scan not doing anything with the size before we change it */ | 2323 | /* N.B. This relies on nand_scan not doing anything with the size before we change it */ |
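Two things change in nandsim: the verify_buf callback, its backing buffer and the chip->verify_buf hookup are removed, and the "overridesize is too big" path now sets retval = -EINVAL before the goto instead of reusing whatever retval already held. The shift-and-compare test it guards is a compact overflow check; a standalone sketch of the same idiom, with invented names:

/* Sketch of the overflow check used above: if shifting left and then back
 * does not round-trip, erasesize << overridesize overflowed the u64. */
static int scale_erasesize(uint64_t erasesize, unsigned int overridesize,
			   uint64_t *out)
{
	uint64_t new_size = erasesize << overridesize;

	if (new_size >> overridesize != erasesize)
		return -EINVAL;		/* overridesize is too big */

	*out = new_size;
	return 0;
}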
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index 2b6f632cf274..5fd3f010e3ae 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c | |||
| @@ -140,18 +140,6 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
| 140 | out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); | 140 | out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
| 144 | { | ||
| 145 | struct nand_chip *chip = mtd->priv; | ||
| 146 | struct ndfc_controller *ndfc = chip->priv; | ||
| 147 | uint32_t *p = (uint32_t *) buf; | ||
| 148 | |||
| 149 | for(;len > 0; len -= 4) | ||
| 150 | if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA)) | ||
| 151 | return -EFAULT; | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | /* | 143 | /* |
| 156 | * Initialize chip structure | 144 | * Initialize chip structure |
| 157 | */ | 145 | */ |
| @@ -172,7 +160,6 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, | |||
| 172 | chip->controller = &ndfc->ndfc_control; | 160 | chip->controller = &ndfc->ndfc_control; |
| 173 | chip->read_buf = ndfc_read_buf; | 161 | chip->read_buf = ndfc_read_buf; |
| 174 | chip->write_buf = ndfc_write_buf; | 162 | chip->write_buf = ndfc_write_buf; |
| 175 | chip->verify_buf = ndfc_verify_buf; | ||
| 176 | chip->ecc.correct = nand_correct_data; | 163 | chip->ecc.correct = nand_correct_data; |
| 177 | chip->ecc.hwctl = ndfc_enable_hwecc; | 164 | chip->ecc.hwctl = ndfc_enable_hwecc; |
| 178 | chip->ecc.calculate = ndfc_calculate_ecc; | 165 | chip->ecc.calculate = ndfc_calculate_ecc; |
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 8febe46e1105..94dc46bc118c 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c | |||
| @@ -112,22 +112,6 @@ static void nuc900_nand_write_buf(struct mtd_info *mtd, | |||
| 112 | write_data_reg(nand, buf[i]); | 112 | write_data_reg(nand, buf[i]); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static int nuc900_verify_buf(struct mtd_info *mtd, | ||
| 116 | const unsigned char *buf, int len) | ||
| 117 | { | ||
| 118 | int i; | ||
| 119 | struct nuc900_nand *nand; | ||
| 120 | |||
| 121 | nand = container_of(mtd, struct nuc900_nand, mtd); | ||
| 122 | |||
| 123 | for (i = 0; i < len; i++) { | ||
| 124 | if (buf[i] != (unsigned char)read_data_reg(nand)) | ||
| 125 | return -EFAULT; | ||
| 126 | } | ||
| 127 | |||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | static int nuc900_check_rb(struct nuc900_nand *nand) | 115 | static int nuc900_check_rb(struct nuc900_nand *nand) |
| 132 | { | 116 | { |
| 133 | unsigned int val; | 117 | unsigned int val; |
| @@ -292,7 +276,6 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev) | |||
| 292 | chip->read_byte = nuc900_nand_read_byte; | 276 | chip->read_byte = nuc900_nand_read_byte; |
| 293 | chip->write_buf = nuc900_nand_write_buf; | 277 | chip->write_buf = nuc900_nand_write_buf; |
| 294 | chip->read_buf = nuc900_nand_read_buf; | 278 | chip->read_buf = nuc900_nand_read_buf; |
| 295 | chip->verify_buf = nuc900_verify_buf; | ||
| 296 | chip->chip_delay = 50; | 279 | chip->chip_delay = 50; |
| 297 | chip->options = 0; | 280 | chip->options = 0; |
| 298 | chip->ecc.mode = NAND_ECC_SOFT; | 281 | chip->ecc.mode = NAND_ECC_SOFT; |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index fc8111278d12..5b3138620646 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
| @@ -425,7 +425,7 @@ static void omap_nand_dma_callback(void *data) | |||
| 425 | } | 425 | } |
| 426 | 426 | ||
| 427 | /* | 427 | /* |
| 428 | * omap_nand_dma_transfer: configer and start dma transfer | 428 | * omap_nand_dma_transfer: configure and start dma transfer |
| 429 | * @mtd: MTD device structure | 429 | * @mtd: MTD device structure |
| 430 | * @addr: virtual address in RAM of source/destination | 430 | * @addr: virtual address in RAM of source/destination |
| 431 | * @len: number of data bytes to be transferred | 431 | * @len: number of data bytes to be transferred |
| @@ -546,7 +546,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | /* | 548 | /* |
| 549 | * omap_nand_irq - GMPC irq handler | 549 | * omap_nand_irq - GPMC irq handler |
| 550 | * @this_irq: gpmc irq number | 550 | * @this_irq: gpmc irq number |
| 551 | * @dev: omap_nand_info structure pointer is passed here | 551 | * @dev: omap_nand_info structure pointer is passed here |
| 552 | */ | 552 | */ |
| @@ -698,27 +698,6 @@ out_copy: | |||
| 698 | } | 698 | } |
| 699 | 699 | ||
| 700 | /** | 700 | /** |
| 701 | * omap_verify_buf - Verify chip data against buffer | ||
| 702 | * @mtd: MTD device structure | ||
| 703 | * @buf: buffer containing the data to compare | ||
| 704 | * @len: number of bytes to compare | ||
| 705 | */ | ||
| 706 | static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) | ||
| 707 | { | ||
| 708 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, | ||
| 709 | mtd); | ||
| 710 | u16 *p = (u16 *) buf; | ||
| 711 | |||
| 712 | len >>= 1; | ||
| 713 | while (len--) { | ||
| 714 | if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R))) | ||
| 715 | return -EFAULT; | ||
| 716 | } | ||
| 717 | |||
| 718 | return 0; | ||
| 719 | } | ||
| 720 | |||
| 721 | /** | ||
| 722 | * gen_true_ecc - This function will generate true ECC value | 701 | * gen_true_ecc - This function will generate true ECC value |
| 723 | * @ecc_buf: buffer to store ecc code | 702 | * @ecc_buf: buffer to store ecc code |
| 724 | * | 703 | * |
| @@ -1326,8 +1305,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
| 1326 | 1305 | ||
| 1327 | /* | 1306 | /* |
| 1328 | * If RDY/BSY line is connected to OMAP then use the omap ready | 1307 | * If RDY/BSY line is connected to OMAP then use the omap ready |
| 1329 | * funcrtion and the generic nand_wait function which reads the status | 1308 | * function and the generic nand_wait function which reads the status |
| 1330 | * register after monitoring the RDY/BSY line.Otherwise use a standard | 1309 | * register after monitoring the RDY/BSY line. Otherwise use a standard |
| 1331 | * chip delay which is slightly more than tR (AC Timing) of the NAND | 1310 | * chip delay which is slightly more than tR (AC Timing) of the NAND |
| 1332 | * device and read status register until you get a failure or success | 1311 | * device and read status register until you get a failure or success |
| 1333 | */ | 1312 | */ |
| @@ -1428,9 +1407,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
| 1428 | goto out_release_mem_region; | 1407 | goto out_release_mem_region; |
| 1429 | } | 1408 | } |
| 1430 | 1409 | ||
| 1431 | info->nand.verify_buf = omap_verify_buf; | 1410 | /* select the ecc type */ |
| 1432 | |||
| 1433 | /* selsect the ecc type */ | ||
| 1434 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) | 1411 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) |
| 1435 | info->nand.ecc.mode = NAND_ECC_SOFT; | 1412 | info->nand.ecc.mode = NAND_ECC_SOFT; |
| 1436 | else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || | 1413 | else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || |
| @@ -1536,7 +1513,8 @@ static int omap_nand_remove(struct platform_device *pdev) | |||
| 1536 | /* Release NAND device, its internal structures and partitions */ | 1513 | /* Release NAND device, its internal structures and partitions */ |
| 1537 | nand_release(&info->mtd); | 1514 | nand_release(&info->mtd); |
| 1538 | iounmap(info->nand.IO_ADDR_R); | 1515 | iounmap(info->nand.IO_ADDR_R); |
| 1539 | kfree(&info->mtd); | 1516 | release_mem_region(info->phys_base, NAND_IO_SIZE); |
| 1517 | kfree(info); | ||
| 1540 | return 0; | 1518 | return 0; |
| 1541 | } | 1519 | } |
| 1542 | 1520 | ||
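The omap2 remove() fix is worth spelling out: the old code called kfree(&info->mtd), i.e. it passed the address of an mtd_info embedded inside info rather than a pointer that came from kmalloc, and it never released the memory region claimed at probe time. The replacement frees the containing structure and drops the region. A condensed sketch of the corrected teardown order, using the field names visible above (the helper name is invented):

/* Sketch: release in reverse order of acquisition, mirroring the fixed
 * omap_nand_remove() above. */
static void example_teardown(struct omap_nand_info *info)
{
	nand_release(&info->mtd);				/* unregister the MTD   */
	iounmap(info->nand.IO_ADDR_R);				/* unmap the registers  */
	release_mem_region(info->phys_base, NAND_IO_SIZE);	/* give back the region */
	kfree(info);						/* free the container   */
}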
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 131b58a133f1..aefaf8cd31ef 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
| @@ -21,7 +21,6 @@ | |||
| 21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
| 22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
| 23 | #include <asm/sizes.h> | 23 | #include <asm/sizes.h> |
| 24 | #include <mach/hardware.h> | ||
| 25 | #include <linux/platform_data/mtd-orion_nand.h> | 24 | #include <linux/platform_data/mtd-orion_nand.h> |
| 26 | 25 | ||
| 27 | static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 26 | static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 1bcb52040422..a47ee68a0cfa 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c | |||
| @@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) | |||
| 37 | const char **part_types; | 37 | const char **part_types; |
| 38 | int err = 0; | 38 | int err = 0; |
| 39 | 39 | ||
| 40 | if (!pdata) { | ||
| 41 | dev_err(&pdev->dev, "platform_nand_data is missing\n"); | ||
| 42 | return -EINVAL; | ||
| 43 | } | ||
| 44 | |||
| 40 | if (pdata->chip.nr_chips < 1) { | 45 | if (pdata->chip.nr_chips < 1) { |
| 41 | dev_err(&pdev->dev, "invalid number of chips specified\n"); | 46 | dev_err(&pdev->dev, "invalid number of chips specified\n"); |
| 42 | return -EINVAL; | 47 | return -EINVAL; |
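plat_nand now validates its platform data before touching it, so a board that forgets to supply platform_nand_data gets a clean -EINVAL instead of a NULL dereference in probe. The guard added above is the whole fix; the fragment below only restates the pattern with an invented probe name.

/* Sketch of the defensive platform-data check added above. */
static int example_probe(struct platform_device *pdev)
{
	struct platform_nand_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "platform_nand_data is missing\n");
		return -EINVAL;
	}

	/* ... pdata->chip and pdata->ctrl are safe to use from here ... */
	return 0;
}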
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index c45227173efd..37ee75c7bacb 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
| @@ -683,11 +683,13 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | |||
| 683 | info->state = STATE_IDLE; | 683 | info->state = STATE_IDLE; |
| 684 | } | 684 | } |
| 685 | 685 | ||
| 686 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | 686 | static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, |
| 687 | struct nand_chip *chip, const uint8_t *buf, int oob_required) | 687 | struct nand_chip *chip, const uint8_t *buf, int oob_required) |
| 688 | { | 688 | { |
| 689 | chip->write_buf(mtd, buf, mtd->writesize); | 689 | chip->write_buf(mtd, buf, mtd->writesize); |
| 690 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | 690 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 691 | |||
| 692 | return 0; | ||
| 691 | } | 693 | } |
| 692 | 694 | ||
| 693 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, | 695 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, |
| @@ -771,12 +773,6 @@ static void pxa3xx_nand_write_buf(struct mtd_info *mtd, | |||
| 771 | info->buf_start += real_len; | 773 | info->buf_start += real_len; |
| 772 | } | 774 | } |
| 773 | 775 | ||
| 774 | static int pxa3xx_nand_verify_buf(struct mtd_info *mtd, | ||
| 775 | const uint8_t *buf, int len) | ||
| 776 | { | ||
| 777 | return 0; | ||
| 778 | } | ||
| 779 | |||
| 780 | static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) | 776 | static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) |
| 781 | { | 777 | { |
| 782 | return; | 778 | return; |
| @@ -1007,7 +1003,6 @@ KEEP_CONFIG: | |||
| 1007 | chip->ecc.size = host->page_size; | 1003 | chip->ecc.size = host->page_size; |
| 1008 | chip->ecc.strength = 1; | 1004 | chip->ecc.strength = 1; |
| 1009 | 1005 | ||
| 1010 | chip->options |= NAND_NO_READRDY; | ||
| 1011 | if (host->reg_ndcr & NDCR_DWIDTH_M) | 1006 | if (host->reg_ndcr & NDCR_DWIDTH_M) |
| 1012 | chip->options |= NAND_BUSWIDTH_16; | 1007 | chip->options |= NAND_BUSWIDTH_16; |
| 1013 | 1008 | ||
| @@ -1070,7 +1065,6 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
| 1070 | chip->read_byte = pxa3xx_nand_read_byte; | 1065 | chip->read_byte = pxa3xx_nand_read_byte; |
| 1071 | chip->read_buf = pxa3xx_nand_read_buf; | 1066 | chip->read_buf = pxa3xx_nand_read_buf; |
| 1072 | chip->write_buf = pxa3xx_nand_write_buf; | 1067 | chip->write_buf = pxa3xx_nand_write_buf; |
| 1073 | chip->verify_buf = pxa3xx_nand_verify_buf; | ||
| 1074 | } | 1068 | } |
| 1075 | 1069 | ||
| 1076 | spin_lock_init(&chip->controller->lock); | 1070 | spin_lock_init(&chip->controller->lock); |
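In pxa3xx_nand, write_page_hwecc gains an int return type, the do-nothing verify_buf stub and its chip->verify_buf assignment disappear, and the NAND_NO_READRDY option is no longer set. Under the int-returning prototype, an implementation that cannot itself detect failure simply returns 0, as sketched below with an invented name (any program failure is assumed to surface through the usual status read).

/* Sketch: an ecc.write_page-style callback under the int-returning
 * prototype shown above; return 0 on success or a negative errno. */
static int example_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}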
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index 8cb627751c9c..4495f8551fa0 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
| @@ -309,27 +309,6 @@ static uint8_t r852_read_byte(struct mtd_info *mtd) | |||
| 309 | return r852_read_reg(dev, R852_DATALINE); | 309 | return r852_read_reg(dev, R852_DATALINE); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | |||
| 313 | /* | ||
| 314 | * Readback the buffer to verify it | ||
| 315 | */ | ||
| 316 | int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
| 317 | { | ||
| 318 | struct r852_device *dev = r852_get_dev(mtd); | ||
| 319 | |||
| 320 | /* We can't be sure about anything here... */ | ||
| 321 | if (dev->card_unstable) | ||
| 322 | return -1; | ||
| 323 | |||
| 324 | /* This will never happen, unless you wired up a nand chip | ||
| 325 | with > 512 bytes page size to the reader */ | ||
| 326 | if (len > SM_SECTOR_SIZE) | ||
| 327 | return 0; | ||
| 328 | |||
| 329 | r852_read_buf(mtd, dev->tmp_buffer, len); | ||
| 330 | return memcmp(buf, dev->tmp_buffer, len); | ||
| 331 | } | ||
| 332 | |||
| 333 | /* | 312 | /* |
| 334 | * Control several chip lines & send commands | 313 | * Control several chip lines & send commands |
| 335 | */ | 314 | */ |
| @@ -882,7 +861,6 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
| 882 | chip->read_byte = r852_read_byte; | 861 | chip->read_byte = r852_read_byte; |
| 883 | chip->read_buf = r852_read_buf; | 862 | chip->read_buf = r852_read_buf; |
| 884 | chip->write_buf = r852_write_buf; | 863 | chip->write_buf = r852_write_buf; |
| 885 | chip->verify_buf = r852_verify_buf; | ||
| 886 | 864 | ||
| 887 | /* ecc */ | 865 | /* ecc */ |
| 888 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; | 866 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; |
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index d8040619ad8d..295e4bedad96 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #define pr_fmt(fmt) "nand-s3c2410: " fmt | ||
| 25 | |||
| 24 | #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG | 26 | #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG |
| 25 | #define DEBUG | 27 | #define DEBUG |
| 26 | #endif | 28 | #endif |
| @@ -30,6 +32,7 @@ | |||
| 30 | #include <linux/init.h> | 32 | #include <linux/init.h> |
| 31 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
| 32 | #include <linux/string.h> | 34 | #include <linux/string.h> |
| 35 | #include <linux/io.h> | ||
| 33 | #include <linux/ioport.h> | 36 | #include <linux/ioport.h> |
| 34 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
| 35 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
| @@ -43,24 +46,9 @@ | |||
| 43 | #include <linux/mtd/nand_ecc.h> | 46 | #include <linux/mtd/nand_ecc.h> |
| 44 | #include <linux/mtd/partitions.h> | 47 | #include <linux/mtd/partitions.h> |
| 45 | 48 | ||
| 46 | #include <asm/io.h> | ||
| 47 | |||
| 48 | #include <plat/regs-nand.h> | 49 | #include <plat/regs-nand.h> |
| 49 | #include <linux/platform_data/mtd-nand-s3c2410.h> | 50 | #include <linux/platform_data/mtd-nand-s3c2410.h> |
| 50 | 51 | ||
| 51 | #ifdef CONFIG_MTD_NAND_S3C2410_HWECC | ||
| 52 | static int hardware_ecc = 1; | ||
| 53 | #else | ||
| 54 | static int hardware_ecc = 0; | ||
| 55 | #endif | ||
| 56 | |||
| 57 | #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP | ||
| 58 | static const int clock_stop = 1; | ||
| 59 | #else | ||
| 60 | static const int clock_stop = 0; | ||
| 61 | #endif | ||
| 62 | |||
| 63 | |||
| 64 | /* new oob placement block for use with hardware ecc generation | 52 | /* new oob placement block for use with hardware ecc generation |
| 65 | */ | 53 | */ |
| 66 | 54 | ||
| @@ -109,9 +97,8 @@ enum s3c_nand_clk_state { | |||
| 109 | * @mtds: An array of MTD instances on this controoler. | 97 | * @mtds: An array of MTD instances on this controoler. |
| 110 | * @platform: The platform data for this board. | 98 | * @platform: The platform data for this board. |
| 111 | * @device: The platform device we bound to. | 99 | * @device: The platform device we bound to. |
| 112 | * @area: The IO area resource that came from request_mem_region(). | ||
| 113 | * @clk: The clock resource for this controller. | 100 | * @clk: The clock resource for this controller. |
| 114 | * @regs: The area mapped for the hardware registers described by @area. | 101 | * @regs: The area mapped for the hardware registers. |
| 115 | * @sel_reg: Pointer to the register controlling the NAND selection. | 102 | * @sel_reg: Pointer to the register controlling the NAND selection. |
| 116 | * @sel_bit: The bit in @sel_reg to select the NAND chip. | 103 | * @sel_bit: The bit in @sel_reg to select the NAND chip. |
| 117 | * @mtd_count: The number of MTDs created from this controller. | 104 | * @mtd_count: The number of MTDs created from this controller. |
| @@ -128,7 +115,6 @@ struct s3c2410_nand_info { | |||
| 128 | 115 | ||
| 129 | /* device info */ | 116 | /* device info */ |
| 130 | struct device *device; | 117 | struct device *device; |
| 131 | struct resource *area; | ||
| 132 | struct clk *clk; | 118 | struct clk *clk; |
| 133 | void __iomem *regs; | 119 | void __iomem *regs; |
| 134 | void __iomem *sel_reg; | 120 | void __iomem *sel_reg; |
| @@ -169,7 +155,11 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) | |||
| 169 | 155 | ||
| 170 | static inline int allow_clk_suspend(struct s3c2410_nand_info *info) | 156 | static inline int allow_clk_suspend(struct s3c2410_nand_info *info) |
| 171 | { | 157 | { |
| 172 | return clock_stop; | 158 | #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP |
| 159 | return 1; | ||
| 160 | #else | ||
| 161 | return 0; | ||
| 162 | #endif | ||
| 173 | } | 163 | } |
| 174 | 164 | ||
| 175 | /** | 165 | /** |
| @@ -215,7 +205,8 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) | |||
| 215 | pr_debug("result %d from %ld, %d\n", result, clk, wanted); | 205 | pr_debug("result %d from %ld, %d\n", result, clk, wanted); |
| 216 | 206 | ||
| 217 | if (result > max) { | 207 | if (result > max) { |
| 218 | printk("%d ns is too big for current clock rate %ld\n", wanted, clk); | 208 | pr_err("%d ns is too big for current clock rate %ld\n", |
| 209 | wanted, clk); | ||
| 219 | return -1; | 210 | return -1; |
| 220 | } | 211 | } |
| 221 | 212 | ||
| @@ -225,7 +216,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) | |||
| 225 | return result; | 216 | return result; |
| 226 | } | 217 | } |
| 227 | 218 | ||
| 228 | #define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) | 219 | #define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) |
| 229 | 220 | ||
| 230 | /* controller setup */ | 221 | /* controller setup */ |
| 231 | 222 | ||
| @@ -268,7 +259,8 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) | |||
| 268 | } | 259 | } |
| 269 | 260 | ||
| 270 | dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", | 261 | dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", |
| 271 | tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); | 262 | tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), |
| 263 | twrph1, to_ns(twrph1, clkrate)); | ||
| 272 | 264 | ||
| 273 | switch (info->cpu_type) { | 265 | switch (info->cpu_type) { |
| 274 | case TYPE_S3C2410: | 266 | case TYPE_S3C2410: |
| @@ -325,13 +317,13 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) | |||
| 325 | if (ret < 0) | 317 | if (ret < 0) |
| 326 | return ret; | 318 | return ret; |
| 327 | 319 | ||
| 328 | switch (info->cpu_type) { | 320 | switch (info->cpu_type) { |
| 329 | case TYPE_S3C2410: | 321 | case TYPE_S3C2410: |
| 330 | default: | 322 | default: |
| 331 | break; | 323 | break; |
| 332 | 324 | ||
| 333 | case TYPE_S3C2440: | 325 | case TYPE_S3C2440: |
| 334 | case TYPE_S3C2412: | 326 | case TYPE_S3C2412: |
| 335 | /* enable the controller and de-assert nFCE */ | 327 | /* enable the controller and de-assert nFCE */ |
| 336 | 328 | ||
| 337 | writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); | 329 | writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); |
| @@ -450,6 +442,7 @@ static int s3c2412_nand_devready(struct mtd_info *mtd) | |||
| 450 | 442 | ||
| 451 | /* ECC handling functions */ | 443 | /* ECC handling functions */ |
| 452 | 444 | ||
| 445 | #ifdef CONFIG_MTD_NAND_S3C2410_HWECC | ||
| 453 | static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, | 446 | static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, |
| 454 | u_char *read_ecc, u_char *calc_ecc) | 447 | u_char *read_ecc, u_char *calc_ecc) |
| 455 | { | 448 | { |
| @@ -463,10 +456,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, | |||
| 463 | diff1 = read_ecc[1] ^ calc_ecc[1]; | 456 | diff1 = read_ecc[1] ^ calc_ecc[1]; |
| 464 | diff2 = read_ecc[2] ^ calc_ecc[2]; | 457 | diff2 = read_ecc[2] ^ calc_ecc[2]; |
| 465 | 458 | ||
| 466 | pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n", | 459 | pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n", |
| 467 | __func__, | 460 | __func__, 3, read_ecc, 3, calc_ecc, |
| 468 | read_ecc[0], read_ecc[1], read_ecc[2], | ||
| 469 | calc_ecc[0], calc_ecc[1], calc_ecc[2], | ||
| 470 | diff0, diff1, diff2); | 461 | diff0, diff1, diff2); |
| 471 | 462 | ||
| 472 | if (diff0 == 0 && diff1 == 0 && diff2 == 0) | 463 | if (diff0 == 0 && diff1 == 0 && diff2 == 0) |
| @@ -546,7 +537,8 @@ static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode) | |||
| 546 | unsigned long ctrl; | 537 | unsigned long ctrl; |
| 547 | 538 | ||
| 548 | ctrl = readl(info->regs + S3C2440_NFCONT); | 539 | ctrl = readl(info->regs + S3C2440_NFCONT); |
| 549 | writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT); | 540 | writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, |
| 541 | info->regs + S3C2440_NFCONT); | ||
| 550 | } | 542 | } |
| 551 | 543 | ||
| 552 | static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) | 544 | static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) |
| @@ -558,7 +550,8 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) | |||
| 558 | writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); | 550 | writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); |
| 559 | } | 551 | } |
| 560 | 552 | ||
| 561 | static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) | 553 | static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, |
| 554 | u_char *ecc_code) | ||
| 562 | { | 555 | { |
| 563 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | 556 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); |
| 564 | 557 | ||
| @@ -566,13 +559,13 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u | |||
| 566 | ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); | 559 | ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); |
| 567 | ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); | 560 | ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); |
| 568 | 561 | ||
| 569 | pr_debug("%s: returning ecc %02x%02x%02x\n", __func__, | 562 | pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); |
| 570 | ecc_code[0], ecc_code[1], ecc_code[2]); | ||
| 571 | 563 | ||
| 572 | return 0; | 564 | return 0; |
| 573 | } | 565 | } |
| 574 | 566 | ||
| 575 | static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) | 567 | static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, |
| 568 | u_char *ecc_code) | ||
| 576 | { | 569 | { |
| 577 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | 570 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); |
| 578 | unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); | 571 | unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); |
| @@ -581,12 +574,13 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u | |||
| 581 | ecc_code[1] = ecc >> 8; | 574 | ecc_code[1] = ecc >> 8; |
| 582 | ecc_code[2] = ecc >> 16; | 575 | ecc_code[2] = ecc >> 16; |
| 583 | 576 | ||
| 584 | pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]); | 577 | pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); |
| 585 | 578 | ||
| 586 | return 0; | 579 | return 0; |
| 587 | } | 580 | } |
| 588 | 581 | ||
| 589 | static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) | 582 | static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, |
| 583 | u_char *ecc_code) | ||
| 590 | { | 584 | { |
| 591 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | 585 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); |
| 592 | unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); | 586 | unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); |
| @@ -599,6 +593,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u | |||
| 599 | 593 | ||
| 600 | return 0; | 594 | return 0; |
| 601 | } | 595 | } |
| 596 | #endif | ||
| 602 | 597 | ||
| 603 | /* over-ride the standard functions for a little more speed. We can | 598 | /* over-ride the standard functions for a little more speed. We can |
| 604 | * use read/write block to move the data buffers to/from the controller | 599 | * use read/write block to move the data buffers to/from the controller |
| @@ -625,13 +620,15 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 625 | } | 620 | } |
| 626 | } | 621 | } |
| 627 | 622 | ||
| 628 | static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) | 623 | static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, |
| 624 | int len) | ||
| 629 | { | 625 | { |
| 630 | struct nand_chip *this = mtd->priv; | 626 | struct nand_chip *this = mtd->priv; |
| 631 | writesb(this->IO_ADDR_W, buf, len); | 627 | writesb(this->IO_ADDR_W, buf, len); |
| 632 | } | 628 | } |
| 633 | 629 | ||
| 634 | static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) | 630 | static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, |
| 631 | int len) | ||
| 635 | { | 632 | { |
| 636 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | 633 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); |
| 637 | 634 | ||
| @@ -675,7 +672,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) | |||
| 675 | CPUFREQ_TRANSITION_NOTIFIER); | 672 | CPUFREQ_TRANSITION_NOTIFIER); |
| 676 | } | 673 | } |
| 677 | 674 | ||
| 678 | static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) | 675 | static inline void |
| 676 | s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) | ||
| 679 | { | 677 | { |
| 680 | cpufreq_unregister_notifier(&info->freq_transition, | 678 | cpufreq_unregister_notifier(&info->freq_transition, |
| 681 | CPUFREQ_TRANSITION_NOTIFIER); | 679 | CPUFREQ_TRANSITION_NOTIFIER); |
| @@ -687,7 +685,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) | |||
| 687 | return 0; | 685 | return 0; |
| 688 | } | 686 | } |
| 689 | 687 | ||
| 690 | static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) | 688 | static inline void |
| 689 | s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) | ||
| 691 | { | 690 | { |
| 692 | } | 691 | } |
| 693 | #endif | 692 | #endif |
| @@ -717,29 +716,12 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) | |||
| 717 | pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); | 716 | pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); |
| 718 | nand_release(&ptr->mtd); | 717 | nand_release(&ptr->mtd); |
| 719 | } | 718 | } |
| 720 | |||
| 721 | kfree(info->mtds); | ||
| 722 | } | 719 | } |
| 723 | 720 | ||
| 724 | /* free the common resources */ | 721 | /* free the common resources */ |
| 725 | 722 | ||
| 726 | if (!IS_ERR(info->clk)) { | 723 | if (!IS_ERR(info->clk)) |
| 727 | s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); | 724 | s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); |
| 728 | clk_put(info->clk); | ||
| 729 | } | ||
| 730 | |||
| 731 | if (info->regs != NULL) { | ||
| 732 | iounmap(info->regs); | ||
| 733 | info->regs = NULL; | ||
| 734 | } | ||
| 735 | |||
| 736 | if (info->area != NULL) { | ||
| 737 | release_resource(info->area); | ||
| 738 | kfree(info->area); | ||
| 739 | info->area = NULL; | ||
| 740 | } | ||
| 741 | |||
| 742 | kfree(info); | ||
| 743 | 725 | ||
| 744 | return 0; | 726 | return 0; |
| 745 | } | 727 | } |
| @@ -810,7 +792,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
| 810 | dev_info(info->device, "System booted from NAND\n"); | 792 | dev_info(info->device, "System booted from NAND\n"); |
| 811 | 793 | ||
| 812 | break; | 794 | break; |
| 813 | } | 795 | } |
| 814 | 796 | ||
| 815 | chip->IO_ADDR_R = chip->IO_ADDR_W; | 797 | chip->IO_ADDR_R = chip->IO_ADDR_W; |
| 816 | 798 | ||
| @@ -819,32 +801,31 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
| 819 | nmtd->mtd.owner = THIS_MODULE; | 801 | nmtd->mtd.owner = THIS_MODULE; |
| 820 | nmtd->set = set; | 802 | nmtd->set = set; |
| 821 | 803 | ||
| 822 | if (hardware_ecc) { | 804 | #ifdef CONFIG_MTD_NAND_S3C2410_HWECC |
| 805 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | ||
| 806 | chip->ecc.correct = s3c2410_nand_correct_data; | ||
| 807 | chip->ecc.mode = NAND_ECC_HW; | ||
| 808 | chip->ecc.strength = 1; | ||
| 809 | |||
| 810 | switch (info->cpu_type) { | ||
| 811 | case TYPE_S3C2410: | ||
| 812 | chip->ecc.hwctl = s3c2410_nand_enable_hwecc; | ||
| 823 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | 813 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; |
| 824 | chip->ecc.correct = s3c2410_nand_correct_data; | 814 | break; |
| 825 | chip->ecc.mode = NAND_ECC_HW; | ||
| 826 | chip->ecc.strength = 1; | ||
| 827 | |||
| 828 | switch (info->cpu_type) { | ||
| 829 | case TYPE_S3C2410: | ||
| 830 | chip->ecc.hwctl = s3c2410_nand_enable_hwecc; | ||
| 831 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | ||
| 832 | break; | ||
| 833 | |||
| 834 | case TYPE_S3C2412: | ||
| 835 | chip->ecc.hwctl = s3c2412_nand_enable_hwecc; | ||
| 836 | chip->ecc.calculate = s3c2412_nand_calculate_ecc; | ||
| 837 | break; | ||
| 838 | |||
| 839 | case TYPE_S3C2440: | ||
| 840 | chip->ecc.hwctl = s3c2440_nand_enable_hwecc; | ||
| 841 | chip->ecc.calculate = s3c2440_nand_calculate_ecc; | ||
| 842 | break; | ||
| 843 | 815 | ||
| 844 | } | 816 | case TYPE_S3C2412: |
| 845 | } else { | 817 | chip->ecc.hwctl = s3c2412_nand_enable_hwecc; |
| 846 | chip->ecc.mode = NAND_ECC_SOFT; | 818 | chip->ecc.calculate = s3c2412_nand_calculate_ecc; |
| 819 | break; | ||
| 820 | |||
| 821 | case TYPE_S3C2440: | ||
| 822 | chip->ecc.hwctl = s3c2440_nand_enable_hwecc; | ||
| 823 | chip->ecc.calculate = s3c2440_nand_calculate_ecc; | ||
| 824 | break; | ||
| 847 | } | 825 | } |
| 826 | #else | ||
| 827 | chip->ecc.mode = NAND_ECC_SOFT; | ||
| 828 | #endif | ||
| 848 | 829 | ||
| 849 | if (set->ecc_layout != NULL) | 830 | if (set->ecc_layout != NULL) |
| 850 | chip->ecc.layout = set->ecc_layout; | 831 | chip->ecc.layout = set->ecc_layout; |
| @@ -921,7 +902,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, | |||
| 921 | static int s3c24xx_nand_probe(struct platform_device *pdev) | 902 | static int s3c24xx_nand_probe(struct platform_device *pdev) |
| 922 | { | 903 | { |
| 923 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); | 904 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); |
| 924 | enum s3c_cpu_type cpu_type; | 905 | enum s3c_cpu_type cpu_type; |
| 925 | struct s3c2410_nand_info *info; | 906 | struct s3c2410_nand_info *info; |
| 926 | struct s3c2410_nand_mtd *nmtd; | 907 | struct s3c2410_nand_mtd *nmtd; |
| 927 | struct s3c2410_nand_set *sets; | 908 | struct s3c2410_nand_set *sets; |
| @@ -935,7 +916,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
| 935 | 916 | ||
| 936 | pr_debug("s3c2410_nand_probe(%p)\n", pdev); | 917 | pr_debug("s3c2410_nand_probe(%p)\n", pdev); |
| 937 | 918 | ||
| 938 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 919 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
| 939 | if (info == NULL) { | 920 | if (info == NULL) { |
| 940 | dev_err(&pdev->dev, "no memory for flash info\n"); | 921 | dev_err(&pdev->dev, "no memory for flash info\n"); |
| 941 | err = -ENOMEM; | 922 | err = -ENOMEM; |
| @@ -949,7 +930,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
| 949 | 930 | ||
| 950 | /* get the clock source and enable it */ | 931 | /* get the clock source and enable it */ |
| 951 | 932 | ||
| 952 | info->clk = clk_get(&pdev->dev, "nand"); | 933 | info->clk = devm_clk_get(&pdev->dev, "nand"); |
| 953 | if (IS_ERR(info->clk)) { | 934 | if (IS_ERR(info->clk)) { |
| 954 | dev_err(&pdev->dev, "failed to get clock\n"); | 935 | dev_err(&pdev->dev, "failed to get clock\n"); |
| 955 | err = -ENOENT; | 936 | err = -ENOENT; |
| @@ -961,22 +942,14 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
| 961 | /* allocate and map the resource */ | 942 | /* allocate and map the resource */ |
| 962 | 943 | ||
| 963 | /* currently we assume we have the one resource */ | 944 | /* currently we assume we have the one resource */ |
| 964 | res = pdev->resource; | 945 | res = pdev->resource; |
| 965 | size = resource_size(res); | 946 | size = resource_size(res); |
| 966 | 947 | ||
| 967 | info->area = request_mem_region(res->start, size, pdev->name); | 948 | info->device = &pdev->dev; |
| 968 | 949 | info->platform = plat; | |
| 969 | if (info->area == NULL) { | 950 | info->cpu_type = cpu_type; |
| 970 | dev_err(&pdev->dev, "cannot reserve register region\n"); | ||
| 971 | err = -ENOENT; | ||
| 972 | goto exit_error; | ||
| 973 | } | ||
| 974 | |||
| 975 | info->device = &pdev->dev; | ||
| 976 | info->platform = plat; | ||
| 977 | info->regs = ioremap(res->start, size); | ||
| 978 | info->cpu_type = cpu_type; | ||
| 979 | 951 | ||
| 952 | info->regs = devm_request_and_ioremap(&pdev->dev, res); | ||
| 980 | if (info->regs == NULL) { | 953 | if (info->regs == NULL) { |
| 981 | dev_err(&pdev->dev, "cannot reserve register region\n"); | 954 | dev_err(&pdev->dev, "cannot reserve register region\n"); |
| 982 | err = -EIO; | 955 | err = -EIO; |
| @@ -999,7 +972,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
| 999 | /* allocate our information */ | 972 | /* allocate our information */ |
| 1000 | 973 | ||
| 1001 | size = nr_sets * sizeof(*info->mtds); | 974 | size = nr_sets * sizeof(*info->mtds); |
| 1002 | info->mtds = kzalloc(size, GFP_KERNEL); | 975 | info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); |
| 1003 | if (info->mtds == NULL) { | 976 | if (info->mtds == NULL) { |
| 1004 | dev_err(&pdev->dev, "failed to allocate mtd storage\n"); | 977 | dev_err(&pdev->dev, "failed to allocate mtd storage\n"); |
| 1005 | err = -ENOMEM; | 978 | err = -ENOMEM; |
| @@ -1011,7 +984,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
| 1011 | nmtd = info->mtds; | 984 | nmtd = info->mtds; |
| 1012 | 985 | ||
| 1013 | for (setno = 0; setno < nr_sets; setno++, nmtd++) { | 986 | for (setno = 0; setno < nr_sets; setno++, nmtd++) { |
| 1014 | pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info); | 987 | pr_debug("initialising set %d (%p, info %p)\n", |
| 988 | setno, nmtd, info); | ||
| 1015 | 989 | ||
| 1016 | s3c2410_nand_init_chip(info, nmtd, sets); | 990 | s3c2410_nand_init_chip(info, nmtd, sets); |
| 1017 | 991 | ||
| @@ -1134,20 +1108,7 @@ static struct platform_driver s3c24xx_nand_driver = { | |||
| 1134 | }, | 1108 | }, |
| 1135 | }; | 1109 | }; |
| 1136 | 1110 | ||
| 1137 | static int __init s3c2410_nand_init(void) | 1111 | module_platform_driver(s3c24xx_nand_driver); |
| 1138 | { | ||
| 1139 | printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); | ||
| 1140 | |||
| 1141 | return platform_driver_register(&s3c24xx_nand_driver); | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | static void __exit s3c2410_nand_exit(void) | ||
| 1145 | { | ||
| 1146 | platform_driver_unregister(&s3c24xx_nand_driver); | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | module_init(s3c2410_nand_init); | ||
| 1150 | module_exit(s3c2410_nand_exit); | ||
| 1151 | 1112 | ||
| 1152 | MODULE_LICENSE("GPL"); | 1113 | MODULE_LICENSE("GPL"); |
| 1153 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); | 1114 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); |
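The s3c2410 changes above replace manual kzalloc()/clk_get()/request_mem_region()+ioremap() calls with their device-managed devm_* counterparts and collapse the init/exit boilerplate into module_platform_driver(). As a rough, hedged sketch of the resulting shape — example_info, example_probe and "example-nand" are made-up names, not the driver's — a managed probe reduces to:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_info {
		struct clk *clk;
		void __iomem *regs;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_info *info;
		struct resource *res;

		/* freed automatically when probe fails or the device is unbound */
		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->clk = devm_clk_get(&pdev->dev, "nand");
		if (IS_ERR(info->clk))
			return PTR_ERR(info->clk);

		/* request_mem_region() + ioremap() in one managed call */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		info->regs = devm_request_and_ioremap(&pdev->dev, res);
		if (!info->regs)
			return -EIO;

		platform_set_drvdata(pdev, info);
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			.name	= "example-nand",
			.owner	= THIS_MODULE,
		},
	};
	module_platform_driver(example_driver);

Because devm_* resources are released by the driver core, the matching kfree()/clk_put()/iounmap() calls disappear from the error and remove paths, which is most of what the hunks above delete. (devm_request_and_ioremap() mirrors the call used in the hunk; later kernels use devm_ioremap_resource() instead.)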
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index aa9b8a5e0b8f..4fbfe96e37a1 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
| @@ -24,10 +24,12 @@ | |||
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
| 27 | #include <linux/interrupt.h> | ||
| 27 | #include <linux/io.h> | 28 | #include <linux/io.h> |
| 28 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
| 29 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
| 30 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 32 | #include <linux/string.h> | ||
| 31 | 33 | ||
| 32 | #include <linux/mtd/mtd.h> | 34 | #include <linux/mtd/mtd.h> |
| 33 | #include <linux/mtd/nand.h> | 35 | #include <linux/mtd/nand.h> |
| @@ -43,11 +45,17 @@ static struct nand_ecclayout flctl_4secc_oob_16 = { | |||
| 43 | }; | 45 | }; |
| 44 | 46 | ||
| 45 | static struct nand_ecclayout flctl_4secc_oob_64 = { | 47 | static struct nand_ecclayout flctl_4secc_oob_64 = { |
| 46 | .eccbytes = 10, | 48 | .eccbytes = 4 * 10, |
| 47 | .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57}, | 49 | .eccpos = { |
| 50 | 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, | ||
| 51 | 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, | ||
| 52 | 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, | ||
| 53 | 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, | ||
| 48 | .oobfree = { | 54 | .oobfree = { |
| 49 | {.offset = 60, | 55 | {.offset = 2, .length = 4}, |
| 50 | . length = 4} }, | 56 | {.offset = 16, .length = 6}, |
| 57 | {.offset = 32, .length = 6}, | ||
| 58 | {.offset = 48, .length = 6} }, | ||
| 51 | }; | 59 | }; |
| 52 | 60 | ||
| 53 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | 61 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; |
| @@ -61,15 +69,15 @@ static struct nand_bbt_descr flctl_4secc_smallpage = { | |||
| 61 | 69 | ||
| 62 | static struct nand_bbt_descr flctl_4secc_largepage = { | 70 | static struct nand_bbt_descr flctl_4secc_largepage = { |
| 63 | .options = NAND_BBT_SCAN2NDPAGE, | 71 | .options = NAND_BBT_SCAN2NDPAGE, |
| 64 | .offs = 58, | 72 | .offs = 0, |
| 65 | .len = 2, | 73 | .len = 2, |
| 66 | .pattern = scan_ff_pattern, | 74 | .pattern = scan_ff_pattern, |
| 67 | }; | 75 | }; |
| 68 | 76 | ||
| 69 | static void empty_fifo(struct sh_flctl *flctl) | 77 | static void empty_fifo(struct sh_flctl *flctl) |
| 70 | { | 78 | { |
| 71 | writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */ | 79 | writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl)); |
| 72 | writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */ | 80 | writel(flctl->flintdmacr_base, FLINTDMACR(flctl)); |
| 73 | } | 81 | } |
| 74 | 82 | ||
| 75 | static void start_translation(struct sh_flctl *flctl) | 83 | static void start_translation(struct sh_flctl *flctl) |
| @@ -158,27 +166,56 @@ static void wait_wfifo_ready(struct sh_flctl *flctl) | |||
| 158 | timeout_error(flctl, __func__); | 166 | timeout_error(flctl, __func__); |
| 159 | } | 167 | } |
| 160 | 168 | ||
| 161 | static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) | 169 | static enum flctl_ecc_res_t wait_recfifo_ready |
| 170 | (struct sh_flctl *flctl, int sector_number) | ||
| 162 | { | 171 | { |
| 163 | uint32_t timeout = LOOP_TIMEOUT_MAX; | 172 | uint32_t timeout = LOOP_TIMEOUT_MAX; |
| 164 | int checked[4]; | ||
| 165 | void __iomem *ecc_reg[4]; | 173 | void __iomem *ecc_reg[4]; |
| 166 | int i; | 174 | int i; |
| 175 | int state = FL_SUCCESS; | ||
| 167 | uint32_t data, size; | 176 | uint32_t data, size; |
| 168 | 177 | ||
| 169 | memset(checked, 0, sizeof(checked)); | 178 | /* |
| 170 | 179 | * First this loop checks in FLDTCNTR if we are ready to read out the |
| 180 | * oob data. This is the case if either all went fine without errors or | ||
| 181 | * if the bottom part of the loop corrected the errors or marked them as | ||
| 182 | * uncorrectable and the controller is given time to push the data into | ||
| 183 | * the FIFO. | ||
| 184 | */ | ||
| 171 | while (timeout--) { | 185 | while (timeout--) { |
| 186 | /* check if all is ok and we can read out the OOB */ | ||
| 172 | size = readl(FLDTCNTR(flctl)) >> 24; | 187 | size = readl(FLDTCNTR(flctl)) >> 24; |
| 173 | if (size & 0xFF) | 188 | if ((size & 0xFF) == 4) |
| 174 | return 0; /* success */ | 189 | return state; |
| 190 | |||
| 191 | /* check if a correction code has been calculated */ | ||
| 192 | if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) { | ||
| 193 | /* | ||
| 194 | * either we wait for the fifo to be filled or a | ||
| 195 | * correction pattern is being generated | ||
| 196 | */ | ||
| 197 | udelay(1); | ||
| 198 | continue; | ||
| 199 | } | ||
| 175 | 200 | ||
| 176 | if (readl(FL4ECCCR(flctl)) & _4ECCFA) | 201 | /* check for an uncorrectable error */ |
| 177 | return 1; /* can't correct */ | 202 | if (readl(FL4ECCCR(flctl)) & _4ECCFA) { |
| 203 | /* check if we face a non-empty page */ | ||
| 204 | for (i = 0; i < 512; i++) { | ||
| 205 | if (flctl->done_buff[i] != 0xff) { | ||
| 206 | state = FL_ERROR; /* can't correct */ | ||
| 207 | break; | ||
| 208 | } | ||
| 209 | } | ||
| 178 | 210 | ||
| 179 | udelay(1); | 211 | if (state == FL_SUCCESS) |
| 180 | if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) | 212 | dev_dbg(&flctl->pdev->dev, |
| 213 | "reading empty sector %d, ecc error ignored\n", | ||
| 214 | sector_number); | ||
| 215 | |||
| 216 | writel(0, FL4ECCCR(flctl)); | ||
| 181 | continue; | 217 | continue; |
| 218 | } | ||
| 182 | 219 | ||
| 183 | /* start error correction */ | 220 | /* start error correction */ |
| 184 | ecc_reg[0] = FL4ECCRESULT0(flctl); | 221 | ecc_reg[0] = FL4ECCRESULT0(flctl); |
| @@ -187,28 +224,26 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) | |||
| 187 | ecc_reg[3] = FL4ECCRESULT3(flctl); | 224 | ecc_reg[3] = FL4ECCRESULT3(flctl); |
| 188 | 225 | ||
| 189 | for (i = 0; i < 3; i++) { | 226 | for (i = 0; i < 3; i++) { |
| 227 | uint8_t org; | ||
| 228 | int index; | ||
| 229 | |||
| 190 | data = readl(ecc_reg[i]); | 230 | data = readl(ecc_reg[i]); |
| 191 | if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) { | ||
| 192 | uint8_t org; | ||
| 193 | int index; | ||
| 194 | |||
| 195 | if (flctl->page_size) | ||
| 196 | index = (512 * sector_number) + | ||
| 197 | (data >> 16); | ||
| 198 | else | ||
| 199 | index = data >> 16; | ||
| 200 | |||
| 201 | org = flctl->done_buff[index]; | ||
| 202 | flctl->done_buff[index] = org ^ (data & 0xFF); | ||
| 203 | checked[i] = 1; | ||
| 204 | } | ||
| 205 | } | ||
| 206 | 231 | ||
| 232 | if (flctl->page_size) | ||
| 233 | index = (512 * sector_number) + | ||
| 234 | (data >> 16); | ||
| 235 | else | ||
| 236 | index = data >> 16; | ||
| 237 | |||
| 238 | org = flctl->done_buff[index]; | ||
| 239 | flctl->done_buff[index] = org ^ (data & 0xFF); | ||
| 240 | } | ||
| 241 | state = FL_REPAIRABLE; | ||
| 207 | writel(0, FL4ECCCR(flctl)); | 242 | writel(0, FL4ECCCR(flctl)); |
| 208 | } | 243 | } |
| 209 | 244 | ||
| 210 | timeout_error(flctl, __func__); | 245 | timeout_error(flctl, __func__); |
| 211 | return 1; /* timeout */ | 246 | return FL_TIMEOUT; /* timeout */ |
| 212 | } | 247 | } |
| 213 | 248 | ||
| 214 | static void wait_wecfifo_ready(struct sh_flctl *flctl) | 249 | static void wait_wecfifo_ready(struct sh_flctl *flctl) |
| @@ -241,31 +276,33 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | |||
| 241 | { | 276 | { |
| 242 | int i, len_4align; | 277 | int i, len_4align; |
| 243 | unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; | 278 | unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; |
| 244 | void *fifo_addr = (void *)FLDTFIFO(flctl); | ||
| 245 | 279 | ||
| 246 | len_4align = (rlen + 3) / 4; | 280 | len_4align = (rlen + 3) / 4; |
| 247 | 281 | ||
| 248 | for (i = 0; i < len_4align; i++) { | 282 | for (i = 0; i < len_4align; i++) { |
| 249 | wait_rfifo_ready(flctl); | 283 | wait_rfifo_ready(flctl); |
| 250 | buf[i] = readl(fifo_addr); | 284 | buf[i] = readl(FLDTFIFO(flctl)); |
| 251 | buf[i] = be32_to_cpu(buf[i]); | 285 | buf[i] = be32_to_cpu(buf[i]); |
| 252 | } | 286 | } |
| 253 | } | 287 | } |
| 254 | 288 | ||
| 255 | static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector) | 289 | static enum flctl_ecc_res_t read_ecfiforeg |
| 290 | (struct sh_flctl *flctl, uint8_t *buff, int sector) | ||
| 256 | { | 291 | { |
| 257 | int i; | 292 | int i; |
| 293 | enum flctl_ecc_res_t res; | ||
| 258 | unsigned long *ecc_buf = (unsigned long *)buff; | 294 | unsigned long *ecc_buf = (unsigned long *)buff; |
| 259 | void *fifo_addr = (void *)FLECFIFO(flctl); | ||
| 260 | 295 | ||
| 261 | for (i = 0; i < 4; i++) { | 296 | res = wait_recfifo_ready(flctl , sector); |
| 262 | if (wait_recfifo_ready(flctl , sector)) | 297 | |
| 263 | return 1; | 298 | if (res != FL_ERROR) { |
| 264 | ecc_buf[i] = readl(fifo_addr); | 299 | for (i = 0; i < 4; i++) { |
| 265 | ecc_buf[i] = be32_to_cpu(ecc_buf[i]); | 300 | ecc_buf[i] = readl(FLECFIFO(flctl)); |
| 301 | ecc_buf[i] = be32_to_cpu(ecc_buf[i]); | ||
| 302 | } | ||
| 266 | } | 303 | } |
| 267 | 304 | ||
| 268 | return 0; | 305 | return res; |
| 269 | } | 306 | } |
| 270 | 307 | ||
| 271 | static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | 308 | static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) |
| @@ -281,6 +318,18 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | |||
| 281 | } | 318 | } |
| 282 | } | 319 | } |
| 283 | 320 | ||
| 321 | static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | ||
| 322 | { | ||
| 323 | int i, len_4align; | ||
| 324 | unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; | ||
| 325 | |||
| 326 | len_4align = (rlen + 3) / 4; | ||
| 327 | for (i = 0; i < len_4align; i++) { | ||
| 328 | wait_wecfifo_ready(flctl); | ||
| 329 | writel(cpu_to_be32(data[i]), FLECFIFO(flctl)); | ||
| 330 | } | ||
| 331 | } | ||
| 332 | |||
| 284 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) | 333 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) |
| 285 | { | 334 | { |
| 286 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 335 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| @@ -346,73 +395,65 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va | |||
| 346 | static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | 395 | static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, |
| 347 | uint8_t *buf, int oob_required, int page) | 396 | uint8_t *buf, int oob_required, int page) |
| 348 | { | 397 | { |
| 349 | int i, eccsize = chip->ecc.size; | 398 | chip->read_buf(mtd, buf, mtd->writesize); |
| 350 | int eccbytes = chip->ecc.bytes; | 399 | if (oob_required) |
| 351 | int eccsteps = chip->ecc.steps; | 400 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 352 | uint8_t *p = buf; | ||
| 353 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | ||
| 354 | |||
| 355 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) | ||
| 356 | chip->read_buf(mtd, p, eccsize); | ||
| 357 | |||
| 358 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { | ||
| 359 | if (flctl->hwecc_cant_correct[i]) | ||
| 360 | mtd->ecc_stats.failed++; | ||
| 361 | else | ||
| 362 | mtd->ecc_stats.corrected += 0; /* FIXME */ | ||
| 363 | } | ||
| 364 | |||
| 365 | return 0; | 401 | return 0; |
| 366 | } | 402 | } |
| 367 | 403 | ||
| 368 | static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | 404 | static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, |
| 369 | const uint8_t *buf, int oob_required) | 405 | const uint8_t *buf, int oob_required) |
| 370 | { | 406 | { |
| 371 | int i, eccsize = chip->ecc.size; | 407 | chip->write_buf(mtd, buf, mtd->writesize); |
| 372 | int eccbytes = chip->ecc.bytes; | 408 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); |
| 373 | int eccsteps = chip->ecc.steps; | 409 | return 0; |
| 374 | const uint8_t *p = buf; | ||
| 375 | |||
| 376 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) | ||
| 377 | chip->write_buf(mtd, p, eccsize); | ||
| 378 | } | 410 | } |
| 379 | 411 | ||
| 380 | static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) | 412 | static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) |
| 381 | { | 413 | { |
| 382 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 414 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| 383 | int sector, page_sectors; | 415 | int sector, page_sectors; |
| 416 | enum flctl_ecc_res_t ecc_result; | ||
| 384 | 417 | ||
| 385 | if (flctl->page_size) | 418 | page_sectors = flctl->page_size ? 4 : 1; |
| 386 | page_sectors = 4; | ||
| 387 | else | ||
| 388 | page_sectors = 1; | ||
| 389 | |||
| 390 | writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT, | ||
| 391 | FLCMNCR(flctl)); | ||
| 392 | 419 | ||
| 393 | set_cmd_regs(mtd, NAND_CMD_READ0, | 420 | set_cmd_regs(mtd, NAND_CMD_READ0, |
| 394 | (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); | 421 | (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); |
| 395 | 422 | ||
| 396 | for (sector = 0; sector < page_sectors; sector++) { | 423 | writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT, |
| 397 | int ret; | 424 | FLCMNCR(flctl)); |
| 425 | writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl)); | ||
| 426 | writel(page_addr << 2, FLADR(flctl)); | ||
| 398 | 427 | ||
| 399 | empty_fifo(flctl); | 428 | empty_fifo(flctl); |
| 400 | writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl)); | 429 | start_translation(flctl); |
| 401 | writel(page_addr << 2 | sector, FLADR(flctl)); | ||
| 402 | 430 | ||
| 403 | start_translation(flctl); | 431 | for (sector = 0; sector < page_sectors; sector++) { |
| 404 | read_fiforeg(flctl, 512, 512 * sector); | 432 | read_fiforeg(flctl, 512, 512 * sector); |
| 405 | 433 | ||
| 406 | ret = read_ecfiforeg(flctl, | 434 | ecc_result = read_ecfiforeg(flctl, |
| 407 | &flctl->done_buff[mtd->writesize + 16 * sector], | 435 | &flctl->done_buff[mtd->writesize + 16 * sector], |
| 408 | sector); | 436 | sector); |
| 409 | 437 | ||
| 410 | if (ret) | 438 | switch (ecc_result) { |
| 411 | flctl->hwecc_cant_correct[sector] = 1; | 439 | case FL_REPAIRABLE: |
| 412 | 440 | dev_info(&flctl->pdev->dev, | |
| 413 | writel(0x0, FL4ECCCR(flctl)); | 441 | "applied ecc on page 0x%x", page_addr); |
| 414 | wait_completion(flctl); | 442 | flctl->mtd.ecc_stats.corrected++; |
| 443 | break; | ||
| 444 | case FL_ERROR: | ||
| 445 | dev_warn(&flctl->pdev->dev, | ||
| 446 | "page 0x%x contains corrupted data\n", | ||
| 447 | page_addr); | ||
| 448 | flctl->mtd.ecc_stats.failed++; | ||
| 449 | break; | ||
| 450 | default: | ||
| 451 | ; | ||
| 452 | } | ||
| 415 | } | 453 | } |
| 454 | |||
| 455 | wait_completion(flctl); | ||
| 456 | |||
| 416 | writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), | 457 | writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), |
| 417 | FLCMNCR(flctl)); | 458 | FLCMNCR(flctl)); |
| 418 | } | 459 | } |
| @@ -420,30 +461,20 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) | |||
| 420 | static void execmd_read_oob(struct mtd_info *mtd, int page_addr) | 461 | static void execmd_read_oob(struct mtd_info *mtd, int page_addr) |
| 421 | { | 462 | { |
| 422 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 463 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| 464 | int page_sectors = flctl->page_size ? 4 : 1; | ||
| 465 | int i; | ||
| 423 | 466 | ||
| 424 | set_cmd_regs(mtd, NAND_CMD_READ0, | 467 | set_cmd_regs(mtd, NAND_CMD_READ0, |
| 425 | (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); | 468 | (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); |
| 426 | 469 | ||
| 427 | empty_fifo(flctl); | 470 | empty_fifo(flctl); |
| 428 | if (flctl->page_size) { | ||
| 429 | int i; | ||
| 430 | /* In case that the page size is 2k */ | ||
| 431 | for (i = 0; i < 16 * 3; i++) | ||
| 432 | flctl->done_buff[i] = 0xFF; | ||
| 433 | |||
| 434 | set_addr(mtd, 3 * 528 + 512, page_addr); | ||
| 435 | writel(16, FLDTCNTR(flctl)); | ||
| 436 | 471 | ||
| 437 | start_translation(flctl); | 472 | for (i = 0; i < page_sectors; i++) { |
| 438 | read_fiforeg(flctl, 16, 16 * 3); | 473 | set_addr(mtd, (512 + 16) * i + 512 , page_addr); |
| 439 | wait_completion(flctl); | ||
| 440 | } else { | ||
| 441 | /* In case that the page size is 512b */ | ||
| 442 | set_addr(mtd, 512, page_addr); | ||
| 443 | writel(16, FLDTCNTR(flctl)); | 474 | writel(16, FLDTCNTR(flctl)); |
| 444 | 475 | ||
| 445 | start_translation(flctl); | 476 | start_translation(flctl); |
| 446 | read_fiforeg(flctl, 16, 0); | 477 | read_fiforeg(flctl, 16, 16 * i); |
| 447 | wait_completion(flctl); | 478 | wait_completion(flctl); |
| 448 | } | 479 | } |
| 449 | } | 480 | } |
| @@ -451,34 +482,26 @@ static void execmd_read_oob(struct mtd_info *mtd, int page_addr) | |||
| 451 | static void execmd_write_page_sector(struct mtd_info *mtd) | 482 | static void execmd_write_page_sector(struct mtd_info *mtd) |
| 452 | { | 483 | { |
| 453 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 484 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| 454 | int i, page_addr = flctl->seqin_page_addr; | 485 | int page_addr = flctl->seqin_page_addr; |
| 455 | int sector, page_sectors; | 486 | int sector, page_sectors; |
| 456 | 487 | ||
| 457 | if (flctl->page_size) | 488 | page_sectors = flctl->page_size ? 4 : 1; |
| 458 | page_sectors = 4; | ||
| 459 | else | ||
| 460 | page_sectors = 1; | ||
| 461 | |||
| 462 | writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl)); | ||
| 463 | 489 | ||
| 464 | set_cmd_regs(mtd, NAND_CMD_PAGEPROG, | 490 | set_cmd_regs(mtd, NAND_CMD_PAGEPROG, |
| 465 | (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); | 491 | (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); |
| 466 | 492 | ||
| 467 | for (sector = 0; sector < page_sectors; sector++) { | 493 | empty_fifo(flctl); |
| 468 | empty_fifo(flctl); | 494 | writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl)); |
| 469 | writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl)); | 495 | writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl)); |
| 470 | writel(page_addr << 2 | sector, FLADR(flctl)); | 496 | writel(page_addr << 2, FLADR(flctl)); |
| 497 | start_translation(flctl); | ||
| 471 | 498 | ||
| 472 | start_translation(flctl); | 499 | for (sector = 0; sector < page_sectors; sector++) { |
| 473 | write_fiforeg(flctl, 512, 512 * sector); | 500 | write_fiforeg(flctl, 512, 512 * sector); |
| 474 | 501 | write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector); | |
| 475 | for (i = 0; i < 4; i++) { | ||
| 476 | wait_wecfifo_ready(flctl); /* wait for write ready */ | ||
| 477 | writel(0xFFFFFFFF, FLECFIFO(flctl)); | ||
| 478 | } | ||
| 479 | wait_completion(flctl); | ||
| 480 | } | 502 | } |
| 481 | 503 | ||
| 504 | wait_completion(flctl); | ||
| 482 | writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl)); | 505 | writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl)); |
| 483 | } | 506 | } |
| 484 | 507 | ||
| @@ -488,18 +511,12 @@ static void execmd_write_oob(struct mtd_info *mtd) | |||
| 488 | int page_addr = flctl->seqin_page_addr; | 511 | int page_addr = flctl->seqin_page_addr; |
| 489 | int sector, page_sectors; | 512 | int sector, page_sectors; |
| 490 | 513 | ||
| 491 | if (flctl->page_size) { | 514 | page_sectors = flctl->page_size ? 4 : 1; |
| 492 | sector = 3; | ||
| 493 | page_sectors = 4; | ||
| 494 | } else { | ||
| 495 | sector = 0; | ||
| 496 | page_sectors = 1; | ||
| 497 | } | ||
| 498 | 515 | ||
| 499 | set_cmd_regs(mtd, NAND_CMD_PAGEPROG, | 516 | set_cmd_regs(mtd, NAND_CMD_PAGEPROG, |
| 500 | (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); | 517 | (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); |
| 501 | 518 | ||
| 502 | for (; sector < page_sectors; sector++) { | 519 | for (sector = 0; sector < page_sectors; sector++) { |
| 503 | empty_fifo(flctl); | 520 | empty_fifo(flctl); |
| 504 | set_addr(mtd, sector * 528 + 512, page_addr); | 521 | set_addr(mtd, sector * 528 + 512, page_addr); |
| 505 | writel(16, FLDTCNTR(flctl)); /* set read size */ | 522 | writel(16, FLDTCNTR(flctl)); /* set read size */ |
| @@ -731,10 +748,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr) | |||
| 731 | static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 748 | static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
| 732 | { | 749 | { |
| 733 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 750 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| 734 | int i, index = flctl->index; | 751 | int index = flctl->index; |
| 735 | 752 | ||
| 736 | for (i = 0; i < len; i++) | 753 | memcpy(&flctl->done_buff[index], buf, len); |
| 737 | flctl->done_buff[index + i] = buf[i]; | ||
| 738 | flctl->index += len; | 754 | flctl->index += len; |
| 739 | } | 755 | } |
| 740 | 756 | ||
| @@ -763,20 +779,11 @@ static uint16_t flctl_read_word(struct mtd_info *mtd) | |||
| 763 | 779 | ||
| 764 | static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | 780 | static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
| 765 | { | 781 | { |
| 766 | int i; | 782 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
| 767 | 783 | int index = flctl->index; | |
| 768 | for (i = 0; i < len; i++) | ||
| 769 | buf[i] = flctl_read_byte(mtd); | ||
| 770 | } | ||
| 771 | |||
| 772 | static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 773 | { | ||
| 774 | int i; | ||
| 775 | 784 | ||
| 776 | for (i = 0; i < len; i++) | 785 | memcpy(buf, &flctl->done_buff[index], len); |
| 777 | if (buf[i] != flctl_read_byte(mtd)) | 786 | flctl->index += len; |
| 778 | return -EFAULT; | ||
| 779 | return 0; | ||
| 780 | } | 787 | } |
| 781 | 788 | ||
| 782 | static int flctl_chip_init_tail(struct mtd_info *mtd) | 789 | static int flctl_chip_init_tail(struct mtd_info *mtd) |
| @@ -831,7 +838,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) | |||
| 831 | chip->ecc.mode = NAND_ECC_HW; | 838 | chip->ecc.mode = NAND_ECC_HW; |
| 832 | 839 | ||
| 833 | /* 4 symbols ECC enabled */ | 840 | /* 4 symbols ECC enabled */ |
| 834 | flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02; | 841 | flctl->flcmncr_base |= _4ECCEN; |
| 835 | } else { | 842 | } else { |
| 836 | chip->ecc.mode = NAND_ECC_SOFT; | 843 | chip->ecc.mode = NAND_ECC_SOFT; |
| 837 | } | 844 | } |
| @@ -839,6 +846,16 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) | |||
| 839 | return 0; | 846 | return 0; |
| 840 | } | 847 | } |
| 841 | 848 | ||
| 849 | static irqreturn_t flctl_handle_flste(int irq, void *dev_id) | ||
| 850 | { | ||
| 851 | struct sh_flctl *flctl = dev_id; | ||
| 852 | |||
| 853 | dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl))); | ||
| 854 | writel(flctl->flintdmacr_base, FLINTDMACR(flctl)); | ||
| 855 | |||
| 856 | return IRQ_HANDLED; | ||
| 857 | } | ||
| 858 | |||
| 842 | static int __devinit flctl_probe(struct platform_device *pdev) | 859 | static int __devinit flctl_probe(struct platform_device *pdev) |
| 843 | { | 860 | { |
| 844 | struct resource *res; | 861 | struct resource *res; |
| @@ -847,6 +864,7 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
| 847 | struct nand_chip *nand; | 864 | struct nand_chip *nand; |
| 848 | struct sh_flctl_platform_data *pdata; | 865 | struct sh_flctl_platform_data *pdata; |
| 849 | int ret = -ENXIO; | 866 | int ret = -ENXIO; |
| 867 | int irq; | ||
| 850 | 868 | ||
| 851 | pdata = pdev->dev.platform_data; | 869 | pdata = pdev->dev.platform_data; |
| 852 | if (pdata == NULL) { | 870 | if (pdata == NULL) { |
| @@ -872,14 +890,27 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
| 872 | goto err_iomap; | 890 | goto err_iomap; |
| 873 | } | 891 | } |
| 874 | 892 | ||
| 893 | irq = platform_get_irq(pdev, 0); | ||
| 894 | if (irq < 0) { | ||
| 895 | dev_err(&pdev->dev, "failed to get flste irq data\n"); | ||
| 896 | goto err_flste; | ||
| 897 | } | ||
| 898 | |||
| 899 | ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl); | ||
| 900 | if (ret) { | ||
| 901 | dev_err(&pdev->dev, "request interrupt failed.\n"); | ||
| 902 | goto err_flste; | ||
| 903 | } | ||
| 904 | |||
| 875 | platform_set_drvdata(pdev, flctl); | 905 | platform_set_drvdata(pdev, flctl); |
| 876 | flctl_mtd = &flctl->mtd; | 906 | flctl_mtd = &flctl->mtd; |
| 877 | nand = &flctl->chip; | 907 | nand = &flctl->chip; |
| 878 | flctl_mtd->priv = nand; | 908 | flctl_mtd->priv = nand; |
| 879 | flctl->pdev = pdev; | 909 | flctl->pdev = pdev; |
| 880 | flctl->flcmncr_base = pdata->flcmncr_val; | ||
| 881 | flctl->hwecc = pdata->has_hwecc; | 910 | flctl->hwecc = pdata->has_hwecc; |
| 882 | flctl->holden = pdata->use_holden; | 911 | flctl->holden = pdata->use_holden; |
| 912 | flctl->flcmncr_base = pdata->flcmncr_val; | ||
| 913 | flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE; | ||
| 883 | 914 | ||
| 884 | /* Set address of hardware control function */ | 915 | /* Set address of hardware control function */ |
| 885 | /* 20 us command delay time */ | 916 | /* 20 us command delay time */ |
| @@ -888,7 +919,6 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
| 888 | nand->read_byte = flctl_read_byte; | 919 | nand->read_byte = flctl_read_byte; |
| 889 | nand->write_buf = flctl_write_buf; | 920 | nand->write_buf = flctl_write_buf; |
| 890 | nand->read_buf = flctl_read_buf; | 921 | nand->read_buf = flctl_read_buf; |
| 891 | nand->verify_buf = flctl_verify_buf; | ||
| 892 | nand->select_chip = flctl_select_chip; | 922 | nand->select_chip = flctl_select_chip; |
| 893 | nand->cmdfunc = flctl_cmdfunc; | 923 | nand->cmdfunc = flctl_cmdfunc; |
| 894 | 924 | ||
| @@ -918,6 +948,9 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
| 918 | 948 | ||
| 919 | err_chip: | 949 | err_chip: |
| 920 | pm_runtime_disable(&pdev->dev); | 950 | pm_runtime_disable(&pdev->dev); |
| 951 | free_irq(irq, flctl); | ||
| 952 | err_flste: | ||
| 953 | iounmap(flctl->reg); | ||
| 921 | err_iomap: | 954 | err_iomap: |
| 922 | kfree(flctl); | 955 | kfree(flctl); |
| 923 | return ret; | 956 | return ret; |
| @@ -929,6 +962,8 @@ static int __devexit flctl_remove(struct platform_device *pdev) | |||
| 929 | 962 | ||
| 930 | nand_release(&flctl->mtd); | 963 | nand_release(&flctl->mtd); |
| 931 | pm_runtime_disable(&pdev->dev); | 964 | pm_runtime_disable(&pdev->dev); |
| 965 | free_irq(platform_get_irq(pdev, 0), flctl); | ||
| 966 | iounmap(flctl->reg); | ||
| 932 | kfree(flctl); | 967 | kfree(flctl); |
| 933 | 968 | ||
| 934 | return 0; | 969 | return 0; |
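The reworked flctl_4secc_oob_64 layout above interleaves the 4 x 10 ECC bytes with the free bytes across the four 16-byte OOB segments of a 2k page, instead of packing 10 ECC bytes at the end. A small stand-alone check (hypothetical user-space C; the offsets are copied from the layout and bad-block descriptor above) confirms that the bad-block marker, the ECC runs and the oobfree runs tile the 64-byte OOB exactly once:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		static const int eccpos[40] = {
			 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
			22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
			38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
			54, 55, 56, 57, 58, 59, 60, 61, 62, 63 };
		static const struct { int offset, length; } oobfree[4] = {
			{ 2, 4 }, { 16, 6 }, { 32, 6 }, { 48, 6 } };
		char used[64];
		int i, j;

		memset(used, 0, sizeof(used));
		used[0] = used[1] = 'B';		/* bad block marker bytes */

		for (i = 0; i < 40; i++) {
			assert(used[eccpos[i]] == 0);	/* no overlap */
			used[eccpos[i]] = 'E';		/* ECC byte */
		}
		for (i = 0; i < 4; i++)
			for (j = 0; j < oobfree[i].length; j++) {
				assert(used[oobfree[i].offset + j] == 0);
				used[oobfree[i].offset + j] = 'F';	/* free byte */
			}

		for (i = 0; i < 64; i++)
			assert(used[i] != 0);		/* every byte accounted for */
		printf("%.64s\n", used);		/* BBFFFFEEEEEEEEEE... */
		return 0;
	}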
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index e02b08bcf0c0..f3f28fafbf7a 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c | |||
| @@ -98,24 +98,6 @@ static uint16_t socrates_nand_read_word(struct mtd_info *mtd) | |||
| 98 | return word; | 98 | return word; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | /** | ||
| 102 | * socrates_nand_verify_buf - Verify chip data against buffer | ||
| 103 | * @mtd: MTD device structure | ||
| 104 | * @buf: buffer containing the data to compare | ||
| 105 | * @len: number of bytes to compare | ||
| 106 | */ | ||
| 107 | static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf, | ||
| 108 | int len) | ||
| 109 | { | ||
| 110 | int i; | ||
| 111 | |||
| 112 | for (i = 0; i < len; i++) { | ||
| 113 | if (buf[i] != socrates_nand_read_byte(mtd)) | ||
| 114 | return -EFAULT; | ||
| 115 | } | ||
| 116 | return 0; | ||
| 117 | } | ||
| 118 | |||
| 119 | /* | 101 | /* |
| 120 | * Hardware specific access to control-lines | 102 | * Hardware specific access to control-lines |
| 121 | */ | 103 | */ |
| @@ -201,7 +183,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev) | |||
| 201 | nand_chip->read_word = socrates_nand_read_word; | 183 | nand_chip->read_word = socrates_nand_read_word; |
| 202 | nand_chip->write_buf = socrates_nand_write_buf; | 184 | nand_chip->write_buf = socrates_nand_write_buf; |
| 203 | nand_chip->read_buf = socrates_nand_read_buf; | 185 | nand_chip->read_buf = socrates_nand_read_buf; |
| 204 | nand_chip->verify_buf = socrates_nand_verify_buf; | ||
| 205 | nand_chip->dev_ready = socrates_nand_device_ready; | 186 | nand_chip->dev_ready = socrates_nand_device_ready; |
| 206 | 187 | ||
| 207 | nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ | 188 | nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ |
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 5aa518081c51..508e9e04b092 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c | |||
| @@ -256,18 +256,6 @@ static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
| 256 | tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); | 256 | tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int | ||
| 260 | tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | ||
| 261 | { | ||
| 262 | struct tmio_nand *tmio = mtd_to_tmio(mtd); | ||
| 263 | u16 *p = (u16 *) buf; | ||
| 264 | |||
| 265 | for (len >>= 1; len; len--) | ||
| 266 | if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA)) | ||
| 267 | return -EFAULT; | ||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | |||
| 271 | static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode) | 259 | static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode) |
| 272 | { | 260 | { |
| 273 | struct tmio_nand *tmio = mtd_to_tmio(mtd); | 261 | struct tmio_nand *tmio = mtd_to_tmio(mtd); |
| @@ -424,7 +412,6 @@ static int tmio_probe(struct platform_device *dev) | |||
| 424 | nand_chip->read_byte = tmio_nand_read_byte; | 412 | nand_chip->read_byte = tmio_nand_read_byte; |
| 425 | nand_chip->write_buf = tmio_nand_write_buf; | 413 | nand_chip->write_buf = tmio_nand_write_buf; |
| 426 | nand_chip->read_buf = tmio_nand_read_buf; | 414 | nand_chip->read_buf = tmio_nand_read_buf; |
| 427 | nand_chip->verify_buf = tmio_nand_verify_buf; | ||
| 428 | 415 | ||
| 429 | /* set eccmode using hardware ECC */ | 416 | /* set eccmode using hardware ECC */ |
| 430 | nand_chip->ecc.mode = NAND_ECC_HW; | 417 | nand_chip->ecc.mode = NAND_ECC_HW; |
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 26398dcf21cf..e3d7266e256f 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c | |||
| @@ -131,18 +131,6 @@ static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
| 131 | *buf++ = __raw_readl(ndfdtr); | 131 | *buf++ = __raw_readl(ndfdtr); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, | ||
| 135 | int len) | ||
| 136 | { | ||
| 137 | struct platform_device *dev = mtd_to_platdev(mtd); | ||
| 138 | void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR); | ||
| 139 | |||
| 140 | while (len--) | ||
| 141 | if (*buf++ != (uint8_t)__raw_readl(ndfdtr)) | ||
| 142 | return -EFAULT; | ||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd, | 134 | static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd, |
| 147 | unsigned int ctrl) | 135 | unsigned int ctrl) |
| 148 | { | 136 | { |
| @@ -346,7 +334,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) | |||
| 346 | chip->read_byte = txx9ndfmc_read_byte; | 334 | chip->read_byte = txx9ndfmc_read_byte; |
| 347 | chip->read_buf = txx9ndfmc_read_buf; | 335 | chip->read_buf = txx9ndfmc_read_buf; |
| 348 | chip->write_buf = txx9ndfmc_write_buf; | 336 | chip->write_buf = txx9ndfmc_write_buf; |
| 349 | chip->verify_buf = txx9ndfmc_verify_buf; | ||
| 350 | chip->cmd_ctrl = txx9ndfmc_cmd_ctrl; | 337 | chip->cmd_ctrl = txx9ndfmc_cmd_ctrl; |
| 351 | chip->dev_ready = txx9ndfmc_dev_ready; | 338 | chip->dev_ready = txx9ndfmc_dev_ready; |
| 352 | chip->ecc.calculate = txx9ndfmc_calculate_ecc; | 339 | chip->ecc.calculate = txx9ndfmc_calculate_ecc; |
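Across socrates, tmio and txx9 (and sh_flctl above) the per-driver verify_buf callbacks are deleted; with the NAND core's optional write verification going away, a read-back check is something a caller performs itself if it wants one. A hedged sketch of such a check — verify_write() is a hypothetical helper, not an API any of these drivers provide:

	#include <linux/mtd/mtd.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* read back 'len' bytes at 'ofs' and compare against 'buf' */
	static int verify_write(struct mtd_info *mtd, loff_t ofs, size_t len,
				const u_char *buf)
	{
		size_t retlen;
		u_char *tmp;
		int ret;

		tmp = kmalloc(len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = mtd_read(mtd, ofs, len, &retlen, tmp);
		if (ret == -EUCLEAN)
			ret = 0;	/* bitflips were corrected, data is valid */
		if (!ret && (retlen != len || memcmp(buf, tmp, len)))
			ret = -EIO;

		kfree(tmp);
		return ret;
	}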
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c new file mode 100644 index 000000000000..3f81dc8f214c --- /dev/null +++ b/drivers/mtd/nand/xway_nand.c | |||
| @@ -0,0 +1,201 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify it | ||
| 3 | * under the terms of the GNU General Public License version 2 as published | ||
| 4 | * by the Free Software Foundation. | ||
| 5 | * | ||
| 6 | * Copyright © 2012 John Crispin <blogic@openwrt.org> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/mtd/nand.h> | ||
| 10 | #include <linux/of_gpio.h> | ||
| 11 | #include <linux/of_platform.h> | ||
| 12 | |||
| 13 | #include <lantiq_soc.h> | ||
| 14 | |||
| 15 | /* nand registers */ | ||
| 16 | #define EBU_ADDSEL1 0x24 | ||
| 17 | #define EBU_NAND_CON 0xB0 | ||
| 18 | #define EBU_NAND_WAIT 0xB4 | ||
| 19 | #define EBU_NAND_ECC0 0xB8 | ||
| 20 | #define EBU_NAND_ECC_AC 0xBC | ||
| 21 | |||
| 22 | /* nand commands */ | ||
| 23 | #define NAND_CMD_ALE (1 << 2) | ||
| 24 | #define NAND_CMD_CLE (1 << 3) | ||
| 25 | #define NAND_CMD_CS (1 << 4) | ||
| 26 | #define NAND_WRITE_CMD_RESET 0xff | ||
| 27 | #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) | ||
| 28 | #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) | ||
| 29 | #define NAND_WRITE_DATA (NAND_CMD_CS) | ||
| 30 | #define NAND_READ_DATA (NAND_CMD_CS) | ||
| 31 | #define NAND_WAIT_WR_C (1 << 3) | ||
| 32 | #define NAND_WAIT_RD (0x1) | ||
| 33 | |||
| 34 | /* we need to tell the ebu which addr we mapped the nand to */ | ||
| 35 | #define ADDSEL1_MASK(x) (x << 4) | ||
| 36 | #define ADDSEL1_REGEN 1 | ||
| 37 | |||
| 38 | /* we need to tell the EBU that we have nand attached and set it up properly */ | ||
| 39 | #define BUSCON1_SETUP (1 << 22) | ||
| 40 | #define BUSCON1_BCGEN_RES (0x3 << 12) | ||
| 41 | #define BUSCON1_WAITWRC2 (2 << 8) | ||
| 42 | #define BUSCON1_WAITRDC2 (2 << 6) | ||
| 43 | #define BUSCON1_HOLDC1 (1 << 4) | ||
| 44 | #define BUSCON1_RECOVC1 (1 << 2) | ||
| 45 | #define BUSCON1_CMULT4 1 | ||
| 46 | |||
| 47 | #define NAND_CON_CE (1 << 20) | ||
| 48 | #define NAND_CON_OUT_CS1 (1 << 10) | ||
| 49 | #define NAND_CON_IN_CS1 (1 << 8) | ||
| 50 | #define NAND_CON_PRE_P (1 << 7) | ||
| 51 | #define NAND_CON_WP_P (1 << 6) | ||
| 52 | #define NAND_CON_SE_P (1 << 5) | ||
| 53 | #define NAND_CON_CS_P (1 << 4) | ||
| 54 | #define NAND_CON_CSMUX (1 << 1) | ||
| 55 | #define NAND_CON_NANDM 1 | ||
| 56 | |||
| 57 | static void xway_reset_chip(struct nand_chip *chip) | ||
| 58 | { | ||
| 59 | unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; | ||
| 60 | unsigned long flags; | ||
| 61 | |||
| 62 | nandaddr &= ~NAND_WRITE_ADDR; | ||
| 63 | nandaddr |= NAND_WRITE_CMD; | ||
| 64 | |||
| 65 | /* finish with a reset */ | ||
| 66 | spin_lock_irqsave(&ebu_lock, flags); | ||
| 67 | writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); | ||
| 68 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | ||
| 69 | ; | ||
| 70 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
| 71 | } | ||
| 72 | |||
| 73 | static void xway_select_chip(struct mtd_info *mtd, int chip) | ||
| 74 | { | ||
| 75 | |||
| 76 | switch (chip) { | ||
| 77 | case -1: | ||
| 78 | ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); | ||
| 79 | ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); | ||
| 80 | break; | ||
| 81 | case 0: | ||
| 82 | ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); | ||
| 83 | ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); | ||
| 84 | break; | ||
| 85 | default: | ||
| 86 | BUG(); | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | ||
| 91 | { | ||
| 92 | struct nand_chip *this = mtd->priv; | ||
| 93 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | ||
| 94 | unsigned long flags; | ||
| 95 | |||
| 96 | if (ctrl & NAND_CTRL_CHANGE) { | ||
| 97 | nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); | ||
| 98 | if (ctrl & NAND_CLE) | ||
| 99 | nandaddr |= NAND_WRITE_CMD; | ||
| 100 | else | ||
| 101 | nandaddr |= NAND_WRITE_ADDR; | ||
| 102 | this->IO_ADDR_W = (void __iomem *) nandaddr; | ||
| 103 | } | ||
| 104 | |||
| 105 | if (cmd != NAND_CMD_NONE) { | ||
| 106 | spin_lock_irqsave(&ebu_lock, flags); | ||
| 107 | writeb(cmd, this->IO_ADDR_W); | ||
| 108 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | ||
| 109 | ; | ||
| 110 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | static int xway_dev_ready(struct mtd_info *mtd) | ||
| 115 | { | ||
| 116 | return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD; | ||
| 117 | } | ||
| 118 | |||
| 119 | static unsigned char xway_read_byte(struct mtd_info *mtd) | ||
| 120 | { | ||
| 121 | struct nand_chip *this = mtd->priv; | ||
| 122 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; | ||
| 123 | unsigned long flags; | ||
| 124 | int ret; | ||
| 125 | |||
| 126 | spin_lock_irqsave(&ebu_lock, flags); | ||
| 127 | ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); | ||
| 128 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
| 129 | |||
| 130 | return ret; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int xway_nand_probe(struct platform_device *pdev) | ||
| 134 | { | ||
| 135 | struct nand_chip *this = platform_get_drvdata(pdev); | ||
| 136 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | ||
| 137 | const __be32 *cs = of_get_property(pdev->dev.of_node, | ||
| 138 | "lantiq,cs", NULL); | ||
| 139 | u32 cs_flag = 0; | ||
| 140 | |||
| 141 | /* load our CS from the DT. Either we find a valid 1 or default to 0 */ | ||
| 142 | if (cs && (*cs == 1)) | ||
| 143 | cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; | ||
| 144 | |||
| 145 | /* setup the EBU to run in NAND mode on our base addr */ | ||
| 146 | ltq_ebu_w32(CPHYSADDR(nandaddr) | ||
| 147 | | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); | ||
| 148 | |||
| 149 | ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 | ||
| 150 | | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 | ||
| 151 | | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); | ||
| 152 | |||
| 153 | ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P | ||
| 154 | | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P | ||
| 155 | | cs_flag, EBU_NAND_CON); | ||
| 156 | |||
| 157 | /* finish with a reset */ | ||
| 158 | xway_reset_chip(this); | ||
| 159 | |||
| 160 | return 0; | ||
| 161 | } | ||
| 162 | |||
| 163 | /* allow users to override the partition in DT using the cmdline */ | ||
| 164 | static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL }; | ||
| 165 | |||
| 166 | static struct platform_nand_data xway_nand_data = { | ||
| 167 | .chip = { | ||
| 168 | .nr_chips = 1, | ||
| 169 | .chip_delay = 30, | ||
| 170 | .part_probe_types = part_probes, | ||
| 171 | }, | ||
| 172 | .ctrl = { | ||
| 173 | .probe = xway_nand_probe, | ||
| 174 | .cmd_ctrl = xway_cmd_ctrl, | ||
| 175 | .dev_ready = xway_dev_ready, | ||
| 176 | .select_chip = xway_select_chip, | ||
| 177 | .read_byte = xway_read_byte, | ||
| 178 | } | ||
| 179 | }; | ||
| 180 | |||
| 181 | /* | ||
| 182 | * Try to find the node inside the DT. If it is available attach out | ||
| 183 | * platform_nand_data | ||
| 184 | */ | ||
| 185 | static int __init xway_register_nand(void) | ||
| 186 | { | ||
| 187 | struct device_node *node; | ||
| 188 | struct platform_device *pdev; | ||
| 189 | |||
| 190 | node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway"); | ||
| 191 | if (!node) | ||
| 192 | return -ENOENT; | ||
| 193 | pdev = of_find_device_by_node(node); | ||
| 194 | if (!pdev) | ||
| 195 | return -EINVAL; | ||
| 196 | pdev->dev.platform_data = &xway_nand_data; | ||
| 197 | of_node_put(node); | ||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | |||
| 201 | subsys_initcall(xway_register_nand); | ||
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 9e2dfd517aa5..8dd6ba52404a 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
| @@ -346,7 +346,6 @@ static int sm_write_sector(struct sm_ftl *ftl, | |||
| 346 | ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); | 346 | ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); |
| 347 | 347 | ||
| 348 | /* Now we assume that hardware will catch write bitflip errors */ | 348 | /* Now we assume that hardware will catch write bitflip errors */ |
| 349 | /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */ | ||
| 350 | 349 | ||
| 351 | if (ret) { | 350 | if (ret) { |
| 352 | dbg("write to block %d at zone %d, failed with error %d", | 351 | dbg("write to block %d at zone %d, failed with error %d", |
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile index b44dcab940d8..bd0065c0d359 100644 --- a/drivers/mtd/tests/Makefile +++ b/drivers/mtd/tests/Makefile | |||
| @@ -6,3 +6,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o | |||
| 6 | obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o | 6 | obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o |
| 7 | obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o | 7 | obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o |
| 8 | obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o | 8 | obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o |
| 9 | obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o | ||
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c new file mode 100644 index 000000000000..cc8d62cb280c --- /dev/null +++ b/drivers/mtd/tests/mtd_nandbiterrs.c | |||
| @@ -0,0 +1,460 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2012 NetCommWireless | ||
| 3 | * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au> | ||
| 4 | * | ||
| 5 | * Test for multi-bit error recovery on a NAND page. This mostly tests the | ||
| 6 | * ECC controller / driver. | ||
| 7 | * | ||
| 8 | * There are two test modes: | ||
| 9 | * | ||
| 10 | * 0 - artificially inserting bit errors until the ECC fails | ||
| 11 | * This is the default method and fairly quick. It should | ||
| 12 | * be independent of the quality of the FLASH. | ||
| 13 | * | ||
| 14 | * 1 - re-writing the same pattern repeatedly until the ECC fails. | ||
| 15 | * This method relies on the physics of NAND FLASH to eventually | ||
| 16 | * generate '0' bits if '1' has been written sufficient times. | ||
| 17 | * Depending on the NAND, the first bit errors will appear after | ||
| 18 | * 1000 or more writes and then will usually snowball, reaching the | ||
| 19 | * limits of the ECC quickly. | ||
| 20 | * | ||
| 21 | * The test stops after 10000 cycles, should your FLASH be | ||
| 22 | * exceptionally good and not generate bit errors before that. Try | ||
| 23 | * a different page in that case. | ||
| 24 | * | ||
| 25 | * Please note that neither of these tests will significantly 'use up' any | ||
| 26 | * FLASH endurance. Only a maximum of two erase operations will be performed. | ||
| 27 | * | ||
| 28 | * | ||
| 29 | * This program is free software; you can redistribute it and/or modify it | ||
| 30 | * under the terms of the GNU General Public License version 2 as published by | ||
| 31 | * the Free Software Foundation. | ||
| 32 | * | ||
| 33 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 34 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 35 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 36 | * more details. | ||
| 37 | * | ||
| 38 | * You should have received a copy of the GNU General Public License along with | ||
| 39 | * this program; see the file COPYING. If not, write to the Free Software | ||
| 40 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 41 | */ | ||
| 42 | #include <linux/init.h> | ||
| 43 | #include <linux/module.h> | ||
| 44 | #include <linux/moduleparam.h> | ||
| 45 | #include <linux/mtd/mtd.h> | ||
| 46 | #include <linux/err.h> | ||
| 47 | #include <linux/mtd/nand.h> | ||
| 48 | #include <linux/slab.h> | ||
| 49 | |||
| 50 | #define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA) | ||
| 51 | |||
| 52 | static int dev; | ||
| 53 | module_param(dev, int, S_IRUGO); | ||
| 54 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
| 55 | |||
| 56 | static unsigned page_offset; | ||
| 57 | module_param(page_offset, uint, S_IRUGO); | ||
| 58 | MODULE_PARM_DESC(page_offset, "Page number relative to dev start"); | ||
| 59 | |||
| 60 | static unsigned seed; | ||
| 61 | module_param(seed, uint, S_IRUGO); | ||
| 62 | MODULE_PARM_DESC(seed, "Random seed"); | ||
| 63 | |||
| 64 | static int mode; | ||
| 65 | module_param(mode, int, S_IRUGO); | ||
| 66 | MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test"); | ||
| 67 | |||
| 68 | static unsigned max_overwrite = 10000; | ||
| 69 | |||
| 70 | static loff_t offset; /* Offset of the page we're using. */ | ||
| 71 | static unsigned eraseblock; /* Eraseblock number for our page. */ | ||
| 72 | |||
| 73 | /* We assume that the ECC can correct up to a certain number | ||
| 74 | * of biterrors per subpage. */ | ||
| 75 | static unsigned subsize; /* Size of subpages */ | ||
| 76 | static unsigned subcount; /* Number of subpages per page */ | ||
| 77 | |||
| 78 | static struct mtd_info *mtd; /* MTD device */ | ||
| 79 | |||
| 80 | static uint8_t *wbuffer; /* One page write / compare buffer */ | ||
| 81 | static uint8_t *rbuffer; /* One page read buffer */ | ||
| 82 | |||
| 83 | /* 'random' bytes from known offsets */ | ||
| 84 | static uint8_t hash(unsigned offset) | ||
| 85 | { | ||
| 86 | unsigned v = offset; | ||
| 87 | unsigned char c; | ||
| 88 | v ^= 0x7f7edfd3; | ||
| 89 | v = v ^ (v >> 3); | ||
| 90 | v = v ^ (v >> 5); | ||
| 91 | v = v ^ (v >> 13); | ||
| 92 | c = v & 0xFF; | ||
| 93 | /* Reverse bits of result. */ | ||
| 94 | c = (c & 0x0F) << 4 | (c & 0xF0) >> 4; | ||
| 95 | c = (c & 0x33) << 2 | (c & 0xCC) >> 2; | ||
| 96 | c = (c & 0x55) << 1 | (c & 0xAA) >> 1; | ||
| 97 | return c; | ||
| 98 | } | ||
| 99 | |||
| 100 | static int erase_block(void) | ||
| 101 | { | ||
| 102 | int err; | ||
| 103 | struct erase_info ei; | ||
| 104 | loff_t addr = eraseblock * mtd->erasesize; | ||
| 105 | |||
| 106 | msg("erase_block\n"); | ||
| 107 | |||
| 108 | memset(&ei, 0, sizeof(struct erase_info)); | ||
| 109 | ei.mtd = mtd; | ||
| 110 | ei.addr = addr; | ||
| 111 | ei.len = mtd->erasesize; | ||
| 112 | |||
| 113 | err = mtd_erase(mtd, &ei); | ||
| 114 | if (err || ei.state == MTD_ERASE_FAILED) { | ||
| 115 | msg("error %d while erasing\n", err); | ||
| 116 | if (!err) | ||
| 117 | err = -EIO; | ||
| 118 | return err; | ||
| 119 | } | ||
| 120 | |||
| 121 | return 0; | ||
| 122 | } | ||
| 123 | |||
| 124 | /* Writes wbuffer to page */ | ||
| 125 | static int write_page(int log) | ||
| 126 | { | ||
| 127 | int err = 0; | ||
| 128 | size_t written; | ||
| 129 | |||
| 130 | if (log) | ||
| 131 | msg("write_page\n"); | ||
| 132 | |||
| 133 | err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer); | ||
| 134 | if (err || written != mtd->writesize) { | ||
| 135 | msg("error: write failed at %#llx\n", (long long)offset); | ||
| 136 | if (!err) | ||
| 137 | err = -EIO; | ||
| 138 | } | ||
| 139 | |||
| 140 | return err; | ||
| 141 | } | ||
| 142 | |||
| 143 | /* Re-writes the data area while leaving the OOB alone. */ | ||
| 144 | static int rewrite_page(int log) | ||
| 145 | { | ||
| 146 | int err = 0; | ||
| 147 | struct mtd_oob_ops ops; | ||
| 148 | |||
| 149 | if (log) | ||
| 150 | msg("rewrite page\n"); | ||
| 151 | |||
| 152 | ops.mode = MTD_OPS_RAW; /* No ECC */ | ||
| 153 | ops.len = mtd->writesize; | ||
| 154 | ops.retlen = 0; | ||
| 155 | ops.ooblen = 0; | ||
| 156 | ops.oobretlen = 0; | ||
| 157 | ops.ooboffs = 0; | ||
| 158 | ops.datbuf = wbuffer; | ||
| 159 | ops.oobbuf = NULL; | ||
| 160 | |||
| 161 | err = mtd_write_oob(mtd, offset, &ops); | ||
| 162 | if (err || ops.retlen != mtd->writesize) { | ||
| 163 | msg("error: write_oob failed (%d)\n", err); | ||
| 164 | if (!err) | ||
| 165 | err = -EIO; | ||
| 166 | } | ||
| 167 | |||
| 168 | return err; | ||
| 169 | } | ||
| 170 | |||
| 171 | /* Reads page into rbuffer. Returns number of corrected bit errors (>=0) | ||
| 172 | * or error (<0) */ | ||
| 173 | static int read_page(int log) | ||
| 174 | { | ||
| 175 | int err = 0; | ||
| 176 | size_t read; | ||
| 177 | struct mtd_ecc_stats oldstats; | ||
| 178 | |||
| 179 | if (log) | ||
| 180 | msg("read_page\n"); | ||
| 181 | |||
| 182 | /* Saving last mtd stats */ | ||
| 183 | memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats)); | ||
| 184 | |||
| 185 | err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer); | ||
| 186 | if (err == -EUCLEAN) | ||
| 187 | err = mtd->ecc_stats.corrected - oldstats.corrected; | ||
| 188 | |||
| 189 | if (err < 0 || read != mtd->writesize) { | ||
| 190 | msg("error: read failed at %#llx\n", (long long)offset); | ||
| 191 | if (err >= 0) | ||
| 192 | err = -EIO; | ||
| 193 | } | ||
| 194 | |||
| 195 | return err; | ||
| 196 | } | ||
| 197 | |||
| 198 | /* Verifies rbuffer against random sequence */ | ||
| 199 | static int verify_page(int log) | ||
| 200 | { | ||
| 201 | unsigned i, errs = 0; | ||
| 202 | |||
| 203 | if (log) | ||
| 204 | msg("verify_page\n"); | ||
| 205 | |||
| 206 | for (i = 0; i < mtd->writesize; i++) { | ||
| 207 | if (rbuffer[i] != hash(i+seed)) { | ||
| 208 | msg("Error: page offset %u, expected %02x, got %02x\n", | ||
| 209 | i, hash(i+seed), rbuffer[i]); | ||
| 210 | errs++; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | if (errs) | ||
| 215 | return -EIO; | ||
| 216 | else | ||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | |||
| 220 | #define CBIT(v, n) ((v) & (1 << (n))) | ||
| 221 | #define BCLR(v, n) ((v) = (v) & ~(1 << (n))) | ||
| 222 | |||
| 223 | /* Finds the first '1' bit in wbuffer starting at offset 'byte' | ||
| 224 | * and sets it to '0'. */ | ||
| 225 | static int insert_biterror(unsigned byte) | ||
| 226 | { | ||
| 227 | int bit; | ||
| 228 | |||
| 229 | while (byte < mtd->writesize) { | ||
| 230 | for (bit = 7; bit >= 0; bit--) { | ||
| 231 | if (CBIT(wbuffer[byte], bit)) { | ||
| 232 | BCLR(wbuffer[byte], bit); | ||
| 233 | msg("Inserted biterror @ %u/%u\n", byte, bit); | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | } | ||
| 237 | byte++; | ||
| 238 | } | ||
| 239 | msg("biterror: Failed to find a '1' bit\n"); | ||
| 240 | return -EIO; | ||
| 241 | } | ||
| 242 | |||
| 243 | /* Writes 'random' data to page and then introduces deliberate bit | ||
| 244 | * errors into the page, while verifying each step. */ | ||
| 245 | static int incremental_errors_test(void) | ||
| 246 | { | ||
| 247 | int err = 0; | ||
| 248 | unsigned i; | ||
| 249 | unsigned errs_per_subpage = 0; | ||
| 250 | |||
| 251 | msg("incremental biterrors test\n"); | ||
| 252 | |||
| 253 | for (i = 0; i < mtd->writesize; i++) | ||
| 254 | wbuffer[i] = hash(i+seed); | ||
| 255 | |||
| 256 | err = write_page(1); | ||
| 257 | if (err) | ||
| 258 | goto exit; | ||
| 259 | |||
| 260 | while (1) { | ||
| 261 | |||
| 262 | err = rewrite_page(1); | ||
| 263 | if (err) | ||
| 264 | goto exit; | ||
| 265 | |||
| 266 | err = read_page(1); | ||
| 267 | if (err > 0) | ||
| 268 | msg("Read reported %d corrected bit errors\n", err); | ||
| 269 | if (err < 0) { | ||
| 270 | msg("After %d biterrors per subpage, read reported error %d\n", | ||
| 271 | errs_per_subpage, err); | ||
| 272 | err = 0; | ||
| 273 | goto exit; | ||
| 274 | } | ||
| 275 | |||
| 276 | err = verify_page(1); | ||
| 277 | if (err) { | ||
| 278 | msg("ECC failure, read data is incorrect despite read success\n"); | ||
| 279 | goto exit; | ||
| 280 | } | ||
| 281 | |||
| 282 | msg("Successfully corrected %d bit errors per subpage\n", | ||
| 283 | errs_per_subpage); | ||
| 284 | |||
| 285 | for (i = 0; i < subcount; i++) { | ||
| 286 | err = insert_biterror(i * subsize); | ||
| 287 | if (err < 0) | ||
| 288 | goto exit; | ||
| 289 | } | ||
| 290 | errs_per_subpage++; | ||
| 291 | } | ||
| 292 | |||
| 293 | exit: | ||
| 294 | return err; | ||
| 295 | } | ||
| 296 | |||
| 297 | |||
| 298 | /* Writes 'random' data to page and then re-writes that same data repeatedly. | ||
| 299 | This eventually develops bit errors (bits written as '1' will slowly become | ||
| 300 | '0'), which are corrected as far as the ECC is capable of. */ | ||
| 301 | static int overwrite_test(void) | ||
| 302 | { | ||
| 303 | int err = 0; | ||
| 304 | unsigned i; | ||
| 305 | unsigned max_corrected = 0; | ||
| 306 | unsigned opno = 0; | ||
| 307 | /* We don't expect more than this many correctable bit errors per | ||
| 308 | * page. */ | ||
| 309 | #define MAXBITS 512 | ||
| 310 | static unsigned bitstats[MAXBITS]; /* bit error histogram. */ | ||
| 311 | |||
| 312 | memset(bitstats, 0, sizeof(bitstats)); | ||
| 313 | |||
| 314 | msg("overwrite biterrors test\n"); | ||
| 315 | |||
| 316 | for (i = 0; i < mtd->writesize; i++) | ||
| 317 | wbuffer[i] = hash(i+seed); | ||
| 318 | |||
| 319 | err = write_page(1); | ||
| 320 | if (err) | ||
| 321 | goto exit; | ||
| 322 | |||
| 323 | while (opno < max_overwrite) { | ||
| 324 | |||
| 325 | err = rewrite_page(0); | ||
| 326 | if (err) | ||
| 327 | break; | ||
| 328 | |||
| 329 | err = read_page(0); | ||
| 330 | if (err >= 0) { | ||
| 331 | if (err >= MAXBITS) { | ||
| 332 | msg("Implausible number of bit errors corrected\n"); | ||
| 333 | err = -EIO; | ||
| 334 | break; | ||
| 335 | } | ||
| 336 | bitstats[err]++; | ||
| 337 | if (err > max_corrected) { | ||
| 338 | max_corrected = err; | ||
| 339 | msg("Read reported %d corrected bit errors\n", | ||
| 340 | err); | ||
| 341 | } | ||
| 342 | } else { /* err < 0 */ | ||
| 343 | msg("Read reported error %d\n", err); | ||
| 344 | err = 0; | ||
| 345 | break; | ||
| 346 | } | ||
| 347 | |||
| 348 | err = verify_page(0); | ||
| 349 | if (err) { | ||
| 350 | bitstats[max_corrected] = opno; | ||
| 351 | msg("ECC failure, read data is incorrect despite read success\n"); | ||
| 352 | break; | ||
| 353 | } | ||
| 354 | |||
| 355 | opno++; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* At this point bitstats[0] contains the number of ops with no bit | ||
| 359 | * errors, bitstats[1] the number of ops with 1 bit error, etc. */ | ||
| 360 | msg("Bit error histogram (%d operations total):\n", opno); | ||
| 361 | for (i = 0; i < max_corrected; i++) | ||
| 362 | msg("Page reads with %3d corrected bit errors: %d\n", | ||
| 363 | i, bitstats[i]); | ||
| 364 | |||
| 365 | exit: | ||
| 366 | return err; | ||
| 367 | } | ||
| 368 | |||
| 369 | static int __init mtd_nandbiterrs_init(void) | ||
| 370 | { | ||
| 371 | int err = 0; | ||
| 372 | |||
| 373 | msg("\n"); | ||
| 374 | msg("==================================================\n"); | ||
| 375 | msg("MTD device: %d\n", dev); | ||
| 376 | |||
| 377 | mtd = get_mtd_device(NULL, dev); | ||
| 378 | if (IS_ERR(mtd)) { | ||
| 379 | err = PTR_ERR(mtd); | ||
| 380 | msg("error: cannot get MTD device\n"); | ||
| 381 | goto exit_mtddev; | ||
| 382 | } | ||
| 383 | |||
| 384 | if (mtd->type != MTD_NANDFLASH) { | ||
| 385 | msg("this test requires NAND flash\n"); | ||
| 386 | err = -ENODEV; | ||
| 387 | goto exit_nand; | ||
| 388 | } | ||
| 389 | |||
| 390 | msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n", | ||
| 391 | (unsigned long long)mtd->size, mtd->erasesize, | ||
| 392 | mtd->writesize, mtd->oobsize); | ||
| 393 | |||
| 394 | subsize = mtd->writesize >> mtd->subpage_sft; | ||
| 395 | subcount = mtd->writesize / subsize; | ||
| 396 | |||
| 397 | msg("Device uses %d subpages of %d bytes\n", subcount, subsize); | ||
| 398 | |||
| 399 | offset = page_offset * mtd->writesize; | ||
| 400 | eraseblock = mtd_div_by_eb(offset, mtd); | ||
| 401 | |||
| 402 | msg("Using page=%u, offset=%llu, eraseblock=%u\n", | ||
| 403 | page_offset, offset, eraseblock); | ||
| 404 | |||
| 405 | wbuffer = kmalloc(mtd->writesize, GFP_KERNEL); | ||
| 406 | if (!wbuffer) { | ||
| 407 | err = -ENOMEM; | ||
| 408 | goto exit_wbuffer; | ||
| 409 | } | ||
| 410 | |||
| 411 | rbuffer = kmalloc(mtd->writesize, GFP_KERNEL); | ||
| 412 | if (!rbuffer) { | ||
| 413 | err = -ENOMEM; | ||
| 414 | goto exit_rbuffer; | ||
| 415 | } | ||
| 416 | |||
| 417 | err = erase_block(); | ||
| 418 | if (err) | ||
| 419 | goto exit_error; | ||
| 420 | |||
| 421 | if (mode == 0) | ||
| 422 | err = incremental_errors_test(); | ||
| 423 | else | ||
| 424 | err = overwrite_test(); | ||
| 425 | |||
| 426 | if (err) | ||
| 427 | goto exit_error; | ||
| 428 | |||
| 429 | /* We leave the block un-erased in case of test failure. */ | ||
| 430 | err = erase_block(); | ||
| 431 | if (err) | ||
| 432 | goto exit_error; | ||
| 433 | |||
| 434 | err = -EIO; | ||
| 435 | msg("finished successfully.\n"); | ||
| 436 | msg("==================================================\n"); | ||
| 437 | |||
| 438 | exit_error: | ||
| 439 | kfree(rbuffer); | ||
| 440 | exit_rbuffer: | ||
| 441 | kfree(wbuffer); | ||
| 442 | exit_wbuffer: | ||
| 443 | /* Nothing */ | ||
| 444 | exit_nand: | ||
| 445 | put_mtd_device(mtd); | ||
| 446 | exit_mtddev: | ||
| 447 | return err; | ||
| 448 | } | ||
| 449 | |||
| 450 | static void __exit mtd_nandbiterrs_exit(void) | ||
| 451 | { | ||
| 452 | return; | ||
| 453 | } | ||
| 454 | |||
| 455 | module_init(mtd_nandbiterrs_init); | ||
| 456 | module_exit(mtd_nandbiterrs_exit); | ||
| 457 | |||
| 458 | MODULE_DESCRIPTION("NAND bit error recovery test"); | ||
| 459 | MODULE_AUTHOR("Iwo Mergler"); | ||
| 460 | MODULE_LICENSE("GPL"); | ||
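
The comment above overwrite_test() is the crux of this module: NAND programming can only move bits from 1 to 0, so rewriting a page with the data it already holds is nominally a no-op, yet each extra program cycle disturbs cells and occasionally clears another bit, which the ECC then repairs on the following read until it runs out of correction strength. A tiny stand-alone illustration of that 1-to-0-only property (didactic only, not part of the module above):

    #include <stdio.h>

    int main(void)
    {
        unsigned char erased = 0xff;           /* erase sets every bit to 1 */
        unsigned char data   = 0xa5;
        unsigned char cell;

        cell = erased & data;                  /* programming can only clear bits */
        cell &= data;                          /* rewrite with identical data: unchanged */
        cell &= (unsigned char)~(1u << 5);     /* a program disturb clears one more bit */

        printf("wrote 0x%02x, cell reads 0x%02x (%d bit in error)\n",
               data, cell, __builtin_popcount(data ^ cell));
        return 0;
    }

In keeping with the other mtd test modules, dev, mode and page_offset used in mtd_nandbiterrs_init() are presumably module parameters, so a run would look like "modprobe mtd_nandbiterrs dev=0 mode=1", with the bit error histogram ending up in the kernel log.
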
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c index 70d6d7d0d656..b437fa425077 100644 --- a/drivers/mtd/tests/mtd_nandecctest.c +++ b/drivers/mtd/tests/mtd_nandecctest.c | |||
| @@ -4,60 +4,287 @@ | |||
| 4 | #include <linux/random.h> | 4 | #include <linux/random.h> |
| 5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
| 6 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
| 7 | #include <linux/jiffies.h> | 7 | #include <linux/slab.h> |
| 8 | #include <linux/mtd/nand_ecc.h> | 8 | #include <linux/mtd/nand_ecc.h> |
| 9 | 9 | ||
| 10 | /* | ||
| 11 | * Test the implementation for software ECC | ||
| 12 | * | ||
| 13 | * No actual MTD device is needed, so we don't need to worry about losing | ||
| 14 | * important data by human error. | ||
| 15 | * | ||
| 16 | * This covers possible patterns of corruption which can be reliably corrected | ||
| 17 | * or detected. | ||
| 18 | */ | ||
| 19 | |||
| 10 | #if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) | 20 | #if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) |
| 11 | 21 | ||
| 12 | static void inject_single_bit_error(void *data, size_t size) | 22 | struct nand_ecc_test { |
| 23 | const char *name; | ||
| 24 | void (*prepare)(void *, void *, void *, void *, const size_t); | ||
| 25 | int (*verify)(void *, void *, void *, const size_t); | ||
| 26 | }; | ||
| 27 | |||
| 28 | /* | ||
| 29 | * The reason for this __change_bit_le() instead of __change_bit() is to inject | ||
| 30 | * a bit error properly within a region whose size is not a multiple of | ||
| 31 | * sizeof(unsigned long) on big-endian systems | ||
| 32 | */ | ||
| 33 | #ifdef __LITTLE_ENDIAN | ||
| 34 | #define __change_bit_le(nr, addr) __change_bit(nr, addr) | ||
| 35 | #elif defined(__BIG_ENDIAN) | ||
| 36 | #define __change_bit_le(nr, addr) \ | ||
| 37 | __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr) | ||
| 38 | #else | ||
| 39 | #error "Unknown byte order" | ||
| 40 | #endif | ||
| 41 | |||
| 42 | static void single_bit_error_data(void *error_data, void *correct_data, | ||
| 43 | size_t size) | ||
| 13 | { | 44 | { |
| 14 | unsigned long offset = random32() % (size * BITS_PER_BYTE); | 45 | unsigned int offset = random32() % (size * BITS_PER_BYTE); |
| 15 | 46 | ||
| 16 | __change_bit(offset, data); | 47 | memcpy(error_data, correct_data, size); |
| 48 | __change_bit_le(offset, error_data); | ||
| 17 | } | 49 | } |
| 18 | 50 | ||
| 19 | static unsigned char data[512]; | 51 | static void double_bit_error_data(void *error_data, void *correct_data, |
| 20 | static unsigned char error_data[512]; | 52 | size_t size) |
| 53 | { | ||
| 54 | unsigned int offset[2]; | ||
| 55 | |||
| 56 | offset[0] = random32() % (size * BITS_PER_BYTE); | ||
| 57 | do { | ||
| 58 | offset[1] = random32() % (size * BITS_PER_BYTE); | ||
| 59 | } while (offset[0] == offset[1]); | ||
| 21 | 60 | ||
| 22 | static int nand_ecc_test(const size_t size) | 61 | memcpy(error_data, correct_data, size); |
| 62 | |||
| 63 | __change_bit_le(offset[0], error_data); | ||
| 64 | __change_bit_le(offset[1], error_data); | ||
| 65 | } | ||
| 66 | |||
| 67 | static unsigned int random_ecc_bit(size_t size) | ||
| 23 | { | 68 | { |
| 24 | unsigned char code[3]; | 69 | unsigned int offset = random32() % (3 * BITS_PER_BYTE); |
| 25 | unsigned char error_code[3]; | 70 | |
| 26 | char testname[30]; | 71 | if (size == 256) { |
| 72 | /* | ||
| 73 | * Don't inject a bit error into the insignificant bits (16th | ||
| 74 | * and 17th bit) in the ECC code for a 256 byte data block | ||
| 75 | */ | ||
| 76 | while (offset == 16 || offset == 17) | ||
| 77 | offset = random32() % (3 * BITS_PER_BYTE); | ||
| 78 | } | ||
| 27 | 79 | ||
| 28 | BUG_ON(sizeof(data) < size); | 80 | return offset; |
| 81 | } | ||
| 29 | 82 | ||
| 30 | sprintf(testname, "nand-ecc-%zu", size); | 83 | static void single_bit_error_ecc(void *error_ecc, void *correct_ecc, |
| 84 | size_t size) | ||
| 85 | { | ||
| 86 | unsigned int offset = random_ecc_bit(size); | ||
| 31 | 87 | ||
| 32 | get_random_bytes(data, size); | 88 | memcpy(error_ecc, correct_ecc, 3); |
| 89 | __change_bit_le(offset, error_ecc); | ||
| 90 | } | ||
| 33 | 91 | ||
| 34 | memcpy(error_data, data, size); | 92 | static void double_bit_error_ecc(void *error_ecc, void *correct_ecc, |
| 35 | inject_single_bit_error(error_data, size); | 93 | size_t size) |
| 94 | { | ||
| 95 | unsigned int offset[2]; | ||
| 36 | 96 | ||
| 37 | __nand_calculate_ecc(data, size, code); | 97 | offset[0] = random_ecc_bit(size); |
| 38 | __nand_calculate_ecc(error_data, size, error_code); | 98 | do { |
| 39 | __nand_correct_data(error_data, code, error_code, size); | 99 | offset[1] = random_ecc_bit(size); |
| 100 | } while (offset[0] == offset[1]); | ||
| 40 | 101 | ||
| 41 | if (!memcmp(data, error_data, size)) { | 102 | memcpy(error_ecc, correct_ecc, 3); |
| 42 | printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname); | 103 | __change_bit_le(offset[0], error_ecc); |
| 104 | __change_bit_le(offset[1], error_ecc); | ||
| 105 | } | ||
| 106 | |||
| 107 | static void no_bit_error(void *error_data, void *error_ecc, | ||
| 108 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 109 | { | ||
| 110 | memcpy(error_data, correct_data, size); | ||
| 111 | memcpy(error_ecc, correct_ecc, 3); | ||
| 112 | } | ||
| 113 | |||
| 114 | static int no_bit_error_verify(void *error_data, void *error_ecc, | ||
| 115 | void *correct_data, const size_t size) | ||
| 116 | { | ||
| 117 | unsigned char calc_ecc[3]; | ||
| 118 | int ret; | ||
| 119 | |||
| 120 | __nand_calculate_ecc(error_data, size, calc_ecc); | ||
| 121 | ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size); | ||
| 122 | if (ret == 0 && !memcmp(correct_data, error_data, size)) | ||
| 43 | return 0; | 123 | return 0; |
| 44 | } | ||
| 45 | 124 | ||
| 46 | printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname); | 125 | return -EINVAL; |
| 126 | } | ||
| 127 | |||
| 128 | static void single_bit_error_in_data(void *error_data, void *error_ecc, | ||
| 129 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 130 | { | ||
| 131 | single_bit_error_data(error_data, correct_data, size); | ||
| 132 | memcpy(error_ecc, correct_ecc, 3); | ||
| 133 | } | ||
| 134 | |||
| 135 | static void single_bit_error_in_ecc(void *error_data, void *error_ecc, | ||
| 136 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 137 | { | ||
| 138 | memcpy(error_data, correct_data, size); | ||
| 139 | single_bit_error_ecc(error_ecc, correct_ecc, size); | ||
| 140 | } | ||
| 141 | |||
| 142 | static int single_bit_error_correct(void *error_data, void *error_ecc, | ||
| 143 | void *correct_data, const size_t size) | ||
| 144 | { | ||
| 145 | unsigned char calc_ecc[3]; | ||
| 146 | int ret; | ||
| 47 | 147 | ||
| 48 | printk(KERN_DEBUG "hexdump of data:\n"); | 148 | __nand_calculate_ecc(error_data, size, calc_ecc); |
| 49 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, | 149 | ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size); |
| 50 | data, size, false); | 150 | if (ret == 1 && !memcmp(correct_data, error_data, size)) |
| 51 | printk(KERN_DEBUG "hexdump of error data:\n"); | 151 | return 0; |
| 52 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, | 152 | |
| 153 | return -EINVAL; | ||
| 154 | } | ||
| 155 | |||
| 156 | static void double_bit_error_in_data(void *error_data, void *error_ecc, | ||
| 157 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 158 | { | ||
| 159 | double_bit_error_data(error_data, correct_data, size); | ||
| 160 | memcpy(error_ecc, correct_ecc, 3); | ||
| 161 | } | ||
| 162 | |||
| 163 | static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc, | ||
| 164 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 165 | { | ||
| 166 | single_bit_error_data(error_data, correct_data, size); | ||
| 167 | single_bit_error_ecc(error_ecc, correct_ecc, size); | ||
| 168 | } | ||
| 169 | |||
| 170 | static void double_bit_error_in_ecc(void *error_data, void *error_ecc, | ||
| 171 | void *correct_data, void *correct_ecc, const size_t size) | ||
| 172 | { | ||
| 173 | memcpy(error_data, correct_data, size); | ||
| 174 | double_bit_error_ecc(error_ecc, correct_ecc, size); | ||
| 175 | } | ||
| 176 | |||
| 177 | static int double_bit_error_detect(void *error_data, void *error_ecc, | ||
| 178 | void *correct_data, const size_t size) | ||
| 179 | { | ||
| 180 | unsigned char calc_ecc[3]; | ||
| 181 | int ret; | ||
| 182 | |||
| 183 | __nand_calculate_ecc(error_data, size, calc_ecc); | ||
| 184 | ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size); | ||
| 185 | |||
| 186 | return (ret == -1) ? 0 : -EINVAL; | ||
| 187 | } | ||
| 188 | |||
| 189 | static const struct nand_ecc_test nand_ecc_test[] = { | ||
| 190 | { | ||
| 191 | .name = "no-bit-error", | ||
| 192 | .prepare = no_bit_error, | ||
| 193 | .verify = no_bit_error_verify, | ||
| 194 | }, | ||
| 195 | { | ||
| 196 | .name = "single-bit-error-in-data-correct", | ||
| 197 | .prepare = single_bit_error_in_data, | ||
| 198 | .verify = single_bit_error_correct, | ||
| 199 | }, | ||
| 200 | { | ||
| 201 | .name = "single-bit-error-in-ecc-correct", | ||
| 202 | .prepare = single_bit_error_in_ecc, | ||
| 203 | .verify = single_bit_error_correct, | ||
| 204 | }, | ||
| 205 | { | ||
| 206 | .name = "double-bit-error-in-data-detect", | ||
| 207 | .prepare = double_bit_error_in_data, | ||
| 208 | .verify = double_bit_error_detect, | ||
| 209 | }, | ||
| 210 | { | ||
| 211 | .name = "single-bit-error-in-data-and-ecc-detect", | ||
| 212 | .prepare = single_bit_error_in_data_and_ecc, | ||
| 213 | .verify = double_bit_error_detect, | ||
| 214 | }, | ||
| 215 | { | ||
| 216 | .name = "double-bit-error-in-ecc-detect", | ||
| 217 | .prepare = double_bit_error_in_ecc, | ||
| 218 | .verify = double_bit_error_detect, | ||
| 219 | }, | ||
| 220 | }; | ||
| 221 | |||
| 222 | static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data, | ||
| 223 | void *correct_ecc, const size_t size) | ||
| 224 | { | ||
| 225 | pr_info("hexdump of error data:\n"); | ||
| 226 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, | ||
| 53 | error_data, size, false); | 227 | error_data, size, false); |
| 228 | print_hex_dump(KERN_INFO, "hexdump of error ecc: ", | ||
| 229 | DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false); | ||
| 230 | |||
| 231 | pr_info("hexdump of correct data:\n"); | ||
| 232 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, | ||
| 233 | correct_data, size, false); | ||
| 234 | print_hex_dump(KERN_INFO, "hexdump of correct ecc: ", | ||
| 235 | DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false); | ||
| 236 | } | ||
| 237 | |||
| 238 | static int nand_ecc_test_run(const size_t size) | ||
| 239 | { | ||
| 240 | int i; | ||
| 241 | int err = 0; | ||
| 242 | void *error_data; | ||
| 243 | void *error_ecc; | ||
| 244 | void *correct_data; | ||
| 245 | void *correct_ecc; | ||
| 54 | 246 | ||
| 55 | return -1; | 247 | error_data = kmalloc(size, GFP_KERNEL); |
| 248 | error_ecc = kmalloc(3, GFP_KERNEL); | ||
| 249 | correct_data = kmalloc(size, GFP_KERNEL); | ||
| 250 | correct_ecc = kmalloc(3, GFP_KERNEL); | ||
| 251 | |||
| 252 | if (!error_data || !error_ecc || !correct_data || !correct_ecc) { | ||
| 253 | err = -ENOMEM; | ||
| 254 | goto error; | ||
| 255 | } | ||
| 256 | |||
| 257 | get_random_bytes(correct_data, size); | ||
| 258 | __nand_calculate_ecc(correct_data, size, correct_ecc); | ||
| 259 | |||
| 260 | for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) { | ||
| 261 | nand_ecc_test[i].prepare(error_data, error_ecc, | ||
| 262 | correct_data, correct_ecc, size); | ||
| 263 | err = nand_ecc_test[i].verify(error_data, error_ecc, | ||
| 264 | correct_data, size); | ||
| 265 | |||
| 266 | if (err) { | ||
| 267 | pr_err("mtd_nandecctest: not ok - %s-%zd\n", | ||
| 268 | nand_ecc_test[i].name, size); | ||
| 269 | dump_data_ecc(error_data, error_ecc, | ||
| 270 | correct_data, correct_ecc, size); | ||
| 271 | break; | ||
| 272 | } | ||
| 273 | pr_info("mtd_nandecctest: ok - %s-%zd\n", | ||
| 274 | nand_ecc_test[i].name, size); | ||
| 275 | } | ||
| 276 | error: | ||
| 277 | kfree(error_data); | ||
| 278 | kfree(error_ecc); | ||
| 279 | kfree(correct_data); | ||
| 280 | kfree(correct_ecc); | ||
| 281 | |||
| 282 | return err; | ||
| 56 | } | 283 | } |
| 57 | 284 | ||
| 58 | #else | 285 | #else |
| 59 | 286 | ||
| 60 | static int nand_ecc_test(const size_t size) | 287 | static int nand_ecc_test_run(const size_t size) |
| 61 | { | 288 | { |
| 62 | return 0; | 289 | return 0; |
| 63 | } | 290 | } |
| @@ -66,12 +293,13 @@ static int nand_ecc_test(const size_t size) | |||
| 66 | 293 | ||
| 67 | static int __init ecc_test_init(void) | 294 | static int __init ecc_test_init(void) |
| 68 | { | 295 | { |
| 69 | srandom32(jiffies); | 296 | int err; |
| 70 | 297 | ||
| 71 | nand_ecc_test(256); | 298 | err = nand_ecc_test_run(256); |
| 72 | nand_ecc_test(512); | 299 | if (err) |
| 300 | return err; | ||
| 73 | 301 | ||
| 74 | return 0; | 302 | return nand_ecc_test_run(512); |
| 75 | } | 303 | } |
| 76 | 304 | ||
| 77 | static void __exit ecc_test_exit(void) | 305 | static void __exit ecc_test_exit(void) |
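
The rewrite above replaces a single ad-hoc check with a table of prepare/verify pairs, and the contract being verified is the return value of __nand_correct_data(): 0 when data and stored ECC agree, 1 when a single flipped bit was repaired in place, and -1 when the damage is detectable but not correctable — exactly what no_bit_error_verify(), single_bit_error_correct() and double_bit_error_detect() encode. A condensed sketch of one table entry run by hand, using only names that appear in the patch (kernel context assumed, allocation and error handling omitted):

    /* sketch: the "single-bit-error-in-data-correct" case, unrolled */
    unsigned char correct_data[256], correct_ecc[3];
    unsigned char error_data[256], error_ecc[3], calc_ecc[3];

    get_random_bytes(correct_data, sizeof(correct_data));
    __nand_calculate_ecc(correct_data, sizeof(correct_data), correct_ecc);

    /* prepare: flip one random data bit, keep the stored ECC untouched */
    single_bit_error_in_data(error_data, error_ecc,
                             correct_data, correct_ecc, sizeof(correct_data));

    /* verify: recompute the ECC over the damaged data and let it repair the flip */
    __nand_calculate_ecc(error_data, sizeof(error_data), calc_ecc);
    if (__nand_correct_data(error_data, error_ecc, calc_ecc,
                            sizeof(error_data)) != 1 ||
        memcmp(error_data, correct_data, sizeof(correct_data)))
        pr_err("single bit error was not corrected\n");

The __change_bit_le() wrapper exists only so that the injected bit index means the same thing on big- and little-endian machines; on big-endian the XOR with (BITS_PER_LONG - 1) & ~0x7 converts a little-endian bit number into the native position within each long.
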
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 2aec4f3b72be..42b0f7456fc4 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/mtd/mtd.h> | 26 | #include <linux/mtd/mtd.h> |
| 27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
| 29 | #include <linux/random.h> | ||
| 29 | 30 | ||
| 30 | #define PRINT_PREF KERN_INFO "mtd_speedtest: " | 31 | #define PRINT_PREF KERN_INFO "mtd_speedtest: " |
| 31 | 32 | ||
| @@ -47,25 +48,13 @@ static int ebcnt; | |||
| 47 | static int pgcnt; | 48 | static int pgcnt; |
| 48 | static int goodebcnt; | 49 | static int goodebcnt; |
| 49 | static struct timeval start, finish; | 50 | static struct timeval start, finish; |
| 50 | static unsigned long next = 1; | ||
| 51 | |||
| 52 | static inline unsigned int simple_rand(void) | ||
| 53 | { | ||
| 54 | next = next * 1103515245 + 12345; | ||
| 55 | return (unsigned int)((next / 65536) % 32768); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void simple_srand(unsigned long seed) | ||
| 59 | { | ||
| 60 | next = seed; | ||
| 61 | } | ||
| 62 | 51 | ||
| 63 | static void set_random_data(unsigned char *buf, size_t len) | 52 | static void set_random_data(unsigned char *buf, size_t len) |
| 64 | { | 53 | { |
| 65 | size_t i; | 54 | size_t i; |
| 66 | 55 | ||
| 67 | for (i = 0; i < len; ++i) | 56 | for (i = 0; i < len; ++i) |
| 68 | buf[i] = simple_rand(); | 57 | buf[i] = random32(); |
| 69 | } | 58 | } |
| 70 | 59 | ||
| 71 | static int erase_eraseblock(int ebnum) | 60 | static int erase_eraseblock(int ebnum) |
| @@ -407,7 +396,6 @@ static int __init mtd_speedtest_init(void) | |||
| 407 | goto out; | 396 | goto out; |
| 408 | } | 397 | } |
| 409 | 398 | ||
| 410 | simple_srand(1); | ||
| 411 | set_random_data(iobuf, mtd->erasesize); | 399 | set_random_data(iobuf, mtd->erasesize); |
| 412 | 400 | ||
| 413 | err = scan_for_bad_eraseblocks(); | 401 | err = scan_for_bad_eraseblocks(); |
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 7b33f22d0b58..cb268cebf01a 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
| 29 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
| 30 | #include <linux/random.h> | ||
| 30 | 31 | ||
| 31 | #define PRINT_PREF KERN_INFO "mtd_stresstest: " | 32 | #define PRINT_PREF KERN_INFO "mtd_stresstest: " |
| 32 | 33 | ||
| @@ -48,28 +49,13 @@ static int pgsize; | |||
| 48 | static int bufsize; | 49 | static int bufsize; |
| 49 | static int ebcnt; | 50 | static int ebcnt; |
| 50 | static int pgcnt; | 51 | static int pgcnt; |
| 51 | static unsigned long next = 1; | ||
| 52 | |||
| 53 | static inline unsigned int simple_rand(void) | ||
| 54 | { | ||
| 55 | next = next * 1103515245 + 12345; | ||
| 56 | return (unsigned int)((next / 65536) % 32768); | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline void simple_srand(unsigned long seed) | ||
| 60 | { | ||
| 61 | next = seed; | ||
| 62 | } | ||
| 63 | 52 | ||
| 64 | static int rand_eb(void) | 53 | static int rand_eb(void) |
| 65 | { | 54 | { |
| 66 | int eb; | 55 | unsigned int eb; |
| 67 | 56 | ||
| 68 | again: | 57 | again: |
| 69 | if (ebcnt < 32768) | 58 | eb = random32(); |
| 70 | eb = simple_rand(); | ||
| 71 | else | ||
| 72 | eb = (simple_rand() << 15) | simple_rand(); | ||
| 73 | /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */ | 59 | /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
| 74 | eb %= (ebcnt - 1); | 60 | eb %= (ebcnt - 1); |
| 75 | if (bbt[eb]) | 61 | if (bbt[eb]) |
| @@ -79,24 +65,18 @@ again: | |||
| 79 | 65 | ||
| 80 | static int rand_offs(void) | 66 | static int rand_offs(void) |
| 81 | { | 67 | { |
| 82 | int offs; | 68 | unsigned int offs; |
| 83 | 69 | ||
| 84 | if (bufsize < 32768) | 70 | offs = random32(); |
| 85 | offs = simple_rand(); | ||
| 86 | else | ||
| 87 | offs = (simple_rand() << 15) | simple_rand(); | ||
| 88 | offs %= bufsize; | 71 | offs %= bufsize; |
| 89 | return offs; | 72 | return offs; |
| 90 | } | 73 | } |
| 91 | 74 | ||
| 92 | static int rand_len(int offs) | 75 | static int rand_len(int offs) |
| 93 | { | 76 | { |
| 94 | int len; | 77 | unsigned int len; |
| 95 | 78 | ||
| 96 | if (bufsize < 32768) | 79 | len = random32(); |
| 97 | len = simple_rand(); | ||
| 98 | else | ||
| 99 | len = (simple_rand() << 15) | simple_rand(); | ||
| 100 | len %= (bufsize - offs); | 80 | len %= (bufsize - offs); |
| 101 | return len; | 81 | return len; |
| 102 | } | 82 | } |
| @@ -211,7 +191,7 @@ static int do_write(void) | |||
| 211 | 191 | ||
| 212 | static int do_operation(void) | 192 | static int do_operation(void) |
| 213 | { | 193 | { |
| 214 | if (simple_rand() & 1) | 194 | if (random32() & 1) |
| 215 | return do_read(); | 195 | return do_read(); |
| 216 | else | 196 | else |
| 217 | return do_write(); | 197 | return do_write(); |
| @@ -302,9 +282,8 @@ static int __init mtd_stresstest_init(void) | |||
| 302 | } | 282 | } |
| 303 | for (i = 0; i < ebcnt; i++) | 283 | for (i = 0; i < ebcnt; i++) |
| 304 | offsets[i] = mtd->erasesize; | 284 | offsets[i] = mtd->erasesize; |
| 305 | simple_srand(current->pid); | ||
| 306 | for (i = 0; i < bufsize; i++) | 285 | for (i = 0; i < bufsize; i++) |
| 307 | writebuf[i] = simple_rand(); | 286 | writebuf[i] = random32(); |
| 308 | 287 | ||
| 309 | err = scan_for_bad_eraseblocks(); | 288 | err = scan_for_bad_eraseblocks(); |
| 310 | if (err) | 289 | if (err) |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 8c2ff2490d99..1acae359cabe 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
| @@ -134,6 +134,7 @@ config SPI_DAVINCI | |||
| 134 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" | 134 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" |
| 135 | depends on ARCH_DAVINCI | 135 | depends on ARCH_DAVINCI |
| 136 | select SPI_BITBANG | 136 | select SPI_BITBANG |
| 137 | select TI_EDMA | ||
| 137 | help | 138 | help |
| 138 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. | 139 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
| 139 | 140 | ||
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3afe2f4f5b8e..147dfa87a64b 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
| @@ -25,13 +25,14 @@ | |||
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
| 27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
| 28 | #include <linux/dmaengine.h> | ||
| 28 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
| 30 | #include <linux/edma.h> | ||
| 29 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
| 30 | #include <linux/spi/spi_bitbang.h> | 32 | #include <linux/spi/spi_bitbang.h> |
| 31 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
| 32 | 34 | ||
| 33 | #include <linux/platform_data/spi-davinci.h> | 35 | #include <linux/platform_data/spi-davinci.h> |
| 34 | #include <mach/edma.h> | ||
| 35 | 36 | ||
| 36 | #define SPI_NO_RESOURCE ((resource_size_t)-1) | 37 | #define SPI_NO_RESOURCE ((resource_size_t)-1) |
| 37 | 38 | ||
| @@ -113,14 +114,6 @@ | |||
| 113 | #define SPIDEF 0x4c | 114 | #define SPIDEF 0x4c |
| 114 | #define SPIFMT0 0x50 | 115 | #define SPIFMT0 0x50 |
| 115 | 116 | ||
| 116 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | ||
| 117 | struct davinci_spi_dma { | ||
| 118 | int tx_channel; | ||
| 119 | int rx_channel; | ||
| 120 | int dummy_param_slot; | ||
| 121 | enum dma_event_q eventq; | ||
| 122 | }; | ||
| 123 | |||
| 124 | /* SPI Controller driver's private data. */ | 117 | /* SPI Controller driver's private data. */ |
| 125 | struct davinci_spi { | 118 | struct davinci_spi { |
| 126 | struct spi_bitbang bitbang; | 119 | struct spi_bitbang bitbang; |
| @@ -134,11 +127,14 @@ struct davinci_spi { | |||
| 134 | 127 | ||
| 135 | const void *tx; | 128 | const void *tx; |
| 136 | void *rx; | 129 | void *rx; |
| 137 | #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) | ||
| 138 | u8 rx_tmp_buf[SPI_TMP_BUFSZ]; | ||
| 139 | int rcount; | 130 | int rcount; |
| 140 | int wcount; | 131 | int wcount; |
| 141 | struct davinci_spi_dma dma; | 132 | |
| 133 | struct dma_chan *dma_rx; | ||
| 134 | struct dma_chan *dma_tx; | ||
| 135 | int dma_rx_chnum; | ||
| 136 | int dma_tx_chnum; | ||
| 137 | |||
| 142 | struct davinci_spi_platform_data *pdata; | 138 | struct davinci_spi_platform_data *pdata; |
| 143 | 139 | ||
| 144 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | 140 | void (*get_rx)(u32 rx_data, struct davinci_spi *); |
| @@ -496,21 +492,23 @@ out: | |||
| 496 | return errors; | 492 | return errors; |
| 497 | } | 493 | } |
| 498 | 494 | ||
| 499 | static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) | 495 | static void davinci_spi_dma_rx_callback(void *data) |
| 500 | { | 496 | { |
| 501 | struct davinci_spi *dspi = data; | 497 | struct davinci_spi *dspi = (struct davinci_spi *)data; |
| 502 | struct davinci_spi_dma *dma = &dspi->dma; | ||
| 503 | 498 | ||
| 504 | edma_stop(lch); | 499 | dspi->rcount = 0; |
| 505 | 500 | ||
| 506 | if (status == DMA_COMPLETE) { | 501 | if (!dspi->wcount && !dspi->rcount) |
| 507 | if (lch == dma->rx_channel) | 502 | complete(&dspi->done); |
| 508 | dspi->rcount = 0; | 503 | } |
| 509 | if (lch == dma->tx_channel) | ||
| 510 | dspi->wcount = 0; | ||
| 511 | } | ||
| 512 | 504 | ||
| 513 | if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) | 505 | static void davinci_spi_dma_tx_callback(void *data) |
| 506 | { | ||
| 507 | struct davinci_spi *dspi = (struct davinci_spi *)data; | ||
| 508 | |||
| 509 | dspi->wcount = 0; | ||
| 510 | |||
| 511 | if (!dspi->wcount && !dspi->rcount) | ||
| 514 | complete(&dspi->done); | 512 | complete(&dspi->done); |
| 515 | } | 513 | } |
| 516 | 514 | ||
| @@ -526,20 +524,20 @@ static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) | |||
| 526 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | 524 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) |
| 527 | { | 525 | { |
| 528 | struct davinci_spi *dspi; | 526 | struct davinci_spi *dspi; |
| 529 | int data_type, ret; | 527 | int data_type, ret = -ENOMEM; |
| 530 | u32 tx_data, spidat1; | 528 | u32 tx_data, spidat1; |
| 531 | u32 errors = 0; | 529 | u32 errors = 0; |
| 532 | struct davinci_spi_config *spicfg; | 530 | struct davinci_spi_config *spicfg; |
| 533 | struct davinci_spi_platform_data *pdata; | 531 | struct davinci_spi_platform_data *pdata; |
| 534 | unsigned uninitialized_var(rx_buf_count); | 532 | unsigned uninitialized_var(rx_buf_count); |
| 535 | struct device *sdev; | 533 | void *dummy_buf = NULL; |
| 534 | struct scatterlist sg_rx, sg_tx; | ||
| 536 | 535 | ||
| 537 | dspi = spi_master_get_devdata(spi->master); | 536 | dspi = spi_master_get_devdata(spi->master); |
| 538 | pdata = dspi->pdata; | 537 | pdata = dspi->pdata; |
| 539 | spicfg = (struct davinci_spi_config *)spi->controller_data; | 538 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
| 540 | if (!spicfg) | 539 | if (!spicfg) |
| 541 | spicfg = &davinci_spi_default_cfg; | 540 | spicfg = &davinci_spi_default_cfg; |
| 542 | sdev = dspi->bitbang.master->dev.parent; | ||
| 543 | 541 | ||
| 544 | /* convert len to words based on bits_per_word */ | 542 | /* convert len to words based on bits_per_word */ |
| 545 | data_type = dspi->bytes_per_word[spi->chip_select]; | 543 | data_type = dspi->bytes_per_word[spi->chip_select]; |
| @@ -567,112 +565,83 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
| 567 | spidat1 |= tx_data & 0xFFFF; | 565 | spidat1 |= tx_data & 0xFFFF; |
| 568 | iowrite32(spidat1, dspi->base + SPIDAT1); | 566 | iowrite32(spidat1, dspi->base + SPIDAT1); |
| 569 | } else { | 567 | } else { |
| 570 | struct davinci_spi_dma *dma; | 568 | struct dma_slave_config dma_rx_conf = { |
| 571 | unsigned long tx_reg, rx_reg; | 569 | .direction = DMA_DEV_TO_MEM, |
| 572 | struct edmacc_param param; | 570 | .src_addr = (unsigned long)dspi->pbase + SPIBUF, |
| 573 | void *rx_buf; | 571 | .src_addr_width = data_type, |
| 574 | int b, c; | 572 | .src_maxburst = 1, |
| 575 | 573 | }; | |
| 576 | dma = &dspi->dma; | 574 | struct dma_slave_config dma_tx_conf = { |
| 577 | 575 | .direction = DMA_MEM_TO_DEV, | |
| 578 | tx_reg = (unsigned long)dspi->pbase + SPIDAT1; | 576 | .dst_addr = (unsigned long)dspi->pbase + SPIDAT1, |
| 579 | rx_reg = (unsigned long)dspi->pbase + SPIBUF; | 577 | .dst_addr_width = data_type, |
| 580 | 578 | .dst_maxburst = 1, | |
| 581 | /* | 579 | }; |
| 582 | * Transmit DMA setup | 580 | struct dma_async_tx_descriptor *rxdesc; |
| 583 | * | 581 | struct dma_async_tx_descriptor *txdesc; |
| 584 | * If there is transmit data, map the transmit buffer, set it | 582 | void *buf; |
| 585 | * as the source of data and set the source B index to data | 583 | |
| 586 | * size. If there is no transmit data, set the transmit register | 584 | dummy_buf = kzalloc(t->len, GFP_KERNEL); |
| 587 | * as the source of data, and set the source B index to zero. | 585 | if (!dummy_buf) |
| 588 | * | 586 | goto err_alloc_dummy_buf; |
| 589 | * The destination is always the transmit register itself. And | 587 | |
| 590 | * the destination never increments. | 588 | dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf); |
| 591 | */ | 589 | dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf); |
| 592 | 590 | ||
| 593 | if (t->tx_buf) { | 591 | sg_init_table(&sg_rx, 1); |
| 594 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, | 592 | if (!t->rx_buf) |
| 595 | t->len, DMA_TO_DEVICE); | 593 | buf = dummy_buf; |
| 596 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
| 597 | dev_dbg(sdev, "Unable to DMA map %d bytes" | ||
| 598 | "TX buffer\n", t->len); | ||
| 599 | return -ENOMEM; | ||
| 600 | } | ||
| 601 | } | ||
| 602 | |||
| 603 | /* | ||
| 604 | * If number of words is greater than 65535, then we need | ||
| 605 | * to configure a 3 dimension transfer. Use the BCNTRLD | ||
| 606 | * feature to allow for transfers that aren't even multiples | ||
| 607 | * of 65535 (or any other possible b size) by first transferring | ||
| 608 | * the remainder amount then grabbing the next N blocks of | ||
| 609 | * 65535 words. | ||
| 610 | */ | ||
| 611 | |||
| 612 | c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ | ||
| 613 | b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ | ||
| 614 | if (b) | ||
| 615 | c++; | ||
| 616 | else | 594 | else |
| 617 | b = SZ_64K - 1; | 595 | buf = t->rx_buf; |
| 618 | 596 | t->rx_dma = dma_map_single(&spi->dev, buf, | |
| 619 | param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); | 597 | t->len, DMA_FROM_DEVICE); |
| 620 | param.src = t->tx_buf ? t->tx_dma : tx_reg; | 598 | if (!t->rx_dma) { |
| 621 | param.a_b_cnt = b << 16 | data_type; | 599 | ret = -EFAULT; |
| 622 | param.dst = tx_reg; | 600 | goto err_rx_map; |
| 623 | param.src_dst_bidx = t->tx_buf ? data_type : 0; | ||
| 624 | param.link_bcntrld = 0xffffffff; | ||
| 625 | param.src_dst_cidx = t->tx_buf ? data_type : 0; | ||
| 626 | param.ccnt = c; | ||
| 627 | edma_write_slot(dma->tx_channel, ¶m); | ||
| 628 | edma_link(dma->tx_channel, dma->dummy_param_slot); | ||
| 629 | |||
| 630 | /* | ||
| 631 | * Receive DMA setup | ||
| 632 | * | ||
| 633 | * If there is receive buffer, use it to receive data. If there | ||
| 634 | * is none provided, use a temporary receive buffer. Set the | ||
| 635 | * destination B index to 0 so effectively only one byte is used | ||
| 636 | * in the temporary buffer (address does not increment). | ||
| 637 | * | ||
| 638 | * The source of receive data is the receive data register. The | ||
| 639 | * source address never increments. | ||
| 640 | */ | ||
| 641 | |||
| 642 | if (t->rx_buf) { | ||
| 643 | rx_buf = t->rx_buf; | ||
| 644 | rx_buf_count = t->len; | ||
| 645 | } else { | ||
| 646 | rx_buf = dspi->rx_tmp_buf; | ||
| 647 | rx_buf_count = sizeof(dspi->rx_tmp_buf); | ||
| 648 | } | 601 | } |
| 602 | sg_dma_address(&sg_rx) = t->rx_dma; | ||
| 603 | sg_dma_len(&sg_rx) = t->len; | ||
| 649 | 604 | ||
| 650 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, | 605 | sg_init_table(&sg_tx, 1); |
| 651 | DMA_FROM_DEVICE); | 606 | if (!t->tx_buf) |
| 652 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | 607 | buf = dummy_buf; |
| 653 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | 608 | else |
| 654 | rx_buf_count); | 609 | buf = (void *)t->tx_buf; |
| 655 | if (t->tx_buf) | 610 | t->tx_dma = dma_map_single(&spi->dev, buf, |
| 656 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, | 611 | t->len, DMA_TO_DEVICE); |
| 657 | DMA_TO_DEVICE); | 612 | if (!t->tx_dma) { |
| 658 | return -ENOMEM; | 613 | ret = -EFAULT; |
| 614 | goto err_tx_map; | ||
| 659 | } | 615 | } |
| 660 | 616 | sg_dma_address(&sg_tx) = t->tx_dma; | |
| 661 | param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); | 617 | sg_dma_len(&sg_tx) = t->len; |
| 662 | param.src = rx_reg; | 618 | |
| 663 | param.a_b_cnt = b << 16 | data_type; | 619 | rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx, |
| 664 | param.dst = t->rx_dma; | 620 | &sg_rx, 1, DMA_DEV_TO_MEM, |
| 665 | param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; | 621 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
| 666 | param.link_bcntrld = 0xffffffff; | 622 | if (!rxdesc) |
| 667 | param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; | 623 | goto err_desc; |
| 668 | param.ccnt = c; | 624 | |
| 669 | edma_write_slot(dma->rx_channel, ¶m); | 625 | txdesc = dmaengine_prep_slave_sg(dspi->dma_tx, |
| 626 | &sg_tx, 1, DMA_MEM_TO_DEV, | ||
| 627 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 628 | if (!txdesc) | ||
| 629 | goto err_desc; | ||
| 630 | |||
| 631 | rxdesc->callback = davinci_spi_dma_rx_callback; | ||
| 632 | rxdesc->callback_param = (void *)dspi; | ||
| 633 | txdesc->callback = davinci_spi_dma_tx_callback; | ||
| 634 | txdesc->callback_param = (void *)dspi; | ||
| 670 | 635 | ||
| 671 | if (pdata->cshold_bug) | 636 | if (pdata->cshold_bug) |
| 672 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); | 637 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); |
| 673 | 638 | ||
| 674 | edma_start(dma->rx_channel); | 639 | dmaengine_submit(rxdesc); |
| 675 | edma_start(dma->tx_channel); | 640 | dmaengine_submit(txdesc); |
| 641 | |||
| 642 | dma_async_issue_pending(dspi->dma_rx); | ||
| 643 | dma_async_issue_pending(dspi->dma_tx); | ||
| 644 | |||
| 676 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | 645 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
| 677 | } | 646 | } |
| 678 | 647 | ||
| @@ -690,15 +659,13 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
| 690 | 659 | ||
| 691 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); | 660 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); |
| 692 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { | 661 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { |
| 693 | |||
| 694 | if (t->tx_buf) | ||
| 695 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, | ||
| 696 | DMA_TO_DEVICE); | ||
| 697 | |||
| 698 | dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count, | ||
| 699 | DMA_FROM_DEVICE); | ||
| 700 | |||
| 701 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | 662 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
| 663 | |||
| 664 | dma_unmap_single(&spi->dev, t->rx_dma, | ||
| 665 | t->len, DMA_FROM_DEVICE); | ||
| 666 | dma_unmap_single(&spi->dev, t->tx_dma, | ||
| 667 | t->len, DMA_TO_DEVICE); | ||
| 668 | kfree(dummy_buf); | ||
| 702 | } | 669 | } |
| 703 | 670 | ||
| 704 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | 671 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
| @@ -716,11 +683,20 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
| 716 | } | 683 | } |
| 717 | 684 | ||
| 718 | if (dspi->rcount != 0 || dspi->wcount != 0) { | 685 | if (dspi->rcount != 0 || dspi->wcount != 0) { |
| 719 | dev_err(sdev, "SPI data transfer error\n"); | 686 | dev_err(&spi->dev, "SPI data transfer error\n"); |
| 720 | return -EIO; | 687 | return -EIO; |
| 721 | } | 688 | } |
| 722 | 689 | ||
| 723 | return t->len; | 690 | return t->len; |
| 691 | |||
| 692 | err_desc: | ||
| 693 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE); | ||
| 694 | err_tx_map: | ||
| 695 | dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE); | ||
| 696 | err_rx_map: | ||
| 697 | kfree(dummy_buf); | ||
| 698 | err_alloc_dummy_buf: | ||
| 699 | return ret; | ||
| 724 | } | 700 | } |
| 725 | 701 | ||
| 726 | /** | 702 | /** |
| @@ -751,39 +727,33 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data) | |||
| 751 | 727 | ||
| 752 | static int davinci_spi_request_dma(struct davinci_spi *dspi) | 728 | static int davinci_spi_request_dma(struct davinci_spi *dspi) |
| 753 | { | 729 | { |
| 730 | dma_cap_mask_t mask; | ||
| 731 | struct device *sdev = dspi->bitbang.master->dev.parent; | ||
| 754 | int r; | 732 | int r; |
| 755 | struct davinci_spi_dma *dma = &dspi->dma; | ||
| 756 | 733 | ||
| 757 | r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, | 734 | dma_cap_zero(mask); |
| 758 | dma->eventq); | 735 | dma_cap_set(DMA_SLAVE, mask); |
| 759 | if (r < 0) { | 736 | |
| 760 | pr_err("Unable to request DMA channel for SPI RX\n"); | 737 | dspi->dma_rx = dma_request_channel(mask, edma_filter_fn, |
| 761 | r = -EAGAIN; | 738 | &dspi->dma_rx_chnum); |
| 739 | if (!dspi->dma_rx) { | ||
| 740 | dev_err(sdev, "request RX DMA channel failed\n"); | ||
| 741 | r = -ENODEV; | ||
| 762 | goto rx_dma_failed; | 742 | goto rx_dma_failed; |
| 763 | } | 743 | } |
| 764 | 744 | ||
| 765 | r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, | 745 | dspi->dma_tx = dma_request_channel(mask, edma_filter_fn, |
| 766 | dma->eventq); | 746 | &dspi->dma_tx_chnum); |
| 767 | if (r < 0) { | 747 | if (!dspi->dma_tx) { |
| 768 | pr_err("Unable to request DMA channel for SPI TX\n"); | 748 | dev_err(sdev, "request TX DMA channel failed\n"); |
| 769 | r = -EAGAIN; | 749 | r = -ENODEV; |
| 770 | goto tx_dma_failed; | 750 | goto tx_dma_failed; |
| 771 | } | 751 | } |
| 772 | 752 | ||
| 773 | r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); | ||
| 774 | if (r < 0) { | ||
| 775 | pr_err("Unable to request SPI TX DMA param slot\n"); | ||
| 776 | r = -EAGAIN; | ||
| 777 | goto param_failed; | ||
| 778 | } | ||
| 779 | dma->dummy_param_slot = r; | ||
| 780 | edma_link(dma->dummy_param_slot, dma->dummy_param_slot); | ||
| 781 | |||
| 782 | return 0; | 753 | return 0; |
| 783 | param_failed: | 754 | |
| 784 | edma_free_channel(dma->tx_channel); | ||
| 785 | tx_dma_failed: | 755 | tx_dma_failed: |
| 786 | edma_free_channel(dma->rx_channel); | 756 | dma_release_channel(dspi->dma_rx); |
| 787 | rx_dma_failed: | 757 | rx_dma_failed: |
| 788 | return r; | 758 | return r; |
| 789 | } | 759 | } |
| @@ -898,9 +868,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev) | |||
| 898 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; | 868 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; |
| 899 | if (dma_rx_chan != SPI_NO_RESOURCE && | 869 | if (dma_rx_chan != SPI_NO_RESOURCE && |
| 900 | dma_tx_chan != SPI_NO_RESOURCE) { | 870 | dma_tx_chan != SPI_NO_RESOURCE) { |
| 901 | dspi->dma.rx_channel = dma_rx_chan; | 871 | dspi->dma_rx_chnum = dma_rx_chan; |
| 902 | dspi->dma.tx_channel = dma_tx_chan; | 872 | dspi->dma_tx_chnum = dma_tx_chan; |
| 903 | dspi->dma.eventq = pdata->dma_event_q; | ||
| 904 | 873 | ||
| 905 | ret = davinci_spi_request_dma(dspi); | 874 | ret = davinci_spi_request_dma(dspi); |
| 906 | if (ret) | 875 | if (ret) |
| @@ -955,9 +924,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev) | |||
| 955 | return ret; | 924 | return ret; |
| 956 | 925 | ||
| 957 | free_dma: | 926 | free_dma: |
| 958 | edma_free_channel(dspi->dma.tx_channel); | 927 | dma_release_channel(dspi->dma_rx); |
| 959 | edma_free_channel(dspi->dma.rx_channel); | 928 | dma_release_channel(dspi->dma_tx); |
| 960 | edma_free_slot(dspi->dma.dummy_param_slot); | ||
| 961 | free_clk: | 929 | free_clk: |
| 962 | clk_disable(dspi->clk); | 930 | clk_disable(dspi->clk); |
| 963 | clk_put(dspi->clk); | 931 | clk_put(dspi->clk); |
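
For orientation, the spi-davinci changes above are a straight conversion from the DaVinci-private EDMA calls to the generic dmaengine slave API: channels are obtained with dma_request_channel() and edma_filter_fn, each transfer is described by a dma_slave_config plus a one-entry scatterlist, and completion is signalled through per-descriptor callbacks rather than the old per-channel callback. A stripped-down sketch of the RX half of that sequence, using only names that appear in the patch (error handling and the symmetric TX side omitted):

    /* sketch of the dmaengine slave flow now used by davinci_spi_bufs(), RX side */
    struct dma_slave_config rx_conf = {
        .direction      = DMA_DEV_TO_MEM,
        .src_addr       = (unsigned long)dspi->pbase + SPIBUF,
        .src_addr_width = data_type,            /* bytes per SPI word */
        .src_maxburst   = 1,
    };
    struct dma_async_tx_descriptor *rxdesc;
    struct scatterlist sg_rx;

    dmaengine_slave_config(dspi->dma_rx, &rx_conf);

    sg_init_table(&sg_rx, 1);
    sg_dma_address(&sg_rx) = t->rx_dma;         /* buffer already dma_map_single()'d */
    sg_dma_len(&sg_rx) = t->len;

    rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx, &sg_rx, 1, DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    rxdesc->callback = davinci_spi_dma_rx_callback; /* zeroes rcount, completes dspi->done */
    rxdesc->callback_param = dspi;

    dmaengine_submit(rxdesc);
    dma_async_issue_pending(dspi->dma_rx);      /* nothing moves until this call */

The TX side mirrors this with DMA_MEM_TO_DEV and the SPIDAT1 register as the destination address.
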
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ff6475f409d6..f3187938e081 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/vmalloc.h> | ||
| 19 | #include "ctree.h" | 20 | #include "ctree.h" |
| 20 | #include "disk-io.h" | 21 | #include "disk-io.h" |
| 21 | #include "backref.h" | 22 | #include "backref.h" |
| @@ -231,7 +232,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | |||
| 231 | } | 232 | } |
| 232 | if (!ret) { | 233 | if (!ret) { |
| 233 | ret = ulist_add(parents, eb->start, | 234 | ret = ulist_add(parents, eb->start, |
| 234 | (unsigned long)eie, GFP_NOFS); | 235 | (uintptr_t)eie, GFP_NOFS); |
| 235 | if (ret < 0) | 236 | if (ret < 0) |
| 236 | break; | 237 | break; |
| 237 | if (!extent_item_pos) { | 238 | if (!extent_item_pos) { |
| @@ -363,8 +364,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info, | |||
| 363 | ULIST_ITER_INIT(&uiter); | 364 | ULIST_ITER_INIT(&uiter); |
| 364 | node = ulist_next(parents, &uiter); | 365 | node = ulist_next(parents, &uiter); |
| 365 | ref->parent = node ? node->val : 0; | 366 | ref->parent = node ? node->val : 0; |
| 366 | ref->inode_list = | 367 | ref->inode_list = node ? |
| 367 | node ? (struct extent_inode_elem *)node->aux : 0; | 368 | (struct extent_inode_elem *)(uintptr_t)node->aux : 0; |
| 368 | 369 | ||
| 369 | /* additional parents require new refs being added here */ | 370 | /* additional parents require new refs being added here */ |
| 370 | while ((node = ulist_next(parents, &uiter))) { | 371 | while ((node = ulist_next(parents, &uiter))) { |
| @@ -375,8 +376,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info, | |||
| 375 | } | 376 | } |
| 376 | memcpy(new_ref, ref, sizeof(*ref)); | 377 | memcpy(new_ref, ref, sizeof(*ref)); |
| 377 | new_ref->parent = node->val; | 378 | new_ref->parent = node->val; |
| 378 | new_ref->inode_list = | 379 | new_ref->inode_list = (struct extent_inode_elem *) |
| 379 | (struct extent_inode_elem *)node->aux; | 380 | (uintptr_t)node->aux; |
| 380 | list_add(&new_ref->list, &ref->list); | 381 | list_add(&new_ref->list, &ref->list); |
| 381 | } | 382 | } |
| 382 | ulist_reinit(parents); | 383 | ulist_reinit(parents); |
| @@ -914,8 +915,8 @@ again: | |||
| 914 | free_extent_buffer(eb); | 915 | free_extent_buffer(eb); |
| 915 | } | 916 | } |
| 916 | ret = ulist_add_merge(refs, ref->parent, | 917 | ret = ulist_add_merge(refs, ref->parent, |
| 917 | (unsigned long)ref->inode_list, | 918 | (uintptr_t)ref->inode_list, |
| 918 | (unsigned long *)&eie, GFP_NOFS); | 919 | (u64 *)&eie, GFP_NOFS); |
| 919 | if (!ret && extent_item_pos) { | 920 | if (!ret && extent_item_pos) { |
| 920 | /* | 921 | /* |
| 921 | * we've recorded that parent, so we must extend | 922 | * we've recorded that parent, so we must extend |
| @@ -959,7 +960,7 @@ static void free_leaf_list(struct ulist *blocks) | |||
| 959 | while ((node = ulist_next(blocks, &uiter))) { | 960 | while ((node = ulist_next(blocks, &uiter))) { |
| 960 | if (!node->aux) | 961 | if (!node->aux) |
| 961 | continue; | 962 | continue; |
| 962 | eie = (struct extent_inode_elem *)node->aux; | 963 | eie = (struct extent_inode_elem *)(uintptr_t)node->aux; |
| 963 | for (; eie; eie = eie_next) { | 964 | for (; eie; eie = eie_next) { |
| 964 | eie_next = eie->next; | 965 | eie_next = eie->next; |
| 965 | kfree(eie); | 966 | kfree(eie); |
| @@ -1108,26 +1109,80 @@ static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, | |||
| 1108 | found_key); | 1109 | found_key); |
| 1109 | } | 1110 | } |
| 1110 | 1111 | ||
| 1111 | /* | 1112 | int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, |
| 1112 | * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements | 1113 | u64 start_off, struct btrfs_path *path, |
| 1113 | * of the path are separated by '/' and the path is guaranteed to be | 1114 | struct btrfs_inode_extref **ret_extref, |
| 1114 | * 0-terminated. the path is only given within the current file system. | 1115 | u64 *found_off) |
| 1115 | * Therefore, it never starts with a '/'. the caller is responsible to provide | 1116 | { |
| 1116 | * "size" bytes in "dest". the dest buffer will be filled backwards. finally, | 1117 | int ret, slot; |
| 1117 | * the start point of the resulting string is returned. this pointer is within | 1118 | struct btrfs_key key; |
| 1118 | * dest, normally. | 1119 | struct btrfs_key found_key; |
| 1119 | * in case the path buffer would overflow, the pointer is decremented further | 1120 | struct btrfs_inode_extref *extref; |
| 1120 | * as if output was written to the buffer, though no more output is actually | 1121 | struct extent_buffer *leaf; |
| 1121 | * generated. that way, the caller can determine how much space would be | 1122 | unsigned long ptr; |
| 1122 | * required for the path to fit into the buffer. in that case, the returned | 1123 | |
| 1123 | * value will be smaller than dest. callers must check this! | 1124 | key.objectid = inode_objectid; |
| 1124 | */ | 1125 | btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY); |
| 1125 | char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | 1126 | key.offset = start_off; |
| 1126 | struct btrfs_inode_ref *iref, | 1127 | |
| 1128 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
| 1129 | if (ret < 0) | ||
| 1130 | return ret; | ||
| 1131 | |||
| 1132 | while (1) { | ||
| 1133 | leaf = path->nodes[0]; | ||
| 1134 | slot = path->slots[0]; | ||
| 1135 | if (slot >= btrfs_header_nritems(leaf)) { | ||
| 1136 | /* | ||
| 1137 | * If the item at offset is not found, | ||
| 1138 | * btrfs_search_slot will point us to the slot | ||
| 1139 | * where it should be inserted. In our case | ||
| 1140 | * that will be the slot directly before the | ||
| 1141 | * next BTRFS_INODE_EXTREF_KEY item. In the case | ||
| 1142 | * that we're pointing to the last slot in a | ||
| 1143 | * leaf, we must move one leaf over. | ||
| 1144 | */ | ||
| 1145 | ret = btrfs_next_leaf(root, path); | ||
| 1146 | if (ret) { | ||
| 1147 | if (ret >= 1) | ||
| 1148 | ret = -ENOENT; | ||
| 1149 | break; | ||
| 1150 | } | ||
| 1151 | continue; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | ||
| 1155 | |||
| 1156 | /* | ||
| 1157 | * Check that we're still looking at an extended ref key for | ||
| 1158 | * this particular objectid. If we have different | ||
| 1159 | * objectid or type then there are no more to be found | ||
| 1160 | * in the tree and we can exit. | ||
| 1161 | */ | ||
| 1162 | ret = -ENOENT; | ||
| 1163 | if (found_key.objectid != inode_objectid) | ||
| 1164 | break; | ||
| 1165 | if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY) | ||
| 1166 | break; | ||
| 1167 | |||
| 1168 | ret = 0; | ||
| 1169 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
| 1170 | extref = (struct btrfs_inode_extref *)ptr; | ||
| 1171 | *ret_extref = extref; | ||
| 1172 | if (found_off) | ||
| 1173 | *found_off = found_key.offset; | ||
| 1174 | break; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | return ret; | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | static char *ref_to_path(struct btrfs_root *fs_root, | ||
| 1181 | struct btrfs_path *path, | ||
| 1182 | u32 name_len, unsigned long name_off, | ||
| 1127 | struct extent_buffer *eb_in, u64 parent, | 1183 | struct extent_buffer *eb_in, u64 parent, |
| 1128 | char *dest, u32 size) | 1184 | char *dest, u32 size) |
| 1129 | { | 1185 | { |
| 1130 | u32 len; | ||
| 1131 | int slot; | 1186 | int slot; |
| 1132 | u64 next_inum; | 1187 | u64 next_inum; |
| 1133 | int ret; | 1188 | int ret; |
| @@ -1135,17 +1190,17 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | |||
| 1135 | struct extent_buffer *eb = eb_in; | 1190 | struct extent_buffer *eb = eb_in; |
| 1136 | struct btrfs_key found_key; | 1191 | struct btrfs_key found_key; |
| 1137 | int leave_spinning = path->leave_spinning; | 1192 | int leave_spinning = path->leave_spinning; |
| 1193 | struct btrfs_inode_ref *iref; | ||
| 1138 | 1194 | ||
| 1139 | if (bytes_left >= 0) | 1195 | if (bytes_left >= 0) |
| 1140 | dest[bytes_left] = '\0'; | 1196 | dest[bytes_left] = '\0'; |
| 1141 | 1197 | ||
| 1142 | path->leave_spinning = 1; | 1198 | path->leave_spinning = 1; |
| 1143 | while (1) { | 1199 | while (1) { |
| 1144 | len = btrfs_inode_ref_name_len(eb, iref); | 1200 | bytes_left -= name_len; |
| 1145 | bytes_left -= len; | ||
| 1146 | if (bytes_left >= 0) | 1201 | if (bytes_left >= 0) |
| 1147 | read_extent_buffer(eb, dest + bytes_left, | 1202 | read_extent_buffer(eb, dest + bytes_left, |
| 1148 | (unsigned long)(iref + 1), len); | 1203 | name_off, name_len); |
| 1149 | if (eb != eb_in) { | 1204 | if (eb != eb_in) { |
| 1150 | btrfs_tree_read_unlock_blocking(eb); | 1205 | btrfs_tree_read_unlock_blocking(eb); |
| 1151 | free_extent_buffer(eb); | 1206 | free_extent_buffer(eb); |
| @@ -1155,6 +1210,7 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | |||
| 1155 | ret = -ENOENT; | 1210 | ret = -ENOENT; |
| 1156 | if (ret) | 1211 | if (ret) |
| 1157 | break; | 1212 | break; |
| 1213 | |||
| 1158 | next_inum = found_key.offset; | 1214 | next_inum = found_key.offset; |
| 1159 | 1215 | ||
| 1160 | /* regular exit ahead */ | 1216 | /* regular exit ahead */ |
| @@ -1170,8 +1226,11 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | |||
| 1170 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); | 1226 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
| 1171 | } | 1227 | } |
| 1172 | btrfs_release_path(path); | 1228 | btrfs_release_path(path); |
| 1173 | |||
| 1174 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); | 1229 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); |
| 1230 | |||
| 1231 | name_len = btrfs_inode_ref_name_len(eb, iref); | ||
| 1232 | name_off = (unsigned long)(iref + 1); | ||
| 1233 | |||
| 1175 | parent = next_inum; | 1234 | parent = next_inum; |
| 1176 | --bytes_left; | 1235 | --bytes_left; |
| 1177 | if (bytes_left >= 0) | 1236 | if (bytes_left >= 0) |
| @@ -1188,12 +1247,39 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | |||
| 1188 | } | 1247 | } |
| 1189 | 1248 | ||
| 1190 | /* | 1249 | /* |
| 1250 | * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements | ||
| 1251 | * of the path are separated by '/' and the path is guaranteed to be | ||
| 1252 | * 0-terminated. the path is only given within the current file system. | ||
| 1253 | * Therefore, it never starts with a '/'. the caller is responsible to provide | ||
| 1254 | * "size" bytes in "dest". the dest buffer will be filled backwards. finally, | ||
| 1255 | * the start point of the resulting string is returned. this pointer is within | ||
| 1256 | * dest, normally. | ||
| 1257 | * in case the path buffer would overflow, the pointer is decremented further | ||
| 1258 | * as if output was written to the buffer, though no more output is actually | ||
| 1259 | * generated. that way, the caller can determine how much space would be | ||
| 1260 | * required for the path to fit into the buffer. in that case, the returned | ||
| 1261 | * value will be smaller than dest. callers must check this! | ||
| 1262 | */ | ||
| 1263 | char *btrfs_iref_to_path(struct btrfs_root *fs_root, | ||
| 1264 | struct btrfs_path *path, | ||
| 1265 | struct btrfs_inode_ref *iref, | ||
| 1266 | struct extent_buffer *eb_in, u64 parent, | ||
| 1267 | char *dest, u32 size) | ||
| 1268 | { | ||
| 1269 | return ref_to_path(fs_root, path, | ||
| 1270 | btrfs_inode_ref_name_len(eb_in, iref), | ||
| 1271 | (unsigned long)(iref + 1), | ||
| 1272 | eb_in, parent, dest, size); | ||
| 1273 | } | ||
| 1274 | |||
| 1275 | /* | ||
| 1191 | * this makes the path point to (logical EXTENT_ITEM *) | 1276 | * this makes the path point to (logical EXTENT_ITEM *) |
| 1192 | * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for | 1277 | * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for |
| 1193 | * tree blocks and <0 on error. | 1278 | * tree blocks and <0 on error. |
| 1194 | */ | 1279 | */ |
| 1195 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, | 1280 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, |
| 1196 | struct btrfs_path *path, struct btrfs_key *found_key) | 1281 | struct btrfs_path *path, struct btrfs_key *found_key, |
| 1282 | u64 *flags_ret) | ||
| 1197 | { | 1283 | { |
| 1198 | int ret; | 1284 | int ret; |
| 1199 | u64 flags; | 1285 | u64 flags; |
| @@ -1237,10 +1323,17 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, | |||
| 1237 | (unsigned long long)found_key->objectid, | 1323 | (unsigned long long)found_key->objectid, |
| 1238 | (unsigned long long)found_key->offset, | 1324 | (unsigned long long)found_key->offset, |
| 1239 | (unsigned long long)flags, item_size); | 1325 | (unsigned long long)flags, item_size); |
| 1240 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) | 1326 | |
| 1241 | return BTRFS_EXTENT_FLAG_TREE_BLOCK; | 1327 | WARN_ON(!flags_ret); |
| 1242 | if (flags & BTRFS_EXTENT_FLAG_DATA) | 1328 | if (flags_ret) { |
| 1243 | return BTRFS_EXTENT_FLAG_DATA; | 1329 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
| 1330 | *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK; | ||
| 1331 | else if (flags & BTRFS_EXTENT_FLAG_DATA) | ||
| 1332 | *flags_ret = BTRFS_EXTENT_FLAG_DATA; | ||
| 1333 | else | ||
| 1334 | BUG_ON(1); | ||
| 1335 | return 0; | ||
| 1336 | } | ||
| 1244 | 1337 | ||
| 1245 | return -EIO; | 1338 | return -EIO; |
| 1246 | } | 1339 | } |
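
The contract spelled out above for btrfs_iref_to_path()/ref_to_path() is easy to misread, so from the caller's side: the name is assembled backwards into the tail of dest, the returned pointer normally lies inside dest, and a returned pointer below dest means the buffer was too small and tells the caller how much extra room a retry needs. A hedged caller-side sketch (not taken from this patch; dest and dest_size are placeholders, and the IS_ERR() check assumes the usual btrfs error-pointer convention):

    /* sketch: resolving an inode ref to a path and detecting truncation */
    char *name = btrfs_iref_to_path(fs_root, path, iref, eb, parent,
                                    dest, dest_size);

    if (IS_ERR(name))                   /* walking up the tree failed */
        return PTR_ERR(name);
    if (name < dest)                    /* dest_size was short by (dest - name) bytes */
        return -ENAMETOOLONG;
    pr_debug("path within this fs: %s\n", name);  /* 0-terminated, no leading '/' */
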
| @@ -1404,12 +1497,13 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, | |||
| 1404 | ULIST_ITER_INIT(&root_uiter); | 1497 | ULIST_ITER_INIT(&root_uiter); |
| 1405 | while (!ret && (root_node = ulist_next(roots, &root_uiter))) { | 1498 | while (!ret && (root_node = ulist_next(roots, &root_uiter))) { |
| 1406 | pr_debug("root %llu references leaf %llu, data list " | 1499 | pr_debug("root %llu references leaf %llu, data list " |
| 1407 | "%#lx\n", root_node->val, ref_node->val, | 1500 | "%#llx\n", root_node->val, ref_node->val, |
| 1408 | ref_node->aux); | 1501 | (long long)ref_node->aux); |
| 1409 | ret = iterate_leaf_refs( | 1502 | ret = iterate_leaf_refs((struct extent_inode_elem *) |
| 1410 | (struct extent_inode_elem *)ref_node->aux, | 1503 | (uintptr_t)ref_node->aux, |
| 1411 | root_node->val, extent_item_objectid, | 1504 | root_node->val, |
| 1412 | iterate, ctx); | 1505 | extent_item_objectid, |
| 1506 | iterate, ctx); | ||
| 1413 | } | 1507 | } |
| 1414 | ulist_free(roots); | 1508 | ulist_free(roots); |
| 1415 | roots = NULL; | 1509 | roots = NULL; |
| @@ -1432,15 +1526,15 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, | |||
| 1432 | { | 1526 | { |
| 1433 | int ret; | 1527 | int ret; |
| 1434 | u64 extent_item_pos; | 1528 | u64 extent_item_pos; |
| 1529 | u64 flags = 0; | ||
| 1435 | struct btrfs_key found_key; | 1530 | struct btrfs_key found_key; |
| 1436 | int search_commit_root = path->search_commit_root; | 1531 | int search_commit_root = path->search_commit_root; |
| 1437 | 1532 | ||
| 1438 | ret = extent_from_logical(fs_info, logical, path, | 1533 | ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); |
| 1439 | &found_key); | ||
| 1440 | btrfs_release_path(path); | 1534 | btrfs_release_path(path); |
| 1441 | if (ret < 0) | 1535 | if (ret < 0) |
| 1442 | return ret; | 1536 | return ret; |
| 1443 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) | 1537 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
| 1444 | return -EINVAL; | 1538 | return -EINVAL; |
| 1445 | 1539 | ||
| 1446 | extent_item_pos = logical - found_key.objectid; | 1540 | extent_item_pos = logical - found_key.objectid; |
| @@ -1451,9 +1545,12 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, | |||
| 1451 | return ret; | 1545 | return ret; |
| 1452 | } | 1546 | } |
| 1453 | 1547 | ||
| 1454 | static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | 1548 | typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off, |
| 1455 | struct btrfs_path *path, | 1549 | struct extent_buffer *eb, void *ctx); |
| 1456 | iterate_irefs_t *iterate, void *ctx) | 1550 | |
| 1551 | static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, | ||
| 1552 | struct btrfs_path *path, | ||
| 1553 | iterate_irefs_t *iterate, void *ctx) | ||
| 1457 | { | 1554 | { |
| 1458 | int ret = 0; | 1555 | int ret = 0; |
| 1459 | int slot; | 1556 | int slot; |
| @@ -1470,7 +1567,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | |||
| 1470 | while (!ret) { | 1567 | while (!ret) { |
| 1471 | path->leave_spinning = 1; | 1568 | path->leave_spinning = 1; |
| 1472 | ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, | 1569 | ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, |
| 1473 | &found_key); | 1570 | &found_key); |
| 1474 | if (ret < 0) | 1571 | if (ret < 0) |
| 1475 | break; | 1572 | break; |
| 1476 | if (ret) { | 1573 | if (ret) { |
| @@ -1498,7 +1595,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | |||
| 1498 | "tree %llu\n", cur, | 1595 | "tree %llu\n", cur, |
| 1499 | (unsigned long long)found_key.objectid, | 1596 | (unsigned long long)found_key.objectid, |
| 1500 | (unsigned long long)fs_root->objectid); | 1597 | (unsigned long long)fs_root->objectid); |
| 1501 | ret = iterate(parent, iref, eb, ctx); | 1598 | ret = iterate(parent, name_len, |
| 1599 | (unsigned long)(iref + 1), eb, ctx); | ||
| 1502 | if (ret) | 1600 | if (ret) |
| 1503 | break; | 1601 | break; |
| 1504 | len = sizeof(*iref) + name_len; | 1602 | len = sizeof(*iref) + name_len; |
| @@ -1513,12 +1611,98 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | |||
| 1513 | return ret; | 1611 | return ret; |
| 1514 | } | 1612 | } |
| 1515 | 1613 | ||
| 1614 | static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, | ||
| 1615 | struct btrfs_path *path, | ||
| 1616 | iterate_irefs_t *iterate, void *ctx) | ||
| 1617 | { | ||
| 1618 | int ret; | ||
| 1619 | int slot; | ||
| 1620 | u64 offset = 0; | ||
| 1621 | u64 parent; | ||
| 1622 | int found = 0; | ||
| 1623 | struct extent_buffer *eb; | ||
| 1624 | struct btrfs_inode_extref *extref; | ||
| 1625 | struct extent_buffer *leaf; | ||
| 1626 | u32 item_size; | ||
| 1627 | u32 cur_offset; | ||
| 1628 | unsigned long ptr; | ||
| 1629 | |||
| 1630 | while (1) { | ||
| 1631 | ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, | ||
| 1632 | &offset); | ||
| 1633 | if (ret < 0) | ||
| 1634 | break; | ||
| 1635 | if (ret) { | ||
| 1636 | ret = found ? 0 : -ENOENT; | ||
| 1637 | break; | ||
| 1638 | } | ||
| 1639 | ++found; | ||
| 1640 | |||
| 1641 | slot = path->slots[0]; | ||
| 1642 | eb = path->nodes[0]; | ||
| 1643 | /* make sure we can use eb after releasing the path */ | ||
| 1644 | atomic_inc(&eb->refs); | ||
| 1645 | |||
| 1646 | btrfs_tree_read_lock(eb); | ||
| 1647 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); | ||
| 1648 | btrfs_release_path(path); | ||
| 1649 | |||
| 1650 | leaf = path->nodes[0]; | ||
| 1651 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
| 1652 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
| 1653 | cur_offset = 0; | ||
| 1654 | |||
| 1655 | while (cur_offset < item_size) { | ||
| 1656 | u32 name_len; | ||
| 1657 | |||
| 1658 | extref = (struct btrfs_inode_extref *)(ptr + cur_offset); | ||
| 1659 | parent = btrfs_inode_extref_parent(eb, extref); | ||
| 1660 | name_len = btrfs_inode_extref_name_len(eb, extref); | ||
| 1661 | ret = iterate(parent, name_len, | ||
| 1662 | (unsigned long)&extref->name, eb, ctx); | ||
| 1663 | if (ret) | ||
| 1664 | break; | ||
| 1665 | |||
| 1666 | cur_offset += btrfs_inode_extref_name_len(leaf, extref); | ||
| 1667 | cur_offset += sizeof(*extref); | ||
| 1668 | } | ||
| 1669 | btrfs_tree_read_unlock_blocking(eb); | ||
| 1670 | free_extent_buffer(eb); | ||
| 1671 | |||
| 1672 | offset++; | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | btrfs_release_path(path); | ||
| 1676 | |||
| 1677 | return ret; | ||
| 1678 | } | ||
| 1679 | |||
| 1680 | static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | ||
| 1681 | struct btrfs_path *path, iterate_irefs_t *iterate, | ||
| 1682 | void *ctx) | ||
| 1683 | { | ||
| 1684 | int ret; | ||
| 1685 | int found_refs = 0; | ||
| 1686 | |||
| 1687 | ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx); | ||
| 1688 | if (!ret) | ||
| 1689 | ++found_refs; | ||
| 1690 | else if (ret != -ENOENT) | ||
| 1691 | return ret; | ||
| 1692 | |||
| 1693 | ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx); | ||
| 1694 | if (ret == -ENOENT && found_refs) | ||
| 1695 | return 0; | ||
| 1696 | |||
| 1697 | return ret; | ||
| 1698 | } | ||
| 1699 | |||
| 1516 | /* | 1700 | /* |
| 1517 | * returns 0 if the path could be dumped (probably truncated) | 1701 | * returns 0 if the path could be dumped (probably truncated) |
| 1518 | * returns <0 in case of an error | 1702 | * returns <0 in case of an error |
| 1519 | */ | 1703 | */ |
| 1520 | static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref, | 1704 | static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, |
| 1521 | struct extent_buffer *eb, void *ctx) | 1705 | struct extent_buffer *eb, void *ctx) |
| 1522 | { | 1706 | { |
| 1523 | struct inode_fs_paths *ipath = ctx; | 1707 | struct inode_fs_paths *ipath = ctx; |
| 1524 | char *fspath; | 1708 | char *fspath; |
| @@ -1531,20 +1715,17 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref, | |||
| 1531 | ipath->fspath->bytes_left - s_ptr : 0; | 1715 | ipath->fspath->bytes_left - s_ptr : 0; |
| 1532 | 1716 | ||
| 1533 | fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; | 1717 | fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; |
| 1534 | fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb, | 1718 | fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, |
| 1535 | inum, fspath_min, bytes_left); | 1719 | name_off, eb, inum, fspath_min, |
| 1720 | bytes_left); | ||
| 1536 | if (IS_ERR(fspath)) | 1721 | if (IS_ERR(fspath)) |
| 1537 | return PTR_ERR(fspath); | 1722 | return PTR_ERR(fspath); |
| 1538 | 1723 | ||
| 1539 | if (fspath > fspath_min) { | 1724 | if (fspath > fspath_min) { |
| 1540 | pr_debug("path resolved: %s\n", fspath); | ||
| 1541 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; | 1725 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; |
| 1542 | ++ipath->fspath->elem_cnt; | 1726 | ++ipath->fspath->elem_cnt; |
| 1543 | ipath->fspath->bytes_left = fspath - fspath_min; | 1727 | ipath->fspath->bytes_left = fspath - fspath_min; |
| 1544 | } else { | 1728 | } else { |
| 1545 | pr_debug("missed path, not enough space. missing bytes: %lu, " | ||
| 1546 | "constructed so far: %s\n", | ||
| 1547 | (unsigned long)(fspath_min - fspath), fspath_min); | ||
| 1548 | ++ipath->fspath->elem_missed; | 1729 | ++ipath->fspath->elem_missed; |
| 1549 | ipath->fspath->bytes_missing += fspath_min - fspath; | 1730 | ipath->fspath->bytes_missing += fspath_min - fspath; |
| 1550 | ipath->fspath->bytes_left = 0; | 1731 | ipath->fspath->bytes_left = 0; |
| @@ -1566,7 +1747,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref, | |||
| 1566 | int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) | 1747 | int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) |
| 1567 | { | 1748 | { |
| 1568 | return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path, | 1749 | return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path, |
| 1569 | inode_to_path, ipath); | 1750 | inode_to_path, ipath); |
| 1570 | } | 1751 | } |
| 1571 | 1752 | ||
| 1572 | struct btrfs_data_container *init_data_container(u32 total_bytes) | 1753 | struct btrfs_data_container *init_data_container(u32 total_bytes) |
| @@ -1575,7 +1756,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes) | |||
| 1575 | size_t alloc_bytes; | 1756 | size_t alloc_bytes; |
| 1576 | 1757 | ||
| 1577 | alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); | 1758 | alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); |
| 1578 | data = kmalloc(alloc_bytes, GFP_NOFS); | 1759 | data = vmalloc(alloc_bytes); |
| 1579 | if (!data) | 1760 | if (!data) |
| 1580 | return ERR_PTR(-ENOMEM); | 1761 | return ERR_PTR(-ENOMEM); |
| 1581 | 1762 | ||
| @@ -1626,6 +1807,6 @@ void free_ipath(struct inode_fs_paths *ipath) | |||
| 1626 | { | 1807 | { |
| 1627 | if (!ipath) | 1808 | if (!ipath) |
| 1628 | return; | 1809 | return; |
| 1629 | kfree(ipath->fspath); | 1810 | vfree(ipath->fspath); |
| 1630 | kfree(ipath); | 1811 | kfree(ipath); |
| 1631 | } | 1812 | } |
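The backref.c hunks above change extent_from_logical() so that its return value carries only a status code, while the extent flags are handed back through the new *flags_ret argument; iterate_inodes_from_logical() then branches on the returned flags rather than on the return value. The following stand-alone user-space sketch illustrates that out-parameter convention; lookup_extent() and the EXTENT_FLAG_* names are invented for the example and are not the kernel API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EXTENT_FLAG_DATA        (1u << 0)
#define EXTENT_FLAG_TREE_BLOCK  (1u << 1)

/*
 * Return 0 or a negative errno; never mix flag bits into the return
 * value.  Flags are reported through *flags_ret instead, mirroring the
 * reworked extent_from_logical() calling convention.
 */
static int lookup_extent(uint64_t logical, uint64_t *flags_ret)
{
        if (!flags_ret)
                return -EINVAL;
        /* toy policy: pretend everything below 1 MiB is metadata */
        *flags_ret = (logical < (1 << 20)) ? EXTENT_FLAG_TREE_BLOCK
                                           : EXTENT_FLAG_DATA;
        return 0;
}

int main(void)
{
        uint64_t flags = 0;

        if (lookup_extent(4096, &flags) < 0)
                return 1;
        if (flags & EXTENT_FLAG_TREE_BLOCK)     /* branch on flags, not on ret */
                printf("tree block\n");
        else
                printf("data extent\n");
        return 0;
}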
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 032f4dc7eab8..e75533043a5f 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h | |||
| @@ -33,14 +33,13 @@ struct inode_fs_paths { | |||
| 33 | 33 | ||
| 34 | typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, | 34 | typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, |
| 35 | void *ctx); | 35 | void *ctx); |
| 36 | typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref, | ||
| 37 | struct extent_buffer *eb, void *ctx); | ||
| 38 | 36 | ||
| 39 | int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, | 37 | int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, |
| 40 | struct btrfs_path *path); | 38 | struct btrfs_path *path); |
| 41 | 39 | ||
| 42 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, | 40 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, |
| 43 | struct btrfs_path *path, struct btrfs_key *found_key); | 41 | struct btrfs_path *path, struct btrfs_key *found_key, |
| 42 | u64 *flags); | ||
| 44 | 43 | ||
| 45 | int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, | 44 | int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, |
| 46 | struct btrfs_extent_item *ei, u32 item_size, | 45 | struct btrfs_extent_item *ei, u32 item_size, |
| @@ -69,4 +68,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, | |||
| 69 | struct btrfs_path *path); | 68 | struct btrfs_path *path); |
| 70 | void free_ipath(struct inode_fs_paths *ipath); | 69 | void free_ipath(struct inode_fs_paths *ipath); |
| 71 | 70 | ||
| 71 | int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, | ||
| 72 | u64 start_off, struct btrfs_path *path, | ||
| 73 | struct btrfs_inode_extref **ret_extref, | ||
| 74 | u64 *found_off); | ||
| 75 | |||
| 72 | #endif | 76 | #endif |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 5b2ad6bc4fe7..ed8ca7ca5eff 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 | 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 |
| 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 | 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 |
| 40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 | 40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 |
| 41 | #define BTRFS_INODE_NEEDS_FULL_SYNC 7 | ||
| 41 | 42 | ||
| 42 | /* in memory btrfs inode */ | 43 | /* in memory btrfs inode */ |
| 43 | struct btrfs_inode { | 44 | struct btrfs_inode { |
| @@ -143,6 +144,9 @@ struct btrfs_inode { | |||
| 143 | /* flags field from the on disk inode */ | 144 | /* flags field from the on disk inode */ |
| 144 | u32 flags; | 145 | u32 flags; |
| 145 | 146 | ||
| 147 | /* a local copy of root's last_log_commit */ | ||
| 148 | unsigned long last_log_commit; | ||
| 149 | |||
| 146 | /* | 150 | /* |
| 147 | * Counters to keep track of the number of extent item's we may use due | 151 | * Counters to keep track of the number of extent item's we may use due |
| 148 | * to delalloc and such. outstanding_extents is the number of extent | 152 | * to delalloc and such. outstanding_extents is the number of extent |
| @@ -202,15 +206,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode) | |||
| 202 | 206 | ||
| 203 | static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) | 207 | static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) |
| 204 | { | 208 | { |
| 205 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 206 | int ret = 0; | ||
| 207 | |||
| 208 | mutex_lock(&root->log_mutex); | ||
| 209 | if (BTRFS_I(inode)->logged_trans == generation && | 209 | if (BTRFS_I(inode)->logged_trans == generation && |
| 210 | BTRFS_I(inode)->last_sub_trans <= root->last_log_commit) | 210 | BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit) |
| 211 | ret = 1; | 211 | return 1; |
| 212 | mutex_unlock(&root->log_mutex); | 212 | return 0; |
| 213 | return ret; | ||
| 214 | } | 213 | } |
| 215 | 214 | ||
| 216 | #endif | 215 | #endif |
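With the new per-inode last_log_commit field added above, btrfs_inode_in_log() no longer takes root->log_mutex: the fsync fast path only compares two counters cached in the inode itself. A minimal user-space sketch of that "snapshot the shared counter into the object" idea follows; struct toy_inode and the function names are illustrative only, and the example uses C11 atomics in place of the kernel's own synchronization.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
        atomic_ulong logged_trans;      /* transaction that last logged this inode */
        atomic_ulong last_sub_trans;    /* last modification sub-transaction */
        atomic_ulong last_log_commit;   /* per-inode copy of the root's counter */
};

/* Called from the logging path; it publishes the root-wide commit
 * counter into the inode so later checks need no shared lock. */
static void toy_mark_logged(struct toy_inode *inode, unsigned long generation,
                            unsigned long root_last_log_commit)
{
        atomic_store(&inode->logged_trans, generation);
        atomic_store(&inode->last_log_commit, root_last_log_commit);
}

/* Fast-path check: only per-inode loads, no mutex. */
static bool toy_inode_in_log(struct toy_inode *inode, unsigned long generation)
{
        return atomic_load(&inode->logged_trans) == generation &&
               atomic_load(&inode->last_sub_trans) <=
               atomic_load(&inode->last_log_commit);
}

int main(void)
{
        struct toy_inode inode;

        atomic_init(&inode.logged_trans, 0);
        atomic_init(&inode.last_sub_trans, 5);
        atomic_init(&inode.last_log_commit, 0);

        toy_mark_logged(&inode, 42, 7);
        printf("in log: %d\n", toy_inode_in_log(&inode, 42));   /* prints 1 */
        return 0;
}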
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 9197e2e33407..5a3e45db642a 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
| @@ -37,8 +37,9 @@ | |||
| 37 | * the file system was mounted, (i.e., they have been | 37 | * the file system was mounted, (i.e., they have been |
| 38 | * referenced by the super block) or they have been | 38 | * referenced by the super block) or they have been |
| 39 | * written since then and the write completion callback | 39 | * written since then and the write completion callback |
| 40 | * was called and a FLUSH request to the device where | 40 | * was called and no write error was indicated and a |
| 41 | * these blocks are located was received and completed. | 41 | * FLUSH request to the device where these blocks are |
| 42 | * located was received and completed. | ||
| 42 | * 2b. All referenced blocks need to have a generation | 43 | * 2b. All referenced blocks need to have a generation |
| 43 | * number which is equal to the parent's number. | 44 | * number which is equal to the parent's number. |
| 44 | * | 45 | * |
| @@ -2601,6 +2602,17 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, | |||
| 2601 | (unsigned long long)l->block_ref_to->dev_bytenr, | 2602 | (unsigned long long)l->block_ref_to->dev_bytenr, |
| 2602 | l->block_ref_to->mirror_num); | 2603 | l->block_ref_to->mirror_num); |
| 2603 | ret = -1; | 2604 | ret = -1; |
| 2605 | } else if (l->block_ref_to->iodone_w_error) { | ||
| 2606 | printk(KERN_INFO "btrfs: attempt to write superblock" | ||
| 2607 | " which references block %c @%llu (%s/%llu/%d)" | ||
| 2608 | " which has write error!\n", | ||
| 2609 | btrfsic_get_block_type(state, l->block_ref_to), | ||
| 2610 | (unsigned long long) | ||
| 2611 | l->block_ref_to->logical_bytenr, | ||
| 2612 | l->block_ref_to->dev_state->name, | ||
| 2613 | (unsigned long long)l->block_ref_to->dev_bytenr, | ||
| 2614 | l->block_ref_to->mirror_num); | ||
| 2615 | ret = -1; | ||
| 2604 | } else if (l->parent_generation != | 2616 | } else if (l->parent_generation != |
| 2605 | l->block_ref_to->generation && | 2617 | l->block_ref_to->generation && |
| 2606 | BTRFSIC_GENERATION_UNKNOWN != | 2618 | BTRFSIC_GENERATION_UNKNOWN != |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 43d1c5a3a030..c6467aa88bee 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
| @@ -577,6 +577,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
| 577 | u64 em_start; | 577 | u64 em_start; |
| 578 | struct extent_map *em; | 578 | struct extent_map *em; |
| 579 | int ret = -ENOMEM; | 579 | int ret = -ENOMEM; |
| 580 | int faili = 0; | ||
| 580 | u32 *sums; | 581 | u32 *sums; |
| 581 | 582 | ||
| 582 | tree = &BTRFS_I(inode)->io_tree; | 583 | tree = &BTRFS_I(inode)->io_tree; |
| @@ -626,9 +627,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
| 626 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { | 627 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
| 627 | cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | | 628 | cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | |
| 628 | __GFP_HIGHMEM); | 629 | __GFP_HIGHMEM); |
| 629 | if (!cb->compressed_pages[pg_index]) | 630 | if (!cb->compressed_pages[pg_index]) { |
| 631 | faili = pg_index - 1; | ||
| 632 | ret = -ENOMEM; | ||
| 630 | goto fail2; | 633 | goto fail2; |
| 634 | } | ||
| 631 | } | 635 | } |
| 636 | faili = nr_pages - 1; | ||
| 632 | cb->nr_pages = nr_pages; | 637 | cb->nr_pages = nr_pages; |
| 633 | 638 | ||
| 634 | add_ra_bio_pages(inode, em_start + em_len, cb); | 639 | add_ra_bio_pages(inode, em_start + em_len, cb); |
| @@ -713,8 +718,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
| 713 | return 0; | 718 | return 0; |
| 714 | 719 | ||
| 715 | fail2: | 720 | fail2: |
| 716 | for (pg_index = 0; pg_index < nr_pages; pg_index++) | 721 | while (faili >= 0) { |
| 717 | free_page((unsigned long)cb->compressed_pages[pg_index]); | 722 | __free_page(cb->compressed_pages[faili]); |
| 723 | faili--; | ||
| 724 | } | ||
| 718 | 725 | ||
| 719 | kfree(cb->compressed_pages); | 726 | kfree(cb->compressed_pages); |
| 720 | fail1: | 727 | fail1: |
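The compression.c error path above no longer frees every one of the nr_pages slots (including ones whose allocation never happened); it now records the last successfully allocated index in faili and hands only pages 0..faili back with __free_page(). The same unwind-only-what-you-allocated pattern in plain user-space C, with malloc standing in for alloc_page, might look like this (buffer names and sizes are arbitrary):

#include <stdio.h>
#include <stdlib.h>

#define NR_BUFS   8
#define BUF_SIZE  4096

static int alloc_all(char *bufs[NR_BUFS])
{
        int i, faili;

        for (i = 0; i < NR_BUFS; i++) {
                bufs[i] = malloc(BUF_SIZE);
                if (!bufs[i]) {
                        faili = i - 1;  /* last slot that was actually filled */
                        goto fail;
                }
        }
        return 0;

fail:
        /* free only what we allocated, newest first */
        while (faili >= 0) {
                free(bufs[faili]);
                bufs[faili] = NULL;
                faili--;
        }
        return -1;
}

int main(void)
{
        char *bufs[NR_BUFS] = { NULL };
        int i;

        if (alloc_all(bufs))
                return 1;
        puts("all buffers allocated");
        /* normal teardown once every slot is valid */
        for (i = 0; i < NR_BUFS; i++)
                free(bufs[i]);
        return 0;
}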
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6d183f60d63a..b33436211000 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -4402,149 +4402,6 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans, | |||
| 4402 | } | 4402 | } |
| 4403 | 4403 | ||
| 4404 | /* | 4404 | /* |
| 4405 | * Given a key and some data, insert items into the tree. | ||
| 4406 | * This does all the path init required, making room in the tree if needed. | ||
| 4407 | * Returns the number of keys that were inserted. | ||
| 4408 | */ | ||
| 4409 | int btrfs_insert_some_items(struct btrfs_trans_handle *trans, | ||
| 4410 | struct btrfs_root *root, | ||
| 4411 | struct btrfs_path *path, | ||
| 4412 | struct btrfs_key *cpu_key, u32 *data_size, | ||
| 4413 | int nr) | ||
| 4414 | { | ||
| 4415 | struct extent_buffer *leaf; | ||
| 4416 | struct btrfs_item *item; | ||
| 4417 | int ret = 0; | ||
| 4418 | int slot; | ||
| 4419 | int i; | ||
| 4420 | u32 nritems; | ||
| 4421 | u32 total_data = 0; | ||
| 4422 | u32 total_size = 0; | ||
| 4423 | unsigned int data_end; | ||
| 4424 | struct btrfs_disk_key disk_key; | ||
| 4425 | struct btrfs_key found_key; | ||
| 4426 | struct btrfs_map_token token; | ||
| 4427 | |||
| 4428 | btrfs_init_map_token(&token); | ||
| 4429 | |||
| 4430 | for (i = 0; i < nr; i++) { | ||
| 4431 | if (total_size + data_size[i] + sizeof(struct btrfs_item) > | ||
| 4432 | BTRFS_LEAF_DATA_SIZE(root)) { | ||
| 4433 | break; | ||
| 4434 | nr = i; | ||
| 4435 | } | ||
| 4436 | total_data += data_size[i]; | ||
| 4437 | total_size += data_size[i] + sizeof(struct btrfs_item); | ||
| 4438 | } | ||
| 4439 | BUG_ON(nr == 0); | ||
| 4440 | |||
| 4441 | ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); | ||
| 4442 | if (ret == 0) | ||
| 4443 | return -EEXIST; | ||
| 4444 | if (ret < 0) | ||
| 4445 | goto out; | ||
| 4446 | |||
| 4447 | leaf = path->nodes[0]; | ||
| 4448 | |||
| 4449 | nritems = btrfs_header_nritems(leaf); | ||
| 4450 | data_end = leaf_data_end(root, leaf); | ||
| 4451 | |||
| 4452 | if (btrfs_leaf_free_space(root, leaf) < total_size) { | ||
| 4453 | for (i = nr; i >= 0; i--) { | ||
| 4454 | total_data -= data_size[i]; | ||
| 4455 | total_size -= data_size[i] + sizeof(struct btrfs_item); | ||
| 4456 | if (total_size < btrfs_leaf_free_space(root, leaf)) | ||
| 4457 | break; | ||
| 4458 | } | ||
| 4459 | nr = i; | ||
| 4460 | } | ||
| 4461 | |||
| 4462 | slot = path->slots[0]; | ||
| 4463 | BUG_ON(slot < 0); | ||
| 4464 | |||
| 4465 | if (slot != nritems) { | ||
| 4466 | unsigned int old_data = btrfs_item_end_nr(leaf, slot); | ||
| 4467 | |||
| 4468 | item = btrfs_item_nr(leaf, slot); | ||
| 4469 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | ||
| 4470 | |||
| 4471 | /* figure out how many keys we can insert in here */ | ||
| 4472 | total_data = data_size[0]; | ||
| 4473 | for (i = 1; i < nr; i++) { | ||
| 4474 | if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0) | ||
| 4475 | break; | ||
| 4476 | total_data += data_size[i]; | ||
| 4477 | } | ||
| 4478 | nr = i; | ||
| 4479 | |||
| 4480 | if (old_data < data_end) { | ||
| 4481 | btrfs_print_leaf(root, leaf); | ||
| 4482 | printk(KERN_CRIT "slot %d old_data %d data_end %d\n", | ||
| 4483 | slot, old_data, data_end); | ||
| 4484 | BUG_ON(1); | ||
| 4485 | } | ||
| 4486 | /* | ||
| 4487 | * item0..itemN ... dataN.offset..dataN.size .. data0.size | ||
| 4488 | */ | ||
| 4489 | /* first correct the data pointers */ | ||
| 4490 | for (i = slot; i < nritems; i++) { | ||
| 4491 | u32 ioff; | ||
| 4492 | |||
| 4493 | item = btrfs_item_nr(leaf, i); | ||
| 4494 | ioff = btrfs_token_item_offset(leaf, item, &token); | ||
| 4495 | btrfs_set_token_item_offset(leaf, item, | ||
| 4496 | ioff - total_data, &token); | ||
| 4497 | } | ||
| 4498 | /* shift the items */ | ||
| 4499 | memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), | ||
| 4500 | btrfs_item_nr_offset(slot), | ||
| 4501 | (nritems - slot) * sizeof(struct btrfs_item)); | ||
| 4502 | |||
| 4503 | /* shift the data */ | ||
| 4504 | memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + | ||
| 4505 | data_end - total_data, btrfs_leaf_data(leaf) + | ||
| 4506 | data_end, old_data - data_end); | ||
| 4507 | data_end = old_data; | ||
| 4508 | } else { | ||
| 4509 | /* | ||
| 4510 | * this sucks but it has to be done, if we are inserting at | ||
| 4511 | * the end of the leaf only insert 1 of the items, since we | ||
| 4512 | * have no way of knowing whats on the next leaf and we'd have | ||
| 4513 | * to drop our current locks to figure it out | ||
| 4514 | */ | ||
| 4515 | nr = 1; | ||
| 4516 | } | ||
| 4517 | |||
| 4518 | /* setup the item for the new data */ | ||
| 4519 | for (i = 0; i < nr; i++) { | ||
| 4520 | btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); | ||
| 4521 | btrfs_set_item_key(leaf, &disk_key, slot + i); | ||
| 4522 | item = btrfs_item_nr(leaf, slot + i); | ||
| 4523 | btrfs_set_token_item_offset(leaf, item, | ||
| 4524 | data_end - data_size[i], &token); | ||
| 4525 | data_end -= data_size[i]; | ||
| 4526 | btrfs_set_token_item_size(leaf, item, data_size[i], &token); | ||
| 4527 | } | ||
| 4528 | btrfs_set_header_nritems(leaf, nritems + nr); | ||
| 4529 | btrfs_mark_buffer_dirty(leaf); | ||
| 4530 | |||
| 4531 | ret = 0; | ||
| 4532 | if (slot == 0) { | ||
| 4533 | btrfs_cpu_key_to_disk(&disk_key, cpu_key); | ||
| 4534 | fixup_low_keys(trans, root, path, &disk_key, 1); | ||
| 4535 | } | ||
| 4536 | |||
| 4537 | if (btrfs_leaf_free_space(root, leaf) < 0) { | ||
| 4538 | btrfs_print_leaf(root, leaf); | ||
| 4539 | BUG(); | ||
| 4540 | } | ||
| 4541 | out: | ||
| 4542 | if (!ret) | ||
| 4543 | ret = nr; | ||
| 4544 | return ret; | ||
| 4545 | } | ||
| 4546 | |||
| 4547 | /* | ||
| 4548 | * this is a helper for btrfs_insert_empty_items, the main goal here is | 4405 | * this is a helper for btrfs_insert_empty_items, the main goal here is |
| 4549 | * to save stack depth by doing the bulk of the work in a function | 4406 | * to save stack depth by doing the bulk of the work in a function |
| 4550 | * that doesn't call btrfs_search_slot | 4407 | * that doesn't call btrfs_search_slot |
| @@ -5073,6 +4930,7 @@ static void tree_move_down(struct btrfs_root *root, | |||
| 5073 | struct btrfs_path *path, | 4930 | struct btrfs_path *path, |
| 5074 | int *level, int root_level) | 4931 | int *level, int root_level) |
| 5075 | { | 4932 | { |
| 4933 | BUG_ON(*level == 0); | ||
| 5076 | path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], | 4934 | path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], |
| 5077 | path->slots[*level]); | 4935 | path->slots[*level]); |
| 5078 | path->slots[*level - 1] = 0; | 4936 | path->slots[*level - 1] = 0; |
| @@ -5089,7 +4947,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root, | |||
| 5089 | 4947 | ||
| 5090 | path->slots[*level]++; | 4948 | path->slots[*level]++; |
| 5091 | 4949 | ||
| 5092 | while (path->slots[*level] == nritems) { | 4950 | while (path->slots[*level] >= nritems) { |
| 5093 | if (*level == root_level) | 4951 | if (*level == root_level) |
| 5094 | return -1; | 4952 | return -1; |
| 5095 | 4953 | ||
| @@ -5433,9 +5291,11 @@ int btrfs_compare_trees(struct btrfs_root *left_root, | |||
| 5433 | goto out; | 5291 | goto out; |
| 5434 | advance_right = ADVANCE; | 5292 | advance_right = ADVANCE; |
| 5435 | } else { | 5293 | } else { |
| 5294 | WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); | ||
| 5436 | ret = tree_compare_item(left_root, left_path, | 5295 | ret = tree_compare_item(left_root, left_path, |
| 5437 | right_path, tmp_buf); | 5296 | right_path, tmp_buf); |
| 5438 | if (ret) { | 5297 | if (ret) { |
| 5298 | WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); | ||
| 5439 | ret = changed_cb(left_root, right_root, | 5299 | ret = changed_cb(left_root, right_root, |
| 5440 | left_path, right_path, | 5300 | left_path, right_path, |
| 5441 | &left_key, | 5301 | &left_key, |
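Two of the ctree.c fixes above are defensive: tree_move_down() now asserts it is never asked to descend below level 0, and tree_move_next_or_upnext() climbs up whenever the slot is >= nritems rather than exactly == nritems, so an index that has overshot the end of a node cannot keep walking past it. A small self-contained sketch of why the >= form is the safer loop guard; the toy_cursor type is illustrative only.

#include <stdio.h>

struct toy_cursor {
        int slot;       /* current index into the node */
        int nritems;    /* number of valid items right now */
};

/* Advance one slot; return 1 while more items remain, 0 when exhausted.
 * Using ">=" keeps this correct even if nritems shrank after the cursor
 * had already moved beyond the new end; with "==" such a slot would
 * never match and the walk would run on. */
static int cursor_next(struct toy_cursor *c)
{
        c->slot++;
        if (c->slot >= c->nritems)
                return 0;
        return 1;
}

int main(void)
{
        struct toy_cursor c = { .slot = 3, .nritems = 6 };

        c.nritems = 2;          /* items were removed behind our back */
        while (cursor_next(&c))
                printf("slot %d\n", c.slot);
        puts("done");           /* reached even though slot > nritems */
        return 0;
}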
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9821b672f5a2..926c9ffc66d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -154,6 +154,13 @@ struct btrfs_ordered_sum; | |||
| 154 | */ | 154 | */ |
| 155 | #define BTRFS_NAME_LEN 255 | 155 | #define BTRFS_NAME_LEN 255 |
| 156 | 156 | ||
| 157 | /* | ||
| 158 | * The theoretical limit is larger, but we keep this down to a sane | ||
| 159 | * value. That should greatly limit the possibility of collisions on | ||
| 160 | * inode ref items. | ||
| 161 | */ | ||
| 162 | #define BTRFS_LINK_MAX 65535U | ||
| 163 | |||
| 157 | /* 32 bytes in various csum fields */ | 164 | /* 32 bytes in various csum fields */ |
| 158 | #define BTRFS_CSUM_SIZE 32 | 165 | #define BTRFS_CSUM_SIZE 32 |
| 159 | 166 | ||
| @@ -489,6 +496,8 @@ struct btrfs_super_block { | |||
| 489 | */ | 496 | */ |
| 490 | #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) | 497 | #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) |
| 491 | 498 | ||
| 499 | #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) | ||
| 500 | |||
| 492 | #define BTRFS_FEATURE_COMPAT_SUPP 0ULL | 501 | #define BTRFS_FEATURE_COMPAT_SUPP 0ULL |
| 493 | #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL | 502 | #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL |
| 494 | #define BTRFS_FEATURE_INCOMPAT_SUPP \ | 503 | #define BTRFS_FEATURE_INCOMPAT_SUPP \ |
| @@ -496,7 +505,8 @@ struct btrfs_super_block { | |||
| 496 | BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ | 505 | BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ |
| 497 | BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ | 506 | BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ |
| 498 | BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ | 507 | BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ |
| 499 | BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) | 508 | BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ |
| 509 | BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) | ||
| 500 | 510 | ||
| 501 | /* | 511 | /* |
| 502 | * A leaf is full of items. offset and size tell us where to find | 512 | * A leaf is full of items. offset and size tell us where to find |
| @@ -643,6 +653,14 @@ struct btrfs_inode_ref { | |||
| 643 | /* name goes here */ | 653 | /* name goes here */ |
| 644 | } __attribute__ ((__packed__)); | 654 | } __attribute__ ((__packed__)); |
| 645 | 655 | ||
| 656 | struct btrfs_inode_extref { | ||
| 657 | __le64 parent_objectid; | ||
| 658 | __le64 index; | ||
| 659 | __le16 name_len; | ||
| 660 | __u8 name[0]; | ||
| 661 | /* name goes here */ | ||
| 662 | } __attribute__ ((__packed__)); | ||
| 663 | |||
| 646 | struct btrfs_timespec { | 664 | struct btrfs_timespec { |
| 647 | __le64 sec; | 665 | __le64 sec; |
| 648 | __le32 nsec; | 666 | __le32 nsec; |
| @@ -1028,12 +1046,22 @@ struct btrfs_space_info { | |||
| 1028 | wait_queue_head_t wait; | 1046 | wait_queue_head_t wait; |
| 1029 | }; | 1047 | }; |
| 1030 | 1048 | ||
| 1049 | #define BTRFS_BLOCK_RSV_GLOBAL 1 | ||
| 1050 | #define BTRFS_BLOCK_RSV_DELALLOC 2 | ||
| 1051 | #define BTRFS_BLOCK_RSV_TRANS 3 | ||
| 1052 | #define BTRFS_BLOCK_RSV_CHUNK 4 | ||
| 1053 | #define BTRFS_BLOCK_RSV_DELOPS 5 | ||
| 1054 | #define BTRFS_BLOCK_RSV_EMPTY 6 | ||
| 1055 | #define BTRFS_BLOCK_RSV_TEMP 7 | ||
| 1056 | |||
| 1031 | struct btrfs_block_rsv { | 1057 | struct btrfs_block_rsv { |
| 1032 | u64 size; | 1058 | u64 size; |
| 1033 | u64 reserved; | 1059 | u64 reserved; |
| 1034 | struct btrfs_space_info *space_info; | 1060 | struct btrfs_space_info *space_info; |
| 1035 | spinlock_t lock; | 1061 | spinlock_t lock; |
| 1036 | unsigned int full; | 1062 | unsigned short full; |
| 1063 | unsigned short type; | ||
| 1064 | unsigned short failfast; | ||
| 1037 | }; | 1065 | }; |
| 1038 | 1066 | ||
| 1039 | /* | 1067 | /* |
| @@ -1127,6 +1155,9 @@ struct btrfs_block_group_cache { | |||
| 1127 | * Today it will only have one thing on it, but that may change | 1155 | * Today it will only have one thing on it, but that may change |
| 1128 | */ | 1156 | */ |
| 1129 | struct list_head cluster_list; | 1157 | struct list_head cluster_list; |
| 1158 | |||
| 1159 | /* For delayed block group creation */ | ||
| 1160 | struct list_head new_bg_list; | ||
| 1130 | }; | 1161 | }; |
| 1131 | 1162 | ||
| 1132 | /* delayed seq elem */ | 1163 | /* delayed seq elem */ |
| @@ -1240,7 +1271,6 @@ struct btrfs_fs_info { | |||
| 1240 | struct mutex reloc_mutex; | 1271 | struct mutex reloc_mutex; |
| 1241 | 1272 | ||
| 1242 | struct list_head trans_list; | 1273 | struct list_head trans_list; |
| 1243 | struct list_head hashers; | ||
| 1244 | struct list_head dead_roots; | 1274 | struct list_head dead_roots; |
| 1245 | struct list_head caching_block_groups; | 1275 | struct list_head caching_block_groups; |
| 1246 | 1276 | ||
| @@ -1366,9 +1396,6 @@ struct btrfs_fs_info { | |||
| 1366 | struct rb_root defrag_inodes; | 1396 | struct rb_root defrag_inodes; |
| 1367 | atomic_t defrag_running; | 1397 | atomic_t defrag_running; |
| 1368 | 1398 | ||
| 1369 | spinlock_t ref_cache_lock; | ||
| 1370 | u64 total_ref_cache_size; | ||
| 1371 | |||
| 1372 | /* | 1399 | /* |
| 1373 | * these three are in extended format (availability of single | 1400 | * these three are in extended format (availability of single |
| 1374 | * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other | 1401 | * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other |
| @@ -1441,6 +1468,8 @@ struct btrfs_fs_info { | |||
| 1441 | 1468 | ||
| 1442 | /* next backup root to be overwritten */ | 1469 | /* next backup root to be overwritten */ |
| 1443 | int backup_root_index; | 1470 | int backup_root_index; |
| 1471 | |||
| 1472 | int num_tolerated_disk_barrier_failures; | ||
| 1444 | }; | 1473 | }; |
| 1445 | 1474 | ||
| 1446 | /* | 1475 | /* |
| @@ -1481,9 +1510,9 @@ struct btrfs_root { | |||
| 1481 | wait_queue_head_t log_commit_wait[2]; | 1510 | wait_queue_head_t log_commit_wait[2]; |
| 1482 | atomic_t log_writers; | 1511 | atomic_t log_writers; |
| 1483 | atomic_t log_commit[2]; | 1512 | atomic_t log_commit[2]; |
| 1513 | atomic_t log_batch; | ||
| 1484 | unsigned long log_transid; | 1514 | unsigned long log_transid; |
| 1485 | unsigned long last_log_commit; | 1515 | unsigned long last_log_commit; |
| 1486 | unsigned long log_batch; | ||
| 1487 | pid_t log_start_pid; | 1516 | pid_t log_start_pid; |
| 1488 | bool log_multiple_pids; | 1517 | bool log_multiple_pids; |
| 1489 | 1518 | ||
| @@ -1592,6 +1621,7 @@ struct btrfs_ioctl_defrag_range_args { | |||
| 1592 | */ | 1621 | */ |
| 1593 | #define BTRFS_INODE_ITEM_KEY 1 | 1622 | #define BTRFS_INODE_ITEM_KEY 1 |
| 1594 | #define BTRFS_INODE_REF_KEY 12 | 1623 | #define BTRFS_INODE_REF_KEY 12 |
| 1624 | #define BTRFS_INODE_EXTREF_KEY 13 | ||
| 1595 | #define BTRFS_XATTR_ITEM_KEY 24 | 1625 | #define BTRFS_XATTR_ITEM_KEY 24 |
| 1596 | #define BTRFS_ORPHAN_ITEM_KEY 48 | 1626 | #define BTRFS_ORPHAN_ITEM_KEY 48 |
| 1597 | /* reserve 2-15 close to the inode for later flexibility */ | 1627 | /* reserve 2-15 close to the inode for later flexibility */ |
| @@ -1978,6 +2008,13 @@ BTRFS_SETGET_STACK_FUNCS(block_group_flags, | |||
| 1978 | BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); | 2008 | BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); |
| 1979 | BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); | 2009 | BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); |
| 1980 | 2010 | ||
| 2011 | /* struct btrfs_inode_extref */ | ||
| 2012 | BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref, | ||
| 2013 | parent_objectid, 64); | ||
| 2014 | BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, | ||
| 2015 | name_len, 16); | ||
| 2016 | BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); | ||
| 2017 | |||
| 1981 | /* struct btrfs_inode_item */ | 2018 | /* struct btrfs_inode_item */ |
| 1982 | BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); | 2019 | BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); |
| 1983 | BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); | 2020 | BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); |
| @@ -2858,6 +2895,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
| 2858 | u64 size); | 2895 | u64 size); |
| 2859 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | 2896 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, |
| 2860 | struct btrfs_root *root, u64 group_start); | 2897 | struct btrfs_root *root, u64 group_start); |
| 2898 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, | ||
| 2899 | struct btrfs_root *root); | ||
| 2861 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); | 2900 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); |
| 2862 | u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); | 2901 | u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); |
| 2863 | void btrfs_clear_space_info_full(struct btrfs_fs_info *info); | 2902 | void btrfs_clear_space_info_full(struct btrfs_fs_info *info); |
| @@ -2874,8 +2913,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); | |||
| 2874 | void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); | 2913 | void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); |
| 2875 | int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes); | 2914 | int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes); |
| 2876 | void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes); | 2915 | void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes); |
| 2877 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv); | 2916 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); |
| 2878 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root); | 2917 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, |
| 2918 | unsigned short type); | ||
| 2879 | void btrfs_free_block_rsv(struct btrfs_root *root, | 2919 | void btrfs_free_block_rsv(struct btrfs_root *root, |
| 2880 | struct btrfs_block_rsv *rsv); | 2920 | struct btrfs_block_rsv *rsv); |
| 2881 | int btrfs_block_rsv_add(struct btrfs_root *root, | 2921 | int btrfs_block_rsv_add(struct btrfs_root *root, |
| @@ -3172,12 +3212,12 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
| 3172 | struct btrfs_root *root, | 3212 | struct btrfs_root *root, |
| 3173 | const char *name, int name_len, | 3213 | const char *name, int name_len, |
| 3174 | u64 inode_objectid, u64 ref_objectid, u64 *index); | 3214 | u64 inode_objectid, u64 ref_objectid, u64 *index); |
| 3175 | struct btrfs_inode_ref * | 3215 | int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans, |
| 3176 | btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, | 3216 | struct btrfs_root *root, |
| 3177 | struct btrfs_root *root, | 3217 | struct btrfs_path *path, |
| 3178 | struct btrfs_path *path, | 3218 | const char *name, int name_len, |
| 3179 | const char *name, int name_len, | 3219 | u64 inode_objectid, u64 ref_objectid, int mod, |
| 3180 | u64 inode_objectid, u64 ref_objectid, int mod); | 3220 | u64 *ret_index); |
| 3181 | int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, | 3221 | int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, |
| 3182 | struct btrfs_root *root, | 3222 | struct btrfs_root *root, |
| 3183 | struct btrfs_path *path, u64 objectid); | 3223 | struct btrfs_path *path, u64 objectid); |
| @@ -3185,6 +3225,19 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root | |||
| 3185 | *root, struct btrfs_path *path, | 3225 | *root, struct btrfs_path *path, |
| 3186 | struct btrfs_key *location, int mod); | 3226 | struct btrfs_key *location, int mod); |
| 3187 | 3227 | ||
| 3228 | struct btrfs_inode_extref * | ||
| 3229 | btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, | ||
| 3230 | struct btrfs_root *root, | ||
| 3231 | struct btrfs_path *path, | ||
| 3232 | const char *name, int name_len, | ||
| 3233 | u64 inode_objectid, u64 ref_objectid, int ins_len, | ||
| 3234 | int cow); | ||
| 3235 | |||
| 3236 | int btrfs_find_name_in_ext_backref(struct btrfs_path *path, | ||
| 3237 | u64 ref_objectid, const char *name, | ||
| 3238 | int name_len, | ||
| 3239 | struct btrfs_inode_extref **extref_ret); | ||
| 3240 | |||
| 3188 | /* file-item.c */ | 3241 | /* file-item.c */ |
| 3189 | int btrfs_del_csums(struct btrfs_trans_handle *trans, | 3242 | int btrfs_del_csums(struct btrfs_trans_handle *trans, |
| 3190 | struct btrfs_root *root, u64 bytenr, u64 len); | 3243 | struct btrfs_root *root, u64 bytenr, u64 len); |
| @@ -3249,6 +3302,8 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, | |||
| 3249 | struct btrfs_root *root, | 3302 | struct btrfs_root *root, |
| 3250 | struct inode *dir, u64 objectid, | 3303 | struct inode *dir, u64 objectid, |
| 3251 | const char *name, int name_len); | 3304 | const char *name, int name_len); |
| 3305 | int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, | ||
| 3306 | int front); | ||
| 3252 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | 3307 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
| 3253 | struct btrfs_root *root, | 3308 | struct btrfs_root *root, |
| 3254 | struct inode *inode, u64 new_size, | 3309 | struct inode *inode, u64 new_size, |
| @@ -3308,16 +3363,27 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); | |||
| 3308 | int btrfs_defrag_file(struct inode *inode, struct file *file, | 3363 | int btrfs_defrag_file(struct inode *inode, struct file *file, |
| 3309 | struct btrfs_ioctl_defrag_range_args *range, | 3364 | struct btrfs_ioctl_defrag_range_args *range, |
| 3310 | u64 newer_than, unsigned long max_pages); | 3365 | u64 newer_than, unsigned long max_pages); |
| 3366 | void btrfs_get_block_group_info(struct list_head *groups_list, | ||
| 3367 | struct btrfs_ioctl_space_info *space); | ||
| 3368 | |||
| 3311 | /* file.c */ | 3369 | /* file.c */ |
| 3312 | int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, | 3370 | int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, |
| 3313 | struct inode *inode); | 3371 | struct inode *inode); |
| 3314 | int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); | 3372 | int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); |
| 3315 | int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); | 3373 | int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); |
| 3316 | int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | 3374 | void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, |
| 3317 | int skip_pinned); | 3375 | int skip_pinned); |
| 3376 | int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace, | ||
| 3377 | u64 start, u64 end, int skip_pinned, | ||
| 3378 | int modified); | ||
| 3318 | extern const struct file_operations btrfs_file_operations; | 3379 | extern const struct file_operations btrfs_file_operations; |
| 3319 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | 3380 | int __btrfs_drop_extents(struct btrfs_trans_handle *trans, |
| 3320 | u64 start, u64 end, u64 *hint_byte, int drop_cache); | 3381 | struct btrfs_root *root, struct inode *inode, |
| 3382 | struct btrfs_path *path, u64 start, u64 end, | ||
| 3383 | u64 *drop_end, int drop_cache); | ||
| 3384 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, | ||
| 3385 | struct btrfs_root *root, struct inode *inode, u64 start, | ||
| 3386 | u64 end, int drop_cache); | ||
| 3321 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | 3387 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, |
| 3322 | struct inode *inode, u64 start, u64 end); | 3388 | struct inode *inode, u64 start, u64 end); |
| 3323 | int btrfs_release_file(struct inode *inode, struct file *file); | 3389 | int btrfs_release_file(struct inode *inode, struct file *file); |
| @@ -3378,6 +3444,11 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, | |||
| 3378 | } | 3444 | } |
| 3379 | } | 3445 | } |
| 3380 | 3446 | ||
| 3447 | /* | ||
| 3448 | * Call btrfs_abort_transaction as early as possible when an error condition is | ||
| 3449 | * detected, so that the exact line number is reported. | ||
| 3450 | */ | ||
| 3451 | |||
| 3381 | #define btrfs_abort_transaction(trans, root, errno) \ | 3452 | #define btrfs_abort_transaction(trans, root, errno) \ |
| 3382 | do { \ | 3453 | do { \ |
| 3383 | __btrfs_abort_transaction(trans, root, __func__, \ | 3454 | __btrfs_abort_transaction(trans, root, __func__, \ |
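The new struct btrfs_inode_extref added in ctree.h is a packed, variable-length record: a fixed header (parent objectid, index, name_len) followed immediately by name_len bytes of name, and several such records can sit back to back inside one item, which is why iterate_inode_extrefs() above advances cur_offset by sizeof(*extref) plus the name length on every pass. A stand-alone sketch of walking that kind of packed buffer follows; the record layout mirrors the struct, but the type name and sample data are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_extref {
        uint64_t parent_objectid;
        uint64_t index;
        uint16_t name_len;
        uint8_t  name[];        /* name_len bytes follow immediately */
} __attribute__((packed));

static void walk_extrefs(const uint8_t *buf, size_t item_size)
{
        size_t cur = 0;

        while (cur + sizeof(struct toy_extref) <= item_size) {
                const struct toy_extref *ref =
                        (const struct toy_extref *)(buf + cur);

                printf("parent %llu index %llu name %.*s\n",
                       (unsigned long long)ref->parent_objectid,
                       (unsigned long long)ref->index,
                       ref->name_len, ref->name);
                /* step over the header and the inline name */
                cur += sizeof(*ref) + ref->name_len;
        }
}

int main(void)
{
        uint8_t buf[64];
        struct toy_extref ref = { .parent_objectid = 256, .index = 2,
                                  .name_len = 3 };
        size_t len;

        memcpy(buf, &ref, sizeof(ref));
        memcpy(buf + sizeof(ref), "foo", 3);
        len = sizeof(ref) + 3;
        walk_extrefs(buf, len); /* prints: parent 256 index 2 name foo */
        return 0;
}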
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 52c85e2b95d0..478f66bdc57b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
| @@ -29,7 +29,7 @@ static struct kmem_cache *delayed_node_cache; | |||
| 29 | 29 | ||
| 30 | int __init btrfs_delayed_inode_init(void) | 30 | int __init btrfs_delayed_inode_init(void) |
| 31 | { | 31 | { |
| 32 | delayed_node_cache = kmem_cache_create("delayed_node", | 32 | delayed_node_cache = kmem_cache_create("btrfs_delayed_node", |
| 33 | sizeof(struct btrfs_delayed_node), | 33 | sizeof(struct btrfs_delayed_node), |
| 34 | 0, | 34 | 0, |
| 35 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, | 35 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, |
| @@ -650,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata( | |||
| 650 | * we're accounted for. | 650 | * we're accounted for. |
| 651 | */ | 651 | */ |
| 652 | if (!src_rsv || (!trans->bytes_reserved && | 652 | if (!src_rsv || (!trans->bytes_reserved && |
| 653 | src_rsv != &root->fs_info->delalloc_block_rsv)) { | 653 | src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { |
| 654 | ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes); | 654 | ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes); |
| 655 | /* | 655 | /* |
| 656 | * Since we're under a transaction reserve_metadata_bytes could | 656 | * Since we're under a transaction reserve_metadata_bytes could |
| @@ -668,7 +668,7 @@ static int btrfs_delayed_inode_reserve_metadata( | |||
| 668 | num_bytes, 1); | 668 | num_bytes, 1); |
| 669 | } | 669 | } |
| 670 | return ret; | 670 | return ret; |
| 671 | } else if (src_rsv == &root->fs_info->delalloc_block_rsv) { | 671 | } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) { |
| 672 | spin_lock(&BTRFS_I(inode)->lock); | 672 | spin_lock(&BTRFS_I(inode)->lock); |
| 673 | if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, | 673 | if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, |
| 674 | &BTRFS_I(inode)->runtime_flags)) { | 674 | &BTRFS_I(inode)->runtime_flags)) { |
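The delayed-inode.c hunks rely on the type field added to struct btrfs_block_rsv in ctree.h: a reserve is now recognised by its BTRFS_BLOCK_RSV_* tag (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) instead of by comparing its address against the global &fs_info->delalloc_block_rsv. A short sketch of that tagging idea, with invented toy_* names standing in for the kernel structures:

#include <stdbool.h>
#include <stdio.h>

enum toy_rsv_type {
        TOY_RSV_GLOBAL = 1,
        TOY_RSV_DELALLOC,
        TOY_RSV_TRANS,
        TOY_RSV_TEMP,
};

struct toy_block_rsv {
        unsigned long size;
        unsigned long reserved;
        unsigned short type;    /* which kind of reserve this is */
};

static void toy_rsv_init(struct toy_block_rsv *rsv, unsigned short type)
{
        rsv->size = 0;
        rsv->reserved = 0;
        rsv->type = type;
}

/* Callers no longer need the address of the one global delalloc reserve
 * to recognise one; any reserve carrying the right tag qualifies. */
static bool is_delalloc_rsv(const struct toy_block_rsv *rsv)
{
        return rsv && rsv->type == TOY_RSV_DELALLOC;
}

int main(void)
{
        struct toy_block_rsv local;

        toy_rsv_init(&local, TOY_RSV_DELALLOC);
        printf("delalloc? %d\n", is_delalloc_rsv(&local));      /* prints 1 */
        return 0;
}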
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 22e98e04c2ea..7cda51995c1e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -46,6 +46,10 @@ | |||
| 46 | #include "check-integrity.h" | 46 | #include "check-integrity.h" |
| 47 | #include "rcu-string.h" | 47 | #include "rcu-string.h" |
| 48 | 48 | ||
| 49 | #ifdef CONFIG_X86 | ||
| 50 | #include <asm/cpufeature.h> | ||
| 51 | #endif | ||
| 52 | |||
| 49 | static struct extent_io_ops btree_extent_io_ops; | 53 | static struct extent_io_ops btree_extent_io_ops; |
| 50 | static void end_workqueue_fn(struct btrfs_work *work); | 54 | static void end_workqueue_fn(struct btrfs_work *work); |
| 51 | static void free_fs_root(struct btrfs_root *root); | 55 | static void free_fs_root(struct btrfs_root *root); |
| @@ -217,26 +221,16 @@ static struct extent_map *btree_get_extent(struct inode *inode, | |||
| 217 | write_lock(&em_tree->lock); | 221 | write_lock(&em_tree->lock); |
| 218 | ret = add_extent_mapping(em_tree, em); | 222 | ret = add_extent_mapping(em_tree, em); |
| 219 | if (ret == -EEXIST) { | 223 | if (ret == -EEXIST) { |
| 220 | u64 failed_start = em->start; | ||
| 221 | u64 failed_len = em->len; | ||
| 222 | |||
| 223 | free_extent_map(em); | 224 | free_extent_map(em); |
| 224 | em = lookup_extent_mapping(em_tree, start, len); | 225 | em = lookup_extent_mapping(em_tree, start, len); |
| 225 | if (em) { | 226 | if (!em) |
| 226 | ret = 0; | 227 | em = ERR_PTR(-EIO); |
| 227 | } else { | ||
| 228 | em = lookup_extent_mapping(em_tree, failed_start, | ||
| 229 | failed_len); | ||
| 230 | ret = -EIO; | ||
| 231 | } | ||
| 232 | } else if (ret) { | 228 | } else if (ret) { |
| 233 | free_extent_map(em); | 229 | free_extent_map(em); |
| 234 | em = NULL; | 230 | em = ERR_PTR(ret); |
| 235 | } | 231 | } |
| 236 | write_unlock(&em_tree->lock); | 232 | write_unlock(&em_tree->lock); |
| 237 | 233 | ||
| 238 | if (ret) | ||
| 239 | em = ERR_PTR(ret); | ||
| 240 | out: | 234 | out: |
| 241 | return em; | 235 | return em; |
| 242 | } | 236 | } |
| @@ -439,10 +433,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) | |||
| 439 | WARN_ON(1); | 433 | WARN_ON(1); |
| 440 | return 0; | 434 | return 0; |
| 441 | } | 435 | } |
| 442 | if (eb->pages[0] != page) { | ||
| 443 | WARN_ON(1); | ||
| 444 | return 0; | ||
| 445 | } | ||
| 446 | if (!PageUptodate(page)) { | 436 | if (!PageUptodate(page)) { |
| 447 | WARN_ON(1); | 437 | WARN_ON(1); |
| 448 | return 0; | 438 | return 0; |
| @@ -869,10 +859,22 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | |||
| 869 | return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); | 859 | return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); |
| 870 | } | 860 | } |
| 871 | 861 | ||
| 862 | static int check_async_write(struct inode *inode, unsigned long bio_flags) | ||
| 863 | { | ||
| 864 | if (bio_flags & EXTENT_BIO_TREE_LOG) | ||
| 865 | return 0; | ||
| 866 | #ifdef CONFIG_X86 | ||
| 867 | if (cpu_has_xmm4_2) | ||
| 868 | return 0; | ||
| 869 | #endif | ||
| 870 | return 1; | ||
| 871 | } | ||
| 872 | |||
| 872 | static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | 873 | static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, |
| 873 | int mirror_num, unsigned long bio_flags, | 874 | int mirror_num, unsigned long bio_flags, |
| 874 | u64 bio_offset) | 875 | u64 bio_offset) |
| 875 | { | 876 | { |
| 877 | int async = check_async_write(inode, bio_flags); | ||
| 876 | int ret; | 878 | int ret; |
| 877 | 879 | ||
| 878 | if (!(rw & REQ_WRITE)) { | 880 | if (!(rw & REQ_WRITE)) { |
| @@ -887,6 +889,12 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
| 887 | return ret; | 889 | return ret; |
| 888 | return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | 890 | return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, |
| 889 | mirror_num, 0); | 891 | mirror_num, 0); |
| 892 | } else if (!async) { | ||
| 893 | ret = btree_csum_one_bio(bio); | ||
| 894 | if (ret) | ||
| 895 | return ret; | ||
| 896 | return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | ||
| 897 | mirror_num, 0); | ||
| 890 | } | 898 | } |
| 891 | 899 | ||
| 892 | /* | 900 | /* |
| @@ -1168,8 +1176,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
| 1168 | atomic_set(&root->log_commit[0], 0); | 1176 | atomic_set(&root->log_commit[0], 0); |
| 1169 | atomic_set(&root->log_commit[1], 0); | 1177 | atomic_set(&root->log_commit[1], 0); |
| 1170 | atomic_set(&root->log_writers, 0); | 1178 | atomic_set(&root->log_writers, 0); |
| 1179 | atomic_set(&root->log_batch, 0); | ||
| 1171 | atomic_set(&root->orphan_inodes, 0); | 1180 | atomic_set(&root->orphan_inodes, 0); |
| 1172 | root->log_batch = 0; | ||
| 1173 | root->log_transid = 0; | 1181 | root->log_transid = 0; |
| 1174 | root->last_log_commit = 0; | 1182 | root->last_log_commit = 0; |
| 1175 | extent_io_tree_init(&root->dirty_log_pages, | 1183 | extent_io_tree_init(&root->dirty_log_pages, |
| @@ -1667,9 +1675,10 @@ static int transaction_kthread(void *arg) | |||
| 1667 | spin_unlock(&root->fs_info->trans_lock); | 1675 | spin_unlock(&root->fs_info->trans_lock); |
| 1668 | 1676 | ||
| 1669 | /* If the file system is aborted, this will always fail. */ | 1677 | /* If the file system is aborted, this will always fail. */ |
| 1670 | trans = btrfs_join_transaction(root); | 1678 | trans = btrfs_attach_transaction(root); |
| 1671 | if (IS_ERR(trans)) { | 1679 | if (IS_ERR(trans)) { |
| 1672 | cannot_commit = true; | 1680 | if (PTR_ERR(trans) != -ENOENT) |
| 1681 | cannot_commit = true; | ||
| 1673 | goto sleep; | 1682 | goto sleep; |
| 1674 | } | 1683 | } |
| 1675 | if (transid == trans->transid) { | 1684 | if (transid == trans->transid) { |
| @@ -1994,13 +2003,11 @@ int open_ctree(struct super_block *sb, | |||
| 1994 | INIT_LIST_HEAD(&fs_info->trans_list); | 2003 | INIT_LIST_HEAD(&fs_info->trans_list); |
| 1995 | INIT_LIST_HEAD(&fs_info->dead_roots); | 2004 | INIT_LIST_HEAD(&fs_info->dead_roots); |
| 1996 | INIT_LIST_HEAD(&fs_info->delayed_iputs); | 2005 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
| 1997 | INIT_LIST_HEAD(&fs_info->hashers); | ||
| 1998 | INIT_LIST_HEAD(&fs_info->delalloc_inodes); | 2006 | INIT_LIST_HEAD(&fs_info->delalloc_inodes); |
| 1999 | INIT_LIST_HEAD(&fs_info->ordered_operations); | 2007 | INIT_LIST_HEAD(&fs_info->ordered_operations); |
| 2000 | INIT_LIST_HEAD(&fs_info->caching_block_groups); | 2008 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
| 2001 | spin_lock_init(&fs_info->delalloc_lock); | 2009 | spin_lock_init(&fs_info->delalloc_lock); |
| 2002 | spin_lock_init(&fs_info->trans_lock); | 2010 | spin_lock_init(&fs_info->trans_lock); |
| 2003 | spin_lock_init(&fs_info->ref_cache_lock); | ||
| 2004 | spin_lock_init(&fs_info->fs_roots_radix_lock); | 2011 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
| 2005 | spin_lock_init(&fs_info->delayed_iput_lock); | 2012 | spin_lock_init(&fs_info->delayed_iput_lock); |
| 2006 | spin_lock_init(&fs_info->defrag_inodes_lock); | 2013 | spin_lock_init(&fs_info->defrag_inodes_lock); |
| @@ -2014,12 +2021,15 @@ int open_ctree(struct super_block *sb, | |||
| 2014 | INIT_LIST_HEAD(&fs_info->space_info); | 2021 | INIT_LIST_HEAD(&fs_info->space_info); |
| 2015 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); | 2022 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
| 2016 | btrfs_mapping_init(&fs_info->mapping_tree); | 2023 | btrfs_mapping_init(&fs_info->mapping_tree); |
| 2017 | btrfs_init_block_rsv(&fs_info->global_block_rsv); | 2024 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
| 2018 | btrfs_init_block_rsv(&fs_info->delalloc_block_rsv); | 2025 | BTRFS_BLOCK_RSV_GLOBAL); |
| 2019 | btrfs_init_block_rsv(&fs_info->trans_block_rsv); | 2026 | btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, |
| 2020 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv); | 2027 | BTRFS_BLOCK_RSV_DELALLOC); |
| 2021 | btrfs_init_block_rsv(&fs_info->empty_block_rsv); | 2028 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
| 2022 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv); | 2029 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); |
| 2030 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); | ||
| 2031 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, | ||
| 2032 | BTRFS_BLOCK_RSV_DELOPS); | ||
| 2023 | atomic_set(&fs_info->nr_async_submits, 0); | 2033 | atomic_set(&fs_info->nr_async_submits, 0); |
| 2024 | atomic_set(&fs_info->async_delalloc_pages, 0); | 2034 | atomic_set(&fs_info->async_delalloc_pages, 0); |
| 2025 | atomic_set(&fs_info->async_submit_draining, 0); | 2035 | atomic_set(&fs_info->async_submit_draining, 0); |
| @@ -2491,6 +2501,8 @@ retry_root_backup: | |||
| 2491 | printk(KERN_ERR "Failed to read block groups: %d\n", ret); | 2501 | printk(KERN_ERR "Failed to read block groups: %d\n", ret); |
| 2492 | goto fail_block_groups; | 2502 | goto fail_block_groups; |
| 2493 | } | 2503 | } |
| 2504 | fs_info->num_tolerated_disk_barrier_failures = | ||
| 2505 | btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); | ||
| 2494 | 2506 | ||
| 2495 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, | 2507 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
| 2496 | "btrfs-cleaner"); | 2508 | "btrfs-cleaner"); |
| @@ -2874,12 +2886,10 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
| 2874 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", | 2886 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", |
| 2875 | rcu_str_deref(device->name)); | 2887 | rcu_str_deref(device->name)); |
| 2876 | device->nobarriers = 1; | 2888 | device->nobarriers = 1; |
| 2877 | } | 2889 | } else if (!bio_flagged(bio, BIO_UPTODATE)) { |
| 2878 | if (!bio_flagged(bio, BIO_UPTODATE)) { | ||
| 2879 | ret = -EIO; | 2890 | ret = -EIO; |
| 2880 | if (!bio_flagged(bio, BIO_EOPNOTSUPP)) | 2891 | btrfs_dev_stat_inc_and_print(device, |
| 2881 | btrfs_dev_stat_inc_and_print(device, | 2892 | BTRFS_DEV_STAT_FLUSH_ERRS); |
| 2882 | BTRFS_DEV_STAT_FLUSH_ERRS); | ||
| 2883 | } | 2893 | } |
| 2884 | 2894 | ||
| 2885 | /* drop the reference from the wait == 0 run */ | 2895 | /* drop the reference from the wait == 0 run */ |
| @@ -2918,14 +2928,15 @@ static int barrier_all_devices(struct btrfs_fs_info *info) | |||
| 2918 | { | 2928 | { |
| 2919 | struct list_head *head; | 2929 | struct list_head *head; |
| 2920 | struct btrfs_device *dev; | 2930 | struct btrfs_device *dev; |
| 2921 | int errors = 0; | 2931 | int errors_send = 0; |
| 2932 | int errors_wait = 0; | ||
| 2922 | int ret; | 2933 | int ret; |
| 2923 | 2934 | ||
| 2924 | /* send down all the barriers */ | 2935 | /* send down all the barriers */ |
| 2925 | head = &info->fs_devices->devices; | 2936 | head = &info->fs_devices->devices; |
| 2926 | list_for_each_entry_rcu(dev, head, dev_list) { | 2937 | list_for_each_entry_rcu(dev, head, dev_list) { |
| 2927 | if (!dev->bdev) { | 2938 | if (!dev->bdev) { |
| 2928 | errors++; | 2939 | errors_send++; |
| 2929 | continue; | 2940 | continue; |
| 2930 | } | 2941 | } |
| 2931 | if (!dev->in_fs_metadata || !dev->writeable) | 2942 | if (!dev->in_fs_metadata || !dev->writeable) |
| @@ -2933,13 +2944,13 @@ static int barrier_all_devices(struct btrfs_fs_info *info) | |||
| 2933 | 2944 | ||
| 2934 | ret = write_dev_flush(dev, 0); | 2945 | ret = write_dev_flush(dev, 0); |
| 2935 | if (ret) | 2946 | if (ret) |
| 2936 | errors++; | 2947 | errors_send++; |
| 2937 | } | 2948 | } |
| 2938 | 2949 | ||
| 2939 | /* wait for all the barriers */ | 2950 | /* wait for all the barriers */ |
| 2940 | list_for_each_entry_rcu(dev, head, dev_list) { | 2951 | list_for_each_entry_rcu(dev, head, dev_list) { |
| 2941 | if (!dev->bdev) { | 2952 | if (!dev->bdev) { |
| 2942 | errors++; | 2953 | errors_wait++; |
| 2943 | continue; | 2954 | continue; |
| 2944 | } | 2955 | } |
| 2945 | if (!dev->in_fs_metadata || !dev->writeable) | 2956 | if (!dev->in_fs_metadata || !dev->writeable) |
| @@ -2947,13 +2958,87 @@ static int barrier_all_devices(struct btrfs_fs_info *info) | |||
| 2947 | 2958 | ||
| 2948 | ret = write_dev_flush(dev, 1); | 2959 | ret = write_dev_flush(dev, 1); |
| 2949 | if (ret) | 2960 | if (ret) |
| 2950 | errors++; | 2961 | errors_wait++; |
| 2951 | } | 2962 | } |
| 2952 | if (errors) | 2963 | if (errors_send > info->num_tolerated_disk_barrier_failures || |
| 2964 | errors_wait > info->num_tolerated_disk_barrier_failures) | ||
| 2953 | return -EIO; | 2965 | return -EIO; |
| 2954 | return 0; | 2966 | return 0; |
| 2955 | } | 2967 | } |
| 2956 | 2968 | ||
| 2969 | int btrfs_calc_num_tolerated_disk_barrier_failures( | ||
| 2970 | struct btrfs_fs_info *fs_info) | ||
| 2971 | { | ||
| 2972 | struct btrfs_ioctl_space_info space; | ||
| 2973 | struct btrfs_space_info *sinfo; | ||
| 2974 | u64 types[] = {BTRFS_BLOCK_GROUP_DATA, | ||
| 2975 | BTRFS_BLOCK_GROUP_SYSTEM, | ||
| 2976 | BTRFS_BLOCK_GROUP_METADATA, | ||
| 2977 | BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA}; | ||
| 2978 | int num_types = 4; | ||
| 2979 | int i; | ||
| 2980 | int c; | ||
| 2981 | int num_tolerated_disk_barrier_failures = | ||
| 2982 | (int)fs_info->fs_devices->num_devices; | ||
| 2983 | |||
| 2984 | for (i = 0; i < num_types; i++) { | ||
| 2985 | struct btrfs_space_info *tmp; | ||
| 2986 | |||
| 2987 | sinfo = NULL; | ||
| 2988 | rcu_read_lock(); | ||
| 2989 | list_for_each_entry_rcu(tmp, &fs_info->space_info, list) { | ||
| 2990 | if (tmp->flags == types[i]) { | ||
| 2991 | sinfo = tmp; | ||
| 2992 | break; | ||
| 2993 | } | ||
| 2994 | } | ||
| 2995 | rcu_read_unlock(); | ||
| 2996 | |||
| 2997 | if (!sinfo) | ||
| 2998 | continue; | ||
| 2999 | |||
| 3000 | down_read(&sinfo->groups_sem); | ||
| 3001 | for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { | ||
| 3002 | if (!list_empty(&sinfo->block_groups[c])) { | ||
| 3003 | u64 flags; | ||
| 3004 | |||
| 3005 | btrfs_get_block_group_info( | ||
| 3006 | &sinfo->block_groups[c], &space); | ||
| 3007 | if (space.total_bytes == 0 || | ||
| 3008 | space.used_bytes == 0) | ||
| 3009 | continue; | ||
| 3010 | flags = space.flags; | ||
| 3011 | /* | ||
| 3012 | * return | ||
| 3013 | * 0: if dup, single or RAID0 is configured for | ||
| 3014 | * any of metadata, system or data, else | ||
| 3015 | * 1: if RAID5 is configured, or if RAID1 or | ||
| 3016 | * RAID10 is configured and only two mirrors | ||
| 3017 | * are used, else | ||
| 3018 | * 2: if RAID6 is configured, else | ||
| 3019 | * num_mirrors - 1: if RAID1 or RAID10 is | ||
| 3020 | * configured and more than | ||
| 3021 | * 2 mirrors are used. | ||
| 3022 | */ | ||
| 3023 | if (num_tolerated_disk_barrier_failures > 0 && | ||
| 3024 | ((flags & (BTRFS_BLOCK_GROUP_DUP | | ||
| 3025 | BTRFS_BLOCK_GROUP_RAID0)) || | ||
| 3026 | ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) | ||
| 3027 | == 0))) | ||
| 3028 | num_tolerated_disk_barrier_failures = 0; | ||
| 3029 | else if (num_tolerated_disk_barrier_failures > 1 | ||
| 3030 | && | ||
| 3031 | (flags & (BTRFS_BLOCK_GROUP_RAID1 | | ||
| 3032 | BTRFS_BLOCK_GROUP_RAID10))) | ||
| 3033 | num_tolerated_disk_barrier_failures = 1; | ||
| 3034 | } | ||
| 3035 | } | ||
| 3036 | up_read(&sinfo->groups_sem); | ||
| 3037 | } | ||
| 3038 | |||
| 3039 | return num_tolerated_disk_barrier_failures; | ||
| 3040 | } | ||
| 3041 | |||
| 2957 | int write_all_supers(struct btrfs_root *root, int max_mirrors) | 3042 | int write_all_supers(struct btrfs_root *root, int max_mirrors) |
| 2958 | { | 3043 | { |
| 2959 | struct list_head *head; | 3044 | struct list_head *head; |
| @@ -2976,8 +3061,16 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
| 2976 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 3061 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
| 2977 | head = &root->fs_info->fs_devices->devices; | 3062 | head = &root->fs_info->fs_devices->devices; |
| 2978 | 3063 | ||
| 2979 | if (do_barriers) | 3064 | if (do_barriers) { |
| 2980 | barrier_all_devices(root->fs_info); | 3065 | ret = barrier_all_devices(root->fs_info); |
| 3066 | if (ret) { | ||
| 3067 | mutex_unlock( | ||
| 3068 | &root->fs_info->fs_devices->device_list_mutex); | ||
| 3069 | btrfs_error(root->fs_info, ret, | ||
| 3070 | "errors while submitting device barriers."); | ||
| 3071 | return ret; | ||
| 3072 | } | ||
| 3073 | } | ||
| 2981 | 3074 | ||
| 2982 | list_for_each_entry_rcu(dev, head, dev_list) { | 3075 | list_for_each_entry_rcu(dev, head, dev_list) { |
| 2983 | if (!dev->bdev) { | 3076 | if (!dev->bdev) { |
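
The hunks above change two things that work together: barrier_all_devices() now counts send-phase and wait-phase failures separately and compares each count against fs_info->num_tolerated_disk_barrier_failures, and the new btrfs_calc_num_tolerated_disk_barrier_failures() derives that threshold from the block group profiles actually in use. The program below restates the profile-to-tolerance mapping from the comment in the new function as a standalone userspace sketch; the BLOCK_GROUP_* bit positions are assumed to match the btrfs on-disk format, and the per-space-info iteration and the many-mirror RAID1/RAID10 case are left out for brevity.

/*
 * Simplified sketch of the profile -> tolerated-failures mapping used by
 * btrfs_calc_num_tolerated_disk_barrier_failures().  Userspace, for
 * illustration only; the bit positions below are assumptions.
 */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_GROUP_RAID0   (1ULL << 3)
#define BLOCK_GROUP_RAID1   (1ULL << 4)
#define BLOCK_GROUP_DUP     (1ULL << 5)
#define BLOCK_GROUP_RAID10  (1ULL << 6)
#define BLOCK_GROUP_PROFILE_MASK (BLOCK_GROUP_RAID0 | BLOCK_GROUP_RAID1 | \
                                  BLOCK_GROUP_DUP | BLOCK_GROUP_RAID10)

/*
 * Start from the device count and clamp it down for every profile that
 * is in use, mirroring the loop in the patch: single/dup/RAID0 tolerate
 * no failures, two-copy RAID1/RAID10 tolerate one.
 */
static int tolerated_failures(int num_devices,
                              const uint64_t *profiles, int count)
{
        int tolerated = num_devices;
        int i;

        for (i = 0; i < count; i++) {
                uint64_t flags = profiles[i];

                if (tolerated > 0 &&
                    ((flags & (BLOCK_GROUP_DUP | BLOCK_GROUP_RAID0)) ||
                     (flags & BLOCK_GROUP_PROFILE_MASK) == 0))
                        tolerated = 0;
                else if (tolerated > 1 &&
                         (flags & (BLOCK_GROUP_RAID1 | BLOCK_GROUP_RAID10)))
                        tolerated = 1;
        }
        return tolerated;
}

int main(void)
{
        uint64_t raid1_only[] = { BLOCK_GROUP_RAID1 };
        uint64_t raid1_plus_single[] = { BLOCK_GROUP_RAID1, 0 };

        /* RAID1 everywhere on 2 devices: one barrier failure is tolerable */
        printf("%d\n", tolerated_failures(2, raid1_only, 1));
        /* a single-profile block group drags the whole fs down to 0 */
        printf("%d\n", tolerated_failures(2, raid1_plus_single, 2));
        return 0;
}

Splitting errors_send from errors_wait keeps one misbehaving device from being counted twice against that threshold, and write_all_supers() above can now abort the super block write-out with -EIO as soon as either phase exceeds what the profiles can absorb.
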
| @@ -3211,10 +3304,6 @@ int close_ctree(struct btrfs_root *root) | |||
| 3211 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", | 3304 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", |
| 3212 | (unsigned long long)fs_info->delalloc_bytes); | 3305 | (unsigned long long)fs_info->delalloc_bytes); |
| 3213 | } | 3306 | } |
| 3214 | if (fs_info->total_ref_cache_size) { | ||
| 3215 | printk(KERN_INFO "btrfs: at umount reference cache size %llu\n", | ||
| 3216 | (unsigned long long)fs_info->total_ref_cache_size); | ||
| 3217 | } | ||
| 3218 | 3307 | ||
| 3219 | free_extent_buffer(fs_info->extent_root->node); | 3308 | free_extent_buffer(fs_info->extent_root->node); |
| 3220 | free_extent_buffer(fs_info->extent_root->commit_root); | 3309 | free_extent_buffer(fs_info->extent_root->commit_root); |
| @@ -3360,52 +3449,6 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) | |||
| 3360 | return btree_read_extent_buffer_pages(root, buf, 0, parent_transid); | 3449 | return btree_read_extent_buffer_pages(root, buf, 0, parent_transid); |
| 3361 | } | 3450 | } |
| 3362 | 3451 | ||
| 3363 | int btree_lock_page_hook(struct page *page, void *data, | ||
| 3364 | void (*flush_fn)(void *)) | ||
| 3365 | { | ||
| 3366 | struct inode *inode = page->mapping->host; | ||
| 3367 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 3368 | struct extent_buffer *eb; | ||
| 3369 | |||
| 3370 | /* | ||
| 3371 | * We culled this eb but the page is still hanging out on the mapping, | ||
| 3372 | * carry on. | ||
| 3373 | */ | ||
| 3374 | if (!PagePrivate(page)) | ||
| 3375 | goto out; | ||
| 3376 | |||
| 3377 | eb = (struct extent_buffer *)page->private; | ||
| 3378 | if (!eb) { | ||
| 3379 | WARN_ON(1); | ||
| 3380 | goto out; | ||
| 3381 | } | ||
| 3382 | if (page != eb->pages[0]) | ||
| 3383 | goto out; | ||
| 3384 | |||
| 3385 | if (!btrfs_try_tree_write_lock(eb)) { | ||
| 3386 | flush_fn(data); | ||
| 3387 | btrfs_tree_lock(eb); | ||
| 3388 | } | ||
| 3389 | btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); | ||
| 3390 | |||
| 3391 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { | ||
| 3392 | spin_lock(&root->fs_info->delalloc_lock); | ||
| 3393 | if (root->fs_info->dirty_metadata_bytes >= eb->len) | ||
| 3394 | root->fs_info->dirty_metadata_bytes -= eb->len; | ||
| 3395 | else | ||
| 3396 | WARN_ON(1); | ||
| 3397 | spin_unlock(&root->fs_info->delalloc_lock); | ||
| 3398 | } | ||
| 3399 | |||
| 3400 | btrfs_tree_unlock(eb); | ||
| 3401 | out: | ||
| 3402 | if (!trylock_page(page)) { | ||
| 3403 | flush_fn(data); | ||
| 3404 | lock_page(page); | ||
| 3405 | } | ||
| 3406 | return 0; | ||
| 3407 | } | ||
| 3408 | |||
| 3409 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | 3452 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, |
| 3410 | int read_only) | 3453 | int read_only) |
| 3411 | { | 3454 | { |
| @@ -3608,7 +3651,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
| 3608 | 3651 | ||
| 3609 | while (1) { | 3652 | while (1) { |
| 3610 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, | 3653 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, |
| 3611 | mark); | 3654 | mark, NULL); |
| 3612 | if (ret) | 3655 | if (ret) |
| 3613 | break; | 3656 | break; |
| 3614 | 3657 | ||
| @@ -3663,7 +3706,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
| 3663 | again: | 3706 | again: |
| 3664 | while (1) { | 3707 | while (1) { |
| 3665 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 3708 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
| 3666 | EXTENT_DIRTY); | 3709 | EXTENT_DIRTY, NULL); |
| 3667 | if (ret) | 3710 | if (ret) |
| 3668 | break; | 3711 | break; |
| 3669 | 3712 | ||
| @@ -3800,7 +3843,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
| 3800 | } | 3843 | } |
| 3801 | 3844 | ||
| 3802 | static struct extent_io_ops btree_extent_io_ops = { | 3845 | static struct extent_io_ops btree_extent_io_ops = { |
| 3803 | .write_cache_pages_lock_hook = btree_lock_page_hook, | ||
| 3804 | .readpage_end_io_hook = btree_readpage_end_io_hook, | 3846 | .readpage_end_io_hook = btree_readpage_end_io_hook, |
| 3805 | .readpage_io_failed_hook = btree_io_failed_hook, | 3847 | .readpage_io_failed_hook = btree_io_failed_hook, |
| 3806 | .submit_bio_hook = btree_submit_bio_hook, | 3848 | .submit_bio_hook = btree_submit_bio_hook, |
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index c5b00a735fef..2025a9132c16 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
| @@ -95,6 +95,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, | |||
| 95 | u64 objectid); | 95 | u64 objectid); |
| 96 | int btree_lock_page_hook(struct page *page, void *data, | 96 | int btree_lock_page_hook(struct page *page, void *data, |
| 97 | void (*flush_fn)(void *)); | 97 | void (*flush_fn)(void *)); |
| 98 | int btrfs_calc_num_tolerated_disk_barrier_failures( | ||
| 99 | struct btrfs_fs_info *fs_info); | ||
| 98 | 100 | ||
| 99 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 101 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 100 | void btrfs_init_lockdep(void); | 102 | void btrfs_init_lockdep(void); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ba58024d40d3..3d3e2c17d8d1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -94,8 +94,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, | |||
| 94 | u64 flags, struct btrfs_disk_key *key, | 94 | u64 flags, struct btrfs_disk_key *key, |
| 95 | int level, struct btrfs_key *ins); | 95 | int level, struct btrfs_key *ins); |
| 96 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, | 96 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, |
| 97 | struct btrfs_root *extent_root, u64 alloc_bytes, | 97 | struct btrfs_root *extent_root, u64 flags, |
| 98 | u64 flags, int force); | 98 | int force); |
| 99 | static int find_next_key(struct btrfs_path *path, int level, | 99 | static int find_next_key(struct btrfs_path *path, int level, |
| 100 | struct btrfs_key *key); | 100 | struct btrfs_key *key); |
| 101 | static void dump_space_info(struct btrfs_space_info *info, u64 bytes, | 101 | static void dump_space_info(struct btrfs_space_info *info, u64 bytes, |
| @@ -312,7 +312,8 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
| 312 | while (start < end) { | 312 | while (start < end) { |
| 313 | ret = find_first_extent_bit(info->pinned_extents, start, | 313 | ret = find_first_extent_bit(info->pinned_extents, start, |
| 314 | &extent_start, &extent_end, | 314 | &extent_start, &extent_end, |
| 315 | EXTENT_DIRTY | EXTENT_UPTODATE); | 315 | EXTENT_DIRTY | EXTENT_UPTODATE, |
| 316 | NULL); | ||
| 316 | if (ret) | 317 | if (ret) |
| 317 | break; | 318 | break; |
| 318 | 319 | ||
| @@ -2361,10 +2362,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, | |||
| 2361 | } | 2362 | } |
| 2362 | 2363 | ||
| 2363 | next: | 2364 | next: |
| 2364 | do_chunk_alloc(trans, fs_info->extent_root, | ||
| 2365 | 2 * 1024 * 1024, | ||
| 2366 | btrfs_get_alloc_profile(root, 0), | ||
| 2367 | CHUNK_ALLOC_NO_FORCE); | ||
| 2368 | cond_resched(); | 2365 | cond_resched(); |
| 2369 | spin_lock(&delayed_refs->lock); | 2366 | spin_lock(&delayed_refs->lock); |
| 2370 | } | 2367 | } |
| @@ -2478,10 +2475,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2478 | if (root == root->fs_info->extent_root) | 2475 | if (root == root->fs_info->extent_root) |
| 2479 | root = root->fs_info->tree_root; | 2476 | root = root->fs_info->tree_root; |
| 2480 | 2477 | ||
| 2481 | do_chunk_alloc(trans, root->fs_info->extent_root, | ||
| 2482 | 2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0), | ||
| 2483 | CHUNK_ALLOC_NO_FORCE); | ||
| 2484 | |||
| 2485 | btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info); | 2478 | btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info); |
| 2486 | 2479 | ||
| 2487 | delayed_refs = &trans->transaction->delayed_refs; | 2480 | delayed_refs = &trans->transaction->delayed_refs; |
| @@ -2551,6 +2544,12 @@ again: | |||
| 2551 | } | 2544 | } |
| 2552 | 2545 | ||
| 2553 | if (run_all) { | 2546 | if (run_all) { |
| 2547 | if (!list_empty(&trans->new_bgs)) { | ||
| 2548 | spin_unlock(&delayed_refs->lock); | ||
| 2549 | btrfs_create_pending_block_groups(trans, root); | ||
| 2550 | spin_lock(&delayed_refs->lock); | ||
| 2551 | } | ||
| 2552 | |||
| 2554 | node = rb_first(&delayed_refs->root); | 2553 | node = rb_first(&delayed_refs->root); |
| 2555 | if (!node) | 2554 | if (!node) |
| 2556 | goto out; | 2555 | goto out; |
| @@ -3406,7 +3405,6 @@ alloc: | |||
| 3406 | return PTR_ERR(trans); | 3405 | return PTR_ERR(trans); |
| 3407 | 3406 | ||
| 3408 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | 3407 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, |
| 3409 | bytes + 2 * 1024 * 1024, | ||
| 3410 | alloc_target, | 3408 | alloc_target, |
| 3411 | CHUNK_ALLOC_NO_FORCE); | 3409 | CHUNK_ALLOC_NO_FORCE); |
| 3412 | btrfs_end_transaction(trans, root); | 3410 | btrfs_end_transaction(trans, root); |
| @@ -3488,8 +3486,7 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) | |||
| 3488 | } | 3486 | } |
| 3489 | 3487 | ||
| 3490 | static int should_alloc_chunk(struct btrfs_root *root, | 3488 | static int should_alloc_chunk(struct btrfs_root *root, |
| 3491 | struct btrfs_space_info *sinfo, u64 alloc_bytes, | 3489 | struct btrfs_space_info *sinfo, int force) |
| 3492 | int force) | ||
| 3493 | { | 3490 | { |
| 3494 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | 3491 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; |
| 3495 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; | 3492 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; |
| @@ -3504,7 +3501,8 @@ static int should_alloc_chunk(struct btrfs_root *root, | |||
| 3504 | * and purposes it's used space. Don't worry about locking the | 3501 | * and purposes it's used space. Don't worry about locking the |
| 3505 | * global_rsv, it doesn't change except when the transaction commits. | 3502 | * global_rsv, it doesn't change except when the transaction commits. |
| 3506 | */ | 3503 | */ |
| 3507 | num_allocated += global_rsv->size; | 3504 | if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA) |
| 3505 | num_allocated += global_rsv->size; | ||
| 3508 | 3506 | ||
| 3509 | /* | 3507 | /* |
| 3510 | * in limited mode, we want to have some free space up to | 3508 | * in limited mode, we want to have some free space up to |
| @@ -3518,15 +3516,8 @@ static int should_alloc_chunk(struct btrfs_root *root, | |||
| 3518 | if (num_bytes - num_allocated < thresh) | 3516 | if (num_bytes - num_allocated < thresh) |
| 3519 | return 1; | 3517 | return 1; |
| 3520 | } | 3518 | } |
| 3521 | thresh = btrfs_super_total_bytes(root->fs_info->super_copy); | ||
| 3522 | 3519 | ||
| 3523 | /* 256MB or 2% of the FS */ | 3520 | if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8)) |
| 3524 | thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2)); | ||
| 3525 | /* system chunks need a much smaller threshold */ | ||
| 3526 | if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM) | ||
| 3527 | thresh = 32 * 1024 * 1024; | ||
| 3528 | |||
| 3529 | if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8)) | ||
| 3530 | return 0; | 3521 | return 0; |
| 3531 | return 1; | 3522 | return 1; |
| 3532 | } | 3523 | } |
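
With alloc_bytes gone, should_alloc_chunk() boils down to one rule: allocate a new chunk once roughly 80% of the space already provisioned for this space_info is consumed (div_factor(x, 8) in btrfs is 8*x/10), with the global reserve counted as used space only for metadata. A minimal userspace sketch of that decision, ignoring the CHUNK_ALLOC_LIMITED and CHUNK_ALLOC_FORCE paths handled elsewhere in the function:

/*
 * Sketch of the simplified should_alloc_chunk() decision (userspace,
 * illustration only).
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t div_factor(uint64_t num, int factor)
{
        return num * factor / 10;
}

static int should_alloc_chunk(uint64_t total_bytes, uint64_t bytes_readonly,
                              uint64_t allocated, uint64_t global_rsv_size,
                              int is_metadata)
{
        uint64_t num_bytes = total_bytes - bytes_readonly;

        /* the global reserve acts like used space, but only for metadata */
        if (is_metadata)
                allocated += global_rsv_size;

        /* allocate once ~80% of the provisioned space is consumed */
        if (allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
                return 0;
        return 1;
}

int main(void)
{
        /* 10GiB data space, 7GiB allocated: 70% < 80%, no new chunk */
        printf("%d\n", should_alloc_chunk(10ULL << 30, 0, 7ULL << 30, 0, 0));
        /* 10GiB data space, 8.5GiB allocated: past the threshold */
        printf("%d\n", should_alloc_chunk(10ULL << 30, 0, 17ULL << 29, 0, 0));
        return 0;
}

The 2 MiB of slop that callers used to pass in as alloc_bytes now lives in this single check, which is what lets do_chunk_alloc() lose its alloc_bytes argument throughout the rest of the file.
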
| @@ -3576,8 +3567,7 @@ static void check_system_chunk(struct btrfs_trans_handle *trans, | |||
| 3576 | } | 3567 | } |
| 3577 | 3568 | ||
| 3578 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, | 3569 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, |
| 3579 | struct btrfs_root *extent_root, u64 alloc_bytes, | 3570 | struct btrfs_root *extent_root, u64 flags, int force) |
| 3580 | u64 flags, int force) | ||
| 3581 | { | 3571 | { |
| 3582 | struct btrfs_space_info *space_info; | 3572 | struct btrfs_space_info *space_info; |
| 3583 | struct btrfs_fs_info *fs_info = extent_root->fs_info; | 3573 | struct btrfs_fs_info *fs_info = extent_root->fs_info; |
| @@ -3601,7 +3591,7 @@ again: | |||
| 3601 | return 0; | 3591 | return 0; |
| 3602 | } | 3592 | } |
| 3603 | 3593 | ||
| 3604 | if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { | 3594 | if (!should_alloc_chunk(extent_root, space_info, force)) { |
| 3605 | spin_unlock(&space_info->lock); | 3595 | spin_unlock(&space_info->lock); |
| 3606 | return 0; | 3596 | return 0; |
| 3607 | } else if (space_info->chunk_alloc) { | 3597 | } else if (space_info->chunk_alloc) { |
| @@ -3669,6 +3659,46 @@ out: | |||
| 3669 | return ret; | 3659 | return ret; |
| 3670 | } | 3660 | } |
| 3671 | 3661 | ||
| 3662 | static int can_overcommit(struct btrfs_root *root, | ||
| 3663 | struct btrfs_space_info *space_info, u64 bytes, | ||
| 3664 | int flush) | ||
| 3665 | { | ||
| 3666 | u64 profile = btrfs_get_alloc_profile(root, 0); | ||
| 3667 | u64 avail; | ||
| 3668 | u64 used; | ||
| 3669 | |||
| 3670 | used = space_info->bytes_used + space_info->bytes_reserved + | ||
| 3671 | space_info->bytes_pinned + space_info->bytes_readonly + | ||
| 3672 | space_info->bytes_may_use; | ||
| 3673 | |||
| 3674 | spin_lock(&root->fs_info->free_chunk_lock); | ||
| 3675 | avail = root->fs_info->free_chunk_space; | ||
| 3676 | spin_unlock(&root->fs_info->free_chunk_lock); | ||
| 3677 | |||
| 3678 | /* | ||
| 3679 | * If we have dup, raid1 or raid10 then only half of the free | ||
| 3680 | * space is actually useable. | ||
| 3681 | */ | ||
| 3682 | if (profile & (BTRFS_BLOCK_GROUP_DUP | | ||
| 3683 | BTRFS_BLOCK_GROUP_RAID1 | | ||
| 3684 | BTRFS_BLOCK_GROUP_RAID10)) | ||
| 3685 | avail >>= 1; | ||
| 3686 | |||
| 3687 | /* | ||
| 3688 | * If we aren't flushing don't let us overcommit too much, say | ||
| 3689 | * 1/8th of the space. If we can flush, let it overcommit up to | ||
| 3690 | * 1/2 of the space. | ||
| 3691 | */ | ||
| 3692 | if (flush) | ||
| 3693 | avail >>= 3; | ||
| 3694 | else | ||
| 3695 | avail >>= 1; | ||
| 3696 | |||
| 3697 | if (used + bytes < space_info->total_bytes + avail) | ||
| 3698 | return 1; | ||
| 3699 | return 0; | ||
| 3700 | } | ||
| 3701 | |||
| 3672 | /* | 3702 | /* |
| 3673 | * shrink metadata reservation for delalloc | 3703 | * shrink metadata reservation for delalloc |
| 3674 | */ | 3704 | */ |
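
can_overcommit() centralizes the arithmetic that reserve_metadata_bytes() previously open-coded and that shrink_delalloc() now reuses as its loop-exit test: take the unallocated device space, halve it for mirrored profiles since every byte is written twice, scale it down further depending on whether the caller may flush, and allow the reservation if used + bytes still fits under total_bytes plus that headroom. A standalone sketch of the arithmetic, with invented parameter names and the locking and profile lookup left out:

/*
 * Userspace sketch of the can_overcommit() arithmetic; the PROFILE_*
 * bit positions are assumptions and the shifts mirror the kernel code
 * above.
 */
#include <stdio.h>
#include <stdint.h>

#define PROFILE_RAID1   (1ULL << 4)
#define PROFILE_DUP     (1ULL << 5)
#define PROFILE_RAID10  (1ULL << 6)

static int can_overcommit(uint64_t used, uint64_t total_bytes,
                          uint64_t free_chunk_space, uint64_t profile,
                          uint64_t bytes, int flush)
{
        uint64_t avail = free_chunk_space;

        /* mirrored profiles store every byte twice */
        if (profile & (PROFILE_DUP | PROFILE_RAID1 | PROFILE_RAID10))
                avail >>= 1;

        /* same scaling as the patch: 1/8 of the slack with flush set,
         * 1/2 of it otherwise */
        if (flush)
                avail >>= 3;
        else
                avail >>= 1;

        return used + bytes < total_bytes + avail;
}

int main(void)
{
        /*
         * A full 1GiB metadata space_info, 8GiB of unallocated RAID1
         * device space, flush allowed: 8GiB/2/8 = 512MiB of headroom,
         * so a 100MiB reservation is still accepted.
         */
        printf("%d\n", can_overcommit(1ULL << 30, 1ULL << 30, 8ULL << 30,
                                      PROFILE_RAID1, 100ULL << 20, 1));
        return 0;
}
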
| @@ -3693,7 +3723,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, | |||
| 3693 | if (delalloc_bytes == 0) { | 3723 | if (delalloc_bytes == 0) { |
| 3694 | if (trans) | 3724 | if (trans) |
| 3695 | return; | 3725 | return; |
| 3696 | btrfs_wait_ordered_extents(root, 0, 0); | 3726 | btrfs_wait_ordered_extents(root, 0); |
| 3697 | return; | 3727 | return; |
| 3698 | } | 3728 | } |
| 3699 | 3729 | ||
| @@ -3703,11 +3733,15 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, | |||
| 3703 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages, | 3733 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages, |
| 3704 | WB_REASON_FS_FREE_SPACE); | 3734 | WB_REASON_FS_FREE_SPACE); |
| 3705 | 3735 | ||
| 3736 | /* | ||
| 3737 | * We need to wait for the async pages to actually start before | ||
| 3738 | * we do anything. | ||
| 3739 | */ | ||
| 3740 | wait_event(root->fs_info->async_submit_wait, | ||
| 3741 | !atomic_read(&root->fs_info->async_delalloc_pages)); | ||
| 3742 | |||
| 3706 | spin_lock(&space_info->lock); | 3743 | spin_lock(&space_info->lock); |
| 3707 | if (space_info->bytes_used + space_info->bytes_reserved + | 3744 | if (can_overcommit(root, space_info, orig, !trans)) { |
| 3708 | space_info->bytes_pinned + space_info->bytes_readonly + | ||
| 3709 | space_info->bytes_may_use + orig <= | ||
| 3710 | space_info->total_bytes) { | ||
| 3711 | spin_unlock(&space_info->lock); | 3745 | spin_unlock(&space_info->lock); |
| 3712 | break; | 3746 | break; |
| 3713 | } | 3747 | } |
| @@ -3715,7 +3749,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, | |||
| 3715 | 3749 | ||
| 3716 | loops++; | 3750 | loops++; |
| 3717 | if (wait_ordered && !trans) { | 3751 | if (wait_ordered && !trans) { |
| 3718 | btrfs_wait_ordered_extents(root, 0, 0); | 3752 | btrfs_wait_ordered_extents(root, 0); |
| 3719 | } else { | 3753 | } else { |
| 3720 | time_left = schedule_timeout_killable(1); | 3754 | time_left = schedule_timeout_killable(1); |
| 3721 | if (time_left) | 3755 | if (time_left) |
| @@ -3784,11 +3818,12 @@ commit: | |||
| 3784 | } | 3818 | } |
| 3785 | 3819 | ||
| 3786 | enum flush_state { | 3820 | enum flush_state { |
| 3787 | FLUSH_DELALLOC = 1, | 3821 | FLUSH_DELAYED_ITEMS_NR = 1, |
| 3788 | FLUSH_DELALLOC_WAIT = 2, | 3822 | FLUSH_DELAYED_ITEMS = 2, |
| 3789 | FLUSH_DELAYED_ITEMS_NR = 3, | 3823 | FLUSH_DELALLOC = 3, |
| 3790 | FLUSH_DELAYED_ITEMS = 4, | 3824 | FLUSH_DELALLOC_WAIT = 4, |
| 3791 | COMMIT_TRANS = 5, | 3825 | ALLOC_CHUNK = 5, |
| 3826 | COMMIT_TRANS = 6, | ||
| 3792 | }; | 3827 | }; |
| 3793 | 3828 | ||
| 3794 | static int flush_space(struct btrfs_root *root, | 3829 | static int flush_space(struct btrfs_root *root, |
| @@ -3800,11 +3835,6 @@ static int flush_space(struct btrfs_root *root, | |||
| 3800 | int ret = 0; | 3835 | int ret = 0; |
| 3801 | 3836 | ||
| 3802 | switch (state) { | 3837 | switch (state) { |
| 3803 | case FLUSH_DELALLOC: | ||
| 3804 | case FLUSH_DELALLOC_WAIT: | ||
| 3805 | shrink_delalloc(root, num_bytes, orig_bytes, | ||
| 3806 | state == FLUSH_DELALLOC_WAIT); | ||
| 3807 | break; | ||
| 3808 | case FLUSH_DELAYED_ITEMS_NR: | 3838 | case FLUSH_DELAYED_ITEMS_NR: |
| 3809 | case FLUSH_DELAYED_ITEMS: | 3839 | case FLUSH_DELAYED_ITEMS: |
| 3810 | if (state == FLUSH_DELAYED_ITEMS_NR) { | 3840 | if (state == FLUSH_DELAYED_ITEMS_NR) { |
| @@ -3825,6 +3855,24 @@ static int flush_space(struct btrfs_root *root, | |||
| 3825 | ret = btrfs_run_delayed_items_nr(trans, root, nr); | 3855 | ret = btrfs_run_delayed_items_nr(trans, root, nr); |
| 3826 | btrfs_end_transaction(trans, root); | 3856 | btrfs_end_transaction(trans, root); |
| 3827 | break; | 3857 | break; |
| 3858 | case FLUSH_DELALLOC: | ||
| 3859 | case FLUSH_DELALLOC_WAIT: | ||
| 3860 | shrink_delalloc(root, num_bytes, orig_bytes, | ||
| 3861 | state == FLUSH_DELALLOC_WAIT); | ||
| 3862 | break; | ||
| 3863 | case ALLOC_CHUNK: | ||
| 3864 | trans = btrfs_join_transaction(root); | ||
| 3865 | if (IS_ERR(trans)) { | ||
| 3866 | ret = PTR_ERR(trans); | ||
| 3867 | break; | ||
| 3868 | } | ||
| 3869 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
| 3870 | btrfs_get_alloc_profile(root, 0), | ||
| 3871 | CHUNK_ALLOC_NO_FORCE); | ||
| 3872 | btrfs_end_transaction(trans, root); | ||
| 3873 | if (ret == -ENOSPC) | ||
| 3874 | ret = 0; | ||
| 3875 | break; | ||
| 3828 | case COMMIT_TRANS: | 3876 | case COMMIT_TRANS: |
| 3829 | ret = may_commit_transaction(root, space_info, orig_bytes, 0); | 3877 | ret = may_commit_transaction(root, space_info, orig_bytes, 0); |
| 3830 | break; | 3878 | break; |
| @@ -3856,10 +3904,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root, | |||
| 3856 | struct btrfs_space_info *space_info = block_rsv->space_info; | 3904 | struct btrfs_space_info *space_info = block_rsv->space_info; |
| 3857 | u64 used; | 3905 | u64 used; |
| 3858 | u64 num_bytes = orig_bytes; | 3906 | u64 num_bytes = orig_bytes; |
| 3859 | int flush_state = FLUSH_DELALLOC; | 3907 | int flush_state = FLUSH_DELAYED_ITEMS_NR; |
| 3860 | int ret = 0; | 3908 | int ret = 0; |
| 3861 | bool flushing = false; | 3909 | bool flushing = false; |
| 3862 | bool committed = false; | ||
| 3863 | 3910 | ||
| 3864 | again: | 3911 | again: |
| 3865 | ret = 0; | 3912 | ret = 0; |
| @@ -3922,57 +3969,12 @@ again: | |||
| 3922 | (orig_bytes * 2); | 3969 | (orig_bytes * 2); |
| 3923 | } | 3970 | } |
| 3924 | 3971 | ||
| 3925 | if (ret) { | 3972 | if (ret && can_overcommit(root, space_info, orig_bytes, flush)) { |
| 3926 | u64 profile = btrfs_get_alloc_profile(root, 0); | 3973 | space_info->bytes_may_use += orig_bytes; |
| 3927 | u64 avail; | 3974 | trace_btrfs_space_reservation(root->fs_info, "space_info", |
| 3928 | 3975 | space_info->flags, orig_bytes, | |
| 3929 | /* | 3976 | 1); |
| 3930 | * If we have a lot of space that's pinned, don't bother doing | 3977 | ret = 0; |
| 3931 | * the overcommit dance yet and just commit the transaction. | ||
| 3932 | */ | ||
| 3933 | avail = (space_info->total_bytes - space_info->bytes_used) * 8; | ||
| 3934 | do_div(avail, 10); | ||
| 3935 | if (space_info->bytes_pinned >= avail && flush && !committed) { | ||
| 3936 | space_info->flush = 1; | ||
| 3937 | flushing = true; | ||
| 3938 | spin_unlock(&space_info->lock); | ||
| 3939 | ret = may_commit_transaction(root, space_info, | ||
| 3940 | orig_bytes, 1); | ||
| 3941 | if (ret) | ||
| 3942 | goto out; | ||
| 3943 | committed = true; | ||
| 3944 | goto again; | ||
| 3945 | } | ||
| 3946 | |||
| 3947 | spin_lock(&root->fs_info->free_chunk_lock); | ||
| 3948 | avail = root->fs_info->free_chunk_space; | ||
| 3949 | |||
| 3950 | /* | ||
| 3951 | * If we have dup, raid1 or raid10 then only half of the free | ||
| 3952 | * space is actually useable. | ||
| 3953 | */ | ||
| 3954 | if (profile & (BTRFS_BLOCK_GROUP_DUP | | ||
| 3955 | BTRFS_BLOCK_GROUP_RAID1 | | ||
| 3956 | BTRFS_BLOCK_GROUP_RAID10)) | ||
| 3957 | avail >>= 1; | ||
| 3958 | |||
| 3959 | /* | ||
| 3960 | * If we aren't flushing don't let us overcommit too much, say | ||
| 3961 | * 1/8th of the space. If we can flush, let it overcommit up to | ||
| 3962 | * 1/2 of the space. | ||
| 3963 | */ | ||
| 3964 | if (flush) | ||
| 3965 | avail >>= 3; | ||
| 3966 | else | ||
| 3967 | avail >>= 1; | ||
| 3968 | spin_unlock(&root->fs_info->free_chunk_lock); | ||
| 3969 | |||
| 3970 | if (used + num_bytes < space_info->total_bytes + avail) { | ||
| 3971 | space_info->bytes_may_use += orig_bytes; | ||
| 3972 | trace_btrfs_space_reservation(root->fs_info, | ||
| 3973 | "space_info", space_info->flags, orig_bytes, 1); | ||
| 3974 | ret = 0; | ||
| 3975 | } | ||
| 3976 | } | 3978 | } |
| 3977 | 3979 | ||
| 3978 | /* | 3980 | /* |
| @@ -4114,13 +4116,15 @@ static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, | |||
| 4114 | return 0; | 4116 | return 0; |
| 4115 | } | 4117 | } |
| 4116 | 4118 | ||
| 4117 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv) | 4119 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type) |
| 4118 | { | 4120 | { |
| 4119 | memset(rsv, 0, sizeof(*rsv)); | 4121 | memset(rsv, 0, sizeof(*rsv)); |
| 4120 | spin_lock_init(&rsv->lock); | 4122 | spin_lock_init(&rsv->lock); |
| 4123 | rsv->type = type; | ||
| 4121 | } | 4124 | } |
| 4122 | 4125 | ||
| 4123 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) | 4126 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, |
| 4127 | unsigned short type) | ||
| 4124 | { | 4128 | { |
| 4125 | struct btrfs_block_rsv *block_rsv; | 4129 | struct btrfs_block_rsv *block_rsv; |
| 4126 | struct btrfs_fs_info *fs_info = root->fs_info; | 4130 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -4129,7 +4133,7 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) | |||
| 4129 | if (!block_rsv) | 4133 | if (!block_rsv) |
| 4130 | return NULL; | 4134 | return NULL; |
| 4131 | 4135 | ||
| 4132 | btrfs_init_block_rsv(block_rsv); | 4136 | btrfs_init_block_rsv(block_rsv, type); |
| 4133 | block_rsv->space_info = __find_space_info(fs_info, | 4137 | block_rsv->space_info = __find_space_info(fs_info, |
| 4134 | BTRFS_BLOCK_GROUP_METADATA); | 4138 | BTRFS_BLOCK_GROUP_METADATA); |
| 4135 | return block_rsv; | 4139 | return block_rsv; |
| @@ -4138,6 +4142,8 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) | |||
| 4138 | void btrfs_free_block_rsv(struct btrfs_root *root, | 4142 | void btrfs_free_block_rsv(struct btrfs_root *root, |
| 4139 | struct btrfs_block_rsv *rsv) | 4143 | struct btrfs_block_rsv *rsv) |
| 4140 | { | 4144 | { |
| 4145 | if (!rsv) | ||
| 4146 | return; | ||
| 4141 | btrfs_block_rsv_release(root, rsv, (u64)-1); | 4147 | btrfs_block_rsv_release(root, rsv, (u64)-1); |
| 4142 | kfree(rsv); | 4148 | kfree(rsv); |
| 4143 | } | 4149 | } |
| @@ -4416,10 +4422,10 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, | |||
| 4416 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); | 4422 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); |
| 4417 | struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; | 4423 | struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; |
| 4418 | /* | 4424 | /* |
| 4419 | * two for root back/forward refs, two for directory entries | 4425 | * two for root back/forward refs, two for directory entries, |
| 4420 | * and one for root of the snapshot. | 4426 | * one for root of the snapshot and one for parent inode. |
| 4421 | */ | 4427 | */ |
| 4422 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); | 4428 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6); |
| 4423 | dst_rsv->space_info = src_rsv->space_info; | 4429 | dst_rsv->space_info = src_rsv->space_info; |
| 4424 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | 4430 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); |
| 4425 | } | 4431 | } |
| @@ -5018,7 +5024,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
| 5018 | 5024 | ||
| 5019 | while (1) { | 5025 | while (1) { |
| 5020 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 5026 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
| 5021 | EXTENT_DIRTY); | 5027 | EXTENT_DIRTY, NULL); |
| 5022 | if (ret) | 5028 | if (ret) |
| 5023 | break; | 5029 | break; |
| 5024 | 5030 | ||
| @@ -5096,8 +5102,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5096 | ret = remove_extent_backref(trans, extent_root, path, | 5102 | ret = remove_extent_backref(trans, extent_root, path, |
| 5097 | NULL, refs_to_drop, | 5103 | NULL, refs_to_drop, |
| 5098 | is_data); | 5104 | is_data); |
| 5099 | if (ret) | 5105 | if (ret) { |
| 5100 | goto abort; | 5106 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5107 | goto out; | ||
| 5108 | } | ||
| 5101 | btrfs_release_path(path); | 5109 | btrfs_release_path(path); |
| 5102 | path->leave_spinning = 1; | 5110 | path->leave_spinning = 1; |
| 5103 | 5111 | ||
| @@ -5115,8 +5123,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5115 | btrfs_print_leaf(extent_root, | 5123 | btrfs_print_leaf(extent_root, |
| 5116 | path->nodes[0]); | 5124 | path->nodes[0]); |
| 5117 | } | 5125 | } |
| 5118 | if (ret < 0) | 5126 | if (ret < 0) { |
| 5119 | goto abort; | 5127 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5128 | goto out; | ||
| 5129 | } | ||
| 5120 | extent_slot = path->slots[0]; | 5130 | extent_slot = path->slots[0]; |
| 5121 | } | 5131 | } |
| 5122 | } else if (ret == -ENOENT) { | 5132 | } else if (ret == -ENOENT) { |
| @@ -5130,7 +5140,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5130 | (unsigned long long)owner_objectid, | 5140 | (unsigned long long)owner_objectid, |
| 5131 | (unsigned long long)owner_offset); | 5141 | (unsigned long long)owner_offset); |
| 5132 | } else { | 5142 | } else { |
| 5133 | goto abort; | 5143 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5144 | goto out; | ||
| 5134 | } | 5145 | } |
| 5135 | 5146 | ||
| 5136 | leaf = path->nodes[0]; | 5147 | leaf = path->nodes[0]; |
| @@ -5140,8 +5151,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5140 | BUG_ON(found_extent || extent_slot != path->slots[0]); | 5151 | BUG_ON(found_extent || extent_slot != path->slots[0]); |
| 5141 | ret = convert_extent_item_v0(trans, extent_root, path, | 5152 | ret = convert_extent_item_v0(trans, extent_root, path, |
| 5142 | owner_objectid, 0); | 5153 | owner_objectid, 0); |
| 5143 | if (ret < 0) | 5154 | if (ret < 0) { |
| 5144 | goto abort; | 5155 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5156 | goto out; | ||
| 5157 | } | ||
| 5145 | 5158 | ||
| 5146 | btrfs_release_path(path); | 5159 | btrfs_release_path(path); |
| 5147 | path->leave_spinning = 1; | 5160 | path->leave_spinning = 1; |
| @@ -5158,8 +5171,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5158 | (unsigned long long)bytenr); | 5171 | (unsigned long long)bytenr); |
| 5159 | btrfs_print_leaf(extent_root, path->nodes[0]); | 5172 | btrfs_print_leaf(extent_root, path->nodes[0]); |
| 5160 | } | 5173 | } |
| 5161 | if (ret < 0) | 5174 | if (ret < 0) { |
| 5162 | goto abort; | 5175 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5176 | goto out; | ||
| 5177 | } | ||
| 5178 | |||
| 5163 | extent_slot = path->slots[0]; | 5179 | extent_slot = path->slots[0]; |
| 5164 | leaf = path->nodes[0]; | 5180 | leaf = path->nodes[0]; |
| 5165 | item_size = btrfs_item_size_nr(leaf, extent_slot); | 5181 | item_size = btrfs_item_size_nr(leaf, extent_slot); |
| @@ -5196,8 +5212,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5196 | ret = remove_extent_backref(trans, extent_root, path, | 5212 | ret = remove_extent_backref(trans, extent_root, path, |
| 5197 | iref, refs_to_drop, | 5213 | iref, refs_to_drop, |
| 5198 | is_data); | 5214 | is_data); |
| 5199 | if (ret) | 5215 | if (ret) { |
| 5200 | goto abort; | 5216 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5217 | goto out; | ||
| 5218 | } | ||
| 5201 | } | 5219 | } |
| 5202 | } else { | 5220 | } else { |
| 5203 | if (found_extent) { | 5221 | if (found_extent) { |
| @@ -5214,27 +5232,29 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 5214 | 5232 | ||
| 5215 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], | 5233 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], |
| 5216 | num_to_del); | 5234 | num_to_del); |
| 5217 | if (ret) | 5235 | if (ret) { |
| 5218 | goto abort; | 5236 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5237 | goto out; | ||
| 5238 | } | ||
| 5219 | btrfs_release_path(path); | 5239 | btrfs_release_path(path); |
| 5220 | 5240 | ||
| 5221 | if (is_data) { | 5241 | if (is_data) { |
| 5222 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); | 5242 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); |
| 5223 | if (ret) | 5243 | if (ret) { |
| 5224 | goto abort; | 5244 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5245 | goto out; | ||
| 5246 | } | ||
| 5225 | } | 5247 | } |
| 5226 | 5248 | ||
| 5227 | ret = update_block_group(trans, root, bytenr, num_bytes, 0); | 5249 | ret = update_block_group(trans, root, bytenr, num_bytes, 0); |
| 5228 | if (ret) | 5250 | if (ret) { |
| 5229 | goto abort; | 5251 | btrfs_abort_transaction(trans, extent_root, ret); |
| 5252 | goto out; | ||
| 5253 | } | ||
| 5230 | } | 5254 | } |
| 5231 | out: | 5255 | out: |
| 5232 | btrfs_free_path(path); | 5256 | btrfs_free_path(path); |
| 5233 | return ret; | 5257 | return ret; |
| 5234 | |||
| 5235 | abort: | ||
| 5236 | btrfs_abort_transaction(trans, extent_root, ret); | ||
| 5237 | goto out; | ||
| 5238 | } | 5258 | } |
| 5239 | 5259 | ||
| 5240 | /* | 5260 | /* |
| @@ -5497,8 +5517,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
| 5497 | struct btrfs_block_group_cache *used_block_group; | 5517 | struct btrfs_block_group_cache *used_block_group; |
| 5498 | u64 search_start = 0; | 5518 | u64 search_start = 0; |
| 5499 | int empty_cluster = 2 * 1024 * 1024; | 5519 | int empty_cluster = 2 * 1024 * 1024; |
| 5500 | int allowed_chunk_alloc = 0; | ||
| 5501 | int done_chunk_alloc = 0; | ||
| 5502 | struct btrfs_space_info *space_info; | 5520 | struct btrfs_space_info *space_info; |
| 5503 | int loop = 0; | 5521 | int loop = 0; |
| 5504 | int index = 0; | 5522 | int index = 0; |
| @@ -5530,9 +5548,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
| 5530 | if (btrfs_mixed_space_info(space_info)) | 5548 | if (btrfs_mixed_space_info(space_info)) |
| 5531 | use_cluster = false; | 5549 | use_cluster = false; |
| 5532 | 5550 | ||
| 5533 | if (orig_root->ref_cows || empty_size) | ||
| 5534 | allowed_chunk_alloc = 1; | ||
| 5535 | |||
| 5536 | if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { | 5551 | if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { |
| 5537 | last_ptr = &root->fs_info->meta_alloc_cluster; | 5552 | last_ptr = &root->fs_info->meta_alloc_cluster; |
| 5538 | if (!btrfs_test_opt(root, SSD)) | 5553 | if (!btrfs_test_opt(root, SSD)) |
| @@ -5806,10 +5821,6 @@ checks: | |||
| 5806 | 5821 | ||
| 5807 | trace_btrfs_reserve_extent(orig_root, block_group, | 5822 | trace_btrfs_reserve_extent(orig_root, block_group, |
| 5808 | search_start, num_bytes); | 5823 | search_start, num_bytes); |
| 5809 | if (offset < search_start) | ||
| 5810 | btrfs_add_free_space(used_block_group, offset, | ||
| 5811 | search_start - offset); | ||
| 5812 | BUG_ON(offset > search_start); | ||
| 5813 | if (used_block_group != block_group) | 5824 | if (used_block_group != block_group) |
| 5814 | btrfs_put_block_group(used_block_group); | 5825 | btrfs_put_block_group(used_block_group); |
| 5815 | btrfs_put_block_group(block_group); | 5826 | btrfs_put_block_group(block_group); |
| @@ -5842,34 +5853,17 @@ loop: | |||
| 5842 | index = 0; | 5853 | index = 0; |
| 5843 | loop++; | 5854 | loop++; |
| 5844 | if (loop == LOOP_ALLOC_CHUNK) { | 5855 | if (loop == LOOP_ALLOC_CHUNK) { |
| 5845 | if (allowed_chunk_alloc) { | 5856 | ret = do_chunk_alloc(trans, root, data, |
| 5846 | ret = do_chunk_alloc(trans, root, num_bytes + | 5857 | CHUNK_ALLOC_FORCE); |
| 5847 | 2 * 1024 * 1024, data, | 5858 | /* |
| 5848 | CHUNK_ALLOC_LIMITED); | 5859 | * Do not bail out on ENOSPC since we |
| 5849 | /* | 5860 | * can do more things. |
| 5850 | * Do not bail out on ENOSPC since we | 5861 | */ |
| 5851 | * can do more things. | 5862 | if (ret < 0 && ret != -ENOSPC) { |
| 5852 | */ | 5863 | btrfs_abort_transaction(trans, |
| 5853 | if (ret < 0 && ret != -ENOSPC) { | 5864 | root, ret); |
| 5854 | btrfs_abort_transaction(trans, | 5865 | goto out; |
| 5855 | root, ret); | ||
| 5856 | goto out; | ||
| 5857 | } | ||
| 5858 | allowed_chunk_alloc = 0; | ||
| 5859 | if (ret == 1) | ||
| 5860 | done_chunk_alloc = 1; | ||
| 5861 | } else if (!done_chunk_alloc && | ||
| 5862 | space_info->force_alloc == | ||
| 5863 | CHUNK_ALLOC_NO_FORCE) { | ||
| 5864 | space_info->force_alloc = CHUNK_ALLOC_LIMITED; | ||
| 5865 | } | 5866 | } |
| 5866 | |||
| 5867 | /* | ||
| 5868 | * We didn't allocate a chunk, go ahead and drop the | ||
| 5869 | * empty size and loop again. | ||
| 5870 | */ | ||
| 5871 | if (!done_chunk_alloc) | ||
| 5872 | loop = LOOP_NO_EMPTY_SIZE; | ||
| 5873 | } | 5867 | } |
| 5874 | 5868 | ||
| 5875 | if (loop == LOOP_NO_EMPTY_SIZE) { | 5869 | if (loop == LOOP_NO_EMPTY_SIZE) { |
| @@ -5944,20 +5938,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, | |||
| 5944 | 5938 | ||
| 5945 | data = btrfs_get_alloc_profile(root, data); | 5939 | data = btrfs_get_alloc_profile(root, data); |
| 5946 | again: | 5940 | again: |
| 5947 | /* | ||
| 5948 | * the only place that sets empty_size is btrfs_realloc_node, which | ||
| 5949 | * is not called recursively on allocations | ||
| 5950 | */ | ||
| 5951 | if (empty_size || root->ref_cows) { | ||
| 5952 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
| 5953 | num_bytes + 2 * 1024 * 1024, data, | ||
| 5954 | CHUNK_ALLOC_NO_FORCE); | ||
| 5955 | if (ret < 0 && ret != -ENOSPC) { | ||
| 5956 | btrfs_abort_transaction(trans, root, ret); | ||
| 5957 | return ret; | ||
| 5958 | } | ||
| 5959 | } | ||
| 5960 | |||
| 5961 | WARN_ON(num_bytes < root->sectorsize); | 5941 | WARN_ON(num_bytes < root->sectorsize); |
| 5962 | ret = find_free_extent(trans, root, num_bytes, empty_size, | 5942 | ret = find_free_extent(trans, root, num_bytes, empty_size, |
| 5963 | hint_byte, ins, data); | 5943 | hint_byte, ins, data); |
| @@ -5967,12 +5947,6 @@ again: | |||
| 5967 | num_bytes = num_bytes >> 1; | 5947 | num_bytes = num_bytes >> 1; |
| 5968 | num_bytes = num_bytes & ~(root->sectorsize - 1); | 5948 | num_bytes = num_bytes & ~(root->sectorsize - 1); |
| 5969 | num_bytes = max(num_bytes, min_alloc_size); | 5949 | num_bytes = max(num_bytes, min_alloc_size); |
| 5970 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
| 5971 | num_bytes, data, CHUNK_ALLOC_FORCE); | ||
| 5972 | if (ret < 0 && ret != -ENOSPC) { | ||
| 5973 | btrfs_abort_transaction(trans, root, ret); | ||
| 5974 | return ret; | ||
| 5975 | } | ||
| 5976 | if (num_bytes == min_alloc_size) | 5950 | if (num_bytes == min_alloc_size) |
| 5977 | final_tried = true; | 5951 | final_tried = true; |
| 5978 | goto again; | 5952 | goto again; |
| @@ -6314,7 +6288,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, | |||
| 6314 | ret = block_rsv_use_bytes(block_rsv, blocksize); | 6288 | ret = block_rsv_use_bytes(block_rsv, blocksize); |
| 6315 | if (!ret) | 6289 | if (!ret) |
| 6316 | return block_rsv; | 6290 | return block_rsv; |
| 6317 | if (ret) { | 6291 | if (ret && !block_rsv->failfast) { |
| 6318 | static DEFINE_RATELIMIT_STATE(_rs, | 6292 | static DEFINE_RATELIMIT_STATE(_rs, |
| 6319 | DEFAULT_RATELIMIT_INTERVAL, | 6293 | DEFAULT_RATELIMIT_INTERVAL, |
| 6320 | /*DEFAULT_RATELIMIT_BURST*/ 2); | 6294 | /*DEFAULT_RATELIMIT_BURST*/ 2); |
| @@ -7279,7 +7253,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, | |||
| 7279 | 7253 | ||
| 7280 | alloc_flags = update_block_group_flags(root, cache->flags); | 7254 | alloc_flags = update_block_group_flags(root, cache->flags); |
| 7281 | if (alloc_flags != cache->flags) { | 7255 | if (alloc_flags != cache->flags) { |
| 7282 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | 7256 | ret = do_chunk_alloc(trans, root, alloc_flags, |
| 7283 | CHUNK_ALLOC_FORCE); | 7257 | CHUNK_ALLOC_FORCE); |
| 7284 | if (ret < 0) | 7258 | if (ret < 0) |
| 7285 | goto out; | 7259 | goto out; |
| @@ -7289,7 +7263,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, | |||
| 7289 | if (!ret) | 7263 | if (!ret) |
| 7290 | goto out; | 7264 | goto out; |
| 7291 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); | 7265 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); |
| 7292 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | 7266 | ret = do_chunk_alloc(trans, root, alloc_flags, |
| 7293 | CHUNK_ALLOC_FORCE); | 7267 | CHUNK_ALLOC_FORCE); |
| 7294 | if (ret < 0) | 7268 | if (ret < 0) |
| 7295 | goto out; | 7269 | goto out; |
| @@ -7303,7 +7277,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | |||
| 7303 | struct btrfs_root *root, u64 type) | 7277 | struct btrfs_root *root, u64 type) |
| 7304 | { | 7278 | { |
| 7305 | u64 alloc_flags = get_alloc_profile(root, type); | 7279 | u64 alloc_flags = get_alloc_profile(root, type); |
| 7306 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | 7280 | return do_chunk_alloc(trans, root, alloc_flags, |
| 7307 | CHUNK_ALLOC_FORCE); | 7281 | CHUNK_ALLOC_FORCE); |
| 7308 | } | 7282 | } |
| 7309 | 7283 | ||
| @@ -7810,6 +7784,34 @@ error: | |||
| 7810 | return ret; | 7784 | return ret; |
| 7811 | } | 7785 | } |
| 7812 | 7786 | ||
| 7787 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, | ||
| 7788 | struct btrfs_root *root) | ||
| 7789 | { | ||
| 7790 | struct btrfs_block_group_cache *block_group, *tmp; | ||
| 7791 | struct btrfs_root *extent_root = root->fs_info->extent_root; | ||
| 7792 | struct btrfs_block_group_item item; | ||
| 7793 | struct btrfs_key key; | ||
| 7794 | int ret = 0; | ||
| 7795 | |||
| 7796 | list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, | ||
| 7797 | new_bg_list) { | ||
| 7798 | list_del_init(&block_group->new_bg_list); | ||
| 7799 | |||
| 7800 | if (ret) | ||
| 7801 | continue; | ||
| 7802 | |||
| 7803 | spin_lock(&block_group->lock); | ||
| 7804 | memcpy(&item, &block_group->item, sizeof(item)); | ||
| 7805 | memcpy(&key, &block_group->key, sizeof(key)); | ||
| 7806 | spin_unlock(&block_group->lock); | ||
| 7807 | |||
| 7808 | ret = btrfs_insert_item(trans, extent_root, &key, &item, | ||
| 7809 | sizeof(item)); | ||
| 7810 | if (ret) | ||
| 7811 | btrfs_abort_transaction(trans, extent_root, ret); | ||
| 7812 | } | ||
| 7813 | } | ||
| 7814 | |||
| 7813 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, | 7815 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, |
| 7814 | struct btrfs_root *root, u64 bytes_used, | 7816 | struct btrfs_root *root, u64 bytes_used, |
| 7815 | u64 type, u64 chunk_objectid, u64 chunk_offset, | 7817 | u64 type, u64 chunk_objectid, u64 chunk_offset, |
| @@ -7843,6 +7845,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
| 7843 | spin_lock_init(&cache->lock); | 7845 | spin_lock_init(&cache->lock); |
| 7844 | INIT_LIST_HEAD(&cache->list); | 7846 | INIT_LIST_HEAD(&cache->list); |
| 7845 | INIT_LIST_HEAD(&cache->cluster_list); | 7847 | INIT_LIST_HEAD(&cache->cluster_list); |
| 7848 | INIT_LIST_HEAD(&cache->new_bg_list); | ||
| 7846 | 7849 | ||
| 7847 | btrfs_init_free_space_ctl(cache); | 7850 | btrfs_init_free_space_ctl(cache); |
| 7848 | 7851 | ||
| @@ -7874,12 +7877,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
| 7874 | ret = btrfs_add_block_group_cache(root->fs_info, cache); | 7877 | ret = btrfs_add_block_group_cache(root->fs_info, cache); |
| 7875 | BUG_ON(ret); /* Logic error */ | 7878 | BUG_ON(ret); /* Logic error */ |
| 7876 | 7879 | ||
| 7877 | ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, | 7880 | list_add_tail(&cache->new_bg_list, &trans->new_bgs); |
| 7878 | sizeof(cache->item)); | ||
| 7879 | if (ret) { | ||
| 7880 | btrfs_abort_transaction(trans, extent_root, ret); | ||
| 7881 | return ret; | ||
| 7882 | } | ||
| 7883 | 7881 | ||
| 7884 | set_avail_alloc_bits(extent_root->fs_info, type); | 7882 | set_avail_alloc_bits(extent_root->fs_info, type); |
| 7885 | 7883 | ||
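
The last two hunks in this file implement the same idea from both ends: btrfs_make_block_group() no longer inserts the block group item into the extent tree on the spot but queues the cache on trans->new_bgs, and btrfs_create_pending_block_groups() drains that list later (btrfs_run_delayed_refs() above calls it before walking the ref tree). The apparent intent is to keep do_chunk_alloc() cheap at the point where find_free_extent() or flush_space() triggers it, with the extent tree modification batched up per transaction. The sketch below models that defer-then-drain pattern in plain C with an invented transaction struct; it only shows the shape of the change, not the kernel data structures.

/*
 * Userspace sketch of "queue new block groups on the transaction,
 * insert their items later".  All names and types are invented.
 */
#include <stdio.h>
#include <stdlib.h>

struct block_group {
        unsigned long long objectid;
        struct block_group *next;       /* stands in for new_bg_list */
};

struct transaction {
        struct block_group *new_bgs;    /* pending, not yet in the tree */
};

/* btrfs_make_block_group() analogue: remember the group, insert nothing */
static void make_block_group(struct transaction *trans,
                             unsigned long long objectid)
{
        struct block_group *bg = malloc(sizeof(*bg));

        bg->objectid = objectid;
        bg->next = trans->new_bgs;
        trans->new_bgs = bg;
}

/* btrfs_create_pending_block_groups() analogue: drain the list once */
static void create_pending_block_groups(struct transaction *trans)
{
        while (trans->new_bgs) {
                struct block_group *bg = trans->new_bgs;

                trans->new_bgs = bg->next;
                printf("inserting block group item for %llu\n", bg->objectid);
                free(bg);
        }
}

int main(void)
{
        struct transaction trans = { .new_bgs = NULL };

        make_block_group(&trans, 1024);
        make_block_group(&trans, 2048);
        create_pending_block_groups(&trans);    /* deferred insertions */
        return 0;
}
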
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b08ea4717e9d..8036d3a84853 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -45,6 +45,7 @@ struct extent_page_data { | |||
| 45 | struct bio *bio; | 45 | struct bio *bio; |
| 46 | struct extent_io_tree *tree; | 46 | struct extent_io_tree *tree; |
| 47 | get_extent_t *get_extent; | 47 | get_extent_t *get_extent; |
| 48 | unsigned long bio_flags; | ||
| 48 | 49 | ||
| 49 | /* tells writepage not to lock the state bits for this range | 50 | /* tells writepage not to lock the state bits for this range |
| 50 | * it still does the unlocking | 51 | * it still does the unlocking |
| @@ -64,13 +65,13 @@ tree_fs_info(struct extent_io_tree *tree) | |||
| 64 | 65 | ||
| 65 | int __init extent_io_init(void) | 66 | int __init extent_io_init(void) |
| 66 | { | 67 | { |
| 67 | extent_state_cache = kmem_cache_create("extent_state", | 68 | extent_state_cache = kmem_cache_create("btrfs_extent_state", |
| 68 | sizeof(struct extent_state), 0, | 69 | sizeof(struct extent_state), 0, |
| 69 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 70 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 70 | if (!extent_state_cache) | 71 | if (!extent_state_cache) |
| 71 | return -ENOMEM; | 72 | return -ENOMEM; |
| 72 | 73 | ||
| 73 | extent_buffer_cache = kmem_cache_create("extent_buffers", | 74 | extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", |
| 74 | sizeof(struct extent_buffer), 0, | 75 | sizeof(struct extent_buffer), 0, |
| 75 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 76 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 76 | if (!extent_buffer_cache) | 77 | if (!extent_buffer_cache) |
| @@ -942,6 +943,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, | |||
| 942 | * @end: the end offset in bytes (inclusive) | 943 | * @end: the end offset in bytes (inclusive) |
| 943 | * @bits: the bits to set in this range | 944 | * @bits: the bits to set in this range |
| 944 | * @clear_bits: the bits to clear in this range | 945 | * @clear_bits: the bits to clear in this range |
| 946 | * @cached_state: state that we're going to cache | ||
| 945 | * @mask: the allocation mask | 947 | * @mask: the allocation mask |
| 946 | * | 948 | * |
| 947 | * This will go through and set bits for the given range. If any states exist | 949 | * This will go through and set bits for the given range. If any states exist |
| @@ -951,7 +953,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, | |||
| 951 | * boundary bits like LOCK. | 953 | * boundary bits like LOCK. |
| 952 | */ | 954 | */ |
| 953 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 955 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| 954 | int bits, int clear_bits, gfp_t mask) | 956 | int bits, int clear_bits, |
| 957 | struct extent_state **cached_state, gfp_t mask) | ||
| 955 | { | 958 | { |
| 956 | struct extent_state *state; | 959 | struct extent_state *state; |
| 957 | struct extent_state *prealloc = NULL; | 960 | struct extent_state *prealloc = NULL; |
| @@ -968,6 +971,15 @@ again: | |||
| 968 | } | 971 | } |
| 969 | 972 | ||
| 970 | spin_lock(&tree->lock); | 973 | spin_lock(&tree->lock); |
| 974 | if (cached_state && *cached_state) { | ||
| 975 | state = *cached_state; | ||
| 976 | if (state->start <= start && state->end > start && | ||
| 977 | state->tree) { | ||
| 978 | node = &state->rb_node; | ||
| 979 | goto hit_next; | ||
| 980 | } | ||
| 981 | } | ||
| 982 | |||
| 971 | /* | 983 | /* |
| 972 | * this search will find all the extents that end after | 984 | * this search will find all the extents that end after |
| 973 | * our range starts. | 985 | * our range starts. |
| @@ -998,6 +1010,7 @@ hit_next: | |||
| 998 | */ | 1010 | */ |
| 999 | if (state->start == start && state->end <= end) { | 1011 | if (state->start == start && state->end <= end) { |
| 1000 | set_state_bits(tree, state, &bits); | 1012 | set_state_bits(tree, state, &bits); |
| 1013 | cache_state(state, cached_state); | ||
| 1001 | state = clear_state_bit(tree, state, &clear_bits, 0); | 1014 | state = clear_state_bit(tree, state, &clear_bits, 0); |
| 1002 | if (last_end == (u64)-1) | 1015 | if (last_end == (u64)-1) |
| 1003 | goto out; | 1016 | goto out; |
| @@ -1038,6 +1051,7 @@ hit_next: | |||
| 1038 | goto out; | 1051 | goto out; |
| 1039 | if (state->end <= end) { | 1052 | if (state->end <= end) { |
| 1040 | set_state_bits(tree, state, &bits); | 1053 | set_state_bits(tree, state, &bits); |
| 1054 | cache_state(state, cached_state); | ||
| 1041 | state = clear_state_bit(tree, state, &clear_bits, 0); | 1055 | state = clear_state_bit(tree, state, &clear_bits, 0); |
| 1042 | if (last_end == (u64)-1) | 1056 | if (last_end == (u64)-1) |
| 1043 | goto out; | 1057 | goto out; |
| @@ -1076,6 +1090,7 @@ hit_next: | |||
| 1076 | &bits); | 1090 | &bits); |
| 1077 | if (err) | 1091 | if (err) |
| 1078 | extent_io_tree_panic(tree, err); | 1092 | extent_io_tree_panic(tree, err); |
| 1093 | cache_state(prealloc, cached_state); | ||
| 1079 | prealloc = NULL; | 1094 | prealloc = NULL; |
| 1080 | start = this_end + 1; | 1095 | start = this_end + 1; |
| 1081 | goto search_again; | 1096 | goto search_again; |
| @@ -1098,6 +1113,7 @@ hit_next: | |||
| 1098 | extent_io_tree_panic(tree, err); | 1113 | extent_io_tree_panic(tree, err); |
| 1099 | 1114 | ||
| 1100 | set_state_bits(tree, prealloc, &bits); | 1115 | set_state_bits(tree, prealloc, &bits); |
| 1116 | cache_state(prealloc, cached_state); | ||
| 1101 | clear_state_bit(tree, prealloc, &clear_bits, 0); | 1117 | clear_state_bit(tree, prealloc, &clear_bits, 0); |
| 1102 | prealloc = NULL; | 1118 | prealloc = NULL; |
| 1103 | goto out; | 1119 | goto out; |
| @@ -1150,6 +1166,14 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, | |||
| 1150 | NULL, cached_state, mask); | 1166 | NULL, cached_state, mask); |
| 1151 | } | 1167 | } |
| 1152 | 1168 | ||
| 1169 | int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, | ||
| 1170 | struct extent_state **cached_state, gfp_t mask) | ||
| 1171 | { | ||
| 1172 | return set_extent_bit(tree, start, end, | ||
| 1173 | EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, | ||
| 1174 | NULL, cached_state, mask); | ||
| 1175 | } | ||
| 1176 | |||
| 1153 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | 1177 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, |
| 1154 | gfp_t mask) | 1178 | gfp_t mask) |
| 1155 | { | 1179 | { |
| @@ -1294,18 +1318,42 @@ out: | |||
| 1294 | * If nothing was found, 1 is returned. If found something, return 0. | 1318 | * If nothing was found, 1 is returned. If found something, return 0. |
| 1295 | */ | 1319 | */ |
| 1296 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, | 1320 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, |
| 1297 | u64 *start_ret, u64 *end_ret, int bits) | 1321 | u64 *start_ret, u64 *end_ret, int bits, |
| 1322 | struct extent_state **cached_state) | ||
| 1298 | { | 1323 | { |
| 1299 | struct extent_state *state; | 1324 | struct extent_state *state; |
| 1325 | struct rb_node *n; | ||
| 1300 | int ret = 1; | 1326 | int ret = 1; |
| 1301 | 1327 | ||
| 1302 | spin_lock(&tree->lock); | 1328 | spin_lock(&tree->lock); |
| 1329 | if (cached_state && *cached_state) { | ||
| 1330 | state = *cached_state; | ||
| 1331 | if (state->end == start - 1 && state->tree) { | ||
| 1332 | n = rb_next(&state->rb_node); | ||
| 1333 | while (n) { | ||
| 1334 | state = rb_entry(n, struct extent_state, | ||
| 1335 | rb_node); | ||
| 1336 | if (state->state & bits) | ||
| 1337 | goto got_it; | ||
| 1338 | n = rb_next(n); | ||
| 1339 | } | ||
| 1340 | free_extent_state(*cached_state); | ||
| 1341 | *cached_state = NULL; | ||
| 1342 | goto out; | ||
| 1343 | } | ||
| 1344 | free_extent_state(*cached_state); | ||
| 1345 | *cached_state = NULL; | ||
| 1346 | } | ||
| 1347 | |||
| 1303 | state = find_first_extent_bit_state(tree, start, bits); | 1348 | state = find_first_extent_bit_state(tree, start, bits); |
| 1349 | got_it: | ||
| 1304 | if (state) { | 1350 | if (state) { |
| 1351 | cache_state(state, cached_state); | ||
| 1305 | *start_ret = state->start; | 1352 | *start_ret = state->start; |
| 1306 | *end_ret = state->end; | 1353 | *end_ret = state->end; |
| 1307 | ret = 0; | 1354 | ret = 0; |
| 1308 | } | 1355 | } |
| 1356 | out: | ||
| 1309 | spin_unlock(&tree->lock); | 1357 | spin_unlock(&tree->lock); |
| 1310 | return ret; | 1358 | return ret; |
| 1311 | } | 1359 | } |
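
find_first_extent_bit() (like convert_extent_bit() above) can now be handed a cached extent_state: if the cached state ends exactly at start - 1 and is still in the tree, the next candidate is found by walking rb_next() from it instead of searching from the root, which is the common case for callers that sweep a range in order. The call sites touched in this patch all pass NULL, so nothing changes for them yet. The sketch below demonstrates the underlying idea, remembering where the previous lookup stopped, on a plain sorted array rather than the kernel's rbtree; everything in it is invented for the illustration.

/*
 * Userspace illustration of the cached-lookup idea: remember where the
 * previous search ended so a forward sweep does not restart from the
 * beginning each time.  A sorted array stands in for the rbtree.
 */
#include <stdio.h>
#include <stddef.h>

struct range {
        unsigned long long start, end;
        int bits;
};

/* like find_first_extent_bit(): find the first range at or after
 * *cached_idx whose bits match, and remember where we found it */
static int find_first_range(const struct range *ranges, size_t count,
                            int bits, size_t *cached_idx,
                            unsigned long long *start,
                            unsigned long long *end)
{
        size_t i;

        for (i = *cached_idx; i < count; i++) {
                if (ranges[i].bits & bits) {
                        *start = ranges[i].start;
                        *end = ranges[i].end;
                        *cached_idx = i + 1;    /* resume after the hit */
                        return 0;
                }
        }
        return 1;                               /* nothing found */
}

int main(void)
{
        const struct range dirty[] = {
                { 0,     4095,  0x1 },
                { 4096,  8191,  0x0 },
                { 8192,  12287, 0x1 },
        };
        size_t cached = 0;
        unsigned long long start, end;

        /* sweep all dirty ranges without rescanning from index 0 */
        while (!find_first_range(dirty, 3, 0x1, &cached, &start, &end))
                printf("dirty range [%llu, %llu]\n", start, end);
        return 0;
}
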
| @@ -2068,7 +2116,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, | |||
| 2068 | } | 2116 | } |
| 2069 | read_unlock(&em_tree->lock); | 2117 | read_unlock(&em_tree->lock); |
| 2070 | 2118 | ||
| 2071 | if (!em || IS_ERR(em)) { | 2119 | if (!em) { |
| 2072 | kfree(failrec); | 2120 | kfree(failrec); |
| 2073 | return -EIO; | 2121 | return -EIO; |
| 2074 | } | 2122 | } |
| @@ -2304,8 +2352,8 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2304 | struct extent_state *cached = NULL; | 2352 | struct extent_state *cached = NULL; |
| 2305 | struct extent_state *state; | 2353 | struct extent_state *state; |
| 2306 | 2354 | ||
| 2307 | pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, " | 2355 | pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " |
| 2308 | "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err, | 2356 | "mirror=%ld\n", (u64)bio->bi_sector, err, |
| 2309 | (long int)bio->bi_bdev); | 2357 | (long int)bio->bi_bdev); |
| 2310 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 2358 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
| 2311 | 2359 | ||
| @@ -2709,12 +2757,15 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
| 2709 | end_bio_extent_readpage, mirror_num, | 2757 | end_bio_extent_readpage, mirror_num, |
| 2710 | *bio_flags, | 2758 | *bio_flags, |
| 2711 | this_bio_flag); | 2759 | this_bio_flag); |
| 2712 | BUG_ON(ret == -ENOMEM); | 2760 | if (!ret) { |
| 2713 | nr++; | 2761 | nr++; |
| 2714 | *bio_flags = this_bio_flag; | 2762 | *bio_flags = this_bio_flag; |
| 2763 | } | ||
| 2715 | } | 2764 | } |
| 2716 | if (ret) | 2765 | if (ret) { |
| 2717 | SetPageError(page); | 2766 | SetPageError(page); |
| 2767 | unlock_extent(tree, cur, cur + iosize - 1); | ||
| 2768 | } | ||
| 2718 | cur = cur + iosize; | 2769 | cur = cur + iosize; |
| 2719 | pg_offset += iosize; | 2770 | pg_offset += iosize; |
| 2720 | } | 2771 | } |
| @@ -3161,12 +3212,16 @@ static int write_one_eb(struct extent_buffer *eb, | |||
| 3161 | struct block_device *bdev = fs_info->fs_devices->latest_bdev; | 3212 | struct block_device *bdev = fs_info->fs_devices->latest_bdev; |
| 3162 | u64 offset = eb->start; | 3213 | u64 offset = eb->start; |
| 3163 | unsigned long i, num_pages; | 3214 | unsigned long i, num_pages; |
| 3215 | unsigned long bio_flags = 0; | ||
| 3164 | int rw = (epd->sync_io ? WRITE_SYNC : WRITE); | 3216 | int rw = (epd->sync_io ? WRITE_SYNC : WRITE); |
| 3165 | int ret = 0; | 3217 | int ret = 0; |
| 3166 | 3218 | ||
| 3167 | clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); | 3219 | clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); |
| 3168 | num_pages = num_extent_pages(eb->start, eb->len); | 3220 | num_pages = num_extent_pages(eb->start, eb->len); |
| 3169 | atomic_set(&eb->io_pages, num_pages); | 3221 | atomic_set(&eb->io_pages, num_pages); |
| 3222 | if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) | ||
| 3223 | bio_flags = EXTENT_BIO_TREE_LOG; | ||
| 3224 | |||
| 3170 | for (i = 0; i < num_pages; i++) { | 3225 | for (i = 0; i < num_pages; i++) { |
| 3171 | struct page *p = extent_buffer_page(eb, i); | 3226 | struct page *p = extent_buffer_page(eb, i); |
| 3172 | 3227 | ||
| @@ -3175,7 +3230,8 @@ static int write_one_eb(struct extent_buffer *eb, | |||
| 3175 | ret = submit_extent_page(rw, eb->tree, p, offset >> 9, | 3230 | ret = submit_extent_page(rw, eb->tree, p, offset >> 9, |
| 3176 | PAGE_CACHE_SIZE, 0, bdev, &epd->bio, | 3231 | PAGE_CACHE_SIZE, 0, bdev, &epd->bio, |
| 3177 | -1, end_bio_extent_buffer_writepage, | 3232 | -1, end_bio_extent_buffer_writepage, |
| 3178 | 0, 0, 0); | 3233 | 0, epd->bio_flags, bio_flags); |
| 3234 | epd->bio_flags = bio_flags; | ||
| 3179 | if (ret) { | 3235 | if (ret) { |
| 3180 | set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); | 3236 | set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); |
| 3181 | SetPageError(p); | 3237 | SetPageError(p); |
| @@ -3210,6 +3266,7 @@ int btree_write_cache_pages(struct address_space *mapping, | |||
| 3210 | .tree = tree, | 3266 | .tree = tree, |
| 3211 | .extent_locked = 0, | 3267 | .extent_locked = 0, |
| 3212 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, | 3268 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, |
| 3269 | .bio_flags = 0, | ||
| 3213 | }; | 3270 | }; |
| 3214 | int ret = 0; | 3271 | int ret = 0; |
| 3215 | int done = 0; | 3272 | int done = 0; |
| @@ -3254,19 +3311,34 @@ retry: | |||
| 3254 | break; | 3311 | break; |
| 3255 | } | 3312 | } |
| 3256 | 3313 | ||
| 3314 | spin_lock(&mapping->private_lock); | ||
| 3315 | if (!PagePrivate(page)) { | ||
| 3316 | spin_unlock(&mapping->private_lock); | ||
| 3317 | continue; | ||
| 3318 | } | ||
| 3319 | |||
| 3257 | eb = (struct extent_buffer *)page->private; | 3320 | eb = (struct extent_buffer *)page->private; |
| 3321 | |||
| 3322 | /* | ||
| 3323 | * Shouldn't happen and normally this would be a BUG_ON | ||
| 3324 | * but no sense in crashing the user's box for something | ||
| 3325 | * we can survive anyway. | ||
| 3326 | */ | ||
| 3258 | if (!eb) { | 3327 | if (!eb) { |
| 3328 | spin_unlock(&mapping->private_lock); | ||
| 3259 | WARN_ON(1); | 3329 | WARN_ON(1); |
| 3260 | continue; | 3330 | continue; |
| 3261 | } | 3331 | } |
| 3262 | 3332 | ||
| 3263 | if (eb == prev_eb) | 3333 | if (eb == prev_eb) { |
| 3334 | spin_unlock(&mapping->private_lock); | ||
| 3264 | continue; | 3335 | continue; |
| 3336 | } | ||
| 3265 | 3337 | ||
| 3266 | if (!atomic_inc_not_zero(&eb->refs)) { | 3338 | ret = atomic_inc_not_zero(&eb->refs); |
| 3267 | WARN_ON(1); | 3339 | spin_unlock(&mapping->private_lock); |
| 3340 | if (!ret) | ||
| 3268 | continue; | 3341 | continue; |
| 3269 | } | ||
| 3270 | 3342 | ||
| 3271 | prev_eb = eb; | 3343 | prev_eb = eb; |
| 3272 | ret = lock_extent_buffer_for_io(eb, fs_info, &epd); | 3344 | ret = lock_extent_buffer_for_io(eb, fs_info, &epd); |
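
The btree_write_cache_pages() hunk above closes a race by only checking PagePrivate, reading page->private and taking the extent-buffer reference while mapping->private_lock is held, and by treating a failed atomic_inc_not_zero() as "skip this page" rather than something worth warning about. The general shape, upgrading a weak pointer to a counted reference under the lock that protects the pointer, looks roughly like the userspace sketch below; grab_buffer(), the slot pointer and the plain int refcount are illustrative stand-ins, not kernel primitives.

#include <pthread.h>
#include <stdio.h>

struct buffer {
	int refs;
	int id;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buffer *slot;		/* plays the role of page->private */

/*
 * Upgrade the weak 'slot' pointer to a counted reference.  The pointer is
 * only dereferenced while slot_lock is held, and a refcount of zero means
 * the buffer is already on its way out, so we must not touch it.
 */
static struct buffer *grab_buffer(void)
{
	struct buffer *b = NULL;

	pthread_mutex_lock(&slot_lock);
	if (slot && slot->refs > 0) {
		b = slot;
		b->refs++;		/* the inc_not_zero step */
	}
	pthread_mutex_unlock(&slot_lock);
	return b;
}

int main(void)
{
	struct buffer buf = { .refs = 1, .id = 42 };
	struct buffer *b;

	slot = &buf;
	b = grab_buffer();
	printf("buffer %d, refs now %d\n", b ? b->id : -1, b ? b->refs : 0);
	return 0;
}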
| @@ -3457,7 +3529,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd) | |||
| 3457 | if (epd->sync_io) | 3529 | if (epd->sync_io) |
| 3458 | rw = WRITE_SYNC; | 3530 | rw = WRITE_SYNC; |
| 3459 | 3531 | ||
| 3460 | ret = submit_one_bio(rw, epd->bio, 0, 0); | 3532 | ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags); |
| 3461 | BUG_ON(ret < 0); /* -ENOMEM */ | 3533 | BUG_ON(ret < 0); /* -ENOMEM */ |
| 3462 | epd->bio = NULL; | 3534 | epd->bio = NULL; |
| 3463 | } | 3535 | } |
| @@ -3480,6 +3552,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, | |||
| 3480 | .get_extent = get_extent, | 3552 | .get_extent = get_extent, |
| 3481 | .extent_locked = 0, | 3553 | .extent_locked = 0, |
| 3482 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, | 3554 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, |
| 3555 | .bio_flags = 0, | ||
| 3483 | }; | 3556 | }; |
| 3484 | 3557 | ||
| 3485 | ret = __extent_writepage(page, wbc, &epd); | 3558 | ret = __extent_writepage(page, wbc, &epd); |
| @@ -3504,6 +3577,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, | |||
| 3504 | .get_extent = get_extent, | 3577 | .get_extent = get_extent, |
| 3505 | .extent_locked = 1, | 3578 | .extent_locked = 1, |
| 3506 | .sync_io = mode == WB_SYNC_ALL, | 3579 | .sync_io = mode == WB_SYNC_ALL, |
| 3580 | .bio_flags = 0, | ||
| 3507 | }; | 3581 | }; |
| 3508 | struct writeback_control wbc_writepages = { | 3582 | struct writeback_control wbc_writepages = { |
| 3509 | .sync_mode = mode, | 3583 | .sync_mode = mode, |
| @@ -3543,6 +3617,7 @@ int extent_writepages(struct extent_io_tree *tree, | |||
| 3543 | .get_extent = get_extent, | 3617 | .get_extent = get_extent, |
| 3544 | .extent_locked = 0, | 3618 | .extent_locked = 0, |
| 3545 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, | 3619 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, |
| 3620 | .bio_flags = 0, | ||
| 3546 | }; | 3621 | }; |
| 3547 | 3622 | ||
| 3548 | ret = extent_write_cache_pages(tree, mapping, wbc, | 3623 | ret = extent_write_cache_pages(tree, mapping, wbc, |
| @@ -3920,18 +3995,6 @@ out: | |||
| 3920 | return ret; | 3995 | return ret; |
| 3921 | } | 3996 | } |
| 3922 | 3997 | ||
| 3923 | inline struct page *extent_buffer_page(struct extent_buffer *eb, | ||
| 3924 | unsigned long i) | ||
| 3925 | { | ||
| 3926 | return eb->pages[i]; | ||
| 3927 | } | ||
| 3928 | |||
| 3929 | inline unsigned long num_extent_pages(u64 start, u64 len) | ||
| 3930 | { | ||
| 3931 | return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - | ||
| 3932 | (start >> PAGE_CACHE_SHIFT); | ||
| 3933 | } | ||
| 3934 | |||
| 3935 | static void __free_extent_buffer(struct extent_buffer *eb) | 3998 | static void __free_extent_buffer(struct extent_buffer *eb) |
| 3936 | { | 3999 | { |
| 3937 | #if LEAK_DEBUG | 4000 | #if LEAK_DEBUG |
| @@ -4047,7 +4110,7 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) | |||
| 4047 | 4110 | ||
| 4048 | return eb; | 4111 | return eb; |
| 4049 | err: | 4112 | err: |
| 4050 | for (i--; i > 0; i--) | 4113 | for (i--; i >= 0; i--) |
| 4051 | __free_page(eb->pages[i]); | 4114 | __free_page(eb->pages[i]); |
| 4052 | __free_extent_buffer(eb); | 4115 | __free_extent_buffer(eb); |
| 4053 | return NULL; | 4116 | return NULL; |
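
The alloc_dummy_extent_buffer() error path above previously stopped at i > 0 and so never freed pages[0]; walking the index down to and including zero releases everything that was allocated before the failure. One general caveat about this unwind shape: an i >= 0 test only terminates when the index is signed, otherwise the loop is usually written as while (i--). The small self-contained example below uses a signed index; the page count and malloc() stand in for the real allocation.

#include <stdio.h>
#include <stdlib.h>

#define N 4

int main(void)
{
	void *pages[N] = { NULL };
	int i;				/* signed so that i >= 0 terminates */

	for (i = 0; i < N; i++) {
		pages[i] = malloc(4096);
		if (!pages[i])
			goto err;
	}
	printf("allocated %d pages\n", N);
	for (i = 0; i < N; i++)
		free(pages[i]);
	return 0;

err:
	/* free every page allocated before the failure, including pages[0] */
	for (i--; i >= 0; i--)
		free(pages[i]);
	return 1;
}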
| @@ -4192,10 +4255,8 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
| 4192 | 4255 | ||
| 4193 | for (i = 0; i < num_pages; i++, index++) { | 4256 | for (i = 0; i < num_pages; i++, index++) { |
| 4194 | p = find_or_create_page(mapping, index, GFP_NOFS); | 4257 | p = find_or_create_page(mapping, index, GFP_NOFS); |
| 4195 | if (!p) { | 4258 | if (!p) |
| 4196 | WARN_ON(1); | ||
| 4197 | goto free_eb; | 4259 | goto free_eb; |
| 4198 | } | ||
| 4199 | 4260 | ||
| 4200 | spin_lock(&mapping->private_lock); | 4261 | spin_lock(&mapping->private_lock); |
| 4201 | if (PagePrivate(p)) { | 4262 | if (PagePrivate(p)) { |
| @@ -4338,7 +4399,6 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask) | |||
| 4338 | 4399 | ||
| 4339 | /* Should be safe to release our pages at this point */ | 4400 | /* Should be safe to release our pages at this point */ |
| 4340 | btrfs_release_extent_buffer_page(eb, 0); | 4401 | btrfs_release_extent_buffer_page(eb, 0); |
| 4341 | |||
| 4342 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); | 4402 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); |
| 4343 | return 1; | 4403 | return 1; |
| 4344 | } | 4404 | } |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 25900af5b15d..711d12b80028 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | * type for this bio | 27 | * type for this bio |
| 28 | */ | 28 | */ |
| 29 | #define EXTENT_BIO_COMPRESSED 1 | 29 | #define EXTENT_BIO_COMPRESSED 1 |
| 30 | #define EXTENT_BIO_TREE_LOG 2 | ||
| 30 | #define EXTENT_BIO_FLAG_SHIFT 16 | 31 | #define EXTENT_BIO_FLAG_SHIFT 16 |
| 31 | 32 | ||
| 32 | /* these are bit numbers for test/set bit */ | 33 | /* these are bit numbers for test/set bit */ |
| @@ -232,11 +233,15 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | |||
| 232 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | 233 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, |
| 233 | gfp_t mask); | 234 | gfp_t mask); |
| 234 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 235 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| 235 | int bits, int clear_bits, gfp_t mask); | 236 | int bits, int clear_bits, |
| 237 | struct extent_state **cached_state, gfp_t mask); | ||
| 236 | int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, | 238 | int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, |
| 237 | struct extent_state **cached_state, gfp_t mask); | 239 | struct extent_state **cached_state, gfp_t mask); |
| 240 | int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, | ||
| 241 | struct extent_state **cached_state, gfp_t mask); | ||
| 238 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, | 242 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, |
| 239 | u64 *start_ret, u64 *end_ret, int bits); | 243 | u64 *start_ret, u64 *end_ret, int bits, |
| 244 | struct extent_state **cached_state); | ||
| 240 | struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, | 245 | struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, |
| 241 | u64 start, int bits); | 246 | u64 start, int bits); |
| 242 | int extent_invalidatepage(struct extent_io_tree *tree, | 247 | int extent_invalidatepage(struct extent_io_tree *tree, |
| @@ -277,8 +282,18 @@ void free_extent_buffer_stale(struct extent_buffer *eb); | |||
| 277 | int read_extent_buffer_pages(struct extent_io_tree *tree, | 282 | int read_extent_buffer_pages(struct extent_io_tree *tree, |
| 278 | struct extent_buffer *eb, u64 start, int wait, | 283 | struct extent_buffer *eb, u64 start, int wait, |
| 279 | get_extent_t *get_extent, int mirror_num); | 284 | get_extent_t *get_extent, int mirror_num); |
| 280 | unsigned long num_extent_pages(u64 start, u64 len); | 285 | |
| 281 | struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i); | 286 | static inline unsigned long num_extent_pages(u64 start, u64 len) |
| 287 | { | ||
| 288 | return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - | ||
| 289 | (start >> PAGE_CACHE_SHIFT); | ||
| 290 | } | ||
| 291 | |||
| 292 | static inline struct page *extent_buffer_page(struct extent_buffer *eb, | ||
| 293 | unsigned long i) | ||
| 294 | { | ||
| 295 | return eb->pages[i]; | ||
| 296 | } | ||
| 282 | 297 | ||
| 283 | static inline void extent_buffer_get(struct extent_buffer *eb) | 298 | static inline void extent_buffer_get(struct extent_buffer *eb) |
| 284 | { | 299 | { |
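
num_extent_pages() and extent_buffer_page() move out of extent_io.c and become static inlines in the header, so hot callers get them inlined. The page count is simply "index one past the buffer's last byte, minus index of the buffer's first byte", both in page units. A quick userspace check of the arithmetic, assuming a 4096-byte page for the illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long num_extent_pages(uint64_t start, uint64_t len)
{
	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (start >> PAGE_SHIFT);
}

int main(void)
{
	/* a 16K buffer that starts page-aligned spans exactly 4 pages */
	printf("%lu\n", num_extent_pages(0, 16384));
	/* the same 16K buffer starting mid-page spills into a 5th page */
	printf("%lu\n", num_extent_pages(2048, 16384));
	return 0;
}

An unaligned start spills the same length into one extra page, which is what the second call shows.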
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 7c97b3301459..b8cbc8d5c7f7 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
| @@ -11,7 +11,7 @@ static struct kmem_cache *extent_map_cache; | |||
| 11 | 11 | ||
| 12 | int __init extent_map_init(void) | 12 | int __init extent_map_init(void) |
| 13 | { | 13 | { |
| 14 | extent_map_cache = kmem_cache_create("extent_map", | 14 | extent_map_cache = kmem_cache_create("btrfs_extent_map", |
| 15 | sizeof(struct extent_map), 0, | 15 | sizeof(struct extent_map), 0, |
| 16 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 16 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 17 | if (!extent_map_cache) | 17 | if (!extent_map_cache) |
| @@ -35,6 +35,7 @@ void extent_map_exit(void) | |||
| 35 | void extent_map_tree_init(struct extent_map_tree *tree) | 35 | void extent_map_tree_init(struct extent_map_tree *tree) |
| 36 | { | 36 | { |
| 37 | tree->map = RB_ROOT; | 37 | tree->map = RB_ROOT; |
| 38 | INIT_LIST_HEAD(&tree->modified_extents); | ||
| 38 | rwlock_init(&tree->lock); | 39 | rwlock_init(&tree->lock); |
| 39 | } | 40 | } |
| 40 | 41 | ||
| @@ -54,7 +55,9 @@ struct extent_map *alloc_extent_map(void) | |||
| 54 | em->in_tree = 0; | 55 | em->in_tree = 0; |
| 55 | em->flags = 0; | 56 | em->flags = 0; |
| 56 | em->compress_type = BTRFS_COMPRESS_NONE; | 57 | em->compress_type = BTRFS_COMPRESS_NONE; |
| 58 | em->generation = 0; | ||
| 57 | atomic_set(&em->refs, 1); | 59 | atomic_set(&em->refs, 1); |
| 60 | INIT_LIST_HEAD(&em->list); | ||
| 58 | return em; | 61 | return em; |
| 59 | } | 62 | } |
| 60 | 63 | ||
| @@ -72,6 +75,7 @@ void free_extent_map(struct extent_map *em) | |||
| 72 | WARN_ON(atomic_read(&em->refs) == 0); | 75 | WARN_ON(atomic_read(&em->refs) == 0); |
| 73 | if (atomic_dec_and_test(&em->refs)) { | 76 | if (atomic_dec_and_test(&em->refs)) { |
| 74 | WARN_ON(em->in_tree); | 77 | WARN_ON(em->in_tree); |
| 78 | WARN_ON(!list_empty(&em->list)); | ||
| 75 | kmem_cache_free(extent_map_cache, em); | 79 | kmem_cache_free(extent_map_cache, em); |
| 76 | } | 80 | } |
| 77 | } | 81 | } |
| @@ -198,6 +202,14 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) | |||
| 198 | em->block_len += merge->block_len; | 202 | em->block_len += merge->block_len; |
| 199 | em->block_start = merge->block_start; | 203 | em->block_start = merge->block_start; |
| 200 | merge->in_tree = 0; | 204 | merge->in_tree = 0; |
| 205 | if (merge->generation > em->generation) { | ||
| 206 | em->mod_start = em->start; | ||
| 207 | em->mod_len = em->len; | ||
| 208 | em->generation = merge->generation; | ||
| 209 | list_move(&em->list, &tree->modified_extents); | ||
| 210 | } | ||
| 211 | |||
| 212 | list_del_init(&merge->list); | ||
| 201 | rb_erase(&merge->rb_node, &tree->map); | 213 | rb_erase(&merge->rb_node, &tree->map); |
| 202 | free_extent_map(merge); | 214 | free_extent_map(merge); |
| 203 | } | 215 | } |
| @@ -211,14 +223,34 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) | |||
| 211 | em->block_len += merge->len; | 223 | em->block_len += merge->len; |
| 212 | rb_erase(&merge->rb_node, &tree->map); | 224 | rb_erase(&merge->rb_node, &tree->map); |
| 213 | merge->in_tree = 0; | 225 | merge->in_tree = 0; |
| 226 | if (merge->generation > em->generation) { | ||
| 227 | em->mod_len = em->len; | ||
| 228 | em->generation = merge->generation; | ||
| 229 | list_move(&em->list, &tree->modified_extents); | ||
| 230 | } | ||
| 231 | list_del_init(&merge->list); | ||
| 214 | free_extent_map(merge); | 232 | free_extent_map(merge); |
| 215 | } | 233 | } |
| 216 | } | 234 | } |
| 217 | 235 | ||
| 218 | int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len) | 236 | /** |
| 237 | * unpin_extent_cache - unpin an extent from the cache | ||
| 238 | * @tree: tree to unpin the extent in | ||
| 239 | * @start: logical offset in the file | ||
| 240 | * @len: length of the extent | ||
| 241 | * @gen: generation that this extent has been modified in | ||
| 242 | * @prealloc: if this is set we need to clear the prealloc flag | ||
| 243 | * | ||
| 244 | * Called after an extent has been written to disk properly. Set the generation | ||
| 245 | * to the generation that actually added the file item to the inode so we know | ||
| 246 | * we need to sync this extent when we call fsync(). | ||
| 247 | */ | ||
| 248 | int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, | ||
| 249 | u64 gen) | ||
| 219 | { | 250 | { |
| 220 | int ret = 0; | 251 | int ret = 0; |
| 221 | struct extent_map *em; | 252 | struct extent_map *em; |
| 253 | bool prealloc = false; | ||
| 222 | 254 | ||
| 223 | write_lock(&tree->lock); | 255 | write_lock(&tree->lock); |
| 224 | em = lookup_extent_mapping(tree, start, len); | 256 | em = lookup_extent_mapping(tree, start, len); |
| @@ -228,10 +260,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len) | |||
| 228 | if (!em) | 260 | if (!em) |
| 229 | goto out; | 261 | goto out; |
| 230 | 262 | ||
| 263 | list_move(&em->list, &tree->modified_extents); | ||
| 264 | em->generation = gen; | ||
| 231 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); | 265 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); |
| 266 | em->mod_start = em->start; | ||
| 267 | em->mod_len = em->len; | ||
| 268 | |||
| 269 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { | ||
| 270 | prealloc = true; | ||
| 271 | clear_bit(EXTENT_FLAG_PREALLOC, &em->flags); | ||
| 272 | } | ||
| 232 | 273 | ||
| 233 | try_merge_map(tree, em); | 274 | try_merge_map(tree, em); |
| 234 | 275 | ||
| 276 | if (prealloc) { | ||
| 277 | em->mod_start = em->start; | ||
| 278 | em->mod_len = em->len; | ||
| 279 | } | ||
| 280 | |||
| 235 | free_extent_map(em); | 281 | free_extent_map(em); |
| 236 | out: | 282 | out: |
| 237 | write_unlock(&tree->lock); | 283 | write_unlock(&tree->lock); |
| @@ -269,6 +315,9 @@ int add_extent_mapping(struct extent_map_tree *tree, | |||
| 269 | } | 315 | } |
| 270 | atomic_inc(&em->refs); | 316 | atomic_inc(&em->refs); |
| 271 | 317 | ||
| 318 | em->mod_start = em->start; | ||
| 319 | em->mod_len = em->len; | ||
| 320 | |||
| 272 | try_merge_map(tree, em); | 321 | try_merge_map(tree, em); |
| 273 | out: | 322 | out: |
| 274 | return ret; | 323 | return ret; |
| @@ -358,6 +407,8 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) | |||
| 358 | 407 | ||
| 359 | WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); | 408 | WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); |
| 360 | rb_erase(&em->rb_node, &tree->map); | 409 | rb_erase(&em->rb_node, &tree->map); |
| 410 | if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) | ||
| 411 | list_del_init(&em->list); | ||
| 361 | em->in_tree = 0; | 412 | em->in_tree = 0; |
| 362 | return ret; | 413 | return ret; |
| 363 | } | 414 | } |
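
The extent_map changes above give each map a generation (the transaction that last wrote it), a mod_start/mod_len window describing what fsync still has to log, and a list hook onto the tree's new modified_extents list. unpin_extent_cache() records the generation once an extent finishes writeback, and try_merge_map() keeps the newest generation and widens the modified window when neighbouring maps merge. Below is a toy sketch of that merge rule with plain structs; there is no rbtree or list handling here, and merge_forward() plus the reduced field set are purely illustrative.

#include <stdio.h>
#include <stdint.h>

struct map {
	uint64_t start, len;		/* logical range covered */
	uint64_t mod_start, mod_len;	/* sub-range fsync still has to log */
	uint64_t generation;		/* transaction that last wrote it */
};

/* Fold 'next' (which directly follows 'em') into 'em'. */
static void merge_forward(struct map *em, const struct map *next)
{
	em->len += next->len;
	if (next->generation > em->generation) {
		/* the newer neighbour dirties the whole merged range */
		em->generation = next->generation;
		em->mod_start = em->start;
		em->mod_len = em->len;
	}
}

int main(void)
{
	struct map a = { 0,    4096, 0,    4096, 10 };
	struct map b = { 4096, 4096, 4096, 4096, 12 };

	merge_forward(&a, &b);
	printf("gen=%llu mod=[%llu,+%llu)\n",
	       (unsigned long long)a.generation,
	       (unsigned long long)a.mod_start,
	       (unsigned long long)a.mod_len);
	return 0;
}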
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 1195f09761fe..679225555f7b 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #define EXTENT_FLAG_COMPRESSED 1 | 13 | #define EXTENT_FLAG_COMPRESSED 1 |
| 14 | #define EXTENT_FLAG_VACANCY 2 /* no file extent item found */ | 14 | #define EXTENT_FLAG_VACANCY 2 /* no file extent item found */ |
| 15 | #define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */ | 15 | #define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */ |
| 16 | #define EXTENT_FLAG_LOGGING 4 /* Logging this extent */ | ||
| 16 | 17 | ||
| 17 | struct extent_map { | 18 | struct extent_map { |
| 18 | struct rb_node rb_node; | 19 | struct rb_node rb_node; |
| @@ -20,18 +21,23 @@ struct extent_map { | |||
| 20 | /* all of these are in bytes */ | 21 | /* all of these are in bytes */ |
| 21 | u64 start; | 22 | u64 start; |
| 22 | u64 len; | 23 | u64 len; |
| 24 | u64 mod_start; | ||
| 25 | u64 mod_len; | ||
| 23 | u64 orig_start; | 26 | u64 orig_start; |
| 24 | u64 block_start; | 27 | u64 block_start; |
| 25 | u64 block_len; | 28 | u64 block_len; |
| 29 | u64 generation; | ||
| 26 | unsigned long flags; | 30 | unsigned long flags; |
| 27 | struct block_device *bdev; | 31 | struct block_device *bdev; |
| 28 | atomic_t refs; | 32 | atomic_t refs; |
| 29 | unsigned int in_tree; | 33 | unsigned int in_tree; |
| 30 | unsigned int compress_type; | 34 | unsigned int compress_type; |
| 35 | struct list_head list; | ||
| 31 | }; | 36 | }; |
| 32 | 37 | ||
| 33 | struct extent_map_tree { | 38 | struct extent_map_tree { |
| 34 | struct rb_root map; | 39 | struct rb_root map; |
| 40 | struct list_head modified_extents; | ||
| 35 | rwlock_t lock; | 41 | rwlock_t lock; |
| 36 | }; | 42 | }; |
| 37 | 43 | ||
| @@ -60,7 +66,7 @@ struct extent_map *alloc_extent_map(void); | |||
| 60 | void free_extent_map(struct extent_map *em); | 66 | void free_extent_map(struct extent_map *em); |
| 61 | int __init extent_map_init(void); | 67 | int __init extent_map_init(void); |
| 62 | void extent_map_exit(void); | 68 | void extent_map_exit(void); |
| 63 | int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len); | 69 | int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); |
| 64 | struct extent_map *search_extent_mapping(struct extent_map_tree *tree, | 70 | struct extent_map *search_extent_mapping(struct extent_map_tree *tree, |
| 65 | u64 start, u64 len); | 71 | u64 start, u64 len); |
| 66 | #endif | 72 | #endif |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 857d93cd01dc..1ad08e4e4a15 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
| @@ -25,11 +25,12 @@ | |||
| 25 | #include "transaction.h" | 25 | #include "transaction.h" |
| 26 | #include "print-tree.h" | 26 | #include "print-tree.h" |
| 27 | 27 | ||
| 28 | #define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ | 28 | #define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \ |
| 29 | sizeof(struct btrfs_item) * 2) / \ | 29 | sizeof(struct btrfs_item) * 2) / \ |
| 30 | size) - 1)) | 30 | size) - 1)) |
| 31 | 31 | ||
| 32 | #define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE)) | 32 | #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ |
| 33 | PAGE_CACHE_SIZE)) | ||
| 33 | 34 | ||
| 34 | #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ | 35 | #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ |
| 35 | sizeof(struct btrfs_ordered_sum)) / \ | 36 | sizeof(struct btrfs_ordered_sum)) / \ |
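
The MAX_CSUM_ITEMS hunk above forces both operands of the comparison onto one explicit type: the item-count expression is cast to unsigned long and the outer min() becomes min_t(u32, ...). The kernel's min() complains at build time when operand types differ, and mixed signed/unsigned comparisons can silently pick the wrong value. The userspace snippet below illustrates that class of problem and the typed-min fix; MIN_T is a stand-in for the kernel macro and the values are made up, not taken from btrfs.

#include <stdio.h>

/* Stand-in for the kernel's min_t(): force both sides to one type first. */
#define MIN_T(type, a, b) \
	((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int csum_items = -1;		/* e.g. an expression that went negative */
	unsigned long limit = 4096;	/* e.g. PAGE_CACHE_SIZE */

	/* Mixed-type comparison: -1 converts to a huge unsigned value, so the
	 * comparison goes the wrong way and we get 4096 back (the compiler
	 * will warn about exactly this). */
	printf("naive min : %lu\n",
	       (csum_items < limit) ? (unsigned long)csum_items : limit);

	/* Forcing a common signed-capable type gives the expected answer. */
	printf("typed min : %ld\n", MIN_T(long, csum_items, limit));
	return 0;
}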
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f6b40e86121b..9ab1bed88116 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include "tree-log.h" | 39 | #include "tree-log.h" |
| 40 | #include "locking.h" | 40 | #include "locking.h" |
| 41 | #include "compat.h" | 41 | #include "compat.h" |
| 42 | #include "volumes.h" | ||
| 42 | 43 | ||
| 43 | /* | 44 | /* |
| 44 | * when auto defrag is enabled we | 45 | * when auto defrag is enabled we |
| @@ -458,14 +459,15 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, | |||
| 458 | * this drops all the extents in the cache that intersect the range | 459 | * this drops all the extents in the cache that intersect the range |
| 459 | * [start, end]. Existing extents are split as required. | 460 | * [start, end]. Existing extents are split as required. |
| 460 | */ | 461 | */ |
| 461 | int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | 462 | void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, |
| 462 | int skip_pinned) | 463 | int skip_pinned) |
| 463 | { | 464 | { |
| 464 | struct extent_map *em; | 465 | struct extent_map *em; |
| 465 | struct extent_map *split = NULL; | 466 | struct extent_map *split = NULL; |
| 466 | struct extent_map *split2 = NULL; | 467 | struct extent_map *split2 = NULL; |
| 467 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 468 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| 468 | u64 len = end - start + 1; | 469 | u64 len = end - start + 1; |
| 470 | u64 gen; | ||
| 469 | int ret; | 471 | int ret; |
| 470 | int testend = 1; | 472 | int testend = 1; |
| 471 | unsigned long flags; | 473 | unsigned long flags; |
| @@ -477,11 +479,14 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 477 | testend = 0; | 479 | testend = 0; |
| 478 | } | 480 | } |
| 479 | while (1) { | 481 | while (1) { |
| 482 | int no_splits = 0; | ||
| 483 | |||
| 480 | if (!split) | 484 | if (!split) |
| 481 | split = alloc_extent_map(); | 485 | split = alloc_extent_map(); |
| 482 | if (!split2) | 486 | if (!split2) |
| 483 | split2 = alloc_extent_map(); | 487 | split2 = alloc_extent_map(); |
| 484 | BUG_ON(!split || !split2); /* -ENOMEM */ | 488 | if (!split || !split2) |
| 489 | no_splits = 1; | ||
| 485 | 490 | ||
| 486 | write_lock(&em_tree->lock); | 491 | write_lock(&em_tree->lock); |
| 487 | em = lookup_extent_mapping(em_tree, start, len); | 492 | em = lookup_extent_mapping(em_tree, start, len); |
| @@ -490,6 +495,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 490 | break; | 495 | break; |
| 491 | } | 496 | } |
| 492 | flags = em->flags; | 497 | flags = em->flags; |
| 498 | gen = em->generation; | ||
| 493 | if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { | 499 | if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { |
| 494 | if (testend && em->start + em->len >= start + len) { | 500 | if (testend && em->start + em->len >= start + len) { |
| 495 | free_extent_map(em); | 501 | free_extent_map(em); |
| @@ -506,6 +512,8 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 506 | compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | 512 | compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
| 507 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); | 513 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); |
| 508 | remove_extent_mapping(em_tree, em); | 514 | remove_extent_mapping(em_tree, em); |
| 515 | if (no_splits) | ||
| 516 | goto next; | ||
| 509 | 517 | ||
| 510 | if (em->block_start < EXTENT_MAP_LAST_BYTE && | 518 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
| 511 | em->start < start) { | 519 | em->start < start) { |
| @@ -518,12 +526,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 518 | split->block_len = em->block_len; | 526 | split->block_len = em->block_len; |
| 519 | else | 527 | else |
| 520 | split->block_len = split->len; | 528 | split->block_len = split->len; |
| 521 | 529 | split->generation = gen; | |
| 522 | split->bdev = em->bdev; | 530 | split->bdev = em->bdev; |
| 523 | split->flags = flags; | 531 | split->flags = flags; |
| 524 | split->compress_type = em->compress_type; | 532 | split->compress_type = em->compress_type; |
| 525 | ret = add_extent_mapping(em_tree, split); | 533 | ret = add_extent_mapping(em_tree, split); |
| 526 | BUG_ON(ret); /* Logic error */ | 534 | BUG_ON(ret); /* Logic error */ |
| 535 | list_move(&split->list, &em_tree->modified_extents); | ||
| 527 | free_extent_map(split); | 536 | free_extent_map(split); |
| 528 | split = split2; | 537 | split = split2; |
| 529 | split2 = NULL; | 538 | split2 = NULL; |
| @@ -537,6 +546,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 537 | split->bdev = em->bdev; | 546 | split->bdev = em->bdev; |
| 538 | split->flags = flags; | 547 | split->flags = flags; |
| 539 | split->compress_type = em->compress_type; | 548 | split->compress_type = em->compress_type; |
| 549 | split->generation = gen; | ||
| 540 | 550 | ||
| 541 | if (compressed) { | 551 | if (compressed) { |
| 542 | split->block_len = em->block_len; | 552 | split->block_len = em->block_len; |
| @@ -550,9 +560,11 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 550 | 560 | ||
| 551 | ret = add_extent_mapping(em_tree, split); | 561 | ret = add_extent_mapping(em_tree, split); |
| 552 | BUG_ON(ret); /* Logic error */ | 562 | BUG_ON(ret); /* Logic error */ |
| 563 | list_move(&split->list, &em_tree->modified_extents); | ||
| 553 | free_extent_map(split); | 564 | free_extent_map(split); |
| 554 | split = NULL; | 565 | split = NULL; |
| 555 | } | 566 | } |
| 567 | next: | ||
| 556 | write_unlock(&em_tree->lock); | 568 | write_unlock(&em_tree->lock); |
| 557 | 569 | ||
| 558 | /* once for us */ | 570 | /* once for us */ |
| @@ -564,7 +576,6 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 564 | free_extent_map(split); | 576 | free_extent_map(split); |
| 565 | if (split2) | 577 | if (split2) |
| 566 | free_extent_map(split2); | 578 | free_extent_map(split2); |
| 567 | return 0; | ||
| 568 | } | 579 | } |
| 569 | 580 | ||
| 570 | /* | 581 | /* |
| @@ -576,13 +587,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 576 | * it is either truncated or split. Anything entirely inside the range | 587 | * it is either truncated or split. Anything entirely inside the range |
| 577 | * is deleted from the tree. | 588 | * is deleted from the tree. |
| 578 | */ | 589 | */ |
| 579 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | 590 | int __btrfs_drop_extents(struct btrfs_trans_handle *trans, |
| 580 | u64 start, u64 end, u64 *hint_byte, int drop_cache) | 591 | struct btrfs_root *root, struct inode *inode, |
| 592 | struct btrfs_path *path, u64 start, u64 end, | ||
| 593 | u64 *drop_end, int drop_cache) | ||
| 581 | { | 594 | { |
| 582 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 583 | struct extent_buffer *leaf; | 595 | struct extent_buffer *leaf; |
| 584 | struct btrfs_file_extent_item *fi; | 596 | struct btrfs_file_extent_item *fi; |
| 585 | struct btrfs_path *path; | ||
| 586 | struct btrfs_key key; | 597 | struct btrfs_key key; |
| 587 | struct btrfs_key new_key; | 598 | struct btrfs_key new_key; |
| 588 | u64 ino = btrfs_ino(inode); | 599 | u64 ino = btrfs_ino(inode); |
| @@ -597,14 +608,12 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | |||
| 597 | int recow; | 608 | int recow; |
| 598 | int ret; | 609 | int ret; |
| 599 | int modify_tree = -1; | 610 | int modify_tree = -1; |
| 611 | int update_refs = (root->ref_cows || root == root->fs_info->tree_root); | ||
| 612 | int found = 0; | ||
| 600 | 613 | ||
| 601 | if (drop_cache) | 614 | if (drop_cache) |
| 602 | btrfs_drop_extent_cache(inode, start, end - 1, 0); | 615 | btrfs_drop_extent_cache(inode, start, end - 1, 0); |
| 603 | 616 | ||
| 604 | path = btrfs_alloc_path(); | ||
| 605 | if (!path) | ||
| 606 | return -ENOMEM; | ||
| 607 | |||
| 608 | if (start >= BTRFS_I(inode)->disk_i_size) | 617 | if (start >= BTRFS_I(inode)->disk_i_size) |
| 609 | modify_tree = 0; | 618 | modify_tree = 0; |
| 610 | 619 | ||
| @@ -666,6 +675,7 @@ next_slot: | |||
| 666 | goto next_slot; | 675 | goto next_slot; |
| 667 | } | 676 | } |
| 668 | 677 | ||
| 678 | found = 1; | ||
| 669 | search_start = max(key.offset, start); | 679 | search_start = max(key.offset, start); |
| 670 | if (recow || !modify_tree) { | 680 | if (recow || !modify_tree) { |
| 671 | modify_tree = -1; | 681 | modify_tree = -1; |
| @@ -707,14 +717,13 @@ next_slot: | |||
| 707 | extent_end - start); | 717 | extent_end - start); |
| 708 | btrfs_mark_buffer_dirty(leaf); | 718 | btrfs_mark_buffer_dirty(leaf); |
| 709 | 719 | ||
| 710 | if (disk_bytenr > 0) { | 720 | if (update_refs && disk_bytenr > 0) { |
| 711 | ret = btrfs_inc_extent_ref(trans, root, | 721 | ret = btrfs_inc_extent_ref(trans, root, |
| 712 | disk_bytenr, num_bytes, 0, | 722 | disk_bytenr, num_bytes, 0, |
| 713 | root->root_key.objectid, | 723 | root->root_key.objectid, |
| 714 | new_key.objectid, | 724 | new_key.objectid, |
| 715 | start - extent_offset, 0); | 725 | start - extent_offset, 0); |
| 716 | BUG_ON(ret); /* -ENOMEM */ | 726 | BUG_ON(ret); /* -ENOMEM */ |
| 717 | *hint_byte = disk_bytenr; | ||
| 718 | } | 727 | } |
| 719 | key.offset = start; | 728 | key.offset = start; |
| 720 | } | 729 | } |
| @@ -734,10 +743,8 @@ next_slot: | |||
| 734 | btrfs_set_file_extent_num_bytes(leaf, fi, | 743 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 735 | extent_end - end); | 744 | extent_end - end); |
| 736 | btrfs_mark_buffer_dirty(leaf); | 745 | btrfs_mark_buffer_dirty(leaf); |
| 737 | if (disk_bytenr > 0) { | 746 | if (update_refs && disk_bytenr > 0) |
| 738 | inode_sub_bytes(inode, end - key.offset); | 747 | inode_sub_bytes(inode, end - key.offset); |
| 739 | *hint_byte = disk_bytenr; | ||
| 740 | } | ||
| 741 | break; | 748 | break; |
| 742 | } | 749 | } |
| 743 | 750 | ||
| @@ -753,10 +760,8 @@ next_slot: | |||
| 753 | btrfs_set_file_extent_num_bytes(leaf, fi, | 760 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 754 | start - key.offset); | 761 | start - key.offset); |
| 755 | btrfs_mark_buffer_dirty(leaf); | 762 | btrfs_mark_buffer_dirty(leaf); |
| 756 | if (disk_bytenr > 0) { | 763 | if (update_refs && disk_bytenr > 0) |
| 757 | inode_sub_bytes(inode, extent_end - start); | 764 | inode_sub_bytes(inode, extent_end - start); |
| 758 | *hint_byte = disk_bytenr; | ||
| 759 | } | ||
| 760 | if (end == extent_end) | 765 | if (end == extent_end) |
| 761 | break; | 766 | break; |
| 762 | 767 | ||
| @@ -777,12 +782,13 @@ next_slot: | |||
| 777 | del_nr++; | 782 | del_nr++; |
| 778 | } | 783 | } |
| 779 | 784 | ||
| 780 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | 785 | if (update_refs && |
| 786 | extent_type == BTRFS_FILE_EXTENT_INLINE) { | ||
| 781 | inode_sub_bytes(inode, | 787 | inode_sub_bytes(inode, |
| 782 | extent_end - key.offset); | 788 | extent_end - key.offset); |
| 783 | extent_end = ALIGN(extent_end, | 789 | extent_end = ALIGN(extent_end, |
| 784 | root->sectorsize); | 790 | root->sectorsize); |
| 785 | } else if (disk_bytenr > 0) { | 791 | } else if (update_refs && disk_bytenr > 0) { |
| 786 | ret = btrfs_free_extent(trans, root, | 792 | ret = btrfs_free_extent(trans, root, |
| 787 | disk_bytenr, num_bytes, 0, | 793 | disk_bytenr, num_bytes, 0, |
| 788 | root->root_key.objectid, | 794 | root->root_key.objectid, |
| @@ -791,7 +797,6 @@ next_slot: | |||
| 791 | BUG_ON(ret); /* -ENOMEM */ | 797 | BUG_ON(ret); /* -ENOMEM */ |
| 792 | inode_sub_bytes(inode, | 798 | inode_sub_bytes(inode, |
| 793 | extent_end - key.offset); | 799 | extent_end - key.offset); |
| 794 | *hint_byte = disk_bytenr; | ||
| 795 | } | 800 | } |
| 796 | 801 | ||
| 797 | if (end == extent_end) | 802 | if (end == extent_end) |
| @@ -806,7 +811,7 @@ next_slot: | |||
| 806 | del_nr); | 811 | del_nr); |
| 807 | if (ret) { | 812 | if (ret) { |
| 808 | btrfs_abort_transaction(trans, root, ret); | 813 | btrfs_abort_transaction(trans, root, ret); |
| 809 | goto out; | 814 | break; |
| 810 | } | 815 | } |
| 811 | 816 | ||
| 812 | del_nr = 0; | 817 | del_nr = 0; |
| @@ -825,7 +830,24 @@ next_slot: | |||
| 825 | btrfs_abort_transaction(trans, root, ret); | 830 | btrfs_abort_transaction(trans, root, ret); |
| 826 | } | 831 | } |
| 827 | 832 | ||
| 828 | out: | 833 | if (drop_end) |
| 834 | *drop_end = found ? min(end, extent_end) : end; | ||
| 835 | btrfs_release_path(path); | ||
| 836 | return ret; | ||
| 837 | } | ||
| 838 | |||
| 839 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, | ||
| 840 | struct btrfs_root *root, struct inode *inode, u64 start, | ||
| 841 | u64 end, int drop_cache) | ||
| 842 | { | ||
| 843 | struct btrfs_path *path; | ||
| 844 | int ret; | ||
| 845 | |||
| 846 | path = btrfs_alloc_path(); | ||
| 847 | if (!path) | ||
| 848 | return -ENOMEM; | ||
| 849 | ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL, | ||
| 850 | drop_cache); | ||
| 829 | btrfs_free_path(path); | 851 | btrfs_free_path(path); |
| 830 | return ret; | 852 | return ret; |
| 831 | } | 853 | } |
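
btrfs_drop_extents() is reduced above to a thin wrapper that allocates the path, calls the new __btrfs_drop_extents() and frees the path again; callers that already hold a path, such as the hole-punching code added later in this file, call the worker directly and use *drop_end to learn how far the drop actually progressed. A hedged sketch of that wrapper-plus-worker split in plain C, with drop_range() and __drop_range() invented for the illustration:

#include <stdio.h>
#include <stdlib.h>

struct path { int dummy; };	/* stand-in for struct btrfs_path */

/* Worker: uses a caller-supplied path and reports how far it got. */
static int __drop_range(struct path *path, unsigned long start,
			unsigned long end, unsigned long *drop_end)
{
	(void)path;
	(void)start;
	/* pretend we processed the whole range */
	if (drop_end)
		*drop_end = end;
	return 0;
}

/* Wrapper: owns the path for callers that don't have one of their own. */
static int drop_range(unsigned long start, unsigned long end)
{
	struct path *path = malloc(sizeof(*path));
	int ret;

	if (!path)
		return -12;	/* -ENOMEM */
	ret = __drop_range(path, start, end, NULL);
	free(path);
	return ret;
}

int main(void)
{
	printf("drop_range returned %d\n", drop_range(0, 4096));
	return 0;
}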
| @@ -892,8 +914,6 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | |||
| 892 | int ret; | 914 | int ret; |
| 893 | u64 ino = btrfs_ino(inode); | 915 | u64 ino = btrfs_ino(inode); |
| 894 | 916 | ||
| 895 | btrfs_drop_extent_cache(inode, start, end - 1, 0); | ||
| 896 | |||
| 897 | path = btrfs_alloc_path(); | 917 | path = btrfs_alloc_path(); |
| 898 | if (!path) | 918 | if (!path) |
| 899 | return -ENOMEM; | 919 | return -ENOMEM; |
| @@ -935,12 +955,16 @@ again: | |||
| 935 | btrfs_set_item_key_safe(trans, root, path, &new_key); | 955 | btrfs_set_item_key_safe(trans, root, path, &new_key); |
| 936 | fi = btrfs_item_ptr(leaf, path->slots[0], | 956 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| 937 | struct btrfs_file_extent_item); | 957 | struct btrfs_file_extent_item); |
| 958 | btrfs_set_file_extent_generation(leaf, fi, | ||
| 959 | trans->transid); | ||
| 938 | btrfs_set_file_extent_num_bytes(leaf, fi, | 960 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 939 | extent_end - end); | 961 | extent_end - end); |
| 940 | btrfs_set_file_extent_offset(leaf, fi, | 962 | btrfs_set_file_extent_offset(leaf, fi, |
| 941 | end - orig_offset); | 963 | end - orig_offset); |
| 942 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, | 964 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, |
| 943 | struct btrfs_file_extent_item); | 965 | struct btrfs_file_extent_item); |
| 966 | btrfs_set_file_extent_generation(leaf, fi, | ||
| 967 | trans->transid); | ||
| 944 | btrfs_set_file_extent_num_bytes(leaf, fi, | 968 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 945 | end - other_start); | 969 | end - other_start); |
| 946 | btrfs_mark_buffer_dirty(leaf); | 970 | btrfs_mark_buffer_dirty(leaf); |
| @@ -958,12 +982,16 @@ again: | |||
| 958 | struct btrfs_file_extent_item); | 982 | struct btrfs_file_extent_item); |
| 959 | btrfs_set_file_extent_num_bytes(leaf, fi, | 983 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 960 | start - key.offset); | 984 | start - key.offset); |
| 985 | btrfs_set_file_extent_generation(leaf, fi, | ||
| 986 | trans->transid); | ||
| 961 | path->slots[0]++; | 987 | path->slots[0]++; |
| 962 | new_key.offset = start; | 988 | new_key.offset = start; |
| 963 | btrfs_set_item_key_safe(trans, root, path, &new_key); | 989 | btrfs_set_item_key_safe(trans, root, path, &new_key); |
| 964 | 990 | ||
| 965 | fi = btrfs_item_ptr(leaf, path->slots[0], | 991 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| 966 | struct btrfs_file_extent_item); | 992 | struct btrfs_file_extent_item); |
| 993 | btrfs_set_file_extent_generation(leaf, fi, | ||
| 994 | trans->transid); | ||
| 967 | btrfs_set_file_extent_num_bytes(leaf, fi, | 995 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 968 | other_end - start); | 996 | other_end - start); |
| 969 | btrfs_set_file_extent_offset(leaf, fi, | 997 | btrfs_set_file_extent_offset(leaf, fi, |
| @@ -991,12 +1019,14 @@ again: | |||
| 991 | leaf = path->nodes[0]; | 1019 | leaf = path->nodes[0]; |
| 992 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, | 1020 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, |
| 993 | struct btrfs_file_extent_item); | 1021 | struct btrfs_file_extent_item); |
| 1022 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | ||
| 994 | btrfs_set_file_extent_num_bytes(leaf, fi, | 1023 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 995 | split - key.offset); | 1024 | split - key.offset); |
| 996 | 1025 | ||
| 997 | fi = btrfs_item_ptr(leaf, path->slots[0], | 1026 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| 998 | struct btrfs_file_extent_item); | 1027 | struct btrfs_file_extent_item); |
| 999 | 1028 | ||
| 1029 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | ||
| 1000 | btrfs_set_file_extent_offset(leaf, fi, split - orig_offset); | 1030 | btrfs_set_file_extent_offset(leaf, fi, split - orig_offset); |
| 1001 | btrfs_set_file_extent_num_bytes(leaf, fi, | 1031 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 1002 | extent_end - split); | 1032 | extent_end - split); |
| @@ -1056,12 +1086,14 @@ again: | |||
| 1056 | struct btrfs_file_extent_item); | 1086 | struct btrfs_file_extent_item); |
| 1057 | btrfs_set_file_extent_type(leaf, fi, | 1087 | btrfs_set_file_extent_type(leaf, fi, |
| 1058 | BTRFS_FILE_EXTENT_REG); | 1088 | BTRFS_FILE_EXTENT_REG); |
| 1089 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | ||
| 1059 | btrfs_mark_buffer_dirty(leaf); | 1090 | btrfs_mark_buffer_dirty(leaf); |
| 1060 | } else { | 1091 | } else { |
| 1061 | fi = btrfs_item_ptr(leaf, del_slot - 1, | 1092 | fi = btrfs_item_ptr(leaf, del_slot - 1, |
| 1062 | struct btrfs_file_extent_item); | 1093 | struct btrfs_file_extent_item); |
| 1063 | btrfs_set_file_extent_type(leaf, fi, | 1094 | btrfs_set_file_extent_type(leaf, fi, |
| 1064 | BTRFS_FILE_EXTENT_REG); | 1095 | BTRFS_FILE_EXTENT_REG); |
| 1096 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | ||
| 1065 | btrfs_set_file_extent_num_bytes(leaf, fi, | 1097 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 1066 | extent_end - key.offset); | 1098 | extent_end - key.offset); |
| 1067 | btrfs_mark_buffer_dirty(leaf); | 1099 | btrfs_mark_buffer_dirty(leaf); |
| @@ -1173,8 +1205,8 @@ again: | |||
| 1173 | 1205 | ||
| 1174 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, | 1206 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, |
| 1175 | last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | | 1207 | last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1176 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, | 1208 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, |
| 1177 | GFP_NOFS); | 1209 | 0, 0, &cached_state, GFP_NOFS); |
| 1178 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | 1210 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
| 1179 | start_pos, last_pos - 1, &cached_state, | 1211 | start_pos, last_pos - 1, &cached_state, |
| 1180 | GFP_NOFS); | 1212 | GFP_NOFS); |
| @@ -1514,16 +1546,24 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 1514 | 1546 | ||
| 1515 | trace_btrfs_sync_file(file, datasync); | 1547 | trace_btrfs_sync_file(file, datasync); |
| 1516 | 1548 | ||
| 1549 | /* | ||
| 1550 | * We write the dirty pages in the range and wait until they complete | ||
| 1551 | * outside of the ->i_mutex. That way multiple tasks can flush the | ||
| 1552 | * dirty pages in parallel, which improves performance. | ||
| 1553 | */ | ||
| 1554 | ret = filemap_write_and_wait_range(inode->i_mapping, start, end); | ||
| 1555 | if (ret) | ||
| 1556 | return ret; | ||
| 1557 | |||
| 1517 | mutex_lock(&inode->i_mutex); | 1558 | mutex_lock(&inode->i_mutex); |
| 1518 | 1559 | ||
| 1519 | /* | 1560 | /* |
| 1520 | * we wait first, since the writeback may change the inode, also wait | 1561 | * We flush the dirty pages again so that no dirty pages in the |
| 1521 | * ordered range does a filemape_write_and_wait_range which is why we | 1562 | * range are left behind. |
| 1522 | * don't do it above like other file systems. | ||
| 1523 | */ | 1563 | */ |
| 1524 | root->log_batch++; | 1564 | atomic_inc(&root->log_batch); |
| 1525 | btrfs_wait_ordered_range(inode, start, end); | 1565 | btrfs_wait_ordered_range(inode, start, end); |
| 1526 | root->log_batch++; | 1566 | atomic_inc(&root->log_batch); |
| 1527 | 1567 | ||
| 1528 | /* | 1568 | /* |
| 1529 | * check the transaction that last modified this inode | 1569 | * check the transaction that last modified this inode |
| @@ -1544,6 +1584,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 1544 | BTRFS_I(inode)->last_trans <= | 1584 | BTRFS_I(inode)->last_trans <= |
| 1545 | root->fs_info->last_trans_committed) { | 1585 | root->fs_info->last_trans_committed) { |
| 1546 | BTRFS_I(inode)->last_trans = 0; | 1586 | BTRFS_I(inode)->last_trans = 0; |
| 1587 | |||
| 1588 | /* | ||
| 1589 | * We've had everything committed since the last time we were | ||
| 1590 | * modified so clear this flag in case it was set for whatever | ||
| 1591 | * reason, it's no longer relevant. | ||
| 1592 | */ | ||
| 1593 | clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 1594 | &BTRFS_I(inode)->runtime_flags); | ||
| 1547 | mutex_unlock(&inode->i_mutex); | 1595 | mutex_unlock(&inode->i_mutex); |
| 1548 | goto out; | 1596 | goto out; |
| 1549 | } | 1597 | } |
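
Two fsync changes land in the hunks above. The dirty pages are now written and waited on with filemap_write_and_wait_range() before i_mutex is taken, so several fsync callers can flush in parallel (log_batch becomes an atomic for the same reason), and when the inode's last modification is already covered by the last committed transaction the call returns early after clearing BTRFS_INODE_NEEDS_FULL_SYNC. Below is a rough userspace sketch of the "expensive flush outside the lock, cheap recheck inside" shape; fsync_inode(), flush_range() and the two counters are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_trans = 7;		/* transaction that last touched the inode */
static uint64_t last_committed = 9;	/* last transaction committed to disk */

static void flush_range(void)
{
	/* stands in for filemap_write_and_wait_range(): may run in parallel
	 * with other fsync callers because no lock is held yet */
	puts("flushing dirty pages");
}

static int fsync_inode(void)
{
	flush_range();			/* 1. expensive part, outside the lock */

	pthread_mutex_lock(&i_mutex);	/* 2. now serialize the bookkeeping */
	if (last_trans <= last_committed) {
		/* nothing newer than the last commit: nothing to log */
		pthread_mutex_unlock(&i_mutex);
		return 0;
	}
	/* ... start a transaction / tree-log the inode here ... */
	pthread_mutex_unlock(&i_mutex);
	return 0;
}

int main(void)
{
	printf("fsync -> %d\n", fsync_inode());
	return 0;
}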
| @@ -1615,6 +1663,324 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 1615 | return 0; | 1663 | return 0; |
| 1616 | } | 1664 | } |
| 1617 | 1665 | ||
| 1666 | static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, | ||
| 1667 | int slot, u64 start, u64 end) | ||
| 1668 | { | ||
| 1669 | struct btrfs_file_extent_item *fi; | ||
| 1670 | struct btrfs_key key; | ||
| 1671 | |||
| 1672 | if (slot < 0 || slot >= btrfs_header_nritems(leaf)) | ||
| 1673 | return 0; | ||
| 1674 | |||
| 1675 | btrfs_item_key_to_cpu(leaf, &key, slot); | ||
| 1676 | if (key.objectid != btrfs_ino(inode) || | ||
| 1677 | key.type != BTRFS_EXTENT_DATA_KEY) | ||
| 1678 | return 0; | ||
| 1679 | |||
| 1680 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | ||
| 1681 | |||
| 1682 | if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) | ||
| 1683 | return 0; | ||
| 1684 | |||
| 1685 | if (btrfs_file_extent_disk_bytenr(leaf, fi)) | ||
| 1686 | return 0; | ||
| 1687 | |||
| 1688 | if (key.offset == end) | ||
| 1689 | return 1; | ||
| 1690 | if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) | ||
| 1691 | return 1; | ||
| 1692 | return 0; | ||
| 1693 | } | ||
| 1694 | |||
| 1695 | static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, | ||
| 1696 | struct btrfs_path *path, u64 offset, u64 end) | ||
| 1697 | { | ||
| 1698 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 1699 | struct extent_buffer *leaf; | ||
| 1700 | struct btrfs_file_extent_item *fi; | ||
| 1701 | struct extent_map *hole_em; | ||
| 1702 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
| 1703 | struct btrfs_key key; | ||
| 1704 | int ret; | ||
| 1705 | |||
| 1706 | key.objectid = btrfs_ino(inode); | ||
| 1707 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
| 1708 | key.offset = offset; | ||
| 1709 | |||
| 1710 | |||
| 1711 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | ||
| 1712 | if (ret < 0) | ||
| 1713 | return ret; | ||
| 1714 | BUG_ON(!ret); | ||
| 1715 | |||
| 1716 | leaf = path->nodes[0]; | ||
| 1717 | if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { | ||
| 1718 | u64 num_bytes; | ||
| 1719 | |||
| 1720 | path->slots[0]--; | ||
| 1721 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 1722 | struct btrfs_file_extent_item); | ||
| 1723 | num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + | ||
| 1724 | end - offset; | ||
| 1725 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | ||
| 1726 | btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); | ||
| 1727 | btrfs_set_file_extent_offset(leaf, fi, 0); | ||
| 1728 | btrfs_mark_buffer_dirty(leaf); | ||
| 1729 | goto out; | ||
| 1730 | } | ||
| 1731 | |||
| 1732 | if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) { | ||
| 1733 | u64 num_bytes; | ||
| 1734 | |||
| 1735 | path->slots[0]++; | ||
| 1736 | key.offset = offset; | ||
| 1737 | btrfs_set_item_key_safe(trans, root, path, &key); | ||
| 1738 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 1739 | struct btrfs_file_extent_item); | ||
| 1740 | num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end - | ||
| 1741 | offset; | ||
| 1742 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | ||
| 1743 | btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); | ||
| 1744 | btrfs_set_file_extent_offset(leaf, fi, 0); | ||
| 1745 | btrfs_mark_buffer_dirty(leaf); | ||
| 1746 | goto out; | ||
| 1747 | } | ||
| 1748 | btrfs_release_path(path); | ||
| 1749 | |||
| 1750 | ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, | ||
| 1751 | 0, 0, end - offset, 0, end - offset, | ||
| 1752 | 0, 0, 0); | ||
| 1753 | if (ret) | ||
| 1754 | return ret; | ||
| 1755 | |||
| 1756 | out: | ||
| 1757 | btrfs_release_path(path); | ||
| 1758 | |||
| 1759 | hole_em = alloc_extent_map(); | ||
| 1760 | if (!hole_em) { | ||
| 1761 | btrfs_drop_extent_cache(inode, offset, end - 1, 0); | ||
| 1762 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 1763 | &BTRFS_I(inode)->runtime_flags); | ||
| 1764 | } else { | ||
| 1765 | hole_em->start = offset; | ||
| 1766 | hole_em->len = end - offset; | ||
| 1767 | hole_em->orig_start = offset; | ||
| 1768 | |||
| 1769 | hole_em->block_start = EXTENT_MAP_HOLE; | ||
| 1770 | hole_em->block_len = 0; | ||
| 1771 | hole_em->bdev = root->fs_info->fs_devices->latest_bdev; | ||
| 1772 | hole_em->compress_type = BTRFS_COMPRESS_NONE; | ||
| 1773 | hole_em->generation = trans->transid; | ||
| 1774 | |||
| 1775 | do { | ||
| 1776 | btrfs_drop_extent_cache(inode, offset, end - 1, 0); | ||
| 1777 | write_lock(&em_tree->lock); | ||
| 1778 | ret = add_extent_mapping(em_tree, hole_em); | ||
| 1779 | if (!ret) | ||
| 1780 | list_move(&hole_em->list, | ||
| 1781 | &em_tree->modified_extents); | ||
| 1782 | write_unlock(&em_tree->lock); | ||
| 1783 | } while (ret == -EEXIST); | ||
| 1784 | free_extent_map(hole_em); | ||
| 1785 | if (ret) | ||
| 1786 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 1787 | &BTRFS_I(inode)->runtime_flags); | ||
| 1788 | } | ||
| 1789 | |||
| 1790 | return 0; | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | ||
| 1794 | { | ||
| 1795 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 1796 | struct extent_state *cached_state = NULL; | ||
| 1797 | struct btrfs_path *path; | ||
| 1798 | struct btrfs_block_rsv *rsv; | ||
| 1799 | struct btrfs_trans_handle *trans; | ||
| 1800 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; | ||
| 1801 | u64 lockstart = (offset + mask) & ~mask; | ||
| 1802 | u64 lockend = ((offset + len) & ~mask) - 1; | ||
| 1803 | u64 cur_offset = lockstart; | ||
| 1804 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); | ||
| 1805 | u64 drop_end; | ||
| 1806 | unsigned long nr; | ||
| 1807 | int ret = 0; | ||
| 1808 | int err = 0; | ||
| 1809 | bool same_page = (offset >> PAGE_CACHE_SHIFT) == | ||
| 1810 | ((offset + len) >> PAGE_CACHE_SHIFT); | ||
| 1811 | |||
| 1812 | btrfs_wait_ordered_range(inode, offset, len); | ||
| 1813 | |||
| 1814 | mutex_lock(&inode->i_mutex); | ||
| 1815 | if (offset >= inode->i_size) { | ||
| 1816 | mutex_unlock(&inode->i_mutex); | ||
| 1817 | return 0; | ||
| 1818 | } | ||
| 1819 | |||
| 1820 | /* | ||
| 1821 | * Only do this if we are in the same page and we aren't doing the | ||
| 1822 | * entire page. | ||
| 1823 | */ | ||
| 1824 | if (same_page && len < PAGE_CACHE_SIZE) { | ||
| 1825 | ret = btrfs_truncate_page(inode, offset, len, 0); | ||
| 1826 | mutex_unlock(&inode->i_mutex); | ||
| 1827 | return ret; | ||
| 1828 | } | ||
| 1829 | |||
| 1830 | /* zero back part of the first page */ | ||
| 1831 | ret = btrfs_truncate_page(inode, offset, 0, 0); | ||
| 1832 | if (ret) { | ||
| 1833 | mutex_unlock(&inode->i_mutex); | ||
| 1834 | return ret; | ||
| 1835 | } | ||
| 1836 | |||
| 1837 | /* zero the front end of the last page */ | ||
| 1838 | ret = btrfs_truncate_page(inode, offset + len, 0, 1); | ||
| 1839 | if (ret) { | ||
| 1840 | mutex_unlock(&inode->i_mutex); | ||
| 1841 | return ret; | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | if (lockend < lockstart) { | ||
| 1845 | mutex_unlock(&inode->i_mutex); | ||
| 1846 | return 0; | ||
| 1847 | } | ||
| 1848 | |||
| 1849 | while (1) { | ||
| 1850 | struct btrfs_ordered_extent *ordered; | ||
| 1851 | |||
| 1852 | truncate_pagecache_range(inode, lockstart, lockend); | ||
| 1853 | |||
| 1854 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
| 1855 | 0, &cached_state); | ||
| 1856 | ordered = btrfs_lookup_first_ordered_extent(inode, lockend); | ||
| 1857 | |||
| 1858 | /* | ||
| 1859 | * We need to make sure we have no ordered extents in this range | ||
| 1860 | * and that nobody raced in and read a page in this range; if | ||
| 1861 | * either happened, we need to try again. | ||
| 1862 | */ | ||
| 1863 | if ((!ordered || | ||
| 1864 | (ordered->file_offset + ordered->len < lockstart || | ||
| 1865 | ordered->file_offset > lockend)) && | ||
| 1866 | !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart, | ||
| 1867 | lockend, EXTENT_UPTODATE, 0, | ||
| 1868 | cached_state)) { | ||
| 1869 | if (ordered) | ||
| 1870 | btrfs_put_ordered_extent(ordered); | ||
| 1871 | break; | ||
| 1872 | } | ||
| 1873 | if (ordered) | ||
| 1874 | btrfs_put_ordered_extent(ordered); | ||
| 1875 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, | ||
| 1876 | lockend, &cached_state, GFP_NOFS); | ||
| 1877 | btrfs_wait_ordered_range(inode, lockstart, | ||
| 1878 | lockend - lockstart + 1); | ||
| 1879 | } | ||
| 1880 | |||
| 1881 | path = btrfs_alloc_path(); | ||
| 1882 | if (!path) { | ||
| 1883 | ret = -ENOMEM; | ||
| 1884 | goto out; | ||
| 1885 | } | ||
| 1886 | |||
| 1887 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); | ||
| 1888 | if (!rsv) { | ||
| 1889 | ret = -ENOMEM; | ||
| 1890 | goto out_free; | ||
| 1891 | } | ||
| 1892 | rsv->size = btrfs_calc_trunc_metadata_size(root, 1); | ||
| 1893 | rsv->failfast = 1; | ||
| 1894 | |||
| 1895 | /* | ||
| 1896 | * 1 - update the inode | ||
| 1897 | * 1 - removing the extents in the range | ||
| 1898 | * 1 - adding the hole extent | ||
| 1899 | */ | ||
| 1900 | trans = btrfs_start_transaction(root, 3); | ||
| 1901 | if (IS_ERR(trans)) { | ||
| 1902 | err = PTR_ERR(trans); | ||
| 1903 | goto out_free; | ||
| 1904 | } | ||
| 1905 | |||
| 1906 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, | ||
| 1907 | min_size); | ||
| 1908 | BUG_ON(ret); | ||
| 1909 | trans->block_rsv = rsv; | ||
| 1910 | |||
| 1911 | while (cur_offset < lockend) { | ||
| 1912 | ret = __btrfs_drop_extents(trans, root, inode, path, | ||
| 1913 | cur_offset, lockend + 1, | ||
| 1914 | &drop_end, 1); | ||
| 1915 | if (ret != -ENOSPC) | ||
| 1916 | break; | ||
| 1917 | |||
| 1918 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 1919 | |||
| 1920 | ret = fill_holes(trans, inode, path, cur_offset, drop_end); | ||
| 1921 | if (ret) { | ||
| 1922 | err = ret; | ||
| 1923 | break; | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | cur_offset = drop_end; | ||
| 1927 | |||
| 1928 | ret = btrfs_update_inode(trans, root, inode); | ||
| 1929 | if (ret) { | ||
| 1930 | err = ret; | ||
| 1931 | break; | ||
| 1932 | } | ||
| 1933 | |||
| 1934 | nr = trans->blocks_used; | ||
| 1935 | btrfs_end_transaction(trans, root); | ||
| 1936 | btrfs_btree_balance_dirty(root, nr); | ||
| 1937 | |||
| 1938 | trans = btrfs_start_transaction(root, 3); | ||
| 1939 | if (IS_ERR(trans)) { | ||
| 1940 | ret = PTR_ERR(trans); | ||
| 1941 | trans = NULL; | ||
| 1942 | break; | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, | ||
| 1946 | rsv, min_size); | ||
| 1947 | BUG_ON(ret); /* shouldn't happen */ | ||
| 1948 | trans->block_rsv = rsv; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | if (ret) { | ||
| 1952 | err = ret; | ||
| 1953 | goto out_trans; | ||
| 1954 | } | ||
| 1955 | |||
| 1956 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 1957 | ret = fill_holes(trans, inode, path, cur_offset, drop_end); | ||
| 1958 | if (ret) { | ||
| 1959 | err = ret; | ||
| 1960 | goto out_trans; | ||
| 1961 | } | ||
| 1962 | |||
| 1963 | out_trans: | ||
| 1964 | if (!trans) | ||
| 1965 | goto out_free; | ||
| 1966 | |||
| 1967 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 1968 | ret = btrfs_update_inode(trans, root, inode); | ||
| 1969 | nr = trans->blocks_used; | ||
| 1970 | btrfs_end_transaction(trans, root); | ||
| 1971 | btrfs_btree_balance_dirty(root, nr); | ||
| 1972 | out_free: | ||
| 1973 | btrfs_free_path(path); | ||
| 1974 | btrfs_free_block_rsv(root, rsv); | ||
| 1975 | out: | ||
| 1976 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
| 1977 | &cached_state, GFP_NOFS); | ||
| 1978 | mutex_unlock(&inode->i_mutex); | ||
| 1979 | if (ret && !err) | ||
| 1980 | err = ret; | ||
| 1981 | return err; | ||
| 1982 | } | ||
| 1983 | |||
| 1618 | static long btrfs_fallocate(struct file *file, int mode, | 1984 | static long btrfs_fallocate(struct file *file, int mode, |
| 1619 | loff_t offset, loff_t len) | 1985 | loff_t offset, loff_t len) |
| 1620 | { | 1986 | { |
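
The new btrfs_punch_hole() above only drops whole sectors: lockstart rounds the start of the hole up to the next sector boundary, lockend rounds the end down (minus one, since the locked range is inclusive), the partial sectors at either edge are handled by zeroing pages with btrfs_truncate_page(), and a request that stays within a single page short-circuits to one truncate-page call. The alignment arithmetic in userspace, assuming a 4096-byte sector for the example:

#include <stdio.h>
#include <stdint.h>

#define SECTORSIZE	4096ULL

int main(void)
{
	uint64_t offset = 1000, len = 10000;	/* punch [1000, 11000) */
	uint64_t mask = SECTORSIZE - 1;
	uint64_t lockstart = (offset + mask) & ~mask;	 /* round start up  */
	uint64_t lockend = ((offset + len) & ~mask) - 1; /* round end down  */

	printf("full sectors to drop: [%llu, %llu]\n",
	       (unsigned long long)lockstart, (unsigned long long)lockend);
	printf("zero by hand: [%llu, %llu) and [%llu, %llu)\n",
	       (unsigned long long)offset, (unsigned long long)lockstart,
	       (unsigned long long)(lockend + 1),
	       (unsigned long long)(offset + len));
	return 0;
}

If the rounded range collapses (lockend ends up below lockstart) the function returns right after the edge zeroing, since there are no full sectors to drop.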
| @@ -1633,15 +1999,18 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
| 1633 | alloc_start = offset & ~mask; | 1999 | alloc_start = offset & ~mask; |
| 1634 | alloc_end = (offset + len + mask) & ~mask; | 2000 | alloc_end = (offset + len + mask) & ~mask; |
| 1635 | 2001 | ||
| 1636 | /* We only support the FALLOC_FL_KEEP_SIZE mode */ | 2002 | /* Make sure we aren't being given some crap mode */ |
| 1637 | if (mode & ~FALLOC_FL_KEEP_SIZE) | 2003 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
| 1638 | return -EOPNOTSUPP; | 2004 | return -EOPNOTSUPP; |
| 1639 | 2005 | ||
| 2006 | if (mode & FALLOC_FL_PUNCH_HOLE) | ||
| 2007 | return btrfs_punch_hole(inode, offset, len); | ||
| 2008 | |||
| 1640 | /* | 2009 | /* |
| 1641 | * Make sure we have enough space before we do the | 2010 | * Make sure we have enough space before we do the |
| 1642 | * allocation. | 2011 | * allocation. |
| 1643 | */ | 2012 | */ |
| 1644 | ret = btrfs_check_data_free_space(inode, len); | 2013 | ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1); |
| 1645 | if (ret) | 2014 | if (ret) |
| 1646 | return ret; | 2015 | return ret; |
| 1647 | 2016 | ||
| @@ -1748,7 +2117,7 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
| 1748 | out: | 2117 | out: |
| 1749 | mutex_unlock(&inode->i_mutex); | 2118 | mutex_unlock(&inode->i_mutex); |
| 1750 | /* Let go of our reservation. */ | 2119 | /* Let go of our reservation. */ |
| 1751 | btrfs_free_reserved_data_space(inode, len); | 2120 | btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1); |
| 1752 | return ret; | 2121 | return ret; |
| 1753 | } | 2122 | } |
| 1754 | 2123 | ||
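For reference, the FALLOC_FL_PUNCH_HOLE path added above is driven from userspace through fallocate(2). A minimal sketch (hypothetical file name; punches a 1 MiB hole at a 4 MiB offset — the VFS requires PUNCH_HOLE to be combined with KEEP_SIZE, which is why the mode check above admits both flags):

	#define _GNU_SOURCE
	#include <fcntl.h>		/* older glibc may also need <linux/falloc.h> */
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("testfile", O_RDWR);	/* hypothetical test file */

		if (fd < 0)
			return 1;
		/* Deallocate 1 MiB at offset 4 MiB without changing i_size. */
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      4 << 20, 1 << 20) < 0)
			perror("fallocate");
		close(fd);
		return 0;
	}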
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 6b10acfc2f5c..1027b854b90c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -966,7 +966,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
| 966 | block_group->key.offset)) { | 966 | block_group->key.offset)) { |
| 967 | ret = find_first_extent_bit(unpin, start, | 967 | ret = find_first_extent_bit(unpin, start, |
| 968 | &extent_start, &extent_end, | 968 | &extent_start, &extent_end, |
| 969 | EXTENT_DIRTY); | 969 | EXTENT_DIRTY, NULL); |
| 970 | if (ret) { | 970 | if (ret) { |
| 971 | ret = 0; | 971 | ret = 0; |
| 972 | break; | 972 | break; |
| @@ -1454,9 +1454,7 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl, | |||
| 1454 | max_t(u64, *offset, bitmap_info->offset)); | 1454 | max_t(u64, *offset, bitmap_info->offset)); |
| 1455 | bits = bytes_to_bits(*bytes, ctl->unit); | 1455 | bits = bytes_to_bits(*bytes, ctl->unit); |
| 1456 | 1456 | ||
| 1457 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); | 1457 | for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { |
| 1458 | i < BITS_PER_BITMAP; | ||
| 1459 | i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
| 1460 | next_zero = find_next_zero_bit(bitmap_info->bitmap, | 1458 | next_zero = find_next_zero_bit(bitmap_info->bitmap, |
| 1461 | BITS_PER_BITMAP, i); | 1459 | BITS_PER_BITMAP, i); |
| 1462 | if ((next_zero - i) >= bits) { | 1460 | if ((next_zero - i) >= bits) { |
| @@ -2307,9 +2305,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
| 2307 | 2305 | ||
| 2308 | again: | 2306 | again: |
| 2309 | found_bits = 0; | 2307 | found_bits = 0; |
| 2310 | for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i); | 2308 | for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { |
| 2311 | i < BITS_PER_BITMAP; | ||
| 2312 | i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
| 2313 | next_zero = find_next_zero_bit(entry->bitmap, | 2309 | next_zero = find_next_zero_bit(entry->bitmap, |
| 2314 | BITS_PER_BITMAP, i); | 2310 | BITS_PER_BITMAP, i); |
| 2315 | if (next_zero - i >= min_bits) { | 2311 | if (next_zero - i >= min_bits) { |
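The two free-space-cache.c hunks above replace open-coded find_next_bit() loops with for_each_set_bit_from(); behaviour is unchanged, since the helper (from include/linux/bitops.h) expands to essentially the same loop:

	/* Sketch of the macro the cleanup relies on: iterate over set bits,
	 * starting the scan at the current value of 'bit'. */
	#define for_each_set_bit_from(bit, addr, size)			\
		for ((bit) = find_next_bit((addr), (size), (bit));	\
		     (bit) < (size);					\
		     (bit) = find_next_bit((addr), (size), (bit) + 1))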
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h index db2ff9773b99..1d982812ab67 100644 --- a/fs/btrfs/hash.h +++ b/fs/btrfs/hash.h | |||
| @@ -24,4 +24,14 @@ static inline u64 btrfs_name_hash(const char *name, int len) | |||
| 24 | { | 24 | { |
| 25 | return crc32c((u32)~1, name, len); | 25 | return crc32c((u32)~1, name, len); |
| 26 | } | 26 | } |
| 27 | |||
| 28 | /* | ||
| 29 | * Figure the key offset of an extended inode ref | ||
| 30 | */ | ||
| 31 | static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, | ||
| 32 | int len) | ||
| 33 | { | ||
| 34 | return (u64) crc32c(parent_objectid, name, len); | ||
| 35 | } | ||
| 36 | |||
| 27 | #endif | 37 | #endif |
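btrfs_extref_hash() supplies the key offset under which an extended inode ref is filed. A sketch of how the lookup key is assembled (mirroring btrfs_lookup_inode_extref() in inode-item.c below); because the offset is only a crc32c of the parent objectid and name, unrelated names can collide on one key, which is why callers still scan the item contents:

	struct btrfs_key key;

	key.objectid = inode_objectid;	/* the inode the link points to */
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(parent_objectid, name, name_len);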
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index a13cf1a96c73..48b8fda93132 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | 18 | ||
| 19 | #include "ctree.h" | 19 | #include "ctree.h" |
| 20 | #include "disk-io.h" | 20 | #include "disk-io.h" |
| 21 | #include "hash.h" | ||
| 21 | #include "transaction.h" | 22 | #include "transaction.h" |
| 22 | #include "print-tree.h" | 23 | #include "print-tree.h" |
| 23 | 24 | ||
| @@ -50,18 +51,57 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name, | |||
| 50 | return 0; | 51 | return 0; |
| 51 | } | 52 | } |
| 52 | 53 | ||
| 53 | struct btrfs_inode_ref * | 54 | int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid, |
| 55 | const char *name, int name_len, | ||
| 56 | struct btrfs_inode_extref **extref_ret) | ||
| 57 | { | ||
| 58 | struct extent_buffer *leaf; | ||
| 59 | struct btrfs_inode_extref *extref; | ||
| 60 | unsigned long ptr; | ||
| 61 | unsigned long name_ptr; | ||
| 62 | u32 item_size; | ||
| 63 | u32 cur_offset = 0; | ||
| 64 | int ref_name_len; | ||
| 65 | |||
| 66 | leaf = path->nodes[0]; | ||
| 67 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
| 68 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
| 69 | |||
| 70 | /* | ||
| 71 | * Search all extended backrefs in this item. We're only | ||
| 72 | * looking through any collisions so most of the time this is | ||
| 73 | * just going to compare against one buffer. If all is well, | ||
| 74 | * we'll return success and the inode ref object. | ||
| 75 | */ | ||
| 76 | while (cur_offset < item_size) { | ||
| 77 | extref = (struct btrfs_inode_extref *) (ptr + cur_offset); | ||
| 78 | name_ptr = (unsigned long)(&extref->name); | ||
| 79 | ref_name_len = btrfs_inode_extref_name_len(leaf, extref); | ||
| 80 | |||
| 81 | if (ref_name_len == name_len && | ||
| 82 | btrfs_inode_extref_parent(leaf, extref) == ref_objectid && | ||
| 83 | (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) { | ||
| 84 | if (extref_ret) | ||
| 85 | *extref_ret = extref; | ||
| 86 | return 1; | ||
| 87 | } | ||
| 88 | |||
| 89 | cur_offset += ref_name_len + sizeof(*extref); | ||
| 90 | } | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | static struct btrfs_inode_ref * | ||
| 54 | btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, | 95 | btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, |
| 55 | struct btrfs_root *root, | 96 | struct btrfs_root *root, |
| 56 | struct btrfs_path *path, | 97 | struct btrfs_path *path, |
| 57 | const char *name, int name_len, | 98 | const char *name, int name_len, |
| 58 | u64 inode_objectid, u64 ref_objectid, int mod) | 99 | u64 inode_objectid, u64 ref_objectid, int ins_len, |
| 100 | int cow) | ||
| 59 | { | 101 | { |
| 102 | int ret; | ||
| 60 | struct btrfs_key key; | 103 | struct btrfs_key key; |
| 61 | struct btrfs_inode_ref *ref; | 104 | struct btrfs_inode_ref *ref; |
| 62 | int ins_len = mod < 0 ? -1 : 0; | ||
| 63 | int cow = mod != 0; | ||
| 64 | int ret; | ||
| 65 | 105 | ||
| 66 | key.objectid = inode_objectid; | 106 | key.objectid = inode_objectid; |
| 67 | key.type = BTRFS_INODE_REF_KEY; | 107 | key.type = BTRFS_INODE_REF_KEY; |
| @@ -77,13 +117,150 @@ btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, | |||
| 77 | return ref; | 117 | return ref; |
| 78 | } | 118 | } |
| 79 | 119 | ||
| 80 | int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | 120 | /* Returns NULL if no extref found */ |
| 121 | struct btrfs_inode_extref * | ||
| 122 | btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, | ||
| 123 | struct btrfs_root *root, | ||
| 124 | struct btrfs_path *path, | ||
| 125 | const char *name, int name_len, | ||
| 126 | u64 inode_objectid, u64 ref_objectid, int ins_len, | ||
| 127 | int cow) | ||
| 128 | { | ||
| 129 | int ret; | ||
| 130 | struct btrfs_key key; | ||
| 131 | struct btrfs_inode_extref *extref; | ||
| 132 | |||
| 133 | key.objectid = inode_objectid; | ||
| 134 | key.type = BTRFS_INODE_EXTREF_KEY; | ||
| 135 | key.offset = btrfs_extref_hash(ref_objectid, name, name_len); | ||
| 136 | |||
| 137 | ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); | ||
| 138 | if (ret < 0) | ||
| 139 | return ERR_PTR(ret); | ||
| 140 | if (ret > 0) | ||
| 141 | return NULL; | ||
| 142 | if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref)) | ||
| 143 | return NULL; | ||
| 144 | return extref; | ||
| 145 | } | ||
| 146 | |||
| 147 | int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans, | ||
| 148 | struct btrfs_root *root, | ||
| 149 | struct btrfs_path *path, | ||
| 150 | const char *name, int name_len, | ||
| 151 | u64 inode_objectid, u64 ref_objectid, int mod, | ||
| 152 | u64 *ret_index) | ||
| 153 | { | ||
| 154 | struct btrfs_inode_ref *ref; | ||
| 155 | struct btrfs_inode_extref *extref; | ||
| 156 | int ins_len = mod < 0 ? -1 : 0; | ||
| 157 | int cow = mod != 0; | ||
| 158 | |||
| 159 | ref = btrfs_lookup_inode_ref(trans, root, path, name, name_len, | ||
| 160 | inode_objectid, ref_objectid, ins_len, | ||
| 161 | cow); | ||
| 162 | if (IS_ERR(ref)) | ||
| 163 | return PTR_ERR(ref); | ||
| 164 | |||
| 165 | if (ref != NULL) { | ||
| 166 | *ret_index = btrfs_inode_ref_index(path->nodes[0], ref); | ||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | |||
| 170 | btrfs_release_path(path); | ||
| 171 | |||
| 172 | extref = btrfs_lookup_inode_extref(trans, root, path, name, | ||
| 173 | name_len, inode_objectid, | ||
| 174 | ref_objectid, ins_len, cow); | ||
| 175 | if (IS_ERR(extref)) | ||
| 176 | return PTR_ERR(extref); | ||
| 177 | |||
| 178 | if (extref) { | ||
| 179 | *ret_index = btrfs_inode_extref_index(path->nodes[0], extref); | ||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | return -ENOENT; | ||
| 184 | } | ||
| 185 | |||
| 186 | int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, | ||
| 81 | struct btrfs_root *root, | 187 | struct btrfs_root *root, |
| 82 | const char *name, int name_len, | 188 | const char *name, int name_len, |
| 83 | u64 inode_objectid, u64 ref_objectid, u64 *index) | 189 | u64 inode_objectid, u64 ref_objectid, u64 *index) |
| 84 | { | 190 | { |
| 85 | struct btrfs_path *path; | 191 | struct btrfs_path *path; |
| 86 | struct btrfs_key key; | 192 | struct btrfs_key key; |
| 193 | struct btrfs_inode_extref *extref; | ||
| 194 | struct extent_buffer *leaf; | ||
| 195 | int ret; | ||
| 196 | int del_len = name_len + sizeof(*extref); | ||
| 197 | unsigned long ptr; | ||
| 198 | unsigned long item_start; | ||
| 199 | u32 item_size; | ||
| 200 | |||
| 201 | key.objectid = inode_objectid; | ||
| 202 | btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY); | ||
| 203 | key.offset = btrfs_extref_hash(ref_objectid, name, name_len); | ||
| 204 | |||
| 205 | path = btrfs_alloc_path(); | ||
| 206 | if (!path) | ||
| 207 | return -ENOMEM; | ||
| 208 | |||
| 209 | path->leave_spinning = 1; | ||
| 210 | |||
| 211 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | ||
| 212 | if (ret > 0) | ||
| 213 | ret = -ENOENT; | ||
| 214 | if (ret < 0) | ||
| 215 | goto out; | ||
| 216 | |||
| 217 | /* | ||
| 218 | * Sanity check - did we find the right item for this name? | ||
| 219 | * This should always succeed so error here will make the FS | ||
| 220 | * readonly. | ||
| 221 | */ | ||
| 222 | if (!btrfs_find_name_in_ext_backref(path, ref_objectid, | ||
| 223 | name, name_len, &extref)) { | ||
| 224 | btrfs_std_error(root->fs_info, -ENOENT); | ||
| 225 | ret = -EROFS; | ||
| 226 | goto out; | ||
| 227 | } | ||
| 228 | |||
| 229 | leaf = path->nodes[0]; | ||
| 230 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
| 231 | if (index) | ||
| 232 | *index = btrfs_inode_extref_index(leaf, extref); | ||
| 233 | |||
| 234 | if (del_len == item_size) { | ||
| 235 | /* | ||
| 236 | * Common case only one ref in the item, remove the | ||
| 237 | * whole item. | ||
| 238 | */ | ||
| 239 | ret = btrfs_del_item(trans, root, path); | ||
| 240 | goto out; | ||
| 241 | } | ||
| 242 | |||
| 243 | ptr = (unsigned long)extref; | ||
| 244 | item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
| 245 | |||
| 246 | memmove_extent_buffer(leaf, ptr, ptr + del_len, | ||
| 247 | item_size - (ptr + del_len - item_start)); | ||
| 248 | |||
| 249 | btrfs_truncate_item(trans, root, path, item_size - del_len, 1); | ||
| 250 | |||
| 251 | out: | ||
| 252 | btrfs_free_path(path); | ||
| 253 | |||
| 254 | return ret; | ||
| 255 | } | ||
| 256 | |||
| 257 | int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | ||
| 258 | struct btrfs_root *root, | ||
| 259 | const char *name, int name_len, | ||
| 260 | u64 inode_objectid, u64 ref_objectid, u64 *index) | ||
| 261 | { | ||
| 262 | struct btrfs_path *path; | ||
| 263 | struct btrfs_key key; | ||
| 87 | struct btrfs_inode_ref *ref; | 264 | struct btrfs_inode_ref *ref; |
| 88 | struct extent_buffer *leaf; | 265 | struct extent_buffer *leaf; |
| 89 | unsigned long ptr; | 266 | unsigned long ptr; |
| @@ -91,6 +268,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
| 91 | u32 item_size; | 268 | u32 item_size; |
| 92 | u32 sub_item_len; | 269 | u32 sub_item_len; |
| 93 | int ret; | 270 | int ret; |
| 271 | int search_ext_refs = 0; | ||
| 94 | int del_len = name_len + sizeof(*ref); | 272 | int del_len = name_len + sizeof(*ref); |
| 95 | 273 | ||
| 96 | key.objectid = inode_objectid; | 274 | key.objectid = inode_objectid; |
| @@ -106,12 +284,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
| 106 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 284 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
| 107 | if (ret > 0) { | 285 | if (ret > 0) { |
| 108 | ret = -ENOENT; | 286 | ret = -ENOENT; |
| 287 | search_ext_refs = 1; | ||
| 109 | goto out; | 288 | goto out; |
| 110 | } else if (ret < 0) { | 289 | } else if (ret < 0) { |
| 111 | goto out; | 290 | goto out; |
| 112 | } | 291 | } |
| 113 | if (!find_name_in_backref(path, name, name_len, &ref)) { | 292 | if (!find_name_in_backref(path, name, name_len, &ref)) { |
| 114 | ret = -ENOENT; | 293 | ret = -ENOENT; |
| 294 | search_ext_refs = 1; | ||
| 115 | goto out; | 295 | goto out; |
| 116 | } | 296 | } |
| 117 | leaf = path->nodes[0]; | 297 | leaf = path->nodes[0]; |
| @@ -129,8 +309,78 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
| 129 | item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); | 309 | item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); |
| 130 | memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, | 310 | memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, |
| 131 | item_size - (ptr + sub_item_len - item_start)); | 311 | item_size - (ptr + sub_item_len - item_start)); |
| 132 | btrfs_truncate_item(trans, root, path, | 312 | btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1); |
| 133 | item_size - sub_item_len, 1); | 313 | out: |
| 314 | btrfs_free_path(path); | ||
| 315 | |||
| 316 | if (search_ext_refs) { | ||
| 317 | /* | ||
| 318 | * No refs were found, or we could not find the | ||
| 319 | * name in our ref array. Find and remove the extended | ||
| 320 | * inode ref then. | ||
| 321 | */ | ||
| 322 | return btrfs_del_inode_extref(trans, root, name, name_len, | ||
| 323 | inode_objectid, ref_objectid, index); | ||
| 324 | } | ||
| 325 | |||
| 326 | return ret; | ||
| 327 | } | ||
| 328 | |||
| 329 | /* | ||
| 330 | * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree. | ||
| 331 | * | ||
| 332 | * The caller must have checked against BTRFS_LINK_MAX already. | ||
| 333 | */ | ||
| 334 | static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, | ||
| 335 | struct btrfs_root *root, | ||
| 336 | const char *name, int name_len, | ||
| 337 | u64 inode_objectid, u64 ref_objectid, u64 index) | ||
| 338 | { | ||
| 339 | struct btrfs_inode_extref *extref; | ||
| 340 | int ret; | ||
| 341 | int ins_len = name_len + sizeof(*extref); | ||
| 342 | unsigned long ptr; | ||
| 343 | struct btrfs_path *path; | ||
| 344 | struct btrfs_key key; | ||
| 345 | struct extent_buffer *leaf; | ||
| 346 | struct btrfs_item *item; | ||
| 347 | |||
| 348 | key.objectid = inode_objectid; | ||
| 349 | key.type = BTRFS_INODE_EXTREF_KEY; | ||
| 350 | key.offset = btrfs_extref_hash(ref_objectid, name, name_len); | ||
| 351 | |||
| 352 | path = btrfs_alloc_path(); | ||
| 353 | if (!path) | ||
| 354 | return -ENOMEM; | ||
| 355 | |||
| 356 | path->leave_spinning = 1; | ||
| 357 | ret = btrfs_insert_empty_item(trans, root, path, &key, | ||
| 358 | ins_len); | ||
| 359 | if (ret == -EEXIST) { | ||
| 360 | if (btrfs_find_name_in_ext_backref(path, ref_objectid, | ||
| 361 | name, name_len, NULL)) | ||
| 362 | goto out; | ||
| 363 | |||
| 364 | btrfs_extend_item(trans, root, path, ins_len); | ||
| 365 | ret = 0; | ||
| 366 | } | ||
| 367 | if (ret < 0) | ||
| 368 | goto out; | ||
| 369 | |||
| 370 | leaf = path->nodes[0]; | ||
| 371 | item = btrfs_item_nr(leaf, path->slots[0]); | ||
| 372 | ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char); | ||
| 373 | ptr += btrfs_item_size(leaf, item) - ins_len; | ||
| 374 | extref = (struct btrfs_inode_extref *)ptr; | ||
| 375 | |||
| 376 | btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len); | ||
| 377 | btrfs_set_inode_extref_index(path->nodes[0], extref, index); | ||
| 378 | btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid); | ||
| 379 | |||
| 380 | ptr = (unsigned long)&extref->name; | ||
| 381 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | ||
| 382 | btrfs_mark_buffer_dirty(path->nodes[0]); | ||
| 383 | |||
| 134 | out: | 384 | out: |
| 135 | btrfs_free_path(path); | 385 | btrfs_free_path(path); |
| 136 | return ret; | 386 | return ret; |
| @@ -191,6 +441,19 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | |||
| 191 | 441 | ||
| 192 | out: | 442 | out: |
| 193 | btrfs_free_path(path); | 443 | btrfs_free_path(path); |
| 444 | |||
| 445 | if (ret == -EMLINK) { | ||
| 446 | struct btrfs_super_block *disk_super = root->fs_info->super_copy; | ||
| 447 | /* We ran out of space in the ref array. Need to | ||
| 448 | * add an extended ref. */ | ||
| 449 | if (btrfs_super_incompat_flags(disk_super) | ||
| 450 | & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) | ||
| 451 | ret = btrfs_insert_inode_extref(trans, root, name, | ||
| 452 | name_len, | ||
| 453 | inode_objectid, | ||
| 454 | ref_objectid, index); | ||
| 455 | } | ||
| 456 | |||
| 194 | return ret; | 457 | return ret; |
| 195 | } | 458 | } |
| 196 | 459 | ||
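The ref/extref helpers above treat a BTRFS_INODE_EXTREF_KEY item as a packed run of variable-length records. A sketch of the record layout they assume (the real declaration lives in ctree.h; shown here only for orientation):

	/* Extended inode ref record: fixed header followed by the name bytes.
	 * Several of these may sit back to back in one item when hashes collide. */
	struct btrfs_inode_extref {
		__le64 parent_objectid;	/* objectid of the parent directory */
		__le64 index;		/* directory index of this name */
		__le16 name_len;	/* length of the name that follows */
		__u8 name[0];		/* link name, name_len bytes */
	} __attribute__ ((__packed__));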
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a6ed6944e50c..85a1e5053fe6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -230,7 +230,6 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, | |||
| 230 | u64 inline_len = actual_end - start; | 230 | u64 inline_len = actual_end - start; |
| 231 | u64 aligned_end = (end + root->sectorsize - 1) & | 231 | u64 aligned_end = (end + root->sectorsize - 1) & |
| 232 | ~((u64)root->sectorsize - 1); | 232 | ~((u64)root->sectorsize - 1); |
| 233 | u64 hint_byte; | ||
| 234 | u64 data_len = inline_len; | 233 | u64 data_len = inline_len; |
| 235 | int ret; | 234 | int ret; |
| 236 | 235 | ||
| @@ -247,8 +246,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, | |||
| 247 | return 1; | 246 | return 1; |
| 248 | } | 247 | } |
| 249 | 248 | ||
| 250 | ret = btrfs_drop_extents(trans, inode, start, aligned_end, | 249 | ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1); |
| 251 | &hint_byte, 1); | ||
| 252 | if (ret) | 250 | if (ret) |
| 253 | return ret; | 251 | return ret; |
| 254 | 252 | ||
| @@ -664,7 +662,7 @@ retry: | |||
| 664 | async_extent->compressed_size, | 662 | async_extent->compressed_size, |
| 665 | async_extent->compressed_size, | 663 | async_extent->compressed_size, |
| 666 | 0, alloc_hint, &ins, 1); | 664 | 0, alloc_hint, &ins, 1); |
| 667 | if (ret) | 665 | if (ret && ret != -ENOSPC) |
| 668 | btrfs_abort_transaction(trans, root, ret); | 666 | btrfs_abort_transaction(trans, root, ret); |
| 669 | btrfs_end_transaction(trans, root); | 667 | btrfs_end_transaction(trans, root); |
| 670 | } | 668 | } |
| @@ -1308,6 +1306,7 @@ out_check: | |||
| 1308 | em->block_start = disk_bytenr; | 1306 | em->block_start = disk_bytenr; |
| 1309 | em->bdev = root->fs_info->fs_devices->latest_bdev; | 1307 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
| 1310 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | 1308 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
| 1309 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | ||
| 1311 | while (1) { | 1310 | while (1) { |
| 1312 | write_lock(&em_tree->lock); | 1311 | write_lock(&em_tree->lock); |
| 1313 | ret = add_extent_mapping(em_tree, em); | 1312 | ret = add_extent_mapping(em_tree, em); |
| @@ -1364,11 +1363,7 @@ out_check: | |||
| 1364 | } | 1363 | } |
| 1365 | 1364 | ||
| 1366 | error: | 1365 | error: |
| 1367 | if (nolock) { | 1366 | err = btrfs_end_transaction(trans, root); |
| 1368 | err = btrfs_end_transaction_nolock(trans, root); | ||
| 1369 | } else { | ||
| 1370 | err = btrfs_end_transaction(trans, root); | ||
| 1371 | } | ||
| 1372 | if (!ret) | 1367 | if (!ret) |
| 1373 | ret = err; | 1368 | ret = err; |
| 1374 | 1369 | ||
| @@ -1785,7 +1780,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
| 1785 | struct btrfs_path *path; | 1780 | struct btrfs_path *path; |
| 1786 | struct extent_buffer *leaf; | 1781 | struct extent_buffer *leaf; |
| 1787 | struct btrfs_key ins; | 1782 | struct btrfs_key ins; |
| 1788 | u64 hint; | ||
| 1789 | int ret; | 1783 | int ret; |
| 1790 | 1784 | ||
| 1791 | path = btrfs_alloc_path(); | 1785 | path = btrfs_alloc_path(); |
| @@ -1803,8 +1797,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
| 1803 | * the caller is expected to unpin it and allow it to be merged | 1797 | * the caller is expected to unpin it and allow it to be merged |
| 1804 | * with the others. | 1798 | * with the others. |
| 1805 | */ | 1799 | */ |
| 1806 | ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, | 1800 | ret = btrfs_drop_extents(trans, root, inode, file_pos, |
| 1807 | &hint, 0); | 1801 | file_pos + num_bytes, 0); |
| 1808 | if (ret) | 1802 | if (ret) |
| 1809 | goto out; | 1803 | goto out; |
| 1810 | 1804 | ||
| @@ -1828,10 +1822,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
| 1828 | btrfs_set_file_extent_encryption(leaf, fi, encryption); | 1822 | btrfs_set_file_extent_encryption(leaf, fi, encryption); |
| 1829 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); | 1823 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); |
| 1830 | 1824 | ||
| 1831 | btrfs_unlock_up_safe(path, 1); | ||
| 1832 | btrfs_set_lock_blocking(leaf); | ||
| 1833 | |||
| 1834 | btrfs_mark_buffer_dirty(leaf); | 1825 | btrfs_mark_buffer_dirty(leaf); |
| 1826 | btrfs_release_path(path); | ||
| 1835 | 1827 | ||
| 1836 | inode_add_bytes(inode, num_bytes); | 1828 | inode_add_bytes(inode, num_bytes); |
| 1837 | 1829 | ||
| @@ -1929,11 +1921,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) | |||
| 1929 | ordered_extent->len, | 1921 | ordered_extent->len, |
| 1930 | compress_type, 0, 0, | 1922 | compress_type, 0, 0, |
| 1931 | BTRFS_FILE_EXTENT_REG); | 1923 | BTRFS_FILE_EXTENT_REG); |
| 1932 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | ||
| 1933 | ordered_extent->file_offset, | ||
| 1934 | ordered_extent->len); | ||
| 1935 | } | 1924 | } |
| 1936 | 1925 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | |
| 1926 | ordered_extent->file_offset, ordered_extent->len, | ||
| 1927 | trans->transid); | ||
| 1937 | if (ret < 0) { | 1928 | if (ret < 0) { |
| 1938 | btrfs_abort_transaction(trans, root, ret); | 1929 | btrfs_abort_transaction(trans, root, ret); |
| 1939 | goto out_unlock; | 1930 | goto out_unlock; |
| @@ -1949,6 +1940,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) | |||
| 1949 | btrfs_abort_transaction(trans, root, ret); | 1940 | btrfs_abort_transaction(trans, root, ret); |
| 1950 | goto out_unlock; | 1941 | goto out_unlock; |
| 1951 | } | 1942 | } |
| 1943 | } else { | ||
| 1944 | btrfs_set_inode_last_trans(trans, inode); | ||
| 1952 | } | 1945 | } |
| 1953 | ret = 0; | 1946 | ret = 0; |
| 1954 | out_unlock: | 1947 | out_unlock: |
| @@ -1958,12 +1951,8 @@ out_unlock: | |||
| 1958 | out: | 1951 | out: |
| 1959 | if (root != root->fs_info->tree_root) | 1952 | if (root != root->fs_info->tree_root) |
| 1960 | btrfs_delalloc_release_metadata(inode, ordered_extent->len); | 1953 | btrfs_delalloc_release_metadata(inode, ordered_extent->len); |
| 1961 | if (trans) { | 1954 | if (trans) |
| 1962 | if (nolock) | 1955 | btrfs_end_transaction(trans, root); |
| 1963 | btrfs_end_transaction_nolock(trans, root); | ||
| 1964 | else | ||
| 1965 | btrfs_end_transaction(trans, root); | ||
| 1966 | } | ||
| 1967 | 1956 | ||
| 1968 | if (ret) | 1957 | if (ret) |
| 1969 | clear_extent_uptodate(io_tree, ordered_extent->file_offset, | 1958 | clear_extent_uptodate(io_tree, ordered_extent->file_offset, |
| @@ -2119,7 +2108,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root) | |||
| 2119 | if (empty) | 2108 | if (empty) |
| 2120 | return; | 2109 | return; |
| 2121 | 2110 | ||
| 2122 | down_read(&root->fs_info->cleanup_work_sem); | ||
| 2123 | spin_lock(&fs_info->delayed_iput_lock); | 2111 | spin_lock(&fs_info->delayed_iput_lock); |
| 2124 | list_splice_init(&fs_info->delayed_iputs, &list); | 2112 | list_splice_init(&fs_info->delayed_iputs, &list); |
| 2125 | spin_unlock(&fs_info->delayed_iput_lock); | 2113 | spin_unlock(&fs_info->delayed_iput_lock); |
| @@ -2130,7 +2118,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root) | |||
| 2130 | iput(delayed->inode); | 2118 | iput(delayed->inode); |
| 2131 | kfree(delayed); | 2119 | kfree(delayed); |
| 2132 | } | 2120 | } |
| 2133 | up_read(&root->fs_info->cleanup_work_sem); | ||
| 2134 | } | 2121 | } |
| 2135 | 2122 | ||
| 2136 | enum btrfs_orphan_cleanup_state { | 2123 | enum btrfs_orphan_cleanup_state { |
| @@ -2198,7 +2185,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |||
| 2198 | int ret; | 2185 | int ret; |
| 2199 | 2186 | ||
| 2200 | if (!root->orphan_block_rsv) { | 2187 | if (!root->orphan_block_rsv) { |
| 2201 | block_rsv = btrfs_alloc_block_rsv(root); | 2188 | block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
| 2202 | if (!block_rsv) | 2189 | if (!block_rsv) |
| 2203 | return -ENOMEM; | 2190 | return -ENOMEM; |
| 2204 | } | 2191 | } |
| @@ -2225,7 +2212,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |||
| 2225 | insert = 1; | 2212 | insert = 1; |
| 2226 | #endif | 2213 | #endif |
| 2227 | insert = 1; | 2214 | insert = 1; |
| 2228 | atomic_dec(&root->orphan_inodes); | 2215 | atomic_inc(&root->orphan_inodes); |
| 2229 | } | 2216 | } |
| 2230 | 2217 | ||
| 2231 | if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | 2218 | if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
| @@ -2590,6 +2577,18 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
| 2590 | 2577 | ||
| 2591 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); | 2578 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); |
| 2592 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); | 2579 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
| 2580 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); | ||
| 2581 | |||
| 2582 | /* | ||
| 2583 | * If we were modified in the current generation and evicted from memory | ||
| 2584 | * and then re-read we need to do a full sync since we don't have any | ||
| 2585 | * idea about which extents were modified before we were evicted from | ||
| 2586 | * cache. | ||
| 2587 | */ | ||
| 2588 | if (BTRFS_I(inode)->last_trans == root->fs_info->generation) | ||
| 2589 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 2590 | &BTRFS_I(inode)->runtime_flags); | ||
| 2591 | |||
| 2593 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); | 2592 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); |
| 2594 | inode->i_generation = BTRFS_I(inode)->generation; | 2593 | inode->i_generation = BTRFS_I(inode)->generation; |
| 2595 | inode->i_rdev = 0; | 2594 | inode->i_rdev = 0; |
| @@ -2894,7 +2893,6 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
| 2894 | struct btrfs_trans_handle *trans; | 2893 | struct btrfs_trans_handle *trans; |
| 2895 | struct btrfs_root *root = BTRFS_I(dir)->root; | 2894 | struct btrfs_root *root = BTRFS_I(dir)->root; |
| 2896 | struct btrfs_path *path; | 2895 | struct btrfs_path *path; |
| 2897 | struct btrfs_inode_ref *ref; | ||
| 2898 | struct btrfs_dir_item *di; | 2896 | struct btrfs_dir_item *di; |
| 2899 | struct inode *inode = dentry->d_inode; | 2897 | struct inode *inode = dentry->d_inode; |
| 2900 | u64 index; | 2898 | u64 index; |
| @@ -3008,17 +3006,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
| 3008 | } | 3006 | } |
| 3009 | btrfs_release_path(path); | 3007 | btrfs_release_path(path); |
| 3010 | 3008 | ||
| 3011 | ref = btrfs_lookup_inode_ref(trans, root, path, | 3009 | ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name, |
| 3012 | dentry->d_name.name, dentry->d_name.len, | 3010 | dentry->d_name.len, ino, dir_ino, 0, |
| 3013 | ino, dir_ino, 0); | 3011 | &index); |
| 3014 | if (IS_ERR(ref)) { | 3012 | if (ret) { |
| 3015 | err = PTR_ERR(ref); | 3013 | err = ret; |
| 3016 | goto out; | 3014 | goto out; |
| 3017 | } | 3015 | } |
| 3018 | BUG_ON(!ref); /* Logic error */ | 3016 | |
| 3019 | if (check_path_shared(root, path)) | 3017 | if (check_path_shared(root, path)) |
| 3020 | goto out; | 3018 | goto out; |
| 3021 | index = btrfs_inode_ref_index(path->nodes[0], ref); | 3019 | |
| 3022 | btrfs_release_path(path); | 3020 | btrfs_release_path(path); |
| 3023 | 3021 | ||
| 3024 | /* | 3022 | /* |
| @@ -3061,7 +3059,7 @@ out: | |||
| 3061 | static void __unlink_end_trans(struct btrfs_trans_handle *trans, | 3059 | static void __unlink_end_trans(struct btrfs_trans_handle *trans, |
| 3062 | struct btrfs_root *root) | 3060 | struct btrfs_root *root) |
| 3063 | { | 3061 | { |
| 3064 | if (trans->block_rsv == &root->fs_info->global_block_rsv) { | 3062 | if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) { |
| 3065 | btrfs_block_rsv_release(root, trans->block_rsv, | 3063 | btrfs_block_rsv_release(root, trans->block_rsv, |
| 3066 | trans->bytes_reserved); | 3064 | trans->bytes_reserved); |
| 3067 | trans->block_rsv = &root->fs_info->trans_block_rsv; | 3065 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
| @@ -3191,9 +3189,10 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 3191 | struct btrfs_trans_handle *trans; | 3189 | struct btrfs_trans_handle *trans; |
| 3192 | unsigned long nr = 0; | 3190 | unsigned long nr = 0; |
| 3193 | 3191 | ||
| 3194 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || | 3192 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) |
| 3195 | btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) | ||
| 3196 | return -ENOTEMPTY; | 3193 | return -ENOTEMPTY; |
| 3194 | if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) | ||
| 3195 | return -EPERM; | ||
| 3197 | 3196 | ||
| 3198 | trans = __unlink_start_trans(dir, dentry); | 3197 | trans = __unlink_start_trans(dir, dentry); |
| 3199 | if (IS_ERR(trans)) | 3198 | if (IS_ERR(trans)) |
| @@ -3267,8 +3266,13 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
| 3267 | return -ENOMEM; | 3266 | return -ENOMEM; |
| 3268 | path->reada = -1; | 3267 | path->reada = -1; |
| 3269 | 3268 | ||
| 3269 | /* | ||
| 3270 | * We want to drop from the next block forward in case this new size is | ||
| 3271 | * not block aligned since we will be keeping the last block of the | ||
| 3272 | * extent just the way it is. | ||
| 3273 | */ | ||
| 3270 | if (root->ref_cows || root == root->fs_info->tree_root) | 3274 | if (root->ref_cows || root == root->fs_info->tree_root) |
| 3271 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); | 3275 | btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0); |
| 3272 | 3276 | ||
| 3273 | /* | 3277 | /* |
| 3274 | * This function is also used to drop the items in the log tree before | 3278 | * This function is also used to drop the items in the log tree before |
| @@ -3429,12 +3433,6 @@ delete: | |||
| 3429 | 3433 | ||
| 3430 | if (path->slots[0] == 0 || | 3434 | if (path->slots[0] == 0 || |
| 3431 | path->slots[0] != pending_del_slot) { | 3435 | path->slots[0] != pending_del_slot) { |
| 3432 | if (root->ref_cows && | ||
| 3433 | BTRFS_I(inode)->location.objectid != | ||
| 3434 | BTRFS_FREE_INO_OBJECTID) { | ||
| 3435 | err = -EAGAIN; | ||
| 3436 | goto out; | ||
| 3437 | } | ||
| 3438 | if (pending_del_nr) { | 3436 | if (pending_del_nr) { |
| 3439 | ret = btrfs_del_items(trans, root, path, | 3437 | ret = btrfs_del_items(trans, root, path, |
| 3440 | pending_del_slot, | 3438 | pending_del_slot, |
| @@ -3465,12 +3463,20 @@ error: | |||
| 3465 | } | 3463 | } |
| 3466 | 3464 | ||
| 3467 | /* | 3465 | /* |
| 3468 | * taken from block_truncate_page, but does cow as it zeros out | 3466 | * btrfs_truncate_page - read, zero a chunk and write a page |
| 3469 | * any bytes left in the last page in the file. | 3467 | * @inode - inode that we're zeroing |
| 3468 | * @from - the offset to start zeroing | ||
| 3469 | * @len - the length to zero, 0 to zero the entire range respective to the | ||
| 3470 | * offset | ||
| 3471 | * @front - zero up to the offset instead of from the offset on | ||
| 3472 | * | ||
| 3473 | * This will find the page for the "from" offset and cow the page and zero the | ||
| 3474 | * part we want to zero. This is used with truncate and hole punching. | ||
| 3470 | */ | 3475 | */ |
| 3471 | static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | 3476 | int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, |
| 3477 | int front) | ||
| 3472 | { | 3478 | { |
| 3473 | struct inode *inode = mapping->host; | 3479 | struct address_space *mapping = inode->i_mapping; |
| 3474 | struct btrfs_root *root = BTRFS_I(inode)->root; | 3480 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 3475 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 3481 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| 3476 | struct btrfs_ordered_extent *ordered; | 3482 | struct btrfs_ordered_extent *ordered; |
| @@ -3485,7 +3491,8 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |||
| 3485 | u64 page_start; | 3491 | u64 page_start; |
| 3486 | u64 page_end; | 3492 | u64 page_end; |
| 3487 | 3493 | ||
| 3488 | if ((offset & (blocksize - 1)) == 0) | 3494 | if ((offset & (blocksize - 1)) == 0 && |
| 3495 | (!len || ((len & (blocksize - 1)) == 0))) | ||
| 3489 | goto out; | 3496 | goto out; |
| 3490 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | 3497 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
| 3491 | if (ret) | 3498 | if (ret) |
| @@ -3532,7 +3539,8 @@ again: | |||
| 3532 | } | 3539 | } |
| 3533 | 3540 | ||
| 3534 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, | 3541 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
| 3535 | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, | 3542 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 3543 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | ||
| 3536 | 0, 0, &cached_state, GFP_NOFS); | 3544 | 0, 0, &cached_state, GFP_NOFS); |
| 3537 | 3545 | ||
| 3538 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, | 3546 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
| @@ -3545,8 +3553,13 @@ again: | |||
| 3545 | 3553 | ||
| 3546 | ret = 0; | 3554 | ret = 0; |
| 3547 | if (offset != PAGE_CACHE_SIZE) { | 3555 | if (offset != PAGE_CACHE_SIZE) { |
| 3556 | if (!len) | ||
| 3557 | len = PAGE_CACHE_SIZE - offset; | ||
| 3548 | kaddr = kmap(page); | 3558 | kaddr = kmap(page); |
| 3549 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | 3559 | if (front) |
| 3560 | memset(kaddr, 0, offset); | ||
| 3561 | else | ||
| 3562 | memset(kaddr + offset, 0, len); | ||
| 3550 | flush_dcache_page(page); | 3563 | flush_dcache_page(page); |
| 3551 | kunmap(page); | 3564 | kunmap(page); |
| 3552 | } | 3565 | } |
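With the new @len and @front arguments, btrfs_truncate_page() can zero either end of a partially covered page. The hole-punching path in file.c uses it along these lines (a sketch, not the verbatim call sites):

	/* Zero the tail of the page containing the start of the hole... */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret)
		return ret;

	/* ...and the head of the page containing the end of the hole. */
	ret = btrfs_truncate_page(inode, offset + len, 0, 1);
	if (ret)
		return ret;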
| @@ -3577,6 +3590,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
| 3577 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 3590 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| 3578 | struct extent_map *em = NULL; | 3591 | struct extent_map *em = NULL; |
| 3579 | struct extent_state *cached_state = NULL; | 3592 | struct extent_state *cached_state = NULL; |
| 3593 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
| 3580 | u64 mask = root->sectorsize - 1; | 3594 | u64 mask = root->sectorsize - 1; |
| 3581 | u64 hole_start = (oldsize + mask) & ~mask; | 3595 | u64 hole_start = (oldsize + mask) & ~mask; |
| 3582 | u64 block_end = (size + mask) & ~mask; | 3596 | u64 block_end = (size + mask) & ~mask; |
| @@ -3613,7 +3627,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
| 3613 | last_byte = min(extent_map_end(em), block_end); | 3627 | last_byte = min(extent_map_end(em), block_end); |
| 3614 | last_byte = (last_byte + mask) & ~mask; | 3628 | last_byte = (last_byte + mask) & ~mask; |
| 3615 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { | 3629 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
| 3616 | u64 hint_byte = 0; | 3630 | struct extent_map *hole_em; |
| 3617 | hole_size = last_byte - cur_offset; | 3631 | hole_size = last_byte - cur_offset; |
| 3618 | 3632 | ||
| 3619 | trans = btrfs_start_transaction(root, 3); | 3633 | trans = btrfs_start_transaction(root, 3); |
| @@ -3622,9 +3636,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
| 3622 | break; | 3636 | break; |
| 3623 | } | 3637 | } |
| 3624 | 3638 | ||
| 3625 | err = btrfs_drop_extents(trans, inode, cur_offset, | 3639 | err = btrfs_drop_extents(trans, root, inode, |
| 3626 | cur_offset + hole_size, | 3640 | cur_offset, |
| 3627 | &hint_byte, 1); | 3641 | cur_offset + hole_size, 1); |
| 3628 | if (err) { | 3642 | if (err) { |
| 3629 | btrfs_abort_transaction(trans, root, err); | 3643 | btrfs_abort_transaction(trans, root, err); |
| 3630 | btrfs_end_transaction(trans, root); | 3644 | btrfs_end_transaction(trans, root); |
| @@ -3641,9 +3655,39 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
| 3641 | break; | 3655 | break; |
| 3642 | } | 3656 | } |
| 3643 | 3657 | ||
| 3644 | btrfs_drop_extent_cache(inode, hole_start, | 3658 | btrfs_drop_extent_cache(inode, cur_offset, |
| 3645 | last_byte - 1, 0); | 3659 | cur_offset + hole_size - 1, 0); |
| 3660 | hole_em = alloc_extent_map(); | ||
| 3661 | if (!hole_em) { | ||
| 3662 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 3663 | &BTRFS_I(inode)->runtime_flags); | ||
| 3664 | goto next; | ||
| 3665 | } | ||
| 3666 | hole_em->start = cur_offset; | ||
| 3667 | hole_em->len = hole_size; | ||
| 3668 | hole_em->orig_start = cur_offset; | ||
| 3646 | 3669 | ||
| 3670 | hole_em->block_start = EXTENT_MAP_HOLE; | ||
| 3671 | hole_em->block_len = 0; | ||
| 3672 | hole_em->bdev = root->fs_info->fs_devices->latest_bdev; | ||
| 3673 | hole_em->compress_type = BTRFS_COMPRESS_NONE; | ||
| 3674 | hole_em->generation = trans->transid; | ||
| 3675 | |||
| 3676 | while (1) { | ||
| 3677 | write_lock(&em_tree->lock); | ||
| 3678 | err = add_extent_mapping(em_tree, hole_em); | ||
| 3679 | if (!err) | ||
| 3680 | list_move(&hole_em->list, | ||
| 3681 | &em_tree->modified_extents); | ||
| 3682 | write_unlock(&em_tree->lock); | ||
| 3683 | if (err != -EEXIST) | ||
| 3684 | break; | ||
| 3685 | btrfs_drop_extent_cache(inode, cur_offset, | ||
| 3686 | cur_offset + | ||
| 3687 | hole_size - 1, 0); | ||
| 3688 | } | ||
| 3689 | free_extent_map(hole_em); | ||
| 3690 | next: | ||
| 3647 | btrfs_update_inode(trans, root, inode); | 3691 | btrfs_update_inode(trans, root, inode); |
| 3648 | btrfs_end_transaction(trans, root); | 3692 | btrfs_end_transaction(trans, root); |
| 3649 | } | 3693 | } |
| @@ -3768,26 +3812,22 @@ void btrfs_evict_inode(struct inode *inode) | |||
| 3768 | goto no_delete; | 3812 | goto no_delete; |
| 3769 | } | 3813 | } |
| 3770 | 3814 | ||
| 3771 | rsv = btrfs_alloc_block_rsv(root); | 3815 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
| 3772 | if (!rsv) { | 3816 | if (!rsv) { |
| 3773 | btrfs_orphan_del(NULL, inode); | 3817 | btrfs_orphan_del(NULL, inode); |
| 3774 | goto no_delete; | 3818 | goto no_delete; |
| 3775 | } | 3819 | } |
| 3776 | rsv->size = min_size; | 3820 | rsv->size = min_size; |
| 3821 | rsv->failfast = 1; | ||
| 3777 | global_rsv = &root->fs_info->global_block_rsv; | 3822 | global_rsv = &root->fs_info->global_block_rsv; |
| 3778 | 3823 | ||
| 3779 | btrfs_i_size_write(inode, 0); | 3824 | btrfs_i_size_write(inode, 0); |
| 3780 | 3825 | ||
| 3781 | /* | 3826 | /* |
| 3782 | * This is a bit simpler than btrfs_truncate since | 3827 | * This is a bit simpler than btrfs_truncate since we've already |
| 3783 | * | 3828 | * reserved our space for our orphan item in the unlink, so we just |
| 3784 | * 1) We've already reserved our space for our orphan item in the | 3829 | * need to reserve some slack space in case we add bytes and update |
| 3785 | * unlink. | 3830 | * inode item when doing the truncate. |
| 3786 | * 2) We're going to delete the inode item, so we don't need to update | ||
| 3787 | * it at all. | ||
| 3788 | * | ||
| 3789 | * So we just need to reserve some slack space in case we add bytes when | ||
| 3790 | * doing the truncate. | ||
| 3791 | */ | 3831 | */ |
| 3792 | while (1) { | 3832 | while (1) { |
| 3793 | ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); | 3833 | ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); |
| @@ -3808,7 +3848,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
| 3808 | goto no_delete; | 3848 | goto no_delete; |
| 3809 | } | 3849 | } |
| 3810 | 3850 | ||
| 3811 | trans = btrfs_start_transaction(root, 0); | 3851 | trans = btrfs_start_transaction_noflush(root, 1); |
| 3812 | if (IS_ERR(trans)) { | 3852 | if (IS_ERR(trans)) { |
| 3813 | btrfs_orphan_del(NULL, inode); | 3853 | btrfs_orphan_del(NULL, inode); |
| 3814 | btrfs_free_block_rsv(root, rsv); | 3854 | btrfs_free_block_rsv(root, rsv); |
| @@ -3818,9 +3858,13 @@ void btrfs_evict_inode(struct inode *inode) | |||
| 3818 | trans->block_rsv = rsv; | 3858 | trans->block_rsv = rsv; |
| 3819 | 3859 | ||
| 3820 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); | 3860 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); |
| 3821 | if (ret != -EAGAIN) | 3861 | if (ret != -ENOSPC) |
| 3822 | break; | 3862 | break; |
| 3823 | 3863 | ||
| 3864 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 3865 | ret = btrfs_update_inode(trans, root, inode); | ||
| 3866 | BUG_ON(ret); | ||
| 3867 | |||
| 3824 | nr = trans->blocks_used; | 3868 | nr = trans->blocks_used; |
| 3825 | btrfs_end_transaction(trans, root); | 3869 | btrfs_end_transaction(trans, root); |
| 3826 | trans = NULL; | 3870 | trans = NULL; |
| @@ -4470,10 +4514,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 4470 | trans = btrfs_join_transaction(root); | 4514 | trans = btrfs_join_transaction(root); |
| 4471 | if (IS_ERR(trans)) | 4515 | if (IS_ERR(trans)) |
| 4472 | return PTR_ERR(trans); | 4516 | return PTR_ERR(trans); |
| 4473 | if (nolock) | 4517 | ret = btrfs_commit_transaction(trans, root); |
| 4474 | ret = btrfs_end_transaction_nolock(trans, root); | ||
| 4475 | else | ||
| 4476 | ret = btrfs_commit_transaction(trans, root); | ||
| 4477 | } | 4518 | } |
| 4478 | return ret; | 4519 | return ret; |
| 4479 | } | 4520 | } |
| @@ -4671,6 +4712,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
| 4671 | BTRFS_I(inode)->generation = trans->transid; | 4712 | BTRFS_I(inode)->generation = trans->transid; |
| 4672 | inode->i_generation = BTRFS_I(inode)->generation; | 4713 | inode->i_generation = BTRFS_I(inode)->generation; |
| 4673 | 4714 | ||
| 4715 | /* | ||
| 4716 | * We could have gotten an inode number from somebody who was fsynced | ||
| 4717 | * and then removed in this same transaction, so let's just set full | ||
| 4718 | * sync since it will be a full sync anyway and this will blow away the | ||
| 4719 | * old info in the log. | ||
| 4720 | */ | ||
| 4721 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | ||
| 4722 | |||
| 4674 | if (S_ISDIR(mode)) | 4723 | if (S_ISDIR(mode)) |
| 4675 | owner = 0; | 4724 | owner = 0; |
| 4676 | else | 4725 | else |
| @@ -4680,6 +4729,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
| 4680 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | 4729 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); |
| 4681 | key[0].offset = 0; | 4730 | key[0].offset = 0; |
| 4682 | 4731 | ||
| 4732 | /* | ||
| 4733 | * Start new inodes with an inode_ref. This is slightly more | ||
| 4734 | * efficient for small numbers of hard links since they will | ||
| 4735 | * be packed into one item. Extended refs will kick in if we | ||
| 4736 | * add more hard links than can fit in the ref item. | ||
| 4737 | */ | ||
| 4683 | key[1].objectid = objectid; | 4738 | key[1].objectid = objectid; |
| 4684 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); | 4739 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); |
| 4685 | key[1].offset = ref_objectid; | 4740 | key[1].offset = ref_objectid; |
| @@ -4986,7 +5041,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 4986 | if (root->objectid != BTRFS_I(inode)->root->objectid) | 5041 | if (root->objectid != BTRFS_I(inode)->root->objectid) |
| 4987 | return -EXDEV; | 5042 | return -EXDEV; |
| 4988 | 5043 | ||
| 4989 | if (inode->i_nlink == ~0U) | 5044 | if (inode->i_nlink >= BTRFS_LINK_MAX) |
| 4990 | return -EMLINK; | 5045 | return -EMLINK; |
| 4991 | 5046 | ||
| 4992 | err = btrfs_set_inode_index(dir, &index); | 5047 | err = btrfs_set_inode_index(dir, &index); |
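The EMLINK check above now compares against BTRFS_LINK_MAX instead of ~0U: with extended refs available, the per-inode hard-link count is bounded by an explicit constant rather than by how many classic ref entries fit in a single item. The definition added to ctree.h in this series is, roughly:

	/* Upper bound on hard links per inode once extended refs are in place
	 * (value as merged upstream; shown here as a sketch for reference). */
	#define BTRFS_LINK_MAX 65535U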
| @@ -5450,7 +5505,8 @@ insert: | |||
| 5450 | write_unlock(&em_tree->lock); | 5505 | write_unlock(&em_tree->lock); |
| 5451 | out: | 5506 | out: |
| 5452 | 5507 | ||
| 5453 | trace_btrfs_get_extent(root, em); | 5508 | if (em) |
| 5509 | trace_btrfs_get_extent(root, em); | ||
| 5454 | 5510 | ||
| 5455 | if (path) | 5511 | if (path) |
| 5456 | btrfs_free_path(path); | 5512 | btrfs_free_path(path); |
| @@ -5836,6 +5892,48 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, | |||
| 5836 | return ret; | 5892 | return ret; |
| 5837 | } | 5893 | } |
| 5838 | 5894 | ||
| 5895 | static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | ||
| 5896 | u64 len, u64 orig_start, | ||
| 5897 | u64 block_start, u64 block_len, | ||
| 5898 | int type) | ||
| 5899 | { | ||
| 5900 | struct extent_map_tree *em_tree; | ||
| 5901 | struct extent_map *em; | ||
| 5902 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 5903 | int ret; | ||
| 5904 | |||
| 5905 | em_tree = &BTRFS_I(inode)->extent_tree; | ||
| 5906 | em = alloc_extent_map(); | ||
| 5907 | if (!em) | ||
| 5908 | return ERR_PTR(-ENOMEM); | ||
| 5909 | |||
| 5910 | em->start = start; | ||
| 5911 | em->orig_start = orig_start; | ||
| 5912 | em->len = len; | ||
| 5913 | em->block_len = block_len; | ||
| 5914 | em->block_start = block_start; | ||
| 5915 | em->bdev = root->fs_info->fs_devices->latest_bdev; | ||
| 5916 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | ||
| 5917 | if (type == BTRFS_ORDERED_PREALLOC) | ||
| 5918 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | ||
| 5919 | |||
| 5920 | do { | ||
| 5921 | btrfs_drop_extent_cache(inode, em->start, | ||
| 5922 | em->start + em->len - 1, 0); | ||
| 5923 | write_lock(&em_tree->lock); | ||
| 5924 | ret = add_extent_mapping(em_tree, em); | ||
| 5925 | write_unlock(&em_tree->lock); | ||
| 5926 | } while (ret == -EEXIST); | ||
| 5927 | |||
| 5928 | if (ret) { | ||
| 5929 | free_extent_map(em); | ||
| 5930 | return ERR_PTR(ret); | ||
| 5931 | } | ||
| 5932 | |||
| 5933 | return em; | ||
| 5934 | } | ||
| 5935 | |||
| 5936 | |||
| 5839 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | 5937 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
| 5840 | struct buffer_head *bh_result, int create) | 5938 | struct buffer_head *bh_result, int create) |
| 5841 | { | 5939 | { |
| @@ -5950,6 +6048,19 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
| 5950 | goto must_cow; | 6048 | goto must_cow; |
| 5951 | 6049 | ||
| 5952 | if (can_nocow_odirect(trans, inode, start, len) == 1) { | 6050 | if (can_nocow_odirect(trans, inode, start, len) == 1) { |
| 6051 | u64 orig_start = em->start; | ||
| 6052 | |||
| 6053 | if (type == BTRFS_ORDERED_PREALLOC) { | ||
| 6054 | free_extent_map(em); | ||
| 6055 | em = create_pinned_em(inode, start, len, | ||
| 6056 | orig_start, | ||
| 6057 | block_start, len, type); | ||
| 6058 | if (IS_ERR(em)) { | ||
| 6059 | btrfs_end_transaction(trans, root); | ||
| 6060 | goto unlock_err; | ||
| 6061 | } | ||
| 6062 | } | ||
| 6063 | |||
| 5953 | ret = btrfs_add_ordered_extent_dio(inode, start, | 6064 | ret = btrfs_add_ordered_extent_dio(inode, start, |
| 5954 | block_start, len, len, type); | 6065 | block_start, len, len, type); |
| 5955 | btrfs_end_transaction(trans, root); | 6066 | btrfs_end_transaction(trans, root); |
| @@ -5999,7 +6110,8 @@ unlock: | |||
| 5999 | if (lockstart < lockend) { | 6110 | if (lockstart < lockend) { |
| 6000 | if (create && len < lockend - lockstart) { | 6111 | if (create && len < lockend - lockstart) { |
| 6001 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | 6112 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, |
| 6002 | lockstart + len - 1, unlock_bits, 1, 0, | 6113 | lockstart + len - 1, |
| 6114 | unlock_bits | EXTENT_DEFRAG, 1, 0, | ||
| 6003 | &cached_state, GFP_NOFS); | 6115 | &cached_state, GFP_NOFS); |
| 6004 | /* | 6116 | /* |
| 6005 | * Beside unlock, we also need to cleanup reserved space | 6117 | * Beside unlock, we also need to cleanup reserved space |
| @@ -6007,8 +6119,8 @@ unlock: | |||
| 6007 | */ | 6119 | */ |
| 6008 | clear_extent_bit(&BTRFS_I(inode)->io_tree, | 6120 | clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| 6009 | lockstart + len, lockend, | 6121 | lockstart + len, lockend, |
| 6010 | unlock_bits | EXTENT_DO_ACCOUNTING, | 6122 | unlock_bits | EXTENT_DO_ACCOUNTING | |
| 6011 | 1, 0, NULL, GFP_NOFS); | 6123 | EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS); |
| 6012 | } else { | 6124 | } else { |
| 6013 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | 6125 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, |
| 6014 | lockend, unlock_bits, 1, 0, | 6126 | lockend, unlock_bits, 1, 0, |
| @@ -6573,8 +6685,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) | |||
| 6573 | */ | 6685 | */ |
| 6574 | clear_extent_bit(tree, page_start, page_end, | 6686 | clear_extent_bit(tree, page_start, page_end, |
| 6575 | EXTENT_DIRTY | EXTENT_DELALLOC | | 6687 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 6576 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, | 6688 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | |
| 6577 | &cached_state, GFP_NOFS); | 6689 | EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS); |
| 6578 | /* | 6690 | /* |
| 6579 | * whoever cleared the private bit is responsible | 6691 | * whoever cleared the private bit is responsible |
| 6580 | * for the finish_ordered_io | 6692 | * for the finish_ordered_io |
| @@ -6590,7 +6702,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) | |||
| 6590 | } | 6702 | } |
| 6591 | clear_extent_bit(tree, page_start, page_end, | 6703 | clear_extent_bit(tree, page_start, page_end, |
| 6592 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | | 6704 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 6593 | EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); | 6705 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, |
| 6706 | &cached_state, GFP_NOFS); | ||
| 6594 | __btrfs_releasepage(page, GFP_NOFS); | 6707 | __btrfs_releasepage(page, GFP_NOFS); |
| 6595 | 6708 | ||
| 6596 | ClearPageChecked(page); | 6709 | ClearPageChecked(page); |
| @@ -6687,7 +6800,8 @@ again: | |||
| 6687 | * prepare_pages in the normal write path. | 6800 | * prepare_pages in the normal write path. |
| 6688 | */ | 6801 | */ |
| 6689 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, | 6802 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
| 6690 | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, | 6803 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 6804 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | ||
| 6691 | 0, 0, &cached_state, GFP_NOFS); | 6805 | 0, 0, &cached_state, GFP_NOFS); |
| 6692 | 6806 | ||
| 6693 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, | 6807 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
| @@ -6718,6 +6832,7 @@ again: | |||
| 6718 | 6832 | ||
| 6719 | BTRFS_I(inode)->last_trans = root->fs_info->generation; | 6833 | BTRFS_I(inode)->last_trans = root->fs_info->generation; |
| 6720 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | 6834 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; |
| 6835 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; | ||
| 6721 | 6836 | ||
| 6722 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); | 6837 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); |
| 6723 | 6838 | ||
| @@ -6745,7 +6860,7 @@ static int btrfs_truncate(struct inode *inode) | |||
| 6745 | u64 mask = root->sectorsize - 1; | 6860 | u64 mask = root->sectorsize - 1; |
| 6746 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); | 6861 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); |
| 6747 | 6862 | ||
| 6748 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); | 6863 | ret = btrfs_truncate_page(inode, inode->i_size, 0, 0); |
| 6749 | if (ret) | 6864 | if (ret) |
| 6750 | return ret; | 6865 | return ret; |
| 6751 | 6866 | ||
| @@ -6788,10 +6903,11 @@ static int btrfs_truncate(struct inode *inode) | |||
| 6788 | * 3) fs_info->trans_block_rsv - this will have 1 items worth left for | 6903 | * 3) fs_info->trans_block_rsv - this will have 1 items worth left for |
| 6789 | * updating the inode. | 6904 | * updating the inode. |
| 6790 | */ | 6905 | */ |
| 6791 | rsv = btrfs_alloc_block_rsv(root); | 6906 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
| 6792 | if (!rsv) | 6907 | if (!rsv) |
| 6793 | return -ENOMEM; | 6908 | return -ENOMEM; |
| 6794 | rsv->size = min_size; | 6909 | rsv->size = min_size; |
| 6910 | rsv->failfast = 1; | ||
| 6795 | 6911 | ||
| 6796 | /* | 6912 | /* |
| 6797 | * 1 for the truncate slack space | 6913 | * 1 for the truncate slack space |
| @@ -6837,36 +6953,21 @@ static int btrfs_truncate(struct inode *inode) | |||
| 6837 | &BTRFS_I(inode)->runtime_flags)) | 6953 | &BTRFS_I(inode)->runtime_flags)) |
| 6838 | btrfs_add_ordered_operation(trans, root, inode); | 6954 | btrfs_add_ordered_operation(trans, root, inode); |
| 6839 | 6955 | ||
| 6840 | while (1) { | 6956 | /* |
| 6841 | ret = btrfs_block_rsv_refill(root, rsv, min_size); | 6957 | * So if we truncate and then write and fsync we normally would just |
| 6842 | if (ret) { | 6958 | * write the extents that changed, which is a problem if we need to |
| 6843 | /* | 6959 | * first truncate that entire inode. So set this flag so we write out |
| 6844 | * This can only happen with the original transaction we | 6960 | * all of the extents in the inode to the sync log so we're completely |
| 6845 | * started above, every other time we shouldn't have a | 6961 | * safe. |
| 6846 | * transaction started yet. | 6962 | */ |
| 6847 | */ | 6963 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); |
| 6848 | if (ret == -EAGAIN) | 6964 | trans->block_rsv = rsv; |
| 6849 | goto end_trans; | ||
| 6850 | err = ret; | ||
| 6851 | break; | ||
| 6852 | } | ||
| 6853 | |||
| 6854 | if (!trans) { | ||
| 6855 | /* Just need the 1 for updating the inode */ | ||
| 6856 | trans = btrfs_start_transaction(root, 1); | ||
| 6857 | if (IS_ERR(trans)) { | ||
| 6858 | ret = err = PTR_ERR(trans); | ||
| 6859 | trans = NULL; | ||
| 6860 | break; | ||
| 6861 | } | ||
| 6862 | } | ||
| 6863 | |||
| 6864 | trans->block_rsv = rsv; | ||
| 6865 | 6965 | ||
| 6966 | while (1) { | ||
| 6866 | ret = btrfs_truncate_inode_items(trans, root, inode, | 6967 | ret = btrfs_truncate_inode_items(trans, root, inode, |
| 6867 | inode->i_size, | 6968 | inode->i_size, |
| 6868 | BTRFS_EXTENT_DATA_KEY); | 6969 | BTRFS_EXTENT_DATA_KEY); |
| 6869 | if (ret != -EAGAIN) { | 6970 | if (ret != -ENOSPC) { |
| 6870 | err = ret; | 6971 | err = ret; |
| 6871 | break; | 6972 | break; |
| 6872 | } | 6973 | } |
| @@ -6877,11 +6978,22 @@ static int btrfs_truncate(struct inode *inode) | |||
| 6877 | err = ret; | 6978 | err = ret; |
| 6878 | break; | 6979 | break; |
| 6879 | } | 6980 | } |
| 6880 | end_trans: | 6981 | |
| 6881 | nr = trans->blocks_used; | 6982 | nr = trans->blocks_used; |
| 6882 | btrfs_end_transaction(trans, root); | 6983 | btrfs_end_transaction(trans, root); |
| 6883 | trans = NULL; | ||
| 6884 | btrfs_btree_balance_dirty(root, nr); | 6984 | btrfs_btree_balance_dirty(root, nr); |
| 6985 | |||
| 6986 | trans = btrfs_start_transaction(root, 2); | ||
| 6987 | if (IS_ERR(trans)) { | ||
| 6988 | ret = err = PTR_ERR(trans); | ||
| 6989 | trans = NULL; | ||
| 6990 | break; | ||
| 6991 | } | ||
| 6992 | |||
| 6993 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, | ||
| 6994 | rsv, min_size); | ||
| 6995 | BUG_ON(ret); /* shouldn't happen */ | ||
| 6996 | trans->block_rsv = rsv; | ||
| 6885 | } | 6997 | } |
| 6886 | 6998 | ||
| 6887 | if (ret == 0 && inode->i_nlink > 0) { | 6999 | if (ret == 0 && inode->i_nlink > 0) { |
| @@ -6965,6 +7077,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) | |||
| 6965 | ei->csum_bytes = 0; | 7077 | ei->csum_bytes = 0; |
| 6966 | ei->index_cnt = (u64)-1; | 7078 | ei->index_cnt = (u64)-1; |
| 6967 | ei->last_unlink_trans = 0; | 7079 | ei->last_unlink_trans = 0; |
| 7080 | ei->last_log_commit = 0; | ||
| 6968 | 7081 | ||
| 6969 | spin_lock_init(&ei->lock); | 7082 | spin_lock_init(&ei->lock); |
| 6970 | ei->outstanding_extents = 0; | 7083 | ei->outstanding_extents = 0; |
| @@ -7095,31 +7208,31 @@ void btrfs_destroy_cachep(void) | |||
| 7095 | 7208 | ||
| 7096 | int btrfs_init_cachep(void) | 7209 | int btrfs_init_cachep(void) |
| 7097 | { | 7210 | { |
| 7098 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache", | 7211 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode", |
| 7099 | sizeof(struct btrfs_inode), 0, | 7212 | sizeof(struct btrfs_inode), 0, |
| 7100 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); | 7213 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); |
| 7101 | if (!btrfs_inode_cachep) | 7214 | if (!btrfs_inode_cachep) |
| 7102 | goto fail; | 7215 | goto fail; |
| 7103 | 7216 | ||
| 7104 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache", | 7217 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", |
| 7105 | sizeof(struct btrfs_trans_handle), 0, | 7218 | sizeof(struct btrfs_trans_handle), 0, |
| 7106 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 7219 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 7107 | if (!btrfs_trans_handle_cachep) | 7220 | if (!btrfs_trans_handle_cachep) |
| 7108 | goto fail; | 7221 | goto fail; |
| 7109 | 7222 | ||
| 7110 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache", | 7223 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", |
| 7111 | sizeof(struct btrfs_transaction), 0, | 7224 | sizeof(struct btrfs_transaction), 0, |
| 7112 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 7225 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 7113 | if (!btrfs_transaction_cachep) | 7226 | if (!btrfs_transaction_cachep) |
| 7114 | goto fail; | 7227 | goto fail; |
| 7115 | 7228 | ||
| 7116 | btrfs_path_cachep = kmem_cache_create("btrfs_path_cache", | 7229 | btrfs_path_cachep = kmem_cache_create("btrfs_path", |
| 7117 | sizeof(struct btrfs_path), 0, | 7230 | sizeof(struct btrfs_path), 0, |
| 7118 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 7231 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 7119 | if (!btrfs_path_cachep) | 7232 | if (!btrfs_path_cachep) |
| 7120 | goto fail; | 7233 | goto fail; |
| 7121 | 7234 | ||
| 7122 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", | 7235 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", |
| 7123 | sizeof(struct btrfs_free_space), 0, | 7236 | sizeof(struct btrfs_free_space), 0, |
| 7124 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 7237 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 7125 | if (!btrfs_free_space_cachep) | 7238 | if (!btrfs_free_space_cachep) |
| @@ -7513,6 +7626,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, | |||
| 7513 | loff_t actual_len, u64 *alloc_hint, | 7626 | loff_t actual_len, u64 *alloc_hint, |
| 7514 | struct btrfs_trans_handle *trans) | 7627 | struct btrfs_trans_handle *trans) |
| 7515 | { | 7628 | { |
| 7629 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
| 7630 | struct extent_map *em; | ||
| 7516 | struct btrfs_root *root = BTRFS_I(inode)->root; | 7631 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 7517 | struct btrfs_key ins; | 7632 | struct btrfs_key ins; |
| 7518 | u64 cur_offset = start; | 7633 | u64 cur_offset = start; |
| @@ -7553,6 +7668,37 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, | |||
| 7553 | btrfs_drop_extent_cache(inode, cur_offset, | 7668 | btrfs_drop_extent_cache(inode, cur_offset, |
| 7554 | cur_offset + ins.offset -1, 0); | 7669 | cur_offset + ins.offset -1, 0); |
| 7555 | 7670 | ||
| 7671 | em = alloc_extent_map(); | ||
| 7672 | if (!em) { | ||
| 7673 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 7674 | &BTRFS_I(inode)->runtime_flags); | ||
| 7675 | goto next; | ||
| 7676 | } | ||
| 7677 | |||
| 7678 | em->start = cur_offset; | ||
| 7679 | em->orig_start = cur_offset; | ||
| 7680 | em->len = ins.offset; | ||
| 7681 | em->block_start = ins.objectid; | ||
| 7682 | em->block_len = ins.offset; | ||
| 7683 | em->bdev = root->fs_info->fs_devices->latest_bdev; | ||
| 7684 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | ||
| 7685 | em->generation = trans->transid; | ||
| 7686 | |||
| 7687 | while (1) { | ||
| 7688 | write_lock(&em_tree->lock); | ||
| 7689 | ret = add_extent_mapping(em_tree, em); | ||
| 7690 | if (!ret) | ||
| 7691 | list_move(&em->list, | ||
| 7692 | &em_tree->modified_extents); | ||
| 7693 | write_unlock(&em_tree->lock); | ||
| 7694 | if (ret != -EEXIST) | ||
| 7695 | break; | ||
| 7696 | btrfs_drop_extent_cache(inode, cur_offset, | ||
| 7697 | cur_offset + ins.offset - 1, | ||
| 7698 | 0); | ||
| 7699 | } | ||
| 7700 | free_extent_map(em); | ||
| 7701 | next: | ||
| 7556 | num_bytes -= ins.offset; | 7702 | num_bytes -= ins.offset; |
| 7557 | cur_offset += ins.offset; | 7703 | cur_offset += ins.offset; |
| 7558 | *alloc_hint = ins.objectid + ins.offset; | 7704 | *alloc_hint = ins.objectid + ins.offset; |
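The prealloc hunk above inserts the new extent map and, on -EEXIST, drops the cached range and retries until the insertion succeeds. The following is a minimal userspace sketch (not btrfs code, toy helper names) of that drop-and-retry idea, using a small array as a stand-in for the extent map tree:

#include <stdio.h>
#include <errno.h>

struct range { unsigned long long start, len; int used; };
static struct range cache[8];

static int insert_range(unsigned long long start, unsigned long long len)
{
	int i, free_slot = -1;

	for (i = 0; i < 8; i++) {
		if (!cache[i].used) {
			if (free_slot < 0)
				free_slot = i;
			continue;
		}
		/* overlap check on half-open intervals [start, start+len) */
		if (start < cache[i].start + cache[i].len &&
		    cache[i].start < start + len)
			return -EEXIST;
	}
	if (free_slot < 0)
		return -ENOSPC;
	cache[free_slot] = (struct range){ start, len, 1 };
	return 0;
}

static void drop_range(unsigned long long start, unsigned long long len)
{
	int i;

	for (i = 0; i < 8; i++)
		if (cache[i].used &&
		    start < cache[i].start + cache[i].len &&
		    cache[i].start < start + len)
			cache[i].used = 0;
}

int main(void)
{
	int ret;

	insert_range(0, 4096);			/* stale mapping left behind */
	do {
		ret = insert_range(0, 8192);	/* new, larger mapping */
		if (ret == -EEXIST)
			drop_range(0, 8192);	/* evict the stale overlap */
	} while (ret == -EEXIST);
	printf("insert returned %d\n", ret);
	return 0;
}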
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 47127c1bd290..e568c472f807 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -181,6 +181,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
| 181 | int ret; | 181 | int ret; |
| 182 | u64 ip_oldflags; | 182 | u64 ip_oldflags; |
| 183 | unsigned int i_oldflags; | 183 | unsigned int i_oldflags; |
| 184 | umode_t mode; | ||
| 184 | 185 | ||
| 185 | if (btrfs_root_readonly(root)) | 186 | if (btrfs_root_readonly(root)) |
| 186 | return -EROFS; | 187 | return -EROFS; |
| @@ -203,6 +204,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
| 203 | 204 | ||
| 204 | ip_oldflags = ip->flags; | 205 | ip_oldflags = ip->flags; |
| 205 | i_oldflags = inode->i_flags; | 206 | i_oldflags = inode->i_flags; |
| 207 | mode = inode->i_mode; | ||
| 206 | 208 | ||
| 207 | flags = btrfs_mask_flags(inode->i_mode, flags); | 209 | flags = btrfs_mask_flags(inode->i_mode, flags); |
| 208 | oldflags = btrfs_flags_to_ioctl(ip->flags); | 210 | oldflags = btrfs_flags_to_ioctl(ip->flags); |
| @@ -237,10 +239,31 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
| 237 | ip->flags |= BTRFS_INODE_DIRSYNC; | 239 | ip->flags |= BTRFS_INODE_DIRSYNC; |
| 238 | else | 240 | else |
| 239 | ip->flags &= ~BTRFS_INODE_DIRSYNC; | 241 | ip->flags &= ~BTRFS_INODE_DIRSYNC; |
| 240 | if (flags & FS_NOCOW_FL) | 242 | if (flags & FS_NOCOW_FL) { |
| 241 | ip->flags |= BTRFS_INODE_NODATACOW; | 243 | if (S_ISREG(mode)) { |
| 242 | else | 244 | /* |
| 243 | ip->flags &= ~BTRFS_INODE_NODATACOW; | 245 | * It's safe to turn csums off here, no extents exist. |
| 246 | * Otherwise we want the flag to reflect the real COW | ||
| 247 | * status of the file and will not set it. | ||
| 248 | */ | ||
| 249 | if (inode->i_size == 0) | ||
| 250 | ip->flags |= BTRFS_INODE_NODATACOW | ||
| 251 | | BTRFS_INODE_NODATASUM; | ||
| 252 | } else { | ||
| 253 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
| 254 | } | ||
| 255 | } else { | ||
| 256 | /* | ||
| 257 | * Revert back under the same assumptions as above | ||
| 258 | */ | ||
| 259 | if (S_ISREG(mode)) { | ||
| 260 | if (inode->i_size == 0) | ||
| 261 | ip->flags &= ~(BTRFS_INODE_NODATACOW | ||
| 262 | | BTRFS_INODE_NODATASUM); | ||
| 263 | } else { | ||
| 264 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
| 265 | } | ||
| 266 | } | ||
| 244 | 267 | ||
| 245 | /* | 268 | /* |
| 246 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS | 269 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS |
| @@ -516,7 +539,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | |||
| 516 | if (!pending_snapshot) | 539 | if (!pending_snapshot) |
| 517 | return -ENOMEM; | 540 | return -ENOMEM; |
| 518 | 541 | ||
| 519 | btrfs_init_block_rsv(&pending_snapshot->block_rsv); | 542 | btrfs_init_block_rsv(&pending_snapshot->block_rsv, |
| 543 | BTRFS_BLOCK_RSV_TEMP); | ||
| 520 | pending_snapshot->dentry = dentry; | 544 | pending_snapshot->dentry = dentry; |
| 521 | pending_snapshot->root = root; | 545 | pending_snapshot->root = root; |
| 522 | pending_snapshot->readonly = readonly; | 546 | pending_snapshot->readonly = readonly; |
| @@ -525,7 +549,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | |||
| 525 | *inherit = NULL; /* take responsibility to free it */ | 549 | *inherit = NULL; /* take responsibility to free it */ |
| 526 | } | 550 | } |
| 527 | 551 | ||
| 528 | trans = btrfs_start_transaction(root->fs_info->extent_root, 5); | 552 | trans = btrfs_start_transaction(root->fs_info->extent_root, 6); |
| 529 | if (IS_ERR(trans)) { | 553 | if (IS_ERR(trans)) { |
| 530 | ret = PTR_ERR(trans); | 554 | ret = PTR_ERR(trans); |
| 531 | goto fail; | 555 | goto fail; |
| @@ -1022,8 +1046,8 @@ again: | |||
| 1022 | page_start, page_end - 1, 0, &cached_state); | 1046 | page_start, page_end - 1, 0, &cached_state); |
| 1023 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, | 1047 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, |
| 1024 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | | 1048 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1025 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, | 1049 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, |
| 1026 | GFP_NOFS); | 1050 | &cached_state, GFP_NOFS); |
| 1027 | 1051 | ||
| 1028 | if (i_done != page_cnt) { | 1052 | if (i_done != page_cnt) { |
| 1029 | spin_lock(&BTRFS_I(inode)->lock); | 1053 | spin_lock(&BTRFS_I(inode)->lock); |
| @@ -1034,8 +1058,8 @@ again: | |||
| 1034 | } | 1058 | } |
| 1035 | 1059 | ||
| 1036 | 1060 | ||
| 1037 | btrfs_set_extent_delalloc(inode, page_start, page_end - 1, | 1061 | set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, |
| 1038 | &cached_state); | 1062 | &cached_state, GFP_NOFS); |
| 1039 | 1063 | ||
| 1040 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | 1064 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
| 1041 | page_start, page_end - 1, &cached_state, | 1065 | page_start, page_end - 1, &cached_state, |
| @@ -2351,7 +2375,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2351 | int ret; | 2375 | int ret; |
| 2352 | u64 len = olen; | 2376 | u64 len = olen; |
| 2353 | u64 bs = root->fs_info->sb->s_blocksize; | 2377 | u64 bs = root->fs_info->sb->s_blocksize; |
| 2354 | u64 hint_byte; | ||
| 2355 | 2378 | ||
| 2356 | /* | 2379 | /* |
| 2357 | * TODO: | 2380 | * TODO: |
| @@ -2456,13 +2479,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2456 | another, and lock file content */ | 2479 | another, and lock file content */ |
| 2457 | while (1) { | 2480 | while (1) { |
| 2458 | struct btrfs_ordered_extent *ordered; | 2481 | struct btrfs_ordered_extent *ordered; |
| 2459 | lock_extent(&BTRFS_I(src)->io_tree, off, off+len); | 2482 | lock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1); |
| 2460 | ordered = btrfs_lookup_first_ordered_extent(src, off+len); | 2483 | ordered = btrfs_lookup_first_ordered_extent(src, off + len - 1); |
| 2461 | if (!ordered && | 2484 | if (!ordered && |
| 2462 | !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, | 2485 | !test_range_bit(&BTRFS_I(src)->io_tree, off, off + len - 1, |
| 2463 | EXTENT_DELALLOC, 0, NULL)) | 2486 | EXTENT_DELALLOC, 0, NULL)) |
| 2464 | break; | 2487 | break; |
| 2465 | unlock_extent(&BTRFS_I(src)->io_tree, off, off+len); | 2488 | unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1); |
| 2466 | if (ordered) | 2489 | if (ordered) |
| 2467 | btrfs_put_ordered_extent(ordered); | 2490 | btrfs_put_ordered_extent(ordered); |
| 2468 | btrfs_wait_ordered_range(src, off, len); | 2491 | btrfs_wait_ordered_range(src, off, len); |
| @@ -2536,7 +2559,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2536 | btrfs_release_path(path); | 2559 | btrfs_release_path(path); |
| 2537 | 2560 | ||
| 2538 | if (key.offset + datal <= off || | 2561 | if (key.offset + datal <= off || |
| 2539 | key.offset >= off+len) | 2562 | key.offset >= off + len - 1) |
| 2540 | goto next; | 2563 | goto next; |
| 2541 | 2564 | ||
| 2542 | memcpy(&new_key, &key, sizeof(new_key)); | 2565 | memcpy(&new_key, &key, sizeof(new_key)); |
| @@ -2574,10 +2597,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2574 | datal -= off - key.offset; | 2597 | datal -= off - key.offset; |
| 2575 | } | 2598 | } |
| 2576 | 2599 | ||
| 2577 | ret = btrfs_drop_extents(trans, inode, | 2600 | ret = btrfs_drop_extents(trans, root, inode, |
| 2578 | new_key.offset, | 2601 | new_key.offset, |
| 2579 | new_key.offset + datal, | 2602 | new_key.offset + datal, |
| 2580 | &hint_byte, 1); | 2603 | 1); |
| 2581 | if (ret) { | 2604 | if (ret) { |
| 2582 | btrfs_abort_transaction(trans, root, | 2605 | btrfs_abort_transaction(trans, root, |
| 2583 | ret); | 2606 | ret); |
| @@ -2637,8 +2660,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2637 | new_key.offset += skip; | 2660 | new_key.offset += skip; |
| 2638 | } | 2661 | } |
| 2639 | 2662 | ||
| 2640 | if (key.offset + datal > off+len) | 2663 | if (key.offset + datal > off + len) |
| 2641 | trim = key.offset + datal - (off+len); | 2664 | trim = key.offset + datal - (off + len); |
| 2642 | 2665 | ||
| 2643 | if (comp && (skip || trim)) { | 2666 | if (comp && (skip || trim)) { |
| 2644 | ret = -EINVAL; | 2667 | ret = -EINVAL; |
| @@ -2648,10 +2671,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 2648 | size -= skip + trim; | 2671 | size -= skip + trim; |
| 2649 | datal -= skip + trim; | 2672 | datal -= skip + trim; |
| 2650 | 2673 | ||
| 2651 | ret = btrfs_drop_extents(trans, inode, | 2674 | ret = btrfs_drop_extents(trans, root, inode, |
| 2652 | new_key.offset, | 2675 | new_key.offset, |
| 2653 | new_key.offset + datal, | 2676 | new_key.offset + datal, |
| 2654 | &hint_byte, 1); | 2677 | 1); |
| 2655 | if (ret) { | 2678 | if (ret) { |
| 2656 | btrfs_abort_transaction(trans, root, | 2679 | btrfs_abort_transaction(trans, root, |
| 2657 | ret); | 2680 | ret); |
| @@ -2715,7 +2738,7 @@ next: | |||
| 2715 | ret = 0; | 2738 | ret = 0; |
| 2716 | out: | 2739 | out: |
| 2717 | btrfs_release_path(path); | 2740 | btrfs_release_path(path); |
| 2718 | unlock_extent(&BTRFS_I(src)->io_tree, off, off+len); | 2741 | unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1); |
| 2719 | out_unlock: | 2742 | out_unlock: |
| 2720 | mutex_unlock(&src->i_mutex); | 2743 | mutex_unlock(&src->i_mutex); |
| 2721 | mutex_unlock(&inode->i_mutex); | 2744 | mutex_unlock(&inode->i_mutex); |
| @@ -2850,8 +2873,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) | |||
| 2850 | return 0; | 2873 | return 0; |
| 2851 | } | 2874 | } |
| 2852 | 2875 | ||
| 2853 | static void get_block_group_info(struct list_head *groups_list, | 2876 | void btrfs_get_block_group_info(struct list_head *groups_list, |
| 2854 | struct btrfs_ioctl_space_info *space) | 2877 | struct btrfs_ioctl_space_info *space) |
| 2855 | { | 2878 | { |
| 2856 | struct btrfs_block_group_cache *block_group; | 2879 | struct btrfs_block_group_cache *block_group; |
| 2857 | 2880 | ||
| @@ -2959,8 +2982,8 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
| 2959 | down_read(&info->groups_sem); | 2982 | down_read(&info->groups_sem); |
| 2960 | for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { | 2983 | for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { |
| 2961 | if (!list_empty(&info->block_groups[c])) { | 2984 | if (!list_empty(&info->block_groups[c])) { |
| 2962 | get_block_group_info(&info->block_groups[c], | 2985 | btrfs_get_block_group_info( |
| 2963 | &space); | 2986 | &info->block_groups[c], &space); |
| 2964 | memcpy(dest, &space, sizeof(space)); | 2987 | memcpy(dest, &space, sizeof(space)); |
| 2965 | dest++; | 2988 | dest++; |
| 2966 | space_args.total_spaces++; | 2989 | space_args.total_spaces++; |
| @@ -3208,11 +3231,9 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
| 3208 | { | 3231 | { |
| 3209 | int ret = 0; | 3232 | int ret = 0; |
| 3210 | int size; | 3233 | int size; |
| 3211 | u64 extent_item_pos; | ||
| 3212 | struct btrfs_ioctl_logical_ino_args *loi; | 3234 | struct btrfs_ioctl_logical_ino_args *loi; |
| 3213 | struct btrfs_data_container *inodes = NULL; | 3235 | struct btrfs_data_container *inodes = NULL; |
| 3214 | struct btrfs_path *path = NULL; | 3236 | struct btrfs_path *path = NULL; |
| 3215 | struct btrfs_key key; | ||
| 3216 | 3237 | ||
| 3217 | if (!capable(CAP_SYS_ADMIN)) | 3238 | if (!capable(CAP_SYS_ADMIN)) |
| 3218 | return -EPERM; | 3239 | return -EPERM; |
| @@ -3230,7 +3251,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
| 3230 | goto out; | 3251 | goto out; |
| 3231 | } | 3252 | } |
| 3232 | 3253 | ||
| 3233 | size = min_t(u32, loi->size, 4096); | 3254 | size = min_t(u32, loi->size, 64 * 1024); |
| 3234 | inodes = init_data_container(size); | 3255 | inodes = init_data_container(size); |
| 3235 | if (IS_ERR(inodes)) { | 3256 | if (IS_ERR(inodes)) { |
| 3236 | ret = PTR_ERR(inodes); | 3257 | ret = PTR_ERR(inodes); |
| @@ -3238,22 +3259,13 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
| 3238 | goto out; | 3259 | goto out; |
| 3239 | } | 3260 | } |
| 3240 | 3261 | ||
| 3241 | ret = extent_from_logical(root->fs_info, loi->logical, path, &key); | 3262 | ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path, |
| 3242 | btrfs_release_path(path); | 3263 | build_ino_list, inodes); |
| 3243 | 3264 | if (ret == -EINVAL) | |
| 3244 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) | ||
| 3245 | ret = -ENOENT; | 3265 | ret = -ENOENT; |
| 3246 | if (ret < 0) | 3266 | if (ret < 0) |
| 3247 | goto out; | 3267 | goto out; |
| 3248 | 3268 | ||
| 3249 | extent_item_pos = loi->logical - key.objectid; | ||
| 3250 | ret = iterate_extent_inodes(root->fs_info, key.objectid, | ||
| 3251 | extent_item_pos, 0, build_ino_list, | ||
| 3252 | inodes); | ||
| 3253 | |||
| 3254 | if (ret < 0) | ||
| 3255 | goto out; | ||
| 3256 | |||
| 3257 | ret = copy_to_user((void *)(unsigned long)loi->inodes, | 3269 | ret = copy_to_user((void *)(unsigned long)loi->inodes, |
| 3258 | (void *)(unsigned long)inodes, size); | 3270 | (void *)(unsigned long)inodes, size); |
| 3259 | if (ret) | 3271 | if (ret) |
| @@ -3261,7 +3273,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
| 3261 | 3273 | ||
| 3262 | out: | 3274 | out: |
| 3263 | btrfs_free_path(path); | 3275 | btrfs_free_path(path); |
| 3264 | kfree(inodes); | 3276 | vfree(inodes); |
| 3265 | kfree(loi); | 3277 | kfree(loi); |
| 3266 | 3278 | ||
| 3267 | return ret; | 3279 | return ret; |
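The clone-ioctl changes above switch the locked range from off..off+len to off..off+len-1, because extent-state ranges are end-inclusive; locking one byte past the requested region would also cover the start of the following block. A tiny standalone C illustration of that off-by-one:

/* sketch only: shows the inclusive-end arithmetic, nothing btrfs-specific */
#include <stdio.h>

int main(void)
{
	unsigned long long off = 4096, len = 4096;
	unsigned long long end_incl = off + len - 1;	/* correct inclusive end */
	unsigned long long end_excl = off + len;	/* one byte too far */

	printf("bytes covered with inclusive end: %llu\n", end_incl - off + 1);
	printf("bytes covered if end were %llu: %llu\n",
	       end_excl, end_excl - off + 1);
	return 0;
}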
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 051c7fe551dd..7772f02ba28e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include "btrfs_inode.h" | 25 | #include "btrfs_inode.h" |
| 26 | #include "extent_io.h" | 26 | #include "extent_io.h" |
| 27 | 27 | ||
| 28 | static struct kmem_cache *btrfs_ordered_extent_cache; | ||
| 29 | |||
| 28 | static u64 entry_end(struct btrfs_ordered_extent *entry) | 30 | static u64 entry_end(struct btrfs_ordered_extent *entry) |
| 29 | { | 31 | { |
| 30 | if (entry->file_offset + entry->len < entry->file_offset) | 32 | if (entry->file_offset + entry->len < entry->file_offset) |
| @@ -187,7 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
| 187 | struct btrfs_ordered_extent *entry; | 189 | struct btrfs_ordered_extent *entry; |
| 188 | 190 | ||
| 189 | tree = &BTRFS_I(inode)->ordered_tree; | 191 | tree = &BTRFS_I(inode)->ordered_tree; |
| 190 | entry = kzalloc(sizeof(*entry), GFP_NOFS); | 192 | entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); |
| 191 | if (!entry) | 193 | if (!entry) |
| 192 | return -ENOMEM; | 194 | return -ENOMEM; |
| 193 | 195 | ||
| @@ -421,7 +423,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) | |||
| 421 | list_del(&sum->list); | 423 | list_del(&sum->list); |
| 422 | kfree(sum); | 424 | kfree(sum); |
| 423 | } | 425 | } |
| 424 | kfree(entry); | 426 | kmem_cache_free(btrfs_ordered_extent_cache, entry); |
| 425 | } | 427 | } |
| 426 | } | 428 | } |
| 427 | 429 | ||
| @@ -466,8 +468,7 @@ void btrfs_remove_ordered_extent(struct inode *inode, | |||
| 466 | * wait for all the ordered extents in a root. This is done when balancing | 468 | * wait for all the ordered extents in a root. This is done when balancing |
| 467 | * space between drives. | 469 | * space between drives. |
| 468 | */ | 470 | */ |
| 469 | void btrfs_wait_ordered_extents(struct btrfs_root *root, | 471 | void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) |
| 470 | int nocow_only, int delay_iput) | ||
| 471 | { | 472 | { |
| 472 | struct list_head splice; | 473 | struct list_head splice; |
| 473 | struct list_head *cur; | 474 | struct list_head *cur; |
| @@ -482,15 +483,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, | |||
| 482 | cur = splice.next; | 483 | cur = splice.next; |
| 483 | ordered = list_entry(cur, struct btrfs_ordered_extent, | 484 | ordered = list_entry(cur, struct btrfs_ordered_extent, |
| 484 | root_extent_list); | 485 | root_extent_list); |
| 485 | if (nocow_only && | ||
| 486 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && | ||
| 487 | !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { | ||
| 488 | list_move(&ordered->root_extent_list, | ||
| 489 | &root->fs_info->ordered_extents); | ||
| 490 | cond_resched_lock(&root->fs_info->ordered_extent_lock); | ||
| 491 | continue; | ||
| 492 | } | ||
| 493 | |||
| 494 | list_del_init(&ordered->root_extent_list); | 486 | list_del_init(&ordered->root_extent_list); |
| 495 | atomic_inc(&ordered->refs); | 487 | atomic_inc(&ordered->refs); |
| 496 | 488 | ||
| @@ -775,7 +767,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 775 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; | 767 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; |
| 776 | u64 disk_i_size; | 768 | u64 disk_i_size; |
| 777 | u64 new_i_size; | 769 | u64 new_i_size; |
| 778 | u64 i_size_test; | ||
| 779 | u64 i_size = i_size_read(inode); | 770 | u64 i_size = i_size_read(inode); |
| 780 | struct rb_node *node; | 771 | struct rb_node *node; |
| 781 | struct rb_node *prev = NULL; | 772 | struct rb_node *prev = NULL; |
| @@ -835,55 +826,30 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 835 | break; | 826 | break; |
| 836 | if (test->file_offset >= i_size) | 827 | if (test->file_offset >= i_size) |
| 837 | break; | 828 | break; |
| 838 | if (test->file_offset >= disk_i_size) | 829 | if (test->file_offset >= disk_i_size) { |
| 830 | /* | ||
| 831 | * we don't update disk_i_size now, so record this | ||
| 832 | * undealt i_size. Or we will not know the real | ||
| 833 | * i_size. | ||
| 834 | */ | ||
| 835 | if (test->outstanding_isize < offset) | ||
| 836 | test->outstanding_isize = offset; | ||
| 837 | if (ordered && | ||
| 838 | ordered->outstanding_isize > | ||
| 839 | test->outstanding_isize) | ||
| 840 | test->outstanding_isize = | ||
| 841 | ordered->outstanding_isize; | ||
| 839 | goto out; | 842 | goto out; |
| 840 | } | ||
| 841 | new_i_size = min_t(u64, offset, i_size); | ||
| 842 | |||
| 843 | /* | ||
| 844 | * at this point, we know we can safely update i_size to at least | ||
| 845 | * the offset from this ordered extent. But, we need to | ||
| 846 | * walk forward and see if ios from higher up in the file have | ||
| 847 | * finished. | ||
| 848 | */ | ||
| 849 | if (ordered) { | ||
| 850 | node = rb_next(&ordered->rb_node); | ||
| 851 | } else { | ||
| 852 | if (prev) | ||
| 853 | node = rb_next(prev); | ||
| 854 | else | ||
| 855 | node = rb_first(&tree->tree); | ||
| 856 | } | ||
| 857 | |||
| 858 | /* | ||
| 859 | * We are looking for an area between our current extent and the next | ||
| 860 | * ordered extent to update the i_size to. There are 3 cases here | ||
| 861 | * | ||
| 862 | * 1) We don't actually have anything and we can update to i_size. | ||
| 863 | * 2) We have stuff but they already did their i_size update so again we | ||
| 864 | * can just update to i_size. | ||
| 865 | * 3) We have an outstanding ordered extent so the most we can update | ||
| 866 | * our disk_i_size to is the start of the next offset. | ||
| 867 | */ | ||
| 868 | i_size_test = i_size; | ||
| 869 | for (; node; node = rb_next(node)) { | ||
| 870 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); | ||
| 871 | |||
| 872 | if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags)) | ||
| 873 | continue; | ||
| 874 | if (test->file_offset > offset) { | ||
| 875 | i_size_test = test->file_offset; | ||
| 876 | break; | ||
| 877 | } | 843 | } |
| 878 | } | 844 | } |
| 845 | new_i_size = min_t(u64, offset, i_size); | ||
| 879 | 846 | ||
| 880 | /* | 847 | /* |
| 881 | * i_size_test is the end of a region after this ordered | 848 | * Some ordered extents may have completed before the current one, and |
| 882 | * extent where there are no ordered extents, we can safely set | 849 | * we hold the real i_size in ->outstanding_isize. |
| 883 | * disk_i_size to this. | ||
| 884 | */ | 850 | */ |
| 885 | if (i_size_test > offset) | 851 | if (ordered && ordered->outstanding_isize > new_i_size) |
| 886 | new_i_size = min_t(u64, i_size_test, i_size); | 852 | new_i_size = min_t(u64, ordered->outstanding_isize, i_size); |
| 887 | BTRFS_I(inode)->disk_i_size = new_i_size; | 853 | BTRFS_I(inode)->disk_i_size = new_i_size; |
| 888 | ret = 0; | 854 | ret = 0; |
| 889 | out: | 855 | out: |
| @@ -984,3 +950,20 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, | |||
| 984 | } | 950 | } |
| 985 | spin_unlock(&root->fs_info->ordered_extent_lock); | 951 | spin_unlock(&root->fs_info->ordered_extent_lock); |
| 986 | } | 952 | } |
| 953 | |||
| 954 | int __init ordered_data_init(void) | ||
| 955 | { | ||
| 956 | btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent", | ||
| 957 | sizeof(struct btrfs_ordered_extent), 0, | ||
| 958 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, | ||
| 959 | NULL); | ||
| 960 | if (!btrfs_ordered_extent_cache) | ||
| 961 | return -ENOMEM; | ||
| 962 | return 0; | ||
| 963 | } | ||
| 964 | |||
| 965 | void ordered_data_exit(void) | ||
| 966 | { | ||
| 967 | if (btrfs_ordered_extent_cache) | ||
| 968 | kmem_cache_destroy(btrfs_ordered_extent_cache); | ||
| 969 | } | ||
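The ordered-data.c hunks above replace kzalloc/kfree of struct btrfs_ordered_extent with a dedicated slab cache created once at init and destroyed at exit. Shown below is a kernel-module-style sketch of that same lifecycle with made-up names; it only illustrates the pattern, it is not the btrfs code:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>

struct my_obj {
	u64 start;
	u64 len;
};

static struct kmem_cache *my_obj_cache;

static int __init my_cache_init(void)
{
	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!my_obj_cache)
		return -ENOMEM;
	return 0;
}

static void __exit my_cache_exit(void)
{
	/* only destroy a cache that was actually created, mirroring
	 * ordered_data_exit() above */
	if (my_obj_cache)
		kmem_cache_destroy(my_obj_cache);
}

module_init(my_cache_init);
module_exit(my_cache_exit);
MODULE_LICENSE("GPL");

/* the allocation and free paths would then use:
 *   obj = kmem_cache_zalloc(my_obj_cache, GFP_NOFS);
 *   kmem_cache_free(my_obj_cache, obj);
 */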
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index e03c560d2997..dd27a0b46a37 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
| @@ -96,6 +96,13 @@ struct btrfs_ordered_extent { | |||
| 96 | /* number of bytes that still need writing */ | 96 | /* number of bytes that still need writing */ |
| 97 | u64 bytes_left; | 97 | u64 bytes_left; |
| 98 | 98 | ||
| 99 | /* | ||
| 100 | * the end offset of an ordered extent that completed earlier but | ||
| 101 | * could not update disk_i_size. Please see the comment of | ||
| 102 | * btrfs_ordered_update_i_size(); | ||
| 103 | */ | ||
| 104 | u64 outstanding_isize; | ||
| 105 | |||
| 99 | /* flags (described above) */ | 106 | /* flags (described above) */ |
| 100 | unsigned long flags; | 107 | unsigned long flags; |
| 101 | 108 | ||
| @@ -183,6 +190,7 @@ void btrfs_run_ordered_operations(struct btrfs_root *root, int wait); | |||
| 183 | void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, | 190 | void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, |
| 184 | struct btrfs_root *root, | 191 | struct btrfs_root *root, |
| 185 | struct inode *inode); | 192 | struct inode *inode); |
| 186 | void btrfs_wait_ordered_extents(struct btrfs_root *root, | 193 | void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput); |
| 187 | int nocow_only, int delay_iput); | 194 | int __init ordered_data_init(void); |
| 195 | void ordered_data_exit(void); | ||
| 188 | #endif | 196 | #endif |
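The new outstanding_isize field exists because disk_i_size may only advance across a contiguous prefix of completed ordered extents; a completed extent beyond a hole can only record the larger size for later. A toy, self-contained C illustration of that rule (not btrfs code) follows:

#include <stdio.h>

struct done { unsigned long long start, end; };	/* [start, end), completed */

/* completed ranges sorted by start; returns the largest safe on-disk size */
static unsigned long long safe_disk_i_size(const struct done *d, int n,
					   unsigned long long old_size)
{
	unsigned long long size = old_size;
	int i;

	for (i = 0; i < n; i++) {
		if (d[i].start > size)
			break;		/* hole: stop advancing */
		if (d[i].end > size)
			size = d[i].end;
	}
	return size;
}

int main(void)
{
	struct done d[] = { { 0, 4096 }, { 4096, 8192 }, { 12288, 16384 } };

	/* the last range sits past a hole, so it would only raise the
	 * recorded outstanding size, not the published one */
	printf("%llu\n", safe_disk_i_size(d, 3, 0));	/* prints 8192 */
	return 0;
}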
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index b65015581744..5039686df6ae 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
| @@ -1145,12 +1145,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1145 | 1145 | ||
| 1146 | ulist_reinit(tmp); | 1146 | ulist_reinit(tmp); |
| 1147 | /* XXX id not needed */ | 1147 | /* XXX id not needed */ |
| 1148 | ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC); | 1148 | ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC); |
| 1149 | ULIST_ITER_INIT(&tmp_uiter); | 1149 | ULIST_ITER_INIT(&tmp_uiter); |
| 1150 | while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { | 1150 | while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { |
| 1151 | struct btrfs_qgroup_list *glist; | 1151 | struct btrfs_qgroup_list *glist; |
| 1152 | 1152 | ||
| 1153 | qg = (struct btrfs_qgroup *)tmp_unode->aux; | 1153 | qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux; |
| 1154 | if (qg->refcnt < seq) | 1154 | if (qg->refcnt < seq) |
| 1155 | qg->refcnt = seq + 1; | 1155 | qg->refcnt = seq + 1; |
| 1156 | else | 1156 | else |
| @@ -1158,7 +1158,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1158 | 1158 | ||
| 1159 | list_for_each_entry(glist, &qg->groups, next_group) { | 1159 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1160 | ulist_add(tmp, glist->group->qgroupid, | 1160 | ulist_add(tmp, glist->group->qgroupid, |
| 1161 | (unsigned long)glist->group, | 1161 | (u64)(uintptr_t)glist->group, |
| 1162 | GFP_ATOMIC); | 1162 | GFP_ATOMIC); |
| 1163 | } | 1163 | } |
| 1164 | } | 1164 | } |
| @@ -1168,13 +1168,13 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1168 | * step 2: walk from the new root | 1168 | * step 2: walk from the new root |
| 1169 | */ | 1169 | */ |
| 1170 | ulist_reinit(tmp); | 1170 | ulist_reinit(tmp); |
| 1171 | ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC); | 1171 | ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC); |
| 1172 | ULIST_ITER_INIT(&uiter); | 1172 | ULIST_ITER_INIT(&uiter); |
| 1173 | while ((unode = ulist_next(tmp, &uiter))) { | 1173 | while ((unode = ulist_next(tmp, &uiter))) { |
| 1174 | struct btrfs_qgroup *qg; | 1174 | struct btrfs_qgroup *qg; |
| 1175 | struct btrfs_qgroup_list *glist; | 1175 | struct btrfs_qgroup_list *glist; |
| 1176 | 1176 | ||
| 1177 | qg = (struct btrfs_qgroup *)unode->aux; | 1177 | qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux; |
| 1178 | if (qg->refcnt < seq) { | 1178 | if (qg->refcnt < seq) { |
| 1179 | /* not visited by step 1 */ | 1179 | /* not visited by step 1 */ |
| 1180 | qg->rfer += sgn * node->num_bytes; | 1180 | qg->rfer += sgn * node->num_bytes; |
| @@ -1190,7 +1190,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1190 | 1190 | ||
| 1191 | list_for_each_entry(glist, &qg->groups, next_group) { | 1191 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1192 | ulist_add(tmp, glist->group->qgroupid, | 1192 | ulist_add(tmp, glist->group->qgroupid, |
| 1193 | (unsigned long)glist->group, GFP_ATOMIC); | 1193 | (uintptr_t)glist->group, GFP_ATOMIC); |
| 1194 | } | 1194 | } |
| 1195 | } | 1195 | } |
| 1196 | 1196 | ||
| @@ -1208,12 +1208,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1208 | continue; | 1208 | continue; |
| 1209 | 1209 | ||
| 1210 | ulist_reinit(tmp); | 1210 | ulist_reinit(tmp); |
| 1211 | ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC); | 1211 | ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC); |
| 1212 | ULIST_ITER_INIT(&tmp_uiter); | 1212 | ULIST_ITER_INIT(&tmp_uiter); |
| 1213 | while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { | 1213 | while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { |
| 1214 | struct btrfs_qgroup_list *glist; | 1214 | struct btrfs_qgroup_list *glist; |
| 1215 | 1215 | ||
| 1216 | qg = (struct btrfs_qgroup *)tmp_unode->aux; | 1216 | qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux; |
| 1217 | if (qg->tag == seq) | 1217 | if (qg->tag == seq) |
| 1218 | continue; | 1218 | continue; |
| 1219 | 1219 | ||
| @@ -1225,7 +1225,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
| 1225 | 1225 | ||
| 1226 | list_for_each_entry(glist, &qg->groups, next_group) { | 1226 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1227 | ulist_add(tmp, glist->group->qgroupid, | 1227 | ulist_add(tmp, glist->group->qgroupid, |
| 1228 | (unsigned long)glist->group, | 1228 | (uintptr_t)glist->group, |
| 1229 | GFP_ATOMIC); | 1229 | GFP_ATOMIC); |
| 1230 | } | 1230 | } |
| 1231 | } | 1231 | } |
| @@ -1469,13 +1469,17 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes) | |||
| 1469 | * be exceeded | 1469 | * be exceeded |
| 1470 | */ | 1470 | */ |
| 1471 | ulist = ulist_alloc(GFP_ATOMIC); | 1471 | ulist = ulist_alloc(GFP_ATOMIC); |
| 1472 | ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC); | 1472 | if (!ulist) { |
| 1473 | ret = -ENOMEM; | ||
| 1474 | goto out; | ||
| 1475 | } | ||
| 1476 | ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC); | ||
| 1473 | ULIST_ITER_INIT(&uiter); | 1477 | ULIST_ITER_INIT(&uiter); |
| 1474 | while ((unode = ulist_next(ulist, &uiter))) { | 1478 | while ((unode = ulist_next(ulist, &uiter))) { |
| 1475 | struct btrfs_qgroup *qg; | 1479 | struct btrfs_qgroup *qg; |
| 1476 | struct btrfs_qgroup_list *glist; | 1480 | struct btrfs_qgroup_list *glist; |
| 1477 | 1481 | ||
| 1478 | qg = (struct btrfs_qgroup *)unode->aux; | 1482 | qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux; |
| 1479 | 1483 | ||
| 1480 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && | 1484 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && |
| 1481 | qg->reserved + qg->rfer + num_bytes > | 1485 | qg->reserved + qg->rfer + num_bytes > |
| @@ -1489,7 +1493,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes) | |||
| 1489 | 1493 | ||
| 1490 | list_for_each_entry(glist, &qg->groups, next_group) { | 1494 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1491 | ulist_add(ulist, glist->group->qgroupid, | 1495 | ulist_add(ulist, glist->group->qgroupid, |
| 1492 | (unsigned long)glist->group, GFP_ATOMIC); | 1496 | (uintptr_t)glist->group, GFP_ATOMIC); |
| 1493 | } | 1497 | } |
| 1494 | } | 1498 | } |
| 1495 | if (ret) | 1499 | if (ret) |
| @@ -1502,7 +1506,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes) | |||
| 1502 | while ((unode = ulist_next(ulist, &uiter))) { | 1506 | while ((unode = ulist_next(ulist, &uiter))) { |
| 1503 | struct btrfs_qgroup *qg; | 1507 | struct btrfs_qgroup *qg; |
| 1504 | 1508 | ||
| 1505 | qg = (struct btrfs_qgroup *)unode->aux; | 1509 | qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux; |
| 1506 | 1510 | ||
| 1507 | qg->reserved += num_bytes; | 1511 | qg->reserved += num_bytes; |
| 1508 | } | 1512 | } |
| @@ -1541,19 +1545,23 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes) | |||
| 1541 | goto out; | 1545 | goto out; |
| 1542 | 1546 | ||
| 1543 | ulist = ulist_alloc(GFP_ATOMIC); | 1547 | ulist = ulist_alloc(GFP_ATOMIC); |
| 1544 | ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC); | 1548 | if (!ulist) { |
| 1549 | btrfs_std_error(fs_info, -ENOMEM); | ||
| 1550 | goto out; | ||
| 1551 | } | ||
| 1552 | ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC); | ||
| 1545 | ULIST_ITER_INIT(&uiter); | 1553 | ULIST_ITER_INIT(&uiter); |
| 1546 | while ((unode = ulist_next(ulist, &uiter))) { | 1554 | while ((unode = ulist_next(ulist, &uiter))) { |
| 1547 | struct btrfs_qgroup *qg; | 1555 | struct btrfs_qgroup *qg; |
| 1548 | struct btrfs_qgroup_list *glist; | 1556 | struct btrfs_qgroup_list *glist; |
| 1549 | 1557 | ||
| 1550 | qg = (struct btrfs_qgroup *)unode->aux; | 1558 | qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux; |
| 1551 | 1559 | ||
| 1552 | qg->reserved -= num_bytes; | 1560 | qg->reserved -= num_bytes; |
| 1553 | 1561 | ||
| 1554 | list_for_each_entry(glist, &qg->groups, next_group) { | 1562 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1555 | ulist_add(ulist, glist->group->qgroupid, | 1563 | ulist_add(ulist, glist->group->qgroupid, |
| 1556 | (unsigned long)glist->group, GFP_ATOMIC); | 1564 | (uintptr_t)glist->group, GFP_ATOMIC); |
| 1557 | } | 1565 | } |
| 1558 | } | 1566 | } |
| 1559 | 1567 | ||
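The qgroup.c changes above convert pointer-in-u64 stores from a plain (unsigned long) cast to a (u64)(uintptr_t) round trip, which stays correct and warning-free on 32-bit builds where a pointer is narrower than u64. A small standalone demo of the pattern:

#include <stdio.h>
#include <stdint.h>

struct qgroup_like { int id; };

int main(void)
{
	struct qgroup_like qg = { 42 };
	uint64_t aux;
	struct qgroup_like *back;

	aux = (uint64_t)(uintptr_t)&qg;			/* store in 64-bit slot */
	back = (struct qgroup_like *)(uintptr_t)aux;	/* retrieve */

	printf("%d\n", back->id);	/* prints 42 */
	return 0;
}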
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 4da08652004d..776f0aa128fc 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
| @@ -3270,8 +3270,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, | |||
| 3270 | key.offset = 0; | 3270 | key.offset = 0; |
| 3271 | 3271 | ||
| 3272 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); | 3272 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); |
| 3273 | if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) { | 3273 | if (IS_ERR(inode) || is_bad_inode(inode)) { |
| 3274 | if (inode && !IS_ERR(inode)) | 3274 | if (!IS_ERR(inode)) |
| 3275 | iput(inode); | 3275 | iput(inode); |
| 3276 | return -ENOENT; | 3276 | return -ENOENT; |
| 3277 | } | 3277 | } |
| @@ -3621,7 +3621,7 @@ next: | |||
| 3621 | 3621 | ||
| 3622 | ret = find_first_extent_bit(&rc->processed_blocks, | 3622 | ret = find_first_extent_bit(&rc->processed_blocks, |
| 3623 | key.objectid, &start, &end, | 3623 | key.objectid, &start, &end, |
| 3624 | EXTENT_DIRTY); | 3624 | EXTENT_DIRTY, NULL); |
| 3625 | 3625 | ||
| 3626 | if (ret == 0 && start <= key.objectid) { | 3626 | if (ret == 0 && start <= key.objectid) { |
| 3627 | btrfs_release_path(path); | 3627 | btrfs_release_path(path); |
| @@ -3674,7 +3674,8 @@ int prepare_to_relocate(struct reloc_control *rc) | |||
| 3674 | struct btrfs_trans_handle *trans; | 3674 | struct btrfs_trans_handle *trans; |
| 3675 | int ret; | 3675 | int ret; |
| 3676 | 3676 | ||
| 3677 | rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root); | 3677 | rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root, |
| 3678 | BTRFS_BLOCK_RSV_TEMP); | ||
| 3678 | if (!rc->block_rsv) | 3679 | if (!rc->block_rsv) |
| 3679 | return -ENOMEM; | 3680 | return -ENOMEM; |
| 3680 | 3681 | ||
| @@ -4057,7 +4058,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) | |||
| 4057 | (unsigned long long)rc->block_group->flags); | 4058 | (unsigned long long)rc->block_group->flags); |
| 4058 | 4059 | ||
| 4059 | btrfs_start_delalloc_inodes(fs_info->tree_root, 0); | 4060 | btrfs_start_delalloc_inodes(fs_info->tree_root, 0); |
| 4060 | btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); | 4061 | btrfs_wait_ordered_extents(fs_info->tree_root, 0); |
| 4061 | 4062 | ||
| 4062 | while (1) { | 4063 | while (1) { |
| 4063 | mutex_lock(&fs_info->cleaner_mutex); | 4064 | mutex_lock(&fs_info->cleaner_mutex); |
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 10d8e4d88071..eb923d087da7 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
| @@ -141,8 +141,10 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root | |||
| 141 | return -ENOMEM; | 141 | return -ENOMEM; |
| 142 | 142 | ||
| 143 | ret = btrfs_search_slot(trans, root, key, path, 0, 1); | 143 | ret = btrfs_search_slot(trans, root, key, path, 0, 1); |
| 144 | if (ret < 0) | 144 | if (ret < 0) { |
| 145 | goto out_abort; | 145 | btrfs_abort_transaction(trans, root, ret); |
| 146 | goto out; | ||
| 147 | } | ||
| 146 | 148 | ||
| 147 | if (ret != 0) { | 149 | if (ret != 0) { |
| 148 | btrfs_print_leaf(root, path->nodes[0]); | 150 | btrfs_print_leaf(root, path->nodes[0]); |
| @@ -166,16 +168,23 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root | |||
| 166 | btrfs_release_path(path); | 168 | btrfs_release_path(path); |
| 167 | ret = btrfs_search_slot(trans, root, key, path, | 169 | ret = btrfs_search_slot(trans, root, key, path, |
| 168 | -1, 1); | 170 | -1, 1); |
| 169 | if (ret < 0) | 171 | if (ret < 0) { |
| 170 | goto out_abort; | 172 | btrfs_abort_transaction(trans, root, ret); |
| 173 | goto out; | ||
| 174 | } | ||
| 175 | |||
| 171 | ret = btrfs_del_item(trans, root, path); | 176 | ret = btrfs_del_item(trans, root, path); |
| 172 | if (ret < 0) | 177 | if (ret < 0) { |
| 173 | goto out_abort; | 178 | btrfs_abort_transaction(trans, root, ret); |
| 179 | goto out; | ||
| 180 | } | ||
| 174 | btrfs_release_path(path); | 181 | btrfs_release_path(path); |
| 175 | ret = btrfs_insert_empty_item(trans, root, path, | 182 | ret = btrfs_insert_empty_item(trans, root, path, |
| 176 | key, sizeof(*item)); | 183 | key, sizeof(*item)); |
| 177 | if (ret < 0) | 184 | if (ret < 0) { |
| 178 | goto out_abort; | 185 | btrfs_abort_transaction(trans, root, ret); |
| 186 | goto out; | ||
| 187 | } | ||
| 179 | l = path->nodes[0]; | 188 | l = path->nodes[0]; |
| 180 | slot = path->slots[0]; | 189 | slot = path->slots[0]; |
| 181 | ptr = btrfs_item_ptr_offset(l, slot); | 190 | ptr = btrfs_item_ptr_offset(l, slot); |
| @@ -192,10 +201,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root | |||
| 192 | out: | 201 | out: |
| 193 | btrfs_free_path(path); | 202 | btrfs_free_path(path); |
| 194 | return ret; | 203 | return ret; |
| 195 | |||
| 196 | out_abort: | ||
| 197 | btrfs_abort_transaction(trans, root, ret); | ||
| 198 | goto out; | ||
| 199 | } | 204 | } |
| 200 | 205 | ||
| 201 | int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, | 206 | int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
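The root-tree.c change drops the shared out_abort label and instead aborts the transaction at each failure site before jumping to the common cleanup label. A generic sketch of that goto-cleanup idiom, with made-up helpers standing in for the btrfs calls:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int do_step(int fail) { return fail ? -EIO : 0; }

static int update_item(void)
{
	char *buf;
	int ret;

	buf = malloc(64);
	if (!buf)
		return -ENOMEM;

	ret = do_step(0);
	if (ret < 0) {
		fprintf(stderr, "step 1 failed: %d\n", ret);	/* report here */
		goto out;
	}

	ret = do_step(1);
	if (ret < 0) {
		fprintf(stderr, "step 2 failed: %d\n", ret);	/* report here */
		goto out;
	}
out:
	free(buf);	/* single exit path releases everything */
	return ret;
}

int main(void)
{
	printf("update_item() = %d\n", update_item());
	return 0;
}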
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index b223620cd5a6..27892f67e69b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -352,13 +352,14 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
| 352 | struct extent_buffer *eb; | 352 | struct extent_buffer *eb; |
| 353 | struct btrfs_extent_item *ei; | 353 | struct btrfs_extent_item *ei; |
| 354 | struct scrub_warning swarn; | 354 | struct scrub_warning swarn; |
| 355 | u32 item_size; | 355 | unsigned long ptr = 0; |
| 356 | int ret; | 356 | u64 extent_item_pos; |
| 357 | u64 flags = 0; | ||
| 357 | u64 ref_root; | 358 | u64 ref_root; |
| 359 | u32 item_size; | ||
| 358 | u8 ref_level; | 360 | u8 ref_level; |
| 359 | unsigned long ptr = 0; | ||
| 360 | const int bufsize = 4096; | 361 | const int bufsize = 4096; |
| 361 | u64 extent_item_pos; | 362 | int ret; |
| 362 | 363 | ||
| 363 | path = btrfs_alloc_path(); | 364 | path = btrfs_alloc_path(); |
| 364 | 365 | ||
| @@ -375,7 +376,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
| 375 | if (!path || !swarn.scratch_buf || !swarn.msg_buf) | 376 | if (!path || !swarn.scratch_buf || !swarn.msg_buf) |
| 376 | goto out; | 377 | goto out; |
| 377 | 378 | ||
| 378 | ret = extent_from_logical(fs_info, swarn.logical, path, &found_key); | 379 | ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, |
| 380 | &flags); | ||
| 379 | if (ret < 0) | 381 | if (ret < 0) |
| 380 | goto out; | 382 | goto out; |
| 381 | 383 | ||
| @@ -387,7 +389,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
| 387 | item_size = btrfs_item_size_nr(eb, path->slots[0]); | 389 | item_size = btrfs_item_size_nr(eb, path->slots[0]); |
| 388 | btrfs_release_path(path); | 390 | btrfs_release_path(path); |
| 389 | 391 | ||
| 390 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 392 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
| 391 | do { | 393 | do { |
| 392 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, | 394 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, |
| 393 | &ref_root, &ref_level); | 395 | &ref_root, &ref_level); |
| @@ -1029,6 +1031,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev, | |||
| 1029 | spin_lock(&sdev->stat_lock); | 1031 | spin_lock(&sdev->stat_lock); |
| 1030 | sdev->stat.malloc_errors++; | 1032 | sdev->stat.malloc_errors++; |
| 1031 | spin_unlock(&sdev->stat_lock); | 1033 | spin_unlock(&sdev->stat_lock); |
| 1034 | kfree(bbio); | ||
| 1032 | return -ENOMEM; | 1035 | return -ENOMEM; |
| 1033 | } | 1036 | } |
| 1034 | sblock->page_count++; | 1037 | sblock->page_count++; |
| @@ -1666,21 +1669,6 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work) | |||
| 1666 | scrub_block_put(sblock); | 1669 | scrub_block_put(sblock); |
| 1667 | } | 1670 | } |
| 1668 | 1671 | ||
| 1669 | if (sbio->err) { | ||
| 1670 | /* what is this good for??? */ | ||
| 1671 | sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); | ||
| 1672 | sbio->bio->bi_flags |= 1 << BIO_UPTODATE; | ||
| 1673 | sbio->bio->bi_phys_segments = 0; | ||
| 1674 | sbio->bio->bi_idx = 0; | ||
| 1675 | |||
| 1676 | for (i = 0; i < sbio->page_count; i++) { | ||
| 1677 | struct bio_vec *bi; | ||
| 1678 | bi = &sbio->bio->bi_io_vec[i]; | ||
| 1679 | bi->bv_offset = 0; | ||
| 1680 | bi->bv_len = PAGE_SIZE; | ||
| 1681 | } | ||
| 1682 | } | ||
| 1683 | |||
| 1684 | bio_put(sbio->bio); | 1672 | bio_put(sbio->bio); |
| 1685 | sbio->bio = NULL; | 1673 | sbio->bio = NULL; |
| 1686 | spin_lock(&sdev->list_lock); | 1674 | spin_lock(&sdev->list_lock); |
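The scrub.c hunk adapts to extent_from_logical() returning the extent flags through a new out parameter instead of OR-ing flag bits into the return value, so negative error codes can no longer be confused with flags. A short illustrative C sketch of that API shape (function and flag names here are made up):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define FLAG_TREE_BLOCK	(1ULL << 1)

static int extent_lookup(uint64_t logical, uint64_t *flags)
{
	if (logical == 0)
		return -ENOENT;		/* pure error, no flag ambiguity */
	*flags = FLAG_TREE_BLOCK;	/* metadata extent */
	return 0;
}

int main(void)
{
	uint64_t flags = 0;
	int ret = extent_lookup(4096, &flags);

	if (ret < 0)
		return 1;
	if (flags & FLAG_TREE_BLOCK)
		printf("tree block extent\n");
	return 0;
}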
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index fb5ffe95f869..c7beb543a4a8 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
| @@ -107,7 +107,6 @@ struct send_ctx { | |||
| 107 | int cur_inode_new; | 107 | int cur_inode_new; |
| 108 | int cur_inode_new_gen; | 108 | int cur_inode_new_gen; |
| 109 | int cur_inode_deleted; | 109 | int cur_inode_deleted; |
| 110 | int cur_inode_first_ref_orphan; | ||
| 111 | u64 cur_inode_size; | 110 | u64 cur_inode_size; |
| 112 | u64 cur_inode_mode; | 111 | u64 cur_inode_mode; |
| 113 | 112 | ||
| @@ -126,7 +125,15 @@ struct send_ctx { | |||
| 126 | 125 | ||
| 127 | struct name_cache_entry { | 126 | struct name_cache_entry { |
| 128 | struct list_head list; | 127 | struct list_head list; |
| 129 | struct list_head use_list; | 128 | /* |
| 129 | * radix_tree has only 32bit entries but we need to handle 64bit inums. | ||
| 130 | * We use the lower 32bit of the 64bit inum to store it in the tree. If | ||
| 131 | * more than one inum would fall into the same entry, we use radix_list | ||
| 132 | * to store the additional entries. radix_list is also used to store | ||
| 133 | * entries where two entries have the same inum but different | ||
| 134 | * generations. | ||
| 135 | */ | ||
| 136 | struct list_head radix_list; | ||
| 130 | u64 ino; | 137 | u64 ino; |
| 131 | u64 gen; | 138 | u64 gen; |
| 132 | u64 parent_ino; | 139 | u64 parent_ino; |
| @@ -328,6 +335,7 @@ out: | |||
| 328 | return ret; | 335 | return ret; |
| 329 | } | 336 | } |
| 330 | 337 | ||
| 338 | #if 0 | ||
| 331 | static void fs_path_remove(struct fs_path *p) | 339 | static void fs_path_remove(struct fs_path *p) |
| 332 | { | 340 | { |
| 333 | BUG_ON(p->reversed); | 341 | BUG_ON(p->reversed); |
| @@ -335,6 +343,7 @@ static void fs_path_remove(struct fs_path *p) | |||
| 335 | p->end--; | 343 | p->end--; |
| 336 | *p->end = 0; | 344 | *p->end = 0; |
| 337 | } | 345 | } |
| 346 | #endif | ||
| 338 | 347 | ||
| 339 | static int fs_path_copy(struct fs_path *p, struct fs_path *from) | 348 | static int fs_path_copy(struct fs_path *p, struct fs_path *from) |
| 340 | { | 349 | { |
| @@ -377,7 +386,7 @@ static struct btrfs_path *alloc_path_for_send(void) | |||
| 377 | return path; | 386 | return path; |
| 378 | } | 387 | } |
| 379 | 388 | ||
| 380 | static int write_buf(struct send_ctx *sctx, const void *buf, u32 len) | 389 | int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) |
| 381 | { | 390 | { |
| 382 | int ret; | 391 | int ret; |
| 383 | mm_segment_t old_fs; | 392 | mm_segment_t old_fs; |
| @@ -387,8 +396,7 @@ static int write_buf(struct send_ctx *sctx, const void *buf, u32 len) | |||
| 387 | set_fs(KERNEL_DS); | 396 | set_fs(KERNEL_DS); |
| 388 | 397 | ||
| 389 | while (pos < len) { | 398 | while (pos < len) { |
| 390 | ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos, | 399 | ret = vfs_write(filp, (char *)buf + pos, len - pos, off); |
| 391 | &sctx->send_off); | ||
| 392 | /* TODO handle that correctly */ | 400 | /* TODO handle that correctly */ |
| 393 | /*if (ret == -ERESTARTSYS) { | 401 | /*if (ret == -ERESTARTSYS) { |
| 394 | continue; | 402 | continue; |
| @@ -544,7 +552,8 @@ static int send_header(struct send_ctx *sctx) | |||
| 544 | strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); | 552 | strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); |
| 545 | hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION); | 553 | hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION); |
| 546 | 554 | ||
| 547 | return write_buf(sctx, &hdr, sizeof(hdr)); | 555 | return write_buf(sctx->send_filp, &hdr, sizeof(hdr), |
| 556 | &sctx->send_off); | ||
| 548 | } | 557 | } |
| 549 | 558 | ||
| 550 | /* | 559 | /* |
| @@ -581,7 +590,8 @@ static int send_cmd(struct send_ctx *sctx) | |||
| 581 | crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); | 590 | crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); |
| 582 | hdr->crc = cpu_to_le32(crc); | 591 | hdr->crc = cpu_to_le32(crc); |
| 583 | 592 | ||
| 584 | ret = write_buf(sctx, sctx->send_buf, sctx->send_size); | 593 | ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, |
| 594 | &sctx->send_off); | ||
| 585 | 595 | ||
| 586 | sctx->total_send_size += sctx->send_size; | 596 | sctx->total_send_size += sctx->send_size; |
| 587 | sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; | 597 | sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; |
| @@ -687,7 +697,8 @@ out: | |||
| 687 | */ | 697 | */ |
| 688 | static int get_inode_info(struct btrfs_root *root, | 698 | static int get_inode_info(struct btrfs_root *root, |
| 689 | u64 ino, u64 *size, u64 *gen, | 699 | u64 ino, u64 *size, u64 *gen, |
| 690 | u64 *mode, u64 *uid, u64 *gid) | 700 | u64 *mode, u64 *uid, u64 *gid, |
| 701 | u64 *rdev) | ||
| 691 | { | 702 | { |
| 692 | int ret; | 703 | int ret; |
| 693 | struct btrfs_inode_item *ii; | 704 | struct btrfs_inode_item *ii; |
| @@ -721,6 +732,8 @@ static int get_inode_info(struct btrfs_root *root, | |||
| 721 | *uid = btrfs_inode_uid(path->nodes[0], ii); | 732 | *uid = btrfs_inode_uid(path->nodes[0], ii); |
| 722 | if (gid) | 733 | if (gid) |
| 723 | *gid = btrfs_inode_gid(path->nodes[0], ii); | 734 | *gid = btrfs_inode_gid(path->nodes[0], ii); |
| 735 | if (rdev) | ||
| 736 | *rdev = btrfs_inode_rdev(path->nodes[0], ii); | ||
| 724 | 737 | ||
| 725 | out: | 738 | out: |
| 726 | btrfs_free_path(path); | 739 | btrfs_free_path(path); |
| @@ -852,7 +865,6 @@ static int iterate_dir_item(struct send_ctx *sctx, | |||
| 852 | struct extent_buffer *eb; | 865 | struct extent_buffer *eb; |
| 853 | struct btrfs_item *item; | 866 | struct btrfs_item *item; |
| 854 | struct btrfs_dir_item *di; | 867 | struct btrfs_dir_item *di; |
| 855 | struct btrfs_path *tmp_path = NULL; | ||
| 856 | struct btrfs_key di_key; | 868 | struct btrfs_key di_key; |
| 857 | char *buf = NULL; | 869 | char *buf = NULL; |
| 858 | char *buf2 = NULL; | 870 | char *buf2 = NULL; |
| @@ -874,12 +886,6 @@ static int iterate_dir_item(struct send_ctx *sctx, | |||
| 874 | goto out; | 886 | goto out; |
| 875 | } | 887 | } |
| 876 | 888 | ||
| 877 | tmp_path = alloc_path_for_send(); | ||
| 878 | if (!tmp_path) { | ||
| 879 | ret = -ENOMEM; | ||
| 880 | goto out; | ||
| 881 | } | ||
| 882 | |||
| 883 | eb = path->nodes[0]; | 889 | eb = path->nodes[0]; |
| 884 | slot = path->slots[0]; | 890 | slot = path->slots[0]; |
| 885 | item = btrfs_item_nr(eb, slot); | 891 | item = btrfs_item_nr(eb, slot); |
| @@ -941,7 +947,6 @@ static int iterate_dir_item(struct send_ctx *sctx, | |||
| 941 | } | 947 | } |
| 942 | 948 | ||
| 943 | out: | 949 | out: |
| 944 | btrfs_free_path(tmp_path); | ||
| 945 | if (buf_virtual) | 950 | if (buf_virtual) |
| 946 | vfree(buf); | 951 | vfree(buf); |
| 947 | else | 952 | else |
| @@ -1026,12 +1031,12 @@ struct backref_ctx { | |||
| 1026 | u64 extent_len; | 1031 | u64 extent_len; |
| 1027 | 1032 | ||
| 1028 | /* Just to check for bugs in backref resolving */ | 1033 | /* Just to check for bugs in backref resolving */ |
| 1029 | int found_in_send_root; | 1034 | int found_itself; |
| 1030 | }; | 1035 | }; |
| 1031 | 1036 | ||
| 1032 | static int __clone_root_cmp_bsearch(const void *key, const void *elt) | 1037 | static int __clone_root_cmp_bsearch(const void *key, const void *elt) |
| 1033 | { | 1038 | { |
| 1034 | u64 root = (u64)key; | 1039 | u64 root = (u64)(uintptr_t)key; |
| 1035 | struct clone_root *cr = (struct clone_root *)elt; | 1040 | struct clone_root *cr = (struct clone_root *)elt; |
| 1036 | 1041 | ||
| 1037 | if (root < cr->root->objectid) | 1042 | if (root < cr->root->objectid) |
| @@ -1055,6 +1060,7 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2) | |||
| 1055 | 1060 | ||
| 1056 | /* | 1061 | /* |
| 1057 | * Called for every backref that is found for the current extent. | 1062 | * Called for every backref that is found for the current extent. |
| 1063 | * Results are collected in sctx->clone_roots->ino/offset/found_refs | ||
| 1058 | */ | 1064 | */ |
| 1059 | static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | 1065 | static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) |
| 1060 | { | 1066 | { |
| @@ -1064,7 +1070,7 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | |||
| 1064 | u64 i_size; | 1070 | u64 i_size; |
| 1065 | 1071 | ||
| 1066 | /* First check if the root is in the list of accepted clone sources */ | 1072 | /* First check if the root is in the list of accepted clone sources */ |
| 1067 | found = bsearch((void *)root, bctx->sctx->clone_roots, | 1073 | found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots, |
| 1068 | bctx->sctx->clone_roots_cnt, | 1074 | bctx->sctx->clone_roots_cnt, |
| 1069 | sizeof(struct clone_root), | 1075 | sizeof(struct clone_root), |
| 1070 | __clone_root_cmp_bsearch); | 1076 | __clone_root_cmp_bsearch); |
| @@ -1074,14 +1080,15 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | |||
| 1074 | if (found->root == bctx->sctx->send_root && | 1080 | if (found->root == bctx->sctx->send_root && |
| 1075 | ino == bctx->cur_objectid && | 1081 | ino == bctx->cur_objectid && |
| 1076 | offset == bctx->cur_offset) { | 1082 | offset == bctx->cur_offset) { |
| 1077 | bctx->found_in_send_root = 1; | 1083 | bctx->found_itself = 1; |
| 1078 | } | 1084 | } |
| 1079 | 1085 | ||
| 1080 | /* | 1086 | /* |
| 1081 | * There are inodes that have extents that lie behind it's i_size. Don't | 1087 | * There are inodes that have extents that lie behind its i_size. Don't |
| 1082 | * accept clones from these extents. | 1088 | * accept clones from these extents. |
| 1083 | */ | 1089 | */ |
| 1084 | ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL); | 1090 | ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL, |
| 1091 | NULL); | ||
| 1085 | if (ret < 0) | 1092 | if (ret < 0) |
| 1086 | return ret; | 1093 | return ret; |
| 1087 | 1094 | ||
| @@ -1101,16 +1108,12 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | |||
| 1101 | */ | 1108 | */ |
| 1102 | if (ino >= bctx->cur_objectid) | 1109 | if (ino >= bctx->cur_objectid) |
| 1103 | return 0; | 1110 | return 0; |
| 1104 | /*if (ino > ctx->cur_objectid) | 1111 | #if 0 |
| 1112 | if (ino > bctx->cur_objectid) | ||
| 1105 | return 0; | 1113 | return 0; |
| 1106 | if (offset + ctx->extent_len > ctx->cur_offset) | 1114 | if (offset + bctx->extent_len > bctx->cur_offset) |
| 1107 | return 0;*/ | 1115 | return 0; |
| 1108 | 1116 | #endif | |
| 1109 | bctx->found++; | ||
| 1110 | found->found_refs++; | ||
| 1111 | found->ino = ino; | ||
| 1112 | found->offset = offset; | ||
| 1113 | return 0; | ||
| 1114 | } | 1117 | } |
| 1115 | 1118 | ||
| 1116 | bctx->found++; | 1119 | bctx->found++; |
| @@ -1130,6 +1133,12 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | |||
| 1130 | } | 1133 | } |
| 1131 | 1134 | ||
| 1132 | /* | 1135 | /* |
| 1136 | * Given an inode, offset and extent item, it finds a good clone for a clone | ||
| 1137 | * instruction. Returns -ENOENT when none could be found. The function makes | ||
| 1138 | * sure that the returned clone is usable at the point where sending is at the | ||
| 1139 | * moment. This means that no clones are accepted which lie behind the current | ||
| 1140 | * inode+offset. | ||
| 1141 | * | ||
| 1133 | * path must point to the extent item when called. | 1142 | * path must point to the extent item when called. |
| 1134 | */ | 1143 | */ |
| 1135 | static int find_extent_clone(struct send_ctx *sctx, | 1144 | static int find_extent_clone(struct send_ctx *sctx, |
| @@ -1141,20 +1150,29 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
| 1141 | int ret; | 1150 | int ret; |
| 1142 | int extent_type; | 1151 | int extent_type; |
| 1143 | u64 logical; | 1152 | u64 logical; |
| 1153 | u64 disk_byte; | ||
| 1144 | u64 num_bytes; | 1154 | u64 num_bytes; |
| 1145 | u64 extent_item_pos; | 1155 | u64 extent_item_pos; |
| 1156 | u64 flags = 0; | ||
| 1146 | struct btrfs_file_extent_item *fi; | 1157 | struct btrfs_file_extent_item *fi; |
| 1147 | struct extent_buffer *eb = path->nodes[0]; | 1158 | struct extent_buffer *eb = path->nodes[0]; |
| 1148 | struct backref_ctx backref_ctx; | 1159 | struct backref_ctx *backref_ctx = NULL; |
| 1149 | struct clone_root *cur_clone_root; | 1160 | struct clone_root *cur_clone_root; |
| 1150 | struct btrfs_key found_key; | 1161 | struct btrfs_key found_key; |
| 1151 | struct btrfs_path *tmp_path; | 1162 | struct btrfs_path *tmp_path; |
| 1163 | int compressed; | ||
| 1152 | u32 i; | 1164 | u32 i; |
| 1153 | 1165 | ||
| 1154 | tmp_path = alloc_path_for_send(); | 1166 | tmp_path = alloc_path_for_send(); |
| 1155 | if (!tmp_path) | 1167 | if (!tmp_path) |
| 1156 | return -ENOMEM; | 1168 | return -ENOMEM; |
| 1157 | 1169 | ||
| 1170 | backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS); | ||
| 1171 | if (!backref_ctx) { | ||
| 1172 | ret = -ENOMEM; | ||
| 1173 | goto out; | ||
| 1174 | } | ||
| 1175 | |||
| 1158 | if (data_offset >= ino_size) { | 1176 | if (data_offset >= ino_size) { |
| 1159 | /* | 1177 | /* |
| 1160 | * There may be extents that lie behind the file's size. | 1178 | * There may be extents that lie behind the file's size. |
| @@ -1172,22 +1190,23 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
| 1172 | ret = -ENOENT; | 1190 | ret = -ENOENT; |
| 1173 | goto out; | 1191 | goto out; |
| 1174 | } | 1192 | } |
| 1193 | compressed = btrfs_file_extent_compression(eb, fi); | ||
| 1175 | 1194 | ||
| 1176 | num_bytes = btrfs_file_extent_num_bytes(eb, fi); | 1195 | num_bytes = btrfs_file_extent_num_bytes(eb, fi); |
| 1177 | logical = btrfs_file_extent_disk_bytenr(eb, fi); | 1196 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); |
| 1178 | if (logical == 0) { | 1197 | if (disk_byte == 0) { |
| 1179 | ret = -ENOENT; | 1198 | ret = -ENOENT; |
| 1180 | goto out; | 1199 | goto out; |
| 1181 | } | 1200 | } |
| 1182 | logical += btrfs_file_extent_offset(eb, fi); | 1201 | logical = disk_byte + btrfs_file_extent_offset(eb, fi); |
| 1183 | 1202 | ||
| 1184 | ret = extent_from_logical(sctx->send_root->fs_info, | 1203 | ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, |
| 1185 | logical, tmp_path, &found_key); | 1204 | &found_key, &flags); |
| 1186 | btrfs_release_path(tmp_path); | 1205 | btrfs_release_path(tmp_path); |
| 1187 | 1206 | ||
| 1188 | if (ret < 0) | 1207 | if (ret < 0) |
| 1189 | goto out; | 1208 | goto out; |
| 1190 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 1209 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
| 1191 | ret = -EIO; | 1210 | ret = -EIO; |
| 1192 | goto out; | 1211 | goto out; |
| 1193 | } | 1212 | } |
| @@ -1202,12 +1221,12 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
| 1202 | cur_clone_root->found_refs = 0; | 1221 | cur_clone_root->found_refs = 0; |
| 1203 | } | 1222 | } |
| 1204 | 1223 | ||
| 1205 | backref_ctx.sctx = sctx; | 1224 | backref_ctx->sctx = sctx; |
| 1206 | backref_ctx.found = 0; | 1225 | backref_ctx->found = 0; |
| 1207 | backref_ctx.cur_objectid = ino; | 1226 | backref_ctx->cur_objectid = ino; |
| 1208 | backref_ctx.cur_offset = data_offset; | 1227 | backref_ctx->cur_offset = data_offset; |
| 1209 | backref_ctx.found_in_send_root = 0; | 1228 | backref_ctx->found_itself = 0; |
| 1210 | backref_ctx.extent_len = num_bytes; | 1229 | backref_ctx->extent_len = num_bytes; |
| 1211 | 1230 | ||
| 1212 | /* | 1231 | /* |
| 1213 | * The last extent of a file may be too large due to page alignment. | 1232 | * The last extent of a file may be too large due to page alignment. |
| @@ -1215,25 +1234,31 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
| 1215 | * __iterate_backrefs work. | 1234 | * __iterate_backrefs work. |
| 1216 | */ | 1235 | */ |
| 1217 | if (data_offset + num_bytes >= ino_size) | 1236 | if (data_offset + num_bytes >= ino_size) |
| 1218 | backref_ctx.extent_len = ino_size - data_offset; | 1237 | backref_ctx->extent_len = ino_size - data_offset; |
| 1219 | 1238 | ||
| 1220 | /* | 1239 | /* |
| 1221 | * Now collect all backrefs. | 1240 | * Now collect all backrefs. |
| 1222 | */ | 1241 | */ |
| 1242 | if (compressed == BTRFS_COMPRESS_NONE) | ||
| 1243 | extent_item_pos = logical - found_key.objectid; | ||
| 1244 | else | ||
| 1245 | extent_item_pos = 0; | ||
| 1246 | |||
| 1223 | extent_item_pos = logical - found_key.objectid; | 1247 | extent_item_pos = logical - found_key.objectid; |
| 1224 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | 1248 | ret = iterate_extent_inodes(sctx->send_root->fs_info, |
| 1225 | found_key.objectid, extent_item_pos, 1, | 1249 | found_key.objectid, extent_item_pos, 1, |
| 1226 | __iterate_backrefs, &backref_ctx); | 1250 | __iterate_backrefs, backref_ctx); |
| 1251 | |||
| 1227 | if (ret < 0) | 1252 | if (ret < 0) |
| 1228 | goto out; | 1253 | goto out; |
| 1229 | 1254 | ||
| 1230 | if (!backref_ctx.found_in_send_root) { | 1255 | if (!backref_ctx->found_itself) { |
| 1231 | /* found a bug in backref code? */ | 1256 | /* found a bug in backref code? */ |
| 1232 | ret = -EIO; | 1257 | ret = -EIO; |
| 1233 | printk(KERN_ERR "btrfs: ERROR did not find backref in " | 1258 | printk(KERN_ERR "btrfs: ERROR did not find backref in " |
| 1234 | "send_root. inode=%llu, offset=%llu, " | 1259 | "send_root. inode=%llu, offset=%llu, " |
| 1235 | "logical=%llu\n", | 1260 | "disk_byte=%llu found extent=%llu\n", |
| 1236 | ino, data_offset, logical); | 1261 | ino, data_offset, disk_byte, found_key.objectid); |
| 1237 | goto out; | 1262 | goto out; |
| 1238 | } | 1263 | } |
| 1239 | 1264 | ||
| @@ -1242,7 +1267,7 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | |||
| 1242 | "num_bytes=%llu, logical=%llu\n", | 1267 | "num_bytes=%llu, logical=%llu\n", |
| 1243 | data_offset, ino, num_bytes, logical); | 1268 | data_offset, ino, num_bytes, logical); |
| 1244 | 1269 | ||
| 1245 | if (!backref_ctx.found) | 1270 | if (!backref_ctx->found) |
| 1246 | verbose_printk("btrfs: no clones found\n"); | 1271 | verbose_printk("btrfs: no clones found\n"); |
| 1247 | 1272 | ||
| 1248 | cur_clone_root = NULL; | 1273 | cur_clone_root = NULL; |
| @@ -1253,7 +1278,6 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | |||
| 1253 | else if (sctx->clone_roots[i].root == sctx->send_root) | 1278 | else if (sctx->clone_roots[i].root == sctx->send_root) |
| 1254 | /* prefer clones from send_root over others */ | 1279 | /* prefer clones from send_root over others */ |
| 1255 | cur_clone_root = sctx->clone_roots + i; | 1280 | cur_clone_root = sctx->clone_roots + i; |
| 1256 | break; | ||
| 1257 | } | 1281 | } |
| 1258 | 1282 | ||
| 1259 | } | 1283 | } |
| @@ -1267,6 +1291,7 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | |||
| 1267 | 1291 | ||
| 1268 | out: | 1292 | out: |
| 1269 | btrfs_free_path(tmp_path); | 1293 | btrfs_free_path(tmp_path); |
| 1294 | kfree(backref_ctx); | ||
| 1270 | return ret; | 1295 | return ret; |
| 1271 | } | 1296 | } |
| 1272 | 1297 | ||
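find_extent_clone() now allocates its backref_ctx with kmalloc(GFP_NOFS) and releases it at the single out: label instead of keeping the structure on the stack. A rough userspace illustration of that allocate/initialize/free-on-exit shape follows; the context fields and the stubbed backref step are invented for the sketch.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct backref_ctx_like {       /* stand-in for struct backref_ctx */
	uint64_t cur_objectid;
	uint64_t cur_offset;
	uint64_t extent_len;
	int found;
};

static int find_clone_model(uint64_t ino, uint64_t offset)
{
	struct backref_ctx_like *bctx;
	int ret;

	bctx = malloc(sizeof(*bctx));   /* kmalloc(..., GFP_NOFS) in the kernel */
	if (!bctx)
		return -ENOMEM;

	bctx->cur_objectid = ino;
	bctx->cur_offset = offset;
	bctx->extent_len = 0;
	bctx->found = 0;

	/* ... the backref iteration would fill bctx here ... */
	ret = bctx->found ? 0 : -ENOENT;

	/* single exit: the context is always freed, as at the out: label */
	free(bctx);
	return ret;
}

int main(void)
{
	printf("%d\n", find_clone_model(257, 0));
	return 0;
}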
| @@ -1307,8 +1332,6 @@ static int read_symlink(struct send_ctx *sctx, | |||
| 1307 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 1332 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); |
| 1308 | 1333 | ||
| 1309 | ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); | 1334 | ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); |
| 1310 | if (ret < 0) | ||
| 1311 | goto out; | ||
| 1312 | 1335 | ||
| 1313 | out: | 1336 | out: |
| 1314 | btrfs_free_path(path); | 1337 | btrfs_free_path(path); |
| @@ -1404,7 +1427,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) | |||
| 1404 | u64 right_gen; | 1427 | u64 right_gen; |
| 1405 | 1428 | ||
| 1406 | ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, | 1429 | ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, |
| 1407 | NULL); | 1430 | NULL, NULL); |
| 1408 | if (ret < 0 && ret != -ENOENT) | 1431 | if (ret < 0 && ret != -ENOENT) |
| 1409 | goto out; | 1432 | goto out; |
| 1410 | left_ret = ret; | 1433 | left_ret = ret; |
| @@ -1413,16 +1436,16 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) | |||
| 1413 | right_ret = -ENOENT; | 1436 | right_ret = -ENOENT; |
| 1414 | } else { | 1437 | } else { |
| 1415 | ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, | 1438 | ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, |
| 1416 | NULL, NULL, NULL); | 1439 | NULL, NULL, NULL, NULL); |
| 1417 | if (ret < 0 && ret != -ENOENT) | 1440 | if (ret < 0 && ret != -ENOENT) |
| 1418 | goto out; | 1441 | goto out; |
| 1419 | right_ret = ret; | 1442 | right_ret = ret; |
| 1420 | } | 1443 | } |
| 1421 | 1444 | ||
| 1422 | if (!left_ret && !right_ret) { | 1445 | if (!left_ret && !right_ret) { |
| 1423 | if (left_gen == gen && right_gen == gen) | 1446 | if (left_gen == gen && right_gen == gen) { |
| 1424 | ret = inode_state_no_change; | 1447 | ret = inode_state_no_change; |
| 1425 | else if (left_gen == gen) { | 1448 | } else if (left_gen == gen) { |
| 1426 | if (ino < sctx->send_progress) | 1449 | if (ino < sctx->send_progress) |
| 1427 | ret = inode_state_did_create; | 1450 | ret = inode_state_did_create; |
| 1428 | else | 1451 | else |
| @@ -1516,6 +1539,10 @@ out: | |||
| 1516 | return ret; | 1539 | return ret; |
| 1517 | } | 1540 | } |
| 1518 | 1541 | ||
| 1542 | /* | ||
| 1543 | * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir, | ||
| 1544 | * generation of the parent dir and the name of the dir entry. | ||
| 1545 | */ | ||
| 1519 | static int get_first_ref(struct send_ctx *sctx, | 1546 | static int get_first_ref(struct send_ctx *sctx, |
| 1520 | struct btrfs_root *root, u64 ino, | 1547 | struct btrfs_root *root, u64 ino, |
| 1521 | u64 *dir, u64 *dir_gen, struct fs_path *name) | 1548 | u64 *dir, u64 *dir_gen, struct fs_path *name) |
| @@ -1557,7 +1584,7 @@ static int get_first_ref(struct send_ctx *sctx, | |||
| 1557 | btrfs_release_path(path); | 1584 | btrfs_release_path(path); |
| 1558 | 1585 | ||
| 1559 | ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, | 1586 | ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, |
| 1560 | NULL); | 1587 | NULL, NULL); |
| 1561 | if (ret < 0) | 1588 | if (ret < 0) |
| 1562 | goto out; | 1589 | goto out; |
| 1563 | 1590 | ||
| @@ -1586,22 +1613,28 @@ static int is_first_ref(struct send_ctx *sctx, | |||
| 1586 | if (ret < 0) | 1613 | if (ret < 0) |
| 1587 | goto out; | 1614 | goto out; |
| 1588 | 1615 | ||
| 1589 | if (name_len != fs_path_len(tmp_name)) { | 1616 | if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { |
| 1590 | ret = 0; | 1617 | ret = 0; |
| 1591 | goto out; | 1618 | goto out; |
| 1592 | } | 1619 | } |
| 1593 | 1620 | ||
| 1594 | ret = memcmp(tmp_name->start, name, name_len); | 1621 | ret = !memcmp(tmp_name->start, name, name_len); |
| 1595 | if (ret) | ||
| 1596 | ret = 0; | ||
| 1597 | else | ||
| 1598 | ret = 1; | ||
| 1599 | 1622 | ||
| 1600 | out: | 1623 | out: |
| 1601 | fs_path_free(sctx, tmp_name); | 1624 | fs_path_free(sctx, tmp_name); |
| 1602 | return ret; | 1625 | return ret; |
| 1603 | } | 1626 | } |
| 1604 | 1627 | ||
| 1628 | /* | ||
| 1629 | * Used by process_recorded_refs to determine if a new ref would overwrite an | ||
| 1630 | * already existing ref. In case it detects an overwrite, it returns the | ||
| 1631 | * inode/gen in who_ino/who_gen. | ||
| 1632 | * When an overwrite is detected, process_recorded_refs does proper orphanizing | ||
| 1633 | * to make sure later references to the overwritten inode are possible. | ||
| 1634 | * Orphanizing is however only required for the first ref of an inode. | ||
| 1635 | * process_recorded_refs does an additional is_first_ref check to see if | ||
| 1636 | * orphanizing is really required. | ||
| 1637 | */ | ||
| 1605 | static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | 1638 | static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, |
| 1606 | const char *name, int name_len, | 1639 | const char *name, int name_len, |
| 1607 | u64 *who_ino, u64 *who_gen) | 1640 | u64 *who_ino, u64 *who_gen) |
| @@ -1626,9 +1659,14 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | |||
| 1626 | goto out; | 1659 | goto out; |
| 1627 | } | 1660 | } |
| 1628 | 1661 | ||
| 1662 | /* | ||
| 1663 | * Check if the overwritten ref was already processed. If yes, the ref | ||
| 1664 | * was already unlinked/moved, so we can safely assume that we will not | ||
| 1665 | * overwrite anything at this point in time. | ||
| 1666 | */ | ||
| 1629 | if (other_inode > sctx->send_progress) { | 1667 | if (other_inode > sctx->send_progress) { |
| 1630 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, | 1668 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, |
| 1631 | who_gen, NULL, NULL, NULL); | 1669 | who_gen, NULL, NULL, NULL, NULL); |
| 1632 | if (ret < 0) | 1670 | if (ret < 0) |
| 1633 | goto out; | 1671 | goto out; |
| 1634 | 1672 | ||
| @@ -1642,6 +1680,13 @@ out: | |||
| 1642 | return ret; | 1680 | return ret; |
| 1643 | } | 1681 | } |
| 1644 | 1682 | ||
| 1683 | /* | ||
| 1684 | * Checks if the ref was overwritten by an already processed inode. This is | ||
| 1685 | * used by __get_cur_name_and_parent to find out if the ref was orphanized and | ||
| 1686 | * thus the orphan name needs be used. | ||
| 1687 | * process_recorded_refs also uses it to avoid unlinking of refs that were | ||
| 1688 | * overwritten. | ||
| 1689 | */ | ||
| 1645 | static int did_overwrite_ref(struct send_ctx *sctx, | 1690 | static int did_overwrite_ref(struct send_ctx *sctx, |
| 1646 | u64 dir, u64 dir_gen, | 1691 | u64 dir, u64 dir_gen, |
| 1647 | u64 ino, u64 ino_gen, | 1692 | u64 ino, u64 ino_gen, |
| @@ -1671,7 +1716,7 @@ static int did_overwrite_ref(struct send_ctx *sctx, | |||
| 1671 | } | 1716 | } |
| 1672 | 1717 | ||
| 1673 | ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, | 1718 | ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, |
| 1674 | NULL); | 1719 | NULL, NULL); |
| 1675 | if (ret < 0) | 1720 | if (ret < 0) |
| 1676 | goto out; | 1721 | goto out; |
| 1677 | 1722 | ||
| @@ -1690,6 +1735,11 @@ out: | |||
| 1690 | return ret; | 1735 | return ret; |
| 1691 | } | 1736 | } |
| 1692 | 1737 | ||
| 1738 | /* | ||
| 1739 | * Same as did_overwrite_ref, but also checks if it is the first ref of an inode | ||
| 1740 | * that got overwritten. This is used by process_recorded_refs to determine | ||
| 1741 | * if it has to use the path as returned by get_cur_path or the orphan name. | ||
| 1742 | */ | ||
| 1693 | static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) | 1743 | static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) |
| 1694 | { | 1744 | { |
| 1695 | int ret = 0; | 1745 | int ret = 0; |
| @@ -1710,39 +1760,40 @@ static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) | |||
| 1710 | 1760 | ||
| 1711 | ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, | 1761 | ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, |
| 1712 | name->start, fs_path_len(name)); | 1762 | name->start, fs_path_len(name)); |
| 1713 | if (ret < 0) | ||
| 1714 | goto out; | ||
| 1715 | 1763 | ||
| 1716 | out: | 1764 | out: |
| 1717 | fs_path_free(sctx, name); | 1765 | fs_path_free(sctx, name); |
| 1718 | return ret; | 1766 | return ret; |
| 1719 | } | 1767 | } |
| 1720 | 1768 | ||
| 1769 | /* | ||
| 1770 | * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit, | ||
| 1771 | * so we need to do some special handling in case we have clashes. This function | ||
| 1772 | * takes care of this with the help of name_cache_entry::radix_list. | ||
| 1773 | * In case of error, nce is kfreed. | ||
| 1774 | */ | ||
| 1721 | static int name_cache_insert(struct send_ctx *sctx, | 1775 | static int name_cache_insert(struct send_ctx *sctx, |
| 1722 | struct name_cache_entry *nce) | 1776 | struct name_cache_entry *nce) |
| 1723 | { | 1777 | { |
| 1724 | int ret = 0; | 1778 | int ret = 0; |
| 1725 | struct name_cache_entry **ncea; | 1779 | struct list_head *nce_head; |
| 1726 | 1780 | ||
| 1727 | ncea = radix_tree_lookup(&sctx->name_cache, nce->ino); | 1781 | nce_head = radix_tree_lookup(&sctx->name_cache, |
| 1728 | if (ncea) { | 1782 | (unsigned long)nce->ino); |
| 1729 | if (!ncea[0]) | 1783 | if (!nce_head) { |
| 1730 | ncea[0] = nce; | 1784 | nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); |
| 1731 | else if (!ncea[1]) | 1785 | if (!nce_head) |
| 1732 | ncea[1] = nce; | ||
| 1733 | else | ||
| 1734 | BUG(); | ||
| 1735 | } else { | ||
| 1736 | ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS); | ||
| 1737 | if (!ncea) | ||
| 1738 | return -ENOMEM; | 1786 | return -ENOMEM; |
| 1787 | INIT_LIST_HEAD(nce_head); | ||
| 1739 | 1788 | ||
| 1740 | ncea[0] = nce; | 1789 | ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); |
| 1741 | ncea[1] = NULL; | 1790 | if (ret < 0) { |
| 1742 | ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea); | 1791 | kfree(nce_head); |
| 1743 | if (ret < 0) | 1792 | kfree(nce); |
| 1744 | return ret; | 1793 | return ret; |
| 1794 | } | ||
| 1745 | } | 1795 | } |
| 1796 | list_add_tail(&nce->radix_list, nce_head); | ||
| 1746 | list_add_tail(&nce->list, &sctx->name_cache_list); | 1797 | list_add_tail(&nce->list, &sctx->name_cache_list); |
| 1747 | sctx->name_cache_size++; | 1798 | sctx->name_cache_size++; |
| 1748 | 1799 | ||
| @@ -1752,50 +1803,52 @@ static int name_cache_insert(struct send_ctx *sctx, | |||
| 1752 | static void name_cache_delete(struct send_ctx *sctx, | 1803 | static void name_cache_delete(struct send_ctx *sctx, |
| 1753 | struct name_cache_entry *nce) | 1804 | struct name_cache_entry *nce) |
| 1754 | { | 1805 | { |
| 1755 | struct name_cache_entry **ncea; | 1806 | struct list_head *nce_head; |
| 1756 | |||
| 1757 | ncea = radix_tree_lookup(&sctx->name_cache, nce->ino); | ||
| 1758 | BUG_ON(!ncea); | ||
| 1759 | |||
| 1760 | if (ncea[0] == nce) | ||
| 1761 | ncea[0] = NULL; | ||
| 1762 | else if (ncea[1] == nce) | ||
| 1763 | ncea[1] = NULL; | ||
| 1764 | else | ||
| 1765 | BUG(); | ||
| 1766 | 1807 | ||
| 1767 | if (!ncea[0] && !ncea[1]) { | 1808 | nce_head = radix_tree_lookup(&sctx->name_cache, |
| 1768 | radix_tree_delete(&sctx->name_cache, nce->ino); | 1809 | (unsigned long)nce->ino); |
| 1769 | kfree(ncea); | 1810 | BUG_ON(!nce_head); |
| 1770 | } | ||
| 1771 | 1811 | ||
| 1812 | list_del(&nce->radix_list); | ||
| 1772 | list_del(&nce->list); | 1813 | list_del(&nce->list); |
| 1773 | |||
| 1774 | sctx->name_cache_size--; | 1814 | sctx->name_cache_size--; |
| 1815 | |||
| 1816 | if (list_empty(nce_head)) { | ||
| 1817 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); | ||
| 1818 | kfree(nce_head); | ||
| 1819 | } | ||
| 1775 | } | 1820 | } |
| 1776 | 1821 | ||
| 1777 | static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, | 1822 | static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, |
| 1778 | u64 ino, u64 gen) | 1823 | u64 ino, u64 gen) |
| 1779 | { | 1824 | { |
| 1780 | struct name_cache_entry **ncea; | 1825 | struct list_head *nce_head; |
| 1826 | struct name_cache_entry *cur; | ||
| 1781 | 1827 | ||
| 1782 | ncea = radix_tree_lookup(&sctx->name_cache, ino); | 1828 | nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); |
| 1783 | if (!ncea) | 1829 | if (!nce_head) |
| 1784 | return NULL; | 1830 | return NULL; |
| 1785 | 1831 | ||
| 1786 | if (ncea[0] && ncea[0]->gen == gen) | 1832 | list_for_each_entry(cur, nce_head, radix_list) { |
| 1787 | return ncea[0]; | 1833 | if (cur->ino == ino && cur->gen == gen) |
| 1788 | else if (ncea[1] && ncea[1]->gen == gen) | 1834 | return cur; |
| 1789 | return ncea[1]; | 1835 | } |
| 1790 | return NULL; | 1836 | return NULL; |
| 1791 | } | 1837 | } |
| 1792 | 1838 | ||
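The rework above replaces the fixed two-slot array per radix tree slot with a list head, so any number of inodes whose index collides can share a slot, and lookups compare both ino and gen. A hypothetical userspace analogue is sketched below, with a small bucket array standing in for the radix tree and invented entry fields.

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 64

struct nce {                    /* stand-in for struct name_cache_entry */
	uint64_t ino;
	uint64_t gen;
	char name[32];
	struct nce *next;       /* plays the role of the radix_list chaining */
};

static struct nce *buckets[NBUCKETS];

static void cache_insert(struct nce *e)
{
	unsigned long slot = (unsigned long)e->ino % NBUCKETS;

	e->next = buckets[slot];        /* chain on collision */
	buckets[slot] = e;
}

static struct nce *cache_search(uint64_t ino, uint64_t gen)
{
	unsigned long slot = (unsigned long)ino % NBUCKETS;
	struct nce *cur;

	/* Walk the chain and match on both ino and gen, like
	 * name_cache_search() walks nce_head with list_for_each_entry(). */
	for (cur = buckets[slot]; cur; cur = cur->next)
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	return NULL;
}

int main(void)
{
	struct nce a = { .ino = 300, .gen = 1, .name = "foo" };
	struct nce b = { .ino = 300 + NBUCKETS, .gen = 1, .name = "bar" };

	cache_insert(&a);
	cache_insert(&b);       /* same bucket, different ino */
	printf("%s %s\n", cache_search(300, 1)->name,
	       cache_search(300 + NBUCKETS, 1)->name);
	return 0;
}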
| 1839 | /* | ||
| 1840 | * Removes the entry from the list and adds it back to the end. This marks the | ||
| 1841 | * entry as recently used so that name_cache_clean_unused does not remove it. | ||
| 1842 | */ | ||
| 1793 | static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) | 1843 | static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) |
| 1794 | { | 1844 | { |
| 1795 | list_del(&nce->list); | 1845 | list_del(&nce->list); |
| 1796 | list_add_tail(&nce->list, &sctx->name_cache_list); | 1846 | list_add_tail(&nce->list, &sctx->name_cache_list); |
| 1797 | } | 1847 | } |
| 1798 | 1848 | ||
| 1849 | /* | ||
| 1850 | * Remove some entries from the beginning of name_cache_list. | ||
| 1851 | */ | ||
| 1799 | static void name_cache_clean_unused(struct send_ctx *sctx) | 1852 | static void name_cache_clean_unused(struct send_ctx *sctx) |
| 1800 | { | 1853 | { |
| 1801 | struct name_cache_entry *nce; | 1854 | struct name_cache_entry *nce; |
| @@ -1814,13 +1867,23 @@ static void name_cache_clean_unused(struct send_ctx *sctx) | |||
| 1814 | static void name_cache_free(struct send_ctx *sctx) | 1867 | static void name_cache_free(struct send_ctx *sctx) |
| 1815 | { | 1868 | { |
| 1816 | struct name_cache_entry *nce; | 1869 | struct name_cache_entry *nce; |
| 1817 | struct name_cache_entry *tmp; | ||
| 1818 | 1870 | ||
| 1819 | list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) { | 1871 | while (!list_empty(&sctx->name_cache_list)) { |
| 1872 | nce = list_entry(sctx->name_cache_list.next, | ||
| 1873 | struct name_cache_entry, list); | ||
| 1820 | name_cache_delete(sctx, nce); | 1874 | name_cache_delete(sctx, nce); |
| 1875 | kfree(nce); | ||
| 1821 | } | 1876 | } |
| 1822 | } | 1877 | } |
| 1823 | 1878 | ||
| 1879 | /* | ||
| 1880 | * Used by get_cur_path for each ref up to the root. | ||
| 1881 | * Returns 0 if it succeeded. | ||
| 1882 | * Returns 1 if the inode is not existent or got overwritten. In that case, the | ||
| 1883 | * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 | ||
| 1884 | * is returned, parent_ino/parent_gen are not guaranteed to be valid. | ||
| 1885 | * Returns <0 in case of error. | ||
| 1886 | */ | ||
| 1824 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | 1887 | static int __get_cur_name_and_parent(struct send_ctx *sctx, |
| 1825 | u64 ino, u64 gen, | 1888 | u64 ino, u64 gen, |
| 1826 | u64 *parent_ino, | 1889 | u64 *parent_ino, |
| @@ -1832,6 +1895,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
| 1832 | struct btrfs_path *path = NULL; | 1895 | struct btrfs_path *path = NULL; |
| 1833 | struct name_cache_entry *nce = NULL; | 1896 | struct name_cache_entry *nce = NULL; |
| 1834 | 1897 | ||
| 1898 | /* | ||
| 1899 | * First check if we already did a call to this function with the same | ||
| 1900 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes | ||
| 1901 | * return the cached result. | ||
| 1902 | */ | ||
| 1835 | nce = name_cache_search(sctx, ino, gen); | 1903 | nce = name_cache_search(sctx, ino, gen); |
| 1836 | if (nce) { | 1904 | if (nce) { |
| 1837 | if (ino < sctx->send_progress && nce->need_later_update) { | 1905 | if (ino < sctx->send_progress && nce->need_later_update) { |
| @@ -1854,6 +1922,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
| 1854 | if (!path) | 1922 | if (!path) |
| 1855 | return -ENOMEM; | 1923 | return -ENOMEM; |
| 1856 | 1924 | ||
| 1925 | /* | ||
| 1926 | * If the inode is not existent yet, add the orphan name and return 1. | ||
| 1927 | * This should only happen for the parent dir that we determine in | ||
| 1928 | * __record_new_ref | ||
| 1929 | */ | ||
| 1857 | ret = is_inode_existent(sctx, ino, gen); | 1930 | ret = is_inode_existent(sctx, ino, gen); |
| 1858 | if (ret < 0) | 1931 | if (ret < 0) |
| 1859 | goto out; | 1932 | goto out; |
| @@ -1866,6 +1939,10 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
| 1866 | goto out_cache; | 1939 | goto out_cache; |
| 1867 | } | 1940 | } |
| 1868 | 1941 | ||
| 1942 | /* | ||
| 1943 | * Depending on whether the inode was already processed or not, use | ||
| 1944 | * send_root or parent_root for ref lookup. | ||
| 1945 | */ | ||
| 1869 | if (ino < sctx->send_progress) | 1946 | if (ino < sctx->send_progress) |
| 1870 | ret = get_first_ref(sctx, sctx->send_root, ino, | 1947 | ret = get_first_ref(sctx, sctx->send_root, ino, |
| 1871 | parent_ino, parent_gen, dest); | 1948 | parent_ino, parent_gen, dest); |
| @@ -1875,6 +1952,10 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
| 1875 | if (ret < 0) | 1952 | if (ret < 0) |
| 1876 | goto out; | 1953 | goto out; |
| 1877 | 1954 | ||
| 1955 | /* | ||
| 1956 | * Check if the ref was overwritten by an inode's ref that was processed | ||
| 1957 | * earlier. If yes, treat as orphan and return 1. | ||
| 1958 | */ | ||
| 1878 | ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, | 1959 | ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, |
| 1879 | dest->start, dest->end - dest->start); | 1960 | dest->start, dest->end - dest->start); |
| 1880 | if (ret < 0) | 1961 | if (ret < 0) |
| @@ -1888,6 +1969,9 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
| 1888 | } | 1969 | } |
| 1889 | 1970 | ||
| 1890 | out_cache: | 1971 | out_cache: |
| 1972 | /* | ||
| 1973 | * Store the result of the lookup in the name cache. | ||
| 1974 | */ | ||
| 1891 | nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS); | 1975 | nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS); |
| 1892 | if (!nce) { | 1976 | if (!nce) { |
| 1893 | ret = -ENOMEM; | 1977 | ret = -ENOMEM; |
| @@ -1901,7 +1985,6 @@ out_cache: | |||
| 1901 | nce->name_len = fs_path_len(dest); | 1985 | nce->name_len = fs_path_len(dest); |
| 1902 | nce->ret = ret; | 1986 | nce->ret = ret; |
| 1903 | strcpy(nce->name, dest->start); | 1987 | strcpy(nce->name, dest->start); |
| 1904 | memset(&nce->use_list, 0, sizeof(nce->use_list)); | ||
| 1905 | 1988 | ||
| 1906 | if (ino < sctx->send_progress) | 1989 | if (ino < sctx->send_progress) |
| 1907 | nce->need_later_update = 0; | 1990 | nce->need_later_update = 0; |
| @@ -2107,9 +2190,6 @@ static int send_subvol_begin(struct send_ctx *sctx) | |||
| 2107 | read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); | 2190 | read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); |
| 2108 | btrfs_release_path(path); | 2191 | btrfs_release_path(path); |
| 2109 | 2192 | ||
| 2110 | if (ret < 0) | ||
| 2111 | goto out; | ||
| 2112 | |||
| 2113 | if (parent_root) { | 2193 | if (parent_root) { |
| 2114 | ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); | 2194 | ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); |
| 2115 | if (ret < 0) | 2195 | if (ret < 0) |
| @@ -2276,7 +2356,7 @@ verbose_printk("btrfs: send_utimes %llu\n", ino); | |||
| 2276 | btrfs_inode_mtime(ii)); | 2356 | btrfs_inode_mtime(ii)); |
| 2277 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, | 2357 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, |
| 2278 | btrfs_inode_ctime(ii)); | 2358 | btrfs_inode_ctime(ii)); |
| 2279 | /* TODO otime? */ | 2359 | /* TODO Add otime support when the otime patches get into upstream */ |
| 2280 | 2360 | ||
| 2281 | ret = send_cmd(sctx); | 2361 | ret = send_cmd(sctx); |
| 2282 | 2362 | ||
| @@ -2292,39 +2372,39 @@ out: | |||
| 2292 | * a valid path yet because we did not process the refs yet. So, the inode | 2372 | * a valid path yet because we did not process the refs yet. So, the inode |
| 2293 | * is created as orphan. | 2373 | * is created as orphan. |
| 2294 | */ | 2374 | */ |
| 2295 | static int send_create_inode(struct send_ctx *sctx, struct btrfs_path *path, | 2375 | static int send_create_inode(struct send_ctx *sctx, u64 ino) |
| 2296 | struct btrfs_key *key) | ||
| 2297 | { | 2376 | { |
| 2298 | int ret = 0; | 2377 | int ret = 0; |
| 2299 | struct extent_buffer *eb = path->nodes[0]; | ||
| 2300 | struct btrfs_inode_item *ii; | ||
| 2301 | struct fs_path *p; | 2378 | struct fs_path *p; |
| 2302 | int slot = path->slots[0]; | ||
| 2303 | int cmd; | 2379 | int cmd; |
| 2380 | u64 gen; | ||
| 2304 | u64 mode; | 2381 | u64 mode; |
| 2382 | u64 rdev; | ||
| 2305 | 2383 | ||
| 2306 | verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino); | 2384 | verbose_printk("btrfs: send_create_inode %llu\n", ino); |
| 2307 | 2385 | ||
| 2308 | p = fs_path_alloc(sctx); | 2386 | p = fs_path_alloc(sctx); |
| 2309 | if (!p) | 2387 | if (!p) |
| 2310 | return -ENOMEM; | 2388 | return -ENOMEM; |
| 2311 | 2389 | ||
| 2312 | ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | 2390 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, |
| 2313 | mode = btrfs_inode_mode(eb, ii); | 2391 | NULL, &rdev); |
| 2392 | if (ret < 0) | ||
| 2393 | goto out; | ||
| 2314 | 2394 | ||
| 2315 | if (S_ISREG(mode)) | 2395 | if (S_ISREG(mode)) { |
| 2316 | cmd = BTRFS_SEND_C_MKFILE; | 2396 | cmd = BTRFS_SEND_C_MKFILE; |
| 2317 | else if (S_ISDIR(mode)) | 2397 | } else if (S_ISDIR(mode)) { |
| 2318 | cmd = BTRFS_SEND_C_MKDIR; | 2398 | cmd = BTRFS_SEND_C_MKDIR; |
| 2319 | else if (S_ISLNK(mode)) | 2399 | } else if (S_ISLNK(mode)) { |
| 2320 | cmd = BTRFS_SEND_C_SYMLINK; | 2400 | cmd = BTRFS_SEND_C_SYMLINK; |
| 2321 | else if (S_ISCHR(mode) || S_ISBLK(mode)) | 2401 | } else if (S_ISCHR(mode) || S_ISBLK(mode)) { |
| 2322 | cmd = BTRFS_SEND_C_MKNOD; | 2402 | cmd = BTRFS_SEND_C_MKNOD; |
| 2323 | else if (S_ISFIFO(mode)) | 2403 | } else if (S_ISFIFO(mode)) { |
| 2324 | cmd = BTRFS_SEND_C_MKFIFO; | 2404 | cmd = BTRFS_SEND_C_MKFIFO; |
| 2325 | else if (S_ISSOCK(mode)) | 2405 | } else if (S_ISSOCK(mode)) { |
| 2326 | cmd = BTRFS_SEND_C_MKSOCK; | 2406 | cmd = BTRFS_SEND_C_MKSOCK; |
| 2327 | else { | 2407 | } else { |
| 2328 | printk(KERN_WARNING "btrfs: unexpected inode type %o", | 2408 | printk(KERN_WARNING "btrfs: unexpected inode type %o", |
| 2329 | (int)(mode & S_IFMT)); | 2409 | (int)(mode & S_IFMT)); |
| 2330 | ret = -ENOTSUPP; | 2410 | ret = -ENOTSUPP; |
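The restructured send_create_inode() derives everything from get_inode_info() and then dispatches on the inode mode. A self-contained sketch of that dispatch using the standard S_IS*() macros is given below; the enum names are made up and only mirror the BTRFS_SEND_C_* commands used by the patch.

#include <sys/stat.h>
#include <stdio.h>

enum send_cmd { C_MKFILE, C_MKDIR, C_SYMLINK, C_MKNOD, C_MKFIFO, C_MKSOCK, C_UNKNOWN };

static enum send_cmd cmd_for_mode(mode_t mode)
{
	if (S_ISREG(mode))
		return C_MKFILE;
	if (S_ISDIR(mode))
		return C_MKDIR;
	if (S_ISLNK(mode))
		return C_SYMLINK;
	if (S_ISCHR(mode) || S_ISBLK(mode))
		return C_MKNOD;
	if (S_ISFIFO(mode))
		return C_MKFIFO;
	if (S_ISSOCK(mode))
		return C_MKSOCK;
	return C_UNKNOWN;       /* send_create_inode() returns -ENOTSUPP here */
}

int main(void)
{
	printf("%d\n", cmd_for_mode(S_IFDIR | 0755));   /* prints 1 (C_MKDIR) */
	return 0;
}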
| @@ -2335,22 +2415,22 @@ verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino); | |||
| 2335 | if (ret < 0) | 2415 | if (ret < 0) |
| 2336 | goto out; | 2416 | goto out; |
| 2337 | 2417 | ||
| 2338 | ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 2418 | ret = gen_unique_name(sctx, ino, gen, p); |
| 2339 | if (ret < 0) | 2419 | if (ret < 0) |
| 2340 | goto out; | 2420 | goto out; |
| 2341 | 2421 | ||
| 2342 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 2422 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); |
| 2343 | TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, sctx->cur_ino); | 2423 | TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); |
| 2344 | 2424 | ||
| 2345 | if (S_ISLNK(mode)) { | 2425 | if (S_ISLNK(mode)) { |
| 2346 | fs_path_reset(p); | 2426 | fs_path_reset(p); |
| 2347 | ret = read_symlink(sctx, sctx->send_root, sctx->cur_ino, p); | 2427 | ret = read_symlink(sctx, sctx->send_root, ino, p); |
| 2348 | if (ret < 0) | 2428 | if (ret < 0) |
| 2349 | goto out; | 2429 | goto out; |
| 2350 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); | 2430 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); |
| 2351 | } else if (S_ISCHR(mode) || S_ISBLK(mode) || | 2431 | } else if (S_ISCHR(mode) || S_ISBLK(mode) || |
| 2352 | S_ISFIFO(mode) || S_ISSOCK(mode)) { | 2432 | S_ISFIFO(mode) || S_ISSOCK(mode)) { |
| 2353 | TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, btrfs_inode_rdev(eb, ii)); | 2433 | TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev); |
| 2354 | } | 2434 | } |
| 2355 | 2435 | ||
| 2356 | ret = send_cmd(sctx); | 2436 | ret = send_cmd(sctx); |
| @@ -2364,6 +2444,92 @@ out: | |||
| 2364 | return ret; | 2444 | return ret; |
| 2365 | } | 2445 | } |
| 2366 | 2446 | ||
| 2447 | /* | ||
| 2448 | * We need some special handling for inodes that get processed before the parent | ||
| 2449 | * directory got created. See process_recorded_refs for details. | ||
| 2450 | * This function does the check if we already created the dir out of order. | ||
| 2451 | */ | ||
| 2452 | static int did_create_dir(struct send_ctx *sctx, u64 dir) | ||
| 2453 | { | ||
| 2454 | int ret = 0; | ||
| 2455 | struct btrfs_path *path = NULL; | ||
| 2456 | struct btrfs_key key; | ||
| 2457 | struct btrfs_key found_key; | ||
| 2458 | struct btrfs_key di_key; | ||
| 2459 | struct extent_buffer *eb; | ||
| 2460 | struct btrfs_dir_item *di; | ||
| 2461 | int slot; | ||
| 2462 | |||
| 2463 | path = alloc_path_for_send(); | ||
| 2464 | if (!path) { | ||
| 2465 | ret = -ENOMEM; | ||
| 2466 | goto out; | ||
| 2467 | } | ||
| 2468 | |||
| 2469 | key.objectid = dir; | ||
| 2470 | key.type = BTRFS_DIR_INDEX_KEY; | ||
| 2471 | key.offset = 0; | ||
| 2472 | while (1) { | ||
| 2473 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | ||
| 2474 | 1, 0); | ||
| 2475 | if (ret < 0) | ||
| 2476 | goto out; | ||
| 2477 | if (!ret) { | ||
| 2478 | eb = path->nodes[0]; | ||
| 2479 | slot = path->slots[0]; | ||
| 2480 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
| 2481 | } | ||
| 2482 | if (ret || found_key.objectid != key.objectid || | ||
| 2483 | found_key.type != key.type) { | ||
| 2484 | ret = 0; | ||
| 2485 | goto out; | ||
| 2486 | } | ||
| 2487 | |||
| 2488 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); | ||
| 2489 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | ||
| 2490 | |||
| 2491 | if (di_key.objectid < sctx->send_progress) { | ||
| 2492 | ret = 1; | ||
| 2493 | goto out; | ||
| 2494 | } | ||
| 2495 | |||
| 2496 | key.offset = found_key.offset + 1; | ||
| 2497 | btrfs_release_path(path); | ||
| 2498 | } | ||
| 2499 | |||
| 2500 | out: | ||
| 2501 | btrfs_free_path(path); | ||
| 2502 | return ret; | ||
| 2503 | } | ||
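did_create_dir() walks the DIR_INDEX items of the directory and reports 1 as soon as one of them points at an inode below send_progress, that is, an inode that was already processed and therefore already forced the directory into existence. A simplified model of that check, with invented sample data:

#include <stdint.h>
#include <stdio.h>

static int did_create_dir_model(const uint64_t *child_inos, int n,
				uint64_t send_progress)
{
	int i;

	for (i = 0; i < n; i++)
		if (child_inos[i] < send_progress)
			return 1;       /* an earlier inode forced the mkdir */
	return 0;
}

int main(void)
{
	uint64_t children[] = { 270, 280, 290 };

	/* progress at inode 275: inode 270 was already sent, so the
	 * containing directory was created when 270 needed its path */
	printf("%d\n", did_create_dir_model(children, 3, 275));  /* 1 */
	printf("%d\n", did_create_dir_model(children, 3, 260));  /* 0 */
	return 0;
}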
| 2504 | |||
| 2505 | /* | ||
| 2506 | * Only creates the inode if it is: | ||
| 2507 | * 1. Not a directory | ||
| 2508 | * 2. Or a directory which was not created already due to out of order | ||
| 2509 | * directories. See did_create_dir and process_recorded_refs for details. | ||
| 2510 | */ | ||
| 2511 | static int send_create_inode_if_needed(struct send_ctx *sctx) | ||
| 2512 | { | ||
| 2513 | int ret; | ||
| 2514 | |||
| 2515 | if (S_ISDIR(sctx->cur_inode_mode)) { | ||
| 2516 | ret = did_create_dir(sctx, sctx->cur_ino); | ||
| 2517 | if (ret < 0) | ||
| 2518 | goto out; | ||
| 2519 | if (ret) { | ||
| 2520 | ret = 0; | ||
| 2521 | goto out; | ||
| 2522 | } | ||
| 2523 | } | ||
| 2524 | |||
| 2525 | ret = send_create_inode(sctx, sctx->cur_ino); | ||
| 2526 | if (ret < 0) | ||
| 2527 | goto out; | ||
| 2528 | |||
| 2529 | out: | ||
| 2530 | return ret; | ||
| 2531 | } | ||
| 2532 | |||
| 2367 | struct recorded_ref { | 2533 | struct recorded_ref { |
| 2368 | struct list_head list; | 2534 | struct list_head list; |
| 2369 | char *dir_path; | 2535 | char *dir_path; |
| @@ -2416,13 +2582,13 @@ static int record_ref(struct list_head *head, u64 dir, | |||
| 2416 | static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head) | 2582 | static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head) |
| 2417 | { | 2583 | { |
| 2418 | struct recorded_ref *cur; | 2584 | struct recorded_ref *cur; |
| 2419 | struct recorded_ref *tmp; | ||
| 2420 | 2585 | ||
| 2421 | list_for_each_entry_safe(cur, tmp, head, list) { | 2586 | while (!list_empty(head)) { |
| 2587 | cur = list_entry(head->next, struct recorded_ref, list); | ||
| 2422 | fs_path_free(sctx, cur->full_path); | 2588 | fs_path_free(sctx, cur->full_path); |
| 2589 | list_del(&cur->list); | ||
| 2423 | kfree(cur); | 2590 | kfree(cur); |
| 2424 | } | 2591 | } |
| 2425 | INIT_LIST_HEAD(head); | ||
| 2426 | } | 2592 | } |
| 2427 | 2593 | ||
| 2428 | static void free_recorded_refs(struct send_ctx *sctx) | 2594 | static void free_recorded_refs(struct send_ctx *sctx) |
| @@ -2432,7 +2598,7 @@ static void free_recorded_refs(struct send_ctx *sctx) | |||
| 2432 | } | 2598 | } |
| 2433 | 2599 | ||
| 2434 | /* | 2600 | /* |
| 2435 | * Renames/moves a file/dir to it's orphan name. Used when the first | 2601 | * Renames/moves a file/dir to its orphan name. Used when the first |
| 2436 | * ref of an unprocessed inode gets overwritten and for all non empty | 2602 | * ref of an unprocessed inode gets overwritten and for all non empty |
| 2437 | * directories. | 2603 | * directories. |
| 2438 | */ | 2604 | */ |
| @@ -2472,6 +2638,12 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | |||
| 2472 | struct btrfs_key loc; | 2638 | struct btrfs_key loc; |
| 2473 | struct btrfs_dir_item *di; | 2639 | struct btrfs_dir_item *di; |
| 2474 | 2640 | ||
| 2641 | /* | ||
| 2642 | * Don't try to rmdir the top/root subvolume dir. | ||
| 2643 | */ | ||
| 2644 | if (dir == BTRFS_FIRST_FREE_OBJECTID) | ||
| 2645 | return 0; | ||
| 2646 | |||
| 2475 | path = alloc_path_for_send(); | 2647 | path = alloc_path_for_send(); |
| 2476 | if (!path) | 2648 | if (!path) |
| 2477 | return -ENOMEM; | 2649 | return -ENOMEM; |
| @@ -2513,160 +2685,6 @@ out: | |||
| 2513 | return ret; | 2685 | return ret; |
| 2514 | } | 2686 | } |
| 2515 | 2687 | ||
| 2516 | struct finish_unordered_dir_ctx { | ||
| 2517 | struct send_ctx *sctx; | ||
| 2518 | struct fs_path *cur_path; | ||
| 2519 | struct fs_path *dir_path; | ||
| 2520 | u64 dir_ino; | ||
| 2521 | int need_delete; | ||
| 2522 | int delete_pass; | ||
| 2523 | }; | ||
| 2524 | |||
| 2525 | int __finish_unordered_dir(int num, struct btrfs_key *di_key, | ||
| 2526 | const char *name, int name_len, | ||
| 2527 | const char *data, int data_len, | ||
| 2528 | u8 type, void *ctx) | ||
| 2529 | { | ||
| 2530 | int ret = 0; | ||
| 2531 | struct finish_unordered_dir_ctx *fctx = ctx; | ||
| 2532 | struct send_ctx *sctx = fctx->sctx; | ||
| 2533 | u64 di_gen; | ||
| 2534 | u64 di_mode; | ||
| 2535 | int is_orphan = 0; | ||
| 2536 | |||
| 2537 | if (di_key->objectid >= fctx->dir_ino) | ||
| 2538 | goto out; | ||
| 2539 | |||
| 2540 | fs_path_reset(fctx->cur_path); | ||
| 2541 | |||
| 2542 | ret = get_inode_info(sctx->send_root, di_key->objectid, | ||
| 2543 | NULL, &di_gen, &di_mode, NULL, NULL); | ||
| 2544 | if (ret < 0) | ||
| 2545 | goto out; | ||
| 2546 | |||
| 2547 | ret = is_first_ref(sctx, sctx->send_root, di_key->objectid, | ||
| 2548 | fctx->dir_ino, name, name_len); | ||
| 2549 | if (ret < 0) | ||
| 2550 | goto out; | ||
| 2551 | if (ret) { | ||
| 2552 | is_orphan = 1; | ||
| 2553 | ret = gen_unique_name(sctx, di_key->objectid, di_gen, | ||
| 2554 | fctx->cur_path); | ||
| 2555 | } else { | ||
| 2556 | ret = get_cur_path(sctx, di_key->objectid, di_gen, | ||
| 2557 | fctx->cur_path); | ||
| 2558 | } | ||
| 2559 | if (ret < 0) | ||
| 2560 | goto out; | ||
| 2561 | |||
| 2562 | ret = fs_path_add(fctx->dir_path, name, name_len); | ||
| 2563 | if (ret < 0) | ||
| 2564 | goto out; | ||
| 2565 | |||
| 2566 | if (!fctx->delete_pass) { | ||
| 2567 | if (S_ISDIR(di_mode)) { | ||
| 2568 | ret = send_rename(sctx, fctx->cur_path, | ||
| 2569 | fctx->dir_path); | ||
| 2570 | } else { | ||
| 2571 | ret = send_link(sctx, fctx->dir_path, | ||
| 2572 | fctx->cur_path); | ||
| 2573 | if (is_orphan) | ||
| 2574 | fctx->need_delete = 1; | ||
| 2575 | } | ||
| 2576 | } else if (!S_ISDIR(di_mode)) { | ||
| 2577 | ret = send_unlink(sctx, fctx->cur_path); | ||
| 2578 | } else { | ||
| 2579 | ret = 0; | ||
| 2580 | } | ||
| 2581 | |||
| 2582 | fs_path_remove(fctx->dir_path); | ||
| 2583 | |||
| 2584 | out: | ||
| 2585 | return ret; | ||
| 2586 | } | ||
| 2587 | |||
| 2588 | /* | ||
| 2589 | * Go through all dir items and see if we find refs which could not be created | ||
| 2590 | * in the past because the dir did not exist at that time. | ||
| 2591 | */ | ||
| 2592 | static int finish_outoforder_dir(struct send_ctx *sctx, u64 dir, u64 dir_gen) | ||
| 2593 | { | ||
| 2594 | int ret = 0; | ||
| 2595 | struct btrfs_path *path = NULL; | ||
| 2596 | struct btrfs_key key; | ||
| 2597 | struct btrfs_key found_key; | ||
| 2598 | struct extent_buffer *eb; | ||
| 2599 | struct finish_unordered_dir_ctx fctx; | ||
| 2600 | int slot; | ||
| 2601 | |||
| 2602 | path = alloc_path_for_send(); | ||
| 2603 | if (!path) { | ||
| 2604 | ret = -ENOMEM; | ||
| 2605 | goto out; | ||
| 2606 | } | ||
| 2607 | |||
| 2608 | memset(&fctx, 0, sizeof(fctx)); | ||
| 2609 | fctx.sctx = sctx; | ||
| 2610 | fctx.cur_path = fs_path_alloc(sctx); | ||
| 2611 | fctx.dir_path = fs_path_alloc(sctx); | ||
| 2612 | if (!fctx.cur_path || !fctx.dir_path) { | ||
| 2613 | ret = -ENOMEM; | ||
| 2614 | goto out; | ||
| 2615 | } | ||
| 2616 | fctx.dir_ino = dir; | ||
| 2617 | |||
| 2618 | ret = get_cur_path(sctx, dir, dir_gen, fctx.dir_path); | ||
| 2619 | if (ret < 0) | ||
| 2620 | goto out; | ||
| 2621 | |||
| 2622 | /* | ||
| 2623 | * We do two passes. The first links in the new refs and the second | ||
| 2624 | * deletes orphans if required. Deletion of orphans is not required for | ||
| 2625 | * directory inodes, as we always have only one ref and use rename | ||
| 2626 | * instead of link for those. | ||
| 2627 | */ | ||
| 2628 | |||
| 2629 | again: | ||
| 2630 | key.objectid = dir; | ||
| 2631 | key.type = BTRFS_DIR_ITEM_KEY; | ||
| 2632 | key.offset = 0; | ||
| 2633 | while (1) { | ||
| 2634 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | ||
| 2635 | 1, 0); | ||
| 2636 | if (ret < 0) | ||
| 2637 | goto out; | ||
| 2638 | eb = path->nodes[0]; | ||
| 2639 | slot = path->slots[0]; | ||
| 2640 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
| 2641 | |||
| 2642 | if (found_key.objectid != key.objectid || | ||
| 2643 | found_key.type != key.type) { | ||
| 2644 | btrfs_release_path(path); | ||
| 2645 | break; | ||
| 2646 | } | ||
| 2647 | |||
| 2648 | ret = iterate_dir_item(sctx, sctx->send_root, path, | ||
| 2649 | &found_key, __finish_unordered_dir, | ||
| 2650 | &fctx); | ||
| 2651 | if (ret < 0) | ||
| 2652 | goto out; | ||
| 2653 | |||
| 2654 | key.offset = found_key.offset + 1; | ||
| 2655 | btrfs_release_path(path); | ||
| 2656 | } | ||
| 2657 | |||
| 2658 | if (!fctx.delete_pass && fctx.need_delete) { | ||
| 2659 | fctx.delete_pass = 1; | ||
| 2660 | goto again; | ||
| 2661 | } | ||
| 2662 | |||
| 2663 | out: | ||
| 2664 | btrfs_free_path(path); | ||
| 2665 | fs_path_free(sctx, fctx.cur_path); | ||
| 2666 | fs_path_free(sctx, fctx.dir_path); | ||
| 2667 | return ret; | ||
| 2668 | } | ||
| 2669 | |||
| 2670 | /* | 2688 | /* |
| 2671 | * This does all the move/link/unlink/rmdir magic. | 2689 | * This does all the move/link/unlink/rmdir magic. |
| 2672 | */ | 2690 | */ |
| @@ -2674,6 +2692,7 @@ static int process_recorded_refs(struct send_ctx *sctx) | |||
| 2674 | { | 2692 | { |
| 2675 | int ret = 0; | 2693 | int ret = 0; |
| 2676 | struct recorded_ref *cur; | 2694 | struct recorded_ref *cur; |
| 2695 | struct recorded_ref *cur2; | ||
| 2677 | struct ulist *check_dirs = NULL; | 2696 | struct ulist *check_dirs = NULL; |
| 2678 | struct ulist_iterator uit; | 2697 | struct ulist_iterator uit; |
| 2679 | struct ulist_node *un; | 2698 | struct ulist_node *un; |
| @@ -2685,6 +2704,12 @@ static int process_recorded_refs(struct send_ctx *sctx) | |||
| 2685 | 2704 | ||
| 2686 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 2705 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); |
| 2687 | 2706 | ||
| 2707 | /* | ||
| 2708 | * This should never happen as the root dir always has the same ref | ||
| 2709 | * which is always '..' | ||
| 2710 | */ | ||
| 2711 | BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); | ||
| 2712 | |||
| 2688 | valid_path = fs_path_alloc(sctx); | 2713 | valid_path = fs_path_alloc(sctx); |
| 2689 | if (!valid_path) { | 2714 | if (!valid_path) { |
| 2690 | ret = -ENOMEM; | 2715 | ret = -ENOMEM; |
| @@ -2731,6 +2756,46 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2731 | 2756 | ||
| 2732 | list_for_each_entry(cur, &sctx->new_refs, list) { | 2757 | list_for_each_entry(cur, &sctx->new_refs, list) { |
| 2733 | /* | 2758 | /* |
| 2759 | * We may have refs where the parent directory does not exist | ||
| 2760 | * yet. This happens if the parent directories inum is higher | ||
| 2761 | * the the current inum. To handle this case, we create the | ||
| 2762 | * parent directory out of order. But we need to check if this | ||
| 2763 | * did already happen before due to other refs in the same dir. | ||
| 2764 | */ | ||
| 2765 | ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); | ||
| 2766 | if (ret < 0) | ||
| 2767 | goto out; | ||
| 2768 | if (ret == inode_state_will_create) { | ||
| 2769 | ret = 0; | ||
| 2770 | /* | ||
| 2771 | * First check if any of the current inodes refs did | ||
| 2772 | * already create the dir. | ||
| 2773 | */ | ||
| 2774 | list_for_each_entry(cur2, &sctx->new_refs, list) { | ||
| 2775 | if (cur == cur2) | ||
| 2776 | break; | ||
| 2777 | if (cur2->dir == cur->dir) { | ||
| 2778 | ret = 1; | ||
| 2779 | break; | ||
| 2780 | } | ||
| 2781 | } | ||
| 2782 | |||
| 2783 | /* | ||
| 2784 | * If that did not happen, check if a previous inode | ||
| 2785 | * did already create the dir. | ||
| 2786 | */ | ||
| 2787 | if (!ret) | ||
| 2788 | ret = did_create_dir(sctx, cur->dir); | ||
| 2789 | if (ret < 0) | ||
| 2790 | goto out; | ||
| 2791 | if (!ret) { | ||
| 2792 | ret = send_create_inode(sctx, cur->dir); | ||
| 2793 | if (ret < 0) | ||
| 2794 | goto out; | ||
| 2795 | } | ||
| 2796 | } | ||
| 2797 | |||
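The block above handles refs whose parent directory has a higher inum than the inode currently being sent: the directory gets created out of order, but only once, guarded first by earlier refs in the same batch and then by did_create_dir(). A toy walk-through with invented inode numbers, where the array stands in for did_create_dir() and the parent check is simplified to an inum comparison:

#include <stdint.h>
#include <stdio.h>

struct ref { uint64_t ino; uint64_t dir; };

int main(void)
{
	/* two refs of the inode currently processed (ino 300), both inside
	 * directory 400, which has not been sent yet */
	struct ref new_refs[] = { { 300, 400 }, { 300, 400 } };
	int created[1024] = { 0 };
	int i, j, already;

	for (i = 0; i < 2; i++) {
		if (new_refs[i].dir <= new_refs[i].ino)
			continue;       /* parent was processed before us */
		already = created[new_refs[i].dir];
		for (j = 0; j < i && !already; j++)     /* the cur2 scan above */
			if (new_refs[j].dir == new_refs[i].dir)
				already = 1;
		if (!already) {
			printf("create dir %llu out of order\n",
			       (unsigned long long)new_refs[i].dir);
			created[new_refs[i].dir] = 1;   /* did_create_dir() stand-in */
		}
	}
	return 0;       /* the mkdir is emitted exactly once */
}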
| 2798 | /* | ||
| 2734 | * Check if this new ref would overwrite the first ref of | 2799 | * Check if this new ref would overwrite the first ref of |
| 2735 | * another unprocessed inode. If yes, orphanize the | 2800 | * another unprocessed inode. If yes, orphanize the |
| 2736 | * overwritten inode. If we find an overwritten ref that is | 2801 | * overwritten inode. If we find an overwritten ref that is |
| @@ -2764,7 +2829,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2764 | * inode, move it and update valid_path. If not, link or move | 2829 | * inode, move it and update valid_path. If not, link or move |
| 2765 | * it depending on the inode mode. | 2830 | * it depending on the inode mode. |
| 2766 | */ | 2831 | */ |
| 2767 | if (is_orphan && !sctx->cur_inode_first_ref_orphan) { | 2832 | if (is_orphan) { |
| 2768 | ret = send_rename(sctx, valid_path, cur->full_path); | 2833 | ret = send_rename(sctx, valid_path, cur->full_path); |
| 2769 | if (ret < 0) | 2834 | if (ret < 0) |
| 2770 | goto out; | 2835 | goto out; |
| @@ -2827,6 +2892,17 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2827 | if (ret < 0) | 2892 | if (ret < 0) |
| 2828 | goto out; | 2893 | goto out; |
| 2829 | } | 2894 | } |
| 2895 | } else if (S_ISDIR(sctx->cur_inode_mode) && | ||
| 2896 | !list_empty(&sctx->deleted_refs)) { | ||
| 2897 | /* | ||
| 2898 | * We have a moved dir. Add the old parent to check_dirs | ||
| 2899 | */ | ||
| 2900 | cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, | ||
| 2901 | list); | ||
| 2902 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | ||
| 2903 | GFP_NOFS); | ||
| 2904 | if (ret < 0) | ||
| 2905 | goto out; | ||
| 2830 | } else if (!S_ISDIR(sctx->cur_inode_mode)) { | 2906 | } else if (!S_ISDIR(sctx->cur_inode_mode)) { |
| 2831 | /* | 2907 | /* |
| 2832 | * We have a non dir inode. Go through all deleted refs and | 2908 | * We have a non dir inode. Go through all deleted refs and |
| @@ -2840,35 +2916,9 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2840 | if (ret < 0) | 2916 | if (ret < 0) |
| 2841 | goto out; | 2917 | goto out; |
| 2842 | if (!ret) { | 2918 | if (!ret) { |
| 2843 | /* | 2919 | ret = send_unlink(sctx, cur->full_path); |
| 2844 | * In case the inode was moved to a directory | 2920 | if (ret < 0) |
| 2845 | * that was not created yet (see | 2921 | goto out; |
| 2846 | * __record_new_ref), we can not unlink the ref | ||
| 2847 | * as it will be needed later when the parent | ||
| 2848 | * directory is created, so that we can move in | ||
| 2849 | * the inode to the new dir. | ||
| 2850 | */ | ||
| 2851 | if (!is_orphan && | ||
| 2852 | sctx->cur_inode_first_ref_orphan) { | ||
| 2853 | ret = orphanize_inode(sctx, | ||
| 2854 | sctx->cur_ino, | ||
| 2855 | sctx->cur_inode_gen, | ||
| 2856 | cur->full_path); | ||
| 2857 | if (ret < 0) | ||
| 2858 | goto out; | ||
| 2859 | ret = gen_unique_name(sctx, | ||
| 2860 | sctx->cur_ino, | ||
| 2861 | sctx->cur_inode_gen, | ||
| 2862 | valid_path); | ||
| 2863 | if (ret < 0) | ||
| 2864 | goto out; | ||
| 2865 | is_orphan = 1; | ||
| 2866 | |||
| 2867 | } else { | ||
| 2868 | ret = send_unlink(sctx, cur->full_path); | ||
| 2869 | if (ret < 0) | ||
| 2870 | goto out; | ||
| 2871 | } | ||
| 2872 | } | 2922 | } |
| 2873 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 2923 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, |
| 2874 | GFP_NOFS); | 2924 | GFP_NOFS); |
| @@ -2880,12 +2930,11 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2880 | * If the inode is still orphan, unlink the orphan. This may | 2930 | * If the inode is still orphan, unlink the orphan. This may |
| 2881 | * happen when a previous inode did overwrite the first ref | 2931 | * happen when a previous inode did overwrite the first ref |
| 2882 | * of this inode and no new refs were added for the current | 2932 | * of this inode and no new refs were added for the current |
| 2883 | * inode. | 2933 | * inode. Unlinking does not mean that the inode is deleted in |
| 2884 | * We can however not delete the orphan in case the inode relies | 2934 | * all cases. There may still be links to this inode in other |
| 2885 | * in a directory that was not created yet (see | 2935 | * places. |
| 2886 | * __record_new_ref) | ||
| 2887 | */ | 2936 | */ |
| 2888 | if (is_orphan && !sctx->cur_inode_first_ref_orphan) { | 2937 | if (is_orphan) { |
| 2889 | ret = send_unlink(sctx, valid_path); | 2938 | ret = send_unlink(sctx, valid_path); |
| 2890 | if (ret < 0) | 2939 | if (ret < 0) |
| 2891 | goto out; | 2940 | goto out; |
| @@ -2900,6 +2949,11 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2900 | */ | 2949 | */ |
| 2901 | ULIST_ITER_INIT(&uit); | 2950 | ULIST_ITER_INIT(&uit); |
| 2902 | while ((un = ulist_next(check_dirs, &uit))) { | 2951 | while ((un = ulist_next(check_dirs, &uit))) { |
| 2952 | /* | ||
| 2953 | * In case we had refs into dirs that were not processed yet, | ||
| 2954 | * we don't need to do the utime and rmdir logic for these dirs. | ||
| 2955 | * The dir will be processed later. | ||
| 2956 | */ | ||
| 2903 | if (un->val > sctx->cur_ino) | 2957 | if (un->val > sctx->cur_ino) |
| 2904 | continue; | 2958 | continue; |
| 2905 | 2959 | ||
| @@ -2929,25 +2983,6 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
| 2929 | } | 2983 | } |
| 2930 | } | 2984 | } |
| 2931 | 2985 | ||
| 2932 | /* | ||
| 2933 | * Current inode is now at it's new position, so we must increase | ||
| 2934 | * send_progress | ||
| 2935 | */ | ||
| 2936 | sctx->send_progress = sctx->cur_ino + 1; | ||
| 2937 | |||
| 2938 | /* | ||
| 2939 | * We may have a directory here that has pending refs which could not | ||
| 2940 | * be created before (because the dir did not exist before, see | ||
| 2941 | * __record_new_ref). finish_outoforder_dir will link/move the pending | ||
| 2942 | * refs. | ||
| 2943 | */ | ||
| 2944 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_new) { | ||
| 2945 | ret = finish_outoforder_dir(sctx, sctx->cur_ino, | ||
| 2946 | sctx->cur_inode_gen); | ||
| 2947 | if (ret < 0) | ||
| 2948 | goto out; | ||
| 2949 | } | ||
| 2950 | |||
| 2951 | ret = 0; | 2986 | ret = 0; |
| 2952 | 2987 | ||
| 2953 | out: | 2988 | out: |
| @@ -2971,34 +3006,9 @@ static int __record_new_ref(int num, u64 dir, int index, | |||
| 2971 | return -ENOMEM; | 3006 | return -ENOMEM; |
| 2972 | 3007 | ||
| 2973 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | 3008 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, |
| 2974 | NULL); | 3009 | NULL, NULL); |
| 2975 | if (ret < 0) | ||
| 2976 | goto out; | ||
| 2977 | |||
| 2978 | /* | ||
| 2979 | * The parent may be non-existent at this point in time. This happens | ||
| 2980 | * if the ino of the parent dir is higher then the current ino. In this | ||
| 2981 | * case, we can not process this ref until the parent dir is finally | ||
| 2982 | * created. If we reach the parent dir later, process_recorded_refs | ||
| 2983 | * will go through all dir items and process the refs that could not be | ||
| 2984 | * processed before. In case this is the first ref, we set | ||
| 2985 | * cur_inode_first_ref_orphan to 1 to inform process_recorded_refs to | ||
| 2986 | * keep an orphan of the inode so that it later can be used for | ||
| 2987 | * link/move | ||
| 2988 | */ | ||
| 2989 | ret = is_inode_existent(sctx, dir, gen); | ||
| 2990 | if (ret < 0) | 3010 | if (ret < 0) |
| 2991 | goto out; | 3011 | goto out; |
| 2992 | if (!ret) { | ||
| 2993 | ret = is_first_ref(sctx, sctx->send_root, sctx->cur_ino, dir, | ||
| 2994 | name->start, fs_path_len(name)); | ||
| 2995 | if (ret < 0) | ||
| 2996 | goto out; | ||
| 2997 | if (ret) | ||
| 2998 | sctx->cur_inode_first_ref_orphan = 1; | ||
| 2999 | ret = 0; | ||
| 3000 | goto out; | ||
| 3001 | } | ||
| 3002 | 3012 | ||
| 3003 | ret = get_cur_path(sctx, dir, gen, p); | 3013 | ret = get_cur_path(sctx, dir, gen, p); |
| 3004 | if (ret < 0) | 3014 | if (ret < 0) |
| @@ -3029,7 +3039,7 @@ static int __record_deleted_ref(int num, u64 dir, int index, | |||
| 3029 | return -ENOMEM; | 3039 | return -ENOMEM; |
| 3030 | 3040 | ||
| 3031 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | 3041 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, |
| 3032 | NULL); | 3042 | NULL, NULL); |
| 3033 | if (ret < 0) | 3043 | if (ret < 0) |
| 3034 | goto out; | 3044 | goto out; |
| 3035 | 3045 | ||
| @@ -3206,33 +3216,28 @@ static int process_all_refs(struct send_ctx *sctx, | |||
| 3206 | key.offset = 0; | 3216 | key.offset = 0; |
| 3207 | while (1) { | 3217 | while (1) { |
| 3208 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 3218 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); |
| 3209 | if (ret < 0) { | 3219 | if (ret < 0) |
| 3210 | btrfs_release_path(path); | ||
| 3211 | goto out; | 3220 | goto out; |
| 3212 | } | 3221 | if (ret) |
| 3213 | if (ret) { | ||
| 3214 | btrfs_release_path(path); | ||
| 3215 | break; | 3222 | break; |
| 3216 | } | ||
| 3217 | 3223 | ||
| 3218 | eb = path->nodes[0]; | 3224 | eb = path->nodes[0]; |
| 3219 | slot = path->slots[0]; | 3225 | slot = path->slots[0]; |
| 3220 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 3226 | btrfs_item_key_to_cpu(eb, &found_key, slot); |
| 3221 | 3227 | ||
| 3222 | if (found_key.objectid != key.objectid || | 3228 | if (found_key.objectid != key.objectid || |
| 3223 | found_key.type != key.type) { | 3229 | found_key.type != key.type) |
| 3224 | btrfs_release_path(path); | ||
| 3225 | break; | 3230 | break; |
| 3226 | } | ||
| 3227 | 3231 | ||
| 3228 | ret = iterate_inode_ref(sctx, sctx->parent_root, path, | 3232 | ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb, |
| 3229 | &found_key, 0, cb, sctx); | 3233 | sctx); |
| 3230 | btrfs_release_path(path); | 3234 | btrfs_release_path(path); |
| 3231 | if (ret < 0) | 3235 | if (ret < 0) |
| 3232 | goto out; | 3236 | goto out; |
| 3233 | 3237 | ||
| 3234 | key.offset = found_key.offset + 1; | 3238 | key.offset = found_key.offset + 1; |
| 3235 | } | 3239 | } |
| 3240 | btrfs_release_path(path); | ||
| 3236 | 3241 | ||
| 3237 | ret = process_recorded_refs(sctx); | 3242 | ret = process_recorded_refs(sctx); |
| 3238 | 3243 | ||
| @@ -3555,7 +3560,7 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len) | |||
| 3555 | int ret = 0; | 3560 | int ret = 0; |
| 3556 | struct fs_path *p; | 3561 | struct fs_path *p; |
| 3557 | loff_t pos = offset; | 3562 | loff_t pos = offset; |
| 3558 | int readed = 0; | 3563 | int num_read = 0; |
| 3559 | mm_segment_t old_fs; | 3564 | mm_segment_t old_fs; |
| 3560 | 3565 | ||
| 3561 | p = fs_path_alloc(sctx); | 3566 | p = fs_path_alloc(sctx); |
| @@ -3580,8 +3585,8 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); | |||
| 3580 | ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos); | 3585 | ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos); |
| 3581 | if (ret < 0) | 3586 | if (ret < 0) |
| 3582 | goto out; | 3587 | goto out; |
| 3583 | readed = ret; | 3588 | num_read = ret; |
| 3584 | if (!readed) | 3589 | if (!num_read) |
| 3585 | goto out; | 3590 | goto out; |
| 3586 | 3591 | ||
| 3587 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); | 3592 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); |
| @@ -3594,7 +3599,7 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); | |||
| 3594 | 3599 | ||
| 3595 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 3600 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); |
| 3596 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 3601 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); |
| 3597 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, readed); | 3602 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); |
| 3598 | 3603 | ||
| 3599 | ret = send_cmd(sctx); | 3604 | ret = send_cmd(sctx); |
| 3600 | 3605 | ||
| @@ -3604,7 +3609,7 @@ out: | |||
| 3604 | set_fs(old_fs); | 3609 | set_fs(old_fs); |
| 3605 | if (ret < 0) | 3610 | if (ret < 0) |
| 3606 | return ret; | 3611 | return ret; |
| 3607 | return readed; | 3612 | return num_read; |
| 3608 | } | 3613 | } |
| 3609 | 3614 | ||
| 3610 | /* | 3615 | /* |
| @@ -3615,7 +3620,6 @@ static int send_clone(struct send_ctx *sctx, | |||
| 3615 | struct clone_root *clone_root) | 3620 | struct clone_root *clone_root) |
| 3616 | { | 3621 | { |
| 3617 | int ret = 0; | 3622 | int ret = 0; |
| 3618 | struct btrfs_root *clone_root2 = clone_root->root; | ||
| 3619 | struct fs_path *p; | 3623 | struct fs_path *p; |
| 3620 | u64 gen; | 3624 | u64 gen; |
| 3621 | 3625 | ||
| @@ -3640,22 +3644,23 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " | |||
| 3640 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); | 3644 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); |
| 3641 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 3645 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); |
| 3642 | 3646 | ||
| 3643 | if (clone_root2 == sctx->send_root) { | 3647 | if (clone_root->root == sctx->send_root) { |
| 3644 | ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, | 3648 | ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, |
| 3645 | &gen, NULL, NULL, NULL); | 3649 | &gen, NULL, NULL, NULL, NULL); |
| 3646 | if (ret < 0) | 3650 | if (ret < 0) |
| 3647 | goto out; | 3651 | goto out; |
| 3648 | ret = get_cur_path(sctx, clone_root->ino, gen, p); | 3652 | ret = get_cur_path(sctx, clone_root->ino, gen, p); |
| 3649 | } else { | 3653 | } else { |
| 3650 | ret = get_inode_path(sctx, clone_root2, clone_root->ino, p); | 3654 | ret = get_inode_path(sctx, clone_root->root, |
| 3655 | clone_root->ino, p); | ||
| 3651 | } | 3656 | } |
| 3652 | if (ret < 0) | 3657 | if (ret < 0) |
| 3653 | goto out; | 3658 | goto out; |
| 3654 | 3659 | ||
| 3655 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | 3660 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, |
| 3656 | clone_root2->root_item.uuid); | 3661 | clone_root->root->root_item.uuid); |
| 3657 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | 3662 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, |
| 3658 | clone_root2->root_item.ctransid); | 3663 | clone_root->root->root_item.ctransid); |
| 3659 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); | 3664 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); |
| 3660 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, | 3665 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, |
| 3661 | clone_root->offset); | 3666 | clone_root->offset); |
| @@ -3684,10 +3689,17 @@ static int send_write_or_clone(struct send_ctx *sctx, | |||
| 3684 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | 3689 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], |
| 3685 | struct btrfs_file_extent_item); | 3690 | struct btrfs_file_extent_item); |
| 3686 | type = btrfs_file_extent_type(path->nodes[0], ei); | 3691 | type = btrfs_file_extent_type(path->nodes[0], ei); |
| 3687 | if (type == BTRFS_FILE_EXTENT_INLINE) | 3692 | if (type == BTRFS_FILE_EXTENT_INLINE) { |
| 3688 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 3693 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); |
| 3689 | else | 3694 | /* |
| 3695 | * it is possible the inline item won't cover the whole page, | ||
| 3696 | * but there may be items after this page. Make | ||
| 3697 | * sure to send the whole thing | ||
| 3698 | */ | ||
| 3699 | len = PAGE_CACHE_ALIGN(len); | ||
| 3700 | } else { | ||
| 3690 | len = btrfs_file_extent_num_bytes(path->nodes[0], ei); | 3701 | len = btrfs_file_extent_num_bytes(path->nodes[0], ei); |
| 3702 | } | ||
| 3691 | 3703 | ||
| 3692 | if (offset + len > sctx->cur_inode_size) | 3704 | if (offset + len > sctx->cur_inode_size) |
| 3693 | len = sctx->cur_inode_size - offset; | 3705 | len = sctx->cur_inode_size - offset; |
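
The hunk above makes send_write_or_clone() round an inline extent's length up to a whole page before deciding how much data to send, and the existing clamp to the inode size still applies afterwards. A minimal user-space sketch of that arithmetic, assuming a 4 KiB page; PAGE_SZ and page_align() are invented names for the example, not the kernel macros:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096ULL  /* assumed page size for the example */

static uint64_t page_align(uint64_t len)
{
        /* round up to the next multiple of PAGE_SZ */
        return (len + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

int main(void)
{
        uint64_t offset = 0, len = 300, inode_size = 1000;

        len = page_align(len);              /* 300 -> 4096 */
        if (offset + len > inode_size)      /* clamp to i_size */
                len = inode_size - offset;  /* 4096 -> 1000 */

        printf("len to send: %llu\n", (unsigned long long)len);
        return 0;
}
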
| @@ -3735,6 +3747,8 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
| 3735 | u64 left_offset_fixed; | 3747 | u64 left_offset_fixed; |
| 3736 | u64 left_len; | 3748 | u64 left_len; |
| 3737 | u64 right_len; | 3749 | u64 right_len; |
| 3750 | u64 left_gen; | ||
| 3751 | u64 right_gen; | ||
| 3738 | u8 left_type; | 3752 | u8 left_type; |
| 3739 | u8 right_type; | 3753 | u8 right_type; |
| 3740 | 3754 | ||
| @@ -3744,17 +3758,17 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
| 3744 | 3758 | ||
| 3745 | eb = left_path->nodes[0]; | 3759 | eb = left_path->nodes[0]; |
| 3746 | slot = left_path->slots[0]; | 3760 | slot = left_path->slots[0]; |
| 3747 | |||
| 3748 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 3761 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); |
| 3749 | left_type = btrfs_file_extent_type(eb, ei); | 3762 | left_type = btrfs_file_extent_type(eb, ei); |
| 3750 | left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | ||
| 3751 | left_len = btrfs_file_extent_num_bytes(eb, ei); | ||
| 3752 | left_offset = btrfs_file_extent_offset(eb, ei); | ||
| 3753 | 3763 | ||
| 3754 | if (left_type != BTRFS_FILE_EXTENT_REG) { | 3764 | if (left_type != BTRFS_FILE_EXTENT_REG) { |
| 3755 | ret = 0; | 3765 | ret = 0; |
| 3756 | goto out; | 3766 | goto out; |
| 3757 | } | 3767 | } |
| 3768 | left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | ||
| 3769 | left_len = btrfs_file_extent_num_bytes(eb, ei); | ||
| 3770 | left_offset = btrfs_file_extent_offset(eb, ei); | ||
| 3771 | left_gen = btrfs_file_extent_generation(eb, ei); | ||
| 3758 | 3772 | ||
| 3759 | /* | 3773 | /* |
| 3760 | * Following comments will refer to these graphics. L is the left | 3774 | * Following comments will refer to these graphics. L is the left |
| @@ -3810,6 +3824,7 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
| 3810 | right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | 3824 | right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); |
| 3811 | right_len = btrfs_file_extent_num_bytes(eb, ei); | 3825 | right_len = btrfs_file_extent_num_bytes(eb, ei); |
| 3812 | right_offset = btrfs_file_extent_offset(eb, ei); | 3826 | right_offset = btrfs_file_extent_offset(eb, ei); |
| 3827 | right_gen = btrfs_file_extent_generation(eb, ei); | ||
| 3813 | 3828 | ||
| 3814 | if (right_type != BTRFS_FILE_EXTENT_REG) { | 3829 | if (right_type != BTRFS_FILE_EXTENT_REG) { |
| 3815 | ret = 0; | 3830 | ret = 0; |
| @@ -3820,7 +3835,7 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
| 3820 | * Are we at extent 8? If yes, we know the extent is changed. | 3835 | * Are we at extent 8? If yes, we know the extent is changed. |
| 3821 | * This may only happen on the first iteration. | 3836 | * This may only happen on the first iteration. |
| 3822 | */ | 3837 | */ |
| 3823 | if (found_key.offset + right_len < ekey->offset) { | 3838 | if (found_key.offset + right_len <= ekey->offset) { |
| 3824 | ret = 0; | 3839 | ret = 0; |
| 3825 | goto out; | 3840 | goto out; |
| 3826 | } | 3841 | } |
| @@ -3837,8 +3852,9 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
| 3837 | /* | 3852 | /* |
| 3838 | * Check if we have the same extent. | 3853 | * Check if we have the same extent. |
| 3839 | */ | 3854 | */ |
| 3840 | if (left_disknr + left_offset_fixed != | 3855 | if (left_disknr != right_disknr || |
| 3841 | right_disknr + right_offset) { | 3856 | left_offset_fixed != right_offset || |
| 3857 | left_gen != right_gen) { | ||
| 3842 | ret = 0; | 3858 | ret = 0; |
| 3843 | goto out; | 3859 | goto out; |
| 3844 | } | 3860 | } |
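
The hunk above replaces the old sum-based test (left_disknr + left_offset_fixed vs. right_disknr + right_offset) with a field-by-field comparison that also includes the extent generation. A standalone sketch of the stricter check, with a case the old test would have wrongly accepted; the struct and field names are illustrative, not the btrfs item layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extent_ref {
        uint64_t disknr;  /* disk bytenr of the extent */
        uint64_t offset;  /* offset into that extent */
        uint64_t gen;     /* generation the extent was written in */
};

static bool extent_unchanged(const struct extent_ref *l,
                             const struct extent_ref *r)
{
        return l->disknr == r->disknr &&
               l->offset == r->offset &&
               l->gen == r->gen;
}

int main(void)
{
        /* same disknr + offset sum, but clearly different extents */
        struct extent_ref left  = { .disknr = 4096, .offset = 0,    .gen = 5 };
        struct extent_ref right = { .disknr = 0,    .offset = 4096, .gen = 5 };

        printf("unchanged: %d\n", extent_unchanged(&left, &right)); /* 0 */
        return 0;
}
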
| @@ -3977,6 +3993,15 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end) | |||
| 3977 | goto out; | 3993 | goto out; |
| 3978 | 3994 | ||
| 3979 | ret = process_recorded_refs(sctx); | 3995 | ret = process_recorded_refs(sctx); |
| 3996 | if (ret < 0) | ||
| 3997 | goto out; | ||
| 3998 | |||
| 3999 | /* | ||
| 4000 | * We have processed the refs and thus need to advance send_progress. | ||
| 4001 | * Now, calls to get_cur_xxx will take the updated refs of the current | ||
| 4002 | * inode into account. | ||
| 4003 | */ | ||
| 4004 | sctx->send_progress = sctx->cur_ino + 1; | ||
| 3980 | 4005 | ||
| 3981 | out: | 4006 | out: |
| 3982 | return ret; | 4007 | return ret; |
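
The comment added above pins down the send_progress convention: it stays equal to cur_ino while the current inode's refs are still being recorded, and is bumped to cur_ino + 1 once process_recorded_refs finishes, after which the get_cur_xxx helpers see the updated refs. A toy sketch of that convention; the struct and helper names are assumptions for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct send_state {
        uint64_t cur_ino;
        uint64_t send_progress;
};

static bool cur_refs_are_final(const struct send_state *s)
{
        /* progress past cur_ino means process_recorded_refs already ran */
        return s->send_progress > s->cur_ino;
}

int main(void)
{
        struct send_state s = { .cur_ino = 257, .send_progress = 257 };

        printf("%d\n", cur_refs_are_final(&s)); /* 0: use the old refs */
        s.send_progress = s.cur_ino + 1;        /* refs fully processed */
        printf("%d\n", cur_refs_are_final(&s)); /* 1: use the new refs */
        return 0;
}
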
| @@ -4004,7 +4029,7 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | |||
| 4004 | goto out; | 4029 | goto out; |
| 4005 | 4030 | ||
| 4006 | ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, | 4031 | ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, |
| 4007 | &left_mode, &left_uid, &left_gid); | 4032 | &left_mode, &left_uid, &left_gid, NULL); |
| 4008 | if (ret < 0) | 4033 | if (ret < 0) |
| 4009 | goto out; | 4034 | goto out; |
| 4010 | 4035 | ||
| @@ -4015,7 +4040,7 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | |||
| 4015 | } else { | 4040 | } else { |
| 4016 | ret = get_inode_info(sctx->parent_root, sctx->cur_ino, | 4041 | ret = get_inode_info(sctx->parent_root, sctx->cur_ino, |
| 4017 | NULL, NULL, &right_mode, &right_uid, | 4042 | NULL, NULL, &right_mode, &right_uid, |
| 4018 | &right_gid); | 4043 | &right_gid, NULL); |
| 4019 | if (ret < 0) | 4044 | if (ret < 0) |
| 4020 | goto out; | 4045 | goto out; |
| 4021 | 4046 | ||
| @@ -4074,7 +4099,12 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4074 | 4099 | ||
| 4075 | sctx->cur_ino = key->objectid; | 4100 | sctx->cur_ino = key->objectid; |
| 4076 | sctx->cur_inode_new_gen = 0; | 4101 | sctx->cur_inode_new_gen = 0; |
| 4077 | sctx->cur_inode_first_ref_orphan = 0; | 4102 | |
| 4103 | /* | ||
| 4104 | * Set send_progress to current inode. This will tell all get_cur_xxx | ||
| 4105 | * functions that the current inode's refs are not updated yet. Later, | ||
| 4106 | * when process_recorded_refs is finished, it is set to cur_ino + 1. | ||
| 4107 | */ | ||
| 4078 | sctx->send_progress = sctx->cur_ino; | 4108 | sctx->send_progress = sctx->cur_ino; |
| 4079 | 4109 | ||
| 4080 | if (result == BTRFS_COMPARE_TREE_NEW || | 4110 | if (result == BTRFS_COMPARE_TREE_NEW || |
| @@ -4098,7 +4128,14 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4098 | 4128 | ||
| 4099 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | 4129 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], |
| 4100 | right_ii); | 4130 | right_ii); |
| 4101 | if (left_gen != right_gen) | 4131 | |
| 4132 | /* | ||
| 4133 | * The cur_ino = root dir case is special here. We can't treat | ||
| 4134 | * the inode as deleted+reused because it would generate a | ||
| 4135 | * stream that tries to delete/mkdir the root dir. | ||
| 4136 | */ | ||
| 4137 | if (left_gen != right_gen && | ||
| 4138 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | ||
| 4102 | sctx->cur_inode_new_gen = 1; | 4139 | sctx->cur_inode_new_gen = 1; |
| 4103 | } | 4140 | } |
| 4104 | 4141 | ||
| @@ -4111,8 +4148,7 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4111 | sctx->cur_inode_mode = btrfs_inode_mode( | 4148 | sctx->cur_inode_mode = btrfs_inode_mode( |
| 4112 | sctx->left_path->nodes[0], left_ii); | 4149 | sctx->left_path->nodes[0], left_ii); |
| 4113 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 4150 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) |
| 4114 | ret = send_create_inode(sctx, sctx->left_path, | 4151 | ret = send_create_inode_if_needed(sctx); |
| 4115 | sctx->cmp_key); | ||
| 4116 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | 4152 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { |
| 4117 | sctx->cur_inode_gen = right_gen; | 4153 | sctx->cur_inode_gen = right_gen; |
| 4118 | sctx->cur_inode_new = 0; | 4154 | sctx->cur_inode_new = 0; |
| @@ -4122,7 +4158,17 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4122 | sctx->cur_inode_mode = btrfs_inode_mode( | 4158 | sctx->cur_inode_mode = btrfs_inode_mode( |
| 4123 | sctx->right_path->nodes[0], right_ii); | 4159 | sctx->right_path->nodes[0], right_ii); |
| 4124 | } else if (result == BTRFS_COMPARE_TREE_CHANGED) { | 4160 | } else if (result == BTRFS_COMPARE_TREE_CHANGED) { |
| 4161 | /* | ||
| 4162 | * We need to do some special handling in case the inode was | ||
| 4163 | * reported as changed with a changed generation number. This | ||
| 4164 | * means that the original inode was deleted and a new inode | ||

| 4165 | * reused the same inum. So we have to treat the old inode as | ||
| 4166 | * deleted and the new one as new. | ||
| 4167 | */ | ||
| 4125 | if (sctx->cur_inode_new_gen) { | 4168 | if (sctx->cur_inode_new_gen) { |
| 4169 | /* | ||
| 4170 | * First, process the inode as if it was deleted. | ||
| 4171 | */ | ||
| 4126 | sctx->cur_inode_gen = right_gen; | 4172 | sctx->cur_inode_gen = right_gen; |
| 4127 | sctx->cur_inode_new = 0; | 4173 | sctx->cur_inode_new = 0; |
| 4128 | sctx->cur_inode_deleted = 1; | 4174 | sctx->cur_inode_deleted = 1; |
| @@ -4135,6 +4181,9 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4135 | if (ret < 0) | 4181 | if (ret < 0) |
| 4136 | goto out; | 4182 | goto out; |
| 4137 | 4183 | ||
| 4184 | /* | ||
| 4185 | * Now process the inode as if it was new. | ||
| 4186 | */ | ||
| 4138 | sctx->cur_inode_gen = left_gen; | 4187 | sctx->cur_inode_gen = left_gen; |
| 4139 | sctx->cur_inode_new = 1; | 4188 | sctx->cur_inode_new = 1; |
| 4140 | sctx->cur_inode_deleted = 0; | 4189 | sctx->cur_inode_deleted = 0; |
| @@ -4142,14 +4191,23 @@ static int changed_inode(struct send_ctx *sctx, | |||
| 4142 | sctx->left_path->nodes[0], left_ii); | 4191 | sctx->left_path->nodes[0], left_ii); |
| 4143 | sctx->cur_inode_mode = btrfs_inode_mode( | 4192 | sctx->cur_inode_mode = btrfs_inode_mode( |
| 4144 | sctx->left_path->nodes[0], left_ii); | 4193 | sctx->left_path->nodes[0], left_ii); |
| 4145 | ret = send_create_inode(sctx, sctx->left_path, | 4194 | ret = send_create_inode_if_needed(sctx); |
| 4146 | sctx->cmp_key); | ||
| 4147 | if (ret < 0) | 4195 | if (ret < 0) |
| 4148 | goto out; | 4196 | goto out; |
| 4149 | 4197 | ||
| 4150 | ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); | 4198 | ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); |
| 4151 | if (ret < 0) | 4199 | if (ret < 0) |
| 4152 | goto out; | 4200 | goto out; |
| 4201 | /* | ||
| 4202 | * Advance send_progress now as we did not get into | ||
| 4203 | * process_recorded_refs_if_needed in the new_gen case. | ||
| 4204 | */ | ||
| 4205 | sctx->send_progress = sctx->cur_ino + 1; | ||
| 4206 | |||
| 4207 | /* | ||
| 4208 | * Now process all extents and xattrs of the inode as if | ||
| 4209 | * they were all new. | ||
| 4210 | */ | ||
| 4153 | ret = process_all_extents(sctx); | 4211 | ret = process_all_extents(sctx); |
| 4154 | if (ret < 0) | 4212 | if (ret < 0) |
| 4155 | goto out; | 4213 | goto out; |
| @@ -4172,6 +4230,16 @@ out: | |||
| 4172 | return ret; | 4230 | return ret; |
| 4173 | } | 4231 | } |
| 4174 | 4232 | ||
| 4233 | /* | ||
| 4234 | * We have to process new refs before deleted refs, but compare_trees gives us | ||
| 4235 | * the new and deleted refs mixed. To fix this, we record the new/deleted refs | ||
| 4236 | * first and later process them in process_recorded_refs. | ||
| 4237 | * For the cur_inode_new_gen case, we skip recording completely because | ||
| 4238 | * changed_inode has already initiated processing of refs. The reason for this is | ||
| 4239 | * that in this case, compare_tree actually compares the refs of 2 different | ||
| 4240 | * inodes. To fix this, process_all_refs is used in changed_inode to handle all | ||
| 4241 | * refs of the right tree as deleted and all refs of the left tree as new. | ||
| 4242 | */ | ||
| 4175 | static int changed_ref(struct send_ctx *sctx, | 4243 | static int changed_ref(struct send_ctx *sctx, |
| 4176 | enum btrfs_compare_tree_result result) | 4244 | enum btrfs_compare_tree_result result) |
| 4177 | { | 4245 | { |
| @@ -4192,6 +4260,11 @@ static int changed_ref(struct send_ctx *sctx, | |||
| 4192 | return ret; | 4260 | return ret; |
| 4193 | } | 4261 | } |
| 4194 | 4262 | ||
| 4263 | /* | ||
| 4264 | * Process new/deleted/changed xattrs. We skip processing in the | ||
| 4265 | * cur_inode_new_gen case because changed_inode has already initiated processing | ||
| 4266 | * of xattrs. The reason is the same as in changed_ref. | ||
| 4267 | */ | ||
| 4195 | static int changed_xattr(struct send_ctx *sctx, | 4268 | static int changed_xattr(struct send_ctx *sctx, |
| 4196 | enum btrfs_compare_tree_result result) | 4269 | enum btrfs_compare_tree_result result) |
| 4197 | { | 4270 | { |
| @@ -4211,6 +4284,11 @@ static int changed_xattr(struct send_ctx *sctx, | |||
| 4211 | return ret; | 4284 | return ret; |
| 4212 | } | 4285 | } |
| 4213 | 4286 | ||
| 4287 | /* | ||
| 4288 | * Process new/deleted/changed extents. We skip processing in the | ||
| 4289 | * cur_inode_new_gen case because changed_inode has already initiated processing | ||
| 4290 | * of extents. The reason is the same as in changed_ref. | ||
| 4291 | */ | ||
| 4214 | static int changed_extent(struct send_ctx *sctx, | 4292 | static int changed_extent(struct send_ctx *sctx, |
| 4215 | enum btrfs_compare_tree_result result) | 4293 | enum btrfs_compare_tree_result result) |
| 4216 | { | 4294 | { |
| @@ -4227,7 +4305,10 @@ static int changed_extent(struct send_ctx *sctx, | |||
| 4227 | return ret; | 4305 | return ret; |
| 4228 | } | 4306 | } |
| 4229 | 4307 | ||
| 4230 | 4308 | /* | |
| 4309 | * Updates compare related fields in sctx and simply forwards to the actual | ||
| 4310 | * changed_xxx functions. | ||
| 4311 | */ | ||
| 4231 | static int changed_cb(struct btrfs_root *left_root, | 4312 | static int changed_cb(struct btrfs_root *left_root, |
| 4232 | struct btrfs_root *right_root, | 4313 | struct btrfs_root *right_root, |
| 4233 | struct btrfs_path *left_path, | 4314 | struct btrfs_path *left_path, |
| @@ -4247,6 +4328,11 @@ static int changed_cb(struct btrfs_root *left_root, | |||
| 4247 | if (ret < 0) | 4328 | if (ret < 0) |
| 4248 | goto out; | 4329 | goto out; |
| 4249 | 4330 | ||
| 4331 | /* Ignore non-FS objects */ | ||
| 4332 | if (key->objectid == BTRFS_FREE_INO_OBJECTID || | ||
| 4333 | key->objectid == BTRFS_FREE_SPACE_OBJECTID) | ||
| 4334 | goto out; | ||
| 4335 | |||
| 4250 | if (key->type == BTRFS_INODE_ITEM_KEY) | 4336 | if (key->type == BTRFS_INODE_ITEM_KEY) |
| 4251 | ret = changed_inode(sctx, result); | 4337 | ret = changed_inode(sctx, result); |
| 4252 | else if (key->type == BTRFS_INODE_REF_KEY) | 4338 | else if (key->type == BTRFS_INODE_REF_KEY) |
| @@ -4299,7 +4385,8 @@ join_trans: | |||
| 4299 | } | 4385 | } |
| 4300 | 4386 | ||
| 4301 | /* | 4387 | /* |
| 4302 | * Make sure the tree has not changed | 4388 | * Make sure the tree has not changed after re-joining. We detect this |
| 4389 | * by comparing start_ctransid and ctransid. They should always match. | ||
| 4303 | */ | 4390 | */ |
| 4304 | spin_lock(&send_root->root_times_lock); | 4391 | spin_lock(&send_root->root_times_lock); |
| 4305 | ctransid = btrfs_root_ctransid(&send_root->root_item); | 4392 | ctransid = btrfs_root_ctransid(&send_root->root_item); |
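
The expanded comment above spells out how a concurrent modification is detected after re-joining the transaction: the ctransid recorded when the send started must still equal the root's current ctransid. A trivial sketch of that guard, with placeholder values:

#include <stdint.h>
#include <stdio.h>

static int tree_unchanged(uint64_t start_ctransid, uint64_t ctransid)
{
        return start_ctransid == ctransid;
}

int main(void)
{
        printf("%d\n", tree_unchanged(100, 100)); /* 1: safe to continue */
        printf("%d\n", tree_unchanged(100, 101)); /* 0: tree was modified */
        return 0;
}
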
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h index 9934e948e57f..1bf4f32fd4ef 100644 --- a/fs/btrfs/send.h +++ b/fs/btrfs/send.h | |||
| @@ -130,4 +130,5 @@ enum { | |||
| 130 | 130 | ||
| 131 | #ifdef __KERNEL__ | 131 | #ifdef __KERNEL__ |
| 132 | long btrfs_ioctl_send(struct file *mnt_file, void __user *arg); | 132 | long btrfs_ioctl_send(struct file *mnt_file, void __user *arg); |
| 133 | int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off); | ||
| 133 | #endif | 134 | #endif |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 83d6f9f9c220..915ac14c2064 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -243,12 +243,18 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, | |||
| 243 | struct btrfs_root *root, const char *function, | 243 | struct btrfs_root *root, const char *function, |
| 244 | unsigned int line, int errno) | 244 | unsigned int line, int errno) |
| 245 | { | 245 | { |
| 246 | WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted"); | 246 | WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted\n"); |
| 247 | trans->aborted = errno; | 247 | trans->aborted = errno; |
| 248 | /* Nothing used. The other threads that have joined this | 248 | /* Nothing used. The other threads that have joined this |
| 249 | * transaction may be able to continue. */ | 249 | * transaction may be able to continue. */ |
| 250 | if (!trans->blocks_used) { | 250 | if (!trans->blocks_used) { |
| 251 | btrfs_printk(root->fs_info, "Aborting unused transaction.\n"); | 251 | char nbuf[16]; |
| 252 | const char *errstr; | ||
| 253 | |||
| 254 | errstr = btrfs_decode_error(root->fs_info, errno, nbuf); | ||
| 255 | btrfs_printk(root->fs_info, | ||
| 256 | "%s:%d: Aborting unused transaction(%s).\n", | ||
| 257 | function, line, errstr); | ||
| 252 | return; | 258 | return; |
| 253 | } | 259 | } |
| 254 | trans->transaction->aborted = errno; | 260 | trans->transaction->aborted = errno; |
| @@ -407,7 +413,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 407 | btrfs_set_opt(info->mount_opt, NODATASUM); | 413 | btrfs_set_opt(info->mount_opt, NODATASUM); |
| 408 | break; | 414 | break; |
| 409 | case Opt_nodatacow: | 415 | case Opt_nodatacow: |
| 410 | printk(KERN_INFO "btrfs: setting nodatacow\n"); | 416 | if (!btrfs_test_opt(root, COMPRESS) || |
| 417 | !btrfs_test_opt(root, FORCE_COMPRESS)) { | ||
| 418 | printk(KERN_INFO "btrfs: setting nodatacow, compression disabled\n"); | ||
| 419 | } else { | ||
| 420 | printk(KERN_INFO "btrfs: setting nodatacow\n"); | ||
| 421 | } | ||
| 422 | info->compress_type = BTRFS_COMPRESS_NONE; | ||
| 423 | btrfs_clear_opt(info->mount_opt, COMPRESS); | ||
| 424 | btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); | ||
| 411 | btrfs_set_opt(info->mount_opt, NODATACOW); | 425 | btrfs_set_opt(info->mount_opt, NODATACOW); |
| 412 | btrfs_set_opt(info->mount_opt, NODATASUM); | 426 | btrfs_set_opt(info->mount_opt, NODATASUM); |
| 413 | break; | 427 | break; |
| @@ -422,10 +436,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 422 | compress_type = "zlib"; | 436 | compress_type = "zlib"; |
| 423 | info->compress_type = BTRFS_COMPRESS_ZLIB; | 437 | info->compress_type = BTRFS_COMPRESS_ZLIB; |
| 424 | btrfs_set_opt(info->mount_opt, COMPRESS); | 438 | btrfs_set_opt(info->mount_opt, COMPRESS); |
| 439 | btrfs_clear_opt(info->mount_opt, NODATACOW); | ||
| 440 | btrfs_clear_opt(info->mount_opt, NODATASUM); | ||
| 425 | } else if (strcmp(args[0].from, "lzo") == 0) { | 441 | } else if (strcmp(args[0].from, "lzo") == 0) { |
| 426 | compress_type = "lzo"; | 442 | compress_type = "lzo"; |
| 427 | info->compress_type = BTRFS_COMPRESS_LZO; | 443 | info->compress_type = BTRFS_COMPRESS_LZO; |
| 428 | btrfs_set_opt(info->mount_opt, COMPRESS); | 444 | btrfs_set_opt(info->mount_opt, COMPRESS); |
| 445 | btrfs_clear_opt(info->mount_opt, NODATACOW); | ||
| 446 | btrfs_clear_opt(info->mount_opt, NODATASUM); | ||
| 429 | btrfs_set_fs_incompat(info, COMPRESS_LZO); | 447 | btrfs_set_fs_incompat(info, COMPRESS_LZO); |
| 430 | } else if (strncmp(args[0].from, "no", 2) == 0) { | 448 | } else if (strncmp(args[0].from, "no", 2) == 0) { |
| 431 | compress_type = "no"; | 449 | compress_type = "no"; |
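
Taken together, the two option-parsing hunks above make nodatacow and compression mutually exclusive: selecting nodatacow clears the compress flags and forces BTRFS_COMPRESS_NONE, and selecting zlib or lzo clears nodatacow and nodatasum. A compilable sketch of that flag handling, using invented bit values rather than the kernel's mount-option bits:

#include <stdio.h>

enum {
        OPT_NODATACOW      = 1 << 0,
        OPT_NODATASUM      = 1 << 1,
        OPT_COMPRESS       = 1 << 2,
        OPT_FORCE_COMPRESS = 1 << 3,
};

static void set_nodatacow(unsigned int *opts)
{
        *opts &= ~(OPT_COMPRESS | OPT_FORCE_COMPRESS);
        *opts |= OPT_NODATACOW | OPT_NODATASUM;
}

static void set_compress(unsigned int *opts)
{
        *opts &= ~(OPT_NODATACOW | OPT_NODATASUM);
        *opts |= OPT_COMPRESS;
}

int main(void)
{
        unsigned int opts = 0;

        set_compress(&opts);
        set_nodatacow(&opts);   /* compression is dropped again */
        printf("compress still set: %d\n", !!(opts & OPT_COMPRESS));
        return 0;
}
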
| @@ -543,11 +561,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 543 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); | 561 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); |
| 544 | break; | 562 | break; |
| 545 | case Opt_defrag: | 563 | case Opt_defrag: |
| 546 | printk(KERN_INFO "btrfs: enabling auto defrag"); | 564 | printk(KERN_INFO "btrfs: enabling auto defrag\n"); |
| 547 | btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); | 565 | btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); |
| 548 | break; | 566 | break; |
| 549 | case Opt_recovery: | 567 | case Opt_recovery: |
| 550 | printk(KERN_INFO "btrfs: enabling auto recovery"); | 568 | printk(KERN_INFO "btrfs: enabling auto recovery\n"); |
| 551 | btrfs_set_opt(info->mount_opt, RECOVERY); | 569 | btrfs_set_opt(info->mount_opt, RECOVERY); |
| 552 | break; | 570 | break; |
| 553 | case Opt_skip_balance: | 571 | case Opt_skip_balance: |
| @@ -846,18 +864,15 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
| 846 | return 0; | 864 | return 0; |
| 847 | } | 865 | } |
| 848 | 866 | ||
| 849 | btrfs_wait_ordered_extents(root, 0, 0); | 867 | btrfs_wait_ordered_extents(root, 0); |
| 850 | |||
| 851 | spin_lock(&fs_info->trans_lock); | ||
| 852 | if (!fs_info->running_transaction) { | ||
| 853 | spin_unlock(&fs_info->trans_lock); | ||
| 854 | return 0; | ||
| 855 | } | ||
| 856 | spin_unlock(&fs_info->trans_lock); | ||
| 857 | 868 | ||
| 858 | trans = btrfs_join_transaction(root); | 869 | trans = btrfs_attach_transaction(root); |
| 859 | if (IS_ERR(trans)) | 870 | if (IS_ERR(trans)) { |
| 871 | /* no transaction, don't bother */ | ||
| 872 | if (PTR_ERR(trans) == -ENOENT) | ||
| 873 | return 0; | ||
| 860 | return PTR_ERR(trans); | 874 | return PTR_ERR(trans); |
| 875 | } | ||
| 861 | return btrfs_commit_transaction(trans, root); | 876 | return btrfs_commit_transaction(trans, root); |
| 862 | } | 877 | } |
| 863 | 878 | ||
| @@ -1508,17 +1523,21 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, | |||
| 1508 | 1523 | ||
| 1509 | static int btrfs_freeze(struct super_block *sb) | 1524 | static int btrfs_freeze(struct super_block *sb) |
| 1510 | { | 1525 | { |
| 1511 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); | 1526 | struct btrfs_trans_handle *trans; |
| 1512 | mutex_lock(&fs_info->transaction_kthread_mutex); | 1527 | struct btrfs_root *root = btrfs_sb(sb)->tree_root; |
| 1513 | mutex_lock(&fs_info->cleaner_mutex); | 1528 | |
| 1514 | return 0; | 1529 | trans = btrfs_attach_transaction(root); |
| 1530 | if (IS_ERR(trans)) { | ||
| 1531 | /* no transaction, don't bother */ | ||
| 1532 | if (PTR_ERR(trans) == -ENOENT) | ||
| 1533 | return 0; | ||
| 1534 | return PTR_ERR(trans); | ||
| 1535 | } | ||
| 1536 | return btrfs_commit_transaction(trans, root); | ||
| 1515 | } | 1537 | } |
| 1516 | 1538 | ||
| 1517 | static int btrfs_unfreeze(struct super_block *sb) | 1539 | static int btrfs_unfreeze(struct super_block *sb) |
| 1518 | { | 1540 | { |
| 1519 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); | ||
| 1520 | mutex_unlock(&fs_info->cleaner_mutex); | ||
| 1521 | mutex_unlock(&fs_info->transaction_kthread_mutex); | ||
| 1522 | return 0; | 1541 | return 0; |
| 1523 | } | 1542 | } |
| 1524 | 1543 | ||
| @@ -1595,7 +1614,7 @@ static int btrfs_interface_init(void) | |||
| 1595 | static void btrfs_interface_exit(void) | 1614 | static void btrfs_interface_exit(void) |
| 1596 | { | 1615 | { |
| 1597 | if (misc_deregister(&btrfs_misc) < 0) | 1616 | if (misc_deregister(&btrfs_misc) < 0) |
| 1598 | printk(KERN_INFO "misc_deregister failed for control device"); | 1617 | printk(KERN_INFO "btrfs: misc_deregister failed for control device\n"); |
| 1599 | } | 1618 | } |
| 1600 | 1619 | ||
| 1601 | static int __init init_btrfs_fs(void) | 1620 | static int __init init_btrfs_fs(void) |
| @@ -1620,10 +1639,14 @@ static int __init init_btrfs_fs(void) | |||
| 1620 | if (err) | 1639 | if (err) |
| 1621 | goto free_extent_io; | 1640 | goto free_extent_io; |
| 1622 | 1641 | ||
| 1623 | err = btrfs_delayed_inode_init(); | 1642 | err = ordered_data_init(); |
| 1624 | if (err) | 1643 | if (err) |
| 1625 | goto free_extent_map; | 1644 | goto free_extent_map; |
| 1626 | 1645 | ||
| 1646 | err = btrfs_delayed_inode_init(); | ||
| 1647 | if (err) | ||
| 1648 | goto free_ordered_data; | ||
| 1649 | |||
| 1627 | err = btrfs_interface_init(); | 1650 | err = btrfs_interface_init(); |
| 1628 | if (err) | 1651 | if (err) |
| 1629 | goto free_delayed_inode; | 1652 | goto free_delayed_inode; |
| @@ -1641,6 +1664,8 @@ unregister_ioctl: | |||
| 1641 | btrfs_interface_exit(); | 1664 | btrfs_interface_exit(); |
| 1642 | free_delayed_inode: | 1665 | free_delayed_inode: |
| 1643 | btrfs_delayed_inode_exit(); | 1666 | btrfs_delayed_inode_exit(); |
| 1667 | free_ordered_data: | ||
| 1668 | ordered_data_exit(); | ||
| 1644 | free_extent_map: | 1669 | free_extent_map: |
| 1645 | extent_map_exit(); | 1670 | extent_map_exit(); |
| 1646 | free_extent_io: | 1671 | free_extent_io: |
| @@ -1657,6 +1682,7 @@ static void __exit exit_btrfs_fs(void) | |||
| 1657 | { | 1682 | { |
| 1658 | btrfs_destroy_cachep(); | 1683 | btrfs_destroy_cachep(); |
| 1659 | btrfs_delayed_inode_exit(); | 1684 | btrfs_delayed_inode_exit(); |
| 1685 | ordered_data_exit(); | ||
| 1660 | extent_map_exit(); | 1686 | extent_map_exit(); |
| 1661 | extent_io_exit(); | 1687 | extent_io_exit(); |
| 1662 | btrfs_interface_exit(); | 1688 | btrfs_interface_exit(); |
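
The init path above gains ordered_data_init() with a matching free_ordered_data unwind label, and exit_btrfs_fs() tears the caches down in the reverse order they were set up. A generic, self-contained sketch of that goto-based unwinding with placeholder subsystems and one simulated failure:

#include <stdio.h>

static int  init_a(void) { return 0; }
static int  init_b(void) { return 0; }
static int  init_c(void) { return -1; } /* pretend the last step fails */
static void exit_b(void) { puts("undo b"); }
static void exit_a(void) { puts("undo a"); }

static int module_init_sketch(void)
{
        int err;

        err = init_a();
        if (err)
                goto out;
        err = init_b();
        if (err)
                goto undo_a;
        err = init_c();
        if (err)
                goto undo_b;
        return 0;

undo_b:
        exit_b();
undo_a:
        exit_a();
out:
        return err;
}

int main(void)
{
        printf("init = %d\n", module_init_sketch());
        return 0;
}
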
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 27c26004e050..77db875b5116 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -53,7 +53,7 @@ static noinline void switch_commit_root(struct btrfs_root *root) | |||
| 53 | /* | 53 | /* |
| 54 | * either allocate a new transaction or hop into the existing one | 54 | * either allocate a new transaction or hop into the existing one |
| 55 | */ | 55 | */ |
| 56 | static noinline int join_transaction(struct btrfs_root *root, int nofail) | 56 | static noinline int join_transaction(struct btrfs_root *root, int type) |
| 57 | { | 57 | { |
| 58 | struct btrfs_transaction *cur_trans; | 58 | struct btrfs_transaction *cur_trans; |
| 59 | struct btrfs_fs_info *fs_info = root->fs_info; | 59 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -67,7 +67,13 @@ loop: | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | if (fs_info->trans_no_join) { | 69 | if (fs_info->trans_no_join) { |
| 70 | if (!nofail) { | 70 | /* |
| 71 | * If we are JOIN_NOLOCK we're already committing a current | ||
| 72 | * transaction, we just need a handle to deal with something | ||
| 73 | * when committing the transaction, such as inode cache and | ||
| 74 | * space cache. It is a special case. | ||
| 75 | */ | ||
| 76 | if (type != TRANS_JOIN_NOLOCK) { | ||
| 71 | spin_unlock(&fs_info->trans_lock); | 77 | spin_unlock(&fs_info->trans_lock); |
| 72 | return -EBUSY; | 78 | return -EBUSY; |
| 73 | } | 79 | } |
| @@ -87,6 +93,13 @@ loop: | |||
| 87 | } | 93 | } |
| 88 | spin_unlock(&fs_info->trans_lock); | 94 | spin_unlock(&fs_info->trans_lock); |
| 89 | 95 | ||
| 96 | /* | ||
| 97 | * If we are ATTACH, we just want to catch the current transaction, | ||
| 98 | * and commit it. If there is no transaction, just return ENOENT. | ||
| 99 | */ | ||
| 100 | if (type == TRANS_ATTACH) | ||
| 101 | return -ENOENT; | ||
| 102 | |||
| 90 | cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); | 103 | cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); |
| 91 | if (!cur_trans) | 104 | if (!cur_trans) |
| 92 | return -ENOMEM; | 105 | return -ENOMEM; |
| @@ -267,13 +280,6 @@ static void wait_current_trans(struct btrfs_root *root) | |||
| 267 | } | 280 | } |
| 268 | } | 281 | } |
| 269 | 282 | ||
| 270 | enum btrfs_trans_type { | ||
| 271 | TRANS_START, | ||
| 272 | TRANS_JOIN, | ||
| 273 | TRANS_USERSPACE, | ||
| 274 | TRANS_JOIN_NOLOCK, | ||
| 275 | }; | ||
| 276 | |||
| 277 | static int may_wait_transaction(struct btrfs_root *root, int type) | 283 | static int may_wait_transaction(struct btrfs_root *root, int type) |
| 278 | { | 284 | { |
| 279 | if (root->fs_info->log_root_recovering) | 285 | if (root->fs_info->log_root_recovering) |
| @@ -290,7 +296,8 @@ static int may_wait_transaction(struct btrfs_root *root, int type) | |||
| 290 | } | 296 | } |
| 291 | 297 | ||
| 292 | static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, | 298 | static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, |
| 293 | u64 num_items, int type) | 299 | u64 num_items, int type, |
| 300 | int noflush) | ||
| 294 | { | 301 | { |
| 295 | struct btrfs_trans_handle *h; | 302 | struct btrfs_trans_handle *h; |
| 296 | struct btrfs_transaction *cur_trans; | 303 | struct btrfs_transaction *cur_trans; |
| @@ -324,9 +331,14 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, | |||
| 324 | } | 331 | } |
| 325 | 332 | ||
| 326 | num_bytes = btrfs_calc_trans_metadata_size(root, num_items); | 333 | num_bytes = btrfs_calc_trans_metadata_size(root, num_items); |
| 327 | ret = btrfs_block_rsv_add(root, | 334 | if (noflush) |
| 328 | &root->fs_info->trans_block_rsv, | 335 | ret = btrfs_block_rsv_add_noflush(root, |
| 329 | num_bytes); | 336 | &root->fs_info->trans_block_rsv, |
| 337 | num_bytes); | ||
| 338 | else | ||
| 339 | ret = btrfs_block_rsv_add(root, | ||
| 340 | &root->fs_info->trans_block_rsv, | ||
| 341 | num_bytes); | ||
| 330 | if (ret) | 342 | if (ret) |
| 331 | return ERR_PTR(ret); | 343 | return ERR_PTR(ret); |
| 332 | } | 344 | } |
| @@ -335,19 +347,34 @@ again: | |||
| 335 | if (!h) | 347 | if (!h) |
| 336 | return ERR_PTR(-ENOMEM); | 348 | return ERR_PTR(-ENOMEM); |
| 337 | 349 | ||
| 338 | sb_start_intwrite(root->fs_info->sb); | 350 | /* |
| 351 | * If we are JOIN_NOLOCK we're already committing a transaction and | ||
| 352 | * waiting on this guy, so we don't need to do the sb_start_intwrite | ||
| 353 | * because we're already holding a ref. We need this because we could | ||
| 354 | * have raced in and done an fsync() on a file which can kick a commit | ||
| 355 | * and then we deadlock with somebody doing a freeze. | ||
| 356 | * | ||
| 357 | * If we are ATTACH, it means we just want to catch the current | ||
| 358 | * transaction and commit it, so we needn't do sb_start_intwrite(). | ||
| 359 | */ | ||
| 360 | if (type < TRANS_JOIN_NOLOCK) | ||
| 361 | sb_start_intwrite(root->fs_info->sb); | ||
| 339 | 362 | ||
| 340 | if (may_wait_transaction(root, type)) | 363 | if (may_wait_transaction(root, type)) |
| 341 | wait_current_trans(root); | 364 | wait_current_trans(root); |
| 342 | 365 | ||
| 343 | do { | 366 | do { |
| 344 | ret = join_transaction(root, type == TRANS_JOIN_NOLOCK); | 367 | ret = join_transaction(root, type); |
| 345 | if (ret == -EBUSY) | 368 | if (ret == -EBUSY) |
| 346 | wait_current_trans(root); | 369 | wait_current_trans(root); |
| 347 | } while (ret == -EBUSY); | 370 | } while (ret == -EBUSY); |
| 348 | 371 | ||
| 349 | if (ret < 0) { | 372 | if (ret < 0) { |
| 350 | sb_end_intwrite(root->fs_info->sb); | 373 | /* We must get the transaction if we are JOIN_NOLOCK. */ |
| 374 | BUG_ON(type == TRANS_JOIN_NOLOCK); | ||
| 375 | |||
| 376 | if (type < TRANS_JOIN_NOLOCK) | ||
| 377 | sb_end_intwrite(root->fs_info->sb); | ||
| 351 | kmem_cache_free(btrfs_trans_handle_cachep, h); | 378 | kmem_cache_free(btrfs_trans_handle_cachep, h); |
| 352 | return ERR_PTR(ret); | 379 | return ERR_PTR(ret); |
| 353 | } | 380 | } |
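
Several of the checks above read "type < TRANS_JOIN_NOLOCK", which relies on the transaction types being declared in a deliberate order: the types before TRANS_JOIN_NOLOCK take sb_start_intwrite() freeze protection, while TRANS_JOIN_NOLOCK and TRANS_ATTACH do not. A sketch of that convention; the enum values below are assumptions for the example, the real enum lives in the btrfs transaction header:

#include <stdbool.h>
#include <stdio.h>

enum trans_type_sketch {
        SK_TRANS_START,         /* these take freeze protection ... */
        SK_TRANS_JOIN,
        SK_TRANS_USERSPACE,
        SK_TRANS_JOIN_NOLOCK,   /* ... and from here on they do not */
        SK_TRANS_ATTACH,
};

static bool needs_intwrite(enum trans_type_sketch type)
{
        return type < SK_TRANS_JOIN_NOLOCK;
}

int main(void)
{
        printf("START  needs intwrite: %d\n", needs_intwrite(SK_TRANS_START));
        printf("ATTACH needs intwrite: %d\n", needs_intwrite(SK_TRANS_ATTACH));
        return 0;
}
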
| @@ -367,7 +394,9 @@ again: | |||
| 367 | h->aborted = 0; | 394 | h->aborted = 0; |
| 368 | h->qgroup_reserved = qgroup_reserved; | 395 | h->qgroup_reserved = qgroup_reserved; |
| 369 | h->delayed_ref_elem.seq = 0; | 396 | h->delayed_ref_elem.seq = 0; |
| 397 | h->type = type; | ||
| 370 | INIT_LIST_HEAD(&h->qgroup_ref_list); | 398 | INIT_LIST_HEAD(&h->qgroup_ref_list); |
| 399 | INIT_LIST_HEAD(&h->new_bgs); | ||
| 371 | 400 | ||
| 372 | smp_mb(); | 401 | smp_mb(); |
| 373 | if (cur_trans->blocked && may_wait_transaction(root, type)) { | 402 | if (cur_trans->blocked && may_wait_transaction(root, type)) { |
| @@ -393,21 +422,33 @@ got_it: | |||
| 393 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, | 422 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, |
| 394 | int num_items) | 423 | int num_items) |
| 395 | { | 424 | { |
| 396 | return start_transaction(root, num_items, TRANS_START); | 425 | return start_transaction(root, num_items, TRANS_START, 0); |
| 426 | } | ||
| 427 | |||
| 428 | struct btrfs_trans_handle *btrfs_start_transaction_noflush( | ||
| 429 | struct btrfs_root *root, int num_items) | ||
| 430 | { | ||
| 431 | return start_transaction(root, num_items, TRANS_START, 1); | ||
| 397 | } | 432 | } |
| 433 | |||
| 398 | struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) | 434 | struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) |
| 399 | { | 435 | { |
| 400 | return start_transaction(root, 0, TRANS_JOIN); | 436 | return start_transaction(root, 0, TRANS_JOIN, 0); |
| 401 | } | 437 | } |
| 402 | 438 | ||
| 403 | struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) | 439 | struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) |
| 404 | { | 440 | { |
| 405 | return start_transaction(root, 0, TRANS_JOIN_NOLOCK); | 441 | return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0); |
| 406 | } | 442 | } |
| 407 | 443 | ||
| 408 | struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) | 444 | struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) |
| 409 | { | 445 | { |
| 410 | return start_transaction(root, 0, TRANS_USERSPACE); | 446 | return start_transaction(root, 0, TRANS_USERSPACE, 0); |
| 447 | } | ||
| 448 | |||
| 449 | struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root) | ||
| 450 | { | ||
| 451 | return start_transaction(root, 0, TRANS_ATTACH, 0); | ||
| 411 | } | 452 | } |
| 412 | 453 | ||
| 413 | /* wait for a transaction commit to be fully complete */ | 454 | /* wait for a transaction commit to be fully complete */ |
| @@ -506,11 +547,12 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, | |||
| 506 | } | 547 | } |
| 507 | 548 | ||
| 508 | static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | 549 | static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, |
| 509 | struct btrfs_root *root, int throttle, int lock) | 550 | struct btrfs_root *root, int throttle) |
| 510 | { | 551 | { |
| 511 | struct btrfs_transaction *cur_trans = trans->transaction; | 552 | struct btrfs_transaction *cur_trans = trans->transaction; |
| 512 | struct btrfs_fs_info *info = root->fs_info; | 553 | struct btrfs_fs_info *info = root->fs_info; |
| 513 | int count = 0; | 554 | int count = 0; |
| 555 | int lock = (trans->type != TRANS_JOIN_NOLOCK); | ||
| 514 | int err = 0; | 556 | int err = 0; |
| 515 | 557 | ||
| 516 | if (--trans->use_count) { | 558 | if (--trans->use_count) { |
| @@ -536,6 +578,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 536 | trans->qgroup_reserved = 0; | 578 | trans->qgroup_reserved = 0; |
| 537 | } | 579 | } |
| 538 | 580 | ||
| 581 | if (!list_empty(&trans->new_bgs)) | ||
| 582 | btrfs_create_pending_block_groups(trans, root); | ||
| 583 | |||
| 539 | while (count < 2) { | 584 | while (count < 2) { |
| 540 | unsigned long cur = trans->delayed_ref_updates; | 585 | unsigned long cur = trans->delayed_ref_updates; |
| 541 | trans->delayed_ref_updates = 0; | 586 | trans->delayed_ref_updates = 0; |
| @@ -551,7 +596,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 551 | btrfs_trans_release_metadata(trans, root); | 596 | btrfs_trans_release_metadata(trans, root); |
| 552 | trans->block_rsv = NULL; | 597 | trans->block_rsv = NULL; |
| 553 | 598 | ||
| 554 | sb_end_intwrite(root->fs_info->sb); | 599 | if (!list_empty(&trans->new_bgs)) |
| 600 | btrfs_create_pending_block_groups(trans, root); | ||
| 555 | 601 | ||
| 556 | if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && | 602 | if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && |
| 557 | should_end_transaction(trans, root)) { | 603 | should_end_transaction(trans, root)) { |
| @@ -573,6 +619,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 573 | } | 619 | } |
| 574 | } | 620 | } |
| 575 | 621 | ||
| 622 | if (trans->type < TRANS_JOIN_NOLOCK) | ||
| 623 | sb_end_intwrite(root->fs_info->sb); | ||
| 624 | |||
| 576 | WARN_ON(cur_trans != info->running_transaction); | 625 | WARN_ON(cur_trans != info->running_transaction); |
| 577 | WARN_ON(atomic_read(&cur_trans->num_writers) < 1); | 626 | WARN_ON(atomic_read(&cur_trans->num_writers) < 1); |
| 578 | atomic_dec(&cur_trans->num_writers); | 627 | atomic_dec(&cur_trans->num_writers); |
| @@ -604,7 +653,7 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 604 | { | 653 | { |
| 605 | int ret; | 654 | int ret; |
| 606 | 655 | ||
| 607 | ret = __btrfs_end_transaction(trans, root, 0, 1); | 656 | ret = __btrfs_end_transaction(trans, root, 0); |
| 608 | if (ret) | 657 | if (ret) |
| 609 | return ret; | 658 | return ret; |
| 610 | return 0; | 659 | return 0; |
| @@ -615,18 +664,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, | |||
| 615 | { | 664 | { |
| 616 | int ret; | 665 | int ret; |
| 617 | 666 | ||
| 618 | ret = __btrfs_end_transaction(trans, root, 1, 1); | 667 | ret = __btrfs_end_transaction(trans, root, 1); |
| 619 | if (ret) | ||
| 620 | return ret; | ||
| 621 | return 0; | ||
| 622 | } | ||
| 623 | |||
| 624 | int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, | ||
| 625 | struct btrfs_root *root) | ||
| 626 | { | ||
| 627 | int ret; | ||
| 628 | |||
| 629 | ret = __btrfs_end_transaction(trans, root, 0, 0); | ||
| 630 | if (ret) | 668 | if (ret) |
| 631 | return ret; | 669 | return ret; |
| 632 | return 0; | 670 | return 0; |
| @@ -635,7 +673,7 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, | |||
| 635 | int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, | 673 | int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, |
| 636 | struct btrfs_root *root) | 674 | struct btrfs_root *root) |
| 637 | { | 675 | { |
| 638 | return __btrfs_end_transaction(trans, root, 1, 1); | 676 | return __btrfs_end_transaction(trans, root, 1); |
| 639 | } | 677 | } |
| 640 | 678 | ||
| 641 | /* | 679 | /* |
| @@ -649,13 +687,15 @@ int btrfs_write_marked_extents(struct btrfs_root *root, | |||
| 649 | int err = 0; | 687 | int err = 0; |
| 650 | int werr = 0; | 688 | int werr = 0; |
| 651 | struct address_space *mapping = root->fs_info->btree_inode->i_mapping; | 689 | struct address_space *mapping = root->fs_info->btree_inode->i_mapping; |
| 690 | struct extent_state *cached_state = NULL; | ||
| 652 | u64 start = 0; | 691 | u64 start = 0; |
| 653 | u64 end; | 692 | u64 end; |
| 654 | 693 | ||
| 655 | while (!find_first_extent_bit(dirty_pages, start, &start, &end, | 694 | while (!find_first_extent_bit(dirty_pages, start, &start, &end, |
| 656 | mark)) { | 695 | mark, &cached_state)) { |
| 657 | convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark, | 696 | convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, |
| 658 | GFP_NOFS); | 697 | mark, &cached_state, GFP_NOFS); |
| 698 | cached_state = NULL; | ||
| 659 | err = filemap_fdatawrite_range(mapping, start, end); | 699 | err = filemap_fdatawrite_range(mapping, start, end); |
| 660 | if (err) | 700 | if (err) |
| 661 | werr = err; | 701 | werr = err; |
| @@ -679,12 +719,14 @@ int btrfs_wait_marked_extents(struct btrfs_root *root, | |||
| 679 | int err = 0; | 719 | int err = 0; |
| 680 | int werr = 0; | 720 | int werr = 0; |
| 681 | struct address_space *mapping = root->fs_info->btree_inode->i_mapping; | 721 | struct address_space *mapping = root->fs_info->btree_inode->i_mapping; |
| 722 | struct extent_state *cached_state = NULL; | ||
| 682 | u64 start = 0; | 723 | u64 start = 0; |
| 683 | u64 end; | 724 | u64 end; |
| 684 | 725 | ||
| 685 | while (!find_first_extent_bit(dirty_pages, start, &start, &end, | 726 | while (!find_first_extent_bit(dirty_pages, start, &start, &end, |
| 686 | EXTENT_NEED_WAIT)) { | 727 | EXTENT_NEED_WAIT, &cached_state)) { |
| 687 | clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS); | 728 | clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, |
| 729 | 0, 0, &cached_state, GFP_NOFS); | ||
| 688 | err = filemap_fdatawait_range(mapping, start, end); | 730 | err = filemap_fdatawait_range(mapping, start, end); |
| 689 | if (err) | 731 | if (err) |
| 690 | werr = err; | 732 | werr = err; |
| @@ -955,6 +997,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 955 | struct btrfs_root *parent_root; | 997 | struct btrfs_root *parent_root; |
| 956 | struct btrfs_block_rsv *rsv; | 998 | struct btrfs_block_rsv *rsv; |
| 957 | struct inode *parent_inode; | 999 | struct inode *parent_inode; |
| 1000 | struct btrfs_path *path; | ||
| 1001 | struct btrfs_dir_item *dir_item; | ||
| 958 | struct dentry *parent; | 1002 | struct dentry *parent; |
| 959 | struct dentry *dentry; | 1003 | struct dentry *dentry; |
| 960 | struct extent_buffer *tmp; | 1004 | struct extent_buffer *tmp; |
| @@ -967,18 +1011,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 967 | u64 root_flags; | 1011 | u64 root_flags; |
| 968 | uuid_le new_uuid; | 1012 | uuid_le new_uuid; |
| 969 | 1013 | ||
| 970 | rsv = trans->block_rsv; | 1014 | path = btrfs_alloc_path(); |
| 1015 | if (!path) { | ||
| 1016 | ret = pending->error = -ENOMEM; | ||
| 1017 | goto path_alloc_fail; | ||
| 1018 | } | ||
| 971 | 1019 | ||
| 972 | new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); | 1020 | new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); |
| 973 | if (!new_root_item) { | 1021 | if (!new_root_item) { |
| 974 | ret = pending->error = -ENOMEM; | 1022 | ret = pending->error = -ENOMEM; |
| 975 | goto fail; | 1023 | goto root_item_alloc_fail; |
| 976 | } | 1024 | } |
| 977 | 1025 | ||
| 978 | ret = btrfs_find_free_objectid(tree_root, &objectid); | 1026 | ret = btrfs_find_free_objectid(tree_root, &objectid); |
| 979 | if (ret) { | 1027 | if (ret) { |
| 980 | pending->error = ret; | 1028 | pending->error = ret; |
| 981 | goto fail; | 1029 | goto no_free_objectid; |
| 982 | } | 1030 | } |
| 983 | 1031 | ||
| 984 | btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); | 1032 | btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); |
| @@ -988,22 +1036,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 988 | to_reserve); | 1036 | to_reserve); |
| 989 | if (ret) { | 1037 | if (ret) { |
| 990 | pending->error = ret; | 1038 | pending->error = ret; |
| 991 | goto fail; | 1039 | goto no_free_objectid; |
| 992 | } | 1040 | } |
| 993 | } | 1041 | } |
| 994 | 1042 | ||
| 995 | ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid, | 1043 | ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid, |
| 996 | objectid, pending->inherit); | 1044 | objectid, pending->inherit); |
| 997 | kfree(pending->inherit); | ||
| 998 | if (ret) { | 1045 | if (ret) { |
| 999 | pending->error = ret; | 1046 | pending->error = ret; |
| 1000 | goto fail; | 1047 | goto no_free_objectid; |
| 1001 | } | 1048 | } |
| 1002 | 1049 | ||
| 1003 | key.objectid = objectid; | 1050 | key.objectid = objectid; |
| 1004 | key.offset = (u64)-1; | 1051 | key.offset = (u64)-1; |
| 1005 | key.type = BTRFS_ROOT_ITEM_KEY; | 1052 | key.type = BTRFS_ROOT_ITEM_KEY; |
| 1006 | 1053 | ||
| 1054 | rsv = trans->block_rsv; | ||
| 1007 | trans->block_rsv = &pending->block_rsv; | 1055 | trans->block_rsv = &pending->block_rsv; |
| 1008 | 1056 | ||
| 1009 | dentry = pending->dentry; | 1057 | dentry = pending->dentry; |
| @@ -1017,24 +1065,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1017 | */ | 1065 | */ |
| 1018 | ret = btrfs_set_inode_index(parent_inode, &index); | 1066 | ret = btrfs_set_inode_index(parent_inode, &index); |
| 1019 | BUG_ON(ret); /* -ENOMEM */ | 1067 | BUG_ON(ret); /* -ENOMEM */ |
| 1020 | ret = btrfs_insert_dir_item(trans, parent_root, | 1068 | |
| 1021 | dentry->d_name.name, dentry->d_name.len, | 1069 | /* check if there is a file/dir which has the same name. */ |
| 1022 | parent_inode, &key, | 1070 | dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, |
| 1023 | BTRFS_FT_DIR, index); | 1071 | btrfs_ino(parent_inode), |
| 1024 | if (ret == -EEXIST) { | 1072 | dentry->d_name.name, |
| 1073 | dentry->d_name.len, 0); | ||
| 1074 | if (dir_item != NULL && !IS_ERR(dir_item)) { | ||
| 1025 | pending->error = -EEXIST; | 1075 | pending->error = -EEXIST; |
| 1026 | dput(parent); | ||
| 1027 | goto fail; | 1076 | goto fail; |
| 1028 | } else if (ret) { | 1077 | } else if (IS_ERR(dir_item)) { |
| 1029 | goto abort_trans_dput; | 1078 | ret = PTR_ERR(dir_item); |
| 1079 | btrfs_abort_transaction(trans, root, ret); | ||
| 1080 | goto fail; | ||
| 1030 | } | 1081 | } |
| 1031 | 1082 | btrfs_release_path(path); | |
| 1032 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | ||
| 1033 | dentry->d_name.len * 2); | ||
| 1034 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; | ||
| 1035 | ret = btrfs_update_inode(trans, parent_root, parent_inode); | ||
| 1036 | if (ret) | ||
| 1037 | goto abort_trans_dput; | ||
| 1038 | 1083 | ||
| 1039 | /* | 1084 | /* |
| 1040 | * pull in the delayed directory update | 1085 | * pull in the delayed directory update |
| @@ -1043,8 +1088,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1043 | * snapshot | 1088 | * snapshot |
| 1044 | */ | 1089 | */ |
| 1045 | ret = btrfs_run_delayed_items(trans, root); | 1090 | ret = btrfs_run_delayed_items(trans, root); |
| 1046 | if (ret) { /* Transaction aborted */ | 1091 | if (ret) { /* Transaction aborted */ |
| 1047 | dput(parent); | 1092 | btrfs_abort_transaction(trans, root, ret); |
| 1048 | goto fail; | 1093 | goto fail; |
| 1049 | } | 1094 | } |
| 1050 | 1095 | ||
| @@ -1079,7 +1124,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1079 | if (ret) { | 1124 | if (ret) { |
| 1080 | btrfs_tree_unlock(old); | 1125 | btrfs_tree_unlock(old); |
| 1081 | free_extent_buffer(old); | 1126 | free_extent_buffer(old); |
| 1082 | goto abort_trans_dput; | 1127 | btrfs_abort_transaction(trans, root, ret); |
| 1128 | goto fail; | ||
| 1083 | } | 1129 | } |
| 1084 | 1130 | ||
| 1085 | btrfs_set_lock_blocking(old); | 1131 | btrfs_set_lock_blocking(old); |
| @@ -1088,8 +1134,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1088 | /* clean up in any case */ | 1134 | /* clean up in any case */ |
| 1089 | btrfs_tree_unlock(old); | 1135 | btrfs_tree_unlock(old); |
| 1090 | free_extent_buffer(old); | 1136 | free_extent_buffer(old); |
| 1091 | if (ret) | 1137 | if (ret) { |
| 1092 | goto abort_trans_dput; | 1138 | btrfs_abort_transaction(trans, root, ret); |
| 1139 | goto fail; | ||
| 1140 | } | ||
| 1093 | 1141 | ||
| 1094 | /* see comments in should_cow_block() */ | 1142 | /* see comments in should_cow_block() */ |
| 1095 | root->force_cow = 1; | 1143 | root->force_cow = 1; |
| @@ -1101,8 +1149,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1101 | ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); | 1149 | ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); |
| 1102 | btrfs_tree_unlock(tmp); | 1150 | btrfs_tree_unlock(tmp); |
| 1103 | free_extent_buffer(tmp); | 1151 | free_extent_buffer(tmp); |
| 1104 | if (ret) | 1152 | if (ret) { |
| 1105 | goto abort_trans_dput; | 1153 | btrfs_abort_transaction(trans, root, ret); |
| 1154 | goto fail; | ||
| 1155 | } | ||
| 1106 | 1156 | ||
| 1107 | /* | 1157 | /* |
| 1108 | * insert root back/forward references | 1158 | * insert root back/forward references |
| @@ -1111,32 +1161,58 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 1111 | parent_root->root_key.objectid, | 1161 | parent_root->root_key.objectid, |
| 1112 | btrfs_ino(parent_inode), index, | 1162 | btrfs_ino(parent_inode), index, |
| 1113 | dentry->d_name.name, dentry->d_name.len); | 1163 | dentry->d_name.name, dentry->d_name.len); |
| 1114 | dput(parent); | 1164 | if (ret) { |
| 1115 | if (ret) | 1165 | btrfs_abort_transaction(trans, root, ret); |
| 1116 | goto fail; | 1166 | goto fail; |
| 1167 | } | ||
| 1117 | 1168 | ||
| 1118 | key.offset = (u64)-1; | 1169 | key.offset = (u64)-1; |
| 1119 | pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); | 1170 | pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); |
| 1120 | if (IS_ERR(pending->snap)) { | 1171 | if (IS_ERR(pending->snap)) { |
| 1121 | ret = PTR_ERR(pending->snap); | 1172 | ret = PTR_ERR(pending->snap); |
| 1122 | goto abort_trans; | 1173 | btrfs_abort_transaction(trans, root, ret); |
| 1174 | goto fail; | ||
| 1123 | } | 1175 | } |
| 1124 | 1176 | ||
| 1125 | ret = btrfs_reloc_post_snapshot(trans, pending); | 1177 | ret = btrfs_reloc_post_snapshot(trans, pending); |
| 1178 | if (ret) { | ||
| 1179 | btrfs_abort_transaction(trans, root, ret); | ||
| 1180 | goto fail; | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
| 1184 | if (ret) { | ||
| 1185 | btrfs_abort_transaction(trans, root, ret); | ||
| 1186 | goto fail; | ||
| 1187 | } | ||
| 1188 | |||
| 1189 | ret = btrfs_insert_dir_item(trans, parent_root, | ||
| 1190 | dentry->d_name.name, dentry->d_name.len, | ||
| 1191 | parent_inode, &key, | ||
| 1192 | BTRFS_FT_DIR, index); | ||
| 1193 | /* We have checked the name at the beginning, so it is impossible. */ | ||
| 1194 | BUG_ON(ret == -EEXIST); | ||
| 1195 | if (ret) { | ||
| 1196 | btrfs_abort_transaction(trans, root, ret); | ||
| 1197 | goto fail; | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | ||
| 1201 | dentry->d_name.len * 2); | ||
| 1202 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; | ||
| 1203 | ret = btrfs_update_inode(trans, parent_root, parent_inode); | ||
| 1126 | if (ret) | 1204 | if (ret) |
| 1127 | goto abort_trans; | 1205 | btrfs_abort_transaction(trans, root, ret); |
| 1128 | ret = 0; | ||
| 1129 | fail: | 1206 | fail: |
| 1130 | kfree(new_root_item); | 1207 | dput(parent); |
| 1131 | trans->block_rsv = rsv; | 1208 | trans->block_rsv = rsv; |
| 1209 | no_free_objectid: | ||
| 1210 | kfree(new_root_item); | ||
| 1211 | root_item_alloc_fail: | ||
| 1212 | btrfs_free_path(path); | ||
| 1213 | path_alloc_fail: | ||
| 1132 | btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); | 1214 | btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); |
| 1133 | return ret; | 1215 | return ret; |
| 1134 | |||
| 1135 | abort_trans_dput: | ||
| 1136 | dput(parent); | ||
| 1137 | abort_trans: | ||
| 1138 | btrfs_abort_transaction(trans, root, ret); | ||
| 1139 | goto fail; | ||
| 1140 | } | 1216 | } |
| 1141 | 1217 | ||
| 1142 | /* | 1218 | /* |
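The error handling above replaces the shared abort_trans/abort_trans_dput labels with a btrfs_abort_transaction() call at each failure site plus one staged unwind chain (fail, no_free_objectid, root_item_alloc_fail, path_alloc_fail) in which every label releases only what had already been acquired when the jump happened. A minimal stand-alone sketch of that staged-unwind shape, in plain userspace C with invented names rather than the btrfs code itself:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/*
 * Staged-unwind sketch in the style of the reworked
 * create_pending_snapshot(): each goto target releases only what was
 * already acquired when the jump happened.  All names are invented.
 */
static int build_thing(int fail_step)
{
	int ret = 0;
	char *path, *item;

	path = malloc(64);
	if (!path) {
		ret = -ENOMEM;
		goto path_alloc_fail;		/* nothing acquired yet */
	}

	item = malloc(64);
	if (!item) {
		ret = -ENOMEM;
		goto item_alloc_fail;		/* only 'path' needs freeing */
	}

	if (fail_step) {
		ret = -EIO;
		fprintf(stderr, "work failed: %d\n", ret);
		/* report at the failure site, then share the unwind below */
	}

	free(item);
item_alloc_fail:
	free(path);
path_alloc_fail:
	/* the real function releases pending->block_rsv at this point */
	return ret;
}

int main(void)
{
	printf("success run: %d\n", build_thing(0));
	printf("failing run: %d\n", build_thing(1));
	return 0;
}

Success and failure share the same exit path; the only thing the goto target encodes is how far setup had progressed.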
| @@ -1229,6 +1305,16 @@ static void do_async_commit(struct work_struct *work) | |||
| 1229 | struct btrfs_async_commit *ac = | 1305 | struct btrfs_async_commit *ac = |
| 1230 | container_of(work, struct btrfs_async_commit, work.work); | 1306 | container_of(work, struct btrfs_async_commit, work.work); |
| 1231 | 1307 | ||
| 1308 | /* | ||
| 1309 | * We've got freeze protection passed with the transaction. | ||
| 1310 | * Tell lockdep about it. | ||
| 1311 | */ | ||
| 1312 | rwsem_acquire_read( | ||
| 1313 | &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], | ||
| 1314 | 0, 1, _THIS_IP_); | ||
| 1315 | |||
| 1316 | current->journal_info = ac->newtrans; | ||
| 1317 | |||
| 1232 | btrfs_commit_transaction(ac->newtrans, ac->root); | 1318 | btrfs_commit_transaction(ac->newtrans, ac->root); |
| 1233 | kfree(ac); | 1319 | kfree(ac); |
| 1234 | } | 1320 | } |
| @@ -1258,6 +1344,14 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
| 1258 | atomic_inc(&cur_trans->use_count); | 1344 | atomic_inc(&cur_trans->use_count); |
| 1259 | 1345 | ||
| 1260 | btrfs_end_transaction(trans, root); | 1346 | btrfs_end_transaction(trans, root); |
| 1347 | |||
| 1348 | /* | ||
| 1349 | * Tell lockdep we've released the freeze rwsem, since the | ||
| 1350 | * async commit thread will be the one to unlock it. | ||
| 1351 | */ | ||
| 1352 | rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], | ||
| 1353 | 1, _THIS_IP_); | ||
| 1354 | |||
| 1261 | schedule_delayed_work(&ac->work, 0); | 1355 | schedule_delayed_work(&ac->work, 0); |
| 1262 | 1356 | ||
| 1263 | /* wait for transaction to start and unblock */ | 1357 | /* wait for transaction to start and unblock */ |
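The two hunks above describe a hand-off: the SB_FREEZE_FS level of sb->s_writers is taken by the thread that opened the transaction, but the delayed work item is what finishes the commit and drops that protection, so lockdep has to be told that ownership moved between threads. Condensed into one place below; this is kernel-context code that is not buildable on its own, and the two wrapper names are invented (the real callers are btrfs_commit_transaction_async() and do_async_commit()):

/* submitter: the worker, not this thread, will end the freeze
 * protection, so drop it from this thread's lockdep state */
static void queue_async_commit(struct btrfs_root *root,
			       struct btrfs_async_commit *ac)
{
	rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	schedule_delayed_work(&ac->work, 0);
}

/* worker: re-assert the freeze protection it inherited, then make the
 * transaction handle current before committing */
static void async_commit_worker(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	rwsem_acquire_read(
		&ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		0, 1, _THIS_IP_);
	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
}

Without the release/acquire pair, lockdep would complain that the submitter returned with the freeze rwsem apparently held and that the worker released a lock it never took.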
| @@ -1348,6 +1442,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1348 | */ | 1442 | */ |
| 1349 | cur_trans->delayed_refs.flushing = 1; | 1443 | cur_trans->delayed_refs.flushing = 1; |
| 1350 | 1444 | ||
| 1445 | if (!list_empty(&trans->new_bgs)) | ||
| 1446 | btrfs_create_pending_block_groups(trans, root); | ||
| 1447 | |||
| 1351 | ret = btrfs_run_delayed_refs(trans, root, 0); | 1448 | ret = btrfs_run_delayed_refs(trans, root, 0); |
| 1352 | if (ret) | 1449 | if (ret) |
| 1353 | goto cleanup_transaction; | 1450 | goto cleanup_transaction; |
| @@ -1403,7 +1500,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1403 | 1500 | ||
| 1404 | if (flush_on_commit || snap_pending) { | 1501 | if (flush_on_commit || snap_pending) { |
| 1405 | btrfs_start_delalloc_inodes(root, 1); | 1502 | btrfs_start_delalloc_inodes(root, 1); |
| 1406 | btrfs_wait_ordered_extents(root, 0, 1); | 1503 | btrfs_wait_ordered_extents(root, 1); |
| 1407 | } | 1504 | } |
| 1408 | 1505 | ||
| 1409 | ret = btrfs_run_delayed_items(trans, root); | 1506 | ret = btrfs_run_delayed_items(trans, root); |
| @@ -1456,13 +1553,28 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1456 | */ | 1553 | */ |
| 1457 | mutex_lock(&root->fs_info->reloc_mutex); | 1554 | mutex_lock(&root->fs_info->reloc_mutex); |
| 1458 | 1555 | ||
| 1459 | ret = btrfs_run_delayed_items(trans, root); | 1556 | /* |
| 1557 | * We needn't worry about the delayed items because we will | ||
| 1558 | * deal with them in create_pending_snapshot(), which is the | ||
| 1559 | * core function of the snapshot creation. | ||
| 1560 | */ | ||
| 1561 | ret = create_pending_snapshots(trans, root->fs_info); | ||
| 1460 | if (ret) { | 1562 | if (ret) { |
| 1461 | mutex_unlock(&root->fs_info->reloc_mutex); | 1563 | mutex_unlock(&root->fs_info->reloc_mutex); |
| 1462 | goto cleanup_transaction; | 1564 | goto cleanup_transaction; |
| 1463 | } | 1565 | } |
| 1464 | 1566 | ||
| 1465 | ret = create_pending_snapshots(trans, root->fs_info); | 1567 | /* |
| 1568 | * We insert the dir indexes of the snapshots and update the inode | ||
| 1569 | * of the snapshots' parents after the snapshot creation, so there | ||
| 1570 | * are some delayed items which are not dealt with. Now deal with | ||
| 1571 | * them. | ||
| 1572 | * | ||
| 1573 | * We needn't worry that this operation will corrupt the snapshots, | ||
| 1574 | * because all the trees which are snapshotted will be forced to COW | ||
| 1575 | * the nodes and leaves. | ||
| 1576 | */ | ||
| 1577 | ret = btrfs_run_delayed_items(trans, root); | ||
| 1466 | if (ret) { | 1578 | if (ret) { |
| 1467 | mutex_unlock(&root->fs_info->reloc_mutex); | 1579 | mutex_unlock(&root->fs_info->reloc_mutex); |
| 1468 | goto cleanup_transaction; | 1580 | goto cleanup_transaction; |
| @@ -1584,7 +1696,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1584 | put_transaction(cur_trans); | 1696 | put_transaction(cur_trans); |
| 1585 | put_transaction(cur_trans); | 1697 | put_transaction(cur_trans); |
| 1586 | 1698 | ||
| 1587 | sb_end_intwrite(root->fs_info->sb); | 1699 | if (trans->type < TRANS_JOIN_NOLOCK) |
| 1700 | sb_end_intwrite(root->fs_info->sb); | ||
| 1588 | 1701 | ||
| 1589 | trace_btrfs_transaction_commit(root); | 1702 | trace_btrfs_transaction_commit(root); |
| 1590 | 1703 | ||
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e8b8416c688b..80961947a6b2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -47,6 +47,14 @@ struct btrfs_transaction { | |||
| 47 | int aborted; | 47 | int aborted; |
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | enum btrfs_trans_type { | ||
| 51 | TRANS_START, | ||
| 52 | TRANS_JOIN, | ||
| 53 | TRANS_USERSPACE, | ||
| 54 | TRANS_JOIN_NOLOCK, | ||
| 55 | TRANS_ATTACH, | ||
| 56 | }; | ||
| 57 | |||
| 50 | struct btrfs_trans_handle { | 58 | struct btrfs_trans_handle { |
| 51 | u64 transid; | 59 | u64 transid; |
| 52 | u64 bytes_reserved; | 60 | u64 bytes_reserved; |
| @@ -58,8 +66,9 @@ struct btrfs_trans_handle { | |||
| 58 | struct btrfs_transaction *transaction; | 66 | struct btrfs_transaction *transaction; |
| 59 | struct btrfs_block_rsv *block_rsv; | 67 | struct btrfs_block_rsv *block_rsv; |
| 60 | struct btrfs_block_rsv *orig_rsv; | 68 | struct btrfs_block_rsv *orig_rsv; |
| 61 | int aborted; | 69 | short aborted; |
| 62 | int adding_csums; | 70 | short adding_csums; |
| 71 | enum btrfs_trans_type type; | ||
| 63 | /* | 72 | /* |
| 64 | * this root is only needed to validate that the root passed to | 73 | * this root is only needed to validate that the root passed to |
| 65 | * start_transaction is the same as the one passed to end_transaction. | 74 | * start_transaction is the same as the one passed to end_transaction. |
| @@ -68,6 +77,7 @@ struct btrfs_trans_handle { | |||
| 68 | struct btrfs_root *root; | 77 | struct btrfs_root *root; |
| 69 | struct seq_list delayed_ref_elem; | 78 | struct seq_list delayed_ref_elem; |
| 70 | struct list_head qgroup_ref_list; | 79 | struct list_head qgroup_ref_list; |
| 80 | struct list_head new_bgs; | ||
| 71 | }; | 81 | }; |
| 72 | 82 | ||
| 73 | struct btrfs_pending_snapshot { | 83 | struct btrfs_pending_snapshot { |
| @@ -88,16 +98,18 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, | |||
| 88 | { | 98 | { |
| 89 | BTRFS_I(inode)->last_trans = trans->transaction->transid; | 99 | BTRFS_I(inode)->last_trans = trans->transaction->transid; |
| 90 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | 100 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; |
| 101 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; | ||
| 91 | } | 102 | } |
| 92 | 103 | ||
| 93 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, | 104 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, |
| 94 | struct btrfs_root *root); | 105 | struct btrfs_root *root); |
| 95 | int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, | ||
| 96 | struct btrfs_root *root); | ||
| 97 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, | 106 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, |
| 98 | int num_items); | 107 | int num_items); |
| 108 | struct btrfs_trans_handle *btrfs_start_transaction_noflush( | ||
| 109 | struct btrfs_root *root, int num_items); | ||
| 99 | struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); | 110 | struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); |
| 100 | struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); | 111 | struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); |
| 112 | struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); | ||
| 101 | struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); | 113 | struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); |
| 102 | int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); | 114 | int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); |
| 103 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, | 115 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, |
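The new enum btrfs_trans_type is what makes the `if (trans->type < TRANS_JOIN_NOLOCK)` guard in the transaction.c hunk further above work: every handle type declared before TRANS_JOIN_NOLOCK took the sb_start_intwrite()-style protection and therefore has to end it at commit time, while TRANS_JOIN_NOLOCK and TRANS_ATTACH skip it. A small stand-alone demo of that ordering-dependent check; only the enumerator names and their order mirror the header, everything else is invented:

#include <stdio.h>

enum trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
	TRANS_ATTACH,
};

int main(void)
{
	static const char * const names[] = {
		"start", "join", "userspace", "join_nolock", "attach",
	};

	/* the same comparison the commit path uses on trans->type */
	for (int t = TRANS_START; t <= TRANS_ATTACH; t++)
		printf("%-12s -> %s\n", names[t],
		       t < TRANS_JOIN_NOLOCK ? "calls sb_end_intwrite()"
					     : "skips sb_end_intwrite()");
	return 0;
}

The obvious fragility is that inserting or reordering enumerators silently changes which handles are gated, so the declaration order is effectively part of the interface.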
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c86670f4f285..e9ebb472b28b 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -18,13 +18,16 @@ | |||
| 18 | 18 | ||
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/list_sort.h> | ||
| 21 | #include "ctree.h" | 22 | #include "ctree.h" |
| 22 | #include "transaction.h" | 23 | #include "transaction.h" |
| 23 | #include "disk-io.h" | 24 | #include "disk-io.h" |
| 24 | #include "locking.h" | 25 | #include "locking.h" |
| 25 | #include "print-tree.h" | 26 | #include "print-tree.h" |
| 27 | #include "backref.h" | ||
| 26 | #include "compat.h" | 28 | #include "compat.h" |
| 27 | #include "tree-log.h" | 29 | #include "tree-log.h" |
| 30 | #include "hash.h" | ||
| 28 | 31 | ||
| 29 | /* magic values for the inode_only field in btrfs_log_inode: | 32 | /* magic values for the inode_only field in btrfs_log_inode: |
| 30 | * | 33 | * |
| @@ -146,7 +149,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, | |||
| 146 | root->log_multiple_pids = true; | 149 | root->log_multiple_pids = true; |
| 147 | } | 150 | } |
| 148 | 151 | ||
| 149 | root->log_batch++; | 152 | atomic_inc(&root->log_batch); |
| 150 | atomic_inc(&root->log_writers); | 153 | atomic_inc(&root->log_writers); |
| 151 | mutex_unlock(&root->log_mutex); | 154 | mutex_unlock(&root->log_mutex); |
| 152 | return 0; | 155 | return 0; |
| @@ -165,7 +168,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, | |||
| 165 | err = ret; | 168 | err = ret; |
| 166 | } | 169 | } |
| 167 | mutex_unlock(&root->fs_info->tree_log_mutex); | 170 | mutex_unlock(&root->fs_info->tree_log_mutex); |
| 168 | root->log_batch++; | 171 | atomic_inc(&root->log_batch); |
| 169 | atomic_inc(&root->log_writers); | 172 | atomic_inc(&root->log_writers); |
| 170 | mutex_unlock(&root->log_mutex); | 173 | mutex_unlock(&root->log_mutex); |
| 171 | return err; | 174 | return err; |
| @@ -484,7 +487,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
| 484 | int found_type; | 487 | int found_type; |
| 485 | u64 mask = root->sectorsize - 1; | 488 | u64 mask = root->sectorsize - 1; |
| 486 | u64 extent_end; | 489 | u64 extent_end; |
| 487 | u64 alloc_hint; | ||
| 488 | u64 start = key->offset; | 490 | u64 start = key->offset; |
| 489 | u64 saved_nbytes; | 491 | u64 saved_nbytes; |
| 490 | struct btrfs_file_extent_item *item; | 492 | struct btrfs_file_extent_item *item; |
| @@ -550,8 +552,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
| 550 | 552 | ||
| 551 | saved_nbytes = inode_get_bytes(inode); | 553 | saved_nbytes = inode_get_bytes(inode); |
| 552 | /* drop any overlapping extents */ | 554 | /* drop any overlapping extents */ |
| 553 | ret = btrfs_drop_extents(trans, inode, start, extent_end, | 555 | ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); |
| 554 | &alloc_hint, 1); | ||
| 555 | BUG_ON(ret); | 556 | BUG_ON(ret); |
| 556 | 557 | ||
| 557 | if (found_type == BTRFS_FILE_EXTENT_REG || | 558 | if (found_type == BTRFS_FILE_EXTENT_REG || |
| @@ -744,6 +745,7 @@ out: | |||
| 744 | */ | 745 | */ |
| 745 | static noinline int backref_in_log(struct btrfs_root *log, | 746 | static noinline int backref_in_log(struct btrfs_root *log, |
| 746 | struct btrfs_key *key, | 747 | struct btrfs_key *key, |
| 748 | u64 ref_objectid, | ||
| 747 | char *name, int namelen) | 749 | char *name, int namelen) |
| 748 | { | 750 | { |
| 749 | struct btrfs_path *path; | 751 | struct btrfs_path *path; |
| @@ -764,8 +766,17 @@ static noinline int backref_in_log(struct btrfs_root *log, | |||
| 764 | if (ret != 0) | 766 | if (ret != 0) |
| 765 | goto out; | 767 | goto out; |
| 766 | 768 | ||
| 767 | item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); | ||
| 768 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); | 769 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); |
| 770 | |||
| 771 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | ||
| 772 | if (btrfs_find_name_in_ext_backref(path, ref_objectid, | ||
| 773 | name, namelen, NULL)) | ||
| 774 | match = 1; | ||
| 775 | |||
| 776 | goto out; | ||
| 777 | } | ||
| 778 | |||
| 779 | item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); | ||
| 769 | ptr_end = ptr + item_size; | 780 | ptr_end = ptr + item_size; |
| 770 | while (ptr < ptr_end) { | 781 | while (ptr < ptr_end) { |
| 771 | ref = (struct btrfs_inode_ref *)ptr; | 782 | ref = (struct btrfs_inode_ref *)ptr; |
| @@ -786,91 +797,42 @@ out: | |||
| 786 | return match; | 797 | return match; |
| 787 | } | 798 | } |
| 788 | 799 | ||
| 789 | 800 | static inline int __add_inode_ref(struct btrfs_trans_handle *trans, | |
| 790 | /* | ||
| 791 | * replay one inode back reference item found in the log tree. | ||
| 792 | * eb, slot and key refer to the buffer and key found in the log tree. | ||
| 793 | * root is the destination we are replaying into, and path is for temp | ||
| 794 | * use by this function. (it should be released on return). | ||
| 795 | */ | ||
| 796 | static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | ||
| 797 | struct btrfs_root *root, | 801 | struct btrfs_root *root, |
| 798 | struct btrfs_root *log, | ||
| 799 | struct btrfs_path *path, | 802 | struct btrfs_path *path, |
| 800 | struct extent_buffer *eb, int slot, | 803 | struct btrfs_root *log_root, |
| 801 | struct btrfs_key *key) | 804 | struct inode *dir, struct inode *inode, |
| 805 | struct extent_buffer *eb, | ||
| 806 | u64 inode_objectid, u64 parent_objectid, | ||
| 807 | u64 ref_index, char *name, int namelen, | ||
| 808 | int *search_done) | ||
| 802 | { | 809 | { |
| 803 | struct btrfs_inode_ref *ref; | ||
| 804 | struct btrfs_dir_item *di; | ||
| 805 | struct inode *dir; | ||
| 806 | struct inode *inode; | ||
| 807 | unsigned long ref_ptr; | ||
| 808 | unsigned long ref_end; | ||
| 809 | char *name; | ||
| 810 | int namelen; | ||
| 811 | int ret; | 810 | int ret; |
| 812 | int search_done = 0; | 811 | char *victim_name; |
| 813 | 812 | int victim_name_len; | |
| 814 | /* | 813 | struct extent_buffer *leaf; |
| 815 | * it is possible that we didn't log all the parent directories | 814 | struct btrfs_dir_item *di; |
| 816 | * for a given inode. If we don't find the dir, just don't | 815 | struct btrfs_key search_key; |
| 817 | * copy the back ref in. The link count fixup code will take | 816 | struct btrfs_inode_extref *extref; |
| 818 | * care of the rest | ||
| 819 | */ | ||
| 820 | dir = read_one_inode(root, key->offset); | ||
| 821 | if (!dir) | ||
| 822 | return -ENOENT; | ||
| 823 | |||
| 824 | inode = read_one_inode(root, key->objectid); | ||
| 825 | if (!inode) { | ||
| 826 | iput(dir); | ||
| 827 | return -EIO; | ||
| 828 | } | ||
| 829 | |||
| 830 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | ||
| 831 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | ||
| 832 | 817 | ||
| 833 | again: | 818 | again: |
| 834 | ref = (struct btrfs_inode_ref *)ref_ptr; | 819 | /* Search old style refs */ |
| 835 | 820 | search_key.objectid = inode_objectid; | |
| 836 | namelen = btrfs_inode_ref_name_len(eb, ref); | 821 | search_key.type = BTRFS_INODE_REF_KEY; |
| 837 | name = kmalloc(namelen, GFP_NOFS); | 822 | search_key.offset = parent_objectid; |
| 838 | BUG_ON(!name); | 823 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); |
| 839 | |||
| 840 | read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); | ||
| 841 | |||
| 842 | /* if we already have a perfect match, we're done */ | ||
| 843 | if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), | ||
| 844 | btrfs_inode_ref_index(eb, ref), | ||
| 845 | name, namelen)) { | ||
| 846 | goto out; | ||
| 847 | } | ||
| 848 | |||
| 849 | /* | ||
| 850 | * look for a conflicting back reference in the metadata. | ||
| 851 | * if we find one we have to unlink that name of the file | ||
| 852 | * before we add our new link. Later on, we overwrite any | ||
| 853 | * existing back reference, and we don't want to create | ||
| 854 | * dangling pointers in the directory. | ||
| 855 | */ | ||
| 856 | |||
| 857 | if (search_done) | ||
| 858 | goto insert; | ||
| 859 | |||
| 860 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | ||
| 861 | if (ret == 0) { | 824 | if (ret == 0) { |
| 862 | char *victim_name; | ||
| 863 | int victim_name_len; | ||
| 864 | struct btrfs_inode_ref *victim_ref; | 825 | struct btrfs_inode_ref *victim_ref; |
| 865 | unsigned long ptr; | 826 | unsigned long ptr; |
| 866 | unsigned long ptr_end; | 827 | unsigned long ptr_end; |
| 867 | struct extent_buffer *leaf = path->nodes[0]; | 828 | |
| 829 | leaf = path->nodes[0]; | ||
| 868 | 830 | ||
| 869 | /* are we trying to overwrite a back ref for the root directory | 831 | /* are we trying to overwrite a back ref for the root directory |
| 870 | * if so, just jump out, we're done | 832 | * if so, just jump out, we're done |
| 871 | */ | 833 | */ |
| 872 | if (key->objectid == key->offset) | 834 | if (search_key.objectid == search_key.offset) |
| 873 | goto out_nowrite; | 835 | return 1; |
| 874 | 836 | ||
| 875 | /* check all the names in this back reference to see | 837 | /* check all the names in this back reference to see |
| 876 | * if they are in the log. if so, we allow them to stay | 838 | * if they are in the log. if so, we allow them to stay |
| @@ -889,7 +851,9 @@ again: | |||
| 889 | (unsigned long)(victim_ref + 1), | 851 | (unsigned long)(victim_ref + 1), |
| 890 | victim_name_len); | 852 | victim_name_len); |
| 891 | 853 | ||
| 892 | if (!backref_in_log(log, key, victim_name, | 854 | if (!backref_in_log(log_root, &search_key, |
| 855 | parent_objectid, | ||
| 856 | victim_name, | ||
| 893 | victim_name_len)) { | 857 | victim_name_len)) { |
| 894 | btrfs_inc_nlink(inode); | 858 | btrfs_inc_nlink(inode); |
| 895 | btrfs_release_path(path); | 859 | btrfs_release_path(path); |
| @@ -897,9 +861,14 @@ again: | |||
| 897 | ret = btrfs_unlink_inode(trans, root, dir, | 861 | ret = btrfs_unlink_inode(trans, root, dir, |
| 898 | inode, victim_name, | 862 | inode, victim_name, |
| 899 | victim_name_len); | 863 | victim_name_len); |
| 864 | BUG_ON(ret); | ||
| 900 | btrfs_run_delayed_items(trans, root); | 865 | btrfs_run_delayed_items(trans, root); |
| 866 | kfree(victim_name); | ||
| 867 | *search_done = 1; | ||
| 868 | goto again; | ||
| 901 | } | 869 | } |
| 902 | kfree(victim_name); | 870 | kfree(victim_name); |
| 871 | |||
| 903 | ptr = (unsigned long)(victim_ref + 1) + victim_name_len; | 872 | ptr = (unsigned long)(victim_ref + 1) + victim_name_len; |
| 904 | } | 873 | } |
| 905 | BUG_ON(ret); | 874 | BUG_ON(ret); |
| @@ -908,14 +877,78 @@ again: | |||
| 908 | * NOTE: we have searched the root tree and checked the | 877 | * NOTE: we have searched the root tree and checked the |
| 909 | * corresponding ref, so we do not need to check it again. | 878 | * corresponding ref, so we do not need to check it again. |
| 910 | */ | 879 | */ |
| 911 | search_done = 1; | 880 | *search_done = 1; |
| 881 | } | ||
| 882 | btrfs_release_path(path); | ||
| 883 | |||
| 884 | /* Same search but for extended refs */ | ||
| 885 | extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, | ||
| 886 | inode_objectid, parent_objectid, 0, | ||
| 887 | 0); | ||
| 888 | if (!IS_ERR_OR_NULL(extref)) { | ||
| 889 | u32 item_size; | ||
| 890 | u32 cur_offset = 0; | ||
| 891 | unsigned long base; | ||
| 892 | struct inode *victim_parent; | ||
| 893 | |||
| 894 | leaf = path->nodes[0]; | ||
| 895 | |||
| 896 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
| 897 | base = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
| 898 | |||
| 899 | while (cur_offset < item_size) { | ||
| 900 | extref = (struct btrfs_inode_extref *)base + cur_offset; | ||
| 901 | |||
| 902 | victim_name_len = btrfs_inode_extref_name_len(leaf, extref); | ||
| 903 | |||
| 904 | if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) | ||
| 905 | goto next; | ||
| 906 | |||
| 907 | victim_name = kmalloc(victim_name_len, GFP_NOFS); | ||
| 908 | read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, | ||
| 909 | victim_name_len); | ||
| 910 | |||
| 911 | search_key.objectid = inode_objectid; | ||
| 912 | search_key.type = BTRFS_INODE_EXTREF_KEY; | ||
| 913 | search_key.offset = btrfs_extref_hash(parent_objectid, | ||
| 914 | victim_name, | ||
| 915 | victim_name_len); | ||
| 916 | ret = 0; | ||
| 917 | if (!backref_in_log(log_root, &search_key, | ||
| 918 | parent_objectid, victim_name, | ||
| 919 | victim_name_len)) { | ||
| 920 | ret = -ENOENT; | ||
| 921 | victim_parent = read_one_inode(root, | ||
| 922 | parent_objectid); | ||
| 923 | if (victim_parent) { | ||
| 924 | btrfs_inc_nlink(inode); | ||
| 925 | btrfs_release_path(path); | ||
| 926 | |||
| 927 | ret = btrfs_unlink_inode(trans, root, | ||
| 928 | victim_parent, | ||
| 929 | inode, | ||
| 930 | victim_name, | ||
| 931 | victim_name_len); | ||
| 932 | btrfs_run_delayed_items(trans, root); | ||
| 933 | } | ||
| 934 | BUG_ON(ret); | ||
| 935 | iput(victim_parent); | ||
| 936 | kfree(victim_name); | ||
| 937 | *search_done = 1; | ||
| 938 | goto again; | ||
| 939 | } | ||
| 940 | kfree(victim_name); | ||
| 941 | BUG_ON(ret); | ||
| 942 | next: | ||
| 943 | cur_offset += victim_name_len + sizeof(*extref); | ||
| 944 | } | ||
| 945 | *search_done = 1; | ||
| 912 | } | 946 | } |
| 913 | btrfs_release_path(path); | 947 | btrfs_release_path(path); |
| 914 | 948 | ||
| 915 | /* look for a conflicting sequence number */ | 949 | /* look for a conflicting sequence number */ |
| 916 | di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), | 950 | di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), |
| 917 | btrfs_inode_ref_index(eb, ref), | 951 | ref_index, name, namelen, 0); |
| 918 | name, namelen, 0); | ||
| 919 | if (di && !IS_ERR(di)) { | 952 | if (di && !IS_ERR(di)) { |
| 920 | ret = drop_one_dir_item(trans, root, path, dir, di); | 953 | ret = drop_one_dir_item(trans, root, path, dir, di); |
| 921 | BUG_ON(ret); | 954 | BUG_ON(ret); |
| @@ -931,25 +964,173 @@ again: | |||
| 931 | } | 964 | } |
| 932 | btrfs_release_path(path); | 965 | btrfs_release_path(path); |
| 933 | 966 | ||
| 934 | insert: | 967 | return 0; |
| 935 | /* insert our name */ | 968 | } |
| 936 | ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, | ||
| 937 | btrfs_inode_ref_index(eb, ref)); | ||
| 938 | BUG_ON(ret); | ||
| 939 | 969 | ||
| 940 | btrfs_update_inode(trans, root, inode); | 970 | static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, |
| 971 | u32 *namelen, char **name, u64 *index, | ||
| 972 | u64 *parent_objectid) | ||
| 973 | { | ||
| 974 | struct btrfs_inode_extref *extref; | ||
| 941 | 975 | ||
| 942 | out: | 976 | extref = (struct btrfs_inode_extref *)ref_ptr; |
| 943 | ref_ptr = (unsigned long)(ref + 1) + namelen; | 977 | |
| 944 | kfree(name); | 978 | *namelen = btrfs_inode_extref_name_len(eb, extref); |
| 945 | if (ref_ptr < ref_end) | 979 | *name = kmalloc(*namelen, GFP_NOFS); |
| 946 | goto again; | 980 | if (*name == NULL) |
| 981 | return -ENOMEM; | ||
| 982 | |||
| 983 | read_extent_buffer(eb, *name, (unsigned long)&extref->name, | ||
| 984 | *namelen); | ||
| 985 | |||
| 986 | *index = btrfs_inode_extref_index(eb, extref); | ||
| 987 | if (parent_objectid) | ||
| 988 | *parent_objectid = btrfs_inode_extref_parent(eb, extref); | ||
| 989 | |||
| 990 | return 0; | ||
| 991 | } | ||
| 992 | |||
| 993 | static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, | ||
| 994 | u32 *namelen, char **name, u64 *index) | ||
| 995 | { | ||
| 996 | struct btrfs_inode_ref *ref; | ||
| 997 | |||
| 998 | ref = (struct btrfs_inode_ref *)ref_ptr; | ||
| 999 | |||
| 1000 | *namelen = btrfs_inode_ref_name_len(eb, ref); | ||
| 1001 | *name = kmalloc(*namelen, GFP_NOFS); | ||
| 1002 | if (*name == NULL) | ||
| 1003 | return -ENOMEM; | ||
| 1004 | |||
| 1005 | read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); | ||
| 1006 | |||
| 1007 | *index = btrfs_inode_ref_index(eb, ref); | ||
| 1008 | |||
| 1009 | return 0; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | /* | ||
| 1013 | * replay one inode back reference item found in the log tree. | ||
| 1014 | * eb, slot and key refer to the buffer and key found in the log tree. | ||
| 1015 | * root is the destination we are replaying into, and path is for temp | ||
| 1016 | * use by this function. (it should be released on return). | ||
| 1017 | */ | ||
| 1018 | static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | ||
| 1019 | struct btrfs_root *root, | ||
| 1020 | struct btrfs_root *log, | ||
| 1021 | struct btrfs_path *path, | ||
| 1022 | struct extent_buffer *eb, int slot, | ||
| 1023 | struct btrfs_key *key) | ||
| 1024 | { | ||
| 1025 | struct inode *dir; | ||
| 1026 | struct inode *inode; | ||
| 1027 | unsigned long ref_ptr; | ||
| 1028 | unsigned long ref_end; | ||
| 1029 | char *name; | ||
| 1030 | int namelen; | ||
| 1031 | int ret; | ||
| 1032 | int search_done = 0; | ||
| 1033 | int log_ref_ver = 0; | ||
| 1034 | u64 parent_objectid; | ||
| 1035 | u64 inode_objectid; | ||
| 1036 | u64 ref_index = 0; | ||
| 1037 | int ref_struct_size; | ||
| 1038 | |||
| 1039 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | ||
| 1040 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | ||
| 1041 | |||
| 1042 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | ||
| 1043 | struct btrfs_inode_extref *r; | ||
| 1044 | |||
| 1045 | ref_struct_size = sizeof(struct btrfs_inode_extref); | ||
| 1046 | log_ref_ver = 1; | ||
| 1047 | r = (struct btrfs_inode_extref *)ref_ptr; | ||
| 1048 | parent_objectid = btrfs_inode_extref_parent(eb, r); | ||
| 1049 | } else { | ||
| 1050 | ref_struct_size = sizeof(struct btrfs_inode_ref); | ||
| 1051 | parent_objectid = key->offset; | ||
| 1052 | } | ||
| 1053 | inode_objectid = key->objectid; | ||
| 1054 | |||
| 1055 | /* | ||
| 1056 | * it is possible that we didn't log all the parent directories | ||
| 1057 | * for a given inode. If we don't find the dir, just don't | ||
| 1058 | * copy the back ref in. The link count fixup code will take | ||
| 1059 | * care of the rest | ||
| 1060 | */ | ||
| 1061 | dir = read_one_inode(root, parent_objectid); | ||
| 1062 | if (!dir) | ||
| 1063 | return -ENOENT; | ||
| 1064 | |||
| 1065 | inode = read_one_inode(root, inode_objectid); | ||
| 1066 | if (!inode) { | ||
| 1067 | iput(dir); | ||
| 1068 | return -EIO; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | while (ref_ptr < ref_end) { | ||
| 1072 | if (log_ref_ver) { | ||
| 1073 | ret = extref_get_fields(eb, ref_ptr, &namelen, &name, | ||
| 1074 | &ref_index, &parent_objectid); | ||
| 1075 | /* | ||
| 1076 | * parent object can change from one array | ||
| 1077 | * item to another. | ||
| 1078 | */ | ||
| 1079 | if (!dir) | ||
| 1080 | dir = read_one_inode(root, parent_objectid); | ||
| 1081 | if (!dir) | ||
| 1082 | return -ENOENT; | ||
| 1083 | } else { | ||
| 1084 | ret = ref_get_fields(eb, ref_ptr, &namelen, &name, | ||
| 1085 | &ref_index); | ||
| 1086 | } | ||
| 1087 | if (ret) | ||
| 1088 | return ret; | ||
| 1089 | |||
| 1090 | /* if we already have a perfect match, we're done */ | ||
| 1091 | if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), | ||
| 1092 | ref_index, name, namelen)) { | ||
| 1093 | /* | ||
| 1094 | * look for a conflicting back reference in the | ||
| 1095 | * metadata. if we find one we have to unlink that name | ||
| 1096 | * of the file before we add our new link. Later on, we | ||
| 1097 | * overwrite any existing back reference, and we don't | ||
| 1098 | * want to create dangling pointers in the directory. | ||
| 1099 | */ | ||
| 1100 | |||
| 1101 | if (!search_done) { | ||
| 1102 | ret = __add_inode_ref(trans, root, path, log, | ||
| 1103 | dir, inode, eb, | ||
| 1104 | inode_objectid, | ||
| 1105 | parent_objectid, | ||
| 1106 | ref_index, name, namelen, | ||
| 1107 | &search_done); | ||
| 1108 | if (ret == 1) | ||
| 1109 | goto out; | ||
| 1110 | BUG_ON(ret); | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | /* insert our name */ | ||
| 1114 | ret = btrfs_add_link(trans, dir, inode, name, namelen, | ||
| 1115 | 0, ref_index); | ||
| 1116 | BUG_ON(ret); | ||
| 1117 | |||
| 1118 | btrfs_update_inode(trans, root, inode); | ||
| 1119 | } | ||
| 1120 | |||
| 1121 | ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; | ||
| 1122 | kfree(name); | ||
| 1123 | if (log_ref_ver) { | ||
| 1124 | iput(dir); | ||
| 1125 | dir = NULL; | ||
| 1126 | } | ||
| 1127 | } | ||
| 947 | 1128 | ||
| 948 | /* finally write the back reference in the inode */ | 1129 | /* finally write the back reference in the inode */ |
| 949 | ret = overwrite_item(trans, root, path, eb, slot, key); | 1130 | ret = overwrite_item(trans, root, path, eb, slot, key); |
| 950 | BUG_ON(ret); | 1131 | BUG_ON(ret); |
| 951 | 1132 | ||
| 952 | out_nowrite: | 1133 | out: |
| 953 | btrfs_release_path(path); | 1134 | btrfs_release_path(path); |
| 954 | iput(dir); | 1135 | iput(dir); |
| 955 | iput(inode); | 1136 | iput(inode); |
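Both the classic btrfs_inode_ref and the new btrfs_inode_extref items pack a variable number of records back to back inside a single btree item, each record being a fixed header followed by name_len bytes of name; that is why the walkers above step with ref_ptr += ref_struct_size + namelen and cur_offset += victim_name_len + sizeof(*extref). A toy stand-alone version of that walk; the header layout here is invented and much smaller than the real on-disk structs:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Invented toy layout: a fixed header followed by name_len bytes of name,
 * repeated until the end of the item, like the inode ref/extref arrays. */
struct ref_hdr {
	uint64_t index;
	uint16_t name_len;
} __attribute__((packed));

static void walk_refs(const uint8_t *item, size_t item_size)
{
	size_t off = 0;

	while (off < item_size) {
		const struct ref_hdr *h = (const void *)(item + off);
		const char *name = (const char *)(h + 1);

		printf("index %llu name %.*s\n",
		       (unsigned long long)h->index, h->name_len, name);
		/* step over the header plus the inline name, the same move
		 * as ref_ptr += ref_struct_size + namelen above */
		off += sizeof(*h) + h->name_len;
	}
}

int main(void)
{
	uint8_t item[64];
	size_t off = 0;
	const char *names[] = { "a", "dir2" };

	for (int i = 0; i < 2; i++) {
		struct ref_hdr h = {
			.index = (uint64_t)(i + 2),
			.name_len = (uint16_t)strlen(names[i]),
		};

		memcpy(item + off, &h, sizeof(h));
		memcpy(item + off + sizeof(h), names[i], h.name_len);
		off += sizeof(h) + h.name_len;
	}
	walk_refs(item, off);
	return 0;
}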
| @@ -966,25 +1147,55 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans, | |||
| 966 | return ret; | 1147 | return ret; |
| 967 | } | 1148 | } |
| 968 | 1149 | ||
| 1150 | static int count_inode_extrefs(struct btrfs_root *root, | ||
| 1151 | struct inode *inode, struct btrfs_path *path) | ||
| 1152 | { | ||
| 1153 | int ret = 0; | ||
| 1154 | int name_len; | ||
| 1155 | unsigned int nlink = 0; | ||
| 1156 | u32 item_size; | ||
| 1157 | u32 cur_offset = 0; | ||
| 1158 | u64 inode_objectid = btrfs_ino(inode); | ||
| 1159 | u64 offset = 0; | ||
| 1160 | unsigned long ptr; | ||
| 1161 | struct btrfs_inode_extref *extref; | ||
| 1162 | struct extent_buffer *leaf; | ||
| 969 | 1163 | ||
| 970 | /* | 1164 | while (1) { |
| 971 | * There are a few corners where the link count of the file can't | 1165 | ret = btrfs_find_one_extref(root, inode_objectid, offset, path, |
| 972 | * be properly maintained during replay. So, instead of adding | 1166 | &extref, &offset); |
| 973 | * lots of complexity to the log code, we just scan the backrefs | 1167 | if (ret) |
| 974 | * for any file that has been through replay. | 1168 | break; |
| 975 | * | 1169 | |
| 976 | * The scan will update the link count on the inode to reflect the | 1170 | leaf = path->nodes[0]; |
| 977 | * number of back refs found. If it goes down to zero, the iput | 1171 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); |
| 978 | * will free the inode. | 1172 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); |
| 979 | */ | 1173 | |
| 980 | static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | 1174 | while (cur_offset < item_size) { |
| 981 | struct btrfs_root *root, | 1175 | extref = (struct btrfs_inode_extref *) (ptr + cur_offset); |
| 982 | struct inode *inode) | 1176 | name_len = btrfs_inode_extref_name_len(leaf, extref); |
| 1177 | |||
| 1178 | nlink++; | ||
| 1179 | |||
| 1180 | cur_offset += name_len + sizeof(*extref); | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | offset++; | ||
| 1184 | btrfs_release_path(path); | ||
| 1185 | } | ||
| 1186 | btrfs_release_path(path); | ||
| 1187 | |||
| 1188 | if (ret < 0) | ||
| 1189 | return ret; | ||
| 1190 | return nlink; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | static int count_inode_refs(struct btrfs_root *root, | ||
| 1194 | struct inode *inode, struct btrfs_path *path) | ||
| 983 | { | 1195 | { |
| 984 | struct btrfs_path *path; | ||
| 985 | int ret; | 1196 | int ret; |
| 986 | struct btrfs_key key; | 1197 | struct btrfs_key key; |
| 987 | u64 nlink = 0; | 1198 | unsigned int nlink = 0; |
| 988 | unsigned long ptr; | 1199 | unsigned long ptr; |
| 989 | unsigned long ptr_end; | 1200 | unsigned long ptr_end; |
| 990 | int name_len; | 1201 | int name_len; |
| @@ -994,10 +1205,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
| 994 | key.type = BTRFS_INODE_REF_KEY; | 1205 | key.type = BTRFS_INODE_REF_KEY; |
| 995 | key.offset = (u64)-1; | 1206 | key.offset = (u64)-1; |
| 996 | 1207 | ||
| 997 | path = btrfs_alloc_path(); | ||
| 998 | if (!path) | ||
| 999 | return -ENOMEM; | ||
| 1000 | |||
| 1001 | while (1) { | 1208 | while (1) { |
| 1002 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 1209 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 1003 | if (ret < 0) | 1210 | if (ret < 0) |
| @@ -1031,6 +1238,50 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
| 1031 | btrfs_release_path(path); | 1238 | btrfs_release_path(path); |
| 1032 | } | 1239 | } |
| 1033 | btrfs_release_path(path); | 1240 | btrfs_release_path(path); |
| 1241 | |||
| 1242 | return nlink; | ||
| 1243 | } | ||
| 1244 | |||
| 1245 | /* | ||
| 1246 | * There are a few corners where the link count of the file can't | ||
| 1247 | * be properly maintained during replay. So, instead of adding | ||
| 1248 | * lots of complexity to the log code, we just scan the backrefs | ||
| 1249 | * for any file that has been through replay. | ||
| 1250 | * | ||
| 1251 | * The scan will update the link count on the inode to reflect the | ||
| 1252 | * number of back refs found. If it goes down to zero, the iput | ||
| 1253 | * will free the inode. | ||
| 1254 | */ | ||
| 1255 | static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | ||
| 1256 | struct btrfs_root *root, | ||
| 1257 | struct inode *inode) | ||
| 1258 | { | ||
| 1259 | struct btrfs_path *path; | ||
| 1260 | int ret; | ||
| 1261 | u64 nlink = 0; | ||
| 1262 | u64 ino = btrfs_ino(inode); | ||
| 1263 | |||
| 1264 | path = btrfs_alloc_path(); | ||
| 1265 | if (!path) | ||
| 1266 | return -ENOMEM; | ||
| 1267 | |||
| 1268 | ret = count_inode_refs(root, inode, path); | ||
| 1269 | if (ret < 0) | ||
| 1270 | goto out; | ||
| 1271 | |||
| 1272 | nlink = ret; | ||
| 1273 | |||
| 1274 | ret = count_inode_extrefs(root, inode, path); | ||
| 1275 | if (ret == -ENOENT) | ||
| 1276 | ret = 0; | ||
| 1277 | |||
| 1278 | if (ret < 0) | ||
| 1279 | goto out; | ||
| 1280 | |||
| 1281 | nlink += ret; | ||
| 1282 | |||
| 1283 | ret = 0; | ||
| 1284 | |||
| 1034 | if (nlink != inode->i_nlink) { | 1285 | if (nlink != inode->i_nlink) { |
| 1035 | set_nlink(inode, nlink); | 1286 | set_nlink(inode, nlink); |
| 1036 | btrfs_update_inode(trans, root, inode); | 1287 | btrfs_update_inode(trans, root, inode); |
| @@ -1046,9 +1297,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
| 1046 | ret = insert_orphan_item(trans, root, ino); | 1297 | ret = insert_orphan_item(trans, root, ino); |
| 1047 | BUG_ON(ret); | 1298 | BUG_ON(ret); |
| 1048 | } | 1299 | } |
| 1049 | btrfs_free_path(path); | ||
| 1050 | 1300 | ||
| 1051 | return 0; | 1301 | out: |
| 1302 | btrfs_free_path(path); | ||
| 1303 | return ret; | ||
| 1052 | } | 1304 | } |
| 1053 | 1305 | ||
| 1054 | static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, | 1306 | static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, |
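fixup_inode_link_count() now sums the results of the two scans, and both helpers follow the usual convention of returning a non-negative count or a negative errno; -ENOENT from the extref scan is folded to zero because an inode with no extended refs is the normal case. A stand-alone sketch of that flow with stub counters, using invented names:

#include <stdio.h>
#include <errno.h>

struct inode_stub { unsigned int i_nlink; };

/* Stand-ins for count_inode_refs()/count_inode_extrefs(): non-negative
 * count on success, negative errno on failure. */
static int count_old_refs(struct inode_stub *inode) { (void)inode; return 1; }
static int count_ext_refs(struct inode_stub *inode) { (void)inode; return -ENOENT; }

static int fixup_link_count(struct inode_stub *inode)
{
	unsigned int nlink;
	int ret;

	ret = count_old_refs(inode);
	if (ret < 0)
		return ret;
	nlink = ret;

	ret = count_ext_refs(inode);
	if (ret == -ENOENT)
		ret = 0;		/* no extref items at all is not an error */
	if (ret < 0)
		return ret;
	nlink += ret;

	if (nlink != inode->i_nlink)
		inode->i_nlink = nlink;	/* set_nlink() in the real code */
	return 0;
}

int main(void)
{
	struct inode_stub ino = { .i_nlink = 3 };
	int ret = fixup_link_count(&ino);

	printf("ret=%d nlink=%u\n", ret, ino.i_nlink);
	return 0;
}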
| @@ -1695,6 +1947,10 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, | |||
| 1695 | ret = add_inode_ref(wc->trans, root, log, path, | 1947 | ret = add_inode_ref(wc->trans, root, log, path, |
| 1696 | eb, i, &key); | 1948 | eb, i, &key); |
| 1697 | BUG_ON(ret && ret != -ENOENT); | 1949 | BUG_ON(ret && ret != -ENOENT); |
| 1950 | } else if (key.type == BTRFS_INODE_EXTREF_KEY) { | ||
| 1951 | ret = add_inode_ref(wc->trans, root, log, path, | ||
| 1952 | eb, i, &key); | ||
| 1953 | BUG_ON(ret && ret != -ENOENT); | ||
| 1698 | } else if (key.type == BTRFS_EXTENT_DATA_KEY) { | 1954 | } else if (key.type == BTRFS_EXTENT_DATA_KEY) { |
| 1699 | ret = replay_one_extent(wc->trans, root, path, | 1955 | ret = replay_one_extent(wc->trans, root, path, |
| 1700 | eb, i, &key); | 1956 | eb, i, &key); |
| @@ -2037,7 +2293,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2037 | if (atomic_read(&root->log_commit[(index1 + 1) % 2])) | 2293 | if (atomic_read(&root->log_commit[(index1 + 1) % 2])) |
| 2038 | wait_log_commit(trans, root, root->log_transid - 1); | 2294 | wait_log_commit(trans, root, root->log_transid - 1); |
| 2039 | while (1) { | 2295 | while (1) { |
| 2040 | unsigned long batch = root->log_batch; | 2296 | int batch = atomic_read(&root->log_batch); |
| 2041 | /* when we're on an ssd, just kick the log commit out */ | 2297 | /* when we're on an ssd, just kick the log commit out */ |
| 2042 | if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) { | 2298 | if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) { |
| 2043 | mutex_unlock(&root->log_mutex); | 2299 | mutex_unlock(&root->log_mutex); |
| @@ -2045,7 +2301,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2045 | mutex_lock(&root->log_mutex); | 2301 | mutex_lock(&root->log_mutex); |
| 2046 | } | 2302 | } |
| 2047 | wait_for_writer(trans, root); | 2303 | wait_for_writer(trans, root); |
| 2048 | if (batch == root->log_batch) | 2304 | if (batch == atomic_read(&root->log_batch)) |
| 2049 | break; | 2305 | break; |
| 2050 | } | 2306 | } |
| 2051 | 2307 | ||
| @@ -2074,7 +2330,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2074 | 2330 | ||
| 2075 | btrfs_set_root_node(&log->root_item, log->node); | 2331 | btrfs_set_root_node(&log->root_item, log->node); |
| 2076 | 2332 | ||
| 2077 | root->log_batch = 0; | ||
| 2078 | root->log_transid++; | 2333 | root->log_transid++; |
| 2079 | log->log_transid = root->log_transid; | 2334 | log->log_transid = root->log_transid; |
| 2080 | root->log_start_pid = 0; | 2335 | root->log_start_pid = 0; |
| @@ -2087,7 +2342,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2087 | mutex_unlock(&root->log_mutex); | 2342 | mutex_unlock(&root->log_mutex); |
| 2088 | 2343 | ||
| 2089 | mutex_lock(&log_root_tree->log_mutex); | 2344 | mutex_lock(&log_root_tree->log_mutex); |
| 2090 | log_root_tree->log_batch++; | 2345 | atomic_inc(&log_root_tree->log_batch); |
| 2091 | atomic_inc(&log_root_tree->log_writers); | 2346 | atomic_inc(&log_root_tree->log_writers); |
| 2092 | mutex_unlock(&log_root_tree->log_mutex); | 2347 | mutex_unlock(&log_root_tree->log_mutex); |
| 2093 | 2348 | ||
| @@ -2157,7 +2412,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2157 | btrfs_set_super_log_root_level(root->fs_info->super_for_commit, | 2412 | btrfs_set_super_log_root_level(root->fs_info->super_for_commit, |
| 2158 | btrfs_header_level(log_root_tree->node)); | 2413 | btrfs_header_level(log_root_tree->node)); |
| 2159 | 2414 | ||
| 2160 | log_root_tree->log_batch = 0; | ||
| 2161 | log_root_tree->log_transid++; | 2415 | log_root_tree->log_transid++; |
| 2162 | smp_mb(); | 2416 | smp_mb(); |
| 2163 | 2417 | ||
| @@ -2171,9 +2425,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2171 | * in and cause problems either. | 2425 | * in and cause problems either. |
| 2172 | */ | 2426 | */ |
| 2173 | btrfs_scrub_pause_super(root); | 2427 | btrfs_scrub_pause_super(root); |
| 2174 | write_ctree_super(trans, root->fs_info->tree_root, 1); | 2428 | ret = write_ctree_super(trans, root->fs_info->tree_root, 1); |
| 2175 | btrfs_scrub_continue_super(root); | 2429 | btrfs_scrub_continue_super(root); |
| 2176 | ret = 0; | 2430 | if (ret) { |
| 2431 | btrfs_abort_transaction(trans, root, ret); | ||
| 2432 | goto out_wake_log_root; | ||
| 2433 | } | ||
| 2177 | 2434 | ||
| 2178 | mutex_lock(&root->log_mutex); | 2435 | mutex_lock(&root->log_mutex); |
| 2179 | if (root->last_log_commit < log_transid) | 2436 | if (root->last_log_commit < log_transid) |
| @@ -2209,7 +2466,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans, | |||
| 2209 | 2466 | ||
| 2210 | while (1) { | 2467 | while (1) { |
| 2211 | ret = find_first_extent_bit(&log->dirty_log_pages, | 2468 | ret = find_first_extent_bit(&log->dirty_log_pages, |
| 2212 | 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW); | 2469 | 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, |
| 2470 | NULL); | ||
| 2213 | if (ret) | 2471 | if (ret) |
| 2214 | break; | 2472 | break; |
| 2215 | 2473 | ||
| @@ -2646,6 +2904,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, | |||
| 2646 | int ret; | 2904 | int ret; |
| 2647 | struct btrfs_key key; | 2905 | struct btrfs_key key; |
| 2648 | struct btrfs_key found_key; | 2906 | struct btrfs_key found_key; |
| 2907 | int start_slot; | ||
| 2649 | 2908 | ||
| 2650 | key.objectid = objectid; | 2909 | key.objectid = objectid; |
| 2651 | key.type = max_key_type; | 2910 | key.type = max_key_type; |
| @@ -2667,8 +2926,18 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, | |||
| 2667 | if (found_key.objectid != objectid) | 2926 | if (found_key.objectid != objectid) |
| 2668 | break; | 2927 | break; |
| 2669 | 2928 | ||
| 2670 | ret = btrfs_del_item(trans, log, path); | 2929 | found_key.offset = 0; |
| 2671 | if (ret) | 2930 | found_key.type = 0; |
| 2931 | ret = btrfs_bin_search(path->nodes[0], &found_key, 0, | ||
| 2932 | &start_slot); | ||
| 2933 | |||
| 2934 | ret = btrfs_del_items(trans, log, path, start_slot, | ||
| 2935 | path->slots[0] - start_slot + 1); | ||
| 2936 | /* | ||
| 2937 | * If start slot isn't 0 then we don't need to re-search, we've | ||
| 2938 | * found the last guy with the objectid in this tree. | ||
| 2939 | */ | ||
| 2940 | if (ret || start_slot != 0) | ||
| 2672 | break; | 2941 | break; |
| 2673 | btrfs_release_path(path); | 2942 | btrfs_release_path(path); |
| 2674 | } | 2943 | } |
| @@ -2678,14 +2947,64 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, | |||
| 2678 | return ret; | 2947 | return ret; |
| 2679 | } | 2948 | } |
| 2680 | 2949 | ||
| 2950 | static void fill_inode_item(struct btrfs_trans_handle *trans, | ||
| 2951 | struct extent_buffer *leaf, | ||
| 2952 | struct btrfs_inode_item *item, | ||
| 2953 | struct inode *inode, int log_inode_only) | ||
| 2954 | { | ||
| 2955 | btrfs_set_inode_uid(leaf, item, inode->i_uid); | ||
| 2956 | btrfs_set_inode_gid(leaf, item, inode->i_gid); | ||
| 2957 | btrfs_set_inode_mode(leaf, item, inode->i_mode); | ||
| 2958 | btrfs_set_inode_nlink(leaf, item, inode->i_nlink); | ||
| 2959 | |||
| 2960 | btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), | ||
| 2961 | inode->i_atime.tv_sec); | ||
| 2962 | btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), | ||
| 2963 | inode->i_atime.tv_nsec); | ||
| 2964 | |||
| 2965 | btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), | ||
| 2966 | inode->i_mtime.tv_sec); | ||
| 2967 | btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), | ||
| 2968 | inode->i_mtime.tv_nsec); | ||
| 2969 | |||
| 2970 | btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), | ||
| 2971 | inode->i_ctime.tv_sec); | ||
| 2972 | btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), | ||
| 2973 | inode->i_ctime.tv_nsec); | ||
| 2974 | |||
| 2975 | btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); | ||
| 2976 | |||
| 2977 | btrfs_set_inode_sequence(leaf, item, inode->i_version); | ||
| 2978 | btrfs_set_inode_transid(leaf, item, trans->transid); | ||
| 2979 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); | ||
| 2980 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); | ||
| 2981 | btrfs_set_inode_block_group(leaf, item, 0); | ||
| 2982 | |||
| 2983 | if (log_inode_only) { | ||
| 2984 | /* set the generation to zero so the recover code | ||
| 2985 | * can tell the difference between a logging | ||
| 2986 | * just to say 'this inode exists' and a logging | ||
| 2987 | * to say 'update this inode with these values' | ||
| 2988 | */ | ||
| 2989 | btrfs_set_inode_generation(leaf, item, 0); | ||
| 2990 | btrfs_set_inode_size(leaf, item, 0); | ||
| 2991 | } else { | ||
| 2992 | btrfs_set_inode_generation(leaf, item, | ||
| 2993 | BTRFS_I(inode)->generation); | ||
| 2994 | btrfs_set_inode_size(leaf, item, inode->i_size); | ||
| 2995 | } | ||
| 2996 | |||
| 2997 | } | ||
| 2998 | |||
| 2681 | static noinline int copy_items(struct btrfs_trans_handle *trans, | 2999 | static noinline int copy_items(struct btrfs_trans_handle *trans, |
| 2682 | struct btrfs_root *log, | 3000 | struct inode *inode, |
| 2683 | struct btrfs_path *dst_path, | 3001 | struct btrfs_path *dst_path, |
| 2684 | struct extent_buffer *src, | 3002 | struct extent_buffer *src, |
| 2685 | int start_slot, int nr, int inode_only) | 3003 | int start_slot, int nr, int inode_only) |
| 2686 | { | 3004 | { |
| 2687 | unsigned long src_offset; | 3005 | unsigned long src_offset; |
| 2688 | unsigned long dst_offset; | 3006 | unsigned long dst_offset; |
| 3007 | struct btrfs_root *log = BTRFS_I(inode)->root->log_root; | ||
| 2689 | struct btrfs_file_extent_item *extent; | 3008 | struct btrfs_file_extent_item *extent; |
| 2690 | struct btrfs_inode_item *inode_item; | 3009 | struct btrfs_inode_item *inode_item; |
| 2691 | int ret; | 3010 | int ret; |
| @@ -2694,6 +3013,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 2694 | char *ins_data; | 3013 | char *ins_data; |
| 2695 | int i; | 3014 | int i; |
| 2696 | struct list_head ordered_sums; | 3015 | struct list_head ordered_sums; |
| 3016 | int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | ||
| 2697 | 3017 | ||
| 2698 | INIT_LIST_HEAD(&ordered_sums); | 3018 | INIT_LIST_HEAD(&ordered_sums); |
| 2699 | 3019 | ||
| @@ -2722,29 +3042,23 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 2722 | 3042 | ||
| 2723 | src_offset = btrfs_item_ptr_offset(src, start_slot + i); | 3043 | src_offset = btrfs_item_ptr_offset(src, start_slot + i); |
| 2724 | 3044 | ||
| 2725 | copy_extent_buffer(dst_path->nodes[0], src, dst_offset, | 3045 | if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { |
| 2726 | src_offset, ins_sizes[i]); | ||
| 2727 | |||
| 2728 | if (inode_only == LOG_INODE_EXISTS && | ||
| 2729 | ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { | ||
| 2730 | inode_item = btrfs_item_ptr(dst_path->nodes[0], | 3046 | inode_item = btrfs_item_ptr(dst_path->nodes[0], |
| 2731 | dst_path->slots[0], | 3047 | dst_path->slots[0], |
| 2732 | struct btrfs_inode_item); | 3048 | struct btrfs_inode_item); |
| 2733 | btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0); | 3049 | fill_inode_item(trans, dst_path->nodes[0], inode_item, |
| 2734 | 3050 | inode, inode_only == LOG_INODE_EXISTS); | |
| 2735 | /* set the generation to zero so the recover code | 3051 | } else { |
| 2736 | * can tell the difference between an logging | 3052 | copy_extent_buffer(dst_path->nodes[0], src, dst_offset, |
| 2737 | * just to say 'this inode exists' and a logging | 3053 | src_offset, ins_sizes[i]); |
| 2738 | * to say 'update this inode with these values' | ||
| 2739 | */ | ||
| 2740 | btrfs_set_inode_generation(dst_path->nodes[0], | ||
| 2741 | inode_item, 0); | ||
| 2742 | } | 3054 | } |
| 3055 | |||
| 2743 | /* take a reference on file data extents so that truncates | 3056 | /* take a reference on file data extents so that truncates |
| 2744 | * or deletes of this inode don't have to relog the inode | 3057 | * or deletes of this inode don't have to relog the inode |
| 2745 | * again | 3058 | * again |
| 2746 | */ | 3059 | */ |
| 2747 | if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) { | 3060 | if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY && |
| 3061 | !skip_csum) { | ||
| 2748 | int found_type; | 3062 | int found_type; |
| 2749 | extent = btrfs_item_ptr(src, start_slot + i, | 3063 | extent = btrfs_item_ptr(src, start_slot + i, |
| 2750 | struct btrfs_file_extent_item); | 3064 | struct btrfs_file_extent_item); |
| @@ -2753,8 +3067,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 2753 | continue; | 3067 | continue; |
| 2754 | 3068 | ||
| 2755 | found_type = btrfs_file_extent_type(src, extent); | 3069 | found_type = btrfs_file_extent_type(src, extent); |
| 2756 | if (found_type == BTRFS_FILE_EXTENT_REG || | 3070 | if (found_type == BTRFS_FILE_EXTENT_REG) { |
| 2757 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
| 2758 | u64 ds, dl, cs, cl; | 3071 | u64 ds, dl, cs, cl; |
| 2759 | ds = btrfs_file_extent_disk_bytenr(src, | 3072 | ds = btrfs_file_extent_disk_bytenr(src, |
| 2760 | extent); | 3073 | extent); |
| @@ -2803,6 +3116,239 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 2803 | return ret; | 3116 | return ret; |
| 2804 | } | 3117 | } |
| 2805 | 3118 | ||
| 3119 | static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) | ||
| 3120 | { | ||
| 3121 | struct extent_map *em1, *em2; | ||
| 3122 | |||
| 3123 | em1 = list_entry(a, struct extent_map, list); | ||
| 3124 | em2 = list_entry(b, struct extent_map, list); | ||
| 3125 | |||
| 3126 | if (em1->start < em2->start) | ||
| 3127 | return -1; | ||
| 3128 | else if (em1->start > em2->start) | ||
| 3129 | return 1; | ||
| 3130 | return 0; | ||
| 3131 | } | ||
| 3132 | |||
| 3133 | struct log_args { | ||
| 3134 | struct extent_buffer *src; | ||
| 3135 | u64 next_offset; | ||
| 3136 | int start_slot; | ||
| 3137 | int nr; | ||
| 3138 | }; | ||
| 3139 | |||
| 3140 | static int log_one_extent(struct btrfs_trans_handle *trans, | ||
| 3141 | struct inode *inode, struct btrfs_root *root, | ||
| 3142 | struct extent_map *em, struct btrfs_path *path, | ||
| 3143 | struct btrfs_path *dst_path, struct log_args *args) | ||
| 3144 | { | ||
| 3145 | struct btrfs_root *log = root->log_root; | ||
| 3146 | struct btrfs_file_extent_item *fi; | ||
| 3147 | struct btrfs_key key; | ||
| 3148 | u64 start = em->mod_start; | ||
| 3149 | u64 search_start = start; | ||
| 3150 | u64 len = em->mod_len; | ||
| 3151 | u64 num_bytes; | ||
| 3152 | int nritems; | ||
| 3153 | int ret; | ||
| 3154 | |||
| 3155 | if (BTRFS_I(inode)->logged_trans == trans->transid) { | ||
| 3156 | ret = __btrfs_drop_extents(trans, log, inode, dst_path, start, | ||
| 3157 | start + len, NULL, 0); | ||
| 3158 | if (ret) | ||
| 3159 | return ret; | ||
| 3160 | } | ||
| 3161 | |||
| 3162 | while (len) { | ||
| 3163 | if (args->nr) | ||
| 3164 | goto next_slot; | ||
| 3165 | again: | ||
| 3166 | key.objectid = btrfs_ino(inode); | ||
| 3167 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
| 3168 | key.offset = search_start; | ||
| 3169 | |||
| 3170 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
| 3171 | if (ret < 0) | ||
| 3172 | return ret; | ||
| 3173 | |||
| 3174 | if (ret) { | ||
| 3175 | /* | ||
| 3176 | * A rare case where we can have an em for a section of a | ||
| 3177 | * larger extent so we need to make sure that this em | ||
| 3178 | * falls within the extent we've found. If not we just | ||
| 3179 | * bail and go back to ye-olde way of doing things but | ||
| 3180 | * it happens often enough in testing that we need to do | ||
| 3181 | * this dance to make sure. | ||
| 3182 | */ | ||
| 3183 | do { | ||
| 3184 | if (path->slots[0] == 0) { | ||
| 3185 | btrfs_release_path(path); | ||
| 3186 | if (search_start == 0) | ||
| 3187 | return -ENOENT; | ||
| 3188 | search_start--; | ||
| 3189 | goto again; | ||
| 3190 | } | ||
| 3191 | |||
| 3192 | path->slots[0]--; | ||
| 3193 | btrfs_item_key_to_cpu(path->nodes[0], &key, | ||
| 3194 | path->slots[0]); | ||
| 3195 | if (key.objectid != btrfs_ino(inode) || | ||
| 3196 | key.type != BTRFS_EXTENT_DATA_KEY) { | ||
| 3197 | btrfs_release_path(path); | ||
| 3198 | return -ENOENT; | ||
| 3199 | } | ||
| 3200 | } while (key.offset > start); | ||
| 3201 | |||
| 3202 | fi = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
| 3203 | struct btrfs_file_extent_item); | ||
| 3204 | num_bytes = btrfs_file_extent_num_bytes(path->nodes[0], | ||
| 3205 | fi); | ||
| 3206 | if (key.offset + num_bytes <= start) { | ||
| 3207 | btrfs_release_path(path); | ||
| 3208 | return -ENOENT; | ||
| 3209 | } | ||
| 3210 | } | ||
| 3211 | args->src = path->nodes[0]; | ||
| 3212 | next_slot: | ||
| 3213 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | ||
| 3214 | fi = btrfs_item_ptr(args->src, path->slots[0], | ||
| 3215 | struct btrfs_file_extent_item); | ||
| 3216 | if (args->nr && | ||
| 3217 | args->start_slot + args->nr == path->slots[0]) { | ||
| 3218 | args->nr++; | ||
| 3219 | } else if (args->nr) { | ||
| 3220 | ret = copy_items(trans, inode, dst_path, args->src, | ||
| 3221 | args->start_slot, args->nr, | ||
| 3222 | LOG_INODE_ALL); | ||
| 3223 | if (ret) | ||
| 3224 | return ret; | ||
| 3225 | args->nr = 1; | ||
| 3226 | args->start_slot = path->slots[0]; | ||
| 3227 | } else if (!args->nr) { | ||
| 3228 | args->nr = 1; | ||
| 3229 | args->start_slot = path->slots[0]; | ||
| 3230 | } | ||
| 3231 | nritems = btrfs_header_nritems(path->nodes[0]); | ||
| 3232 | path->slots[0]++; | ||
| 3233 | num_bytes = btrfs_file_extent_num_bytes(args->src, fi); | ||
| 3234 | if (len < num_bytes) { | ||
| 3235 | /* I _think_ this is ok, envision we write to a | ||
| 3236 | * preallocated space that is adjacent to a previously | ||
| 3237 | * written preallocated space that gets merged when we | ||
| 3238 | * mark this preallocated space written. If we do not | ||
| 3239 | * have the adjacent extent in cache then when we copy | ||
| 3240 | * this extent it could end up being larger than our EM | ||
| 3241 | * thinks it is, which is a-ok, so just set len to 0. | ||
| 3242 | */ | ||
| 3243 | len = 0; | ||
| 3244 | } else { | ||
| 3245 | len -= num_bytes; | ||
| 3246 | } | ||
| 3247 | start = key.offset + num_bytes; | ||
| 3248 | args->next_offset = start; | ||
| 3249 | search_start = start; | ||
| 3250 | |||
| 3251 | if (path->slots[0] < nritems) { | ||
| 3252 | if (len) | ||
| 3253 | goto next_slot; | ||
| 3254 | break; | ||
| 3255 | } | ||
| 3256 | |||
| 3257 | if (args->nr) { | ||
| 3258 | ret = copy_items(trans, inode, dst_path, args->src, | ||
| 3259 | args->start_slot, args->nr, | ||
| 3260 | LOG_INODE_ALL); | ||
| 3261 | if (ret) | ||
| 3262 | return ret; | ||
| 3263 | args->nr = 0; | ||
| 3264 | btrfs_release_path(path); | ||
| 3265 | } | ||
| 3266 | } | ||
| 3267 | |||
| 3268 | return 0; | ||
| 3269 | } | ||
| 3270 | |||
| 3271 | static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | ||
| 3272 | struct btrfs_root *root, | ||
| 3273 | struct inode *inode, | ||
| 3274 | struct btrfs_path *path, | ||
| 3275 | struct btrfs_path *dst_path) | ||
| 3276 | { | ||
| 3277 | struct log_args args; | ||
| 3278 | struct extent_map *em, *n; | ||
| 3279 | struct list_head extents; | ||
| 3280 | struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; | ||
| 3281 | u64 test_gen; | ||
| 3282 | int ret = 0; | ||
| 3283 | |||
| 3284 | INIT_LIST_HEAD(&extents); | ||
| 3285 | |||
| 3286 | memset(&args, 0, sizeof(args)); | ||
| 3287 | |||
| 3288 | write_lock(&tree->lock); | ||
| 3289 | test_gen = root->fs_info->last_trans_committed; | ||
| 3290 | |||
| 3291 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) { | ||
| 3292 | list_del_init(&em->list); | ||
| 3293 | if (em->generation <= test_gen) | ||
| 3294 | continue; | ||
| 3295 | /* Need a ref to keep it from getting evicted from cache */ | ||
| 3296 | atomic_inc(&em->refs); | ||
| 3297 | set_bit(EXTENT_FLAG_LOGGING, &em->flags); | ||
| 3298 | list_add_tail(&em->list, &extents); | ||
| 3299 | } | ||
| 3300 | |||
| 3301 | list_sort(NULL, &extents, extent_cmp); | ||
| 3302 | |||
| 3303 | while (!list_empty(&extents)) { | ||
| 3304 | em = list_entry(extents.next, struct extent_map, list); | ||
| 3305 | |||
| 3306 | list_del_init(&em->list); | ||
| 3307 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); | ||
| 3308 | |||
| 3309 | /* | ||
| 3310 | * If we had an error we just need to delete everybody from our | ||
| 3311 | * private list. | ||
| 3312 | */ | ||
| 3313 | if (ret) { | ||
| 3314 | free_extent_map(em); | ||
| 3315 | continue; | ||
| 3316 | } | ||
| 3317 | |||
| 3318 | write_unlock(&tree->lock); | ||
| 3319 | |||
| 3320 | /* | ||
| 3321 | * If the previous EM and the last extent we left off on aren't | ||
| 3322 | * sequential then we need to copy the items we have and redo | ||
| 3323 | * our search | ||
| 3324 | */ | ||
| 3325 | if (args.nr && em->mod_start != args.next_offset) { | ||
| 3326 | ret = copy_items(trans, inode, dst_path, args.src, | ||
| 3327 | args.start_slot, args.nr, | ||
| 3328 | LOG_INODE_ALL); | ||
| 3329 | if (ret) { | ||
| 3330 | free_extent_map(em); | ||
| 3331 | write_lock(&tree->lock); | ||
| 3332 | continue; | ||
| 3333 | } | ||
| 3334 | btrfs_release_path(path); | ||
| 3335 | args.nr = 0; | ||
| 3336 | } | ||
| 3337 | |||
| 3338 | ret = log_one_extent(trans, inode, root, em, path, dst_path, &args); | ||
| 3339 | free_extent_map(em); | ||
| 3340 | write_lock(&tree->lock); | ||
| 3341 | } | ||
| 3342 | WARN_ON(!list_empty(&extents)); | ||
| 3343 | write_unlock(&tree->lock); | ||
| 3344 | |||
| 3345 | if (!ret && args.nr) | ||
| 3346 | ret = copy_items(trans, inode, dst_path, args.src, | ||
| 3347 | args.start_slot, args.nr, LOG_INODE_ALL); | ||
| 3348 | btrfs_release_path(path); | ||
| 3349 | return ret; | ||
| 3350 | } | ||
| 3351 | |||
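Note: the new btrfs_log_changed_extents() hands its private list to list_sort() with an extent_cmp callback that is not part of this hunk. A minimal sketch of such a comparator, assuming it simply orders extent maps by mod_start (which is what the sequentiality check against args.next_offset above relies on):

	static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct extent_map *em1, *em2;

		/* order the logged extents by their starting file offset */
		em1 = list_entry(a, struct extent_map, list);
		em2 = list_entry(b, struct extent_map, list);

		if (em1->mod_start < em2->mod_start)
			return -1;
		else if (em1->mod_start > em2->mod_start)
			return 1;
		return 0;
	}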
| 2806 | /* log a single inode in the tree log. | 3352 | /* log a single inode in the tree log. |
| 2807 | * At least one parent directory for this inode must exist in the tree | 3353 | * At least one parent directory for this inode must exist in the tree |
| 2808 | * or be logged already. | 3354 | * or be logged already. |
| @@ -2832,6 +3378,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 2832 | int nritems; | 3378 | int nritems; |
| 2833 | int ins_start_slot = 0; | 3379 | int ins_start_slot = 0; |
| 2834 | int ins_nr; | 3380 | int ins_nr; |
| 3381 | bool fast_search = false; | ||
| 2835 | u64 ino = btrfs_ino(inode); | 3382 | u64 ino = btrfs_ino(inode); |
| 2836 | 3383 | ||
| 2837 | log = root->log_root; | 3384 | log = root->log_root; |
| @@ -2851,21 +3398,23 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 2851 | 3398 | ||
| 2852 | max_key.objectid = ino; | 3399 | max_key.objectid = ino; |
| 2853 | 3400 | ||
| 2854 | /* today the code can only do partial logging of directories */ | ||
| 2855 | if (!S_ISDIR(inode->i_mode)) | ||
| 2856 | inode_only = LOG_INODE_ALL; | ||
| 2857 | 3401 | ||
| 3402 | /* today the code can only do partial logging of directories */ | ||
| 2858 | if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode)) | 3403 | if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode)) |
| 2859 | max_key.type = BTRFS_XATTR_ITEM_KEY; | 3404 | max_key.type = BTRFS_XATTR_ITEM_KEY; |
| 2860 | else | 3405 | else |
| 2861 | max_key.type = (u8)-1; | 3406 | max_key.type = (u8)-1; |
| 2862 | max_key.offset = (u64)-1; | 3407 | max_key.offset = (u64)-1; |
| 2863 | 3408 | ||
| 2864 | ret = btrfs_commit_inode_delayed_items(trans, inode); | 3409 | /* Only run delayed items if we are a dir or a new file */ |
| 2865 | if (ret) { | 3410 | if (S_ISDIR(inode->i_mode) || |
| 2866 | btrfs_free_path(path); | 3411 | BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { |
| 2867 | btrfs_free_path(dst_path); | 3412 | ret = btrfs_commit_inode_delayed_items(trans, inode); |
| 2868 | return ret; | 3413 | if (ret) { |
| 3414 | btrfs_free_path(path); | ||
| 3415 | btrfs_free_path(dst_path); | ||
| 3416 | return ret; | ||
| 3417 | } | ||
| 2869 | } | 3418 | } |
| 2870 | 3419 | ||
| 2871 | mutex_lock(&BTRFS_I(inode)->log_mutex); | 3420 | mutex_lock(&BTRFS_I(inode)->log_mutex); |
| @@ -2881,7 +3430,16 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 2881 | max_key_type = BTRFS_XATTR_ITEM_KEY; | 3430 | max_key_type = BTRFS_XATTR_ITEM_KEY; |
| 2882 | ret = drop_objectid_items(trans, log, path, ino, max_key_type); | 3431 | ret = drop_objectid_items(trans, log, path, ino, max_key_type); |
| 2883 | } else { | 3432 | } else { |
| 2884 | ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); | 3433 | if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
| 3434 | &BTRFS_I(inode)->runtime_flags)) { | ||
| 3435 | ret = btrfs_truncate_inode_items(trans, log, | ||
| 3436 | inode, 0, 0); | ||
| 3437 | } else { | ||
| 3438 | fast_search = true; | ||
| 3439 | max_key.type = BTRFS_XATTR_ITEM_KEY; | ||
| 3440 | ret = drop_objectid_items(trans, log, path, ino, | ||
| 3441 | BTRFS_XATTR_ITEM_KEY); | ||
| 3442 | } | ||
| 2885 | } | 3443 | } |
| 2886 | if (ret) { | 3444 | if (ret) { |
| 2887 | err = ret; | 3445 | err = ret; |
| @@ -2912,7 +3470,7 @@ again: | |||
| 2912 | goto next_slot; | 3470 | goto next_slot; |
| 2913 | } | 3471 | } |
| 2914 | 3472 | ||
| 2915 | ret = copy_items(trans, log, dst_path, src, ins_start_slot, | 3473 | ret = copy_items(trans, inode, dst_path, src, ins_start_slot, |
| 2916 | ins_nr, inode_only); | 3474 | ins_nr, inode_only); |
| 2917 | if (ret) { | 3475 | if (ret) { |
| 2918 | err = ret; | 3476 | err = ret; |
| @@ -2930,7 +3488,7 @@ next_slot: | |||
| 2930 | goto again; | 3488 | goto again; |
| 2931 | } | 3489 | } |
| 2932 | if (ins_nr) { | 3490 | if (ins_nr) { |
| 2933 | ret = copy_items(trans, log, dst_path, src, | 3491 | ret = copy_items(trans, inode, dst_path, src, |
| 2934 | ins_start_slot, | 3492 | ins_start_slot, |
| 2935 | ins_nr, inode_only); | 3493 | ins_nr, inode_only); |
| 2936 | if (ret) { | 3494 | if (ret) { |
| @@ -2951,8 +3509,7 @@ next_slot: | |||
| 2951 | break; | 3509 | break; |
| 2952 | } | 3510 | } |
| 2953 | if (ins_nr) { | 3511 | if (ins_nr) { |
| 2954 | ret = copy_items(trans, log, dst_path, src, | 3512 | ret = copy_items(trans, inode, dst_path, src, ins_start_slot, |
| 2955 | ins_start_slot, | ||
| 2956 | ins_nr, inode_only); | 3513 | ins_nr, inode_only); |
| 2957 | if (ret) { | 3514 | if (ret) { |
| 2958 | err = ret; | 3515 | err = ret; |
| @@ -2960,7 +3517,24 @@ next_slot: | |||
| 2960 | } | 3517 | } |
| 2961 | ins_nr = 0; | 3518 | ins_nr = 0; |
| 2962 | } | 3519 | } |
| 2963 | WARN_ON(ins_nr); | 3520 | |
| 3521 | if (fast_search) { | ||
| 3522 | btrfs_release_path(path); | ||
| 3523 | btrfs_release_path(dst_path); | ||
| 3524 | ret = btrfs_log_changed_extents(trans, root, inode, path, | ||
| 3525 | dst_path); | ||
| 3526 | if (ret) { | ||
| 3527 | err = ret; | ||
| 3528 | goto out_unlock; | ||
| 3529 | } | ||
| 3530 | } else { | ||
| 3531 | struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; | ||
| 3532 | struct extent_map *em, *n; | ||
| 3533 | |||
| 3534 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) | ||
| 3535 | list_del_init(&em->list); | ||
| 3536 | } | ||
| 3537 | |||
| 2964 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { | 3538 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { |
| 2965 | btrfs_release_path(path); | 3539 | btrfs_release_path(path); |
| 2966 | btrfs_release_path(dst_path); | 3540 | btrfs_release_path(dst_path); |
| @@ -2971,6 +3545,7 @@ next_slot: | |||
| 2971 | } | 3545 | } |
| 2972 | } | 3546 | } |
| 2973 | BTRFS_I(inode)->logged_trans = trans->transid; | 3547 | BTRFS_I(inode)->logged_trans = trans->transid; |
| 3548 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; | ||
| 2974 | out_unlock: | 3549 | out_unlock: |
| 2975 | mutex_unlock(&BTRFS_I(inode)->log_mutex); | 3550 | mutex_unlock(&BTRFS_I(inode)->log_mutex); |
| 2976 | 3551 | ||
| @@ -3138,7 +3713,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | |||
| 3138 | end_trans: | 3713 | end_trans: |
| 3139 | dput(old_parent); | 3714 | dput(old_parent); |
| 3140 | if (ret < 0) { | 3715 | if (ret < 0) { |
| 3141 | BUG_ON(ret != -ENOSPC); | 3716 | WARN_ON(ret != -ENOSPC); |
| 3142 | root->fs_info->last_trans_log_full_commit = trans->transid; | 3717 | root->fs_info->last_trans_log_full_commit = trans->transid; |
| 3143 | ret = 1; | 3718 | ret = 1; |
| 3144 | } | 3719 | } |
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c index ab942f46b3dd..99be4c138db6 100644 --- a/fs/btrfs/ulist.c +++ b/fs/btrfs/ulist.c | |||
| @@ -143,14 +143,13 @@ EXPORT_SYMBOL(ulist_free); | |||
| 143 | * In case of allocation failure -ENOMEM is returned and the ulist stays | 143 | * In case of allocation failure -ENOMEM is returned and the ulist stays |
| 144 | * unaltered. | 144 | * unaltered. |
| 145 | */ | 145 | */ |
| 146 | int ulist_add(struct ulist *ulist, u64 val, unsigned long aux, | 146 | int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) |
| 147 | gfp_t gfp_mask) | ||
| 148 | { | 147 | { |
| 149 | return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); | 148 | return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); |
| 150 | } | 149 | } |
| 151 | 150 | ||
| 152 | int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux, | 151 | int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux, |
| 153 | unsigned long *old_aux, gfp_t gfp_mask) | 152 | u64 *old_aux, gfp_t gfp_mask) |
| 154 | { | 153 | { |
| 155 | int i; | 154 | int i; |
| 156 | 155 | ||
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h index 21bdc8ec8130..21a1963439c3 100644 --- a/fs/btrfs/ulist.h +++ b/fs/btrfs/ulist.h | |||
| @@ -33,7 +33,7 @@ struct ulist_iterator { | |||
| 33 | */ | 33 | */ |
| 34 | struct ulist_node { | 34 | struct ulist_node { |
| 35 | u64 val; /* value to store */ | 35 | u64 val; /* value to store */ |
| 36 | unsigned long aux; /* auxiliary value saved along with the val */ | 36 | u64 aux; /* auxiliary value saved along with the val */ |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | struct ulist { | 39 | struct ulist { |
| @@ -65,10 +65,9 @@ void ulist_fini(struct ulist *ulist); | |||
| 65 | void ulist_reinit(struct ulist *ulist); | 65 | void ulist_reinit(struct ulist *ulist); |
| 66 | struct ulist *ulist_alloc(gfp_t gfp_mask); | 66 | struct ulist *ulist_alloc(gfp_t gfp_mask); |
| 67 | void ulist_free(struct ulist *ulist); | 67 | void ulist_free(struct ulist *ulist); |
| 68 | int ulist_add(struct ulist *ulist, u64 val, unsigned long aux, | 68 | int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask); |
| 69 | gfp_t gfp_mask); | 69 | int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux, |
| 70 | int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux, | 70 | u64 *old_aux, gfp_t gfp_mask); |
| 71 | unsigned long *old_aux, gfp_t gfp_mask); | ||
| 72 | struct ulist_node *ulist_next(struct ulist *ulist, | 71 | struct ulist_node *ulist_next(struct ulist *ulist, |
| 73 | struct ulist_iterator *uiter); | 72 | struct ulist_iterator *uiter); |
| 74 | 73 | ||
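Note: with aux widened from unsigned long to u64, every ulist entry can carry a full 64-bit auxiliary value even on 32-bit hosts. A minimal usage sketch against the API above (root_objectid and the aux tag are placeholders, not taken from any caller in this series):

	struct ulist *roots;
	struct ulist_iterator uiter;
	struct ulist_node *node;

	roots = ulist_alloc(GFP_NOFS);
	if (!roots)
		return -ENOMEM;

	/* val identifies the entry, aux is a caller-defined 64-bit tag */
	if (ulist_add(roots, root_objectid, 0, GFP_NOFS) < 0) {
		ulist_free(roots);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(roots, &uiter)))
		pr_debug("val %llu aux %llu\n",
			 (unsigned long long)node->val,
			 (unsigned long long)node->aux);

	ulist_free(roots);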
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 88b969aeeb71..029b903a4ae3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -639,7 +639,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
| 639 | 639 | ||
| 640 | bdev = blkdev_get_by_path(device->name->str, flags, holder); | 640 | bdev = blkdev_get_by_path(device->name->str, flags, holder); |
| 641 | if (IS_ERR(bdev)) { | 641 | if (IS_ERR(bdev)) { |
| 642 | printk(KERN_INFO "open %s failed\n", device->name->str); | 642 | printk(KERN_INFO "btrfs: open %s failed\n", device->name->str); |
| 643 | goto error; | 643 | goto error; |
| 644 | } | 644 | } |
| 645 | filemap_write_and_wait(bdev->bd_inode->i_mapping); | 645 | filemap_write_and_wait(bdev->bd_inode->i_mapping); |
| @@ -1475,6 +1475,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
| 1475 | free_fs_devices(cur_devices); | 1475 | free_fs_devices(cur_devices); |
| 1476 | } | 1476 | } |
| 1477 | 1477 | ||
| 1478 | root->fs_info->num_tolerated_disk_barrier_failures = | ||
| 1479 | btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); | ||
| 1480 | |||
| 1478 | /* | 1481 | /* |
| 1479 | * at this point, the device is zero sized. We want to | 1482 | * at this point, the device is zero sized. We want to |
| 1480 | * remove it from the devices list and zero out the old super | 1483 | * remove it from the devices list and zero out the old super |
| @@ -1775,15 +1778,21 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
| 1775 | 1778 | ||
| 1776 | if (seeding_dev) { | 1779 | if (seeding_dev) { |
| 1777 | ret = init_first_rw_device(trans, root, device); | 1780 | ret = init_first_rw_device(trans, root, device); |
| 1778 | if (ret) | 1781 | if (ret) { |
| 1782 | btrfs_abort_transaction(trans, root, ret); | ||
| 1779 | goto error_trans; | 1783 | goto error_trans; |
| 1784 | } | ||
| 1780 | ret = btrfs_finish_sprout(trans, root); | 1785 | ret = btrfs_finish_sprout(trans, root); |
| 1781 | if (ret) | 1786 | if (ret) { |
| 1787 | btrfs_abort_transaction(trans, root, ret); | ||
| 1782 | goto error_trans; | 1788 | goto error_trans; |
| 1789 | } | ||
| 1783 | } else { | 1790 | } else { |
| 1784 | ret = btrfs_add_device(trans, root, device); | 1791 | ret = btrfs_add_device(trans, root, device); |
| 1785 | if (ret) | 1792 | if (ret) { |
| 1793 | btrfs_abort_transaction(trans, root, ret); | ||
| 1786 | goto error_trans; | 1794 | goto error_trans; |
| 1795 | } | ||
| 1787 | } | 1796 | } |
| 1788 | 1797 | ||
| 1789 | /* | 1798 | /* |
| @@ -1793,6 +1802,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
| 1793 | btrfs_clear_space_info_full(root->fs_info); | 1802 | btrfs_clear_space_info_full(root->fs_info); |
| 1794 | 1803 | ||
| 1795 | unlock_chunks(root); | 1804 | unlock_chunks(root); |
| 1805 | root->fs_info->num_tolerated_disk_barrier_failures = | ||
| 1806 | btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); | ||
| 1796 | ret = btrfs_commit_transaction(trans, root); | 1807 | ret = btrfs_commit_transaction(trans, root); |
| 1797 | 1808 | ||
| 1798 | if (seeding_dev) { | 1809 | if (seeding_dev) { |
| @@ -1814,7 +1825,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
| 1814 | 1825 | ||
| 1815 | error_trans: | 1826 | error_trans: |
| 1816 | unlock_chunks(root); | 1827 | unlock_chunks(root); |
| 1817 | btrfs_abort_transaction(trans, root, ret); | ||
| 1818 | btrfs_end_transaction(trans, root); | 1828 | btrfs_end_transaction(trans, root); |
| 1819 | rcu_string_free(device->name); | 1829 | rcu_string_free(device->name); |
| 1820 | kfree(device); | 1830 | kfree(device); |
| @@ -2804,6 +2814,26 @@ int btrfs_balance(struct btrfs_balance_control *bctl, | |||
| 2804 | } | 2814 | } |
| 2805 | } | 2815 | } |
| 2806 | 2816 | ||
| 2817 | if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { | ||
| 2818 | int num_tolerated_disk_barrier_failures; | ||
| 2819 | u64 target = bctl->sys.target; | ||
| 2820 | |||
| 2821 | num_tolerated_disk_barrier_failures = | ||
| 2822 | btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); | ||
| 2823 | if (num_tolerated_disk_barrier_failures > 0 && | ||
| 2824 | (target & | ||
| 2825 | (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 | | ||
| 2826 | BTRFS_AVAIL_ALLOC_BIT_SINGLE))) | ||
| 2827 | num_tolerated_disk_barrier_failures = 0; | ||
| 2828 | else if (num_tolerated_disk_barrier_failures > 1 && | ||
| 2829 | (target & | ||
| 2830 | (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))) | ||
| 2831 | num_tolerated_disk_barrier_failures = 1; | ||
| 2832 | |||
| 2833 | fs_info->num_tolerated_disk_barrier_failures = | ||
| 2834 | num_tolerated_disk_barrier_failures; | ||
| 2835 | } | ||
| 2836 | |||
| 2807 | ret = insert_balance_item(fs_info->tree_root, bctl); | 2837 | ret = insert_balance_item(fs_info->tree_root, bctl); |
| 2808 | if (ret && ret != -EEXIST) | 2838 | if (ret && ret != -EEXIST) |
| 2809 | goto out; | 2839 | goto out; |
| @@ -2836,6 +2866,11 @@ int btrfs_balance(struct btrfs_balance_control *bctl, | |||
| 2836 | __cancel_balance(fs_info); | 2866 | __cancel_balance(fs_info); |
| 2837 | } | 2867 | } |
| 2838 | 2868 | ||
| 2869 | if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { | ||
| 2870 | fs_info->num_tolerated_disk_barrier_failures = | ||
| 2871 | btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); | ||
| 2872 | } | ||
| 2873 | |||
| 2839 | wake_up(&fs_info->balance_wait_q); | 2874 | wake_up(&fs_info->balance_wait_q); |
| 2840 | 2875 | ||
| 2841 | return ret; | 2876 | return ret; |
| @@ -3608,12 +3643,16 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, | |||
| 3608 | ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, | 3643 | ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, |
| 3609 | &sys_chunk_size, &sys_stripe_size, | 3644 | &sys_chunk_size, &sys_stripe_size, |
| 3610 | sys_chunk_offset, alloc_profile); | 3645 | sys_chunk_offset, alloc_profile); |
| 3611 | if (ret) | 3646 | if (ret) { |
| 3612 | goto abort; | 3647 | btrfs_abort_transaction(trans, root, ret); |
| 3648 | goto out; | ||
| 3649 | } | ||
| 3613 | 3650 | ||
| 3614 | ret = btrfs_add_device(trans, fs_info->chunk_root, device); | 3651 | ret = btrfs_add_device(trans, fs_info->chunk_root, device); |
| 3615 | if (ret) | 3652 | if (ret) { |
| 3616 | goto abort; | 3653 | btrfs_abort_transaction(trans, root, ret); |
| 3654 | goto out; | ||
| 3655 | } | ||
| 3617 | 3656 | ||
| 3618 | /* | 3657 | /* |
| 3619 | * Modifying chunk tree needs allocating new blocks from both | 3658 | * Modifying chunk tree needs allocating new blocks from both |
| @@ -3623,19 +3662,19 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, | |||
| 3623 | */ | 3662 | */ |
| 3624 | ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, | 3663 | ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, |
| 3625 | chunk_size, stripe_size); | 3664 | chunk_size, stripe_size); |
| 3626 | if (ret) | 3665 | if (ret) { |
| 3627 | goto abort; | 3666 | btrfs_abort_transaction(trans, root, ret); |
| 3667 | goto out; | ||
| 3668 | } | ||
| 3628 | 3669 | ||
| 3629 | ret = __finish_chunk_alloc(trans, extent_root, sys_map, | 3670 | ret = __finish_chunk_alloc(trans, extent_root, sys_map, |
| 3630 | sys_chunk_offset, sys_chunk_size, | 3671 | sys_chunk_offset, sys_chunk_size, |
| 3631 | sys_stripe_size); | 3672 | sys_stripe_size); |
| 3632 | if (ret) | 3673 | if (ret) |
| 3633 | goto abort; | 3674 | btrfs_abort_transaction(trans, root, ret); |
| 3634 | 3675 | ||
| 3635 | return 0; | 3676 | out: |
| 3636 | 3677 | ||
| 3637 | abort: | ||
| 3638 | btrfs_abort_transaction(trans, root, ret); | ||
| 3639 | return ret; | 3678 | return ret; |
| 3640 | } | 3679 | } |
| 3641 | 3680 | ||
| @@ -3760,7 +3799,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | |||
| 3760 | read_unlock(&em_tree->lock); | 3799 | read_unlock(&em_tree->lock); |
| 3761 | 3800 | ||
| 3762 | if (!em) { | 3801 | if (!em) { |
| 3763 | printk(KERN_CRIT "unable to find logical %llu len %llu\n", | 3802 | printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n", |
| 3764 | (unsigned long long)logical, | 3803 | (unsigned long long)logical, |
| 3765 | (unsigned long long)*length); | 3804 | (unsigned long long)*length); |
| 3766 | BUG(); | 3805 | BUG(); |
| @@ -4217,7 +4256,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
| 4217 | 4256 | ||
| 4218 | total_devs = bbio->num_stripes; | 4257 | total_devs = bbio->num_stripes; |
| 4219 | if (map_length < length) { | 4258 | if (map_length < length) { |
| 4220 | printk(KERN_CRIT "mapping failed logical %llu bio len %llu " | 4259 | printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu " |
| 4221 | "len %llu\n", (unsigned long long)logical, | 4260 | "len %llu\n", (unsigned long long)logical, |
| 4222 | (unsigned long long)length, | 4261 | (unsigned long long)length, |
| 4223 | (unsigned long long)map_length); | 4262 | (unsigned long long)map_length); |
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 92c20654cc55..9acb846c3e7f 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c | |||
| @@ -97,7 +97,7 @@ static int zlib_compress_pages(struct list_head *ws, | |||
| 97 | *total_in = 0; | 97 | *total_in = 0; |
| 98 | 98 | ||
| 99 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { | 99 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { |
| 100 | printk(KERN_WARNING "deflateInit failed\n"); | 100 | printk(KERN_WARNING "btrfs: deflateInit failed\n"); |
| 101 | ret = -1; | 101 | ret = -1; |
| 102 | goto out; | 102 | goto out; |
| 103 | } | 103 | } |
| @@ -125,7 +125,7 @@ static int zlib_compress_pages(struct list_head *ws, | |||
| 125 | while (workspace->def_strm.total_in < len) { | 125 | while (workspace->def_strm.total_in < len) { |
| 126 | ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH); | 126 | ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH); |
| 127 | if (ret != Z_OK) { | 127 | if (ret != Z_OK) { |
| 128 | printk(KERN_DEBUG "btrfs deflate in loop returned %d\n", | 128 | printk(KERN_DEBUG "btrfs: deflate in loop returned %d\n", |
| 129 | ret); | 129 | ret); |
| 130 | zlib_deflateEnd(&workspace->def_strm); | 130 | zlib_deflateEnd(&workspace->def_strm); |
| 131 | ret = -1; | 131 | ret = -1; |
| @@ -252,7 +252,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, | |||
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { | 254 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { |
| 255 | printk(KERN_WARNING "inflateInit failed\n"); | 255 | printk(KERN_WARNING "btrfs: inflateInit failed\n"); |
| 256 | return -1; | 256 | return -1; |
| 257 | } | 257 | } |
| 258 | while (workspace->inf_strm.total_in < srclen) { | 258 | while (workspace->inf_strm.total_in < srclen) { |
| @@ -336,7 +336,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, | |||
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { | 338 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { |
| 339 | printk(KERN_WARNING "inflateInit failed\n"); | 339 | printk(KERN_WARNING "btrfs: inflateInit failed\n"); |
| 340 | return -1; | 340 | return -1; |
| 341 | } | 341 | } |
| 342 | 342 | ||
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 53cf2aabce87..71d5d0a5f6b2 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
| @@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len, | |||
| 203 | int i; | 203 | int i; |
| 204 | wchar_t wchar_to; /* needed to quiet sparse */ | 204 | wchar_t wchar_to; /* needed to quiet sparse */ |
| 205 | 205 | ||
| 206 | /* special case for utf8 to handle no plane0 chars */ | ||
| 207 | if (!strcmp(codepage->charset, "utf8")) { | ||
| 208 | /* | ||
| 209 | * convert utf8 -> utf16, we assume we have enough space | ||
| 210 | * as caller should have assumed conversion does not overflow | ||
| 211 | * in destination len is length in wchar_t units (16bits) | ||
| 212 | */ | ||
| 213 | i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN, | ||
| 214 | (wchar_t *) to, len); | ||
| 215 | |||
| 216 | /* if success terminate and exit */ | ||
| 217 | if (i >= 0) | ||
| 218 | goto success; | ||
| 219 | /* | ||
| 220 | * if fails fall back to UCS encoding as this | ||
| 221 | * function should not return negative values | ||
| 222 | * currently can fail only if source contains | ||
| 223 | * invalid encoded characters | ||
| 224 | */ | ||
| 225 | } | ||
| 226 | |||
| 206 | for (i = 0; len && *from; i++, from += charlen, len -= charlen) { | 227 | for (i = 0; len && *from; i++, from += charlen, len -= charlen) { |
| 207 | charlen = codepage->char2uni(from, len, &wchar_to); | 228 | charlen = codepage->char2uni(from, len, &wchar_to); |
| 208 | if (charlen < 1) { | 229 | if (charlen < 1) { |
| @@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len, | |||
| 215 | put_unaligned_le16(wchar_to, &to[i]); | 236 | put_unaligned_le16(wchar_to, &to[i]); |
| 216 | } | 237 | } |
| 217 | 238 | ||
| 239 | success: | ||
| 218 | put_unaligned_le16(0, &to[i]); | 240 | put_unaligned_le16(0, &to[i]); |
| 219 | return i; | 241 | return i; |
| 220 | } | 242 | } |
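Note: the utf8 fast path above pushes the whole source string through utf8s_to_utf16s() and only falls back to the per-character codepage loop when that helper rejects the input. The same call, shown standalone (the wrapper and buffer names are illustrative, not part of the patch):

	#include <linux/nls.h>

	/* Returns the number of 16-bit units written, or a negative value
	 * when the source contains an invalid UTF-8 sequence. */
	static int demo_utf8_to_utf16le(const char *src, int srclen,
					__le16 *dst, int dstlen)
	{
		return utf8s_to_utf16s(src, srclen, UTF16_LITTLE_ENDIAN,
				       (wchar_t *)dst, dstlen);
	}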
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2fdbe08a7a23..5c670b998ffb 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -67,6 +67,7 @@ enum { | |||
| 67 | /* Mount options that take no arguments */ | 67 | /* Mount options that take no arguments */ |
| 68 | Opt_user_xattr, Opt_nouser_xattr, | 68 | Opt_user_xattr, Opt_nouser_xattr, |
| 69 | Opt_forceuid, Opt_noforceuid, | 69 | Opt_forceuid, Opt_noforceuid, |
| 70 | Opt_forcegid, Opt_noforcegid, | ||
| 70 | Opt_noblocksend, Opt_noautotune, | 71 | Opt_noblocksend, Opt_noautotune, |
| 71 | Opt_hard, Opt_soft, Opt_perm, Opt_noperm, | 72 | Opt_hard, Opt_soft, Opt_perm, Opt_noperm, |
| 72 | Opt_mapchars, Opt_nomapchars, Opt_sfu, | 73 | Opt_mapchars, Opt_nomapchars, Opt_sfu, |
| @@ -117,6 +118,8 @@ static const match_table_t cifs_mount_option_tokens = { | |||
| 117 | { Opt_nouser_xattr, "nouser_xattr" }, | 118 | { Opt_nouser_xattr, "nouser_xattr" }, |
| 118 | { Opt_forceuid, "forceuid" }, | 119 | { Opt_forceuid, "forceuid" }, |
| 119 | { Opt_noforceuid, "noforceuid" }, | 120 | { Opt_noforceuid, "noforceuid" }, |
| 121 | { Opt_forcegid, "forcegid" }, | ||
| 122 | { Opt_noforcegid, "noforcegid" }, | ||
| 120 | { Opt_noblocksend, "noblocksend" }, | 123 | { Opt_noblocksend, "noblocksend" }, |
| 121 | { Opt_noautotune, "noautotune" }, | 124 | { Opt_noautotune, "noautotune" }, |
| 122 | { Opt_hard, "hard" }, | 125 | { Opt_hard, "hard" }, |
| @@ -1195,6 +1198,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1195 | case Opt_noforceuid: | 1198 | case Opt_noforceuid: |
| 1196 | override_uid = 0; | 1199 | override_uid = 0; |
| 1197 | break; | 1200 | break; |
| 1201 | case Opt_forcegid: | ||
| 1202 | override_gid = 1; | ||
| 1203 | break; | ||
| 1204 | case Opt_noforcegid: | ||
| 1205 | override_gid = 0; | ||
| 1206 | break; | ||
| 1198 | case Opt_noblocksend: | 1207 | case Opt_noblocksend: |
| 1199 | vol->noblocksnd = 1; | 1208 | vol->noblocksnd = 1; |
| 1200 | break; | 1209 | break; |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 2126ab185045..76d974c952fe 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
| @@ -183,6 +183,12 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec, | |||
| 183 | rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec], | 183 | rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec], |
| 184 | n_vec - first_vec, remaining); | 184 | n_vec - first_vec, remaining); |
| 185 | if (rc == -ENOSPC || rc == -EAGAIN) { | 185 | if (rc == -ENOSPC || rc == -EAGAIN) { |
| 186 | /* | ||
| 187 | * Catch if a low level driver returns -ENOSPC. This | ||
| 188 | * WARN_ON will be removed by 3.10 if no one reports | ||
| 189 | * seeing this. | ||
| 190 | */ | ||
| 191 | WARN_ON_ONCE(rc == -ENOSPC); | ||
| 186 | i++; | 192 | i++; |
| 187 | if (i >= 14 || (!server->noblocksnd && (i > 2))) { | 193 | if (i >= 14 || (!server->noblocksnd && (i > 2))) { |
| 188 | cERROR(1, "sends on sock %p stuck for 15 " | 194 | cERROR(1, "sends on sock %p stuck for 15 " |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index ff487954cd96..d3d8799e2187 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
| @@ -100,6 +100,10 @@ static int jffs2_sync_fs(struct super_block *sb, int wait) | |||
| 100 | { | 100 | { |
| 101 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | 101 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); |
| 102 | 102 | ||
| 103 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | ||
| 104 | cancel_delayed_work_sync(&c->wbuf_dwork); | ||
| 105 | #endif | ||
| 106 | |||
| 103 | mutex_lock(&c->alloc_sem); | 107 | mutex_lock(&c->alloc_sem); |
| 104 | jffs2_flush_wbuf_pad(c); | 108 | jffs2_flush_wbuf_pad(c); |
| 105 | mutex_unlock(&c->alloc_sem); | 109 | mutex_unlock(&c->alloc_sem); |
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 6f4529d3697f..a6597d60d76d 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
| @@ -1044,10 +1044,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
| 1044 | ops.datbuf = NULL; | 1044 | ops.datbuf = NULL; |
| 1045 | 1045 | ||
| 1046 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1046 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); |
| 1047 | if (ret || ops.oobretlen != ops.ooblen) { | 1047 | if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { |
| 1048 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", | 1048 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", |
| 1049 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | 1049 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
| 1050 | if (!ret) | 1050 | if (!ret || mtd_is_bitflip(ret)) |
| 1051 | ret = -EIO; | 1051 | ret = -EIO; |
| 1052 | return ret; | 1052 | return ret; |
| 1053 | } | 1053 | } |
| @@ -1086,10 +1086,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, | |||
| 1086 | ops.datbuf = NULL; | 1086 | ops.datbuf = NULL; |
| 1087 | 1087 | ||
| 1088 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1088 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); |
| 1089 | if (ret || ops.oobretlen != ops.ooblen) { | 1089 | if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { |
| 1090 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", | 1090 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", |
| 1091 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | 1091 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
| 1092 | if (!ret) | 1092 | if (!ret || mtd_is_bitflip(ret)) |
| 1093 | ret = -EIO; | 1093 | ret = -EIO; |
| 1094 | return ret; | 1094 | return ret; |
| 1095 | } | 1095 | } |
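Note: mtd_read()/mtd_read_oob() return -EUCLEAN when ECC had to correct bitflips but the data is intact, so treating every non-zero return as fatal discards good data. The pattern the two hunks above switch to, in isolation (variable names are placeholders):

	size_t retlen;
	int ret;

	ret = mtd_read(mtd, from, len, &retlen, buf);
	if (mtd_is_bitflip(ret))
		ret = 0;	/* bitflips were corrected; data in buf is valid */
	if (ret)
		return ret;	/* genuine read or uncorrectable ECC error */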
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 6ba45d2b99db..1cf1749440ac 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h | |||
| @@ -522,6 +522,8 @@ struct bcma_sflash { | |||
| 522 | u32 blocksize; | 522 | u32 blocksize; |
| 523 | u16 numblocks; | 523 | u16 numblocks; |
| 524 | u32 size; | 524 | u32 size; |
| 525 | |||
| 526 | struct mtd_info *mtd; | ||
| 525 | }; | 527 | }; |
| 526 | #endif | 528 | #endif |
| 527 | 529 | ||
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 2412e02d7c0f..e1c8c9e919ac 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
| @@ -19,6 +19,10 @@ | |||
| 19 | * @nr_channels: Number of channels supported by hardware (max 8) | 19 | * @nr_channels: Number of channels supported by hardware (max 8) |
| 20 | * @is_private: The device channels should be marked as private and not for | 20 | * @is_private: The device channels should be marked as private and not for |
| 21 | * by the general purpose DMA channel allocator. | 21 | * by the general purpose DMA channel allocator. |
| 22 | * @block_size: Maximum block size supported by the controller | ||
| 23 | * @nr_masters: Number of AHB masters supported by the controller | ||
| 24 | * @data_width: Maximum data width supported by hardware per AHB master | ||
| 25 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | ||
| 22 | */ | 26 | */ |
| 23 | struct dw_dma_platform_data { | 27 | struct dw_dma_platform_data { |
| 24 | unsigned int nr_channels; | 28 | unsigned int nr_channels; |
| @@ -29,6 +33,9 @@ struct dw_dma_platform_data { | |||
| 29 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | 33 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ |
| 30 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | 34 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ |
| 31 | unsigned char chan_priority; | 35 | unsigned char chan_priority; |
| 36 | unsigned short block_size; | ||
| 37 | unsigned char nr_masters; | ||
| 38 | unsigned char data_width[4]; | ||
| 32 | }; | 39 | }; |
| 33 | 40 | ||
| 34 | /* bursts size */ | 41 | /* bursts size */ |
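Note: platforms that instantiate the DesignWare controller now describe its geometry instead of relying on driver defaults. A hypothetical board entry filling the new fields (all numbers are placeholders, not taken from a real SoC):

	static struct dw_dma_platform_data demo_dw_dma_pdata = {
		.nr_channels	= 8,
		.chan_priority	= CHAN_PRIORITY_ASCENDING,
		.block_size	= 4095,		/* max transfer per descriptor */
		.nr_masters	= 2,		/* AHB masters wired up */
		.data_width	= { 3, 3 },	/* 3 -> 64-bit bus per master */
	};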
diff --git a/include/linux/edma.h b/include/linux/edma.h new file mode 100644 index 000000000000..a1307e7827e8 --- /dev/null +++ b/include/linux/edma.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * TI EDMA DMA engine driver | ||
| 3 | * | ||
| 4 | * Copyright 2012 Texas Instruments | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License as | ||
| 8 | * published by the Free Software Foundation version 2. | ||
| 9 | * | ||
| 10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 11 | * kind, whether express or implied; without even the implied warranty | ||
| 12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | #ifndef __LINUX_EDMA_H | ||
| 16 | #define __LINUX_EDMA_H | ||
| 17 | |||
| 18 | struct dma_chan; | ||
| 19 | |||
| 20 | #if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) | ||
| 21 | bool edma_filter_fn(struct dma_chan *, void *); | ||
| 22 | #else | ||
| 23 | static inline bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
| 24 | { | ||
| 25 | return false; | ||
| 26 | } | ||
| 27 | #endif | ||
| 28 | |||
| 29 | #endif | ||
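Note: slave drivers on an EDMA platform would request a channel through the generic dmaengine API and pass edma_filter_fn so the EDMA driver can pick the right channel; passing the channel number as the filter parameter follows the usual DaVinci convention, but this wrapper is only a sketch:

	#include <linux/dmaengine.h>
	#include <linux/edma.h>

	static struct dma_chan *demo_request_edma_chan(unsigned int *dma_ch)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* edma_filter_fn matches a channel against the id in *dma_ch */
		return dma_request_channel(mask, edma_filter_fn, dma_ch);
	}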
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 4b27f9f503e4..943550dfe9ea 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -57,6 +57,7 @@ struct mmc_ext_csd { | |||
| 57 | unsigned int sa_timeout; /* Units: 100ns */ | 57 | unsigned int sa_timeout; /* Units: 100ns */ |
| 58 | unsigned int generic_cmd6_time; /* Units: 10ms */ | 58 | unsigned int generic_cmd6_time; /* Units: 10ms */ |
| 59 | unsigned int power_off_longtime; /* Units: ms */ | 59 | unsigned int power_off_longtime; /* Units: ms */ |
| 60 | u8 power_off_notification; /* state */ | ||
| 60 | unsigned int hs_max_dtr; | 61 | unsigned int hs_max_dtr; |
| 61 | #define MMC_HIGH_26_MAX_DTR 26000000 | 62 | #define MMC_HIGH_26_MAX_DTR 26000000 |
| 62 | #define MMC_HIGH_52_MAX_DTR 52000000 | 63 | #define MMC_HIGH_52_MAX_DTR 52000000 |
| @@ -76,10 +77,13 @@ struct mmc_ext_csd { | |||
| 76 | bool hpi_en; /* HPI enablebit */ | 77 | bool hpi_en; /* HPI enablebit */ |
| 77 | bool hpi; /* HPI support bit */ | 78 | bool hpi; /* HPI support bit */ |
| 78 | unsigned int hpi_cmd; /* cmd used as HPI */ | 79 | unsigned int hpi_cmd; /* cmd used as HPI */ |
| 80 | bool bkops; /* background support bit */ | ||
| 81 | bool bkops_en; /* background enable bit */ | ||
| 79 | unsigned int data_sector_size; /* 512 bytes or 4KB */ | 82 | unsigned int data_sector_size; /* 512 bytes or 4KB */ |
| 80 | unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ | 83 | unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ |
| 81 | unsigned int boot_ro_lock; /* ro lock support */ | 84 | unsigned int boot_ro_lock; /* ro lock support */ |
| 82 | bool boot_ro_lockable; | 85 | bool boot_ro_lockable; |
| 86 | u8 raw_exception_status; /* 53 */ | ||
| 83 | u8 raw_partition_support; /* 160 */ | 87 | u8 raw_partition_support; /* 160 */ |
| 84 | u8 raw_erased_mem_count; /* 181 */ | 88 | u8 raw_erased_mem_count; /* 181 */ |
| 85 | u8 raw_ext_csd_structure; /* 194 */ | 89 | u8 raw_ext_csd_structure; /* 194 */ |
| @@ -93,6 +97,7 @@ struct mmc_ext_csd { | |||
| 93 | u8 raw_sec_erase_mult; /* 230 */ | 97 | u8 raw_sec_erase_mult; /* 230 */ |
| 94 | u8 raw_sec_feature_support;/* 231 */ | 98 | u8 raw_sec_feature_support;/* 231 */ |
| 95 | u8 raw_trim_mult; /* 232 */ | 99 | u8 raw_trim_mult; /* 232 */ |
| 100 | u8 raw_bkops_status; /* 246 */ | ||
| 96 | u8 raw_sectors[4]; /* 212 - 4 bytes */ | 101 | u8 raw_sectors[4]; /* 212 - 4 bytes */ |
| 97 | 102 | ||
| 98 | unsigned int feature_support; | 103 | unsigned int feature_support; |
| @@ -225,7 +230,7 @@ struct mmc_card { | |||
| 225 | #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ | 230 | #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ |
| 226 | #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ | 231 | #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ |
| 227 | #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ | 232 | #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ |
| 228 | #define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */ | 233 | #define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ |
| 229 | unsigned int quirks; /* card quirks */ | 234 | unsigned int quirks; /* card quirks */ |
| 230 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ | 235 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ |
| 231 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ | 236 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ |
| @@ -241,11 +246,6 @@ struct mmc_card { | |||
| 241 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ | 246 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ |
| 242 | #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ | 247 | #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ |
| 243 | /* byte mode */ | 248 | /* byte mode */ |
| 244 | unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ | ||
| 245 | #define MMC_NO_POWER_NOTIFICATION 0 | ||
| 246 | #define MMC_POWERED_ON 1 | ||
| 247 | #define MMC_POWEROFF_SHORT 2 | ||
| 248 | #define MMC_POWEROFF_LONG 3 | ||
| 249 | 249 | ||
| 250 | unsigned int erase_size; /* erase size in sectors */ | 250 | unsigned int erase_size; /* erase size in sectors */ |
| 251 | unsigned int erase_shift; /* if erase unit is power 2 */ | 251 | unsigned int erase_shift; /* if erase unit is power 2 */ |
| @@ -392,7 +392,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
| 392 | #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) | 392 | #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) |
| 393 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) | 393 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) |
| 394 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) | 394 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) |
| 395 | #define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP) | 395 | #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) |
| 396 | 396 | ||
| 397 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) | 397 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) |
| 398 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) | 398 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) |
| @@ -404,9 +404,9 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
| 404 | #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) | 404 | #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) |
| 405 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) | 405 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) |
| 406 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) | 406 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) |
| 407 | #define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP) | 407 | #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) |
| 408 | #define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS) | ||
| 408 | 409 | ||
| 409 | #define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP) | ||
| 410 | /* | 410 | /* |
| 411 | * Quirk add/remove for MMC products. | 411 | * Quirk add/remove for MMC products. |
| 412 | */ | 412 | */ |
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 1b431c728b9a..9b9cdafc7737 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h | |||
| @@ -134,6 +134,8 @@ struct mmc_host; | |||
| 134 | struct mmc_card; | 134 | struct mmc_card; |
| 135 | struct mmc_async_req; | 135 | struct mmc_async_req; |
| 136 | 136 | ||
| 137 | extern int mmc_stop_bkops(struct mmc_card *); | ||
| 138 | extern int mmc_read_bkops_status(struct mmc_card *); | ||
| 137 | extern struct mmc_async_req *mmc_start_req(struct mmc_host *, | 139 | extern struct mmc_async_req *mmc_start_req(struct mmc_host *, |
| 138 | struct mmc_async_req *, int *); | 140 | struct mmc_async_req *, int *); |
| 139 | extern int mmc_interrupt_hpi(struct mmc_card *); | 141 | extern int mmc_interrupt_hpi(struct mmc_card *); |
| @@ -142,6 +144,8 @@ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); | |||
| 142 | extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); | 144 | extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); |
| 143 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, | 145 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, |
| 144 | struct mmc_command *, int); | 146 | struct mmc_command *, int); |
| 147 | extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); | ||
| 148 | extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool); | ||
| 145 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); | 149 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); |
| 146 | 150 | ||
| 147 | #define MMC_ERASE_ARG 0x00000000 | 151 | #define MMC_ERASE_ARG 0x00000000 |
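Note: together with the card flags added in mmc/card.h, these helpers let the block layer start background operations when a card reports an urgent exception and stop them again before new I/O. A rough sketch of the intended call pattern (status stands for the R1 response word of a previous command; the surrounding request handling is omitted):

	/* Card raised an exception in its R1 status: let it run BKOPS */
	if (status & R1_EXCEPTION_EVENT)
		mmc_start_bkops(card, true);

	/* Before queueing new requests or suspending, interrupt BKOPS */
	if (mmc_card_doing_bkops(card)) {
		int err = mmc_stop_bkops(card);

		if (err)
			pr_warn("%s: failed to stop BKOPS: %d\n",
				mmc_hostname(card->host), err);
	}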
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 7a7ebd367cfd..7c6a1139d8fa 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
| @@ -78,6 +78,10 @@ struct mmc_data; | |||
| 78 | * @data_offset: Set the offset of DATA register according to VERID. | 78 | * @data_offset: Set the offset of DATA register according to VERID. |
| 79 | * @dev: Device associated with the MMC controller. | 79 | * @dev: Device associated with the MMC controller. |
| 80 | * @pdata: Platform data associated with the MMC controller. | 80 | * @pdata: Platform data associated with the MMC controller. |
| 81 | * @drv_data: Driver specific data for identified variant of the controller | ||
| 82 | * @priv: Implementation defined private data. | ||
| 83 | * @biu_clk: Pointer to bus interface unit clock instance. | ||
| 84 | * @ciu_clk: Pointer to card interface unit clock instance. | ||
| 81 | * @slot: Slots sharing this MMC controller. | 85 | * @slot: Slots sharing this MMC controller. |
| 82 | * @fifo_depth: depth of FIFO. | 86 | * @fifo_depth: depth of FIFO. |
| 83 | * @data_shift: log2 of FIFO item size. | 87 | * @data_shift: log2 of FIFO item size. |
| @@ -156,8 +160,12 @@ struct dw_mci { | |||
| 156 | u32 fifoth_val; | 160 | u32 fifoth_val; |
| 157 | u16 verid; | 161 | u16 verid; |
| 158 | u16 data_offset; | 162 | u16 data_offset; |
| 159 | struct device dev; | 163 | struct device *dev; |
| 160 | struct dw_mci_board *pdata; | 164 | struct dw_mci_board *pdata; |
| 165 | struct dw_mci_drv_data *drv_data; | ||
| 166 | void *priv; | ||
| 167 | struct clk *biu_clk; | ||
| 168 | struct clk *ciu_clk; | ||
| 161 | struct dw_mci_slot *slot[MAX_MCI_SLOTS]; | 169 | struct dw_mci_slot *slot[MAX_MCI_SLOTS]; |
| 162 | 170 | ||
| 163 | /* FIFO push and pull */ | 171 | /* FIFO push and pull */ |
| @@ -201,7 +209,8 @@ struct dw_mci_dma_ops { | |||
| 201 | #define DW_MCI_QUIRK_HIGHSPEED BIT(2) | 209 | #define DW_MCI_QUIRK_HIGHSPEED BIT(2) |
| 202 | /* Unreliable card detection */ | 210 | /* Unreliable card detection */ |
| 203 | #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) | 211 | #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) |
| 204 | | 212 | /* Write Protect detection not available */ |

| 213 | #define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4) | ||
| 205 | 214 | ||
| 206 | struct dma_pdata; | 215 | struct dma_pdata; |
| 207 | 216 | ||
| @@ -218,7 +227,7 @@ struct dw_mci_board { | |||
| 218 | u32 num_slots; | 227 | u32 num_slots; |
| 219 | 228 | ||
| 220 | u32 quirks; /* Workaround / Quirk flags */ | 229 | u32 quirks; /* Workaround / Quirk flags */ |
| 221 | unsigned int bus_hz; /* Bus speed */ | 230 | unsigned int bus_hz; /* Clock speed at the cclk_in pad */ |
| 222 | 231 | ||
| 223 | unsigned int caps; /* Capabilities */ | 232 | unsigned int caps; /* Capabilities */ |
| 224 | unsigned int caps2; /* More capabilities */ | 233 | unsigned int caps2; /* More capabilities */ |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f578a71d82a6..7abb0e1f7bda 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -259,10 +259,6 @@ struct mmc_host { | |||
| 259 | #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ | 259 | #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ |
| 260 | 260 | ||
| 261 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 261 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
| 262 | unsigned int power_notify_type; | ||
| 263 | #define MMC_HOST_PW_NOTIFY_NONE 0 | ||
| 264 | #define MMC_HOST_PW_NOTIFY_SHORT 1 | ||
| 265 | #define MMC_HOST_PW_NOTIFY_LONG 2 | ||
| 266 | 262 | ||
| 267 | #ifdef CONFIG_MMC_CLKGATE | 263 | #ifdef CONFIG_MMC_CLKGATE |
| 268 | int clk_requests; /* internal reference counter */ | 264 | int clk_requests; /* internal reference counter */ |
| @@ -300,6 +296,7 @@ struct mmc_host { | |||
| 300 | #endif | 296 | #endif |
| 301 | 297 | ||
| 302 | int rescan_disable; /* disable card detection */ | 298 | int rescan_disable; /* disable card detection */ |
| 299 | int rescan_entered; /* used with nonremovable devices */ | ||
| 303 | 300 | ||
| 304 | struct mmc_card *card; /* device attached to this host */ | 301 | struct mmc_card *card; /* device attached to this host */ |
| 305 | 302 | ||
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index d425cab144d9..01e4b394029b 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
| @@ -139,6 +139,7 @@ static inline bool mmc_op_multi(u32 opcode) | |||
| 139 | #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ | 139 | #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ |
| 140 | #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ | 140 | #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ |
| 141 | #define R1_SWITCH_ERROR (1 << 7) /* sx, c */ | 141 | #define R1_SWITCH_ERROR (1 << 7) /* sx, c */ |
| 142 | #define R1_EXCEPTION_EVENT (1 << 6) /* sx, a */ | ||
| 142 | #define R1_APP_CMD (1 << 5) /* sr, c */ | 143 | #define R1_APP_CMD (1 << 5) /* sr, c */ |
| 143 | 144 | ||
| 144 | #define R1_STATE_IDLE 0 | 145 | #define R1_STATE_IDLE 0 |
| @@ -274,12 +275,15 @@ struct _mmc_csd { | |||
| 274 | #define EXT_CSD_FLUSH_CACHE 32 /* W */ | 275 | #define EXT_CSD_FLUSH_CACHE 32 /* W */ |
| 275 | #define EXT_CSD_CACHE_CTRL 33 /* R/W */ | 276 | #define EXT_CSD_CACHE_CTRL 33 /* R/W */ |
| 276 | #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ | 277 | #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ |
| 278 | #define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO */ | ||
| 277 | #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ | 279 | #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ |
| 278 | #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ | 280 | #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ |
| 279 | #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ | 281 | #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ |
| 280 | #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ | 282 | #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ |
| 281 | #define EXT_CSD_HPI_MGMT 161 /* R/W */ | 283 | #define EXT_CSD_HPI_MGMT 161 /* R/W */ |
| 282 | #define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ | 284 | #define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ |
| 285 | #define EXT_CSD_BKOPS_EN 163 /* R/W */ | ||
| 286 | #define EXT_CSD_BKOPS_START 164 /* W */ | ||
| 283 | #define EXT_CSD_SANITIZE_START 165 /* W */ | 287 | #define EXT_CSD_SANITIZE_START 165 /* W */ |
| 284 | #define EXT_CSD_WR_REL_PARAM 166 /* RO */ | 288 | #define EXT_CSD_WR_REL_PARAM 166 /* RO */ |
| 285 | #define EXT_CSD_BOOT_WP 173 /* R/W */ | 289 | #define EXT_CSD_BOOT_WP 173 /* R/W */ |
| @@ -313,11 +317,13 @@ struct _mmc_csd { | |||
| 313 | #define EXT_CSD_PWR_CL_200_360 237 /* RO */ | 317 | #define EXT_CSD_PWR_CL_200_360 237 /* RO */ |
| 314 | #define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ | 318 | #define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ |
| 315 | #define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ | 319 | #define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ |
| 320 | #define EXT_CSD_BKOPS_STATUS 246 /* RO */ | ||
| 316 | #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ | 321 | #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ |
| 317 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ | 322 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ |
| 318 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ | 323 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ |
| 319 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ | 324 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ |
| 320 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ | 325 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ |
| 326 | #define EXT_CSD_BKOPS_SUPPORT 502 /* RO */ | ||
| 321 | #define EXT_CSD_HPI_FEATURES 503 /* RO */ | 327 | #define EXT_CSD_HPI_FEATURES 503 /* RO */ |
| 322 | 328 | ||
| 323 | /* | 329 | /* |
| @@ -378,6 +384,19 @@ struct _mmc_csd { | |||
| 378 | #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 | 384 | #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 |
| 379 | #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 | 385 | #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 |
| 380 | /* | 386 | /* |
| 387 | * EXCEPTION_EVENT_STATUS field | ||
| 388 | */ | ||
| 389 | #define EXT_CSD_URGENT_BKOPS BIT(0) | ||
| 390 | #define EXT_CSD_DYNCAP_NEEDED BIT(1) | ||
| 391 | #define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2) | ||
| 392 | #define EXT_CSD_PACKED_FAILURE BIT(3) | ||
| 393 | |||
| 394 | /* | ||
| 395 | * BKOPS status level | ||
| 396 | */ | ||
| 397 | #define EXT_CSD_BKOPS_LEVEL_2 0x2 | ||
| 398 | |||
| 399 | /* | ||
| 381 | * MMC_SWITCH access modes | 400 | * MMC_SWITCH access modes |
| 382 | */ | 401 | */ |
| 383 | 402 | ||
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index ac83b105bedd..fa8529a859b8 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h | |||
| @@ -97,7 +97,8 @@ struct sdhci_host { | |||
| 97 | 97 | ||
| 98 | const struct sdhci_ops *ops; /* Low level hw interface */ | 98 | const struct sdhci_ops *ops; /* Low level hw interface */ |
| 99 | 99 | ||
| 100 | struct regulator *vmmc; /* Power regulator */ | 100 | struct regulator *vmmc; /* Power regulator (vmmc) */ |
| 101 | struct regulator *vqmmc; /* Signaling regulator (vccq) */ | ||
| 101 | 102 | ||
| 102 | /* Internal data */ | 103 | /* Internal data */ |
| 103 | struct mmc_host *mmc; /* MMC structure */ | 104 | struct mmc_host *mmc; /* MMC structure */ |
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 650ef352f045..211ff67e8b0d 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h | |||
| @@ -78,8 +78,6 @@ struct nand_bbt_descr { | |||
| 78 | #define NAND_BBT_LASTBLOCK 0x00000010 | 78 | #define NAND_BBT_LASTBLOCK 0x00000010 |
| 79 | /* The bbt is at the given page, else we must scan for the bbt */ | 79 | /* The bbt is at the given page, else we must scan for the bbt */ |
| 80 | #define NAND_BBT_ABSPAGE 0x00000020 | 80 | #define NAND_BBT_ABSPAGE 0x00000020 |
| 81 | /* The bbt is at the given page, else we must scan for the bbt */ | ||
| 82 | #define NAND_BBT_SEARCH 0x00000040 | ||
| 83 | /* bbt is stored per chip on multichip devices */ | 81 | /* bbt is stored per chip on multichip devices */ |
| 84 | #define NAND_BBT_PERCHIP 0x00000080 | 82 | #define NAND_BBT_PERCHIP 0x00000080 |
| 85 | /* bbt has a version counter at offset veroffs */ | 83 | /* bbt has a version counter at offset veroffs */ |
| @@ -110,7 +108,10 @@ struct nand_bbt_descr { | |||
| 110 | * OOB area. This option is passed to the default bad block table function. | 108 | * OOB area. This option is passed to the default bad block table function. |
| 111 | */ | 109 | */ |
| 112 | #define NAND_BBT_USE_FLASH 0x00020000 | 110 | #define NAND_BBT_USE_FLASH 0x00020000 |
| 113 | /* Do not store flash based bad block table in OOB area; store it in-band */ | 111 | /* |
| 112 | * Do not store flash based bad block table marker in the OOB area; store it | ||
| 113 | * in-band. | ||
| 114 | */ | ||
| 114 | #define NAND_BBT_NO_OOB 0x00040000 | 115 | #define NAND_BBT_NO_OOB 0x00040000 |
| 115 | /* | 116 | /* |
| 116 | * Do not write new bad block markers to OOB; useful, e.g., when ECC covers | 117 | * Do not write new bad block markers to OOB; useful, e.g., when ECC covers |
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h new file mode 100644 index 000000000000..d91b1e35631e --- /dev/null +++ b/include/linux/mtd/lpc32xx_mlc.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* | ||
| 2 | * Platform data for LPC32xx SoC MLC NAND controller | ||
| 3 | * | ||
| 4 | * Copyright © 2012 Roland Stigge | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __LINUX_MTD_LPC32XX_MLC_H | ||
| 12 | #define __LINUX_MTD_LPC32XX_MLC_H | ||
| 13 | |||
| 14 | #include <linux/dmaengine.h> | ||
| 15 | |||
| 16 | struct lpc32xx_mlc_platform_data { | ||
| 17 | bool (*dma_filter)(struct dma_chan *chan, void *filter_param); | ||
| 18 | }; | ||
| 19 | |||
| 20 | #endif /* __LINUX_MTD_LPC32XX_MLC_H */ | ||
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h new file mode 100644 index 000000000000..1169548a1535 --- /dev/null +++ b/include/linux/mtd/lpc32xx_slc.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* | ||
| 2 | * Platform data for LPC32xx SoC SLC NAND controller | ||
| 3 | * | ||
| 4 | * Copyright © 2012 Roland Stigge | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __LINUX_MTD_LPC32XX_SLC_H | ||
| 12 | #define __LINUX_MTD_LPC32XX_SLC_H | ||
| 13 | |||
| 14 | #include <linux/dmaengine.h> | ||
| 15 | |||
| 16 | struct lpc32xx_slc_platform_data { | ||
| 17 | bool (*dma_filter)(struct dma_chan *chan, void *filter_param); | ||
| 18 | }; | ||
| 19 | |||
| 20 | #endif /* __LINUX_MTD_LPC32XX_SLC_H */ | ||
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 63dadc0dfb62..81d61e704599 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
| @@ -265,14 +265,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | |||
| 265 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | 265 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
| 266 | const u_char *buf); | 266 | const u_char *buf); |
| 267 | 267 | ||
| 268 | static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, | 268 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); |
| 269 | struct mtd_oob_ops *ops) | ||
| 270 | { | ||
| 271 | ops->retlen = ops->oobretlen = 0; | ||
| 272 | if (!mtd->_read_oob) | ||
| 273 | return -EOPNOTSUPP; | ||
| 274 | return mtd->_read_oob(mtd, from, ops); | ||
| 275 | } | ||
| 276 | 269 | ||
| 277 | static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, | 270 | static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
| 278 | struct mtd_oob_ops *ops) | 271 | struct mtd_oob_ops *ops) |
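The calling convention is unchanged by the move out of line: callers still fill a struct mtd_oob_ops and check the returned lengths. A minimal caller sketch (buffer handling and error policy are illustrative):

	#include <linux/mtd/mtd.h>

	static int example_read_first_oob(struct mtd_info *mtd, u_char *oobbuf)
	{
		struct mtd_oob_ops ops = {
			.mode   = MTD_OPS_PLACE_OOB,
			.ooblen = mtd->oobsize,
			.oobbuf = oobbuf,
		};
		int ret;

		/* Read only the OOB area of the first page. */
		ret = mtd_read_oob(mtd, 0, &ops);
		if (ret < 0)
			return ret;

		return ops.oobretlen == mtd->oobsize ? 0 : -EIO;
	}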
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 57977c640529..24e915957e4f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -56,7 +56,7 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | |||
| 56 | * is supported now. If you add a chip with bigger oobsize/page | 56 | * is supported now. If you add a chip with bigger oobsize/page |
| 57 | * adjust this accordingly. | 57 | * adjust this accordingly. |
| 58 | */ | 58 | */ |
| 59 | #define NAND_MAX_OOBSIZE 576 | 59 | #define NAND_MAX_OOBSIZE 640 |
| 60 | #define NAND_MAX_PAGESIZE 8192 | 60 | #define NAND_MAX_PAGESIZE 8192 |
| 61 | 61 | ||
| 62 | /* | 62 | /* |
| @@ -92,6 +92,8 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | |||
| 92 | #define NAND_CMD_READID 0x90 | 92 | #define NAND_CMD_READID 0x90 |
| 93 | #define NAND_CMD_ERASE2 0xd0 | 93 | #define NAND_CMD_ERASE2 0xd0 |
| 94 | #define NAND_CMD_PARAM 0xec | 94 | #define NAND_CMD_PARAM 0xec |
| 95 | #define NAND_CMD_GET_FEATURES 0xee | ||
| 96 | #define NAND_CMD_SET_FEATURES 0xef | ||
| 95 | #define NAND_CMD_RESET 0xff | 97 | #define NAND_CMD_RESET 0xff |
| 96 | 98 | ||
| 97 | #define NAND_CMD_LOCK 0x2a | 99 | #define NAND_CMD_LOCK 0x2a |
| @@ -185,12 +187,6 @@ typedef enum { | |||
| 185 | * This happens with the Renesas AG-AND chips, possibly others. | 187 | * This happens with the Renesas AG-AND chips, possibly others. |
| 186 | */ | 188 | */ |
| 187 | #define BBT_AUTO_REFRESH 0x00000080 | 189 | #define BBT_AUTO_REFRESH 0x00000080 |
| 188 | /* | ||
| 189 | * Chip does not require ready check on read. True | ||
| 190 | * for all large page devices, as they do not support | ||
| 191 | * autoincrement. | ||
| 192 | */ | ||
| 193 | #define NAND_NO_READRDY 0x00000100 | ||
| 194 | /* Chip does not allow subpage writes */ | 190 | /* Chip does not allow subpage writes */ |
| 195 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 | 191 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 |
| 196 | 192 | ||
| @@ -200,6 +196,9 @@ typedef enum { | |||
| 200 | /* Device behaves just like nand, but is readonly */ | 196 | /* Device behaves just like nand, but is readonly */ |
| 201 | #define NAND_ROM 0x00000800 | 197 | #define NAND_ROM 0x00000800 |
| 202 | 198 | ||
| 199 | /* Device supports subpage reads */ | ||
| 200 | #define NAND_SUBPAGE_READ 0x00001000 | ||
| 201 | |||
| 203 | /* Options valid for Samsung large page devices */ | 202 | /* Options valid for Samsung large page devices */ |
| 204 | #define NAND_SAMSUNG_LP_OPTIONS \ | 203 | #define NAND_SAMSUNG_LP_OPTIONS \ |
| 205 | (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) | 204 | (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) |
| @@ -208,12 +207,7 @@ typedef enum { | |||
| 208 | #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) | 207 | #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) |
| 209 | #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) | 208 | #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) |
| 210 | #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) | 209 | #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) |
| 211 | /* Large page NAND with SOFT_ECC should support subpage reads */ | 210 | #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) |
| 212 | #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ | ||
| 213 | && (chip->page_shift > 9)) | ||
| 214 | |||
| 215 | /* Mask to zero out the chip options, which come from the id table */ | ||
| 216 | #define NAND_CHIPOPTIONS_MSK 0x0000ffff | ||
| 217 | 211 | ||
| 218 | /* Non chip related options */ | 212 | /* Non chip related options */ |
| 219 | /* This option skips the bbt scan during initialization. */ | 213 | /* This option skips the bbt scan during initialization. */ |
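Subpage reads are now an explicit per-chip capability rather than a guess based on soft ECC and page size. A minimal sketch of a driver opting in and the check the core can then apply (function names are illustrative):

	#include <linux/mtd/nand.h>

	static void example_enable_subpage_reads(struct nand_chip *chip)
	{
		/* This controller can read individual ECC-sized chunks. */
		chip->options |= NAND_SUBPAGE_READ;
	}

	static bool example_can_subpage_read(struct nand_chip *chip)
	{
		return NAND_HAS_SUBPAGE_READ(chip);
	}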
| @@ -237,6 +231,21 @@ typedef enum { | |||
| 237 | /* Keep gcc happy */ | 231 | /* Keep gcc happy */ |
| 238 | struct nand_chip; | 232 | struct nand_chip; |
| 239 | 233 | ||
| 234 | /* ONFI timing mode, used in both asynchronous and synchronous mode */ | ||
| 235 | #define ONFI_TIMING_MODE_0 (1 << 0) | ||
| 236 | #define ONFI_TIMING_MODE_1 (1 << 1) | ||
| 237 | #define ONFI_TIMING_MODE_2 (1 << 2) | ||
| 238 | #define ONFI_TIMING_MODE_3 (1 << 3) | ||
| 239 | #define ONFI_TIMING_MODE_4 (1 << 4) | ||
| 240 | #define ONFI_TIMING_MODE_5 (1 << 5) | ||
| 241 | #define ONFI_TIMING_MODE_UNKNOWN (1 << 6) | ||
| 242 | |||
| 243 | /* ONFI feature address */ | ||
| 244 | #define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 | ||
| 245 | |||
| 246 | /* ONFI subfeature parameters length */ | ||
| 247 | #define ONFI_SUBFEATURE_PARAM_LEN 4 | ||
| 248 | |||
| 240 | struct nand_onfi_params { | 249 | struct nand_onfi_params { |
| 241 | /* rev info and features block */ | 250 | /* rev info and features block */ |
| 242 | /* 'O' 'N' 'F' 'I' */ | 251 | /* 'O' 'N' 'F' 'I' */ |
| @@ -334,8 +343,10 @@ struct nand_hw_control { | |||
| 334 | * @read_page_raw: function to read a raw page without ECC | 343 | * @read_page_raw: function to read a raw page without ECC |
| 335 | * @write_page_raw: function to write a raw page without ECC | 344 | * @write_page_raw: function to write a raw page without ECC |
| 336 | * @read_page: function to read a page according to the ECC generator | 345 | * @read_page: function to read a page according to the ECC generator |
| 337 | * requirements. | 346 | * requirements; returns maximum number of bitflips corrected in |
| 338 | * @read_subpage: function to read parts of the page covered by ECC. | 347 | * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error |
| 348 | * @read_subpage: function to read parts of the page covered by ECC; | ||
| 349 | * returns same as read_page() | ||
| 339 | * @write_page: function to write a page according to the ECC generator | 350 | * @write_page: function to write a page according to the ECC generator |
| 340 | * requirements. | 351 | * requirements. |
| 341 | * @write_oob_raw: function to write chip OOB data without ECC | 352 | * @write_oob_raw: function to write chip OOB data without ECC |
| @@ -361,13 +372,13 @@ struct nand_ecc_ctrl { | |||
| 361 | uint8_t *calc_ecc); | 372 | uint8_t *calc_ecc); |
| 362 | int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 373 | int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
| 363 | uint8_t *buf, int oob_required, int page); | 374 | uint8_t *buf, int oob_required, int page); |
| 364 | void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 375 | int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
| 365 | const uint8_t *buf, int oob_required); | 376 | const uint8_t *buf, int oob_required); |
| 366 | int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, | 377 | int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, |
| 367 | uint8_t *buf, int oob_required, int page); | 378 | uint8_t *buf, int oob_required, int page); |
| 368 | int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, | 379 | int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, |
| 369 | uint32_t offs, uint32_t len, uint8_t *buf); | 380 | uint32_t offs, uint32_t len, uint8_t *buf); |
| 370 | void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, | 381 | int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, |
| 371 | const uint8_t *buf, int oob_required); | 382 | const uint8_t *buf, int oob_required); |
| 372 | int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 383 | int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
| 373 | int page); | 384 | int page); |
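The reworked read_page()/read_subpage() contract is: return the worst-case number of bitflips corrected in any single ECC step, account uncorrectable steps in ecc_stats, and return -EIO only on a hardware error. A sketch of an implementation honouring that contract; correct_one_step() is a hypothetical per-controller helper, not a real kernel function:

	#include <linux/kernel.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				     uint8_t *buf, int oob_required, int page)
	{
		unsigned int max_bitflips = 0;
		int step;

		for (step = 0; step < chip->ecc.steps; step++) {
			/* Hypothetical helper: transfers and corrects one ECC step into buf. */
			int stat = correct_one_step(mtd, chip, buf, step);

			if (stat < 0) {
				mtd->ecc_stats.failed++;	/* uncorrectable step */
				continue;
			}
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		return max_bitflips;
	}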
| @@ -403,8 +414,6 @@ struct nand_buffers { | |||
| 403 | * @read_word: [REPLACEABLE] read one word from the chip | 414 | * @read_word: [REPLACEABLE] read one word from the chip |
| 404 | * @write_buf: [REPLACEABLE] write data from the buffer to the chip | 415 | * @write_buf: [REPLACEABLE] write data from the buffer to the chip |
| 405 | * @read_buf: [REPLACEABLE] read data from the chip into the buffer | 416 | * @read_buf: [REPLACEABLE] read data from the chip into the buffer |
| 406 | * @verify_buf: [REPLACEABLE] verify buffer contents against the chip | ||
| 407 | * data. | ||
| 408 | * @select_chip: [REPLACEABLE] select chip nr | 417 | * @select_chip: [REPLACEABLE] select chip nr |
| 409 | * @block_bad: [REPLACEABLE] check, if the block is bad | 418 | * @block_bad: [REPLACEABLE] check, if the block is bad |
| 410 | * @block_markbad: [REPLACEABLE] mark the block bad | 419 | * @block_markbad: [REPLACEABLE] mark the block bad |
| @@ -462,6 +471,8 @@ struct nand_buffers { | |||
| 462 | * non 0 if ONFI supported. | 471 | * non 0 if ONFI supported. |
| 463 | * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is | 472 | * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is |
| 464 | * supported, 0 otherwise. | 473 | * supported, 0 otherwise. |
| 474 | * @onfi_set_features [REPLACEABLE] set the features for ONFI nand | ||
| 475 | * @onfi_get_features [REPLACEABLE] get the features for ONFI nand | ||
| 465 | * @ecclayout: [REPLACEABLE] the default ECC placement scheme | 476 | * @ecclayout: [REPLACEABLE] the default ECC placement scheme |
| 466 | * @bbt: [INTERN] bad block table pointer | 477 | * @bbt: [INTERN] bad block table pointer |
| 467 | * @bbt_td: [REPLACEABLE] bad block table descriptor for flash | 478 | * @bbt_td: [REPLACEABLE] bad block table descriptor for flash |
| @@ -487,7 +498,6 @@ struct nand_chip { | |||
| 487 | u16 (*read_word)(struct mtd_info *mtd); | 498 | u16 (*read_word)(struct mtd_info *mtd); |
| 488 | void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); | 499 | void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); |
| 489 | void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); | 500 | void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); |
| 490 | int (*verify_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); | ||
| 491 | void (*select_chip)(struct mtd_info *mtd, int chip); | 501 | void (*select_chip)(struct mtd_info *mtd, int chip); |
| 492 | int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); | 502 | int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); |
| 493 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); | 503 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); |
| @@ -505,6 +515,10 @@ struct nand_chip { | |||
| 505 | int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, | 515 | int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, |
| 506 | const uint8_t *buf, int oob_required, int page, | 516 | const uint8_t *buf, int oob_required, int page, |
| 507 | int cached, int raw); | 517 | int cached, int raw); |
| 518 | int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 519 | int feature_addr, uint8_t *subfeature_para); | ||
| 520 | int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, | ||
| 521 | int feature_addr, uint8_t *subfeature_para); | ||
| 508 | 522 | ||
| 509 | int chip_delay; | 523 | int chip_delay; |
| 510 | unsigned int options; | 524 | unsigned int options; |
| @@ -559,6 +573,7 @@ struct nand_chip { | |||
| 559 | #define NAND_MFR_MICRON 0x2c | 573 | #define NAND_MFR_MICRON 0x2c |
| 560 | #define NAND_MFR_AMD 0x01 | 574 | #define NAND_MFR_AMD 0x01 |
| 561 | #define NAND_MFR_MACRONIX 0xc2 | 575 | #define NAND_MFR_MACRONIX 0xc2 |
| 576 | #define NAND_MFR_EON 0x92 | ||
| 562 | 577 | ||
| 563 | /** | 578 | /** |
| 564 | * struct nand_flash_dev - NAND Flash Device ID Structure | 579 | * struct nand_flash_dev - NAND Flash Device ID Structure |
| @@ -641,6 +656,7 @@ struct platform_device; | |||
| 641 | * ALE/CLE/nCE. Also used to write command and address | 656 | * ALE/CLE/nCE. Also used to write command and address |
| 642 | * @write_buf: platform specific function for write buffer | 657 | * @write_buf: platform specific function for write buffer |
| 643 | * @read_buf: platform specific function for read buffer | 658 | * @read_buf: platform specific function for read buffer |
| 659 | * @read_byte: platform specific function to read one byte from chip | ||
| 644 | * @priv: private data to transport driver specific settings | 660 | * @priv: private data to transport driver specific settings |
| 645 | * | 661 | * |
| 646 | * All fields are optional and depend on the hardware driver requirements | 662 | * All fields are optional and depend on the hardware driver requirements |
| @@ -677,4 +693,20 @@ struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) | |||
| 677 | return chip->priv; | 693 | return chip->priv; |
| 678 | } | 694 | } |
| 679 | 695 | ||
| 696 | /* return the supported asynchronous timing mode. */ | ||
| 697 | static inline int onfi_get_async_timing_mode(struct nand_chip *chip) | ||
| 698 | { | ||
| 699 | if (!chip->onfi_version) | ||
| 700 | return ONFI_TIMING_MODE_UNKNOWN; | ||
| 701 | return le16_to_cpu(chip->onfi_params.async_timing_mode); | ||
| 702 | } | ||
| 703 | |||
| 704 | /* return the supported synchronous timing mode. */ | ||
| 705 | static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) | ||
| 706 | { | ||
| 707 | if (!chip->onfi_version) | ||
| 708 | return ONFI_TIMING_MODE_UNKNOWN; | ||
| 709 | return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); | ||
| 710 | } | ||
| 711 | |||
| 680 | #endif /* __LINUX_MTD_NAND_H */ | 712 | #endif /* __LINUX_MTD_NAND_H */ |
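Together with the new onfi_set_features() hook, these helpers let a driver negotiate the fastest asynchronous timing mode the chip advertises. A minimal sketch (the selection policy and function name are illustrative, not from this patch):

	#include <linux/errno.h>
	#include <linux/mtd/nand.h>

	static int example_pick_onfi_timing(struct mtd_info *mtd, struct nand_chip *chip)
	{
		uint8_t param[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
		int modes = onfi_get_async_timing_mode(chip);
		int mode;

		if (modes == ONFI_TIMING_MODE_UNKNOWN || !chip->onfi_set_features)
			return -ENOTSUPP;

		/* Pick the highest advertised mode, from 5 down to 0. */
		for (mode = 5; mode >= 0; mode--)
			if (modes & (1 << mode))
				break;
		if (mode < 0)
			return -ENOTSUPP;

		param[0] = mode;
		return chip->onfi_set_features(mtd, chip,
					       ONFI_FEATURE_ADDR_TIMING_MODE, param);
	}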
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index a38e1fa8af01..01e4b15b280e 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h | |||
| @@ -49,7 +49,6 @@ | |||
| 49 | #define FLERRADR(f) (f->reg + 0x98) | 49 | #define FLERRADR(f) (f->reg + 0x98) |
| 50 | 50 | ||
| 51 | /* FLCMNCR control bits */ | 51 | /* FLCMNCR control bits */ |
| 52 | #define ECCPOS2 (0x1 << 25) | ||
| 53 | #define _4ECCCNTEN (0x1 << 24) | 52 | #define _4ECCCNTEN (0x1 << 24) |
| 54 | #define _4ECCEN (0x1 << 23) | 53 | #define _4ECCEN (0x1 << 23) |
| 55 | #define _4ECCCORRECT (0x1 << 22) | 54 | #define _4ECCCORRECT (0x1 << 22) |
| @@ -59,9 +58,6 @@ | |||
| 59 | #define QTSEL_E (0x1 << 17) | 58 | #define QTSEL_E (0x1 << 17) |
| 60 | #define ENDIAN (0x1 << 16) /* 1 = little endian */ | 59 | #define ENDIAN (0x1 << 16) /* 1 = little endian */ |
| 61 | #define FCKSEL_E (0x1 << 15) | 60 | #define FCKSEL_E (0x1 << 15) |
| 62 | #define ECCPOS_00 (0x00 << 12) | ||
| 63 | #define ECCPOS_01 (0x01 << 12) | ||
| 64 | #define ECCPOS_02 (0x02 << 12) | ||
| 65 | #define ACM_SACCES_MODE (0x01 << 10) | 61 | #define ACM_SACCES_MODE (0x01 << 10) |
| 66 | #define NANWF_E (0x1 << 9) | 62 | #define NANWF_E (0x1 << 9) |
| 67 | #define SE_D (0x1 << 8) /* Spare area disable */ | 63 | #define SE_D (0x1 << 8) /* Spare area disable */ |
| @@ -107,6 +103,14 @@ | |||
| 107 | #define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */ | 103 | #define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */ |
| 108 | #define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */ | 104 | #define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */ |
| 109 | 105 | ||
| 106 | /* FLINTDMACR control bits */ | ||
| 107 | #define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ | ||
| 108 | #define AC1CLR (0x1 << 19) /* ECC FIFO clear */ | ||
| 109 | #define AC0CLR (0x1 << 18) /* Data FIFO clear */ | ||
| 110 | #define ECERB (0x1 << 9) /* ECC error */ | ||
| 111 | #define STERB (0x1 << 8) /* Status error */ | ||
| 112 | #define STERINTE (0x1 << 4) /* Status error enable */ | ||
| 113 | |||
| 110 | /* FLTRCR control bits */ | 114 | /* FLTRCR control bits */ |
| 111 | #define TRSTRT (0x1 << 0) /* translation start */ | 115 | #define TRSTRT (0x1 << 0) /* translation start */ |
| 112 | #define TREND (0x1 << 1) /* translation end */ | 116 | #define TREND (0x1 << 1) /* translation end */ |
| @@ -125,9 +129,15 @@ | |||
| 125 | #define _4ECCEND (0x1 << 1) /* 4 symbols end */ | 129 | #define _4ECCEND (0x1 << 1) /* 4 symbols end */ |
| 126 | #define _4ECCEXST (0x1 << 0) /* 4 symbols exist */ | 130 | #define _4ECCEXST (0x1 << 0) /* 4 symbols exist */ |
| 127 | 131 | ||
| 128 | #define INIT_FL4ECCRESULT_VAL 0x03FF03FF | ||
| 129 | #define LOOP_TIMEOUT_MAX 0x00010000 | 132 | #define LOOP_TIMEOUT_MAX 0x00010000 |
| 130 | 133 | ||
| 134 | enum flctl_ecc_res_t { | ||
| 135 | FL_SUCCESS, | ||
| 136 | FL_REPAIRABLE, | ||
| 137 | FL_ERROR, | ||
| 138 | FL_TIMEOUT | ||
| 139 | }; | ||
| 140 | |||
| 131 | struct sh_flctl { | 141 | struct sh_flctl { |
| 132 | struct mtd_info mtd; | 142 | struct mtd_info mtd; |
| 133 | struct nand_chip chip; | 143 | struct nand_chip chip; |
| @@ -145,8 +155,7 @@ struct sh_flctl { | |||
| 145 | uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ | 155 | uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ |
| 146 | uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ | 156 | uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ |
| 147 | uint32_t flcmncr_base; /* base value of FLCMNCR */ | 157 | uint32_t flcmncr_base; /* base value of FLCMNCR */ |
| 148 | 158 | uint32_t flintdmacr_base; /* irq enable bits */ | |
| 149 | int hwecc_cant_correct[4]; | ||
| 150 | 159 | ||
| 151 | unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ | 160 | unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ |
| 152 | unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ | 161 | unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ |
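The new flctl_ecc_res_t replaces the removed hwecc_cant_correct counters with a per-read ECC verdict. A hedged sketch of how a caller might fold that verdict into MTD statistics; the exact mapping is inferred from the enum names, not spelled out by this patch:

	#include <linux/errno.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/sh_flctl.h>

	static int example_handle_ecc_result(struct mtd_info *mtd,
					     enum flctl_ecc_res_t res)
	{
		switch (res) {
		case FL_SUCCESS:
			return 0;			/* clean read */
		case FL_REPAIRABLE:
			mtd->ecc_stats.corrected++;	/* data fixed up by ECC */
			return 1;
		case FL_ERROR:
			mtd->ecc_stats.failed++;	/* uncorrectable */
			return 0;
		case FL_TIMEOUT:
		default:
			return -ETIMEDOUT;		/* hardware never completed */
		}
	}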
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h new file mode 100644 index 000000000000..2a330ec9e2af --- /dev/null +++ b/include/linux/platform_data/mmp_dma.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* | ||
| 2 | * MMP Platform DMA Management | ||
| 3 | * | ||
| 4 | * Copyright (c) 2011 Marvell Semiconductors Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef MMP_DMA_H | ||
| 13 | #define MMP_DMA_H | ||
| 14 | |||
| 15 | struct mmp_dma_platdata { | ||
| 16 | int dma_channels; | ||
| 17 | }; | ||
| 18 | |||
| 19 | #endif /* MMP_DMA_H */ | ||
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 51ad0995abac..59acd987ed34 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h | |||
| @@ -49,6 +49,7 @@ struct sdhci_pxa_platdata { | |||
| 49 | bool ext_cd_gpio_invert; | 49 | bool ext_cd_gpio_invert; |
| 50 | unsigned int max_speed; | 50 | unsigned int max_speed; |
| 51 | unsigned int host_caps; | 51 | unsigned int host_caps; |
| 52 | unsigned int host_caps2; | ||
| 52 | unsigned int quirks; | 53 | unsigned int quirks; |
| 53 | unsigned int pm_caps; | 54 | unsigned int pm_caps; |
| 54 | }; | 55 | }; |
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild index 192f8fb7d546..e69de29bb2d1 100644 --- a/include/mtd/Kbuild +++ b/include/mtd/Kbuild | |||
| @@ -1,5 +0,0 @@ | |||
| 1 | header-y += inftl-user.h | ||
| 2 | header-y += mtd-abi.h | ||
| 3 | header-y += mtd-user.h | ||
| 4 | header-y += nftl-user.h | ||
| 5 | header-y += ubi-user.h | ||
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 91b91e805673..54fab041b22a 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h | |||
| @@ -445,6 +445,7 @@ TRACE_EVENT(btrfs_delayed_tree_ref, | |||
| 445 | __field( u64, ref_root ) | 445 | __field( u64, ref_root ) |
| 446 | __field( int, level ) | 446 | __field( int, level ) |
| 447 | __field( int, type ) | 447 | __field( int, type ) |
| 448 | __field( u64, seq ) | ||
| 448 | ), | 449 | ), |
| 449 | 450 | ||
| 450 | TP_fast_assign( | 451 | TP_fast_assign( |
| @@ -455,17 +456,19 @@ TRACE_EVENT(btrfs_delayed_tree_ref, | |||
| 455 | __entry->ref_root = full_ref->root; | 456 | __entry->ref_root = full_ref->root; |
| 456 | __entry->level = full_ref->level; | 457 | __entry->level = full_ref->level; |
| 457 | __entry->type = ref->type; | 458 | __entry->type = ref->type; |
| 459 | __entry->seq = ref->seq; | ||
| 458 | ), | 460 | ), |
| 459 | 461 | ||
| 460 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " | 462 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " |
| 461 | "parent = %llu(%s), ref_root = %llu(%s), level = %d, " | 463 | "parent = %llu(%s), ref_root = %llu(%s), level = %d, " |
| 462 | "type = %s", | 464 | "type = %s, seq = %llu", |
| 463 | (unsigned long long)__entry->bytenr, | 465 | (unsigned long long)__entry->bytenr, |
| 464 | (unsigned long long)__entry->num_bytes, | 466 | (unsigned long long)__entry->num_bytes, |
| 465 | show_ref_action(__entry->action), | 467 | show_ref_action(__entry->action), |
| 466 | show_root_type(__entry->parent), | 468 | show_root_type(__entry->parent), |
| 467 | show_root_type(__entry->ref_root), | 469 | show_root_type(__entry->ref_root), |
| 468 | __entry->level, show_ref_type(__entry->type)) | 470 | __entry->level, show_ref_type(__entry->type), |
| 471 | (unsigned long long)__entry->seq) | ||
| 469 | ); | 472 | ); |
| 470 | 473 | ||
| 471 | TRACE_EVENT(btrfs_delayed_data_ref, | 474 | TRACE_EVENT(btrfs_delayed_data_ref, |
| @@ -485,6 +488,7 @@ TRACE_EVENT(btrfs_delayed_data_ref, | |||
| 485 | __field( u64, owner ) | 488 | __field( u64, owner ) |
| 486 | __field( u64, offset ) | 489 | __field( u64, offset ) |
| 487 | __field( int, type ) | 490 | __field( int, type ) |
| 491 | __field( u64, seq ) | ||
| 488 | ), | 492 | ), |
| 489 | 493 | ||
| 490 | TP_fast_assign( | 494 | TP_fast_assign( |
| @@ -496,11 +500,12 @@ TRACE_EVENT(btrfs_delayed_data_ref, | |||
| 496 | __entry->owner = full_ref->objectid; | 500 | __entry->owner = full_ref->objectid; |
| 497 | __entry->offset = full_ref->offset; | 501 | __entry->offset = full_ref->offset; |
| 498 | __entry->type = ref->type; | 502 | __entry->type = ref->type; |
| 503 | __entry->seq = ref->seq; | ||
| 499 | ), | 504 | ), |
| 500 | 505 | ||
| 501 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " | 506 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " |
| 502 | "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " | 507 | "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " |
| 503 | "offset = %llu, type = %s", | 508 | "offset = %llu, type = %s, seq = %llu", |
| 504 | (unsigned long long)__entry->bytenr, | 509 | (unsigned long long)__entry->bytenr, |
| 505 | (unsigned long long)__entry->num_bytes, | 510 | (unsigned long long)__entry->num_bytes, |
| 506 | show_ref_action(__entry->action), | 511 | show_ref_action(__entry->action), |
| @@ -508,7 +513,8 @@ TRACE_EVENT(btrfs_delayed_data_ref, | |||
| 508 | show_root_type(__entry->ref_root), | 513 | show_root_type(__entry->ref_root), |
| 509 | (unsigned long long)__entry->owner, | 514 | (unsigned long long)__entry->owner, |
| 510 | (unsigned long long)__entry->offset, | 515 | (unsigned long long)__entry->offset, |
| 511 | show_ref_type(__entry->type)) | 516 | show_ref_type(__entry->type), |
| 517 | (unsigned long long)__entry->seq) | ||
| 512 | ); | 518 | ); |
| 513 | 519 | ||
| 514 | TRACE_EVENT(btrfs_delayed_ref_head, | 520 | TRACE_EVENT(btrfs_delayed_ref_head, |
diff --git a/include/uapi/mtd/Kbuild b/include/uapi/mtd/Kbuild index aafaa5aa54d4..5a691e10cd0e 100644 --- a/include/uapi/mtd/Kbuild +++ b/include/uapi/mtd/Kbuild | |||
| @@ -1 +1,6 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | header-y += inftl-user.h | ||
| 3 | header-y += mtd-abi.h | ||
| 4 | header-y += mtd-user.h | ||
| 5 | header-y += nftl-user.h | ||
| 6 | header-y += ubi-user.h | ||
diff --git a/include/mtd/inftl-user.h b/include/uapi/mtd/inftl-user.h index 8376bd1a9e01..8376bd1a9e01 100644 --- a/include/mtd/inftl-user.h +++ b/include/uapi/mtd/inftl-user.h | |||
diff --git a/include/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 36eace03b2ac..36eace03b2ac 100644 --- a/include/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h | |||
diff --git a/include/mtd/mtd-user.h b/include/uapi/mtd/mtd-user.h index 83327c808c86..83327c808c86 100644 --- a/include/mtd/mtd-user.h +++ b/include/uapi/mtd/mtd-user.h | |||
diff --git a/include/mtd/nftl-user.h b/include/uapi/mtd/nftl-user.h index bdeabd86ad99..bdeabd86ad99 100644 --- a/include/mtd/nftl-user.h +++ b/include/uapi/mtd/nftl-user.h | |||
diff --git a/include/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h index 53cae1e11e57..53cae1e11e57 100644 --- a/include/mtd/ubi-user.h +++ b/include/uapi/mtd/ubi-user.h | |||
