67 files changed, 890 insertions, 635 deletions
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index 84da2a4ba25a..12fedb7834c6 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
| @@ -79,13 +79,6 @@ Mount options | |||
| 79 | 79 | ||
| 80 | (*) == default. | 80 | (*) == default. |
| 81 | 81 | ||
| 82 | norm_unmount (*) commit on unmount; the journal is committed | ||
| 83 | when the file-system is unmounted so that the | ||
| 84 | next mount does not have to replay the journal | ||
| 85 | and it becomes very fast; | ||
| 86 | fast_unmount do not commit on unmount; this option makes | ||
| 87 | unmount faster, but the next mount slower | ||
| 88 | because of the need to replay the journal. | ||
| 89 | bulk_read read more in one go to take advantage of flash | 82 | bulk_read read more in one go to take advantage of flash |
| 90 | media that read faster sequentially | 83 | media that read faster sequentially |
| 91 | no_bulk_read (*) do not bulk-read | 84 | no_bulk_read (*) do not bulk-read |
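The hunk above removes the norm_unmount and fast_unmount mount options from the UBIFS documentation, leaving bulk_read/no_bulk_read as the documented pair. A minimal user-space sketch of mounting with the remaining option (the volume name and mount point are placeholders, not taken from the patch):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "ubi0:rootfs" and "/mnt" are illustrative names only. */
	if (mount("ubi0:rootfs", "/mnt", "ubifs", 0, "bulk_read") != 0) {
		perror("mount ubifs");
		return 1;
	}
	return 0;
}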
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 77b047475539..85040cfeb5e5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
| @@ -650,6 +650,7 @@ ENTRY(fp_enter) | |||
| 650 | no_fp: mov pc, lr | 650 | no_fp: mov pc, lr |
| 651 | 651 | ||
| 652 | __und_usr_unknown: | 652 | __und_usr_unknown: |
| 653 | enable_irq | ||
| 653 | mov r0, sp | 654 | mov r0, sp |
| 654 | adr lr, ret_from_exception | 655 | adr lr, ret_from_exception |
| 655 | b do_undefinstr | 656 | b do_undefinstr |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 06269ea375c5..49a6ba926c2b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
| @@ -136,7 +136,7 @@ ENTRY(mcount) | |||
| 136 | ldmia sp!, {r0-r3, pc} | 136 | ldmia sp!, {r0-r3, pc} |
| 137 | 137 | ||
| 138 | trace: | 138 | trace: |
| 139 | ldr r1, [fp, #-4] | 139 | ldr r1, [fp, #-4] @ lr of instrumented routine |
| 140 | mov r0, lr | 140 | mov r0, lr |
| 141 | sub r0, r0, #MCOUNT_INSN_SIZE | 141 | sub r0, r0, #MCOUNT_INSN_SIZE |
| 142 | mov lr, pc | 142 | mov lr, pc |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7141cee1fab7..363db186cb93 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
| @@ -101,7 +101,7 @@ unlock: | |||
| 101 | /* Handle bad interrupts */ | 101 | /* Handle bad interrupts */ |
| 102 | static struct irq_desc bad_irq_desc = { | 102 | static struct irq_desc bad_irq_desc = { |
| 103 | .handle_irq = handle_bad_irq, | 103 | .handle_irq = handle_bad_irq, |
| 104 | .lock = SPIN_LOCK_UNLOCKED | 104 | .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | /* | 107 | /* |
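For context on the change above: SPIN_LOCK_UNLOCKED gives every lock initialized with it the same lockdep class, whereas __SPIN_LOCK_UNLOCKED() takes the lock's name so each statically initialized lock gets its own class. A small sketch of the same pattern applied to an arbitrary structure (the structure itself is hypothetical):

#include <linux/spinlock.h>

struct demo_state {
	spinlock_t lock;
	int count;
};

/* Static initialization of an embedded spinlock with a per-lock
 * lockdep class derived from the field name. */
static struct demo_state demo = {
	.lock	= __SPIN_LOCK_UNLOCKED(demo.lock),
	.count	= 0,
};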
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index c2a96e3965a6..e61967dde9a1 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
| 28 | #include <asm/mach/flash.h> | 28 | #include <asm/mach/flash.h> |
| 29 | 29 | ||
| 30 | #include <mach/irqs.h> | ||
| 30 | #include <mach/board.h> | 31 | #include <mach/board.h> |
| 31 | #include <mach/msm_iomap.h> | 32 | #include <mach/msm_iomap.h> |
| 32 | 33 | ||
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 77382d8b6b2f..ba5d7c08dc17 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
| @@ -181,7 +181,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
| 181 | } | 181 | } |
| 182 | size = OMAP1_MMC_SIZE; | 182 | size = OMAP1_MMC_SIZE; |
| 183 | 183 | ||
| 184 | omap_mmc_add(i, base, size, irq, mmc_data[i]); | 184 | omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]); |
| 185 | }; | 185 | }; |
| 186 | } | 186 | } |
| 187 | 187 | ||
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index ca7a0cc1707c..575ba31295cf 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
| @@ -28,81 +28,8 @@ | |||
| 28 | #define DPS_RSTCT2_PER_EN (1 << 0) | 28 | #define DPS_RSTCT2_PER_EN (1 << 0) |
| 29 | #define DSP_RSTCT2_WD_PER_EN (1 << 1) | 29 | #define DSP_RSTCT2_WD_PER_EN (1 << 1) |
| 30 | 30 | ||
| 31 | struct mcbsp_internal_clk { | ||
| 32 | struct clk clk; | ||
| 33 | struct clk **childs; | ||
| 34 | int n_childs; | ||
| 35 | }; | ||
| 36 | |||
| 37 | #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) | 31 | #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) |
| 38 | static void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) | 32 | const char *clk_names[] = { "dsp_ck", "api_ck", "dspxor_ck" }; |
| 39 | { | ||
| 40 | const char *clk_names[] = { "dsp_ck", "api_ck", "dspxor_ck" }; | ||
| 41 | int i; | ||
| 42 | |||
| 43 | mclk->n_childs = ARRAY_SIZE(clk_names); | ||
| 44 | mclk->childs = kzalloc(mclk->n_childs * sizeof(struct clk *), | ||
| 45 | GFP_KERNEL); | ||
| 46 | |||
| 47 | for (i = 0; i < mclk->n_childs; i++) { | ||
| 48 | /* We fake a platform device to get correct device id */ | ||
| 49 | struct platform_device pdev; | ||
| 50 | |||
| 51 | pdev.dev.bus = &platform_bus_type; | ||
| 52 | pdev.id = mclk->clk.id; | ||
| 53 | mclk->childs[i] = clk_get(&pdev.dev, clk_names[i]); | ||
| 54 | if (IS_ERR(mclk->childs[i])) | ||
| 55 | printk(KERN_ERR "Could not get clock %s (%d).\n", | ||
| 56 | clk_names[i], mclk->clk.id); | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | static int omap_mcbsp_clk_enable(struct clk *clk) | ||
| 61 | { | ||
| 62 | struct mcbsp_internal_clk *mclk = container_of(clk, | ||
| 63 | struct mcbsp_internal_clk, clk); | ||
| 64 | int i; | ||
| 65 | |||
| 66 | for (i = 0; i < mclk->n_childs; i++) | ||
| 67 | clk_enable(mclk->childs[i]); | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | static void omap_mcbsp_clk_disable(struct clk *clk) | ||
| 72 | { | ||
| 73 | struct mcbsp_internal_clk *mclk = container_of(clk, | ||
| 74 | struct mcbsp_internal_clk, clk); | ||
| 75 | int i; | ||
| 76 | |||
| 77 | for (i = 0; i < mclk->n_childs; i++) | ||
| 78 | clk_disable(mclk->childs[i]); | ||
| 79 | } | ||
| 80 | |||
| 81 | static struct mcbsp_internal_clk omap_mcbsp_clks[] = { | ||
| 82 | { | ||
| 83 | .clk = { | ||
| 84 | .name = "mcbsp_clk", | ||
| 85 | .id = 1, | ||
| 86 | .enable = omap_mcbsp_clk_enable, | ||
| 87 | .disable = omap_mcbsp_clk_disable, | ||
| 88 | }, | ||
| 89 | }, | ||
| 90 | { | ||
| 91 | .clk = { | ||
| 92 | .name = "mcbsp_clk", | ||
| 93 | .id = 3, | ||
| 94 | .enable = omap_mcbsp_clk_enable, | ||
| 95 | .disable = omap_mcbsp_clk_disable, | ||
| 96 | }, | ||
| 97 | }, | ||
| 98 | }; | ||
| 99 | |||
| 100 | #define omap_mcbsp_clks_size ARRAY_SIZE(omap_mcbsp_clks) | ||
| 101 | #else | ||
| 102 | #define omap_mcbsp_clks_size 0 | ||
| 103 | static struct mcbsp_internal_clk __initdata *omap_mcbsp_clks; | ||
| 104 | static inline void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) | ||
| 105 | { } | ||
| 106 | #endif | 33 | #endif |
| 107 | 34 | ||
| 108 | static void omap1_mcbsp_request(unsigned int id) | 35 | static void omap1_mcbsp_request(unsigned int id) |
| @@ -167,8 +94,9 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { | |||
| 167 | .rx_irq = INT_McBSP1RX, | 94 | .rx_irq = INT_McBSP1RX, |
| 168 | .tx_irq = INT_McBSP1TX, | 95 | .tx_irq = INT_McBSP1TX, |
| 169 | .ops = &omap1_mcbsp_ops, | 96 | .ops = &omap1_mcbsp_ops, |
| 170 | .clk_name = "mcbsp_clk", | 97 | .clk_names = clk_names, |
| 171 | }, | 98 | .num_clks = 3, |
| 99 | }, | ||
| 172 | { | 100 | { |
| 173 | .phys_base = OMAP1510_MCBSP2_BASE, | 101 | .phys_base = OMAP1510_MCBSP2_BASE, |
| 174 | .dma_rx_sync = OMAP_DMA_MCBSP2_RX, | 102 | .dma_rx_sync = OMAP_DMA_MCBSP2_RX, |
| @@ -184,7 +112,8 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { | |||
| 184 | .rx_irq = INT_McBSP3RX, | 112 | .rx_irq = INT_McBSP3RX, |
| 185 | .tx_irq = INT_McBSP3TX, | 113 | .tx_irq = INT_McBSP3TX, |
| 186 | .ops = &omap1_mcbsp_ops, | 114 | .ops = &omap1_mcbsp_ops, |
| 187 | .clk_name = "mcbsp_clk", | 115 | .clk_names = clk_names, |
| 116 | .num_clks = 3, | ||
| 188 | }, | 117 | }, |
| 189 | }; | 118 | }; |
| 190 | #define OMAP15XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap15xx_mcbsp_pdata) | 119 | #define OMAP15XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap15xx_mcbsp_pdata) |
| @@ -202,7 +131,8 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { | |||
| 202 | .rx_irq = INT_McBSP1RX, | 131 | .rx_irq = INT_McBSP1RX, |
| 203 | .tx_irq = INT_McBSP1TX, | 132 | .tx_irq = INT_McBSP1TX, |
| 204 | .ops = &omap1_mcbsp_ops, | 133 | .ops = &omap1_mcbsp_ops, |
| 205 | .clk_name = "mcbsp_clk", | 134 | .clk_names = clk_names, |
| 135 | .num_clks = 3, | ||
| 206 | }, | 136 | }, |
| 207 | { | 137 | { |
| 208 | .phys_base = OMAP1610_MCBSP2_BASE, | 138 | .phys_base = OMAP1610_MCBSP2_BASE, |
| @@ -219,7 +149,8 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { | |||
| 219 | .rx_irq = INT_McBSP3RX, | 149 | .rx_irq = INT_McBSP3RX, |
| 220 | .tx_irq = INT_McBSP3TX, | 150 | .tx_irq = INT_McBSP3TX, |
| 221 | .ops = &omap1_mcbsp_ops, | 151 | .ops = &omap1_mcbsp_ops, |
| 222 | .clk_name = "mcbsp_clk", | 152 | .clk_names = clk_names, |
| 153 | .num_clks = 3, | ||
| 223 | }, | 154 | }, |
| 224 | }; | 155 | }; |
| 225 | #define OMAP16XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap16xx_mcbsp_pdata) | 156 | #define OMAP16XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap16xx_mcbsp_pdata) |
| @@ -230,15 +161,6 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { | |||
| 230 | 161 | ||
| 231 | int __init omap1_mcbsp_init(void) | 162 | int __init omap1_mcbsp_init(void) |
| 232 | { | 163 | { |
| 233 | int i; | ||
| 234 | |||
| 235 | for (i = 0; i < omap_mcbsp_clks_size; i++) { | ||
| 236 | if (cpu_is_omap15xx() || cpu_is_omap16xx()) { | ||
| 237 | omap_mcbsp_clk_init(&omap_mcbsp_clks[i]); | ||
| 238 | clk_register(&omap_mcbsp_clks[i].clk); | ||
| 239 | } | ||
| 240 | } | ||
| 241 | |||
| 242 | if (cpu_is_omap730()) | 164 | if (cpu_is_omap730()) |
| 243 | omap_mcbsp_count = OMAP730_MCBSP_PDATA_SZ; | 165 | omap_mcbsp_count = OMAP730_MCBSP_PDATA_SZ; |
| 244 | if (cpu_is_omap15xx()) | 166 | if (cpu_is_omap15xx()) |
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 9d7216ff6c9f..ce03fa750775 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
| @@ -421,6 +421,7 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
| 421 | int nr_controllers) | 421 | int nr_controllers) |
| 422 | { | 422 | { |
| 423 | int i; | 423 | int i; |
| 424 | char *name; | ||
| 424 | 425 | ||
| 425 | for (i = 0; i < nr_controllers; i++) { | 426 | for (i = 0; i < nr_controllers; i++) { |
| 426 | unsigned long base, size; | 427 | unsigned long base, size; |
| @@ -450,12 +451,14 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
| 450 | continue; | 451 | continue; |
| 451 | } | 452 | } |
| 452 | 453 | ||
| 453 | if (cpu_is_omap2420()) | 454 | if (cpu_is_omap2420()) { |
| 454 | size = OMAP2420_MMC_SIZE; | 455 | size = OMAP2420_MMC_SIZE; |
| 455 | else | 456 | name = "mmci-omap"; |
| 457 | } else { | ||
| 456 | size = HSMMC_SIZE; | 458 | size = HSMMC_SIZE; |
| 457 | 459 | name = "mmci-omap-hs"; | |
| 458 | omap_mmc_add(i, base, size, irq, mmc_data[i]); | 460 | } |
| 461 | omap_mmc_add(name, i, base, size, irq, mmc_data[i]); | ||
| 459 | }; | 462 | }; |
| 460 | } | 463 | } |
| 461 | 464 | ||
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index b0f8e7d62798..b52a02fc7cd6 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
| @@ -172,9 +172,13 @@ void __init omap34xx_check_revision(void) | |||
| 172 | omap_revision = OMAP3430_REV_ES3_0; | 172 | omap_revision = OMAP3430_REV_ES3_0; |
| 173 | rev_name = "ES3.0"; | 173 | rev_name = "ES3.0"; |
| 174 | break; | 174 | break; |
| 175 | case 4: | ||
| 176 | omap_revision = OMAP3430_REV_ES3_1; | ||
| 177 | rev_name = "ES3.1"; | ||
| 178 | break; | ||
| 175 | default: | 179 | default: |
| 176 | /* Use the latest known revision as default */ | 180 | /* Use the latest known revision as default */ |
| 177 | omap_revision = OMAP3430_REV_ES3_0; | 181 | omap_revision = OMAP3430_REV_ES3_1; |
| 178 | rev_name = "Unknown revision\n"; | 182 | rev_name = "Unknown revision\n"; |
| 179 | } | 183 | } |
| 180 | } | 184 | } |
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 636e2821af7d..9ba20d985dda 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
| @@ -134,6 +134,7 @@ static struct irq_chip omap_irq_chip = { | |||
| 134 | .ack = omap_mask_ack_irq, | 134 | .ack = omap_mask_ack_irq, |
| 135 | .mask = omap_mask_irq, | 135 | .mask = omap_mask_irq, |
| 136 | .unmask = omap_unmask_irq, | 136 | .unmask = omap_unmask_irq, |
| 137 | .disable = omap_mask_irq, | ||
| 137 | }; | 138 | }; |
| 138 | 139 | ||
| 139 | static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank) | 140 | static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank) |
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index e20023c9d15d..a9e631fc1134 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
| @@ -24,106 +24,7 @@ | |||
| 24 | #include <mach/cpu.h> | 24 | #include <mach/cpu.h> |
| 25 | #include <mach/mcbsp.h> | 25 | #include <mach/mcbsp.h> |
| 26 | 26 | ||
| 27 | struct mcbsp_internal_clk { | 27 | const char *clk_names[] = { "mcbsp_ick", "mcbsp_fck" }; |
| 28 | struct clk clk; | ||
| 29 | struct clk **childs; | ||
| 30 | int n_childs; | ||
| 31 | }; | ||
| 32 | |||
| 33 | #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) | ||
| 34 | static void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) | ||
| 35 | { | ||
| 36 | const char *clk_names[] = { "mcbsp_ick", "mcbsp_fck" }; | ||
| 37 | int i; | ||
| 38 | |||
| 39 | mclk->n_childs = ARRAY_SIZE(clk_names); | ||
| 40 | mclk->childs = kzalloc(mclk->n_childs * sizeof(struct clk *), | ||
| 41 | GFP_KERNEL); | ||
| 42 | |||
| 43 | for (i = 0; i < mclk->n_childs; i++) { | ||
| 44 | /* We fake a platform device to get correct device id */ | ||
| 45 | struct platform_device pdev; | ||
| 46 | |||
| 47 | pdev.dev.bus = &platform_bus_type; | ||
| 48 | pdev.id = mclk->clk.id; | ||
| 49 | mclk->childs[i] = clk_get(&pdev.dev, clk_names[i]); | ||
| 50 | if (IS_ERR(mclk->childs[i])) | ||
| 51 | printk(KERN_ERR "Could not get clock %s (%d).\n", | ||
| 52 | clk_names[i], mclk->clk.id); | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | static int omap_mcbsp_clk_enable(struct clk *clk) | ||
| 57 | { | ||
| 58 | struct mcbsp_internal_clk *mclk = container_of(clk, | ||
| 59 | struct mcbsp_internal_clk, clk); | ||
| 60 | int i; | ||
| 61 | |||
| 62 | for (i = 0; i < mclk->n_childs; i++) | ||
| 63 | clk_enable(mclk->childs[i]); | ||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 67 | static void omap_mcbsp_clk_disable(struct clk *clk) | ||
| 68 | { | ||
| 69 | struct mcbsp_internal_clk *mclk = container_of(clk, | ||
| 70 | struct mcbsp_internal_clk, clk); | ||
| 71 | int i; | ||
| 72 | |||
| 73 | for (i = 0; i < mclk->n_childs; i++) | ||
| 74 | clk_disable(mclk->childs[i]); | ||
| 75 | } | ||
| 76 | |||
| 77 | static struct mcbsp_internal_clk omap_mcbsp_clks[] = { | ||
| 78 | { | ||
| 79 | .clk = { | ||
| 80 | .name = "mcbsp_clk", | ||
| 81 | .id = 1, | ||
| 82 | .enable = omap_mcbsp_clk_enable, | ||
| 83 | .disable = omap_mcbsp_clk_disable, | ||
| 84 | }, | ||
| 85 | }, | ||
| 86 | { | ||
| 87 | .clk = { | ||
| 88 | .name = "mcbsp_clk", | ||
| 89 | .id = 2, | ||
| 90 | .enable = omap_mcbsp_clk_enable, | ||
| 91 | .disable = omap_mcbsp_clk_disable, | ||
| 92 | }, | ||
| 93 | }, | ||
| 94 | { | ||
| 95 | .clk = { | ||
| 96 | .name = "mcbsp_clk", | ||
| 97 | .id = 3, | ||
| 98 | .enable = omap_mcbsp_clk_enable, | ||
| 99 | .disable = omap_mcbsp_clk_disable, | ||
| 100 | }, | ||
| 101 | }, | ||
| 102 | { | ||
| 103 | .clk = { | ||
| 104 | .name = "mcbsp_clk", | ||
| 105 | .id = 4, | ||
| 106 | .enable = omap_mcbsp_clk_enable, | ||
| 107 | .disable = omap_mcbsp_clk_disable, | ||
| 108 | }, | ||
| 109 | }, | ||
| 110 | { | ||
| 111 | .clk = { | ||
| 112 | .name = "mcbsp_clk", | ||
| 113 | .id = 5, | ||
| 114 | .enable = omap_mcbsp_clk_enable, | ||
| 115 | .disable = omap_mcbsp_clk_disable, | ||
| 116 | }, | ||
| 117 | }, | ||
| 118 | }; | ||
| 119 | |||
| 120 | #define omap_mcbsp_clks_size ARRAY_SIZE(omap_mcbsp_clks) | ||
| 121 | #else | ||
| 122 | #define omap_mcbsp_clks_size 0 | ||
| 123 | static struct mcbsp_internal_clk __initdata *omap_mcbsp_clks; | ||
| 124 | static inline void omap_mcbsp_clk_init(struct clk *clk) | ||
| 125 | { } | ||
| 126 | #endif | ||
| 127 | 28 | ||
| 128 | static void omap2_mcbsp2_mux_setup(void) | 29 | static void omap2_mcbsp2_mux_setup(void) |
| 129 | { | 30 | { |
| @@ -156,7 +57,8 @@ static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = { | |||
| 156 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, | 57 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, |
| 157 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, | 58 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, |
| 158 | .ops = &omap2_mcbsp_ops, | 59 | .ops = &omap2_mcbsp_ops, |
| 159 | .clk_name = "mcbsp_clk", | 60 | .clk_names = clk_names, |
| 61 | .num_clks = 2, | ||
| 160 | }, | 62 | }, |
| 161 | { | 63 | { |
| 162 | .phys_base = OMAP24XX_MCBSP2_BASE, | 64 | .phys_base = OMAP24XX_MCBSP2_BASE, |
| @@ -165,7 +67,8 @@ static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = { | |||
| 165 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, | 67 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, |
| 166 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, | 68 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, |
| 167 | .ops = &omap2_mcbsp_ops, | 69 | .ops = &omap2_mcbsp_ops, |
| 168 | .clk_name = "mcbsp_clk", | 70 | .clk_names = clk_names, |
| 71 | .num_clks = 2, | ||
| 169 | }, | 72 | }, |
| 170 | }; | 73 | }; |
| 171 | #define OMAP2420_MCBSP_PDATA_SZ ARRAY_SIZE(omap2420_mcbsp_pdata) | 74 | #define OMAP2420_MCBSP_PDATA_SZ ARRAY_SIZE(omap2420_mcbsp_pdata) |
| @@ -183,7 +86,8 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = { | |||
| 183 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, | 86 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, |
| 184 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, | 87 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, |
| 185 | .ops = &omap2_mcbsp_ops, | 88 | .ops = &omap2_mcbsp_ops, |
| 186 | .clk_name = "mcbsp_clk", | 89 | .clk_names = clk_names, |
| 90 | .num_clks = 2, | ||
| 187 | }, | 91 | }, |
| 188 | { | 92 | { |
| 189 | .phys_base = OMAP24XX_MCBSP2_BASE, | 93 | .phys_base = OMAP24XX_MCBSP2_BASE, |
| @@ -192,7 +96,8 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = { | |||
| 192 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, | 96 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, |
| 193 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, | 97 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, |
| 194 | .ops = &omap2_mcbsp_ops, | 98 | .ops = &omap2_mcbsp_ops, |
| 195 | .clk_name = "mcbsp_clk", | 99 | .clk_names = clk_names, |
| 100 | .num_clks = 2, | ||
| 196 | }, | 101 | }, |
| 197 | { | 102 | { |
| 198 | .phys_base = OMAP2430_MCBSP3_BASE, | 103 | .phys_base = OMAP2430_MCBSP3_BASE, |
| @@ -201,7 +106,8 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = { | |||
| 201 | .rx_irq = INT_24XX_MCBSP3_IRQ_RX, | 106 | .rx_irq = INT_24XX_MCBSP3_IRQ_RX, |
| 202 | .tx_irq = INT_24XX_MCBSP3_IRQ_TX, | 107 | .tx_irq = INT_24XX_MCBSP3_IRQ_TX, |
| 203 | .ops = &omap2_mcbsp_ops, | 108 | .ops = &omap2_mcbsp_ops, |
| 204 | .clk_name = "mcbsp_clk", | 109 | .clk_names = clk_names, |
| 110 | .num_clks = 2, | ||
| 205 | }, | 111 | }, |
| 206 | { | 112 | { |
| 207 | .phys_base = OMAP2430_MCBSP4_BASE, | 113 | .phys_base = OMAP2430_MCBSP4_BASE, |
| @@ -210,7 +116,8 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = { | |||
| 210 | .rx_irq = INT_24XX_MCBSP4_IRQ_RX, | 116 | .rx_irq = INT_24XX_MCBSP4_IRQ_RX, |
| 211 | .tx_irq = INT_24XX_MCBSP4_IRQ_TX, | 117 | .tx_irq = INT_24XX_MCBSP4_IRQ_TX, |
| 212 | .ops = &omap2_mcbsp_ops, | 118 | .ops = &omap2_mcbsp_ops, |
| 213 | .clk_name = "mcbsp_clk", | 119 | .clk_names = clk_names, |
| 120 | .num_clks = 2, | ||
| 214 | }, | 121 | }, |
| 215 | { | 122 | { |
| 216 | .phys_base = OMAP2430_MCBSP5_BASE, | 123 | .phys_base = OMAP2430_MCBSP5_BASE, |
| @@ -219,7 +126,8 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = { | |||
| 219 | .rx_irq = INT_24XX_MCBSP5_IRQ_RX, | 126 | .rx_irq = INT_24XX_MCBSP5_IRQ_RX, |
| 220 | .tx_irq = INT_24XX_MCBSP5_IRQ_TX, | 127 | .tx_irq = INT_24XX_MCBSP5_IRQ_TX, |
| 221 | .ops = &omap2_mcbsp_ops, | 128 | .ops = &omap2_mcbsp_ops, |
| 222 | .clk_name = "mcbsp_clk", | 129 | .clk_names = clk_names, |
| 130 | .num_clks = 2, | ||
| 223 | }, | 131 | }, |
| 224 | }; | 132 | }; |
| 225 | #define OMAP2430_MCBSP_PDATA_SZ ARRAY_SIZE(omap2430_mcbsp_pdata) | 133 | #define OMAP2430_MCBSP_PDATA_SZ ARRAY_SIZE(omap2430_mcbsp_pdata) |
| @@ -237,7 +145,8 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 237 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, | 145 | .rx_irq = INT_24XX_MCBSP1_IRQ_RX, |
| 238 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, | 146 | .tx_irq = INT_24XX_MCBSP1_IRQ_TX, |
| 239 | .ops = &omap2_mcbsp_ops, | 147 | .ops = &omap2_mcbsp_ops, |
| 240 | .clk_name = "mcbsp_clk", | 148 | .clk_names = clk_names, |
| 149 | .num_clks = 2, | ||
| 241 | }, | 150 | }, |
| 242 | { | 151 | { |
| 243 | .phys_base = OMAP34XX_MCBSP2_BASE, | 152 | .phys_base = OMAP34XX_MCBSP2_BASE, |
| @@ -246,7 +155,8 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 246 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, | 155 | .rx_irq = INT_24XX_MCBSP2_IRQ_RX, |
| 247 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, | 156 | .tx_irq = INT_24XX_MCBSP2_IRQ_TX, |
| 248 | .ops = &omap2_mcbsp_ops, | 157 | .ops = &omap2_mcbsp_ops, |
| 249 | .clk_name = "mcbsp_clk", | 158 | .clk_names = clk_names, |
| 159 | .num_clks = 2, | ||
| 250 | }, | 160 | }, |
| 251 | { | 161 | { |
| 252 | .phys_base = OMAP34XX_MCBSP3_BASE, | 162 | .phys_base = OMAP34XX_MCBSP3_BASE, |
| @@ -255,7 +165,8 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 255 | .rx_irq = INT_24XX_MCBSP3_IRQ_RX, | 165 | .rx_irq = INT_24XX_MCBSP3_IRQ_RX, |
| 256 | .tx_irq = INT_24XX_MCBSP3_IRQ_TX, | 166 | .tx_irq = INT_24XX_MCBSP3_IRQ_TX, |
| 257 | .ops = &omap2_mcbsp_ops, | 167 | .ops = &omap2_mcbsp_ops, |
| 258 | .clk_name = "mcbsp_clk", | 168 | .clk_names = clk_names, |
| 169 | .num_clks = 2, | ||
| 259 | }, | 170 | }, |
| 260 | { | 171 | { |
| 261 | .phys_base = OMAP34XX_MCBSP4_BASE, | 172 | .phys_base = OMAP34XX_MCBSP4_BASE, |
| @@ -264,7 +175,8 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 264 | .rx_irq = INT_24XX_MCBSP4_IRQ_RX, | 175 | .rx_irq = INT_24XX_MCBSP4_IRQ_RX, |
| 265 | .tx_irq = INT_24XX_MCBSP4_IRQ_TX, | 176 | .tx_irq = INT_24XX_MCBSP4_IRQ_TX, |
| 266 | .ops = &omap2_mcbsp_ops, | 177 | .ops = &omap2_mcbsp_ops, |
| 267 | .clk_name = "mcbsp_clk", | 178 | .clk_names = clk_names, |
| 179 | .num_clks = 2, | ||
| 268 | }, | 180 | }, |
| 269 | { | 181 | { |
| 270 | .phys_base = OMAP34XX_MCBSP5_BASE, | 182 | .phys_base = OMAP34XX_MCBSP5_BASE, |
| @@ -273,7 +185,8 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 273 | .rx_irq = INT_24XX_MCBSP5_IRQ_RX, | 185 | .rx_irq = INT_24XX_MCBSP5_IRQ_RX, |
| 274 | .tx_irq = INT_24XX_MCBSP5_IRQ_TX, | 186 | .tx_irq = INT_24XX_MCBSP5_IRQ_TX, |
| 275 | .ops = &omap2_mcbsp_ops, | 187 | .ops = &omap2_mcbsp_ops, |
| 276 | .clk_name = "mcbsp_clk", | 188 | .clk_names = clk_names, |
| 189 | .num_clks = 2, | ||
| 277 | }, | 190 | }, |
| 278 | }; | 191 | }; |
| 279 | #define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata) | 192 | #define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata) |
| @@ -284,14 +197,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { | |||
| 284 | 197 | ||
| 285 | static int __init omap2_mcbsp_init(void) | 198 | static int __init omap2_mcbsp_init(void) |
| 286 | { | 199 | { |
| 287 | int i; | ||
| 288 | |||
| 289 | for (i = 0; i < omap_mcbsp_clks_size; i++) { | ||
| 290 | /* Once we call clk_get inside init, we do not register it */ | ||
| 291 | omap_mcbsp_clk_init(&omap_mcbsp_clks[i]); | ||
| 292 | clk_register(&omap_mcbsp_clks[i].clk); | ||
| 293 | } | ||
| 294 | |||
| 295 | if (cpu_is_omap2420()) | 200 | if (cpu_is_omap2420()) |
| 296 | omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ; | 201 | omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ; |
| 297 | if (cpu_is_omap2430()) | 202 | if (cpu_is_omap2430()) |
diff --git a/arch/arm/mach-omap2/sleep24xx.S b/arch/arm/mach-omap2/sleep24xx.S
index 43336b93b21c..bf9e96105e11 100644
--- a/arch/arm/mach-omap2/sleep24xx.S
+++ b/arch/arm/mach-omap2/sleep24xx.S
| @@ -93,9 +93,8 @@ ENTRY(omap24xx_cpu_suspend) | |||
| 93 | orr r4, r4, #0x40 @ enable self refresh on idle req | 93 | orr r4, r4, #0x40 @ enable self refresh on idle req |
| 94 | mov r5, #0x2000 @ set delay (DPLL relock + DLL relock) | 94 | mov r5, #0x2000 @ set delay (DPLL relock + DLL relock) |
| 95 | str r4, [r2] @ make it so | 95 | str r4, [r2] @ make it so |
| 96 | mov r2, #0 | ||
| 97 | nop | 96 | nop |
| 98 | mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt | 97 | mcr p15, 0, r3, c7, c0, 4 @ wait for interrupt |
| 99 | nop | 98 | nop |
| 100 | loop: | 99 | loop: |
| 101 | subs r5, r5, #0x1 @ awake, wait just a bit | 100 | subs r5, r5, #0x1 @ awake, wait just a bit |
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index ae6036300f60..9fc13a2cc3f4 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
| @@ -118,7 +118,8 @@ static void __init omap2_gp_clockevent_init(void) | |||
| 118 | clockevent_gpt.max_delta_ns = | 118 | clockevent_gpt.max_delta_ns = |
| 119 | clockevent_delta2ns(0xffffffff, &clockevent_gpt); | 119 | clockevent_delta2ns(0xffffffff, &clockevent_gpt); |
| 120 | clockevent_gpt.min_delta_ns = | 120 | clockevent_gpt.min_delta_ns = |
| 121 | clockevent_delta2ns(1, &clockevent_gpt); | 121 | clockevent_delta2ns(3, &clockevent_gpt); |
| 122 | /* Timer internal resynch latency. */ | ||
| 122 | 123 | ||
| 123 | clockevent_gpt.cpumask = cpumask_of(0); | 124 | clockevent_gpt.cpumask = cpumask_of(0); |
| 124 | clockevents_register_device(&clockevent_gpt); | 125 | clockevents_register_device(&clockevent_gpt); |
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index c1fbd5b5f9c4..23cfdd593954 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
| @@ -289,7 +289,7 @@ static struct platform_device sa11x0pcmcia_device = { | |||
| 289 | }; | 289 | }; |
| 290 | 290 | ||
| 291 | static struct platform_device sa11x0mtd_device = { | 291 | static struct platform_device sa11x0mtd_device = { |
| 292 | .name = "flash", | 292 | .name = "sa1100-mtd", |
| 293 | .id = -1, | 293 | .id = -1, |
| 294 | }; | 294 | }; |
| 295 | 295 | ||
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 81d0b8772de3..bc0099d5ae85 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
| @@ -66,7 +66,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) | |||
| 66 | * fault (ie, is old), we can safely ignore any issues. | 66 | * fault (ie, is old), we can safely ignore any issues. |
| 67 | */ | 67 | */ |
| 68 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { | 68 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { |
| 69 | flush_cache_page(vma, address, pte_pfn(entry)); | 69 | unsigned long pfn = pte_pfn(entry); |
| 70 | flush_cache_page(vma, address, pfn); | ||
| 71 | outer_flush_range((pfn << PAGE_SHIFT), | ||
| 72 | (pfn << PAGE_SHIFT) + PAGE_SIZE); | ||
| 70 | pte_val(entry) &= ~L_PTE_MT_MASK; | 73 | pte_val(entry) &= ~L_PTE_MT_MASK; |
| 71 | pte_val(entry) |= shared_pte_mask; | 74 | pte_val(entry) |= shared_pte_mask; |
| 72 | set_pte_at(vma->vm_mm, address, pte, entry); | 75 | set_pte_at(vma->vm_mm, address, pte, entry); |
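The change above complements flush_cache_page() with an outer-cache flush. As a rough sketch of the reasoning, assuming a platform with an outer (L2) cache: flush_cache_page() only covers the inner caches, so the page's physical range must also be pushed out of the outer cache before the memory type is downgraded to the shared mapping. A condensed helper-style sketch (the function wrapper is hypothetical; the calls mirror adjust_pte() above):

/* Flush one user page from both cache levels before its attributes
 * change; vma/address/entry come from the surrounding page-table walk. */
static void flush_page_all_levels(struct vm_area_struct *vma,
				  unsigned long address, pte_t entry)
{
	unsigned long pfn  = pte_pfn(entry);
	unsigned long phys = pfn << PAGE_SHIFT;

	flush_cache_page(vma, address, pfn);		/* inner (L1) caches */
	outer_flush_range(phys, phys + PAGE_SIZE);	/* outer (L2) cache */
}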
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index ac15c23fd5da..208dbb121f47 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
| @@ -200,14 +200,15 @@ void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config, | |||
| 200 | /* | 200 | /* |
| 201 | * Register MMC devices. Called from mach-omap1 and mach-omap2 device init. | 201 | * Register MMC devices. Called from mach-omap1 and mach-omap2 device init. |
| 202 | */ | 202 | */ |
| 203 | int __init omap_mmc_add(int id, unsigned long base, unsigned long size, | 203 | int __init omap_mmc_add(const char *name, int id, unsigned long base, |
| 204 | unsigned int irq, struct omap_mmc_platform_data *data) | 204 | unsigned long size, unsigned int irq, |
| 205 | struct omap_mmc_platform_data *data) | ||
| 205 | { | 206 | { |
| 206 | struct platform_device *pdev; | 207 | struct platform_device *pdev; |
| 207 | struct resource res[OMAP_MMC_NR_RES]; | 208 | struct resource res[OMAP_MMC_NR_RES]; |
| 208 | int ret; | 209 | int ret; |
| 209 | 210 | ||
| 210 | pdev = platform_device_alloc("mmci-omap", id); | 211 | pdev = platform_device_alloc(name, id); |
| 211 | if (!pdev) | 212 | if (!pdev) |
| 212 | return -ENOMEM; | 213 | return -ENOMEM; |
| 213 | 214 | ||
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e77373c39f8c..47ec77af4ccb 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
| @@ -709,6 +709,7 @@ int omap_request_dma(int dev_id, const char *dev_name, | |||
| 709 | chan->dev_name = dev_name; | 709 | chan->dev_name = dev_name; |
| 710 | chan->callback = callback; | 710 | chan->callback = callback; |
| 711 | chan->data = data; | 711 | chan->data = data; |
| 712 | chan->flags = 0; | ||
| 712 | 713 | ||
| 713 | #ifndef CONFIG_ARCH_OMAP1 | 714 | #ifndef CONFIG_ARCH_OMAP1 |
| 714 | if (cpu_class_is_omap2()) { | 715 | if (cpu_class_is_omap2()) { |
| @@ -1888,11 +1889,11 @@ static int omap2_dma_handle_ch(int ch) | |||
| 1888 | status = dma_read(CSR(ch)); | 1889 | status = dma_read(CSR(ch)); |
| 1889 | } | 1890 | } |
| 1890 | 1891 | ||
| 1892 | dma_write(status, CSR(ch)); | ||
| 1893 | |||
| 1891 | if (likely(dma_chan[ch].callback != NULL)) | 1894 | if (likely(dma_chan[ch].callback != NULL)) |
| 1892 | dma_chan[ch].callback(ch, status, dma_chan[ch].data); | 1895 | dma_chan[ch].callback(ch, status, dma_chan[ch].data); |
| 1893 | 1896 | ||
| 1894 | dma_write(status, CSR(ch)); | ||
| 1895 | |||
| 1896 | return 0; | 1897 | return 0; |
| 1897 | } | 1898 | } |
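The reordering above acknowledges the channel status in CSR before the callback runs: if the callback immediately re-programs and restarts the channel, writing the old status afterwards could ack an event that belongs to the new transfer. A hedged sketch of a client using the omap_request_dma() API from this tree (the device id, names, and the specific IRQ bit tested are illustrative):

static void example_dma_callback(int lch, u16 ch_status, void *data)
{
	/* CSR has already been written back by omap2_dma_handle_ch(),
	 * so restarting the channel here cannot race with that ack. */
	if (ch_status & OMAP_DMA_BLOCK_IRQ)
		omap_start_dma(lch);
}

static int example_dma_setup(int dev_id)
{
	int lch, ret;

	ret = omap_request_dma(dev_id, "example-dev",
			       example_dma_callback, NULL, &lch);
	return ret ? ret : lch;
}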
| 1898 | 1899 | ||
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index b2062f1175de..a8e1178a9468 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
| @@ -339,6 +339,7 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
| 339 | #define OMAP3430_REV_ES2_0 0x34301034 | 339 | #define OMAP3430_REV_ES2_0 0x34301034 |
| 340 | #define OMAP3430_REV_ES2_1 0x34302034 | 340 | #define OMAP3430_REV_ES2_1 0x34302034 |
| 341 | #define OMAP3430_REV_ES3_0 0x34303034 | 341 | #define OMAP3430_REV_ES3_0 0x34303034 |
| 342 | #define OMAP3430_REV_ES3_1 0x34304034 | ||
| 342 | 343 | ||
| 343 | /* | 344 | /* |
| 344 | * omap_chip bits | 345 | * omap_chip bits |
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index eef873db3d48..113c2466c86a 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
| @@ -344,7 +344,8 @@ struct omap_mcbsp_platform_data { | |||
| 344 | u8 dma_rx_sync, dma_tx_sync; | 344 | u8 dma_rx_sync, dma_tx_sync; |
| 345 | u16 rx_irq, tx_irq; | 345 | u16 rx_irq, tx_irq; |
| 346 | struct omap_mcbsp_ops *ops; | 346 | struct omap_mcbsp_ops *ops; |
| 347 | char const *clk_name; | 347 | char const **clk_names; |
| 348 | int num_clks; | ||
| 348 | }; | 349 | }; |
| 349 | 350 | ||
| 350 | struct omap_mcbsp { | 351 | struct omap_mcbsp { |
| @@ -376,7 +377,8 @@ struct omap_mcbsp { | |||
| 376 | /* Protect the field .free, while checking if the mcbsp is in use */ | 377 | /* Protect the field .free, while checking if the mcbsp is in use */ |
| 377 | spinlock_t lock; | 378 | spinlock_t lock; |
| 378 | struct omap_mcbsp_platform_data *pdata; | 379 | struct omap_mcbsp_platform_data *pdata; |
| 379 | struct clk *clk; | 380 | struct clk **clks; |
| 381 | int num_clks; | ||
| 380 | }; | 382 | }; |
| 381 | extern struct omap_mcbsp **mcbsp_ptr; | 383 | extern struct omap_mcbsp **mcbsp_ptr; |
| 382 | extern int omap_mcbsp_count; | 384 | extern int omap_mcbsp_count; |
diff --git a/arch/arm/plat-omap/include/mach/mmc.h b/arch/arm/plat-omap/include/mach/mmc.h
index 031250f02805..73a9e15031b1 100644
--- a/arch/arm/plat-omap/include/mach/mmc.h
+++ b/arch/arm/plat-omap/include/mach/mmc.h
| @@ -115,8 +115,9 @@ void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
| 115 | int nr_controllers); | 115 | int nr_controllers); |
| 116 | void omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, | 116 | void omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, |
| 117 | int nr_controllers); | 117 | int nr_controllers); |
| 118 | int omap_mmc_add(int id, unsigned long base, unsigned long size, | 118 | int omap_mmc_add(const char *name, int id, unsigned long base, |
| 119 | unsigned int irq, struct omap_mmc_platform_data *data); | 119 | unsigned long size, unsigned int irq, |
| 120 | struct omap_mmc_platform_data *data); | ||
| 120 | #else | 121 | #else |
| 121 | static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, | 122 | static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, |
| 122 | int nr_controllers) | 123 | int nr_controllers) |
| @@ -126,8 +127,9 @@ static inline void omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
| 126 | int nr_controllers) | 127 | int nr_controllers) |
| 127 | { | 128 | { |
| 128 | } | 129 | } |
| 129 | static inline int omap_mmc_add(int id, unsigned long base, unsigned long size, | 130 | static inline int omap_mmc_add(const char *name, int id, unsigned long base, |
| 130 | unsigned int irq, struct omap_mmc_platform_data *data) | 131 | unsigned long size, unsigned int irq, |
| 132 | struct omap_mmc_platform_data *data) | ||
| 131 | { | 133 | { |
| 132 | return 0; | 134 | return 0; |
| 133 | } | 135 | } |
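Taken together with the mach-omap1 and mach-omap2 hunks earlier in the series, the new first argument lets each caller choose which platform driver binds to the controller. A condensed sketch of the two call sites (values such as base, size and irq come from the callers' existing setup code):

/* OMAP1: always the legacy controller */
omap_mmc_add("mmci-omap", i, base, OMAP1_MMC_SIZE, irq, mmc_data[i]);

/* OMAP2: pick the driver name per SoC */
if (cpu_is_omap2420())
	omap_mmc_add("mmci-omap", i, base, OMAP2420_MMC_SIZE, irq, mmc_data[i]);
else
	omap_mmc_add("mmci-omap-hs", i, base, HSMMC_SIZE, irq, mmc_data[i]);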
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index f2401a831f99..e5842e30e534 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
| @@ -214,6 +214,7 @@ EXPORT_SYMBOL(omap_mcbsp_set_io_type); | |||
| 214 | int omap_mcbsp_request(unsigned int id) | 214 | int omap_mcbsp_request(unsigned int id) |
| 215 | { | 215 | { |
| 216 | struct omap_mcbsp *mcbsp; | 216 | struct omap_mcbsp *mcbsp; |
| 217 | int i; | ||
| 217 | int err; | 218 | int err; |
| 218 | 219 | ||
| 219 | if (!omap_mcbsp_check_valid_id(id)) { | 220 | if (!omap_mcbsp_check_valid_id(id)) { |
| @@ -225,7 +226,8 @@ int omap_mcbsp_request(unsigned int id) | |||
| 225 | if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->request) | 226 | if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->request) |
| 226 | mcbsp->pdata->ops->request(id); | 227 | mcbsp->pdata->ops->request(id); |
| 227 | 228 | ||
| 228 | clk_enable(mcbsp->clk); | 229 | for (i = 0; i < mcbsp->num_clks; i++) |
| 230 | clk_enable(mcbsp->clks[i]); | ||
| 229 | 231 | ||
| 230 | spin_lock(&mcbsp->lock); | 232 | spin_lock(&mcbsp->lock); |
| 231 | if (!mcbsp->free) { | 233 | if (!mcbsp->free) { |
| @@ -276,6 +278,7 @@ EXPORT_SYMBOL(omap_mcbsp_request); | |||
| 276 | void omap_mcbsp_free(unsigned int id) | 278 | void omap_mcbsp_free(unsigned int id) |
| 277 | { | 279 | { |
| 278 | struct omap_mcbsp *mcbsp; | 280 | struct omap_mcbsp *mcbsp; |
| 281 | int i; | ||
| 279 | 282 | ||
| 280 | if (!omap_mcbsp_check_valid_id(id)) { | 283 | if (!omap_mcbsp_check_valid_id(id)) { |
| 281 | printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); | 284 | printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); |
| @@ -286,7 +289,8 @@ void omap_mcbsp_free(unsigned int id) | |||
| 286 | if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) | 289 | if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) |
| 287 | mcbsp->pdata->ops->free(id); | 290 | mcbsp->pdata->ops->free(id); |
| 288 | 291 | ||
| 289 | clk_disable(mcbsp->clk); | 292 | for (i = mcbsp->num_clks - 1; i >= 0; i--) |
| 293 | clk_disable(mcbsp->clks[i]); | ||
| 290 | 294 | ||
| 291 | spin_lock(&mcbsp->lock); | 295 | spin_lock(&mcbsp->lock); |
| 292 | if (mcbsp->free) { | 296 | if (mcbsp->free) { |
| @@ -872,6 +876,7 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev) | |||
| 872 | struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data; | 876 | struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data; |
| 873 | struct omap_mcbsp *mcbsp; | 877 | struct omap_mcbsp *mcbsp; |
| 874 | int id = pdev->id - 1; | 878 | int id = pdev->id - 1; |
| 879 | int i; | ||
| 875 | int ret = 0; | 880 | int ret = 0; |
| 876 | 881 | ||
| 877 | if (!pdata) { | 882 | if (!pdata) { |
| @@ -916,14 +921,25 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev) | |||
| 916 | mcbsp->dma_rx_sync = pdata->dma_rx_sync; | 921 | mcbsp->dma_rx_sync = pdata->dma_rx_sync; |
| 917 | mcbsp->dma_tx_sync = pdata->dma_tx_sync; | 922 | mcbsp->dma_tx_sync = pdata->dma_tx_sync; |
| 918 | 923 | ||
| 919 | if (pdata->clk_name) | 924 | if (pdata->num_clks) { |
| 920 | mcbsp->clk = clk_get(&pdev->dev, pdata->clk_name); | 925 | mcbsp->num_clks = pdata->num_clks; |
| 921 | if (IS_ERR(mcbsp->clk)) { | 926 | mcbsp->clks = kzalloc(mcbsp->num_clks * sizeof(struct clk *), |
| 922 | dev_err(&pdev->dev, | 927 | GFP_KERNEL); |
| 923 | "Invalid clock configuration for McBSP%d.\n", | 928 | if (!mcbsp->clks) { |
| 924 | mcbsp->id); | 929 | ret = -ENOMEM; |
| 925 | ret = PTR_ERR(mcbsp->clk); | 930 | goto exit; |
| 926 | goto err_clk; | 931 | } |
| 932 | for (i = 0; i < mcbsp->num_clks; i++) { | ||
| 933 | mcbsp->clks[i] = clk_get(&pdev->dev, pdata->clk_names[i]); | ||
| 934 | if (IS_ERR(mcbsp->clks[i])) { | ||
| 935 | dev_err(&pdev->dev, | ||
| 936 | "Invalid %s configuration for McBSP%d.\n", | ||
| 937 | pdata->clk_names[i], mcbsp->id); | ||
| 938 | ret = PTR_ERR(mcbsp->clks[i]); | ||
| 939 | goto err_clk; | ||
| 940 | } | ||
| 941 | } | ||
| 942 | |||
| 927 | } | 943 | } |
| 928 | 944 | ||
| 929 | mcbsp->pdata = pdata; | 945 | mcbsp->pdata = pdata; |
| @@ -932,6 +948,9 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev) | |||
| 932 | return 0; | 948 | return 0; |
| 933 | 949 | ||
| 934 | err_clk: | 950 | err_clk: |
| 951 | while (i--) | ||
| 952 | clk_put(mcbsp->clks[i]); | ||
| 953 | kfree(mcbsp->clks); | ||
| 935 | iounmap(mcbsp->io_base); | 954 | iounmap(mcbsp->io_base); |
| 936 | err_ioremap: | 955 | err_ioremap: |
| 937 | mcbsp->free = 0; | 956 | mcbsp->free = 0; |
| @@ -942,6 +961,7 @@ exit: | |||
| 942 | static int __devexit omap_mcbsp_remove(struct platform_device *pdev) | 961 | static int __devexit omap_mcbsp_remove(struct platform_device *pdev) |
| 943 | { | 962 | { |
| 944 | struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); | 963 | struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); |
| 964 | int i; | ||
| 945 | 965 | ||
| 946 | platform_set_drvdata(pdev, NULL); | 966 | platform_set_drvdata(pdev, NULL); |
| 947 | if (mcbsp) { | 967 | if (mcbsp) { |
| @@ -950,12 +970,18 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev) | |||
| 950 | mcbsp->pdata->ops->free) | 970 | mcbsp->pdata->ops->free) |
| 951 | mcbsp->pdata->ops->free(mcbsp->id); | 971 | mcbsp->pdata->ops->free(mcbsp->id); |
| 952 | 972 | ||
| 953 | clk_disable(mcbsp->clk); | 973 | for (i = mcbsp->num_clks - 1; i >= 0; i--) { |
| 954 | clk_put(mcbsp->clk); | 974 | clk_disable(mcbsp->clks[i]); |
| 975 | clk_put(mcbsp->clks[i]); | ||
| 976 | } | ||
| 955 | 977 | ||
| 956 | iounmap(mcbsp->io_base); | 978 | iounmap(mcbsp->io_base); |
| 957 | 979 | ||
| 958 | mcbsp->clk = NULL; | 980 | if (mcbsp->num_clks) { |
| 981 | kfree(mcbsp->clks); | ||
| 982 | mcbsp->clks = NULL; | ||
| 983 | mcbsp->num_clks = 0; | ||
| 984 | } | ||
| 959 | mcbsp->free = 0; | 985 | mcbsp->free = 0; |
| 960 | mcbsp->dev = NULL; | 986 | mcbsp->dev = NULL; |
| 961 | } | 987 | } |
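The probe path above now acquires an array of clocks and, on failure, releases the ones already obtained with a "while (i--)" unwind. A generic sketch of that idiom, detached from McBSP (function and parameter names are placeholders):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Acquire num_clks clocks by name; on error, release the ones already
 * obtained (newest first) and return the failure code. */
static int example_get_clks(struct device *dev, const char **names,
			    struct clk **clks, int num_clks)
{
	int i;

	for (i = 0; i < num_clks; i++) {
		clks[i] = clk_get(dev, names[i]);
		if (IS_ERR(clks[i])) {
			int err = PTR_ERR(clks[i]);

			while (i--)
				clk_put(clks[i]);
			return err;
		}
	}
	return 0;
}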
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 77bba4c083cb..a603bbf9b1b7 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
| @@ -61,9 +61,14 @@ | |||
| 61 | #define EM_MSG_LED_VALUE_ON 0x00010000 | 61 | #define EM_MSG_LED_VALUE_ON 0x00010000 |
| 62 | 62 | ||
| 63 | static int ahci_skip_host_reset; | 63 | static int ahci_skip_host_reset; |
| 64 | static int ahci_ignore_sss; | ||
| 65 | |||
| 64 | module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); | 66 | module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); |
| 65 | MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); | 67 | MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); |
| 66 | 68 | ||
| 69 | module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); | ||
| 70 | MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); | ||
| 71 | |||
| 67 | static int ahci_enable_alpm(struct ata_port *ap, | 72 | static int ahci_enable_alpm(struct ata_port *ap, |
| 68 | enum link_pm policy); | 73 | enum link_pm policy); |
| 69 | static void ahci_disable_alpm(struct ata_port *ap); | 74 | static void ahci_disable_alpm(struct ata_port *ap); |
| @@ -2692,8 +2697,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2692 | host->iomap = pcim_iomap_table(pdev); | 2697 | host->iomap = pcim_iomap_table(pdev); |
| 2693 | host->private_data = hpriv; | 2698 | host->private_data = hpriv; |
| 2694 | 2699 | ||
| 2695 | if (!(hpriv->cap & HOST_CAP_SSS)) | 2700 | if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) |
| 2696 | host->flags |= ATA_HOST_PARALLEL_SCAN; | 2701 | host->flags |= ATA_HOST_PARALLEL_SCAN; |
| 2702 | else | ||
| 2703 | printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); | ||
| 2697 | 2704 | ||
| 2698 | if (pi.flags & ATA_FLAG_EM) | 2705 | if (pi.flags & ATA_FLAG_EM) |
| 2699 | ahci_reset_em(host); | 2706 | ahci_reset_em(host); |
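For reference, the new knob follows the usual module_param_named() pattern: the C variable ahci_ignore_sss is exposed under the shorter name ignore_sss, so it would be set with "modprobe ahci ignore_sss=1" or, with a built-in driver, "ahci.ignore_sss=1" on the kernel command line. A minimal sketch of the pattern with placeholder names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_skip_check;
module_param_named(skip_check, example_skip_check, int, 0444);
MODULE_PARM_DESC(skip_check, "Skip the example check (0=don't skip, 1=skip)");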
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 88c242856dae..9fbf0595f3d4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
| @@ -164,6 +164,11 @@ MODULE_LICENSE("GPL"); | |||
| 164 | MODULE_VERSION(DRV_VERSION); | 164 | MODULE_VERSION(DRV_VERSION); |
| 165 | 165 | ||
| 166 | 166 | ||
| 167 | static bool ata_sstatus_online(u32 sstatus) | ||
| 168 | { | ||
| 169 | return (sstatus & 0xf) == 0x3; | ||
| 170 | } | ||
| 171 | |||
| 167 | /** | 172 | /** |
| 168 | * ata_link_next - link iteration helper | 173 | * ata_link_next - link iteration helper |
| 169 | * @link: the previous link, NULL to start | 174 | * @link: the previous link, NULL to start |
| @@ -1015,18 +1020,6 @@ static const char *sata_spd_string(unsigned int spd) | |||
| 1015 | return spd_str[spd - 1]; | 1020 | return spd_str[spd - 1]; |
| 1016 | } | 1021 | } |
| 1017 | 1022 | ||
| 1018 | void ata_dev_disable(struct ata_device *dev) | ||
| 1019 | { | ||
| 1020 | if (ata_dev_enabled(dev)) { | ||
| 1021 | if (ata_msg_drv(dev->link->ap)) | ||
| 1022 | ata_dev_printk(dev, KERN_WARNING, "disabled\n"); | ||
| 1023 | ata_acpi_on_disable(dev); | ||
| 1024 | ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | | ||
| 1025 | ATA_DNXFER_QUIET); | ||
| 1026 | dev->class++; | ||
| 1027 | } | ||
| 1028 | } | ||
| 1029 | |||
| 1030 | static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) | 1023 | static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) |
| 1031 | { | 1024 | { |
| 1032 | struct ata_link *link = dev->link; | 1025 | struct ata_link *link = dev->link; |
| @@ -2239,6 +2232,40 @@ retry: | |||
| 2239 | return rc; | 2232 | return rc; |
| 2240 | } | 2233 | } |
| 2241 | 2234 | ||
| 2235 | static int ata_do_link_spd_horkage(struct ata_device *dev) | ||
| 2236 | { | ||
| 2237 | struct ata_link *plink = ata_dev_phys_link(dev); | ||
| 2238 | u32 target, target_limit; | ||
| 2239 | |||
| 2240 | if (!sata_scr_valid(plink)) | ||
| 2241 | return 0; | ||
| 2242 | |||
| 2243 | if (dev->horkage & ATA_HORKAGE_1_5_GBPS) | ||
| 2244 | target = 1; | ||
| 2245 | else | ||
| 2246 | return 0; | ||
| 2247 | |||
| 2248 | target_limit = (1 << target) - 1; | ||
| 2249 | |||
| 2250 | /* if already on stricter limit, no need to push further */ | ||
| 2251 | if (plink->sata_spd_limit <= target_limit) | ||
| 2252 | return 0; | ||
| 2253 | |||
| 2254 | plink->sata_spd_limit = target_limit; | ||
| 2255 | |||
| 2256 | /* Request another EH round by returning -EAGAIN if link is | ||
| 2257 | * going faster than the target speed. Forward progress is | ||
| 2258 | * guaranteed by setting sata_spd_limit to target_limit above. | ||
| 2259 | */ | ||
| 2260 | if (plink->sata_spd > target) { | ||
| 2261 | ata_dev_printk(dev, KERN_INFO, | ||
| 2262 | "applying link speed limit horkage to %s\n", | ||
| 2263 | sata_spd_string(target)); | ||
| 2264 | return -EAGAIN; | ||
| 2265 | } | ||
| 2266 | return 0; | ||
| 2267 | } | ||
| 2268 | |||
| 2242 | static inline u8 ata_dev_knobble(struct ata_device *dev) | 2269 | static inline u8 ata_dev_knobble(struct ata_device *dev) |
| 2243 | { | 2270 | { |
| 2244 | struct ata_port *ap = dev->link->ap; | 2271 | struct ata_port *ap = dev->link->ap; |
| @@ -2329,6 +2356,10 @@ int ata_dev_configure(struct ata_device *dev) | |||
| 2329 | return 0; | 2356 | return 0; |
| 2330 | } | 2357 | } |
| 2331 | 2358 | ||
| 2359 | rc = ata_do_link_spd_horkage(dev); | ||
| 2360 | if (rc) | ||
| 2361 | return rc; | ||
| 2362 | |||
| 2332 | /* let ACPI work its magic */ | 2363 | /* let ACPI work its magic */ |
| 2333 | rc = ata_acpi_on_devcfg(dev); | 2364 | rc = ata_acpi_on_devcfg(dev); |
| 2334 | if (rc) | 2365 | if (rc) |
| @@ -2784,7 +2815,7 @@ int ata_bus_probe(struct ata_port *ap) | |||
| 2784 | /* This is the last chance, better to slow | 2815 | /* This is the last chance, better to slow |
| 2785 | * down than lose it. | 2816 | * down than lose it. |
| 2786 | */ | 2817 | */ |
| 2787 | sata_down_spd_limit(&ap->link); | 2818 | sata_down_spd_limit(&ap->link, 0); |
| 2788 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); | 2819 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); |
| 2789 | } | 2820 | } |
| 2790 | } | 2821 | } |
| @@ -2880,21 +2911,27 @@ void ata_port_disable(struct ata_port *ap) | |||
| 2880 | /** | 2911 | /** |
| 2881 | * sata_down_spd_limit - adjust SATA spd limit downward | 2912 | * sata_down_spd_limit - adjust SATA spd limit downward |
| 2882 | * @link: Link to adjust SATA spd limit for | 2913 | * @link: Link to adjust SATA spd limit for |
| 2914 | * @spd_limit: Additional limit | ||
| 2883 | * | 2915 | * |
| 2884 | * Adjust SATA spd limit of @link downward. Note that this | 2916 | * Adjust SATA spd limit of @link downward. Note that this |
| 2885 | * function only adjusts the limit. The change must be applied | 2917 | * function only adjusts the limit. The change must be applied |
| 2886 | * using sata_set_spd(). | 2918 | * using sata_set_spd(). |
| 2887 | * | 2919 | * |
| 2920 | * If @spd_limit is non-zero, the speed is limited to equal to or | ||
| 2921 | * lower than @spd_limit if such speed is supported. If | ||
| 2922 | * @spd_limit is slower than any supported speed, only the lowest | ||
| 2923 | * supported speed is allowed. | ||
| 2924 | * | ||
| 2888 | * LOCKING: | 2925 | * LOCKING: |
| 2889 | * Inherited from caller. | 2926 | * Inherited from caller. |
| 2890 | * | 2927 | * |
| 2891 | * RETURNS: | 2928 | * RETURNS: |
| 2892 | * 0 on success, negative errno on failure | 2929 | * 0 on success, negative errno on failure |
| 2893 | */ | 2930 | */ |
| 2894 | int sata_down_spd_limit(struct ata_link *link) | 2931 | int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) |
| 2895 | { | 2932 | { |
| 2896 | u32 sstatus, spd, mask; | 2933 | u32 sstatus, spd, mask; |
| 2897 | int rc, highbit; | 2934 | int rc, bit; |
| 2898 | 2935 | ||
| 2899 | if (!sata_scr_valid(link)) | 2936 | if (!sata_scr_valid(link)) |
| 2900 | return -EOPNOTSUPP; | 2937 | return -EOPNOTSUPP; |
| @@ -2903,7 +2940,7 @@ int sata_down_spd_limit(struct ata_link *link) | |||
| 2903 | * If not, use cached value in link->sata_spd. | 2940 | * If not, use cached value in link->sata_spd. |
| 2904 | */ | 2941 | */ |
| 2905 | rc = sata_scr_read(link, SCR_STATUS, &sstatus); | 2942 | rc = sata_scr_read(link, SCR_STATUS, &sstatus); |
| 2906 | if (rc == 0) | 2943 | if (rc == 0 && ata_sstatus_online(sstatus)) |
| 2907 | spd = (sstatus >> 4) & 0xf; | 2944 | spd = (sstatus >> 4) & 0xf; |
| 2908 | else | 2945 | else |
| 2909 | spd = link->sata_spd; | 2946 | spd = link->sata_spd; |
| @@ -2913,8 +2950,8 @@ int sata_down_spd_limit(struct ata_link *link) | |||
| 2913 | return -EINVAL; | 2950 | return -EINVAL; |
| 2914 | 2951 | ||
| 2915 | /* unconditionally mask off the highest bit */ | 2952 | /* unconditionally mask off the highest bit */ |
| 2916 | highbit = fls(mask) - 1; | 2953 | bit = fls(mask) - 1; |
| 2917 | mask &= ~(1 << highbit); | 2954 | mask &= ~(1 << bit); |
| 2918 | 2955 | ||
| 2919 | /* Mask off all speeds higher than or equal to the current | 2956 | /* Mask off all speeds higher than or equal to the current |
| 2920 | * one. Force 1.5Gbps if current SPD is not available. | 2957 | * one. Force 1.5Gbps if current SPD is not available. |
| @@ -2928,6 +2965,15 @@ int sata_down_spd_limit(struct ata_link *link) | |||
| 2928 | if (!mask) | 2965 | if (!mask) |
| 2929 | return -EINVAL; | 2966 | return -EINVAL; |
| 2930 | 2967 | ||
| 2968 | if (spd_limit) { | ||
| 2969 | if (mask & ((1 << spd_limit) - 1)) | ||
| 2970 | mask &= (1 << spd_limit) - 1; | ||
| 2971 | else { | ||
| 2972 | bit = ffs(mask) - 1; | ||
| 2973 | mask = 1 << bit; | ||
| 2974 | } | ||
| 2975 | } | ||
| 2976 | |||
| 2931 | link->sata_spd_limit = mask; | 2977 | link->sata_spd_limit = mask; |
| 2932 | 2978 | ||
| 2933 | ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", | 2979 | ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", |
| @@ -4215,6 +4261,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4215 | /* Devices that do not need bridging limits applied */ | 4261 | /* Devices that do not need bridging limits applied */ |
| 4216 | { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, | 4262 | { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, |
| 4217 | 4263 | ||
| 4264 | /* Devices which aren't very happy with higher link speeds */ | ||
| 4265 | { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, | ||
| 4266 | |||
| 4218 | /* End Marker */ | 4267 | /* End Marker */ |
| 4219 | { } | 4268 | { } |
| 4220 | }; | 4269 | }; |
| @@ -4709,8 +4758,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) | |||
| 4709 | 4758 | ||
| 4710 | /** | 4759 | /** |
| 4711 | * ata_qc_new - Request an available ATA command, for queueing | 4760 | * ata_qc_new - Request an available ATA command, for queueing |
| 4712 | * @ap: Port associated with device @dev | 4761 | * @ap: target port |
| 4713 | * @dev: Device from whom we request an available command structure | ||
| 4714 | * | 4762 | * |
| 4715 | * LOCKING: | 4763 | * LOCKING: |
| 4716 | * None. | 4764 | * None. |
| @@ -5175,7 +5223,7 @@ bool ata_phys_link_online(struct ata_link *link) | |||
| 5175 | u32 sstatus; | 5223 | u32 sstatus; |
| 5176 | 5224 | ||
| 5177 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && | 5225 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
| 5178 | (sstatus & 0xf) == 0x3) | 5226 | ata_sstatus_online(sstatus)) |
| 5179 | return true; | 5227 | return true; |
| 5180 | return false; | 5228 | return false; |
| 5181 | } | 5229 | } |
| @@ -5199,7 +5247,7 @@ bool ata_phys_link_offline(struct ata_link *link) | |||
| 5199 | u32 sstatus; | 5247 | u32 sstatus; |
| 5200 | 5248 | ||
| 5201 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && | 5249 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
| 5202 | (sstatus & 0xf) != 0x3) | 5250 | !ata_sstatus_online(sstatus)) |
| 5203 | return true; | 5251 | return true; |
| 5204 | return false; | 5252 | return false; |
| 5205 | } | 5253 | } |
| @@ -5412,8 +5460,8 @@ void ata_dev_init(struct ata_device *dev) | |||
| 5412 | dev->horkage = 0; | 5460 | dev->horkage = 0; |
| 5413 | spin_unlock_irqrestore(ap->lock, flags); | 5461 | spin_unlock_irqrestore(ap->lock, flags); |
| 5414 | 5462 | ||
| 5415 | memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, | 5463 | memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, |
| 5416 | sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); | 5464 | ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); |
| 5417 | dev->pio_mask = UINT_MAX; | 5465 | dev->pio_mask = UINT_MAX; |
| 5418 | dev->mwdma_mask = UINT_MAX; | 5466 | dev->mwdma_mask = UINT_MAX; |
| 5419 | dev->udma_mask = UINT_MAX; | 5467 | dev->udma_mask = UINT_MAX; |
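A worked example of the new spd_limit clamp in sata_down_spd_limit(), with hypothetical values: if the mask of still-permitted speeds is 0x3 (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps) and the caller passes spd_limit = 1, as ata_eh_schedule_probe() does after too many failed probe trials, only 1.5 Gbps remains; if no speed at or below the limit were supported, the code keeps just the lowest supported one.

u32 mask = 0x3;			/* 1.5 and 3.0 Gbps still allowed */
u32 spd_limit = 1;		/* request: at most 1.5 Gbps */

if (mask & ((1 << spd_limit) - 1))	/* 0x3 & 0x1 != 0 */
	mask &= (1 << spd_limit) - 1;	/* -> mask == 0x1 (1.5 Gbps only) */
else
	mask = 1 << (ffs(mask) - 1);	/* else keep only the lowest speed */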
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 8147a8386370..ce2ef0475339 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
| @@ -82,6 +82,10 @@ enum { | |||
| 82 | ATA_EH_FASTDRAIN_INTERVAL = 3000, | 82 | ATA_EH_FASTDRAIN_INTERVAL = 3000, |
| 83 | 83 | ||
| 84 | ATA_EH_UA_TRIES = 5, | 84 | ATA_EH_UA_TRIES = 5, |
| 85 | |||
| 86 | /* probe speed down parameters, see ata_eh_schedule_probe() */ | ||
| 87 | ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ | ||
| 88 | ATA_EH_PROBE_TRIALS = 2, | ||
| 85 | }; | 89 | }; |
| 86 | 90 | ||
| 87 | /* The following table determines how we sequence resets. Each entry | 91 | /* The following table determines how we sequence resets. Each entry |
| @@ -1176,6 +1180,32 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc) | |||
| 1176 | } | 1180 | } |
| 1177 | 1181 | ||
| 1178 | /** | 1182 | /** |
| 1183 | * ata_dev_disable - disable ATA device | ||
| 1184 | * @dev: ATA device to disable | ||
| 1185 | * | ||
| 1186 | * Disable @dev. | ||
| 1187 | * | ||
| 1188 | * Locking: | ||
| 1189 | * EH context. | ||
| 1190 | */ | ||
| 1191 | void ata_dev_disable(struct ata_device *dev) | ||
| 1192 | { | ||
| 1193 | if (!ata_dev_enabled(dev)) | ||
| 1194 | return; | ||
| 1195 | |||
| 1196 | if (ata_msg_drv(dev->link->ap)) | ||
| 1197 | ata_dev_printk(dev, KERN_WARNING, "disabled\n"); | ||
| 1198 | ata_acpi_on_disable(dev); | ||
| 1199 | ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); | ||
| 1200 | dev->class++; | ||
| 1201 | |||
| 1202 | /* From now till the next successful probe, ering is used to | ||
| 1203 | * track probe failures. Clear accumulated device error info. | ||
| 1204 | */ | ||
| 1205 | ata_ering_clear(&dev->ering); | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | /** | ||
| 1179 | * ata_eh_detach_dev - detach ATA device | 1209 | * ata_eh_detach_dev - detach ATA device |
| 1180 | * @dev: ATA device to detach | 1210 | * @dev: ATA device to detach |
| 1181 | * | 1211 | * |
| @@ -1849,7 +1879,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, | |||
| 1849 | /* speed down? */ | 1879 | /* speed down? */ |
| 1850 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { | 1880 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { |
| 1851 | /* speed down SATA link speed if possible */ | 1881 | /* speed down SATA link speed if possible */ |
| 1852 | if (sata_down_spd_limit(link) == 0) { | 1882 | if (sata_down_spd_limit(link, 0) == 0) { |
| 1853 | action |= ATA_EH_RESET; | 1883 | action |= ATA_EH_RESET; |
| 1854 | goto done; | 1884 | goto done; |
| 1855 | } | 1885 | } |
| @@ -2601,11 +2631,11 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
| 2601 | } | 2631 | } |
| 2602 | 2632 | ||
| 2603 | if (try == max_tries - 1) { | 2633 | if (try == max_tries - 1) { |
| 2604 | sata_down_spd_limit(link); | 2634 | sata_down_spd_limit(link, 0); |
| 2605 | if (slave) | 2635 | if (slave) |
| 2606 | sata_down_spd_limit(slave); | 2636 | sata_down_spd_limit(slave, 0); |
| 2607 | } else if (rc == -EPIPE) | 2637 | } else if (rc == -EPIPE) |
| 2608 | sata_down_spd_limit(failed_link); | 2638 | sata_down_spd_limit(failed_link, 0); |
| 2609 | 2639 | ||
| 2610 | if (hardreset) | 2640 | if (hardreset) |
| 2611 | reset = hardreset; | 2641 | reset = hardreset; |
| @@ -2744,6 +2774,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, | |||
| 2744 | readid_flags, dev->id); | 2774 | readid_flags, dev->id); |
| 2745 | switch (rc) { | 2775 | switch (rc) { |
| 2746 | case 0: | 2776 | case 0: |
| 2777 | /* clear error info accumulated during probe */ | ||
| 2778 | ata_ering_clear(&dev->ering); | ||
| 2747 | new_mask |= 1 << dev->devno; | 2779 | new_mask |= 1 << dev->devno; |
| 2748 | break; | 2780 | break; |
| 2749 | case -ENOENT: | 2781 | case -ENOENT: |
| @@ -2947,9 +2979,24 @@ static int ata_eh_skip_recovery(struct ata_link *link) | |||
| 2947 | return 1; | 2979 | return 1; |
| 2948 | } | 2980 | } |
| 2949 | 2981 | ||
| 2982 | static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) | ||
| 2983 | { | ||
| 2984 | u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); | ||
| 2985 | u64 now = get_jiffies_64(); | ||
| 2986 | int *trials = void_arg; | ||
| 2987 | |||
| 2988 | if (ent->timestamp < now - min(now, interval)) | ||
| 2989 | return -1; | ||
| 2990 | |||
| 2991 | (*trials)++; | ||
| 2992 | return 0; | ||
| 2993 | } | ||
| 2994 | |||
| 2950 | static int ata_eh_schedule_probe(struct ata_device *dev) | 2995 | static int ata_eh_schedule_probe(struct ata_device *dev) |
| 2951 | { | 2996 | { |
| 2952 | struct ata_eh_context *ehc = &dev->link->eh_context; | 2997 | struct ata_eh_context *ehc = &dev->link->eh_context; |
| 2998 | struct ata_link *link = ata_dev_phys_link(dev); | ||
| 2999 | int trials = 0; | ||
| 2953 | 3000 | ||
| 2954 | if (!(ehc->i.probe_mask & (1 << dev->devno)) || | 3001 | if (!(ehc->i.probe_mask & (1 << dev->devno)) || |
| 2955 | (ehc->did_probe_mask & (1 << dev->devno))) | 3002 | (ehc->did_probe_mask & (1 << dev->devno))) |
| @@ -2962,6 +3009,25 @@ static int ata_eh_schedule_probe(struct ata_device *dev) | |||
| 2962 | ehc->saved_xfer_mode[dev->devno] = 0; | 3009 | ehc->saved_xfer_mode[dev->devno] = 0; |
| 2963 | ehc->saved_ncq_enabled &= ~(1 << dev->devno); | 3010 | ehc->saved_ncq_enabled &= ~(1 << dev->devno); |
| 2964 | 3011 | ||
| 3012 | /* Record and count probe trials on the ering. The specific | ||
| 3013 | * error mask used is irrelevant. Because a successful device | ||
| 3014 | * detection clears the ering, this count accumulates only if | ||
| 3015 | * there are consecutive failed probes. | ||
| 3016 | * | ||
| 3017 | * If the count is equal to or higher than ATA_EH_PROBE_TRIALS | ||
| 3018 | * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is | ||
| 3019 | * forced to 1.5Gbps. | ||
| 3020 | * | ||
| 3021 | * This is to work around cases where failed link speed | ||
| 3022 | * negotiation results in device misdetection leading to | ||
| 3023 | * infinite DEVXCHG or PHRDY CHG events. | ||
| 3024 | */ | ||
| 3025 | ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); | ||
| 3026 | ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); | ||
| 3027 | |||
| 3028 | if (trials > ATA_EH_PROBE_TRIALS) | ||
| 3029 | sata_down_spd_limit(link, 1); | ||
| 3030 | |||
| 2965 | return 1; | 3031 | return 1; |
| 2966 | } | 3032 | } |
| 2967 | 3033 | ||
| @@ -2969,7 +3035,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
| 2969 | { | 3035 | { |
| 2970 | struct ata_eh_context *ehc = &dev->link->eh_context; | 3036 | struct ata_eh_context *ehc = &dev->link->eh_context; |
| 2971 | 3037 | ||
| 2972 | ehc->tries[dev->devno]--; | 3038 | /* -EAGAIN from EH routine indicates retry without prejudice. |
| 3039 | * The requester is responsible for ensuring forward progress. | ||
| 3040 | */ | ||
| 3041 | if (err != -EAGAIN) | ||
| 3042 | ehc->tries[dev->devno]--; | ||
| 2973 | 3043 | ||
| 2974 | switch (err) { | 3044 | switch (err) { |
| 2975 | case -ENODEV: | 3045 | case -ENODEV: |
| @@ -2979,12 +3049,13 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
| 2979 | /* give it just one more chance */ | 3049 | /* give it just one more chance */ |
| 2980 | ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); | 3050 | ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); |
| 2981 | case -EIO: | 3051 | case -EIO: |
| 2982 | if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) { | 3052 | if (ehc->tries[dev->devno] == 1) { |
| 2983 | /* This is the last chance, better to slow | 3053 | /* This is the last chance, better to slow |
| 2984 | * down than lose it. | 3054 | * down than lose it. |
| 2985 | */ | 3055 | */ |
| 2986 | sata_down_spd_limit(ata_dev_phys_link(dev)); | 3056 | sata_down_spd_limit(ata_dev_phys_link(dev), 0); |
| 2987 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); | 3057 | if (dev->pio_mode > XFER_PIO_0) |
| 3058 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); | ||
| 2988 | } | 3059 | } |
| 2989 | } | 3060 | } |
| 2990 | 3061 | ||
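The ata_eh_schedule_probe() hunk above counts recent probe failures on the device's error ring and clamps the link to 1.5Gbps once too many pile up inside one trial interval. Below is a minimal userspace sketch of that accounting idea; the ring, the names (probe_ring, record_trial, count_recent_trials) and the thresholds mirroring ATA_EH_PROBE_TRIAL_INTERVAL/ATA_EH_PROBE_TRIALS are invented for illustration and are not the libata implementation.

/*
 * Illustrative userspace sketch (not libata code): each failed probe is
 * stamped into a small ring, and only entries younger than the trial
 * interval are counted toward the speed-down decision.
 */
#include <stdio.h>
#include <time.h>

#define PROBE_TRIAL_INTERVAL	60	/* seconds, stands in for ATA_EH_PROBE_TRIAL_INTERVAL */
#define PROBE_TRIALS		2	/* stands in for ATA_EH_PROBE_TRIALS */
#define RING_SIZE		8

struct probe_ring {
	time_t stamp[RING_SIZE];
	int head, nr;
};

static void record_trial(struct probe_ring *r, time_t now)
{
	r->stamp[r->head] = now;
	r->head = (r->head + 1) % RING_SIZE;
	if (r->nr < RING_SIZE)
		r->nr++;
}

static int count_recent_trials(const struct probe_ring *r, time_t now)
{
	int i, trials = 0;

	for (i = 0; i < r->nr; i++)
		if (now - r->stamp[i] <= PROBE_TRIAL_INTERVAL)
			trials++;
	return trials;
}

int main(void)
{
	struct probe_ring r = { .head = 0, .nr = 0 };
	time_t now = time(NULL);

	record_trial(&r, now - 90);	/* older than the interval, ignored */
	record_trial(&r, now - 30);
	record_trial(&r, now);

	if (count_recent_trials(&r, now) > PROBE_TRIALS)
		printf("would force link speed down to 1.5Gbps\n");
	else
		printf("keep probing at current speed\n");
	return 0;
}

Clearing the ring after a successful probe, as the ata_eh_revalidate_and_attach() hunk now does with ata_ering_clear(), is what keeps the count limited to consecutive failures.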
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 98ca07a2db87..619f2c33950e 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c | |||
| @@ -729,7 +729,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | |||
| 729 | if (tries) { | 729 | if (tries) { |
| 730 | /* consecutive revalidation failures? speed down */ | 730 | /* consecutive revalidation failures? speed down */ |
| 731 | if (reval_failed) | 731 | if (reval_failed) |
| 732 | sata_down_spd_limit(link); | 732 | sata_down_spd_limit(link, 0); |
| 733 | else | 733 | else |
| 734 | reval_failed = 1; | 734 | reval_failed = 1; |
| 735 | 735 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3c4c5ae277ba..b9747fa59e54 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
| @@ -415,6 +415,7 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, | |||
| 415 | 415 | ||
| 416 | /** | 416 | /** |
| 417 | * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl | 417 | * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl |
| 418 | * @ap: target port | ||
| 418 | * @sdev: SCSI device to get identify data for | 419 | * @sdev: SCSI device to get identify data for |
| 419 | * @arg: User buffer area for identify data | 420 | * @arg: User buffer area for identify data |
| 420 | * | 421 | * |
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index fe2839e58774..cea8014cd87e 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
| @@ -79,7 +79,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, | |||
| 79 | u64 block, u32 n_block, unsigned int tf_flags, | 79 | u64 block, u32 n_block, unsigned int tf_flags, |
| 80 | unsigned int tag); | 80 | unsigned int tag); |
| 81 | extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); | 81 | extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); |
| 82 | extern void ata_dev_disable(struct ata_device *dev); | ||
| 83 | extern void ata_pio_queue_task(struct ata_port *ap, void *data, | 82 | extern void ata_pio_queue_task(struct ata_port *ap, void *data, |
| 84 | unsigned long delay); | 83 | unsigned long delay); |
| 85 | extern void ata_port_flush_task(struct ata_port *ap); | 84 | extern void ata_port_flush_task(struct ata_port *ap); |
| @@ -100,7 +99,7 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); | |||
| 100 | extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | 99 | extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, |
| 101 | unsigned int readid_flags); | 100 | unsigned int readid_flags); |
| 102 | extern int ata_dev_configure(struct ata_device *dev); | 101 | extern int ata_dev_configure(struct ata_device *dev); |
| 103 | extern int sata_down_spd_limit(struct ata_link *link); | 102 | extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); |
| 104 | extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); | 103 | extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); |
| 105 | extern void ata_sg_clean(struct ata_queued_cmd *qc); | 104 | extern void ata_sg_clean(struct ata_queued_cmd *qc); |
| 106 | extern void ata_qc_free(struct ata_queued_cmd *qc); | 105 | extern void ata_qc_free(struct ata_queued_cmd *qc); |
| @@ -160,6 +159,7 @@ extern void ata_scsi_error(struct Scsi_Host *host); | |||
| 160 | extern void ata_port_wait_eh(struct ata_port *ap); | 159 | extern void ata_port_wait_eh(struct ata_port *ap); |
| 161 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); | 160 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); |
| 162 | extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); | 161 | extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); |
| 162 | extern void ata_dev_disable(struct ata_device *dev); | ||
| 163 | extern void ata_eh_detach_dev(struct ata_device *dev); | 163 | extern void ata_eh_detach_dev(struct ata_device *dev); |
| 164 | extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, | 164 | extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, |
| 165 | unsigned int action); | 165 | unsigned int action); |
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c index 3080f371222c..f1b26f7c8e4d 100644 --- a/drivers/ata/pata_qdi.c +++ b/drivers/ata/pata_qdi.c | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | * | 12 | * |
| 13 | * Probe code based on drivers/ide/legacy/qd65xx.c | 13 | * Probe code based on drivers/ide/legacy/qd65xx.c |
| 14 | * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by | 14 | * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by |
| 15 | * Samuel Thibault <samuel.thibault@fnac.net> | 15 | * Samuel Thibault <samuel.thibault@ens-lyon.org> |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f2d8a020ea53..4ae1a4138b47 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
| @@ -663,8 +663,8 @@ static const struct pci_device_id mv_pci_tbl[] = { | |||
| 663 | { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, | 663 | { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, |
| 664 | /* RocketRAID 1720/174x have different identifiers */ | 664 | /* RocketRAID 1720/174x have different identifiers */ |
| 665 | { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, | 665 | { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, |
| 666 | { PCI_VDEVICE(TTI, 0x1740), chip_508x }, | 666 | { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, |
| 667 | { PCI_VDEVICE(TTI, 0x1742), chip_508x }, | 667 | { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, |
| 668 | 668 | ||
| 669 | { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, | 669 | { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, |
| 670 | { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, | 670 | { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index c49ad0e61b6f..444af0415ca1 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
| @@ -436,11 +436,16 @@ static struct ata_port_operations nv_nf2_ops = { | |||
| 436 | .hardreset = nv_noclassify_hardreset, | 436 | .hardreset = nv_noclassify_hardreset, |
| 437 | }; | 437 | }; |
| 438 | 438 | ||
| 439 | /* CK804 finally gets hardreset right */ | 439 | /* For initial probing after boot and hot plugging, hardreset mostly |
| 440 | * works fine on CK804 but curiously, reprobing on the initial port by | ||
| 441 | * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS | ||
| 442 | * in a somewhat nondeterministic way. Use noclassify hardreset. | ||
| 443 | */ | ||
| 440 | static struct ata_port_operations nv_ck804_ops = { | 444 | static struct ata_port_operations nv_ck804_ops = { |
| 441 | .inherits = &nv_common_ops, | 445 | .inherits = &nv_common_ops, |
| 442 | .freeze = nv_ck804_freeze, | 446 | .freeze = nv_ck804_freeze, |
| 443 | .thaw = nv_ck804_thaw, | 447 | .thaw = nv_ck804_thaw, |
| 448 | .hardreset = nv_noclassify_hardreset, | ||
| 444 | .host_stop = nv_ck804_host_stop, | 449 | .host_stop = nv_ck804_host_stop, |
| 445 | }; | 450 | }; |
| 446 | 451 | ||
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 9f029595f454..d0091609e210 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
| @@ -324,7 +324,7 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) | |||
| 324 | 324 | ||
| 325 | prd->addr = cpu_to_le32(addr); | 325 | prd->addr = cpu_to_le32(addr); |
| 326 | prd->flags_len = cpu_to_le32(sg_len); | 326 | prd->flags_len = cpu_to_le32(sg_len); |
| 327 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, sg_len); | 327 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len); |
| 328 | 328 | ||
| 329 | last_prd = prd; | 329 | last_prd = prd; |
| 330 | prd++; | 330 | prd++; |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index f5be8081cd81..735bbe2be51a 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
| @@ -761,7 +761,7 @@ source "drivers/char/hw_random/Kconfig" | |||
| 761 | 761 | ||
| 762 | config NVRAM | 762 | config NVRAM |
| 763 | tristate "/dev/nvram support" | 763 | tristate "/dev/nvram support" |
| 764 | depends on ATARI || X86 || ARM || GENERIC_NVRAM | 764 | depends on ATARI || X86 || (ARM && RTC_DRV_CMOS) || GENERIC_NVRAM |
| 765 | ---help--- | 765 | ---help--- |
| 766 | If you say Y here and create a character special file /dev/nvram | 766 | If you say Y here and create a character special file /dev/nvram |
| 767 | with major number 10 and minor number 144 using mknod ("man mknod"), | 767 | with major number 10 and minor number 144 using mknod ("man mknod"), |
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c index 5b2e3af43c4b..08c4fa35e9b1 100644 --- a/drivers/ide/qd65xx.c +++ b/drivers/ide/qd65xx.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by | 18 | * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by |
| 19 | * Samuel Thibault <samuel.thibault@fnac.net> | 19 | * Samuel Thibault <samuel.thibault@ens-lyon.org> |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
diff --git a/drivers/ide/qd65xx.h b/drivers/ide/qd65xx.h index 6636f9665d16..d7e67a1a1dcc 100644 --- a/drivers/ide/qd65xx.h +++ b/drivers/ide/qd65xx.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | /* | 5 | /* |
| 6 | * Authors: Petr Soucek <petr@ryston.cz> | 6 | * Authors: Petr Soucek <petr@ryston.cz> |
| 7 | * Samuel Thibault <samuel.thibault@fnac.net> | 7 | * Samuel Thibault <samuel.thibault@ens-lyon.org> |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | /* truncates a in [b,c] */ | 10 | /* truncates a in [b,c] */ |
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 24508e28e3fb..ea9488e7ad6d 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c | |||
| @@ -626,7 +626,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client, | |||
| 626 | } | 626 | } |
| 627 | 627 | ||
| 628 | if (client->irq) { | 628 | if (client->irq) { |
| 629 | set_irq_handler(client->irq, handle_level_irq); | ||
| 630 | ret = request_irq(client->irq, pcf50633_irq, | 629 | ret = request_irq(client->irq, pcf50633_irq, |
| 631 | IRQF_TRIGGER_LOW, "pcf50633", pcf); | 630 | IRQF_TRIGGER_LOW, "pcf50633", pcf); |
| 632 | 631 | ||
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 7df6bbf0e4d9..6f6a0f6dafd6 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
| @@ -453,7 +453,7 @@ static struct platform_driver sa1100_mtd_driver = { | |||
| 453 | .resume = sa1100_mtd_resume, | 453 | .resume = sa1100_mtd_resume, |
| 454 | .shutdown = sa1100_mtd_shutdown, | 454 | .shutdown = sa1100_mtd_shutdown, |
| 455 | .driver = { | 455 | .driver = { |
| 456 | .name = "flash", | 456 | .name = "sa1100-mtd", |
| 457 | .owner = THIS_MODULE, | 457 | .owner = THIS_MODULE, |
| 458 | }, | 458 | }, |
| 459 | }; | 459 | }; |
| @@ -474,4 +474,4 @@ module_exit(sa1100_mtd_exit); | |||
| 474 | MODULE_AUTHOR("Nicolas Pitre"); | 474 | MODULE_AUTHOR("Nicolas Pitre"); |
| 475 | MODULE_DESCRIPTION("SA1100 CFI map driver"); | 475 | MODULE_DESCRIPTION("SA1100 CFI map driver"); |
| 476 | MODULE_LICENSE("GPL"); | 476 | MODULE_LICENSE("GPL"); |
| 477 | MODULE_ALIAS("platform:flash"); | 477 | MODULE_ALIAS("platform:sa1100-mtd"); |
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index d15d8b79d8e5..54b52e5b1821 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
| @@ -646,7 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = { | |||
| 646 | .ndo_get_stats = ei_get_stats, | 646 | .ndo_get_stats = ei_get_stats, |
| 647 | .ndo_set_multicast_list = ei_set_multicast_list, | 647 | .ndo_set_multicast_list = ei_set_multicast_list, |
| 648 | .ndo_validate_addr = eth_validate_addr, | 648 | .ndo_validate_addr = eth_validate_addr, |
| 649 | .ndo_set_mac_address = eth_set_mac_addr, | 649 | .ndo_set_mac_address = eth_mac_addr, |
| 650 | .ndo_change_mtu = eth_change_mtu, | 650 | .ndo_change_mtu = eth_change_mtu, |
| 651 | #ifdef CONFIG_NET_POLL_CONTROLLER | 651 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 652 | .ndo_poll_controller = ei_poll, | 652 | .ndo_poll_controller = ei_poll, |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 8e93341f3e82..9c2358391147 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
| @@ -553,12 +553,24 @@ static void detach_groups(struct config_group *group) | |||
| 553 | 553 | ||
| 554 | child = sd->s_dentry; | 554 | child = sd->s_dentry; |
| 555 | 555 | ||
| 556 | /* | ||
| 557 | * Note: we hide this from lockdep since we have no way | ||
| 558 | * to teach lockdep about recursive | ||
| 559 | * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path | ||
| 560 | * in an inode tree, which are valid as soon as | ||
| 561 | * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a | ||
| 562 | * parent inode to one of its children. | ||
| 563 | */ | ||
| 564 | lockdep_off(); | ||
| 556 | mutex_lock(&child->d_inode->i_mutex); | 565 | mutex_lock(&child->d_inode->i_mutex); |
| 566 | lockdep_on(); | ||
| 557 | 567 | ||
| 558 | configfs_detach_group(sd->s_element); | 568 | configfs_detach_group(sd->s_element); |
| 559 | child->d_inode->i_flags |= S_DEAD; | 569 | child->d_inode->i_flags |= S_DEAD; |
| 560 | 570 | ||
| 571 | lockdep_off(); | ||
| 561 | mutex_unlock(&child->d_inode->i_mutex); | 572 | mutex_unlock(&child->d_inode->i_mutex); |
| 573 | lockdep_on(); | ||
| 562 | 574 | ||
| 563 | d_delete(child); | 575 | d_delete(child); |
| 564 | dput(child); | 576 | dput(child); |
| @@ -748,11 +760,22 @@ static int configfs_attach_item(struct config_item *parent_item, | |||
| 748 | * We are going to remove an inode and its dentry but | 760 | * We are going to remove an inode and its dentry but |
| 749 | * the VFS may already have hit and used them. Thus, | 761 | * the VFS may already have hit and used them. Thus, |
| 750 | * we must lock them as rmdir() would. | 762 | * we must lock them as rmdir() would. |
| 763 | * | ||
| 764 | * Note: we hide this from lockdep since we have no way | ||
| 765 | * to teach lockdep about recursive | ||
| 766 | * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path | ||
| 767 | * in an inode tree, which are valid as soon as | ||
| 768 | * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a | ||
| 769 | * parent inode to one of its children. | ||
| 751 | */ | 770 | */ |
| 771 | lockdep_off(); | ||
| 752 | mutex_lock(&dentry->d_inode->i_mutex); | 772 | mutex_lock(&dentry->d_inode->i_mutex); |
| 773 | lockdep_on(); | ||
| 753 | configfs_remove_dir(item); | 774 | configfs_remove_dir(item); |
| 754 | dentry->d_inode->i_flags |= S_DEAD; | 775 | dentry->d_inode->i_flags |= S_DEAD; |
| 776 | lockdep_off(); | ||
| 755 | mutex_unlock(&dentry->d_inode->i_mutex); | 777 | mutex_unlock(&dentry->d_inode->i_mutex); |
| 778 | lockdep_on(); | ||
| 756 | d_delete(dentry); | 779 | d_delete(dentry); |
| 757 | } | 780 | } |
| 758 | } | 781 | } |
| @@ -787,14 +810,25 @@ static int configfs_attach_group(struct config_item *parent_item, | |||
| 787 | * | 810 | * |
| 788 | * We must also lock the inode to remove it safely in case of | 811 | * We must also lock the inode to remove it safely in case of |
| 789 | * error, as rmdir() would. | 812 | * error, as rmdir() would. |
| 813 | * | ||
| 814 | * Note: we hide this from lockdep since we have no way | ||
| 815 | * to teach lockdep about recursive | ||
| 816 | * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path | ||
| 817 | * in an inode tree, which are valid as soon as | ||
| 818 | * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a | ||
| 819 | * parent inode to one of its children. | ||
| 790 | */ | 820 | */ |
| 821 | lockdep_off(); | ||
| 791 | mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); | 822 | mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); |
| 823 | lockdep_on(); | ||
| 792 | ret = populate_groups(to_config_group(item)); | 824 | ret = populate_groups(to_config_group(item)); |
| 793 | if (ret) { | 825 | if (ret) { |
| 794 | configfs_detach_item(item); | 826 | configfs_detach_item(item); |
| 795 | dentry->d_inode->i_flags |= S_DEAD; | 827 | dentry->d_inode->i_flags |= S_DEAD; |
| 796 | } | 828 | } |
| 829 | lockdep_off(); | ||
| 797 | mutex_unlock(&dentry->d_inode->i_mutex); | 830 | mutex_unlock(&dentry->d_inode->i_mutex); |
| 831 | lockdep_on(); | ||
| 798 | if (ret) | 832 | if (ret) |
| 799 | d_delete(dentry); | 833 | d_delete(dentry); |
| 800 | } | 834 | } |
| @@ -956,7 +990,17 @@ static int configfs_depend_prep(struct dentry *origin, | |||
| 956 | BUG_ON(!origin || !sd); | 990 | BUG_ON(!origin || !sd); |
| 957 | 991 | ||
| 958 | /* Lock this guy on the way down */ | 992 | /* Lock this guy on the way down */ |
| 993 | /* | ||
| 994 | * Note: we hide this from lockdep since we have no way | ||
| 995 | * to teach lockdep about recursive | ||
| 996 | * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path | ||
| 997 | * in an inode tree, which are valid as soon as | ||
| 998 | * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a | ||
| 999 | * parent inode to one of its children. | ||
| 1000 | */ | ||
| 1001 | lockdep_off(); | ||
| 959 | mutex_lock(&sd->s_dentry->d_inode->i_mutex); | 1002 | mutex_lock(&sd->s_dentry->d_inode->i_mutex); |
| 1003 | lockdep_on(); | ||
| 960 | if (sd->s_element == target) /* Boo-yah */ | 1004 | if (sd->s_element == target) /* Boo-yah */ |
| 961 | goto out; | 1005 | goto out; |
| 962 | 1006 | ||
| @@ -970,7 +1014,9 @@ static int configfs_depend_prep(struct dentry *origin, | |||
| 970 | } | 1014 | } |
| 971 | 1015 | ||
| 972 | /* We looped all our children and didn't find target */ | 1016 | /* We looped all our children and didn't find target */ |
| 1017 | lockdep_off(); | ||
| 973 | mutex_unlock(&sd->s_dentry->d_inode->i_mutex); | 1018 | mutex_unlock(&sd->s_dentry->d_inode->i_mutex); |
| 1019 | lockdep_on(); | ||
| 974 | ret = -ENOENT; | 1020 | ret = -ENOENT; |
| 975 | 1021 | ||
| 976 | out: | 1022 | out: |
| @@ -990,11 +1036,16 @@ static void configfs_depend_rollback(struct dentry *origin, | |||
| 990 | struct dentry *dentry = item->ci_dentry; | 1036 | struct dentry *dentry = item->ci_dentry; |
| 991 | 1037 | ||
| 992 | while (dentry != origin) { | 1038 | while (dentry != origin) { |
| 1039 | /* See comments in configfs_depend_prep() */ | ||
| 1040 | lockdep_off(); | ||
| 993 | mutex_unlock(&dentry->d_inode->i_mutex); | 1041 | mutex_unlock(&dentry->d_inode->i_mutex); |
| 1042 | lockdep_on(); | ||
| 994 | dentry = dentry->d_parent; | 1043 | dentry = dentry->d_parent; |
| 995 | } | 1044 | } |
| 996 | 1045 | ||
| 1046 | lockdep_off(); | ||
| 997 | mutex_unlock(&origin->d_inode->i_mutex); | 1047 | mutex_unlock(&origin->d_inode->i_mutex); |
| 1048 | lockdep_on(); | ||
| 998 | } | 1049 | } |
| 999 | 1050 | ||
| 1000 | int configfs_depend_item(struct configfs_subsystem *subsys, | 1051 | int configfs_depend_item(struct configfs_subsystem *subsys, |
| @@ -1329,8 +1380,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 1329 | } | 1380 | } |
| 1330 | 1381 | ||
| 1331 | /* Wait until the racing operation terminates */ | 1382 | /* Wait until the racing operation terminates */ |
| 1383 | /* | ||
| 1384 | * Note: we hide this from lockdep since we are locked | ||
| 1385 | * with subclass I_MUTEX_NORMAL from vfs_rmdir() (why | ||
| 1386 | * not I_MUTEX_CHILD?), and I_MUTEX_XATTR or | ||
| 1387 | * I_MUTEX_QUOTA are not relevant for the locked inode. | ||
| 1388 | */ | ||
| 1389 | lockdep_off(); | ||
| 1332 | mutex_lock(wait_mutex); | 1390 | mutex_lock(wait_mutex); |
| 1333 | mutex_unlock(wait_mutex); | 1391 | mutex_unlock(wait_mutex); |
| 1392 | lockdep_on(); | ||
| 1334 | } | 1393 | } |
| 1335 | } while (ret == -EAGAIN); | 1394 | } while (ret == -EAGAIN); |
| 1336 | 1395 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d861096c9d81..60fe74035db5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
| @@ -5390,6 +5390,9 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
| 5390 | goto out; | 5390 | goto out; |
| 5391 | } | 5391 | } |
| 5392 | 5392 | ||
| 5393 | vfs_dq_free_space_nodirty(inode, | ||
| 5394 | ocfs2_clusters_to_bytes(inode->i_sb, len)); | ||
| 5395 | |||
| 5393 | ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac, | 5396 | ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac, |
| 5394 | dealloc); | 5397 | dealloc); |
| 5395 | if (ret) { | 5398 | if (ret) { |
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index b1cc7c381e88..e9d7c2038c0f 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include "dlmglue.h" | 38 | #include "dlmglue.h" |
| 39 | #include "file.h" | 39 | #include "file.h" |
| 40 | #include "inode.h" | 40 | #include "inode.h" |
| 41 | #include "super.h" | ||
| 41 | 42 | ||
| 42 | 43 | ||
| 43 | static int ocfs2_dentry_revalidate(struct dentry *dentry, | 44 | static int ocfs2_dentry_revalidate(struct dentry *dentry, |
| @@ -294,6 +295,34 @@ out_attach: | |||
| 294 | return ret; | 295 | return ret; |
| 295 | } | 296 | } |
| 296 | 297 | ||
| 298 | static DEFINE_SPINLOCK(dentry_list_lock); | ||
| 299 | |||
| 300 | /* We limit the number of dentry locks to drop in one go. We have | ||
| 301 | * this limit so that we don't starve other users of ocfs2_wq. */ | ||
| 302 | #define DL_INODE_DROP_COUNT 64 | ||
| 303 | |||
| 304 | /* Drop inode references from dentry locks */ | ||
| 305 | void ocfs2_drop_dl_inodes(struct work_struct *work) | ||
| 306 | { | ||
| 307 | struct ocfs2_super *osb = container_of(work, struct ocfs2_super, | ||
| 308 | dentry_lock_work); | ||
| 309 | struct ocfs2_dentry_lock *dl; | ||
| 310 | int drop_count = DL_INODE_DROP_COUNT; | ||
| 311 | |||
| 312 | spin_lock(&dentry_list_lock); | ||
| 313 | while (osb->dentry_lock_list && drop_count--) { | ||
| 314 | dl = osb->dentry_lock_list; | ||
| 315 | osb->dentry_lock_list = dl->dl_next; | ||
| 316 | spin_unlock(&dentry_list_lock); | ||
| 317 | iput(dl->dl_inode); | ||
| 318 | kfree(dl); | ||
| 319 | spin_lock(&dentry_list_lock); | ||
| 320 | } | ||
| 321 | if (osb->dentry_lock_list) | ||
| 322 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | ||
| 323 | spin_unlock(&dentry_list_lock); | ||
| 324 | } | ||
| 325 | |||
| 297 | /* | 326 | /* |
| 298 | * ocfs2_dentry_iput() and friends. | 327 | * ocfs2_dentry_iput() and friends. |
| 299 | * | 328 | * |
| @@ -318,16 +347,23 @@ out_attach: | |||
| 318 | static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, | 347 | static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, |
| 319 | struct ocfs2_dentry_lock *dl) | 348 | struct ocfs2_dentry_lock *dl) |
| 320 | { | 349 | { |
| 321 | iput(dl->dl_inode); | ||
| 322 | ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); | 350 | ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); |
| 323 | ocfs2_lock_res_free(&dl->dl_lockres); | 351 | ocfs2_lock_res_free(&dl->dl_lockres); |
| 324 | kfree(dl); | 352 | |
| 353 | /* We leave dropping of inode reference to ocfs2_wq as that can | ||
| 354 | * possibly lead to inode deletion which gets tricky */ | ||
| 355 | spin_lock(&dentry_list_lock); | ||
| 356 | if (!osb->dentry_lock_list) | ||
| 357 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | ||
| 358 | dl->dl_next = osb->dentry_lock_list; | ||
| 359 | osb->dentry_lock_list = dl; | ||
| 360 | spin_unlock(&dentry_list_lock); | ||
| 325 | } | 361 | } |
| 326 | 362 | ||
| 327 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, | 363 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, |
| 328 | struct ocfs2_dentry_lock *dl) | 364 | struct ocfs2_dentry_lock *dl) |
| 329 | { | 365 | { |
| 330 | int unlock = 0; | 366 | int unlock; |
| 331 | 367 | ||
| 332 | BUG_ON(dl->dl_count == 0); | 368 | BUG_ON(dl->dl_count == 0); |
| 333 | 369 | ||
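The dcache.c change above stops dropping the inode reference directly in ocfs2_drop_dentry_lock() and instead queues the dentry lock on a list that ocfs2_wq drains in bounded batches. The sketch below shows the same deferred-release pattern in plain userspace C; the names (defer_drop, drop_worker, DROP_BATCH) are illustrative, and only the shape of the locking and the batch limit correspond to the patch.

/*
 * Userspace sketch of the deferred-release pattern: producers push entries
 * onto a locked singly-linked list, and a worker drains at most a fixed
 * batch per run, asking to be rescheduled while work remains.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DROP_BATCH 64

struct deferred {
	struct deferred *next;
	int id;			/* stands in for the inode reference to drop */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *drop_list;

/* Producer side: cheap under the lock, the real work is deferred. */
static void defer_drop(struct deferred *d)
{
	pthread_mutex_lock(&list_lock);
	d->next = drop_list;
	drop_list = d;
	pthread_mutex_unlock(&list_lock);
}

/* Worker: drop the lock around the expensive part and bound the batch.
 * Returns nonzero if it should be scheduled again. */
static int drop_worker(void)
{
	int budget = DROP_BATCH, again;

	pthread_mutex_lock(&list_lock);
	while (drop_list && budget--) {
		struct deferred *d = drop_list;

		drop_list = d->next;
		pthread_mutex_unlock(&list_lock);
		printf("dropping reference %d\n", d->id);	/* iput() analogue */
		free(d);
		pthread_mutex_lock(&list_lock);
	}
	again = drop_list != NULL;
	pthread_mutex_unlock(&list_lock);
	return again;
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		struct deferred *d = malloc(sizeof(*d));

		d->id = i;
		defer_drop(d);
	}
	while (drop_worker())
		;	/* stand-in for requeueing the work item */
	return 0;
}

Bounding the batch and requeueing while entries remain is what keeps one burst of dentry releases from starving other users of the shared workqueue, which is exactly what the DL_INODE_DROP_COUNT comment in the hunk calls out.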
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index c091c34d9883..d06e16c06640 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h | |||
| @@ -29,8 +29,13 @@ | |||
| 29 | extern struct dentry_operations ocfs2_dentry_ops; | 29 | extern struct dentry_operations ocfs2_dentry_ops; |
| 30 | 30 | ||
| 31 | struct ocfs2_dentry_lock { | 31 | struct ocfs2_dentry_lock { |
| 32 | /* Use count of dentry lock */ | ||
| 32 | unsigned int dl_count; | 33 | unsigned int dl_count; |
| 33 | u64 dl_parent_blkno; | 34 | union { |
| 35 | /* Linked list of dentry locks to release */ | ||
| 36 | struct ocfs2_dentry_lock *dl_next; | ||
| 37 | u64 dl_parent_blkno; | ||
| 38 | }; | ||
| 34 | 39 | ||
| 35 | /* | 40 | /* |
| 36 | * The ocfs2_dentry_lock keeps an inode reference until | 41 | * The ocfs2_dentry_lock keeps an inode reference until |
| @@ -47,6 +52,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, | |||
| 47 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, | 52 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, |
| 48 | struct ocfs2_dentry_lock *dl); | 53 | struct ocfs2_dentry_lock *dl); |
| 49 | 54 | ||
| 55 | void ocfs2_drop_dl_inodes(struct work_struct *work); | ||
| 56 | |||
| 50 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, | 57 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, |
| 51 | int skip_unhashed); | 58 | int skip_unhashed); |
| 52 | 59 | ||
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index b0c4cadd4c45..206a2370876a 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
| @@ -2860,6 +2860,10 @@ static void ocfs2_unlock_ast(void *opaque, int error) | |||
| 2860 | case OCFS2_UNLOCK_CANCEL_CONVERT: | 2860 | case OCFS2_UNLOCK_CANCEL_CONVERT: |
| 2861 | mlog(0, "Cancel convert success for %s\n", lockres->l_name); | 2861 | mlog(0, "Cancel convert success for %s\n", lockres->l_name); |
| 2862 | lockres->l_action = OCFS2_AST_INVALID; | 2862 | lockres->l_action = OCFS2_AST_INVALID; |
| 2863 | /* Downconvert thread may have requeued this lock, we | ||
| 2864 | * need to wake it. */ | ||
| 2865 | if (lockres->l_flags & OCFS2_LOCK_BLOCKED) | ||
| 2866 | ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres)); | ||
| 2863 | break; | 2867 | break; |
| 2864 | case OCFS2_UNLOCK_DROP_LOCK: | 2868 | case OCFS2_UNLOCK_DROP_LOCK: |
| 2865 | lockres->l_level = DLM_LOCK_IV; | 2869 | lockres->l_level = DLM_LOCK_IV; |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index ad5c24a29edd..077384135f4e 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
| @@ -210,6 +210,7 @@ struct ocfs2_journal; | |||
| 210 | struct ocfs2_slot_info; | 210 | struct ocfs2_slot_info; |
| 211 | struct ocfs2_recovery_map; | 211 | struct ocfs2_recovery_map; |
| 212 | struct ocfs2_quota_recovery; | 212 | struct ocfs2_quota_recovery; |
| 213 | struct ocfs2_dentry_lock; | ||
| 213 | struct ocfs2_super | 214 | struct ocfs2_super |
| 214 | { | 215 | { |
| 215 | struct task_struct *commit_task; | 216 | struct task_struct *commit_task; |
| @@ -325,6 +326,11 @@ struct ocfs2_super | |||
| 325 | struct list_head blocked_lock_list; | 326 | struct list_head blocked_lock_list; |
| 326 | unsigned long blocked_lock_count; | 327 | unsigned long blocked_lock_count; |
| 327 | 328 | ||
| 329 | /* List of dentry locks to release. Anyone can add locks to | ||
| 330 | * the list, ocfs2_wq processes the list */ | ||
| 331 | struct ocfs2_dentry_lock *dentry_lock_list; | ||
| 332 | struct work_struct dentry_lock_work; | ||
| 333 | |||
| 328 | wait_queue_head_t osb_mount_event; | 334 | wait_queue_head_t osb_mount_event; |
| 329 | 335 | ||
| 330 | /* Truncate log info */ | 336 | /* Truncate log info */ |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index f4efa89baee5..1ed0f7c86869 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
| @@ -754,7 +754,9 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot) | |||
| 754 | if (dquot->dq_flags & mask) | 754 | if (dquot->dq_flags & mask) |
| 755 | sync = 1; | 755 | sync = 1; |
| 756 | spin_unlock(&dq_data_lock); | 756 | spin_unlock(&dq_data_lock); |
| 757 | if (!sync) { | 757 | /* This is a slight hack but we can't afford getting global quota |
| 758 | * lock if we already have a transaction started. */ | ||
| 759 | if (!sync || journal_current_handle()) { | ||
| 758 | status = ocfs2_write_dquot(dquot); | 760 | status = ocfs2_write_dquot(dquot); |
| 759 | goto out; | 761 | goto out; |
| 760 | } | 762 | } |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 43ed11345b59..b1cb38fbe807 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
| @@ -1887,6 +1887,9 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
| 1887 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); | 1887 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); |
| 1888 | journal->j_state = OCFS2_JOURNAL_FREE; | 1888 | journal->j_state = OCFS2_JOURNAL_FREE; |
| 1889 | 1889 | ||
| 1890 | INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes); | ||
| 1891 | osb->dentry_lock_list = NULL; | ||
| 1892 | |||
| 1890 | /* get some pseudo constants for clustersize bits */ | 1893 | /* get some pseudo constants for clustersize bits */ |
| 1891 | osb->s_clustersize_bits = | 1894 | osb->s_clustersize_bits = |
| 1892 | le32_to_cpu(di->id2.i_super.s_clustersize_bits); | 1895 | le32_to_cpu(di->id2.i_super.s_clustersize_bits); |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index e1d638af6ac3..915039fffe6e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
| @@ -4729,13 +4729,6 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, | |||
| 4729 | vb.vb_xv = (struct ocfs2_xattr_value_root *) | 4729 | vb.vb_xv = (struct ocfs2_xattr_value_root *) |
| 4730 | (vb.vb_bh->b_data + offset % blocksize); | 4730 | (vb.vb_bh->b_data + offset % blocksize); |
| 4731 | 4731 | ||
| 4732 | ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, | ||
| 4733 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
| 4734 | if (ret) { | ||
| 4735 | mlog_errno(ret); | ||
| 4736 | goto out; | ||
| 4737 | } | ||
| 4738 | |||
| 4739 | /* | 4732 | /* |
| 4740 | * From here on out we have to dirty the bucket. The generic | 4733 | * From here on out we have to dirty the bucket. The generic |
| 4741 | * value calls only modify one of the bucket's bhs, but we need | 4734 | * value calls only modify one of the bucket's bhs, but we need |
| @@ -4748,12 +4741,18 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, | |||
| 4748 | ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); | 4741 | ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); |
| 4749 | if (ret) { | 4742 | if (ret) { |
| 4750 | mlog_errno(ret); | 4743 | mlog_errno(ret); |
| 4751 | goto out_dirty; | 4744 | goto out; |
| 4745 | } | ||
| 4746 | |||
| 4747 | ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, | ||
| 4748 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
| 4749 | if (ret) { | ||
| 4750 | mlog_errno(ret); | ||
| 4751 | goto out; | ||
| 4752 | } | 4752 | } |
| 4753 | 4753 | ||
| 4754 | xe->xe_value_size = cpu_to_le64(len); | 4754 | xe->xe_value_size = cpu_to_le64(len); |
| 4755 | 4755 | ||
| 4756 | out_dirty: | ||
| 4757 | ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); | 4756 | ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); |
| 4758 | 4757 | ||
| 4759 | out: | 4758 | out: |
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 175f9c590b77..f393620890ee 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c | |||
| @@ -689,7 +689,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) | |||
| 689 | } | 689 | } |
| 690 | 690 | ||
| 691 | /** | 691 | /** |
| 692 | * ubifs_get_free_space - return amount of free space. | 692 | * ubifs_get_free_space_nolock - return amount of free space. |
| 693 | * @c: UBIFS file-system description object | 693 | * @c: UBIFS file-system description object |
| 694 | * | 694 | * |
| 695 | * This function calculates amount of free space to report to user-space. | 695 | * This function calculates amount of free space to report to user-space. |
| @@ -704,16 +704,14 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) | |||
| 704 | * traditional file-systems, because they have way less overhead than UBIFS. | 704 | * traditional file-systems, because they have way less overhead than UBIFS. |
| 705 | * So, to keep users happy, UBIFS tries to take the overhead into account. | 705 | * So, to keep users happy, UBIFS tries to take the overhead into account. |
| 706 | */ | 706 | */ |
| 707 | long long ubifs_get_free_space(struct ubifs_info *c) | 707 | long long ubifs_get_free_space_nolock(struct ubifs_info *c) |
| 708 | { | 708 | { |
| 709 | int min_idx_lebs, rsvd_idx_lebs, lebs; | 709 | int rsvd_idx_lebs, lebs; |
| 710 | long long available, outstanding, free; | 710 | long long available, outstanding, free; |
| 711 | 711 | ||
| 712 | spin_lock(&c->space_lock); | 712 | ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); |
| 713 | min_idx_lebs = c->min_idx_lebs; | ||
| 714 | ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c)); | ||
| 715 | outstanding = c->budg_data_growth + c->budg_dd_growth; | 713 | outstanding = c->budg_data_growth + c->budg_dd_growth; |
| 716 | available = ubifs_calc_available(c, min_idx_lebs); | 714 | available = ubifs_calc_available(c, c->min_idx_lebs); |
| 717 | 715 | ||
| 718 | /* | 716 | /* |
| 719 | * When reporting free space to user-space, UBIFS guarantees that it is | 717 | * When reporting free space to user-space, UBIFS guarantees that it is |
| @@ -726,15 +724,14 @@ long long ubifs_get_free_space(struct ubifs_info *c) | |||
| 726 | * Note, the calculations below are similar to what we have in | 724 | * Note, the calculations below are similar to what we have in |
| 727 | * 'do_budget_space()', so refer there for comments. | 725 | * 'do_budget_space()', so refer there for comments. |
| 728 | */ | 726 | */ |
| 729 | if (min_idx_lebs > c->lst.idx_lebs) | 727 | if (c->min_idx_lebs > c->lst.idx_lebs) |
| 730 | rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; | 728 | rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; |
| 731 | else | 729 | else |
| 732 | rsvd_idx_lebs = 0; | 730 | rsvd_idx_lebs = 0; |
| 733 | lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - | 731 | lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - |
| 734 | c->lst.taken_empty_lebs; | 732 | c->lst.taken_empty_lebs; |
| 735 | lebs -= rsvd_idx_lebs; | 733 | lebs -= rsvd_idx_lebs; |
| 736 | available += lebs * (c->dark_wm - c->leb_overhead); | 734 | available += lebs * (c->dark_wm - c->leb_overhead); |
| 737 | spin_unlock(&c->space_lock); | ||
| 738 | 735 | ||
| 739 | if (available > outstanding) | 736 | if (available > outstanding) |
| 740 | free = ubifs_reported_space(c, available - outstanding); | 737 | free = ubifs_reported_space(c, available - outstanding); |
| @@ -742,3 +739,21 @@ long long ubifs_get_free_space(struct ubifs_info *c) | |||
| 742 | free = 0; | 739 | free = 0; |
| 743 | return free; | 740 | return free; |
| 744 | } | 741 | } |
| 742 | |||
| 743 | /** | ||
| 744 | * ubifs_get_free_space - return amount of free space. | ||
| 745 | * @c: UBIFS file-system description object | ||
| 746 | * | ||
| 747 | * This function calculates and returns amount of free space to report to | ||
| 748 | * user-space. | ||
| 749 | */ | ||
| 750 | long long ubifs_get_free_space(struct ubifs_info *c) | ||
| 751 | { | ||
| 752 | long long free; | ||
| 753 | |||
| 754 | spin_lock(&c->space_lock); | ||
| 755 | free = ubifs_get_free_space_nolock(c); | ||
| 756 | spin_unlock(&c->space_lock); | ||
| 757 | |||
| 758 | return free; | ||
| 759 | } | ||
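The budget.c hunk above splits ubifs_get_free_space() into a _nolock worker plus a locking wrapper, so callers that already hold space_lock (such as the debugging code introduced later in this series) can reuse the calculation. A minimal sketch of that locked/_nolock convention follows, with invented fs_info fields rather than the real UBIFS budgeting state and a pthread mutex standing in for the kernel spinlock.

/*
 * Sketch of the locked/_nolock split: the _nolock variant assumes the
 * caller holds the lock, and the public wrapper only adds the locking.
 */
#include <pthread.h>
#include <stdio.h>

struct fs_info {
	pthread_mutex_t space_lock;
	long long available, outstanding;
};

/* Caller must hold info->space_lock. */
static long long get_free_space_nolock(struct fs_info *info)
{
	long long free = info->available - info->outstanding;

	return free > 0 ? free : 0;
}

/* Public wrapper: take the lock, compute, drop the lock. */
static long long get_free_space(struct fs_info *info)
{
	long long free;

	pthread_mutex_lock(&info->space_lock);
	free = get_free_space_nolock(info);
	pthread_mutex_unlock(&info->space_lock);
	return free;
}

int main(void)
{
	struct fs_info info = {
		.space_lock = PTHREAD_MUTEX_INITIALIZER,
		.available = 1000, .outstanding = 300,
	};

	printf("free = %lld\n", get_free_space(&info));
	return 0;
}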
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 792c5a16c182..e975bd82f38b 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
| @@ -620,9 +620,11 @@ void dbg_dump_budg(struct ubifs_info *c) | |||
| 620 | c->dark_wm, c->dead_wm, c->max_idx_node_sz); | 620 | c->dark_wm, c->dead_wm, c->max_idx_node_sz); |
| 621 | printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n", | 621 | printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n", |
| 622 | c->gc_lnum, c->ihead_lnum); | 622 | c->gc_lnum, c->ihead_lnum); |
| 623 | for (i = 0; i < c->jhead_cnt; i++) | 623 | /* If we are in R/O mode, journal heads do not exist */ |
| 624 | printk(KERN_DEBUG "\tjhead %d\t LEB %d\n", | 624 | if (c->jheads) |
| 625 | c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum); | 625 | for (i = 0; i < c->jhead_cnt; i++) |
| 626 | printk(KERN_DEBUG "\tjhead %d\t LEB %d\n", | ||
| 627 | c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum); | ||
| 626 | for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { | 628 | for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { |
| 627 | bud = rb_entry(rb, struct ubifs_bud, rb); | 629 | bud = rb_entry(rb, struct ubifs_bud, rb); |
| 628 | printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum); | 630 | printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum); |
| @@ -637,10 +639,7 @@ void dbg_dump_budg(struct ubifs_info *c) | |||
| 637 | /* Print budgeting predictions */ | 639 | /* Print budgeting predictions */ |
| 638 | available = ubifs_calc_available(c, c->min_idx_lebs); | 640 | available = ubifs_calc_available(c, c->min_idx_lebs); |
| 639 | outstanding = c->budg_data_growth + c->budg_dd_growth; | 641 | outstanding = c->budg_data_growth + c->budg_dd_growth; |
| 640 | if (available > outstanding) | 642 | free = ubifs_get_free_space_nolock(c); |
| 641 | free = ubifs_reported_space(c, available - outstanding); | ||
| 642 | else | ||
| 643 | free = 0; | ||
| 644 | printk(KERN_DEBUG "Budgeting predictions:\n"); | 643 | printk(KERN_DEBUG "Budgeting predictions:\n"); |
| 645 | printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", | 644 | printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", |
| 646 | available, outstanding, free); | 645 | available, outstanding, free); |
| @@ -861,6 +860,65 @@ void dbg_dump_index(struct ubifs_info *c) | |||
| 861 | } | 860 | } |
| 862 | 861 | ||
| 863 | /** | 862 | /** |
| 863 | * dbg_save_space_info - save information about flash space. | ||
| 864 | * @c: UBIFS file-system description object | ||
| 865 | * | ||
| 866 | * This function saves information about UBIFS free space, dirty space, etc, in | ||
| 867 | * order to check it later. | ||
| 868 | */ | ||
| 869 | void dbg_save_space_info(struct ubifs_info *c) | ||
| 870 | { | ||
| 871 | struct ubifs_debug_info *d = c->dbg; | ||
| 872 | |||
| 873 | ubifs_get_lp_stats(c, &d->saved_lst); | ||
| 874 | |||
| 875 | spin_lock(&c->space_lock); | ||
| 876 | d->saved_free = ubifs_get_free_space_nolock(c); | ||
| 877 | spin_unlock(&c->space_lock); | ||
| 878 | } | ||
| 879 | |||
| 880 | /** | ||
| 881 | * dbg_check_space_info - check flash space information. | ||
| 882 | * @c: UBIFS file-system description object | ||
| 883 | * | ||
| 884 | * This function compares current flash space information with the information | ||
| 885 | * which was saved when the 'dbg_save_space_info()' function was called. | ||
| 886 | * Returns zero if the information has not changed, and %-EINVAL if it has | ||
| 887 | * changed. | ||
| 888 | */ | ||
| 889 | int dbg_check_space_info(struct ubifs_info *c) | ||
| 890 | { | ||
| 891 | struct ubifs_debug_info *d = c->dbg; | ||
| 892 | struct ubifs_lp_stats lst; | ||
| 893 | long long avail, free; | ||
| 894 | |||
| 895 | spin_lock(&c->space_lock); | ||
| 896 | avail = ubifs_calc_available(c, c->min_idx_lebs); | ||
| 897 | spin_unlock(&c->space_lock); | ||
| 898 | free = ubifs_get_free_space(c); | ||
| 899 | |||
| 900 | if (free != d->saved_free) { | ||
| 901 | ubifs_err("free space changed from %lld to %lld", | ||
| 902 | d->saved_free, free); | ||
| 903 | goto out; | ||
| 904 | } | ||
| 905 | |||
| 906 | return 0; | ||
| 907 | |||
| 908 | out: | ||
| 909 | ubifs_msg("saved lprops statistics dump"); | ||
| 910 | dbg_dump_lstats(&d->saved_lst); | ||
| 911 | ubifs_get_lp_stats(c, &lst); | ||
| 912 | ubifs_msg("current lprops statistics dump"); | ||
| 913 | dbg_dump_lstats(&lst); | ||
| 914 | spin_lock(&c->space_lock); | ||
| 915 | dbg_dump_budg(c); | ||
| 916 | spin_unlock(&c->space_lock); | ||
| 917 | dump_stack(); | ||
| 918 | return -EINVAL; | ||
| 919 | } | ||
| 920 | |||
| 921 | /** | ||
| 864 | * dbg_check_synced_i_size - check synchronized inode size. | 922 | * dbg_check_synced_i_size - check synchronized inode size. |
| 865 | * @inode: inode to check | 923 | * @inode: inode to check |
| 866 | * | 924 | * |
| @@ -1349,7 +1407,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) | |||
| 1349 | * @c: UBIFS file-system description object | 1407 | * @c: UBIFS file-system description object |
| 1350 | * @leaf_cb: called for each leaf node | 1408 | * @leaf_cb: called for each leaf node |
| 1351 | * @znode_cb: called for each indexing node | 1409 | * @znode_cb: called for each indexing node |
| 1352 | * @priv: private date which is passed to callbacks | 1410 | * @priv: private data which is passed to callbacks |
| 1353 | * | 1411 | * |
| 1354 | * This function walks the UBIFS index and calls the @leaf_cb for each leaf | 1412 | * This function walks the UBIFS index and calls the @leaf_cb for each leaf |
| 1355 | * node and @znode_cb for each indexing node. Returns zero in case of success | 1413 | * node and @znode_cb for each indexing node. Returns zero in case of success |
| @@ -2409,7 +2467,7 @@ void ubifs_debugging_exit(struct ubifs_info *c) | |||
| 2409 | * Root directory for UBIFS stuff in debugfs. Contains sub-directories which | 2467 | * Root directory for UBIFS stuff in debugfs. Contains sub-directories which |
| 2410 | * contain the stuff specific to particular file-system mounts. | 2468 | * contain the stuff specific to particular file-system mounts. |
| 2411 | */ | 2469 | */ |
| 2412 | static struct dentry *debugfs_rootdir; | 2470 | static struct dentry *dfs_rootdir; |
| 2413 | 2471 | ||
| 2414 | /** | 2472 | /** |
| 2415 | * dbg_debugfs_init - initialize debugfs file-system. | 2473 | * dbg_debugfs_init - initialize debugfs file-system. |
| @@ -2421,9 +2479,9 @@ static struct dentry *debugfs_rootdir; | |||
| 2421 | */ | 2479 | */ |
| 2422 | int dbg_debugfs_init(void) | 2480 | int dbg_debugfs_init(void) |
| 2423 | { | 2481 | { |
| 2424 | debugfs_rootdir = debugfs_create_dir("ubifs", NULL); | 2482 | dfs_rootdir = debugfs_create_dir("ubifs", NULL); |
| 2425 | if (IS_ERR(debugfs_rootdir)) { | 2483 | if (IS_ERR(dfs_rootdir)) { |
| 2426 | int err = PTR_ERR(debugfs_rootdir); | 2484 | int err = PTR_ERR(dfs_rootdir); |
| 2427 | ubifs_err("cannot create \"ubifs\" debugfs directory, " | 2485 | ubifs_err("cannot create \"ubifs\" debugfs directory, " |
| 2428 | "error %d\n", err); | 2486 | "error %d\n", err); |
| 2429 | return err; | 2487 | return err; |
| @@ -2437,7 +2495,7 @@ int dbg_debugfs_init(void) | |||
| 2437 | */ | 2495 | */ |
| 2438 | void dbg_debugfs_exit(void) | 2496 | void dbg_debugfs_exit(void) |
| 2439 | { | 2497 | { |
| 2440 | debugfs_remove(debugfs_rootdir); | 2498 | debugfs_remove(dfs_rootdir); |
| 2441 | } | 2499 | } |
| 2442 | 2500 | ||
| 2443 | static int open_debugfs_file(struct inode *inode, struct file *file) | 2501 | static int open_debugfs_file(struct inode *inode, struct file *file) |
| @@ -2452,13 +2510,13 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf, | |||
| 2452 | struct ubifs_info *c = file->private_data; | 2510 | struct ubifs_info *c = file->private_data; |
| 2453 | struct ubifs_debug_info *d = c->dbg; | 2511 | struct ubifs_debug_info *d = c->dbg; |
| 2454 | 2512 | ||
| 2455 | if (file->f_path.dentry == d->dump_lprops) | 2513 | if (file->f_path.dentry == d->dfs_dump_lprops) |
| 2456 | dbg_dump_lprops(c); | 2514 | dbg_dump_lprops(c); |
| 2457 | else if (file->f_path.dentry == d->dump_budg) { | 2515 | else if (file->f_path.dentry == d->dfs_dump_budg) { |
| 2458 | spin_lock(&c->space_lock); | 2516 | spin_lock(&c->space_lock); |
| 2459 | dbg_dump_budg(c); | 2517 | dbg_dump_budg(c); |
| 2460 | spin_unlock(&c->space_lock); | 2518 | spin_unlock(&c->space_lock); |
| 2461 | } else if (file->f_path.dentry == d->dump_tnc) { | 2519 | } else if (file->f_path.dentry == d->dfs_dump_tnc) { |
| 2462 | mutex_lock(&c->tnc_mutex); | 2520 | mutex_lock(&c->tnc_mutex); |
| 2463 | dbg_dump_tnc(c); | 2521 | dbg_dump_tnc(c); |
| 2464 | mutex_unlock(&c->tnc_mutex); | 2522 | mutex_unlock(&c->tnc_mutex); |
| @@ -2469,7 +2527,7 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf, | |||
| 2469 | return count; | 2527 | return count; |
| 2470 | } | 2528 | } |
| 2471 | 2529 | ||
| 2472 | static const struct file_operations debugfs_fops = { | 2530 | static const struct file_operations dfs_fops = { |
| 2473 | .open = open_debugfs_file, | 2531 | .open = open_debugfs_file, |
| 2474 | .write = write_debugfs_file, | 2532 | .write = write_debugfs_file, |
| 2475 | .owner = THIS_MODULE, | 2533 | .owner = THIS_MODULE, |
| @@ -2494,36 +2552,32 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) | |||
| 2494 | struct dentry *dent; | 2552 | struct dentry *dent; |
| 2495 | struct ubifs_debug_info *d = c->dbg; | 2553 | struct ubifs_debug_info *d = c->dbg; |
| 2496 | 2554 | ||
| 2497 | sprintf(d->debugfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); | 2555 | sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); |
| 2498 | d->debugfs_dir = debugfs_create_dir(d->debugfs_dir_name, | 2556 | d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir); |
| 2499 | debugfs_rootdir); | 2557 | if (IS_ERR(d->dfs_dir)) { |
| 2500 | if (IS_ERR(d->debugfs_dir)) { | 2558 | err = PTR_ERR(d->dfs_dir); |
| 2501 | err = PTR_ERR(d->debugfs_dir); | ||
| 2502 | ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", | 2559 | ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", |
| 2503 | d->debugfs_dir_name, err); | 2560 | d->dfs_dir_name, err); |
| 2504 | goto out; | 2561 | goto out; |
| 2505 | } | 2562 | } |
| 2506 | 2563 | ||
| 2507 | fname = "dump_lprops"; | 2564 | fname = "dump_lprops"; |
| 2508 | dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, | 2565 | dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); |
| 2509 | &debugfs_fops); | ||
| 2510 | if (IS_ERR(dent)) | 2566 | if (IS_ERR(dent)) |
| 2511 | goto out_remove; | 2567 | goto out_remove; |
| 2512 | d->dump_lprops = dent; | 2568 | d->dfs_dump_lprops = dent; |
| 2513 | 2569 | ||
| 2514 | fname = "dump_budg"; | 2570 | fname = "dump_budg"; |
| 2515 | dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, | 2571 | dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); |
| 2516 | &debugfs_fops); | ||
| 2517 | if (IS_ERR(dent)) | 2572 | if (IS_ERR(dent)) |
| 2518 | goto out_remove; | 2573 | goto out_remove; |
| 2519 | d->dump_budg = dent; | 2574 | d->dfs_dump_budg = dent; |
| 2520 | 2575 | ||
| 2521 | fname = "dump_tnc"; | 2576 | fname = "dump_tnc"; |
| 2522 | dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, | 2577 | dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); |
| 2523 | &debugfs_fops); | ||
| 2524 | if (IS_ERR(dent)) | 2578 | if (IS_ERR(dent)) |
| 2525 | goto out_remove; | 2579 | goto out_remove; |
| 2526 | d->dump_tnc = dent; | 2580 | d->dfs_dump_tnc = dent; |
| 2527 | 2581 | ||
| 2528 | return 0; | 2582 | return 0; |
| 2529 | 2583 | ||
| @@ -2531,7 +2585,7 @@ out_remove: | |||
| 2531 | err = PTR_ERR(dent); | 2585 | err = PTR_ERR(dent); |
| 2532 | ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", | 2586 | ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", |
| 2533 | fname, err); | 2587 | fname, err); |
| 2534 | debugfs_remove_recursive(d->debugfs_dir); | 2588 | debugfs_remove_recursive(d->dfs_dir); |
| 2535 | out: | 2589 | out: |
| 2536 | return err; | 2590 | return err; |
| 2537 | } | 2591 | } |
| @@ -2542,7 +2596,7 @@ out: | |||
| 2542 | */ | 2596 | */ |
| 2543 | void dbg_debugfs_exit_fs(struct ubifs_info *c) | 2597 | void dbg_debugfs_exit_fs(struct ubifs_info *c) |
| 2544 | { | 2598 | { |
| 2545 | debugfs_remove_recursive(c->dbg->debugfs_dir); | 2599 | debugfs_remove_recursive(c->dbg->dfs_dir); |
| 2546 | } | 2600 | } |
| 2547 | 2601 | ||
| 2548 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | 2602 | #endif /* CONFIG_UBIFS_FS_DEBUG */ |
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 9820d6999f7e..c1cd73b2e06e 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h | |||
| @@ -41,15 +41,17 @@ | |||
| 41 | * @chk_lpt_wastage: used by LPT tree size checker | 41 | * @chk_lpt_wastage: used by LPT tree size checker |
| 42 | * @chk_lpt_lebs: used by LPT tree size checker | 42 | * @chk_lpt_lebs: used by LPT tree size checker |
| 43 | * @new_nhead_offs: used by LPT tree size checker | 43 | * @new_nhead_offs: used by LPT tree size checker |
| 44 | * @new_ihead_lnum: used by debugging to check ihead_lnum | 44 | * @new_ihead_lnum: used by debugging to check @c->ihead_lnum |
| 45 | * @new_ihead_offs: used by debugging to check ihead_offs | 45 | * @new_ihead_offs: used by debugging to check @c->ihead_offs |
| 46 | * | 46 | * |
| 47 | * debugfs_dir_name: name of debugfs directory containing this file-system's | 47 | * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()') |
| 48 | * files | 48 | * @saved_free: saved free space (used by 'dbg_save_space_info()') |
| 49 | * debugfs_dir: direntry object of the file-system debugfs directory | 49 | * |
| 50 | * dump_lprops: "dump lprops" debugfs knob | 50 | * dfs_dir_name: name of debugfs directory containing this file-system's files |
| 51 | * dump_budg: "dump budgeting information" debugfs knob | 51 | * dfs_dir: direntry object of the file-system debugfs directory |
| 52 | * dump_tnc: "dump TNC" debugfs knob | 52 | * dfs_dump_lprops: "dump lprops" debugfs knob |
| 53 | * dfs_dump_budg: "dump budgeting information" debugfs knob | ||
| 54 | * dfs_dump_tnc: "dump TNC" debugfs knob | ||
| 53 | */ | 55 | */ |
| 54 | struct ubifs_debug_info { | 56 | struct ubifs_debug_info { |
| 55 | void *buf; | 57 | void *buf; |
| @@ -69,11 +71,14 @@ struct ubifs_debug_info { | |||
| 69 | int new_ihead_lnum; | 71 | int new_ihead_lnum; |
| 70 | int new_ihead_offs; | 72 | int new_ihead_offs; |
| 71 | 73 | ||
| 72 | char debugfs_dir_name[100]; | 74 | struct ubifs_lp_stats saved_lst; |
| 73 | struct dentry *debugfs_dir; | 75 | long long saved_free; |
| 74 | struct dentry *dump_lprops; | 76 | |
| 75 | struct dentry *dump_budg; | 77 | char dfs_dir_name[100]; |
| 76 | struct dentry *dump_tnc; | 78 | struct dentry *dfs_dir; |
| 79 | struct dentry *dfs_dump_lprops; | ||
| 80 | struct dentry *dfs_dump_budg; | ||
| 81 | struct dentry *dfs_dump_tnc; | ||
| 77 | }; | 82 | }; |
| 78 | 83 | ||
| 79 | #define ubifs_assert(expr) do { \ | 84 | #define ubifs_assert(expr) do { \ |
| @@ -297,7 +302,8 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, | |||
| 297 | dbg_znode_callback znode_cb, void *priv); | 302 | dbg_znode_callback znode_cb, void *priv); |
| 298 | 303 | ||
| 299 | /* Checking functions */ | 304 | /* Checking functions */ |
| 300 | 305 | void dbg_save_space_info(struct ubifs_info *c); | |
| 306 | int dbg_check_space_info(struct ubifs_info *c); | ||
| 301 | int dbg_check_lprops(struct ubifs_info *c); | 307 | int dbg_check_lprops(struct ubifs_info *c); |
| 302 | int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot); | 308 | int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot); |
| 303 | int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot); | 309 | int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot); |
| @@ -439,6 +445,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); | |||
| 439 | 445 | ||
| 440 | #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 | 446 | #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 |
| 441 | #define dbg_old_index_check_init(c, zroot) 0 | 447 | #define dbg_old_index_check_init(c, zroot) 0 |
| 448 | #define dbg_save_space_info(c) ({}) | ||
| 449 | #define dbg_check_space_info(c) 0 | ||
| 442 | #define dbg_check_old_index(c, zroot) 0 | 450 | #define dbg_check_old_index(c, zroot) 0 |
| 443 | #define dbg_check_cats(c) 0 | 451 | #define dbg_check_cats(c) 0 |
| 444 | #define dbg_check_ltab(c) 0 | 452 | #define dbg_check_ltab(c) 0 |
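The new dbg_save_space_info()/dbg_check_space_info() pair is meant to bracket operations that must not change the amount of free space. A minimal illustrative sketch of the intended usage; do_something() is a hypothetical placeholder, the real callers are the re-mount paths in super.c further down:

/*
 * Illustrative only: snapshot the lprops statistics and the free space,
 * run an operation that should be space-neutral, then verify nothing
 * changed.
 */
static int space_neutral_op(struct ubifs_info *c)
{
        int err;

        dbg_save_space_info(c);          /* remember free space + lprops stats */
        err = do_something(c);           /* hypothetical space-neutral operation */
        if (err)
                return err;
        return dbg_check_space_info(c);  /* 0 if the free space is unchanged */
}

When UBIFS debugging is compiled out, both calls collapse to the no-op macros added above, so the check costs nothing in non-debug builds.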
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index f448ab1f9c38..f55d523c52bb 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
| @@ -482,30 +482,29 @@ static int ubifs_dir_release(struct inode *dir, struct file *file) | |||
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | /** | 484 | /** |
| 485 | * lock_2_inodes - lock two UBIFS inodes. | 485 | * lock_2_inodes - a wrapper for locking two UBIFS inodes. |
| 486 | * @inode1: first inode | 486 | * @inode1: first inode |
| 487 | * @inode2: second inode | 487 | * @inode2: second inode |
| 488 | * | ||
| 489 | * We do not implement any tricks to guarantee strict lock ordering, because | ||
| 490 | * VFS has already done it for us on the @i_mutex. So this is just a simple | ||
| 491 | * wrapper function. | ||
| 488 | */ | 492 | */ |
| 489 | static void lock_2_inodes(struct inode *inode1, struct inode *inode2) | 493 | static void lock_2_inodes(struct inode *inode1, struct inode *inode2) |
| 490 | { | 494 | { |
| 491 | if (inode1->i_ino < inode2->i_ino) { | 495 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); |
| 492 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_2); | 496 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); |
| 493 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_3); | ||
| 494 | } else { | ||
| 495 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); | ||
| 496 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_3); | ||
| 497 | } | ||
| 498 | } | 497 | } |
| 499 | 498 | ||
| 500 | /** | 499 | /** |
| 501 | * unlock_2_inodes - unlock two UBIFS inodes inodes. | 500 | * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes. |
| 502 | * @inode1: first inode | 501 | * @inode1: first inode |
| 503 | * @inode2: second inode | 502 | * @inode2: second inode |
| 504 | */ | 503 | */ |
| 505 | static void unlock_2_inodes(struct inode *inode1, struct inode *inode2) | 504 | static void unlock_2_inodes(struct inode *inode1, struct inode *inode2) |
| 506 | { | 505 | { |
| 507 | mutex_unlock(&ubifs_inode(inode1)->ui_mutex); | ||
| 508 | mutex_unlock(&ubifs_inode(inode2)->ui_mutex); | 506 | mutex_unlock(&ubifs_inode(inode2)->ui_mutex); |
| 507 | mutex_unlock(&ubifs_inode(inode1)->ui_mutex); | ||
| 509 | } | 508 | } |
| 510 | 509 | ||
| 511 | static int ubifs_link(struct dentry *old_dentry, struct inode *dir, | 510 | static int ubifs_link(struct dentry *old_dentry, struct inode *dir, |
| @@ -527,6 +526,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 527 | dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu", | 526 | dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu", |
| 528 | dentry->d_name.len, dentry->d_name.name, inode->i_ino, | 527 | dentry->d_name.len, dentry->d_name.name, inode->i_ino, |
| 529 | inode->i_nlink, dir->i_ino); | 528 | inode->i_nlink, dir->i_ino); |
| 529 | ubifs_assert(mutex_is_locked(&dir->i_mutex)); | ||
| 530 | ubifs_assert(mutex_is_locked(&inode->i_mutex)); | ||
| 530 | err = dbg_check_synced_i_size(inode); | 531 | err = dbg_check_synced_i_size(inode); |
| 531 | if (err) | 532 | if (err) |
| 532 | return err; | 533 | return err; |
| @@ -580,6 +581,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) | |||
| 580 | dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu", | 581 | dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu", |
| 581 | dentry->d_name.len, dentry->d_name.name, inode->i_ino, | 582 | dentry->d_name.len, dentry->d_name.name, inode->i_ino, |
| 582 | inode->i_nlink, dir->i_ino); | 583 | inode->i_nlink, dir->i_ino); |
| 584 | ubifs_assert(mutex_is_locked(&dir->i_mutex)); | ||
| 585 | ubifs_assert(mutex_is_locked(&inode->i_mutex)); | ||
| 583 | err = dbg_check_synced_i_size(inode); | 586 | err = dbg_check_synced_i_size(inode); |
| 584 | if (err) | 587 | if (err) |
| 585 | return err; | 588 | return err; |
| @@ -667,7 +670,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 667 | 670 | ||
| 668 | dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len, | 671 | dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len, |
| 669 | dentry->d_name.name, inode->i_ino, dir->i_ino); | 672 | dentry->d_name.name, inode->i_ino, dir->i_ino); |
| 670 | 673 | ubifs_assert(mutex_is_locked(&dir->i_mutex)); | |
| 674 | ubifs_assert(mutex_is_locked(&inode->i_mutex)); | ||
| 671 | err = check_dir_empty(c, dentry->d_inode); | 675 | err = check_dir_empty(c, dentry->d_inode); |
| 672 | if (err) | 676 | if (err) |
| 673 | return err; | 677 | return err; |
| @@ -922,59 +926,30 @@ out_budg: | |||
| 922 | } | 926 | } |
| 923 | 927 | ||
| 924 | /** | 928 | /** |
| 925 | * lock_3_inodes - lock three UBIFS inodes for rename. | 929 | * lock_3_inodes - a wrapper for locking three UBIFS inodes. |
| 926 | * @inode1: first inode | 930 | * @inode1: first inode |
| 927 | * @inode2: second inode | 931 | * @inode2: second inode |
| 928 | * @inode3: third inode | 932 | * @inode3: third inode |
| 929 | * | 933 | * |
| 930 | * For 'ubifs_rename()', @inode1 may be the same as @inode2 whereas @inode3 may | 934 | * This function is used for 'ubifs_rename()' and @inode1 may be the same as |
| 931 | * be null. | 935 | * @inode2 whereas @inode3 may be %NULL. |
| 936 | * | ||
| 937 | * We do not implement any tricks to guarantee strict lock ordering, because | ||
| 938 | * VFS has already done it for us on the @i_mutex. So this is just a simple | ||
| 939 | * wrapper function. | ||
| 932 | */ | 940 | */ |
| 933 | static void lock_3_inodes(struct inode *inode1, struct inode *inode2, | 941 | static void lock_3_inodes(struct inode *inode1, struct inode *inode2, |
| 934 | struct inode *inode3) | 942 | struct inode *inode3) |
| 935 | { | 943 | { |
| 936 | struct inode *i1, *i2, *i3; | 944 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); |
| 937 | 945 | if (inode2 != inode1) | |
| 938 | if (!inode3) { | 946 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); |
| 939 | if (inode1 != inode2) { | 947 | if (inode3) |
| 940 | lock_2_inodes(inode1, inode2); | 948 | mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3); |
| 941 | return; | ||
| 942 | } | ||
| 943 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); | ||
| 944 | return; | ||
| 945 | } | ||
| 946 | |||
| 947 | if (inode1 == inode2) { | ||
| 948 | lock_2_inodes(inode1, inode3); | ||
| 949 | return; | ||
| 950 | } | ||
| 951 | |||
| 952 | /* 3 different inodes */ | ||
| 953 | if (inode1 < inode2) { | ||
| 954 | i3 = inode2; | ||
| 955 | if (inode1 < inode3) { | ||
| 956 | i1 = inode1; | ||
| 957 | i2 = inode3; | ||
| 958 | } else { | ||
| 959 | i1 = inode3; | ||
| 960 | i2 = inode1; | ||
| 961 | } | ||
| 962 | } else { | ||
| 963 | i3 = inode1; | ||
| 964 | if (inode2 < inode3) { | ||
| 965 | i1 = inode2; | ||
| 966 | i2 = inode3; | ||
| 967 | } else { | ||
| 968 | i1 = inode3; | ||
| 969 | i2 = inode2; | ||
| 970 | } | ||
| 971 | } | ||
| 972 | mutex_lock_nested(&ubifs_inode(i1)->ui_mutex, WB_MUTEX_1); | ||
| 973 | lock_2_inodes(i2, i3); | ||
| 974 | } | 949 | } |
| 975 | 950 | ||
| 976 | /** | 951 | /** |
| 977 | * unlock_3_inodes - unlock three UBIFS inodes for rename. | 952 | * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename. |
| 978 | * @inode1: first inode | 953 | * @inode1: first inode |
| 979 | * @inode2: second inode | 954 | * @inode2: second inode |
| 980 | * @inode3: third inode | 955 | * @inode3: third inode |
| @@ -982,11 +957,11 @@ static void lock_3_inodes(struct inode *inode1, struct inode *inode2, | |||
| 982 | static void unlock_3_inodes(struct inode *inode1, struct inode *inode2, | 957 | static void unlock_3_inodes(struct inode *inode1, struct inode *inode2, |
| 983 | struct inode *inode3) | 958 | struct inode *inode3) |
| 984 | { | 959 | { |
| 985 | mutex_unlock(&ubifs_inode(inode1)->ui_mutex); | ||
| 986 | if (inode1 != inode2) | ||
| 987 | mutex_unlock(&ubifs_inode(inode2)->ui_mutex); | ||
| 988 | if (inode3) | 960 | if (inode3) |
| 989 | mutex_unlock(&ubifs_inode(inode3)->ui_mutex); | 961 | mutex_unlock(&ubifs_inode(inode3)->ui_mutex); |
| 962 | if (inode1 != inode2) | ||
| 963 | mutex_unlock(&ubifs_inode(inode2)->ui_mutex); | ||
| 964 | mutex_unlock(&ubifs_inode(inode1)->ui_mutex); | ||
| 990 | } | 965 | } |
| 991 | 966 | ||
| 992 | static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | 967 | static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, |
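Since lock_3_inodes() no longer sorts by inode number, the call pattern in the rename path stays trivial. A hedged sketch of the assumed usage; the argument roles are inferred from the comment above, not copied from the full source:

        /* Illustrative only: the assumed call pattern in ubifs_rename() */
        lock_3_inodes(old_dir, new_dir, new_inode);     /* @new_inode may be NULL */
        /* ... move the directory entry, update nlink and timestamps ... */
        unlock_3_inodes(old_dir, new_dir, new_inode);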
| @@ -1020,6 +995,11 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1020 | "dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name, | 995 | "dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name, |
| 1021 | old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len, | 996 | old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len, |
| 1022 | new_dentry->d_name.name, new_dir->i_ino); | 997 | new_dentry->d_name.name, new_dir->i_ino); |
| 998 | ubifs_assert(mutex_is_locked(&old_dir->i_mutex)); | ||
| 999 | ubifs_assert(mutex_is_locked(&new_dir->i_mutex)); | ||
| 1000 | if (unlink) | ||
| 1001 | ubifs_assert(mutex_is_locked(&new_inode->i_mutex)); | ||
| 1002 | |||
| 1023 | 1003 | ||
| 1024 | if (unlink && is_dir) { | 1004 | if (unlink && is_dir) { |
| 1025 | err = check_dir_empty(c, new_inode); | 1005 | err = check_dir_empty(c, new_inode); |
| @@ -1199,7 +1179,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
| 1199 | return 0; | 1179 | return 0; |
| 1200 | } | 1180 | } |
| 1201 | 1181 | ||
| 1202 | struct inode_operations ubifs_dir_inode_operations = { | 1182 | const struct inode_operations ubifs_dir_inode_operations = { |
| 1203 | .lookup = ubifs_lookup, | 1183 | .lookup = ubifs_lookup, |
| 1204 | .create = ubifs_create, | 1184 | .create = ubifs_create, |
| 1205 | .link = ubifs_link, | 1185 | .link = ubifs_link, |
| @@ -1219,7 +1199,7 @@ struct inode_operations ubifs_dir_inode_operations = { | |||
| 1219 | #endif | 1199 | #endif |
| 1220 | }; | 1200 | }; |
| 1221 | 1201 | ||
| 1222 | struct file_operations ubifs_dir_operations = { | 1202 | const struct file_operations ubifs_dir_operations = { |
| 1223 | .llseek = ubifs_dir_llseek, | 1203 | .llseek = ubifs_dir_llseek, |
| 1224 | .release = ubifs_dir_release, | 1204 | .release = ubifs_dir_release, |
| 1225 | .read = generic_read_dir, | 1205 | .read = generic_read_dir, |
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index bf37374567fa..93b6de51f261 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -432,7 +432,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, | |||
| 432 | int uninitialized_var(err), appending = !!(pos + len > inode->i_size); | 432 | int uninitialized_var(err), appending = !!(pos + len > inode->i_size); |
| 433 | struct page *page; | 433 | struct page *page; |
| 434 | 434 | ||
| 435 | |||
| 436 | ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); | 435 | ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); |
| 437 | 436 | ||
| 438 | if (unlikely(c->ro_media)) | 437 | if (unlikely(c->ro_media)) |
| @@ -1541,7 +1540,7 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 1541 | return 0; | 1540 | return 0; |
| 1542 | } | 1541 | } |
| 1543 | 1542 | ||
| 1544 | struct address_space_operations ubifs_file_address_operations = { | 1543 | const struct address_space_operations ubifs_file_address_operations = { |
| 1545 | .readpage = ubifs_readpage, | 1544 | .readpage = ubifs_readpage, |
| 1546 | .writepage = ubifs_writepage, | 1545 | .writepage = ubifs_writepage, |
| 1547 | .write_begin = ubifs_write_begin, | 1546 | .write_begin = ubifs_write_begin, |
| @@ -1551,7 +1550,7 @@ struct address_space_operations ubifs_file_address_operations = { | |||
| 1551 | .releasepage = ubifs_releasepage, | 1550 | .releasepage = ubifs_releasepage, |
| 1552 | }; | 1551 | }; |
| 1553 | 1552 | ||
| 1554 | struct inode_operations ubifs_file_inode_operations = { | 1553 | const struct inode_operations ubifs_file_inode_operations = { |
| 1555 | .setattr = ubifs_setattr, | 1554 | .setattr = ubifs_setattr, |
| 1556 | .getattr = ubifs_getattr, | 1555 | .getattr = ubifs_getattr, |
| 1557 | #ifdef CONFIG_UBIFS_FS_XATTR | 1556 | #ifdef CONFIG_UBIFS_FS_XATTR |
| @@ -1562,14 +1561,14 @@ struct inode_operations ubifs_file_inode_operations = { | |||
| 1562 | #endif | 1561 | #endif |
| 1563 | }; | 1562 | }; |
| 1564 | 1563 | ||
| 1565 | struct inode_operations ubifs_symlink_inode_operations = { | 1564 | const struct inode_operations ubifs_symlink_inode_operations = { |
| 1566 | .readlink = generic_readlink, | 1565 | .readlink = generic_readlink, |
| 1567 | .follow_link = ubifs_follow_link, | 1566 | .follow_link = ubifs_follow_link, |
| 1568 | .setattr = ubifs_setattr, | 1567 | .setattr = ubifs_setattr, |
| 1569 | .getattr = ubifs_getattr, | 1568 | .getattr = ubifs_getattr, |
| 1570 | }; | 1569 | }; |
| 1571 | 1570 | ||
| 1572 | struct file_operations ubifs_file_operations = { | 1571 | const struct file_operations ubifs_file_operations = { |
| 1573 | .llseek = generic_file_llseek, | 1572 | .llseek = generic_file_llseek, |
| 1574 | .read = do_sync_read, | 1573 | .read = do_sync_read, |
| 1575 | .write = do_sync_write, | 1574 | .write = do_sync_write, |
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 9832f9abe28e..a711d33b3d3e 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c | |||
| @@ -31,6 +31,26 @@ | |||
| 31 | * to be reused. Garbage collection will cause the number of dirty index nodes | 31 | * to be reused. Garbage collection will cause the number of dirty index nodes |
| 32 | * to grow, however sufficient space is reserved for the index to ensure the | 32 | * to grow, however sufficient space is reserved for the index to ensure the |
| 33 | * commit will never run out of space. | 33 | * commit will never run out of space. |
| 34 | * | ||
| 35 | * Notes about dead watermark. In the current UBIFS implementation we assume that | ||
| 36 | * LEBs which have less than @c->dead_wm bytes of free + dirty space are full | ||
| 37 | * and not worth garbage-collecting. The dead watermark is one min. I/O unit | ||
| 38 | * size, or the min. UBIFS node size, whichever is greater. Indeed, UBIFS | ||
| 39 | * Garbage Collector has to synchronize the GC head's write buffer before | ||
| 40 | * returning, so this is about wasting one min. I/O unit. However, UBIFS GC can | ||
| 41 | * actually reclaim even very small pieces of dirty space by garbage collecting | ||
| 42 | * enough dirty LEBs, but we do not bother doing this in this implementation. | ||
| 43 | * | ||
| 44 | * Notes about dark watermark. The results of GC work depend on how big the | ||
| 45 | * UBIFS nodes GC deals with are. Large nodes make GC waste more space. Indeed, | ||
| 46 | * if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would | ||
| 47 | * have to waste large pieces of free space at the end of LEB B, because nodes | ||
| 48 | * from LEB A would not fit. And the worst situation is when all nodes are of | ||
| 49 | * maximum size. So the dark watermark is the amount of free + dirty space in a | ||
| 50 | * LEB which is guaranteed to be reclaimable. If a LEB has less space, GC might | ||
| 51 | * be unable to reclaim it. So, LEBs with free + dirty greater than the dark | ||
| 52 | * watermark are "good" LEBs from GC's point of view. The other LEBs are not so | ||
| 53 | * good, and GC takes extra care when moving them. | ||
| 34 | */ | 54 | */ |
| 35 | 55 | ||
| 36 | #include <linux/pagemap.h> | 56 | #include <linux/pagemap.h> |
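The two watermarks described in the new comment can be captured in a tiny standalone model. This is an illustrative userspace sketch, not UBIFS code: the ALIGN() expression mirrors how init_constants_early() in super.c (further down in this diff) computes @c->dead_wm and @c->dark_wm, while the node-size constants and LEB numbers below are made-up stand-ins.

#include <stdio.h>

#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))
#define MIN_WRITE_SZ       56     /* stand-in for the real minimal node size */
#define UBIFS_MAX_NODE_SZ  4096   /* stand-in for the real maximal node size */

/* Classify a LEB by its free + dirty space against the two watermarks. */
static const char *classify_leb(int free_plus_dirty, int min_io_size)
{
        int dead_wm = ALIGN(MIN_WRITE_SZ, min_io_size);
        int dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, min_io_size);

        if (free_plus_dirty < dead_wm)
                return "dead: not worth garbage-collecting";
        if (free_plus_dirty < dark_wm)
                return "dark: may or may not be reclaimable";
        return "good: guaranteed to be reclaimable";
}

int main(void)
{
        int sizes[] = { 100, 2048, 8192 };
        int i;

        for (i = 0; i < 3; i++)
                printf("free+dirty %5d -> %s\n", sizes[i],
                       classify_leb(sizes[i], 512));
        return 0;
}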
| @@ -381,7 +401,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) | |||
| 381 | 401 | ||
| 382 | /* | 402 | /* |
| 383 | * Don't release the LEB until after the next commit, because | 403 | * Don't release the LEB until after the next commit, because |
| 384 | * it may contain date which is needed for recovery. So | 404 | * it may contain data which is needed for recovery. So |
| 385 | * although we freed this LEB, it will become usable only after | 405 | * although we freed this LEB, it will become usable only after |
| 386 | * the commit. | 406 | * the commit. |
| 387 | */ | 407 | */ |
| @@ -810,8 +830,9 @@ out: | |||
| 810 | * ubifs_destroy_idx_gc - destroy idx_gc list. | 830 | * ubifs_destroy_idx_gc - destroy idx_gc list. |
| 811 | * @c: UBIFS file-system description object | 831 | * @c: UBIFS file-system description object |
| 812 | * | 832 | * |
| 813 | * This function destroys the idx_gc list. It is called when unmounting or | 833 | * This function destroys the @c->idx_gc list. It is called when unmounting |
| 814 | * remounting read-only so locks are not needed. | 834 | * so locks are not needed. Note that this function has no return value; it |
| 835 | * cannot fail. | ||
| 815 | */ | 836 | */ |
| 816 | void ubifs_destroy_idx_gc(struct ubifs_info *c) | 837 | void ubifs_destroy_idx_gc(struct ubifs_info *c) |
| 817 | { | 838 | { |
| @@ -824,7 +845,6 @@ void ubifs_destroy_idx_gc(struct ubifs_info *c) | |||
| 824 | list_del(&idx_gc->list); | 845 | list_del(&idx_gc->list); |
| 825 | kfree(idx_gc); | 846 | kfree(idx_gc); |
| 826 | } | 847 | } |
| 827 | |||
| 828 | } | 848 | } |
| 829 | 849 | ||
| 830 | /** | 850 | /** |
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 01682713af69..e8e632a1dcdf 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | * would have been wasted for padding to the nearest minimal I/O unit boundary. | 29 | * would have been wasted for padding to the nearest minimal I/O unit boundary. |
| 30 | * Instead, data first goes to the write-buffer and is flushed when the | 30 | * Instead, data first goes to the write-buffer and is flushed when the |
| 31 | * buffer is full or when it is not used for some time (by timer). This is | 31 | * buffer is full or when it is not used for some time (by timer). This is |
| 32 | * similarto the mechanism is used by JFFS2. | 32 | * similar to the mechanism used by JFFS2. |
| 33 | * | 33 | * |
| 34 | * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by | 34 | * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by |
| 35 | * mutexes defined inside these objects. Since sometimes upper-level code | 35 | * mutexes defined inside these objects. Since sometimes upper-level code |
| @@ -75,7 +75,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) | |||
| 75 | * @lnum: logical eraseblock number | 75 | * @lnum: logical eraseblock number |
| 76 | * @offs: offset within the logical eraseblock | 76 | * @offs: offset within the logical eraseblock |
| 77 | * @quiet: print no messages | 77 | * @quiet: print no messages |
| 78 | * @chk_crc: indicates whether to always check the CRC | 78 | * @must_chk_crc: indicates whether to always check the CRC |
| 79 | * | 79 | * |
| 80 | * This function checks node magic number and CRC checksum. This function also | 80 | * This function checks node magic number and CRC checksum. This function also |
| 81 | * validates node length to prevent UBIFS from becoming crazy when an attacker | 81 | * validates node length to prevent UBIFS from becoming crazy when an attacker |
| @@ -83,11 +83,17 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) | |||
| 83 | * node length in the common header could cause UBIFS to read memory outside of | 83 | * node length in the common header could cause UBIFS to read memory outside of |
| 84 | * allocated buffer when checking the CRC checksum. | 84 | * allocated buffer when checking the CRC checksum. |
| 85 | * | 85 | * |
| 86 | * This function returns zero in case of success %-EUCLEAN in case of bad CRC | 86 | * This function may skip the data node CRC check if @c->no_chk_data_crc is |
| 87 | * or magic. | 87 | * true, which is controlled by the corresponding UBIFS mount option. However, if |
| 88 | * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is | ||
| 89 | * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is | ||
| 90 | * ignored and CRC is checked. | ||
| 91 | * | ||
| 92 | * This function returns zero in case of success and %-EUCLEAN in case of bad | ||
| 93 | * CRC or magic. | ||
| 88 | */ | 94 | */ |
| 89 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, | 95 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, |
| 90 | int offs, int quiet, int chk_crc) | 96 | int offs, int quiet, int must_chk_crc) |
| 91 | { | 97 | { |
| 92 | int err = -EINVAL, type, node_len; | 98 | int err = -EINVAL, type, node_len; |
| 93 | uint32_t crc, node_crc, magic; | 99 | uint32_t crc, node_crc, magic; |
| @@ -123,9 +129,9 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, | |||
| 123 | node_len > c->ranges[type].max_len) | 129 | node_len > c->ranges[type].max_len) |
| 124 | goto out_len; | 130 | goto out_len; |
| 125 | 131 | ||
| 126 | if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc) | 132 | if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc && |
| 127 | if (c->no_chk_data_crc) | 133 | c->no_chk_data_crc) |
| 128 | return 0; | 134 | return 0; |
| 129 | 135 | ||
| 130 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); | 136 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); |
| 131 | node_crc = le32_to_cpu(ch->crc); | 137 | node_crc = le32_to_cpu(ch->crc); |
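The hunk above folds the old nested test into a single condition. A small self-contained model of that decision; the flags are plain ints here, whereas in the kernel they live in struct ubifs_info and in the @must_chk_crc argument:

#include <stdio.h>

/*
 * Illustrative model of the CRC-skip test in ubifs_check_node(): the CRC
 * of a data node may only be skipped when the caller did not insist on
 * checking it (must_chk_crc), the "always check" mode is off, and the
 * no_chk_data_crc mount option is set.
 */
static int may_skip_crc(int is_data_node, int must_chk_crc,
                        int always_chk_crc, int no_chk_data_crc)
{
        return is_data_node && !must_chk_crc && !always_chk_crc &&
               no_chk_data_crc;
}

int main(void)
{
        /* Data node, relaxed caller, mount option set: skip the CRC check. */
        printf("%d\n", may_skip_crc(1, 0, 0, 1));   /* prints 1 */
        /* Same node while always_chk_crc is set (e.g. during re-mounting). */
        printf("%d\n", may_skip_crc(1, 0, 1, 1));   /* prints 0 */
        return 0;
}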
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 9b7c54e0cd2a..a11ca0958a23 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c | |||
| @@ -208,7 +208,7 @@ again: | |||
| 208 | offs = 0; | 208 | offs = 0; |
| 209 | 209 | ||
| 210 | out: | 210 | out: |
| 211 | err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, UBI_SHORTTERM); | 211 | err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype); |
| 212 | if (err) | 212 | if (err) |
| 213 | goto out_unlock; | 213 | goto out_unlock; |
| 214 | 214 | ||
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index dfd2bcece27a..4cdd284dea56 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c | |||
| @@ -635,10 +635,10 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, | |||
| 635 | * @c: UBIFS file-system description object | 635 | * @c: UBIFS file-system description object |
| 636 | * @st: return statistics | 636 | * @st: return statistics |
| 637 | */ | 637 | */ |
| 638 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *st) | 638 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) |
| 639 | { | 639 | { |
| 640 | spin_lock(&c->space_lock); | 640 | spin_lock(&c->space_lock); |
| 641 | memcpy(st, &c->lst, sizeof(struct ubifs_lp_stats)); | 641 | memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats)); |
| 642 | spin_unlock(&c->space_lock); | 642 | spin_unlock(&c->space_lock); |
| 643 | } | 643 | } |
| 644 | 644 | ||
| @@ -678,6 +678,9 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, | |||
| 678 | 678 | ||
| 679 | out: | 679 | out: |
| 680 | ubifs_release_lprops(c); | 680 | ubifs_release_lprops(c); |
| 681 | if (err) | ||
| 682 | ubifs_err("cannot change properties of LEB %d, error %d", | ||
| 683 | lnum, err); | ||
| 681 | return err; | 684 | return err; |
| 682 | } | 685 | } |
| 683 | 686 | ||
| @@ -714,6 +717,9 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, | |||
| 714 | 717 | ||
| 715 | out: | 718 | out: |
| 716 | ubifs_release_lprops(c); | 719 | ubifs_release_lprops(c); |
| 720 | if (err) | ||
| 721 | ubifs_err("cannot update properties of LEB %d, error %d", | ||
| 722 | lnum, err); | ||
| 717 | return err; | 723 | return err; |
| 718 | } | 724 | } |
| 719 | 725 | ||
| @@ -737,6 +743,8 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp) | |||
| 737 | lpp = ubifs_lpt_lookup(c, lnum); | 743 | lpp = ubifs_lpt_lookup(c, lnum); |
| 738 | if (IS_ERR(lpp)) { | 744 | if (IS_ERR(lpp)) { |
| 739 | err = PTR_ERR(lpp); | 745 | err = PTR_ERR(lpp); |
| 746 | ubifs_err("cannot read properties of LEB %d, error %d", | ||
| 747 | lnum, err); | ||
| 740 | goto out; | 748 | goto out; |
| 741 | } | 749 | } |
| 742 | 750 | ||
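With the parameter renamed to @lst the signature now matches the @c->lst field it copies. A hedged usage sketch; the printed field names are assumptions about struct ubifs_lp_stats, not taken from this hunk:

        struct ubifs_lp_stats lst;

        /*
         * Take a consistent snapshot of the lprops statistics under
         * @c->space_lock; this is the kind of snapshot the new
         * dbg_save_space_info() helper builds on.
         */
        ubifs_get_lp_stats(c, &lst);
        dbg_msg("total free %lld, total dirty %lld",
                lst.total_free, lst.total_dirty);   /* field names assumed */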
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 96ca95707175..3216a1f277f8 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
| @@ -556,23 +556,23 @@ no_space: | |||
| 556 | } | 556 | } |
| 557 | 557 | ||
| 558 | /** | 558 | /** |
| 559 | * next_pnode - find next pnode. | 559 | * next_pnode_to_dirty - find next pnode to dirty. |
| 560 | * @c: UBIFS file-system description object | 560 | * @c: UBIFS file-system description object |
| 561 | * @pnode: pnode | 561 | * @pnode: pnode |
| 562 | * | 562 | * |
| 563 | * This function returns the next pnode or %NULL if there are no more pnodes. | 563 | * This function returns the next pnode to dirty or %NULL if there are no more |
| 564 | * pnodes. Note that pnodes that have never been written (lnum == 0) are | ||
| 565 | * skipped. | ||
| 564 | */ | 566 | */ |
| 565 | static struct ubifs_pnode *next_pnode(struct ubifs_info *c, | 567 | static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, |
| 566 | struct ubifs_pnode *pnode) | 568 | struct ubifs_pnode *pnode) |
| 567 | { | 569 | { |
| 568 | struct ubifs_nnode *nnode; | 570 | struct ubifs_nnode *nnode; |
| 569 | int iip; | 571 | int iip; |
| 570 | 572 | ||
| 571 | /* Try to go right */ | 573 | /* Try to go right */ |
| 572 | nnode = pnode->parent; | 574 | nnode = pnode->parent; |
| 573 | iip = pnode->iip + 1; | 575 | for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) { |
| 574 | if (iip < UBIFS_LPT_FANOUT) { | ||
| 575 | /* We assume here that LEB zero is never an LPT LEB */ | ||
| 576 | if (nnode->nbranch[iip].lnum) | 576 | if (nnode->nbranch[iip].lnum) |
| 577 | return ubifs_get_pnode(c, nnode, iip); | 577 | return ubifs_get_pnode(c, nnode, iip); |
| 578 | } | 578 | } |
| @@ -583,8 +583,11 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c, | |||
| 583 | nnode = nnode->parent; | 583 | nnode = nnode->parent; |
| 584 | if (!nnode) | 584 | if (!nnode) |
| 585 | return NULL; | 585 | return NULL; |
| 586 | /* We assume here that LEB zero is never an LPT LEB */ | 586 | for (; iip < UBIFS_LPT_FANOUT; iip++) { |
| 587 | } while (iip >= UBIFS_LPT_FANOUT || !nnode->nbranch[iip].lnum); | 587 | if (nnode->nbranch[iip].lnum) |
| 588 | break; | ||
| 589 | } | ||
| 590 | } while (iip >= UBIFS_LPT_FANOUT); | ||
| 588 | 591 | ||
| 589 | /* Go right */ | 592 | /* Go right */ |
| 590 | nnode = ubifs_get_nnode(c, nnode, iip); | 593 | nnode = ubifs_get_nnode(c, nnode, iip); |
| @@ -593,12 +596,29 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c, | |||
| 593 | 596 | ||
| 594 | /* Go down to level 1 */ | 597 | /* Go down to level 1 */ |
| 595 | while (nnode->level > 1) { | 598 | while (nnode->level > 1) { |
| 596 | nnode = ubifs_get_nnode(c, nnode, 0); | 599 | for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) { |
| 600 | if (nnode->nbranch[iip].lnum) | ||
| 601 | break; | ||
| 602 | } | ||
| 603 | if (iip >= UBIFS_LPT_FANOUT) { | ||
| 604 | /* | ||
| 605 | * Should not happen, but we need to keep going | ||
| 606 | * if it does. | ||
| 607 | */ | ||
| 608 | iip = 0; | ||
| 609 | } | ||
| 610 | nnode = ubifs_get_nnode(c, nnode, iip); | ||
| 597 | if (IS_ERR(nnode)) | 611 | if (IS_ERR(nnode)) |
| 598 | return (void *)nnode; | 612 | return (void *)nnode; |
| 599 | } | 613 | } |
| 600 | 614 | ||
| 601 | return ubifs_get_pnode(c, nnode, 0); | 615 | for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) |
| 616 | if (nnode->nbranch[iip].lnum) | ||
| 617 | break; | ||
| 618 | if (iip >= UBIFS_LPT_FANOUT) | ||
| 619 | /* Should not happen, but we need to keep going if it does */ | ||
| 620 | iip = 0; | ||
| 621 | return ubifs_get_pnode(c, nnode, iip); | ||
| 602 | } | 622 | } |
| 603 | 623 | ||
| 604 | /** | 624 | /** |
| @@ -688,7 +708,7 @@ static int make_tree_dirty(struct ubifs_info *c) | |||
| 688 | pnode = pnode_lookup(c, 0); | 708 | pnode = pnode_lookup(c, 0); |
| 689 | while (pnode) { | 709 | while (pnode) { |
| 690 | do_make_pnode_dirty(c, pnode); | 710 | do_make_pnode_dirty(c, pnode); |
| 691 | pnode = next_pnode(c, pnode); | 711 | pnode = next_pnode_to_dirty(c, pnode); |
| 692 | if (IS_ERR(pnode)) | 712 | if (IS_ERR(pnode)) |
| 693 | return PTR_ERR(pnode); | 713 | return PTR_ERR(pnode); |
| 694 | } | 714 | } |
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 71d5493bf565..a88f33801b98 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c | |||
| @@ -354,7 +354,7 @@ int ubifs_write_master(struct ubifs_info *c) | |||
| 354 | int err, lnum, offs, len; | 354 | int err, lnum, offs, len; |
| 355 | 355 | ||
| 356 | if (c->ro_media) | 356 | if (c->ro_media) |
| 357 | return -EINVAL; | 357 | return -EROFS; |
| 358 | 358 | ||
| 359 | lnum = UBIFS_MST_LNUM; | 359 | lnum = UBIFS_MST_LNUM; |
| 360 | offs = c->mst_offs + c->mst_node_alsz; | 360 | offs = c->mst_offs + c->mst_node_alsz; |
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 9e6f403f170e..152a7b34a141 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | * Orphans are accumulated in a rb-tree. When an inode's link count drops to | 46 | * Orphans are accumulated in a rb-tree. When an inode's link count drops to |
| 47 | * zero, the inode number is added to the rb-tree. It is removed from the tree | 47 | * zero, the inode number is added to the rb-tree. It is removed from the tree |
| 48 | * when the inode is deleted. Any new orphans that are in the orphan tree when | 48 | * when the inode is deleted. Any new orphans that are in the orphan tree when |
| 49 | * the commit is run, are written to the orphan area in 1 or more orph nodes. | 49 | * the commit is run are written to the orphan area in 1 or more orphan nodes. |
| 50 | * If the orphan area is full, it is consolidated to make space. There is | 50 | * If the orphan area is full, it is consolidated to make space. There is |
| 51 | * always enough space because validation prevents the user from creating more | 51 | * always enough space because validation prevents the user from creating more |
| 52 | * than the maximum number of orphans allowed. | 52 | * than the maximum number of orphans allowed. |
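The lifecycle described above (the inode number is added when the link count hits zero, dropped when the inode is deleted, and whatever is left is flushed at commit time) can be modelled in a few lines. This is a deliberately simplified userspace toy: it uses a flat array where UBIFS uses an rb-tree, and the inode numbers are invented.

#include <stdio.h>

/* Toy model of the orphan set; the real code keys an rb-tree by inode number. */
struct orphan_set {
        unsigned long inums[16];
        int cnt;
};

/* Link count dropped to zero: remember the inode as an orphan. */
static void add_orphan(struct orphan_set *s, unsigned long inum)
{
        if (s->cnt < 16)
                s->inums[s->cnt++] = inum;
}

/* The inode was deleted before the commit: forget it again. */
static void delete_orphan(struct orphan_set *s, unsigned long inum)
{
        int i;

        for (i = 0; i < s->cnt; i++)
                if (s->inums[i] == inum) {
                        s->inums[i] = s->inums[--s->cnt];
                        return;
                }
}

/* Commit: whatever is still in the set goes to the orphan area. */
static void commit_orphans(const struct orphan_set *s)
{
        int i;

        for (i = 0; i < s->cnt; i++)
                printf("write orphan node for inode %lu\n", s->inums[i]);
}

int main(void)
{
        struct orphan_set s = { .cnt = 0 };

        add_orphan(&s, 65);      /* unlink dropped nlink to zero */
        add_orphan(&s, 66);
        delete_orphan(&s, 65);   /* inode 65 was deleted before the commit */
        commit_orphans(&s);      /* only inode 66 is recorded */
        return 0;
}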
| @@ -231,7 +231,7 @@ static int tot_avail_orphs(struct ubifs_info *c) | |||
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | /** | 233 | /** |
| 234 | * do_write_orph_node - write a node | 234 | * do_write_orph_node - write a node to the orphan head. |
| 235 | * @c: UBIFS file-system description object | 235 | * @c: UBIFS file-system description object |
| 236 | * @len: length of node | 236 | * @len: length of node |
| 237 | * @atomic: write atomically | 237 | * @atomic: write atomically |
| @@ -264,11 +264,11 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic) | |||
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | /** | 266 | /** |
| 267 | * write_orph_node - write an orph node | 267 | * write_orph_node - write an orphan node. |
| 268 | * @c: UBIFS file-system description object | 268 | * @c: UBIFS file-system description object |
| 269 | * @atomic: write atomically | 269 | * @atomic: write atomically |
| 270 | * | 270 | * |
| 271 | * This function builds an orph node from the cnext list and writes it to the | 271 | * This function builds an orphan node from the cnext list and writes it to the |
| 272 | * orphan head. On success, %0 is returned, otherwise a negative error code | 272 | * orphan head. On success, %0 is returned, otherwise a negative error code |
| 273 | * is returned. | 273 | * is returned. |
| 274 | */ | 274 | */ |
| @@ -326,11 +326,11 @@ static int write_orph_node(struct ubifs_info *c, int atomic) | |||
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | /** | 328 | /** |
| 329 | * write_orph_nodes - write orph nodes until there are no more to commit | 329 | * write_orph_nodes - write orphan nodes until there are no more to commit. |
| 330 | * @c: UBIFS file-system description object | 330 | * @c: UBIFS file-system description object |
| 331 | * @atomic: write atomically | 331 | * @atomic: write atomically |
| 332 | * | 332 | * |
| 333 | * This function writes orph nodes for all the orphans to commit. On success, | 333 | * This function writes orphan nodes for all the orphans to commit. On success, |
| 334 | * %0 is returned, otherwise a negative error code is returned. | 334 | * %0 is returned, otherwise a negative error code is returned. |
| 335 | */ | 335 | */ |
| 336 | static int write_orph_nodes(struct ubifs_info *c, int atomic) | 336 | static int write_orph_nodes(struct ubifs_info *c, int atomic) |
| @@ -478,14 +478,14 @@ int ubifs_orphan_end_commit(struct ubifs_info *c) | |||
| 478 | } | 478 | } |
| 479 | 479 | ||
| 480 | /** | 480 | /** |
| 481 | * clear_orphans - erase all LEBs used for orphans. | 481 | * ubifs_clear_orphans - erase all LEBs used for orphans. |
| 482 | * @c: UBIFS file-system description object | 482 | * @c: UBIFS file-system description object |
| 483 | * | 483 | * |
| 484 | * If recovery is not required, then the orphans from the previous session | 484 | * If recovery is not required, then the orphans from the previous session |
| 485 | * are not needed. This function locates the LEBs used to record | 485 | * are not needed. This function locates the LEBs used to record |
| 486 | * orphans, and un-maps them. | 486 | * orphans, and un-maps them. |
| 487 | */ | 487 | */ |
| 488 | static int clear_orphans(struct ubifs_info *c) | 488 | int ubifs_clear_orphans(struct ubifs_info *c) |
| 489 | { | 489 | { |
| 490 | int lnum, err; | 490 | int lnum, err; |
| 491 | 491 | ||
| @@ -547,9 +547,9 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum) | |||
| 547 | * do_kill_orphans - remove orphan inodes from the index. | 547 | * do_kill_orphans - remove orphan inodes from the index. |
| 548 | * @c: UBIFS file-system description object | 548 | * @c: UBIFS file-system description object |
| 549 | * @sleb: scanned LEB | 549 | * @sleb: scanned LEB |
| 550 | * @last_cmt_no: cmt_no of last orph node read is passed and returned here | 550 | * @last_cmt_no: cmt_no of last orphan node read is passed and returned here |
| 551 | * @outofdate: whether the LEB is out of date is returned here | 551 | * @outofdate: whether the LEB is out of date is returned here |
| 552 | * @last_flagged: whether the end orph node is encountered | 552 | * @last_flagged: whether the end orphan node is encountered |
| 553 | * | 553 | * |
| 554 | * This function is a helper to the 'kill_orphans()' function. It goes through | 554 | * This function is a helper to the 'kill_orphans()' function. It goes through |
| 555 | * every orphan node in a LEB and for every inode number recorded, removes | 555 | * every orphan node in a LEB and for every inode number recorded, removes |
| @@ -580,8 +580,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
| 580 | /* | 580 | /* |
| 581 | * The commit number on the master node may be less, because | 581 | * The commit number on the master node may be less, because |
| 582 | * of a failed commit. If there are several failed commits in a | 582 | * of a failed commit. If there are several failed commits in a |
| 583 | * row, the commit number written on orph nodes will continue to | 583 | * row, the commit number written on orphan nodes will continue |
| 584 | * increase (because the commit number is adjusted here) even | 584 | * to increase (because the commit number is adjusted here) even |
| 585 | * though the commit number on the master node stays the same | 585 | * though the commit number on the master node stays the same |
| 586 | * because the master node has not been re-written. | 586 | * because the master node has not been re-written. |
| 587 | */ | 587 | */ |
| @@ -589,9 +589,9 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
| 589 | c->cmt_no = cmt_no; | 589 | c->cmt_no = cmt_no; |
| 590 | if (cmt_no < *last_cmt_no && *last_flagged) { | 590 | if (cmt_no < *last_cmt_no && *last_flagged) { |
| 591 | /* | 591 | /* |
| 592 | * The last orph node had a higher commit number and was | 592 | * The last orphan node had a higher commit number and |
| 593 | * flagged as the last written for that commit number. | 593 | * was flagged as the last written for that commit |
| 594 | * That makes this orph node, out of date. | 594 | * number. That makes this orphan node out of date. |
| 595 | */ | 595 | */ |
| 596 | if (!first) { | 596 | if (!first) { |
| 597 | ubifs_err("out of order commit number %llu in " | 597 | ubifs_err("out of order commit number %llu in " |
| @@ -658,10 +658,10 @@ static int kill_orphans(struct ubifs_info *c) | |||
| 658 | /* | 658 | /* |
| 659 | * Orph nodes always start at c->orph_first and are written to each | 659 | * Orph nodes always start at c->orph_first and are written to each |
| 660 | * successive LEB in turn. Generally unused LEBs will have been unmapped | 660 | * successive LEB in turn. Generally unused LEBs will have been unmapped |
| 661 | * but may contain out of date orph nodes if the unmap didn't go | 661 | * but may contain out of date orphan nodes if the unmap didn't go |
| 662 | * through. In addition, the last orph node written for each commit is | 662 | * through. In addition, the last orphan node written for each commit is |
| 663 | * marked (top bit of orph->cmt_no is set to 1). It is possible that | 663 | * marked (top bit of orph->cmt_no is set to 1). It is possible that |
| 664 | * there are orph nodes from the next commit (i.e. the commit did not | 664 | * there are orphan nodes from the next commit (i.e. the commit did not |
| 665 | * complete successfully). In that case, no orphans will have been lost | 665 | * complete successfully). In that case, no orphans will have been lost |
| 666 | * due to the way that orphans are written, and any orphans added will | 666 | * due to the way that orphans are written, and any orphans added will |
| 667 | * be valid orphans anyway and so can be deleted. | 667 | * be valid orphans anyway and so can be deleted. |
| @@ -718,7 +718,7 @@ int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only) | |||
| 718 | if (unclean) | 718 | if (unclean) |
| 719 | err = kill_orphans(c); | 719 | err = kill_orphans(c); |
| 720 | else if (!read_only) | 720 | else if (!read_only) |
| 721 | err = clear_orphans(c); | 721 | err = ubifs_clear_orphans(c); |
| 722 | 722 | ||
| 723 | return err; | 723 | return err; |
| 724 | } | 724 | } |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 89556ee72518..1182b66a5491 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -397,6 +397,7 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 397 | buf->f_namelen = UBIFS_MAX_NLEN; | 397 | buf->f_namelen = UBIFS_MAX_NLEN; |
| 398 | buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); | 398 | buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); |
| 399 | buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); | 399 | buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); |
| 400 | ubifs_assert(buf->f_bfree <= c->block_cnt); | ||
| 400 | return 0; | 401 | return 0; |
| 401 | } | 402 | } |
| 402 | 403 | ||
| @@ -432,33 +433,24 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) | |||
| 432 | int i, err; | 433 | int i, err; |
| 433 | struct ubifs_info *c = sb->s_fs_info; | 434 | struct ubifs_info *c = sb->s_fs_info; |
| 434 | struct writeback_control wbc = { | 435 | struct writeback_control wbc = { |
| 435 | .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, | 436 | .sync_mode = WB_SYNC_ALL, |
| 436 | .range_start = 0, | 437 | .range_start = 0, |
| 437 | .range_end = LLONG_MAX, | 438 | .range_end = LLONG_MAX, |
| 438 | .nr_to_write = LONG_MAX, | 439 | .nr_to_write = LONG_MAX, |
| 439 | }; | 440 | }; |
| 440 | 441 | ||
| 441 | /* | 442 | /* |
| 442 | * Note by akpm about WB_SYNC_NONE used above: zero @wait is just an | 443 | * Zero @wait is just an advisory thing to help the file system shove |
| 443 | * advisory thing to help the file system shove lots of data into the | 444 | * lots of data into the queues, and there will be the second |
| 444 | * queues. If some gets missed then it'll be picked up on the second | 444 | * lots of data into the queues, and there will be a second |
| 445 | * '->sync_fs()' call, with non-zero @wait. | 445 | * '->sync_fs()' call, with non-zero @wait. |
| 446 | */ | 446 | */ |
| 447 | if (!wait) | ||
| 448 | return 0; | ||
| 447 | 449 | ||
| 448 | if (sb->s_flags & MS_RDONLY) | 450 | if (sb->s_flags & MS_RDONLY) |
| 449 | return 0; | 451 | return 0; |
| 450 | 452 | ||
| 451 | /* | 453 | /* |
| 452 | * Synchronize write buffers, because 'ubifs_run_commit()' does not | ||
| 453 | * do this if it waits for an already running commit. | ||
| 454 | */ | ||
| 455 | for (i = 0; i < c->jhead_cnt; i++) { | ||
| 456 | err = ubifs_wbuf_sync(&c->jheads[i].wbuf); | ||
| 457 | if (err) | ||
| 458 | return err; | ||
| 459 | } | ||
| 460 | |||
| 461 | /* | ||
| 462 | * VFS calls '->sync_fs()' before synchronizing all dirty inodes and | 454 | * VFS calls '->sync_fs()' before synchronizing all dirty inodes and |
| 463 | * pages, so synchronize them first, then commit the journal. Strictly | 455 | * pages, so synchronize them first, then commit the journal. Strictly |
| 464 | * speaking, it is not necessary to commit the journal here, | 456 | * speaking, it is not necessary to commit the journal here, |
| @@ -469,6 +461,16 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) | |||
| 469 | */ | 461 | */ |
| 470 | generic_sync_sb_inodes(sb, &wbc); | 462 | generic_sync_sb_inodes(sb, &wbc); |
| 471 | 463 | ||
| 464 | /* | ||
| 465 | * Synchronize write buffers, because 'ubifs_run_commit()' does not | ||
| 466 | * do this if it waits for an already running commit. | ||
| 467 | */ | ||
| 468 | for (i = 0; i < c->jhead_cnt; i++) { | ||
| 469 | err = ubifs_wbuf_sync(&c->jheads[i].wbuf); | ||
| 470 | if (err) | ||
| 471 | return err; | ||
| 472 | } | ||
| 473 | |||
| 472 | err = ubifs_run_commit(c); | 474 | err = ubifs_run_commit(c); |
| 473 | if (err) | 475 | if (err) |
| 474 | return err; | 476 | return err; |
| @@ -572,15 +574,8 @@ static int init_constants_early(struct ubifs_info *c) | |||
| 572 | c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX; | 574 | c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX; |
| 573 | 575 | ||
| 574 | /* | 576 | /* |
| 575 | * Initialize dead and dark LEB space watermarks. | 577 | * Initialize dead and dark LEB space watermarks. See gc.c for comments |
| 576 | * | 578 | * about these values. |
| 577 | * Dead space is the space which cannot be used. Its watermark is | ||
| 578 | * equivalent to min. I/O unit or minimum node size if it is greater | ||
| 579 | * then min. I/O unit. | ||
| 580 | * | ||
| 581 | * Dark space is the space which might be used, or might not, depending | ||
| 582 | * on which node should be written to the LEB. Its watermark is | ||
| 583 | * equivalent to maximum UBIFS node size. | ||
| 584 | */ | 579 | */ |
| 585 | c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); | 580 | c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); |
| 586 | c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); | 581 | c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); |
| @@ -741,12 +736,12 @@ static void init_constants_master(struct ubifs_info *c) | |||
| 741 | * take_gc_lnum - reserve GC LEB. | 736 | * take_gc_lnum - reserve GC LEB. |
| 742 | * @c: UBIFS file-system description object | 737 | * @c: UBIFS file-system description object |
| 743 | * | 738 | * |
| 744 | * This function ensures that the LEB reserved for garbage collection is | 739 | * This function ensures that the LEB reserved for garbage collection is marked |
| 745 | * unmapped and is marked as "taken" in lprops. We also have to set free space | 740 | * as "taken" in lprops. We also have to set free space to LEB size and dirty |
| 746 | * to LEB size and dirty space to zero, because lprops may contain out-of-date | 741 | * space to zero, because lprops may contain out-of-date information if the |
| 747 | * information if the file-system was un-mounted before it has been committed. | 742 | * file-system was un-mounted before it has been committed. This function |
| 748 | * This function returns zero in case of success and a negative error code in | 743 | * returns zero in case of success and a negative error code in case of |
| 749 | * case of failure. | 744 | * failure. |
| 750 | */ | 745 | */ |
| 751 | static int take_gc_lnum(struct ubifs_info *c) | 746 | static int take_gc_lnum(struct ubifs_info *c) |
| 752 | { | 747 | { |
| @@ -757,10 +752,6 @@ static int take_gc_lnum(struct ubifs_info *c) | |||
| 757 | return -EINVAL; | 752 | return -EINVAL; |
| 758 | } | 753 | } |
| 759 | 754 | ||
| 760 | err = ubifs_leb_unmap(c, c->gc_lnum); | ||
| 761 | if (err) | ||
| 762 | return err; | ||
| 763 | |||
| 764 | /* And we have to tell lprops that this LEB is taken */ | 755 | /* And we have to tell lprops that this LEB is taken */ |
| 765 | err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0, | 756 | err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0, |
| 766 | LPROPS_TAKEN, 0, 0); | 757 | LPROPS_TAKEN, 0, 0); |
| @@ -966,13 +957,16 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, | |||
| 966 | 957 | ||
| 967 | token = match_token(p, tokens, args); | 958 | token = match_token(p, tokens, args); |
| 968 | switch (token) { | 959 | switch (token) { |
| 960 | /* | ||
| 961 | * %Opt_fast_unmount and %Opt_norm_unmount options are ignored. | ||
| 962 | * We accepte them in order to be backware-compatible. But this | ||
| 963 | * should be removed at some point. | ||
| 964 | */ | ||
| 969 | case Opt_fast_unmount: | 965 | case Opt_fast_unmount: |
| 970 | c->mount_opts.unmount_mode = 2; | 966 | c->mount_opts.unmount_mode = 2; |
| 971 | c->fast_unmount = 1; | ||
| 972 | break; | 967 | break; |
| 973 | case Opt_norm_unmount: | 968 | case Opt_norm_unmount: |
| 974 | c->mount_opts.unmount_mode = 1; | 969 | c->mount_opts.unmount_mode = 1; |
| 975 | c->fast_unmount = 0; | ||
| 976 | break; | 970 | break; |
| 977 | case Opt_bulk_read: | 971 | case Opt_bulk_read: |
| 978 | c->mount_opts.bulk_read = 2; | 972 | c->mount_opts.bulk_read = 2; |
| @@ -1094,12 +1088,7 @@ static int check_free_space(struct ubifs_info *c) | |||
| 1094 | ubifs_err("insufficient free space to mount in read/write mode"); | 1088 | ubifs_err("insufficient free space to mount in read/write mode"); |
| 1095 | dbg_dump_budg(c); | 1089 | dbg_dump_budg(c); |
| 1096 | dbg_dump_lprops(c); | 1090 | dbg_dump_lprops(c); |
| 1097 | /* | 1091 | return -ENOSPC; |
| 1098 | * We return %-EINVAL instead of %-ENOSPC because it seems to | ||
| 1099 | * be the closest error code mentioned in the mount function | ||
| 1100 | * documentation. | ||
| 1101 | */ | ||
| 1102 | return -EINVAL; | ||
| 1103 | } | 1092 | } |
| 1104 | return 0; | 1093 | return 0; |
| 1105 | } | 1094 | } |
| @@ -1286,10 +1275,19 @@ static int mount_ubifs(struct ubifs_info *c) | |||
| 1286 | if (err) | 1275 | if (err) |
| 1287 | goto out_orphans; | 1276 | goto out_orphans; |
| 1288 | err = ubifs_rcvry_gc_commit(c); | 1277 | err = ubifs_rcvry_gc_commit(c); |
| 1289 | } else | 1278 | } else { |
| 1290 | err = take_gc_lnum(c); | 1279 | err = take_gc_lnum(c); |
| 1291 | if (err) | 1280 | if (err) |
| 1292 | goto out_orphans; | 1281 | goto out_orphans; |
| 1282 | |||
| 1283 | /* | ||
| 1284 | * GC LEB may contain garbage if there was an unclean | ||
| 1285 | * reboot, and it should be un-mapped. | ||
| 1286 | */ | ||
| 1287 | err = ubifs_leb_unmap(c, c->gc_lnum); | ||
| 1288 | if (err) | ||
| 1289 | return err; | ||
| 1290 | } | ||
| 1293 | 1291 | ||
| 1294 | err = dbg_check_lprops(c); | 1292 | err = dbg_check_lprops(c); |
| 1295 | if (err) | 1293 | if (err) |
| @@ -1298,6 +1296,16 @@ static int mount_ubifs(struct ubifs_info *c) | |||
| 1298 | err = ubifs_recover_size(c); | 1296 | err = ubifs_recover_size(c); |
| 1299 | if (err) | 1297 | if (err) |
| 1300 | goto out_orphans; | 1298 | goto out_orphans; |
| 1299 | } else { | ||
| 1300 | /* | ||
| 1301 | * Even if we mount read-only, we have to set the space in the | ||
| 1302 | * GC LEB to a proper value because this affects UBIFS free space | ||
| 1303 | * reporting. We do not want a situation where re-mounting from | ||
| 1304 | * R/O to R/W changes the amount of free space. | ||
| 1305 | */ | ||
| 1306 | err = take_gc_lnum(c); | ||
| 1307 | if (err) | ||
| 1308 | goto out_orphans; | ||
| 1301 | } | 1309 | } |
| 1302 | 1310 | ||
| 1303 | spin_lock(&ubifs_infos_lock); | 1311 | spin_lock(&ubifs_infos_lock); |
| @@ -1310,14 +1318,17 @@ static int mount_ubifs(struct ubifs_info *c) | |||
| 1310 | else { | 1318 | else { |
| 1311 | c->need_recovery = 0; | 1319 | c->need_recovery = 0; |
| 1312 | ubifs_msg("recovery completed"); | 1320 | ubifs_msg("recovery completed"); |
| 1321 | /* GC LEB has to be empty and taken at this point */ | ||
| 1322 | ubifs_assert(c->lst.taken_empty_lebs == 1); | ||
| 1313 | } | 1323 | } |
| 1314 | } | 1324 | } else |
| 1325 | ubifs_assert(c->lst.taken_empty_lebs == 1); | ||
| 1315 | 1326 | ||
| 1316 | err = dbg_debugfs_init_fs(c); | 1327 | err = dbg_check_filesystem(c); |
| 1317 | if (err) | 1328 | if (err) |
| 1318 | goto out_infos; | 1329 | goto out_infos; |
| 1319 | 1330 | ||
| 1320 | err = dbg_check_filesystem(c); | 1331 | err = dbg_debugfs_init_fs(c); |
| 1321 | if (err) | 1332 | if (err) |
| 1322 | goto out_infos; | 1333 | goto out_infos; |
| 1323 | 1334 | ||
| @@ -1351,7 +1362,6 @@ static int mount_ubifs(struct ubifs_info *c) | |||
| 1351 | c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7], | 1362 | c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7], |
| 1352 | c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11], | 1363 | c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11], |
| 1353 | c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]); | 1364 | c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]); |
| 1354 | dbg_msg("fast unmount: %d", c->fast_unmount); | ||
| 1355 | dbg_msg("big_lpt %d", c->big_lpt); | 1365 | dbg_msg("big_lpt %d", c->big_lpt); |
| 1356 | dbg_msg("log LEBs: %d (%d - %d)", | 1366 | dbg_msg("log LEBs: %d (%d - %d)", |
| 1357 | c->log_lebs, UBIFS_LOG_LNUM, c->log_last); | 1367 | c->log_lebs, UBIFS_LOG_LNUM, c->log_last); |
| @@ -1475,10 +1485,8 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
| 1475 | { | 1485 | { |
| 1476 | int err, lnum; | 1486 | int err, lnum; |
| 1477 | 1487 | ||
| 1478 | if (c->ro_media) | ||
| 1479 | return -EINVAL; | ||
| 1480 | |||
| 1481 | mutex_lock(&c->umount_mutex); | 1488 | mutex_lock(&c->umount_mutex); |
| 1489 | dbg_save_space_info(c); | ||
| 1482 | c->remounting_rw = 1; | 1490 | c->remounting_rw = 1; |
| 1483 | c->always_chk_crc = 1; | 1491 | c->always_chk_crc = 1; |
| 1484 | 1492 | ||
| @@ -1514,6 +1522,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
| 1514 | err = ubifs_recover_inl_heads(c, c->sbuf); | 1522 | err = ubifs_recover_inl_heads(c, c->sbuf); |
| 1515 | if (err) | 1523 | if (err) |
| 1516 | goto out; | 1524 | goto out; |
| 1525 | } else { | ||
| 1526 | /* A readonly mount is not allowed to have orphans */ | ||
| 1527 | ubifs_assert(c->tot_orphans == 0); | ||
| 1528 | err = ubifs_clear_orphans(c); | ||
| 1529 | if (err) | ||
| 1530 | goto out; | ||
| 1517 | } | 1531 | } |
| 1518 | 1532 | ||
| 1519 | if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) { | 1533 | if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) { |
| @@ -1569,7 +1583,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
| 1569 | if (c->need_recovery) | 1583 | if (c->need_recovery) |
| 1570 | err = ubifs_rcvry_gc_commit(c); | 1584 | err = ubifs_rcvry_gc_commit(c); |
| 1571 | else | 1585 | else |
| 1572 | err = take_gc_lnum(c); | 1586 | err = ubifs_leb_unmap(c, c->gc_lnum); |
| 1573 | if (err) | 1587 | if (err) |
| 1574 | goto out; | 1588 | goto out; |
| 1575 | 1589 | ||
| @@ -1582,8 +1596,9 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
| 1582 | c->vfs_sb->s_flags &= ~MS_RDONLY; | 1596 | c->vfs_sb->s_flags &= ~MS_RDONLY; |
| 1583 | c->remounting_rw = 0; | 1597 | c->remounting_rw = 0; |
| 1584 | c->always_chk_crc = 0; | 1598 | c->always_chk_crc = 0; |
| 1599 | err = dbg_check_space_info(c); | ||
| 1585 | mutex_unlock(&c->umount_mutex); | 1600 | mutex_unlock(&c->umount_mutex); |
| 1586 | return 0; | 1601 | return err; |
| 1587 | 1602 | ||
| 1588 | out: | 1603 | out: |
| 1589 | vfree(c->orph_buf); | 1604 | vfree(c->orph_buf); |
| @@ -1603,43 +1618,18 @@ out: | |||
| 1603 | } | 1618 | } |
| 1604 | 1619 | ||
| 1605 | /** | 1620 | /** |
| 1606 | * commit_on_unmount - commit the journal when un-mounting. | ||
| 1607 | * @c: UBIFS file-system description object | ||
| 1608 | * | ||
| 1609 | * This function is called during un-mounting and re-mounting, and it commits | ||
| 1610 | * the journal unless the "fast unmount" mode is enabled. | ||
| 1611 | */ | ||
| 1612 | static void commit_on_unmount(struct ubifs_info *c) | ||
| 1613 | { | ||
| 1614 | struct super_block *sb = c->vfs_sb; | ||
| 1615 | long long bud_bytes; | ||
| 1616 | |||
| 1617 | /* | ||
| 1618 | * This function is called before the background thread is stopped, so | ||
| 1619 | * we may race with ongoing commit, which means we have to take | ||
| 1620 | * @c->bud_lock to access @c->bud_bytes. | ||
| 1621 | */ | ||
| 1622 | spin_lock(&c->buds_lock); | ||
| 1623 | bud_bytes = c->bud_bytes; | ||
| 1624 | spin_unlock(&c->buds_lock); | ||
| 1625 | |||
| 1626 | if (!c->fast_unmount && !(sb->s_flags & MS_RDONLY) && bud_bytes) | ||
| 1627 | ubifs_run_commit(c); | ||
| 1628 | } | ||
| 1629 | |||
| 1630 | /** | ||
| 1631 | * ubifs_remount_ro - re-mount in read-only mode. | 1621 | * ubifs_remount_ro - re-mount in read-only mode. |
| 1632 | * @c: UBIFS file-system description object | 1622 | * @c: UBIFS file-system description object |
| 1633 | * | 1623 | * |
| 1634 | * We rely on VFS to have stopped writing. Possibly the background thread could | 1624 | * We assume VFS has stopped writing. Possibly the background thread could be |
| 1635 | * be running a commit, however kthread_stop will wait in that case. | 1625 | * running a commit, but kthread_stop will wait in that case. |
| 1636 | */ | 1626 | */ |
| 1637 | static void ubifs_remount_ro(struct ubifs_info *c) | 1627 | static void ubifs_remount_ro(struct ubifs_info *c) |
| 1638 | { | 1628 | { |
| 1639 | int i, err; | 1629 | int i, err; |
| 1640 | 1630 | ||
| 1641 | ubifs_assert(!c->need_recovery); | 1631 | ubifs_assert(!c->need_recovery); |
| 1642 | commit_on_unmount(c); | 1632 | ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); |
| 1643 | 1633 | ||
| 1644 | mutex_lock(&c->umount_mutex); | 1634 | mutex_lock(&c->umount_mutex); |
| 1645 | if (c->bgt) { | 1635 | if (c->bgt) { |
| @@ -1647,27 +1637,29 @@ static void ubifs_remount_ro(struct ubifs_info *c) | |||
| 1647 | c->bgt = NULL; | 1637 | c->bgt = NULL; |
| 1648 | } | 1638 | } |
| 1649 | 1639 | ||
| 1640 | dbg_save_space_info(c); | ||
| 1641 | |||
| 1650 | for (i = 0; i < c->jhead_cnt; i++) { | 1642 | for (i = 0; i < c->jhead_cnt; i++) { |
| 1651 | ubifs_wbuf_sync(&c->jheads[i].wbuf); | 1643 | ubifs_wbuf_sync(&c->jheads[i].wbuf); |
| 1652 | del_timer_sync(&c->jheads[i].wbuf.timer); | 1644 | del_timer_sync(&c->jheads[i].wbuf.timer); |
| 1653 | } | 1645 | } |
| 1654 | 1646 | ||
| 1655 | if (!c->ro_media) { | 1647 | c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); |
| 1656 | c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); | 1648 | c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); |
| 1657 | c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); | 1649 | c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); |
| 1658 | c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); | 1650 | err = ubifs_write_master(c); |
| 1659 | err = ubifs_write_master(c); | 1651 | if (err) |
| 1660 | if (err) | 1652 | ubifs_ro_mode(c, err); |
| 1661 | ubifs_ro_mode(c, err); | ||
| 1662 | } | ||
| 1663 | 1653 | ||
| 1664 | ubifs_destroy_idx_gc(c); | ||
| 1665 | free_wbufs(c); | 1654 | free_wbufs(c); |
| 1666 | vfree(c->orph_buf); | 1655 | vfree(c->orph_buf); |
| 1667 | c->orph_buf = NULL; | 1656 | c->orph_buf = NULL; |
| 1668 | vfree(c->ileb_buf); | 1657 | vfree(c->ileb_buf); |
| 1669 | c->ileb_buf = NULL; | 1658 | c->ileb_buf = NULL; |
| 1670 | ubifs_lpt_free(c, 1); | 1659 | ubifs_lpt_free(c, 1); |
| 1660 | err = dbg_check_space_info(c); | ||
| 1661 | if (err) | ||
| 1662 | ubifs_ro_mode(c, err); | ||
| 1671 | mutex_unlock(&c->umount_mutex); | 1663 | mutex_unlock(&c->umount_mutex); |
| 1672 | } | 1664 | } |
| 1673 | 1665 | ||
| @@ -1760,11 +1752,20 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) | |||
| 1760 | } | 1752 | } |
| 1761 | 1753 | ||
| 1762 | if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { | 1754 | if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { |
| 1755 | if (c->ro_media) { | ||
| 1756 | ubifs_msg("cannot re-mount due to prior errors"); | ||
| 1757 | return -EROFS; | ||
| 1758 | } | ||
| 1763 | err = ubifs_remount_rw(c); | 1759 | err = ubifs_remount_rw(c); |
| 1764 | if (err) | 1760 | if (err) |
| 1765 | return err; | 1761 | return err; |
| 1766 | } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) | 1762 | } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) { |
| 1763 | if (c->ro_media) { | ||
| 1764 | ubifs_msg("cannot re-mount due to prior errors"); | ||
| 1765 | return -EROFS; | ||
| 1766 | } | ||
| 1767 | ubifs_remount_ro(c); | 1767 | ubifs_remount_ro(c); |
| 1768 | } | ||
| 1768 | 1769 | ||
| 1769 | if (c->bulk_read == 1) | 1770 | if (c->bulk_read == 1) |
| 1770 | bu_init(c); | 1771 | bu_init(c); |
| @@ -1774,10 +1775,11 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) | |||
| 1774 | c->bu.buf = NULL; | 1775 | c->bu.buf = NULL; |
| 1775 | } | 1776 | } |
| 1776 | 1777 | ||
| 1778 | ubifs_assert(c->lst.taken_empty_lebs == 1); | ||
| 1777 | return 0; | 1779 | return 0; |
| 1778 | } | 1780 | } |
| 1779 | 1781 | ||
| 1780 | struct super_operations ubifs_super_operations = { | 1782 | const struct super_operations ubifs_super_operations = { |
| 1781 | .alloc_inode = ubifs_alloc_inode, | 1783 | .alloc_inode = ubifs_alloc_inode, |
| 1782 | .destroy_inode = ubifs_destroy_inode, | 1784 | .destroy_inode = ubifs_destroy_inode, |
| 1783 | .put_super = ubifs_put_super, | 1785 | .put_super = ubifs_put_super, |
| @@ -2044,15 +2046,6 @@ out_close: | |||
| 2044 | 2046 | ||
| 2045 | static void ubifs_kill_sb(struct super_block *sb) | 2047 | static void ubifs_kill_sb(struct super_block *sb) |
| 2046 | { | 2048 | { |
| 2047 | struct ubifs_info *c = sb->s_fs_info; | ||
| 2048 | |||
| 2049 | /* | ||
| 2050 | * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()' | ||
| 2051 | * in order to be outside BKL. | ||
| 2052 | */ | ||
| 2053 | if (sb->s_root) | ||
| 2054 | commit_on_unmount(c); | ||
| 2055 | /* The un-mount routine is actually done in put_super() */ | ||
| 2056 | generic_shutdown_super(sb); | 2049 | generic_shutdown_super(sb); |
| 2057 | } | 2050 | } |
| 2058 | 2051 | ||
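Editor's note: the remount hunk above derives the transition direction by comparing the superblock's current flags with the requested flags, and refuses either direction once the file system has already hit a fatal error (the c->ro_media checks added on both branches). A minimal user-space sketch of that decision logic, with stand-in names for the kernel flag and fields (MS_RDONLY value and struct layout here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define MS_RDONLY 1 /* stand-in for the kernel mount flag */

/* Hypothetical model of the state consulted by the remount path. */
struct fs_state {
	unsigned int cur_flags; /* flags the fs is currently mounted with */
	bool ro_media;          /* set after a fatal error forced read-only mode */
};

/* Returns 0 on success, -1 if the transition must be refused. */
static int remount(struct fs_state *c, unsigned int new_flags)
{
	bool was_ro = c->cur_flags & MS_RDONLY;
	bool want_ro = new_flags & MS_RDONLY;

	if (was_ro != want_ro && c->ro_media) {
		fprintf(stderr, "cannot re-mount due to prior errors\n");
		return -1;
	}
	if (was_ro && !want_ro)
		printf("re-mounting read-write\n");
	else if (!was_ro && want_ro)
		printf("re-mounting read-only\n");
	else
		printf("no change of RO/RW mode\n");

	c->cur_flags = new_flags;
	return 0;
}

int main(void)
{
	struct fs_state c = { .cur_flags = MS_RDONLY, .ro_media = true };

	return remount(&c, 0) ? 1 : 0; /* refused because of prior errors */
}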
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index f7e36f545527..fa28a84c6a1b 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
| @@ -443,6 +443,11 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
| 443 | * This function performs the same function as ubifs_read_node except that | 443 | * This function performs the same function as ubifs_read_node except that |
| 444 | * it does not require that there is actually a node present and instead | 444 | * it does not require that there is actually a node present and instead |
| 445 | * the return code indicates if a node was read. | 445 | * the return code indicates if a node was read. |
| 446 | * | ||
| 447 | * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc | ||
| 448 | * is true (it is controlled by the corresponding mount option). However, if | ||
| 449 | * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always | ||
| 450 | * checked. | ||
| 446 | */ | 451 | */ |
| 447 | static int try_read_node(const struct ubifs_info *c, void *buf, int type, | 452 | static int try_read_node(const struct ubifs_info *c, void *buf, int type, |
| 448 | int len, int lnum, int offs) | 453 | int len, int lnum, int offs) |
| @@ -470,9 +475,8 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type, | |||
| 470 | if (node_len != len) | 475 | if (node_len != len) |
| 471 | return 0; | 476 | return 0; |
| 472 | 477 | ||
| 473 | if (type == UBIFS_DATA_NODE && !c->always_chk_crc) | 478 | if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc) |
| 474 | if (c->no_chk_data_crc) | 479 | return 1; |
| 475 | return 0; | ||
| 476 | 480 | ||
| 477 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); | 481 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); |
| 478 | node_crc = le32_to_cpu(ch->crc); | 482 | node_crc = le32_to_cpu(ch->crc); |
| @@ -1506,7 +1510,7 @@ out: | |||
| 1506 | * | 1510 | * |
| 1507 | * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function | 1511 | * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function |
| 1508 | * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares | 1512 | * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares |
| 1509 | * maxumum possible amount of nodes for bulk-read. | 1513 | * maximum possible amount of nodes for bulk-read. |
| 1510 | */ | 1514 | */ |
| 1511 | int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) | 1515 | int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) |
| 1512 | { | 1516 | { |
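Editor's note: in the try_read_node() hunk above, the old code returned 0 ("no node present") in the case where it only meant to skip the CRC check; the rewritten condition returns 1, so a skipped check counts as success. A small sketch of the corrected predicate, using stand-in names rather than the real UBIFS types:

#include <stdbool.h>
#include <stdio.h>

enum node_type { DATA_NODE, OTHER_NODE }; /* stand-ins for UBIFS node types */

/*
 * CRC verification of a data node may be skipped only when the "no data
 * CRC" mount option is set and the "always check" debugging override is
 * not. Skipping means "trust the node" (report success), not "node absent".
 */
static bool crc_may_be_skipped(enum node_type type, bool always_chk_crc,
			       bool no_chk_data_crc)
{
	return type == DATA_NODE && !always_chk_crc && no_chk_data_crc;
}

int main(void)
{
	/* With the old logic this first case fell through to "return 0". */
	printf("skip CRC: %d\n", crc_may_be_skipped(DATA_NODE, false, true));
	printf("skip CRC: %d\n", crc_may_be_skipped(DATA_NODE, true, true));
	return 0;
}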
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index fc2a4cc66d03..039a68bee29a 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
| @@ -426,9 +426,9 @@ struct ubifs_unclean_leb { | |||
| 426 | * LEB properties flags. | 426 | * LEB properties flags. |
| 427 | * | 427 | * |
| 428 | * LPROPS_UNCAT: not categorized | 428 | * LPROPS_UNCAT: not categorized |
| 429 | * LPROPS_DIRTY: dirty > 0, not index | 429 | * LPROPS_DIRTY: dirty > free, dirty >= @c->dead_wm, not index |
| 430 | * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sz and index | 430 | * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sz and index |
| 431 | * LPROPS_FREE: free > 0, not empty, not index | 431 | * LPROPS_FREE: free > 0, dirty < @c->dead_wm, not empty, not index |
| 432 | * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs | 432 | * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs |
| 433 | * LPROPS_EMPTY: LEB is empty, not taken | 433 | * LPROPS_EMPTY: LEB is empty, not taken |
| 434 | * LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken | 434 | * LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken |
| @@ -961,7 +961,6 @@ struct ubifs_debug_info; | |||
| 961 | * @cs_lock: commit state lock | 961 | * @cs_lock: commit state lock |
| 962 | * @cmt_wq: wait queue to sleep on if the log is full and a commit is running | 962 | * @cmt_wq: wait queue to sleep on if the log is full and a commit is running |
| 963 | * | 963 | * |
| 964 | * @fast_unmount: do not run journal commit before un-mounting | ||
| 965 | * @big_lpt: flag that LPT is too big to write whole during commit | 964 | * @big_lpt: flag that LPT is too big to write whole during commit |
| 966 | * @no_chk_data_crc: do not check CRCs when reading data nodes (except during | 965 | * @no_chk_data_crc: do not check CRCs when reading data nodes (except during |
| 967 | * recovery) | 966 | * recovery) |
| @@ -1202,7 +1201,6 @@ struct ubifs_info { | |||
| 1202 | spinlock_t cs_lock; | 1201 | spinlock_t cs_lock; |
| 1203 | wait_queue_head_t cmt_wq; | 1202 | wait_queue_head_t cmt_wq; |
| 1204 | 1203 | ||
| 1205 | unsigned int fast_unmount:1; | ||
| 1206 | unsigned int big_lpt:1; | 1204 | unsigned int big_lpt:1; |
| 1207 | unsigned int no_chk_data_crc:1; | 1205 | unsigned int no_chk_data_crc:1; |
| 1208 | unsigned int bulk_read:1; | 1206 | unsigned int bulk_read:1; |
| @@ -1405,13 +1403,13 @@ extern struct list_head ubifs_infos; | |||
| 1405 | extern spinlock_t ubifs_infos_lock; | 1403 | extern spinlock_t ubifs_infos_lock; |
| 1406 | extern atomic_long_t ubifs_clean_zn_cnt; | 1404 | extern atomic_long_t ubifs_clean_zn_cnt; |
| 1407 | extern struct kmem_cache *ubifs_inode_slab; | 1405 | extern struct kmem_cache *ubifs_inode_slab; |
| 1408 | extern struct super_operations ubifs_super_operations; | 1406 | extern const struct super_operations ubifs_super_operations; |
| 1409 | extern struct address_space_operations ubifs_file_address_operations; | 1407 | extern const struct address_space_operations ubifs_file_address_operations; |
| 1410 | extern struct file_operations ubifs_file_operations; | 1408 | extern const struct file_operations ubifs_file_operations; |
| 1411 | extern struct inode_operations ubifs_file_inode_operations; | 1409 | extern const struct inode_operations ubifs_file_inode_operations; |
| 1412 | extern struct file_operations ubifs_dir_operations; | 1410 | extern const struct file_operations ubifs_dir_operations; |
| 1413 | extern struct inode_operations ubifs_dir_inode_operations; | 1411 | extern const struct inode_operations ubifs_dir_inode_operations; |
| 1414 | extern struct inode_operations ubifs_symlink_inode_operations; | 1412 | extern const struct inode_operations ubifs_symlink_inode_operations; |
| 1415 | extern struct backing_dev_info ubifs_backing_dev_info; | 1413 | extern struct backing_dev_info ubifs_backing_dev_info; |
| 1416 | extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; | 1414 | extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; |
| 1417 | 1415 | ||
| @@ -1428,7 +1426,7 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, | |||
| 1428 | int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, | 1426 | int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, |
| 1429 | int offs, int dtype); | 1427 | int offs, int dtype); |
| 1430 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, | 1428 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, |
| 1431 | int offs, int quiet, int chk_crc); | 1429 | int offs, int quiet, int must_chk_crc); |
| 1432 | void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); | 1430 | void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); |
| 1433 | void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); | 1431 | void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); |
| 1434 | int ubifs_io_init(struct ubifs_info *c); | 1432 | int ubifs_io_init(struct ubifs_info *c); |
| @@ -1495,6 +1493,7 @@ void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode, | |||
| 1495 | void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, | 1493 | void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, |
| 1496 | struct ubifs_budget_req *req); | 1494 | struct ubifs_budget_req *req); |
| 1497 | long long ubifs_get_free_space(struct ubifs_info *c); | 1495 | long long ubifs_get_free_space(struct ubifs_info *c); |
| 1496 | long long ubifs_get_free_space_nolock(struct ubifs_info *c); | ||
| 1498 | int ubifs_calc_min_idx_lebs(struct ubifs_info *c); | 1497 | int ubifs_calc_min_idx_lebs(struct ubifs_info *c); |
| 1499 | void ubifs_convert_page_budget(struct ubifs_info *c); | 1498 | void ubifs_convert_page_budget(struct ubifs_info *c); |
| 1500 | long long ubifs_reported_space(const struct ubifs_info *c, long long free); | 1499 | long long ubifs_reported_space(const struct ubifs_info *c, long long free); |
| @@ -1603,6 +1602,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum); | |||
| 1603 | int ubifs_orphan_start_commit(struct ubifs_info *c); | 1602 | int ubifs_orphan_start_commit(struct ubifs_info *c); |
| 1604 | int ubifs_orphan_end_commit(struct ubifs_info *c); | 1603 | int ubifs_orphan_end_commit(struct ubifs_info *c); |
| 1605 | int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only); | 1604 | int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only); |
| 1605 | int ubifs_clear_orphans(struct ubifs_info *c); | ||
| 1606 | 1606 | ||
| 1607 | /* lpt.c */ | 1607 | /* lpt.c */ |
| 1608 | int ubifs_calc_lpt_geom(struct ubifs_info *c); | 1608 | int ubifs_calc_lpt_geom(struct ubifs_info *c); |
| @@ -1646,7 +1646,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, | |||
| 1646 | const struct ubifs_lprops *lp, | 1646 | const struct ubifs_lprops *lp, |
| 1647 | int free, int dirty, int flags, | 1647 | int free, int dirty, int flags, |
| 1648 | int idx_gc_cnt); | 1648 | int idx_gc_cnt); |
| 1649 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats); | 1649 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst); |
| 1650 | void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, | 1650 | void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, |
| 1651 | int cat); | 1651 | int cat); |
| 1652 | void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, | 1652 | void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 2ed035354c26..a608e72fa405 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
| @@ -371,7 +371,11 @@ xfs_quiesce_attr( | |||
| 371 | /* flush inodes and push all remaining buffers out to disk */ | 371 | /* flush inodes and push all remaining buffers out to disk */ |
| 372 | xfs_quiesce_fs(mp); | 372 | xfs_quiesce_fs(mp); |
| 373 | 373 | ||
| 374 | ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); | 374 | /* |
| 375 | * Just warn here till VFS can correctly support | ||
| 376 | * read-only remount without racing. | ||
| 377 | */ | ||
| 378 | WARN_ON(atomic_read(&mp->m_active_trans) != 0); | ||
| 375 | 379 | ||
| 376 | /* Push the superblock and write an unmount record */ | 380 | /* Push the superblock and write an unmount record */ |
| 377 | error = xfs_log_sbcount(mp, 1); | 381 | error = xfs_log_sbcount(mp, 1); |
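Editor's note: the xfs_quiesce_attr() hunk demotes a hard assertion to a warning, so a racy read-only remount now logs and continues instead of taking the machine down. A rough user-space analogue of a WARN_ON-style macro is sketched below (the kernel version also dumps a stack trace; this sketch only reports the condition and location, and uses a GCC-style statement expression as kernel code commonly does):

#include <stdio.h>

#define warn_on(cond)							\
	({								\
		int __c = !!(cond);					\
		if (__c)						\
			fprintf(stderr, "warning: %s failed at %s:%d\n",\
				#cond, __FILE__, __LINE__);		\
		__c;							\
	})

int main(void)
{
	int active_transactions = 1; /* stand-in for m_active_trans */

	warn_on(active_transactions != 0); /* warn, but keep going */
	return 0;
}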
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index b4c1ee713492..f8278cfcc1d3 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
| @@ -55,17 +55,11 @@ xfs_swapext( | |||
| 55 | struct file *file, *target_file; | 55 | struct file *file, *target_file; |
| 56 | int error = 0; | 56 | int error = 0; |
| 57 | 57 | ||
| 58 | sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL); | ||
| 59 | if (!sxp) { | ||
| 60 | error = XFS_ERROR(ENOMEM); | ||
| 61 | goto out; | ||
| 62 | } | ||
| 63 | |||
| 64 | /* Pull information for the target fd */ | 58 | /* Pull information for the target fd */ |
| 65 | file = fget((int)sxp->sx_fdtarget); | 59 | file = fget((int)sxp->sx_fdtarget); |
| 66 | if (!file) { | 60 | if (!file) { |
| 67 | error = XFS_ERROR(EINVAL); | 61 | error = XFS_ERROR(EINVAL); |
| 68 | goto out_free_sxp; | 62 | goto out; |
| 69 | } | 63 | } |
| 70 | 64 | ||
| 71 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { | 65 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { |
| @@ -109,8 +103,6 @@ xfs_swapext( | |||
| 109 | fput(target_file); | 103 | fput(target_file); |
| 110 | out_put_file: | 104 | out_put_file: |
| 111 | fput(file); | 105 | fput(file); |
| 112 | out_free_sxp: | ||
| 113 | kmem_free(sxp); | ||
| 114 | out: | 106 | out: |
| 115 | return error; | 107 | return error; |
| 116 | } | 108 | } |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 35cca98bd94c..b1047de2fffd 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
| @@ -70,16 +70,21 @@ STATIC void xlog_recover_check_summary(xlog_t *); | |||
| 70 | xfs_buf_t * | 70 | xfs_buf_t * |
| 71 | xlog_get_bp( | 71 | xlog_get_bp( |
| 72 | xlog_t *log, | 72 | xlog_t *log, |
| 73 | int num_bblks) | 73 | int nbblks) |
| 74 | { | 74 | { |
| 75 | ASSERT(num_bblks > 0); | 75 | if (nbblks <= 0 || nbblks > log->l_logBBsize) { |
| 76 | xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); | ||
| 77 | XFS_ERROR_REPORT("xlog_get_bp(1)", | ||
| 78 | XFS_ERRLEVEL_HIGH, log->l_mp); | ||
| 79 | return NULL; | ||
| 80 | } | ||
| 76 | 81 | ||
| 77 | if (log->l_sectbb_log) { | 82 | if (log->l_sectbb_log) { |
| 78 | if (num_bblks > 1) | 83 | if (nbblks > 1) |
| 79 | num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1); | 84 | nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1); |
| 80 | num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks); | 85 | nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); |
| 81 | } | 86 | } |
| 82 | return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp); | 87 | return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); |
| 83 | } | 88 | } |
| 84 | 89 | ||
| 85 | void | 90 | void |
| @@ -102,6 +107,13 @@ xlog_bread( | |||
| 102 | { | 107 | { |
| 103 | int error; | 108 | int error; |
| 104 | 109 | ||
| 110 | if (nbblks <= 0 || nbblks > log->l_logBBsize) { | ||
| 111 | xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); | ||
| 112 | XFS_ERROR_REPORT("xlog_bread(1)", | ||
| 113 | XFS_ERRLEVEL_HIGH, log->l_mp); | ||
| 114 | return EFSCORRUPTED; | ||
| 115 | } | ||
| 116 | |||
| 105 | if (log->l_sectbb_log) { | 117 | if (log->l_sectbb_log) { |
| 106 | blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); | 118 | blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); |
| 107 | nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); | 119 | nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); |
| @@ -139,6 +151,13 @@ xlog_bwrite( | |||
| 139 | { | 151 | { |
| 140 | int error; | 152 | int error; |
| 141 | 153 | ||
| 154 | if (nbblks <= 0 || nbblks > log->l_logBBsize) { | ||
| 155 | xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); | ||
| 156 | XFS_ERROR_REPORT("xlog_bwrite(1)", | ||
| 157 | XFS_ERRLEVEL_HIGH, log->l_mp); | ||
| 158 | return EFSCORRUPTED; | ||
| 159 | } | ||
| 160 | |||
| 142 | if (log->l_sectbb_log) { | 161 | if (log->l_sectbb_log) { |
| 143 | blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); | 162 | blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); |
| 144 | nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); | 163 | nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); |
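Editor's note: the same guard is open-coded three times above (xlog_get_bp, xlog_bread, xlog_bwrite): a block count that ultimately comes from possibly corrupted log data must be positive and no larger than the log itself before any buffer is allocated or I/O issued. A sketch of that check as a shared helper, with an illustrative error value standing in for the real EFSCORRUPTED:

#include <stdio.h>

#define EFSCORRUPTED 117 /* stand-in errno-style value for this sketch */

/* Reject a basic-block count before it is used for allocation or I/O. */
static int validate_nbblks(int nbblks, int log_bbsize, const char *who)
{
	if (nbblks <= 0 || nbblks > log_bbsize) {
		fprintf(stderr,
			"%s: invalid block length (0x%x) given for buffer\n",
			who, nbblks);
		return EFSCORRUPTED;
	}
	return 0;
}

int main(void)
{
	int log_bbsize = 8192; /* hypothetical log size in basic blocks */

	if (validate_nbblks(-1, log_bbsize, "xlog_bread"))
		fprintf(stderr, "rejecting corrupt length before I/O\n");
	return 0;
}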
diff --git a/include/linux/ata.h b/include/linux/ata.h index a53318b8cbd0..08a86d5cdf1b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
| @@ -731,12 +731,17 @@ static inline int ata_id_current_chs_valid(const u16 *id) | |||
| 731 | 731 | ||
| 732 | static inline int ata_id_is_cfa(const u16 *id) | 732 | static inline int ata_id_is_cfa(const u16 *id) |
| 733 | { | 733 | { |
| 734 | if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */ | 734 | if (id[ATA_ID_CONFIG] == 0x848A) /* Traditional CF */ |
| 735 | return 1; | 735 | return 1; |
| 736 | /* Could be CF hiding as standard ATA */ | 736 | /* |
| 737 | if (ata_id_major_version(id) >= 3 && | 737 | * CF specs no longer require a specific value in word 0, and yet |
| 738 | id[ATA_ID_COMMAND_SET_1] != 0xFFFF && | 738 | * they forbid reporting the ATA version in word 80 and require CFA |
| 739 | (id[ATA_ID_COMMAND_SET_1] & (1 << 2))) | 739 | * feature set support to be indicated in word 83 in this case. |
| 740 | * Unfortunately, some cards follow only one of these requirements, | ||
| 741 | * and while those that don't indicate CFA feature support need some | ||
| 742 | * sort of quirk list, it seems impractical for the ones that do... | ||
| 743 | */ | ||
| 744 | if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004) | ||
| 740 | return 1; | 745 | return 1; |
| 741 | return 0; | 746 | return 0; |
| 742 | } | 747 | } |
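Editor's note: as I read the new test, ATA_ID_COMMAND_SET_2 is IDENTIFY word 83; the mask 0xC004 keeps bits 15, 14 and 2 of that word, and the expected value 0x4004 requires bit 14 set with bit 15 clear (the usual "this word is valid" signature) together with bit 2, which advertises the CFA feature set. A tiny demonstration of the bit test in isolation:

#include <stdint.h>
#include <stdio.h>

/* Word 83 check: bits 15:14 must read 01b (word valid), bit 2 means CFA. */
static int id_word83_says_cfa(uint16_t word83)
{
	return (word83 & 0xC004) == 0x4004;
}

int main(void)
{
	printf("%d\n", id_word83_says_cfa(0x4004)); /* valid word, CFA bit set: 1 */
	printf("%d\n", id_word83_says_cfa(0xFFFF)); /* invalid word (bit 15 set): 0 */
	printf("%d\n", id_word83_says_cfa(0x4000)); /* valid word, no CFA bit: 0 */
	return 0;
}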
diff --git a/include/linux/libata.h b/include/linux/libata.h index bca3ba25f52a..5d87bc09a1f5 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -380,6 +380,7 @@ enum { | |||
| 380 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands | 380 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands |
| 381 | not multiple of 16 bytes */ | 381 | not multiple of 16 bytes */ |
| 382 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ | 382 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ |
| 383 | ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ | ||
| 383 | 384 | ||
| 384 | /* DMA mask for user DMA control: User visible values; DO NOT | 385 | /* DMA mask for user DMA control: User visible values; DO NOT |
| 385 | renumber */ | 386 | renumber */ |
| @@ -580,7 +581,7 @@ struct ata_device { | |||
| 580 | acpi_handle acpi_handle; | 581 | acpi_handle acpi_handle; |
| 581 | union acpi_object *gtf_cache; | 582 | union acpi_object *gtf_cache; |
| 582 | #endif | 583 | #endif |
| 583 | /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ | 584 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ |
| 584 | u64 n_sectors; /* size of device, if ATA */ | 585 | u64 n_sectors; /* size of device, if ATA */ |
| 585 | unsigned int class; /* ATA_DEV_xxx */ | 586 | unsigned int class; /* ATA_DEV_xxx */ |
| 586 | unsigned long unpark_deadline; | 587 | unsigned long unpark_deadline; |
| @@ -605,20 +606,22 @@ struct ata_device { | |||
| 605 | u16 heads; /* Number of heads */ | 606 | u16 heads; /* Number of heads */ |
| 606 | u16 sectors; /* Number of sectors per track */ | 607 | u16 sectors; /* Number of sectors per track */ |
| 607 | 608 | ||
| 608 | /* error history */ | ||
| 609 | int spdn_cnt; | ||
| 610 | struct ata_ering ering; | ||
| 611 | |||
| 612 | union { | 609 | union { |
| 613 | u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ | 610 | u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ |
| 614 | u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ | 611 | u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ |
| 615 | }; | 612 | }; |
| 613 | |||
| 614 | /* error history */ | ||
| 615 | int spdn_cnt; | ||
| 616 | /* ering is CLEAR_END, read comment above CLEAR_END */ | ||
| 617 | struct ata_ering ering; | ||
| 616 | }; | 618 | }; |
| 617 | 619 | ||
| 618 | /* Offset into struct ata_device. Fields above it are maintained | 620 | /* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are |
| 619 | * acress device init. Fields below are zeroed. | 621 | * cleared to zero on ata_dev_init(). |
| 620 | */ | 622 | */ |
| 621 | #define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors) | 623 | #define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) |
| 624 | #define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) | ||
| 622 | 625 | ||
| 623 | struct ata_eh_info { | 626 | struct ata_eh_info { |
| 624 | struct ata_device *dev; /* offending device */ | 627 | struct ata_device *dev; /* offending device */ |
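Editor's note: the libata.h hunk replaces the single CLEAR_OFFSET marker with a begin/end pair of offsetof() values, and moves the error history to the tail of the structure so it falls outside the cleared range. Presumably the device-init path zeroes exactly the bytes between the two markers (end-exclusive); a user-space model of that pattern on a toy structure, with hypothetical field names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct ata_device: only the field layout matters here. */
struct dev {
	int keep_a;     /* preserved across re-init */
	int keep_b;     /* preserved across re-init */
	long n_sectors; /* first field inside the cleared range */
	int dev_class;
	int spdn_cnt;   /* last field inside the cleared range */
	int ering[4];   /* error history; marks the end, itself preserved */
};

#define DEV_CLEAR_BEGIN offsetof(struct dev, n_sectors)
#define DEV_CLEAR_END   offsetof(struct dev, ering)

int main(void)
{
	struct dev d = { 1, 2, 100, 3, 4, { 5, 6, 7, 8 } };

	/* Zero only the bytes between the two markers, as the init path
	 * presumably does for the real structure. */
	memset((char *)&d + DEV_CLEAR_BEGIN, 0,
	       DEV_CLEAR_END - DEV_CLEAR_BEGIN);

	printf("kept: %d %d %d..%d, cleared: %ld %d %d\n",
	       d.keep_a, d.keep_b, d.ering[0], d.ering[3],
	       d.n_sectors, d.dev_class, d.spdn_cnt);
	return 0;
}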
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b493db7841dc..dc32dae01e5f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -1051,13 +1051,22 @@ continue_unlock: | |||
| 1051 | } | 1051 | } |
| 1052 | } | 1052 | } |
| 1053 | 1053 | ||
| 1054 | if (wbc->sync_mode == WB_SYNC_NONE) { | 1054 | if (nr_to_write > 0) |
| 1055 | wbc->nr_to_write--; | 1055 | nr_to_write--; |
| 1056 | if (wbc->nr_to_write <= 0) { | 1056 | else if (wbc->sync_mode == WB_SYNC_NONE) { |
| 1057 | done = 1; | 1057 | /* |
| 1058 | break; | 1058 | * We stop writing back only if we are not |
| 1059 | } | 1059 | * doing integrity sync. In case of integrity |
| 1060 | * sync we have to keep going because someone | ||
| 1061 | * may be concurrently dirtying pages, and we | ||
| 1062 | * might have synced a lot of newly appeared | ||
| 1063 | * dirty pages, but have not synced all of the | ||
| 1064 | * old dirty pages. | ||
| 1065 | */ | ||
| 1066 | done = 1; | ||
| 1067 | break; | ||
| 1060 | } | 1068 | } |
| 1069 | |||
| 1061 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | 1070 | if (wbc->nonblocking && bdi_write_congested(bdi)) { |
| 1062 | wbc->encountered_congestion = 1; | 1071 | wbc->encountered_congestion = 1; |
| 1063 | done = 1; | 1072 | done = 1; |
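Editor's note: the page-writeback hunk changes the loop-exit rule so the per-call budget is consumed first, and running out of budget ends the walk only for non-integrity (WB_SYNC_NONE) writeback; integrity sync keeps going so old dirty pages are not left behind while new ones appear. A toy simulation of that bookkeeping, with stand-in names for the sync modes:

#include <stdio.h>

enum sync_mode { SYNC_NONE, SYNC_ALL }; /* stand-ins for WB_SYNC_* */

/* Write up to dirty_pages pages, honouring the budget as the new code does. */
static int writeback_pages(int dirty_pages, long nr_to_write,
			   enum sync_mode mode)
{
	int written = 0;

	for (int i = 0; i < dirty_pages; i++) {
		written++;                /* "write" one page */
		if (nr_to_write > 0)
			nr_to_write--;
		else if (mode == SYNC_NONE)
			break;            /* budget gone: opportunistic sync stops */
	}
	return written;
}

int main(void)
{
	printf("SYNC_NONE: %d pages\n", writeback_pages(100, 10, SYNC_NONE));
	printf("SYNC_ALL:  %d pages\n", writeback_pages(100, 10, SYNC_ALL));
	return 0;
}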
