 63 files changed, 608 insertions(+), 255 deletions(-)
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 000000000000..b2830b435895
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (Known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structure and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+  Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from another CPU node than the home the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[ns]: Default: 10.000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+  The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues are set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.
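As a usage illustration (the parameter values below are arbitrary examples, not defaults taken from the patch), a multi-queue instance with four submission queues could be loaded as:

	modprobe null_blk queue_mode=2 nr_devices=2 gb=4 bs=4096 submit_queues=4 hw_queue_depth=64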
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50680a59a2ff..b9e9bd854298 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 			* atapi_dmadir: Enable ATAPI DMADIR bridge support
 
+			* disable: Disable this device.
+
 			If there are multiple matching configurations changing
 			the same attribute, the last one is used.
 
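The new "disable" keyword is wired up by the ata_parse_force_one() hunk in drivers/ata/libata-core.c below. Illustratively (the ID prefix follows the existing libata.force=[ID:]VAL convention; the exact port.device numbering depends on the system), a misbehaving device could be masked at boot with something like:

	libata.force=1.00:disable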
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 46e1d7ef163f..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -241,7 +241,7 @@
 
 	sdhi0: sdhi@ee100000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 165 4>;
 		cap-sd-highspeed;
@@ -250,7 +250,7 @@
 
 	sdhi1: sdhi@ee120000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 166 4>;
 		cap-sd-highspeed;
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+	int res;
+
 	/* LCD enable GPIO */
 	ldp_lcd_pdata.enable_gpio = gpio + 7;
 
 	/* Backlight enable GPIO */
 	ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
+	res = platform_device_register(&ldp_lcd_device);
+	if (res)
+		pr_err("Unable to register LCD: %d\n", res);
+
 	return 0;
 }
 
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
 	&ldp_gpio_keys_device,
-	&ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d33742908f97..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
 	.class = &dra7xx_uart_hwmod_class,
 	.clkdm_name = "l4per_clkdm",
 	.main_clk = "uart1_gfclk_mux",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..c18689123023 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@
 
 	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
 	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 			.id = i,
 			.data = &rsnd_card_info[i],
 			.size_data = sizeof(struct asoc_simple_card_info),
-			.dma_mask = ~0,
+			.dma_mask = DMA_BIT_MASK(32),
 		};
 
 		platform_device_register_full(&cardinfo);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0dff939..ea04b342c026 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+	if (c->x86 == 6 && cpu_has_clflush &&
+	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 
 #ifdef CONFIG_X86_64
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	int i, j;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		hctx_for_each_ctx(hctx, ctx, j) {
+			kobject_del(&ctx->kobj);
+			kobject_put(&ctx->kobj);
+		}
+		kobject_del(&hctx->kobj);
+		kobject_put(&hctx->kobj);
+	}
 
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
+	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
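The added teardown follows the standard kobject pairing: kobject_del() removes the object from sysfs, and kobject_put() drops the reference taken when the object was registered; without the put, the backing memory leaks. A minimal sketch of the convention (the function and ktype names here are illustrative, not from the patch):

	/* Registration takes a reference and creates the sysfs entry... */
	static int example_register(struct kobject *kobj, struct kobject *parent)
	{
		return kobject_init_and_add(kobj, &example_ktype, parent, "example");
	}

	/* ...so teardown must both remove the entry and drop that reference. */
	static void example_unregister(struct kobject *kobj)
	{
		kobject_del(kobj);
		kobject_put(kobj);
	}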
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
 config ACPI_EXTLOG
 	tristate "Extended Error Log support"
 	depends on X86_MCE && X86_LOCAL_APIC
-	select EFI
 	select UEFI_CPER
 	default n
 	help
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
 	{ "INT33B2", },
+	{ "INT33FC", },
 
 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
 	bool "ACPI Platform Error Interface (APEI)"
 	select MISC_FILESYSTEMS
 	select PSTORE
-	select EFI
 	select UEFI_CPER
 	depends on X86
 	help
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e9506338..c0ed4f273cf2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	/* AHCI controllers often implement SFF compatible interface.
-	 * Grab all PCI BARs just in case.
-	 */
-	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
-	if (rc == -EBUSY)
-		pcim_pin_device(pdev);
-	if (rc)
-		return rc;
-
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
 		u8 map;
@@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	/* AHCI controllers often implement SFF compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 	/*
 	 * set PHY Paremeters, two steps to configure the GPR13,
 	 * one write for rest of parameters, mask of first write
-	 * is 0x07fffffd, and the other one write for setting
+	 * is 0x07ffffff, and the other one write for setting
 	 * the mpll_clk_en.
 	 */
 	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 			| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
 			| IMX6Q_GPR13_SATA_TX_BOOST_MASK
 			| IMX6Q_GPR13_SATA_TX_LVL_MASK
+			| IMX6Q_GPR13_SATA_MPLL_CLK_EN
 			| IMX6Q_GPR13_SATA_TX_EDGE_RATE
 			, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
 			| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b93678bbcd..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 			"failed to get NCQ Send/Recv Log Emask 0x%x\n",
 			err_mask);
 	} else {
+		u8 *cmds = dev->ncq_send_recv_cmds;
+
 		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
-		memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
-			ATA_LOG_NCQ_SEND_RECV_SIZE);
+		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+			ata_dev_dbg(dev, "disabling queued TRIM support\n");
+			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+		}
 	}
 }
 
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
 						ATA_HORKAGE_FIRMWARE_WARN },
 
+	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+	{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
 	{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
 
+	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
 	/* End Marker */
 	{ }
 };
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
 	{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
 	{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
 	{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+	{ "disable", .horkage_on = ATA_HORKAGE_DISABLE },
 };
 char *start = *cur, *p = *cur;
 char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556d347c..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
 		return;
 	}
 
+	/*
+	 * XXX - UGLY HACK
+	 *
+	 * The block layer suspend/resume path is fundamentally broken due
+	 * to freezable kthreads and workqueue and may deadlock if a block
+	 * device gets removed while resume is in progress. I don't know
+	 * what the solution is short of removing freezable kthreads and
+	 * workqueues altogether.
+	 *
+	 * The following is an ugly hack to avoid kicking off device
+	 * removal while freezer is active. This is a joke but does avoid
+	 * this particular deadlock scenario.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+	 * http://marc.info/?l=linux-kernel&m=138695698516487
+	 */
+#ifdef CONFIG_FREEZER
+	while (pm_freezing)
+		msleep(10);
+#endif
+
 	DPRINTK("ENTER\n");
 	mutex_lock(&ap->scsi_scan_mutex);
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..a2e69d26266d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
 	NULL_Q_MQ = 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt to the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled with
+	 * no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id to the
+	 * real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
@@ -417,13 +455,13 @@
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
@@ -518,11 +560,13 @@
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
@@ -579,7 +623,13 @@
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+		if (submit_queues < nr_online_nodes) {
+			pr_warn("null_blk: submit_queues param is set to %u.",
+							nr_online_nodes);
+			submit_queues = nr_online_nodes;
+		}
+	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
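The bucket-splitting loop in null_alloc_hctx() assigns DIV_ROUND_UP(nr_hw_queues, nr_online_nodes) queues to each of the first nr_hw_queues % nr_online_nodes nodes and the smaller quotient to the rest, so e.g. six hardware queues over four nodes map to nodes 0, 0, 1, 1, 2, 3. A standalone userspace re-statement of that arithmetic (illustrative only, not kernel code):

	#include <stdio.h>

	/* Mirrors the split in null_alloc_hctx(): the first
	 * (nr_hw_queues % nr_nodes) buckets get one extra queue. */
	static int node_for_hctx(int hctx_index, int nr_hw_queues, int nr_nodes)
	{
		int b_size = (nr_hw_queues + nr_nodes - 1) / nr_nodes; /* DIV_ROUND_UP */
		int tip = nr_hw_queues % nr_nodes;
		int node = 0, i, n;

		for (i = 0, n = 1; i < hctx_index; i++, n++) {
			if (n % b_size == 0) {
				n = 0;
				node++;
				tip--;
				if (!tip)
					b_size = nr_hw_queues / nr_nodes;
			}
		}
		return node;
	}

	int main(void)
	{
		int i;

		/* Prints: hctx 0..5 -> nodes 0, 0, 1, 1, 2, 3 */
		for (i = 0; i < 6; i++)
			printf("hctx %d -> node %d\n", i, node_for_hctx(i, 6, 4));
		return 0;
	}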
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
 	}
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
 	switch (state) {
 	case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 	}
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
 	switch (state) {
 	case SKD_REQ_STATE_IDLE:
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..16d7b4ac94be 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
+
+	/* Use the default policy if its valid. */
+	if (cpufreq_driver->setpolicy)
+		cpufreq_parse_governor(policy->governor->name,
+					&new_policy.policy, NULL);
+
 	/* assure that the starting sequence is run in cpufreq_set_policy */
 	policy->governor = NULL;
 
@@ -845,8 +851,7 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev,
-				  bool frozen)
+				  unsigned int cpu, struct device *dev)
 {
 	int ret = 0;
 	unsigned long flags;
@@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}
 
-	/* Don't touch sysfs links during light-weight init */
-	if (!frozen)
-		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
-	return ret;
+	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
 #endif
 
@@ -926,6 +927,27 @@ err_free_policy:
 	return NULL;
 }
 
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+	struct kobject *kobj;
+	struct completion *cmp;
+
+	down_read(&policy->rwsem);
+	kobj = &policy->kobj;
+	cmp = &policy->kobj_unregister;
+	up_read(&policy->rwsem);
+	kobject_put(kobj);
+
+	/*
+	 * We need to make sure that the underlying kobj is
+	 * actually not referenced anymore by anybody before we
+	 * proceed with unloading.
+	 */
+	pr_debug("waiting for dropping of refcount\n");
+	wait_for_completion(cmp);
+	pr_debug("wait complete\n");
+}
+
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
 	free_cpumask_var(policy->related_cpus);
@@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
 		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
 			up_read(&cpufreq_rwsem);
 			return ret;
 		}
@@ -1096,7 +1118,10 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
+	if (frozen)
+		cpufreq_policy_put_kobj(policy);
 	cpufreq_policy_free(policy);
+
 nomem_out:
 	up_read(&cpufreq_rwsem);
 
@@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-					   unsigned int old_cpu, bool frozen)
+					   unsigned int old_cpu)
 {
 	struct device *cpu_dev;
 	int ret;
@@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	/* first sibling now owns the new sysfs dir */
 	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
-	/* Don't touch sysfs files during light-weight tear-down */
-	if (frozen)
-		return cpu_dev->id;
-
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
@@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (!frozen)
 			sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
@@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	int ret;
 	unsigned long flags;
 	struct cpufreq_policy *policy;
-	struct kobject *kobj;
-	struct completion *cmp;
 
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1268,8 @@
 		}
 	}
 
-	if (!frozen) {
-		down_read(&policy->rwsem);
-		kobj = &policy->kobj;
-		cmp = &policy->kobj_unregister;
-		up_read(&policy->rwsem);
-		kobject_put(kobj);
-
-		/*
-		 * We need to make sure that the underlying kobj is
-		 * actually not referenced anymore by anybody before we
-		 * proceed with unloading.
-		 */
-		pr_debug("waiting for dropping of refcount\n");
-		wait_for_completion(cmp);
-		pr_debug("wait complete\n");
-	}
+	if (!frozen)
+		cpufreq_policy_put_kobj(policy);
 
 	/*
 	 * Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
+obj-$(CONFIG_UEFI_CPER)		+= efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
 	  backend for pstore by default. This setting can be overridden
 	  using the efivars module's pstore_disable parameter.
 
-config UEFI_CPER
-	def_bool n
-
 endmenu
+
+config UEFI_CPER
+	bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for linux kernel
 #
-obj-y				+= efi.o vars.o
+obj-$(CONFIG_EFI)		+= efi.o vars.o
 obj-$(CONFIG_EFI_VARS)		+= efivars.o
 obj-$(CONFIG_EFI_VARS_PSTORE)	+= efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)		+= cper.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206482a6..f80b700f821c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
 
 	if (!current_set_polling_and_test()) {
 
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
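For context, this hook sits at the top of the usual MONITOR/MWAIT idle entry sequence. A simplified sketch of that sequence (not the exact intel_idle code; on the affected models MONITOR can miss a wakeup write unless the armed cache line is flushed first, which is what X86_FEATURE_CLFLUSH_MONITOR records):

	static void idle_entry_sketch(unsigned long eax, unsigned long ecx)
	{
		if (!current_set_polling_and_test()) {
			/* Errata workaround: flush the line MONITOR will arm. */
			if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
				clflush((void *)&current_thread_info()->flags);

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(eax, ecx);	/* enter the target C-state */
		}
	}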
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
+	int cb_destroy;
+
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	if (iwcm_deref_id(cm_id_priv) &&
-	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+	/*
+	 * Test bit before deref in case the cm_id gets freed on another
+	 * thread.
+	 */
+	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
 	}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
-		(udata)->inbuf  = (void __user *) (ibuf);		\
+		(udata)->inbuf  = (const void __user *) (ibuf);		\
 		(udata)->outbuf = (void __user *) (obuf);		\
 		(udata)->inlen  = (ilen);				\
 		(udata)->outlen = (olen);				\
 	} while (0)
 
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
+	do {									\
+		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL;	\
+		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
+		(udata)->inlen  = (ilen);					\
+		(udata)->outlen = (olen);					\
+	} while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
 *
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	if (kern_spec->reserved)
+		return -EINVAL;
+
 	ib_spec->type = kern_spec->type;
 
 	switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	void *ib_spec;
 	int i;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	if (ucore->outlen < sizeof(resp))
 		return -ENOSPC;
 
@@ -2671,6 +2677,10 @@
 	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 		return -EINVAL;
 
+	if (cmd.flow_attr.reserved[0] ||
+	    cmd.flow_attr.reserved[1])
+		return -EINVAL;
+
 	if (cmd.flow_attr.num_of_specs) {
 		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
 					 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
 		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
 			i, cmd.flow_attr.size);
+		err = -EINVAL;
 		goto err_free;
 	}
 	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
 	struct ib_uobject *uobj;
 	int ret;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
 	if (ret)
 		return ret;
 
+	if (cmd.comp_mask)
+		return -EINVAL;
+
 	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
 			      file->ucontext);
 	if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
 			return -EINVAL;
 
+		if (ex_hdr.cmd_hdr_reserved)
+			return -EINVAL;
+
 		if (ex_hdr.response) {
 			if (!hdr.out_words && !ex_hdr.provider_out_words)
 				return -EINVAL;
+
+			if (!access_ok(VERIFY_WRITE,
+				       (void __user *) (unsigned long) ex_hdr.response,
+				       (hdr.out_words + ex_hdr.provider_out_words) * 8))
+				return -EFAULT;
 		} else {
 			if (hdr.out_words || ex_hdr.provider_out_words)
 				return -EINVAL;
 		}
 
-		INIT_UDATA(&ucore,
-			   (hdr.in_words) ? buf : 0,
-			   (unsigned long)ex_hdr.response,
-			   hdr.in_words * 8,
-			   hdr.out_words * 8);
-
-		INIT_UDATA(&uhw,
-			   (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
-			   (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
-			   ex_hdr.provider_in_words * 8,
-			   ex_hdr.provider_out_words * 8);
+		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+				       hdr.in_words * 8, hdr.out_words * 8);
+
+		INIT_UDATA_BUF_OR_NULL(&uhw,
+				       buf + ucore.inlen,
+				       (unsigned long) ex_hdr.response + ucore.outlen,
+				       ex_hdr.provider_in_words * 8,
+				       ex_hdr.provider_out_words * 8);
 
 		err = uverbs_ex_cmd_table[command](file,
 						   &ucore,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 {
 	u32 remain = len;
 	u32 dmalen;
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 4beb55a0ff30..754f43177483 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
| @@ -197,7 +197,7 @@ struct bucket { | |||
| 197 | uint8_t disk_gen; | 197 | uint8_t disk_gen; |
| 198 | uint8_t last_gc; /* Most out of date gen in the btree */ | 198 | uint8_t last_gc; /* Most out of date gen in the btree */ |
| 199 | uint8_t gc_gen; | 199 | uint8_t gc_gen; |
| 200 | uint16_t gc_mark; | 200 | uint16_t gc_mark; /* Bitfield used by GC. See below for field */ |
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | /* | 203 | /* |
| @@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |||
| 209 | #define GC_MARK_RECLAIMABLE 0 | 209 | #define GC_MARK_RECLAIMABLE 0 |
| 210 | #define GC_MARK_DIRTY 1 | 210 | #define GC_MARK_DIRTY 1 |
| 211 | #define GC_MARK_METADATA 2 | 211 | #define GC_MARK_METADATA 2 |
| 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); | 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); |
| 213 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); | ||
| 213 | 214 | ||
| 214 | #include "journal.h" | 215 | #include "journal.h" |
| 215 | #include "stats.h" | 216 | #include "stats.h" |
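After this hunk the 16-bit gc_mark word is fully packed: bits 0-1 hold the mark (reclaimable/dirty/metadata), bits 2-14 hold GC_SECTORS_USED (shrunk from 14 to 13 bits to make room), and bit 15 is the new GC_MOVE flag that moving GC uses to tag buckets whose data should be copied out; the alloc.c hunk above clears that flag whenever a bucket is handed out again. A standalone model of the packing, assuming BITMASK(name, type, field, offset, size) generates get/set accessors over that bit range:

#include <assert.h>
#include <stdint.h>

/* Userspace model of bcache's gc_mark layout after this patch:
 * [1:0] mark, [14:2] sectors used, [15] move flag. */
static uint16_t set_bits(uint16_t w, unsigned off, unsigned sz, uint16_t v)
{
	uint16_t mask = ((1u << sz) - 1) << off;
	return (w & ~mask) | ((uint16_t)(v << off) & mask);
}

static uint16_t get_bits(uint16_t w, unsigned off, unsigned sz)
{
	return (w >> off) & ((1u << sz) - 1);
}

int main(void)
{
	uint16_t gc_mark = 0;

	gc_mark = set_bits(gc_mark, 0, 2, 2);    /* GC_MARK_METADATA */
	gc_mark = set_bits(gc_mark, 2, 13, 500); /* GC_SECTORS_USED */
	gc_mark = set_bits(gc_mark, 15, 1, 1);   /* GC_MOVE */

	assert(get_bits(gc_mark, 0, 2) == 2);
	assert(get_bits(gc_mark, 2, 13) == 500);
	assert(get_bits(gc_mark, 15, 1) == 1);
	return 0;
}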
| @@ -372,14 +373,14 @@ struct cached_dev { | |||
| 372 | unsigned char writeback_percent; | 373 | unsigned char writeback_percent; |
| 373 | unsigned writeback_delay; | 374 | unsigned writeback_delay; |
| 374 | 375 | ||
| 375 | int writeback_rate_change; | ||
| 376 | int64_t writeback_rate_derivative; | ||
| 377 | uint64_t writeback_rate_target; | 376 | uint64_t writeback_rate_target; |
| 377 | int64_t writeback_rate_proportional; | ||
| 378 | int64_t writeback_rate_derivative; | ||
| 379 | int64_t writeback_rate_change; | ||
| 378 | 380 | ||
| 379 | unsigned writeback_rate_update_seconds; | 381 | unsigned writeback_rate_update_seconds; |
| 380 | unsigned writeback_rate_d_term; | 382 | unsigned writeback_rate_d_term; |
| 381 | unsigned writeback_rate_p_term_inverse; | 383 | unsigned writeback_rate_p_term_inverse; |
| 382 | unsigned writeback_rate_d_smooth; | ||
| 383 | }; | 384 | }; |
| 384 | 385 | ||
| 385 | enum alloc_watermarks { | 386 | enum alloc_watermarks { |
| @@ -445,7 +446,6 @@ struct cache { | |||
| 445 | * call prio_write() to keep gens from wrapping. | 446 | * call prio_write() to keep gens from wrapping. |
| 446 | */ | 447 | */ |
| 447 | uint8_t need_save_prio; | 448 | uint8_t need_save_prio; |
| 448 | unsigned gc_move_threshold; | ||
| 449 | 449 | ||
| 450 | /* | 450 | /* |
| 451 | * If nonzero, we know we aren't going to find any buckets to invalidate | 451 | * If nonzero, we know we aren't going to find any buckets to invalidate |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..31bb53fcc67a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
| @@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c) | |||
| 1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), | 1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), |
| 1562 | GC_MARK_METADATA); | 1562 | GC_MARK_METADATA); |
| 1563 | 1563 | ||
| 1564 | /* don't reclaim buckets to which writeback keys point */ | ||
| 1565 | rcu_read_lock(); | ||
| 1566 | for (i = 0; i < c->nr_uuids; i++) { | ||
| 1567 | struct bcache_device *d = c->devices[i]; | ||
| 1568 | struct cached_dev *dc; | ||
| 1569 | struct keybuf_key *w, *n; | ||
| 1570 | unsigned j; | ||
| 1571 | |||
| 1572 | if (!d || UUID_FLASH_ONLY(&c->uuids[i])) | ||
| 1573 | continue; | ||
| 1574 | dc = container_of(d, struct cached_dev, disk); | ||
| 1575 | |||
| 1576 | spin_lock(&dc->writeback_keys.lock); | ||
| 1577 | rbtree_postorder_for_each_entry_safe(w, n, | ||
| 1578 | &dc->writeback_keys.keys, node) | ||
| 1579 | for (j = 0; j < KEY_PTRS(&w->key); j++) | ||
| 1580 | SET_GC_MARK(PTR_BUCKET(c, &w->key, j), | ||
| 1581 | GC_MARK_DIRTY); | ||
| 1582 | spin_unlock(&dc->writeback_keys.lock); | ||
| 1583 | } | ||
| 1584 | rcu_read_unlock(); | ||
| 1585 | |||
| 1564 | for_each_cache(ca, c, i) { | 1586 | for_each_cache(ca, c, i) { |
| 1565 | uint64_t *i; | 1587 | uint64_t *i; |
| 1566 | 1588 | ||
| @@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, | |||
| 1817 | if (KEY_START(k) > KEY_START(insert) + sectors_found) | 1839 | if (KEY_START(k) > KEY_START(insert) + sectors_found) |
| 1818 | goto check_failed; | 1840 | goto check_failed; |
| 1819 | 1841 | ||
| 1820 | if (KEY_PTRS(replace_key) != KEY_PTRS(k)) | 1842 | if (KEY_PTRS(k) != KEY_PTRS(replace_key) || |
| 1843 | KEY_DIRTY(k) != KEY_DIRTY(replace_key)) | ||
| 1821 | goto check_failed; | 1844 | goto check_failed; |
| 1822 | 1845 | ||
| 1823 | /* skip past gen */ | 1846 | /* skip past gen */ |
| @@ -2217,7 +2240,7 @@ struct btree_insert_op { | |||
| 2217 | struct bkey *replace_key; | 2240 | struct bkey *replace_key; |
| 2218 | }; | 2241 | }; |
| 2219 | 2242 | ||
| 2220 | int btree_insert_fn(struct btree_op *b_op, struct btree *b) | 2243 | static int btree_insert_fn(struct btree_op *b_op, struct btree *b) |
| 2221 | { | 2244 | { |
| 2222 | struct btree_insert_op *op = container_of(b_op, | 2245 | struct btree_insert_op *op = container_of(b_op, |
| 2223 | struct btree_insert_op, op); | 2246 | struct btree_insert_op, op); |
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..f2f0998c4a91 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
| @@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) | |||
| 25 | unsigned i; | 25 | unsigned i; |
| 26 | 26 | ||
| 27 | for (i = 0; i < KEY_PTRS(k); i++) { | 27 | for (i = 0; i < KEY_PTRS(k); i++) { |
| 28 | struct cache *ca = PTR_CACHE(c, k, i); | ||
| 29 | struct bucket *g = PTR_BUCKET(c, k, i); | 28 | struct bucket *g = PTR_BUCKET(c, k, i); |
| 30 | 29 | ||
| 31 | if (GC_SECTORS_USED(g) < ca->gc_move_threshold) | 30 | if (GC_MOVE(g)) |
| 32 | return true; | 31 | return true; |
| 33 | } | 32 | } |
| 34 | 33 | ||
| @@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl) | |||
| 65 | 64 | ||
| 66 | static void read_moving_endio(struct bio *bio, int error) | 65 | static void read_moving_endio(struct bio *bio, int error) |
| 67 | { | 66 | { |
| 67 | struct bbio *b = container_of(bio, struct bbio, bio); | ||
| 68 | struct moving_io *io = container_of(bio->bi_private, | 68 | struct moving_io *io = container_of(bio->bi_private, |
| 69 | struct moving_io, cl); | 69 | struct moving_io, cl); |
| 70 | 70 | ||
| 71 | if (error) | 71 | if (error) |
| 72 | io->op.error = error; | 72 | io->op.error = error; |
| 73 | else if (!KEY_DIRTY(&b->key) && | ||
| 74 | ptr_stale(io->op.c, &b->key, 0)) { | ||
| 75 | io->op.error = -EINTR; | ||
| 76 | } | ||
| 73 | 77 | ||
| 74 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); | 78 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); |
| 75 | } | 79 | } |
| @@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c) | |||
| 141 | if (!w) | 145 | if (!w) |
| 142 | break; | 146 | break; |
| 143 | 147 | ||
| 148 | if (ptr_stale(c, &w->key, 0)) { | ||
| 149 | bch_keybuf_del(&c->moving_gc_keys, w); | ||
| 150 | continue; | ||
| 151 | } | ||
| 152 | |||
| 144 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) | 153 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) |
| 145 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), | 154 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), |
| 146 | GFP_KERNEL); | 155 | GFP_KERNEL); |
| @@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) | |||
| 184 | 193 | ||
| 185 | static unsigned bucket_heap_top(struct cache *ca) | 194 | static unsigned bucket_heap_top(struct cache *ca) |
| 186 | { | 195 | { |
| 187 | return GC_SECTORS_USED(heap_peek(&ca->heap)); | 196 | struct bucket *b; |
| 197 | return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; | ||
| 188 | } | 198 | } |
| 189 | 199 | ||
| 190 | void bch_moving_gc(struct cache_set *c) | 200 | void bch_moving_gc(struct cache_set *c) |
| @@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c) | |||
| 226 | sectors_to_move -= GC_SECTORS_USED(b); | 236 | sectors_to_move -= GC_SECTORS_USED(b); |
| 227 | } | 237 | } |
| 228 | 238 | ||
| 229 | ca->gc_move_threshold = bucket_heap_top(ca); | 239 | while (heap_pop(&ca->heap, b, bucket_cmp)) |
| 230 | 240 | SET_GC_MOVE(b, 1); | |
| 231 | pr_debug("threshold %u", ca->gc_move_threshold); | ||
| 232 | } | 241 | } |
| 233 | 242 | ||
| 234 | mutex_unlock(&c->bucket_lock); | 243 | mutex_unlock(&c->bucket_lock); |
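This is the other half of the GC_MOVE change: instead of publishing a per-cache gc_move_threshold and re-deriving the candidate set in moving_pred(), bch_moving_gc() keeps a heap of the least-occupied buckets, pops entries until the copy budget is met, and flags the survivors with GC_MOVE, so moving_pred() reduces to a one-bit test. The endio and read_moving checks above additionally drop moves whose source pointer went stale in the meantime. A rough userspace model of the selection idea (a sorted array stands in for the kernel's heap):

#include <stdio.h>
#include <stdlib.h>

struct bucket { unsigned sectors_used; int gc_move; };

static int by_occupancy(const void *a, const void *b)
{
	const struct bucket *l = a, *r = b;
	return (int)l->sectors_used - (int)r->sectors_used;
}

int main(void)
{
	struct bucket b[6] = { {10,0}, {900,0}, {40,0}, {700,0}, {5,0}, {300,0} };
	unsigned budget = 60; /* sectors we are willing to copy */

	/* Emptiest buckets are the cheapest to evacuate, so take those
	 * first until the budget runs out, flagging each one. */
	qsort(b, 6, sizeof(b[0]), by_occupancy);
	for (int i = 0; i < 6 && b[i].sectors_used <= budget; i++) {
		budget -= b[i].sectors_used;
		b[i].gc_move = 1; /* moving_pred() now just tests this */
	}

	for (int i = 0; i < 6; i++)
		printf("used=%-3u move=%d\n", b[i].sectors_used, b[i].gc_move);
	return 0;
}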
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dec15cd2d797..c57bfa071a57 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
| @@ -1676,7 +1676,7 @@ err: | |||
| 1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) | 1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) |
| 1677 | { | 1677 | { |
| 1678 | return ca->sb.block_size == c->sb.block_size && | 1678 | return ca->sb.block_size == c->sb.block_size && |
| 1679 | ca->sb.bucket_size == c->sb.block_size && | 1679 | ca->sb.bucket_size == c->sb.bucket_size && |
| 1680 | ca->sb.nr_in_set == c->sb.nr_in_set; | 1680 | ca->sb.nr_in_set == c->sb.nr_in_set; |
| 1681 | } | 1681 | } |
| 1682 | 1682 | ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 80d4c2bee18a..a1f85612f0b3 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
| @@ -83,7 +83,6 @@ rw_attribute(writeback_rate); | |||
| 83 | rw_attribute(writeback_rate_update_seconds); | 83 | rw_attribute(writeback_rate_update_seconds); |
| 84 | rw_attribute(writeback_rate_d_term); | 84 | rw_attribute(writeback_rate_d_term); |
| 85 | rw_attribute(writeback_rate_p_term_inverse); | 85 | rw_attribute(writeback_rate_p_term_inverse); |
| 86 | rw_attribute(writeback_rate_d_smooth); | ||
| 87 | read_attribute(writeback_rate_debug); | 86 | read_attribute(writeback_rate_debug); |
| 88 | 87 | ||
| 89 | read_attribute(stripe_size); | 88 | read_attribute(stripe_size); |
| @@ -129,31 +128,41 @@ SHOW(__bch_cached_dev) | |||
| 129 | var_printf(writeback_running, "%i"); | 128 | var_printf(writeback_running, "%i"); |
| 130 | var_print(writeback_delay); | 129 | var_print(writeback_delay); |
| 131 | var_print(writeback_percent); | 130 | var_print(writeback_percent); |
| 132 | sysfs_print(writeback_rate, dc->writeback_rate.rate); | 131 | sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); |
| 133 | 132 | ||
| 134 | var_print(writeback_rate_update_seconds); | 133 | var_print(writeback_rate_update_seconds); |
| 135 | var_print(writeback_rate_d_term); | 134 | var_print(writeback_rate_d_term); |
| 136 | var_print(writeback_rate_p_term_inverse); | 135 | var_print(writeback_rate_p_term_inverse); |
| 137 | var_print(writeback_rate_d_smooth); | ||
| 138 | 136 | ||
| 139 | if (attr == &sysfs_writeback_rate_debug) { | 137 | if (attr == &sysfs_writeback_rate_debug) { |
| 138 | char rate[20]; | ||
| 140 | char dirty[20]; | 139 | char dirty[20]; |
| 141 | char derivative[20]; | ||
| 142 | char target[20]; | 140 | char target[20]; |
| 143 | bch_hprint(dirty, | 141 | char proportional[20]; |
| 144 | bcache_dev_sectors_dirty(&dc->disk) << 9); | 142 | char derivative[20]; |
| 145 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | 143 | char change[20]; |
| 144 | s64 next_io; | ||
| 145 | |||
| 146 | bch_hprint(rate, dc->writeback_rate.rate << 9); | ||
| 147 | bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); | ||
| 146 | bch_hprint(target, dc->writeback_rate_target << 9); | 148 | bch_hprint(target, dc->writeback_rate_target << 9); |
| 149 | bch_hprint(proportional,dc->writeback_rate_proportional << 9); | ||
| 150 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | ||
| 151 | bch_hprint(change, dc->writeback_rate_change << 9); | ||
| 152 | |||
| 153 | next_io = div64_s64(dc->writeback_rate.next - local_clock(), | ||
| 154 | NSEC_PER_MSEC); | ||
| 147 | 155 | ||
| 148 | return sprintf(buf, | 156 | return sprintf(buf, |
| 149 | "rate:\t\t%u\n" | 157 | "rate:\t\t%s/sec\n" |
| 150 | "change:\t\t%i\n" | ||
| 151 | "dirty:\t\t%s\n" | 158 | "dirty:\t\t%s\n" |
| 159 | "target:\t\t%s\n" | ||
| 160 | "proportional:\t%s\n" | ||
| 152 | "derivative:\t%s\n" | 161 | "derivative:\t%s\n" |
| 153 | "target:\t\t%s\n", | 162 | "change:\t\t%s/sec\n" |
| 154 | dc->writeback_rate.rate, | 163 | "next io:\t%llims\n", |
| 155 | dc->writeback_rate_change, | 164 | rate, dirty, target, proportional, |
| 156 | dirty, derivative, target); | 165 | derivative, change, next_io); |
| 157 | } | 166 | } |
| 158 | 167 | ||
| 159 | sysfs_hprint(dirty_data, | 168 | sysfs_hprint(dirty_data, |
| @@ -189,6 +198,7 @@ STORE(__cached_dev) | |||
| 189 | struct kobj_uevent_env *env; | 198 | struct kobj_uevent_env *env; |
| 190 | 199 | ||
| 191 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) | 200 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) |
| 201 | #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) | ||
| 192 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) | 202 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) |
| 193 | 203 | ||
| 194 | sysfs_strtoul(data_csum, dc->disk.data_csum); | 204 | sysfs_strtoul(data_csum, dc->disk.data_csum); |
| @@ -197,16 +207,15 @@ STORE(__cached_dev) | |||
| 197 | d_strtoul(writeback_metadata); | 207 | d_strtoul(writeback_metadata); |
| 198 | d_strtoul(writeback_running); | 208 | d_strtoul(writeback_running); |
| 199 | d_strtoul(writeback_delay); | 209 | d_strtoul(writeback_delay); |
| 200 | sysfs_strtoul_clamp(writeback_rate, | 210 | |
| 201 | dc->writeback_rate.rate, 1, 1000000); | ||
| 202 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); | 211 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); |
| 203 | 212 | ||
| 204 | d_strtoul(writeback_rate_update_seconds); | 213 | sysfs_strtoul_clamp(writeback_rate, |
| 214 | dc->writeback_rate.rate, 1, INT_MAX); | ||
| 215 | |||
| 216 | d_strtoul_nonzero(writeback_rate_update_seconds); | ||
| 205 | d_strtoul(writeback_rate_d_term); | 217 | d_strtoul(writeback_rate_d_term); |
| 206 | d_strtoul(writeback_rate_p_term_inverse); | 218 | d_strtoul_nonzero(writeback_rate_p_term_inverse); |
| 207 | sysfs_strtoul_clamp(writeback_rate_p_term_inverse, | ||
| 208 | dc->writeback_rate_p_term_inverse, 1, INT_MAX); | ||
| 209 | d_strtoul(writeback_rate_d_smooth); | ||
| 210 | 219 | ||
| 211 | d_strtoi_h(sequential_cutoff); | 220 | d_strtoi_h(sequential_cutoff); |
| 212 | d_strtoi_h(readahead); | 221 | d_strtoi_h(readahead); |
| @@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = { | |||
| 313 | &sysfs_writeback_rate_update_seconds, | 322 | &sysfs_writeback_rate_update_seconds, |
| 314 | &sysfs_writeback_rate_d_term, | 323 | &sysfs_writeback_rate_d_term, |
| 315 | &sysfs_writeback_rate_p_term_inverse, | 324 | &sysfs_writeback_rate_p_term_inverse, |
| 316 | &sysfs_writeback_rate_d_smooth, | ||
| 317 | &sysfs_writeback_rate_debug, | 325 | &sysfs_writeback_rate_debug, |
| 318 | &sysfs_dirty_data, | 326 | &sysfs_dirty_data, |
| 319 | &sysfs_stripe_size, | 327 | &sysfs_stripe_size, |
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..bb37618e7664 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c | |||
| @@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) | |||
| 209 | { | 209 | { |
| 210 | uint64_t now = local_clock(); | 210 | uint64_t now = local_clock(); |
| 211 | 211 | ||
| 212 | d->next += div_u64(done, d->rate); | 212 | d->next += div_u64(done * NSEC_PER_SEC, d->rate); |
| 213 | |||
| 214 | if (time_before64(now + NSEC_PER_SEC, d->next)) | ||
| 215 | d->next = now + NSEC_PER_SEC; | ||
| 216 | |||
| 217 | if (time_after64(now - NSEC_PER_SEC * 2, d->next)) | ||
| 218 | d->next = now - NSEC_PER_SEC * 2; | ||
| 213 | 219 | ||
| 214 | return time_after64(d->next, now) | 220 | return time_after64(d->next, now) |
| 215 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) | 221 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) |
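Two fixes in one hunk: the rate is now interpreted as units per second (the old code relied on callers pre-scaling - writeback passed sectors * 10000000), and d->next is clamped into the window (now - 2s, now + 1s) so an idle stretch cannot bank unlimited credit, nor a burst push the limiter arbitrarily far into the future. A runnable model of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
	uint64_t next;  /* absolute time the next unit is "due", in ns */
	unsigned rate;  /* units per second */
};

/* Returns how long the caller should wait after completing `done` units. */
static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	/* Clamp to (now - 2s, now + 1s): no unbounded credit or debt. */
	if (d->next > now + NSEC_PER_SEC)
		d->next = now + NSEC_PER_SEC;
	if (now > d->next + 2 * NSEC_PER_SEC)
		d->next = now - 2 * NSEC_PER_SEC;

	return d->next > now ? d->next - now : 0;
}

int main(void)
{
	struct ratelimit d = { .next = 0, .rate = 1024 }; /* sectors/sec */

	/* 512 sectors at 1024/sec should cost half a second. */
	printf("%llu ns\n", (unsigned long long)next_delay(&d, 512, 0));
	return 0;
}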
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 362c4b3f8b4a..1030c6020e98 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
| @@ -110,7 +110,7 @@ do { \ | |||
| 110 | _r; \ | 110 | _r; \ |
| 111 | }) | 111 | }) |
| 112 | 112 | ||
| 113 | #define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) | 113 | #define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) |
| 114 | 114 | ||
| 115 | #define heap_full(h) ((h)->used == (h)->size) | 115 | #define heap_full(h) ((h)->used == (h)->size) |
| 116 | 116 | ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..6c44fe059c27 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
| @@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc) | |||
| 30 | 30 | ||
| 31 | /* PD controller */ | 31 | /* PD controller */ |
| 32 | 32 | ||
| 33 | int change = 0; | ||
| 34 | int64_t error; | ||
| 35 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); | 33 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); |
| 36 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; | 34 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; |
| 35 | int64_t proportional = dirty - target; | ||
| 36 | int64_t change; | ||
| 37 | 37 | ||
| 38 | dc->disk.sectors_dirty_last = dirty; | 38 | dc->disk.sectors_dirty_last = dirty; |
| 39 | 39 | ||
| 40 | derivative *= dc->writeback_rate_d_term; | 40 | /* Scale to sectors per second */ |
| 41 | derivative = clamp(derivative, -dirty, dirty); | ||
| 42 | 41 | ||
| 43 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, | 42 | proportional *= dc->writeback_rate_update_seconds; |
| 44 | dc->writeback_rate_d_smooth, 0); | 43 | proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); |
| 45 | 44 | ||
| 46 | /* Avoid divide by zero */ | 45 | derivative = div_s64(derivative, dc->writeback_rate_update_seconds); |
| 47 | if (!target) | ||
| 48 | goto out; | ||
| 49 | 46 | ||
| 50 | error = div64_s64((dirty + derivative - target) << 8, target); | 47 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, |
| 48 | (dc->writeback_rate_d_term / | ||
| 49 | dc->writeback_rate_update_seconds) ?: 1, 0); | ||
| 50 | |||
| 51 | derivative *= dc->writeback_rate_d_term; | ||
| 52 | derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); | ||
| 51 | 53 | ||
| 52 | change = div_s64((dc->writeback_rate.rate * error) >> 8, | 54 | change = proportional + derivative; |
| 53 | dc->writeback_rate_p_term_inverse); | ||
| 54 | 55 | ||
| 55 | /* Don't increase writeback rate if the device isn't keeping up */ | 56 | /* Don't increase writeback rate if the device isn't keeping up */ |
| 56 | if (change > 0 && | 57 | if (change > 0 && |
| 57 | time_after64(local_clock(), | 58 | time_after64(local_clock(), |
| 58 | dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) | 59 | dc->writeback_rate.next + NSEC_PER_MSEC)) |
| 59 | change = 0; | 60 | change = 0; |
| 60 | 61 | ||
| 61 | dc->writeback_rate.rate = | 62 | dc->writeback_rate.rate = |
| 62 | clamp_t(int64_t, dc->writeback_rate.rate + change, | 63 | clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, |
| 63 | 1, NSEC_PER_MSEC); | 64 | 1, NSEC_PER_MSEC); |
| 64 | out: | 65 | |
| 66 | dc->writeback_rate_proportional = proportional; | ||
| 65 | dc->writeback_rate_derivative = derivative; | 67 | dc->writeback_rate_derivative = derivative; |
| 66 | dc->writeback_rate_change = change; | 68 | dc->writeback_rate_change = change; |
| 67 | dc->writeback_rate_target = target; | 69 | dc->writeback_rate_target = target; |
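The controller rework replaces the old error-times-rate feedback with a plain PD form: the proportional term is the dirty-data excess (dirty - target) scaled by update_seconds / p_term_inverse, and the derivative term is the smoothed growth rate of the dirty count scaled by d_term / p_term_inverse; their sum adjusts the rate, now kept in sectors per second, once per update interval. A runnable model of a single step (the EWMA smoothing of the derivative is omitted for brevity):

#include <stdint.h>
#include <stdio.h>

/* One PD update, mirroring the shape of __update_writeback_rate().
 * rate is in sectors/sec; dirty/target/last_dirty are sector counts. */
static int64_t pd_step(int64_t dirty, int64_t target, int64_t last_dirty,
		       int64_t rate, int64_t update_seconds,
		       int64_t p_inverse, int64_t d_term)
{
	int64_t proportional = (dirty - target) * update_seconds / p_inverse;
	int64_t derivative = (dirty - last_dirty) / update_seconds
			     * d_term / p_inverse;
	int64_t change = proportional + derivative;

	rate += change;
	return rate < 1 ? 1 : rate; /* kernel clamps to [1, NSEC_PER_MSEC] */
}

int main(void)
{
	/* 100000 sectors over target, dirty count steady, new defaults
	 * (update_seconds=5, p_term_inverse=6000, d_term=30): */
	printf("new rate = %lld sectors/sec\n",
	       (long long)pd_step(1100000, 1000000, 1100000, 1024, 5, 6000, 30));
	/* proportional = 100000 * 5 / 6000 = 83 -> rate 1024 + 83 = 1107 */
	return 0;
}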
| @@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work) | |||
| 87 | 89 | ||
| 88 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) | 90 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) |
| 89 | { | 91 | { |
| 90 | uint64_t ret; | ||
| 91 | |||
| 92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || | 92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || |
| 93 | !dc->writeback_percent) | 93 | !dc->writeback_percent) |
| 94 | return 0; | 94 | return 0; |
| 95 | 95 | ||
| 96 | ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); | 96 | return bch_next_delay(&dc->writeback_rate, sectors); |
| 97 | |||
| 98 | return min_t(uint64_t, ret, HZ); | ||
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | struct dirty_io { | 99 | struct dirty_io { |
| @@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc) | |||
| 241 | if (KEY_START(&w->key) != dc->last_read || | 239 | if (KEY_START(&w->key) != dc->last_read || |
| 242 | jiffies_to_msecs(delay) > 50) | 240 | jiffies_to_msecs(delay) > 50) |
| 243 | while (!kthread_should_stop() && delay) | 241 | while (!kthread_should_stop() && delay) |
| 244 | delay = schedule_timeout_interruptible(delay); | 242 | delay = schedule_timeout_uninterruptible(delay); |
| 245 | 243 | ||
| 246 | dc->last_read = KEY_OFFSET(&w->key); | 244 | dc->last_read = KEY_OFFSET(&w->key); |
| 247 | 245 | ||
| @@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg) | |||
| 438 | while (delay && | 436 | while (delay && |
| 439 | !kthread_should_stop() && | 437 | !kthread_should_stop() && |
| 440 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) | 438 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
| 441 | delay = schedule_timeout_interruptible(delay); | 439 | delay = schedule_timeout_uninterruptible(delay); |
| 442 | } | 440 | } |
| 443 | } | 441 | } |
| 444 | 442 | ||
| @@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc) | |||
| 476 | 474 | ||
| 477 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), | 475 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), |
| 478 | sectors_dirty_init_fn, 0); | 476 | sectors_dirty_init_fn, 0); |
| 477 | |||
| 478 | dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); | ||
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) | 481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) |
| @@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) | |||
| 490 | dc->writeback_delay = 30; | 490 | dc->writeback_delay = 30; |
| 491 | dc->writeback_rate.rate = 1024; | 491 | dc->writeback_rate.rate = 1024; |
| 492 | 492 | ||
| 493 | dc->writeback_rate_update_seconds = 30; | 493 | dc->writeback_rate_update_seconds = 5; |
| 494 | dc->writeback_rate_d_term = 16; | 494 | dc->writeback_rate_d_term = 30; |
| 495 | dc->writeback_rate_p_term_inverse = 64; | 495 | dc->writeback_rate_p_term_inverse = 6000; |
| 496 | dc->writeback_rate_d_smooth = 8; | ||
| 497 | 496 | ||
| 498 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, | 497 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, |
| 499 | "bcache_writeback"); | 498 | "bcache_writeback"); |
| 500 | if (IS_ERR(dc->writeback_thread)) | 499 | if (IS_ERR(dc->writeback_thread)) |
| 501 | return PTR_ERR(dc->writeback_thread); | 500 | return PTR_ERR(dc->writeback_thread); |
| 502 | 501 | ||
| 503 | set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); | ||
| 504 | |||
| 505 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); | 502 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); |
| 506 | schedule_delayed_work(&dc->writeback_rate_update, | 503 | schedule_delayed_work(&dc->writeback_rate_update, |
| 507 | dc->writeback_rate_update_seconds * HZ); | 504 | dc->writeback_rate_update_seconds * HZ); |
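The retuned defaults make the loop both faster and gentler: updates every 5 seconds instead of 30, with a proportional gain of update_seconds / p_term_inverse = 5/6000. Worked example: one GiB of excess dirty data is 2,097,152 sectors, so each update nudges the rate by about 2097152 * 5 / 6000, roughly 1747 sectors/sec (about 0.85 MiB/s) - deliberately small steps, applied often. The set_task_state() call disappears because both sleep sites above now use schedule_timeout_uninterruptible(), which sets the task state itself.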
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 2832576d8b12..114f5ef4b73a 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c | |||
| @@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { | |||
| 512 | 512 | ||
| 513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { | 513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
| 514 | { "INT33B2", 0 }, | 514 | { "INT33B2", 0 }, |
| 515 | { "INT33FC", 0 }, | ||
| 515 | { } | 516 | { } |
| 516 | }; | 517 | }; |
| 517 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); | 518 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 2a786c504460..3c6768378a94 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
| @@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
| 833 | return 0; | 833 | return 0; |
| 834 | } | 834 | } |
| 835 | 835 | ||
| 836 | static const struct x86_cpu_id energy_unit_quirk_ids[] = { | ||
| 837 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
| 838 | {} | ||
| 839 | }; | ||
| 840 | |||
| 836 | static int rapl_check_unit(struct rapl_package *rp, int cpu) | 841 | static int rapl_check_unit(struct rapl_package *rp, int cpu) |
| 837 | { | 842 | { |
| 838 | u64 msr_val; | 843 | u64 msr_val; |
| @@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu) | |||
| 853 | * time unit: 1/time_unit_divisor Seconds | 858 | * time unit: 1/time_unit_divisor Seconds |
| 854 | */ | 859 | */ |
| 855 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 860 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
| 856 | rp->energy_unit_divisor = 1 << value; | 861 | /* some CPUs have different way to calculate energy unit */ |
| 857 | 862 | if (x86_match_cpu(energy_unit_quirk_ids)) | |
| 863 | rp->energy_unit_divisor = 1000000 / (1 << value); | ||
| 864 | else | ||
| 865 | rp->energy_unit_divisor = 1 << value; | ||
| 858 | 866 | ||
| 859 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 867 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
| 860 | rp->power_unit_divisor = 1 << value; | 868 | rp->power_unit_divisor = 1 << value; |
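On most parts the ENERGY_UNIT field encodes the unit as 1/2^value joules, so dividing raw counts by 1 << value yields joules. Valleyview reports the unit on a microjoule scale instead, which the quirk expresses by inverting the divisor: if such a CPU reported value = 5, each count would be worth 2^5 = 32 uJ, implemented as a divisor of 1000000 / 32 = 31250 against the same joule-based bookkeeping. (The exact field value VLV reports is not shown in this hunk; the numbers here are illustrative.)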
| @@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id) | |||
| 941 | static const struct x86_cpu_id rapl_ids[] = { | 949 | static const struct x86_cpu_id rapl_ids[] = { |
| 942 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ | 950 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ |
| 943 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ | 951 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ |
| 952 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
| 944 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ | 953 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ |
| 945 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ | 954 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ |
| 946 | /* TODO: Add more CPU IDs after testing */ | 955 | /* TODO: Add more CPU IDs after testing */ |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c444654fc33f..5c4a95b516cf 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb) | |||
| 285 | { | 285 | { |
| 286 | __le32 actual = cpu_to_le32(vb->num_pages); | 286 | __le32 actual = cpu_to_le32(vb->num_pages); |
| 287 | 287 | ||
| 288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, | 288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, |
| 289 | &actual); | 289 | &actual); |
| 290 | } | 290 | } |
| 291 | 291 | ||
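virtio_cwrite() is keyed on a struct type plus field name and derives both the config-space offset and the access width from that field, so naming num_pages here wrote the guest's ballooned-page count into the host's target field rather than into the actual field one word later. A runnable illustration of the offset/size derivation (the macro shape is a sketch; the two-field config layout matches the virtio balloon spec):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct virtio_balloon_config {
	uint32_t num_pages; /* host-requested target, offset 0 */
	uint32_t actual;    /* guest-reported current size, offset 4 */
};

/* Sketch of how a virtio_cwrite-style macro locates its target. */
#define cfg_offset(type, field) offsetof(type, field)
#define cfg_size(type, field)   sizeof(((type *)0)->field)

int main(void)
{
	printf("num_pages: offset=%zu size=%zu\n",
	       cfg_offset(struct virtio_balloon_config, num_pages),
	       cfg_size(struct virtio_balloon_config, num_pages));
	printf("actual:    offset=%zu size=%zu\n",
	       cfg_offset(struct virtio_balloon_config, actual),
	       cfg_size(struct virtio_balloon_config, actual));
	return 0;
}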
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 288534920fe5..20d6697bd638 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, | |||
| 1493 | sb->s_blocksize - offset : towrite; | 1493 | sb->s_blocksize - offset : towrite; |
| 1494 | 1494 | ||
| 1495 | tmp_bh.b_state = 0; | 1495 | tmp_bh.b_state = 0; |
| 1496 | tmp_bh.b_size = sb->s_blocksize; | ||
| 1496 | err = ext2_get_block(inode, blk, &tmp_bh, 1); | 1497 | err = ext2_get_block(inode, blk, &tmp_bh, 1); |
| 1497 | if (err < 0) | 1498 | if (err < 0) |
| 1498 | goto out; | 1499 | goto out; |
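get_block implementations read b_size on the way in to learn how large a mapping the caller wants; with b_state zeroed but b_size never initialized, the stack garbage in the temporary buffer_head could request an arbitrarily large mapping. Sizing b_size to one block before each ext2_get_block() call bounds the lookup to exactly the block being written.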
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e6185031c1cc..ece55565b9cd 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -268,6 +268,16 @@ struct ext4_io_submit { | |||
| 268 | /* Translate # of blks to # of clusters */ | 268 | /* Translate # of blks to # of clusters */ |
| 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ | 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ |
| 270 | (sbi)->s_cluster_bits) | 270 | (sbi)->s_cluster_bits) |
| 271 | /* Mask out the low bits to get the starting block of the cluster */ | ||
| 272 | #define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ | ||
| 273 | ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 274 | #define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ | ||
| 275 | ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 276 | /* Get the cluster offset */ | ||
| 277 | #define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ | ||
| 278 | ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 279 | #define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ | ||
| 280 | ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 271 | 281 | ||
| 272 | /* | 282 | /* |
| 273 | * Structure of a blocks group descriptor | 283 | * Structure of a blocks group descriptor |
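These four helpers centralize bigalloc's cluster arithmetic: CMASK clears the low bits of a block number to find the first block of its cluster, COFF keeps them to find the offset within the cluster, and the explicit casts matter because s_cluster_ratio is a 32-bit quantity while physical block numbers are 64-bit. A runnable check of both properties (cluster_ratio = 16 here for illustration):

#include <assert.h>
#include <stdint.h>

typedef uint64_t ext4_fsblk_t;

/* Userspace copies of the new helpers, with the explicit cast. */
#define CMASK(ratio, b) ((b) & ~((ext4_fsblk_t)(ratio) - 1)) /* cluster start */
#define COFF(ratio, b)  ((b) &  ((ext4_fsblk_t)(ratio) - 1)) /* offset within */

int main(void)
{
	assert(CMASK(16, 35) == 32); /* block 35 lives in the cluster at 32 */
	assert(COFF(16, 35) == 3);   /* ...3 blocks into that cluster */

	/* Without the cast, an unsigned 32-bit ratio makes ~(ratio - 1)
	 * zero-extend and wipe the high half of a 64-bit block number: */
	ext4_fsblk_t blk = 0x100000003ULL;
	assert(CMASK(16, blk) == 0x100000000ULL);              /* correct */
	assert((blk & ~((uint32_t)16 - 1u)) == 0x00000000ULL); /* broken  */
	return 0;
}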
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 17ac112ab101..3fe29de832c8 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
| @@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
| 259 | if (WARN_ON_ONCE(err)) { | 259 | if (WARN_ON_ONCE(err)) { |
| 260 | ext4_journal_abort_handle(where, line, __func__, bh, | 260 | ext4_journal_abort_handle(where, line, __func__, bh, |
| 261 | handle, err); | 261 | handle, err); |
| 262 | ext4_error_inode(inode, where, line, | ||
| 263 | bh->b_blocknr, | ||
| 264 | "journal_dirty_metadata failed: " | ||
| 265 | "handle type %u started at line %u, " | ||
| 266 | "credits %u/%u, errcode %d", | ||
| 267 | handle->h_type, | ||
| 268 | handle->h_line_no, | ||
| 269 | handle->h_requested_credits, | ||
| 270 | handle->h_buffer_credits, err); | ||
| 262 | } | 271 | } |
| 263 | } else { | 272 | } else { |
| 264 | if (inode) | 273 | if (inode) |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 35f65cf4f318..4410cc3d6ee2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) | |||
| 360 | { | 360 | { |
| 361 | ext4_fsblk_t block = ext4_ext_pblock(ext); | 361 | ext4_fsblk_t block = ext4_ext_pblock(ext); |
| 362 | int len = ext4_ext_get_actual_len(ext); | 362 | int len = ext4_ext_get_actual_len(ext); |
| 363 | ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); | ||
| 364 | ext4_lblk_t last = lblock + len - 1; | ||
| 363 | 365 | ||
| 364 | if (len == 0) | 366 | if (lblock > last) |
| 365 | return 0; | 367 | return 0; |
| 366 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); | 368 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
| 367 | } | 369 | } |
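The strengthened check catches logical-block wraparound, not just empty extents: with 32-bit logical block numbers, lblock = 0xFFFFFFFE and len = 4 gives last = 0x00000001, so lblock > last exposes the overflow and the extent is rejected, where the old len == 0 test would have accepted it.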
| @@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode, | |||
| 387 | if (depth == 0) { | 389 | if (depth == 0) { |
| 388 | /* leaf entries */ | 390 | /* leaf entries */ |
| 389 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); | 391 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
| 392 | struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; | ||
| 393 | ext4_fsblk_t pblock = 0; | ||
| 394 | ext4_lblk_t lblock = 0; | ||
| 395 | ext4_lblk_t prev = 0; | ||
| 396 | int len = 0; | ||
| 390 | while (entries) { | 397 | while (entries) { |
| 391 | if (!ext4_valid_extent(inode, ext)) | 398 | if (!ext4_valid_extent(inode, ext)) |
| 392 | return 0; | 399 | return 0; |
| 400 | |||
| 401 | /* Check for overlapping extents */ | ||
| 402 | lblock = le32_to_cpu(ext->ee_block); | ||
| 403 | len = ext4_ext_get_actual_len(ext); | ||
| 404 | if ((lblock <= prev) && prev) { | ||
| 405 | pblock = ext4_ext_pblock(ext); | ||
| 406 | es->s_last_error_block = cpu_to_le64(pblock); | ||
| 407 | return 0; | ||
| 408 | } | ||
| 393 | ext++; | 409 | ext++; |
| 394 | entries--; | 410 | entries--; |
| 411 | prev = lblock + len - 1; | ||
| 395 | } | 412 | } |
| 396 | } else { | 413 | } else { |
| 397 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); | 414 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
| @@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1834 | depth = ext_depth(inode); | 1851 | depth = ext_depth(inode); |
| 1835 | if (!path[depth].p_ext) | 1852 | if (!path[depth].p_ext) |
| 1836 | goto out; | 1853 | goto out; |
| 1837 | b2 = le32_to_cpu(path[depth].p_ext->ee_block); | 1854 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
| 1838 | b2 &= ~(sbi->s_cluster_ratio - 1); | ||
| 1839 | 1855 | ||
| 1840 | /* | 1856 | /* |
| 1841 | * get the next allocated block if the extent in the path | 1857 | * get the next allocated block if the extent in the path |
| @@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1845 | b2 = ext4_ext_next_allocated_block(path); | 1861 | b2 = ext4_ext_next_allocated_block(path); |
| 1846 | if (b2 == EXT_MAX_BLOCKS) | 1862 | if (b2 == EXT_MAX_BLOCKS) |
| 1847 | goto out; | 1863 | goto out; |
| 1848 | b2 &= ~(sbi->s_cluster_ratio - 1); | 1864 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
| 1849 | } | 1865 | } |
| 1850 | 1866 | ||
| 1851 | /* check for wrap through zero on extent logical start block*/ | 1867 | /* check for wrap through zero on extent logical start block*/ |
| @@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
| 2504 | * extent, we have to mark the cluster as used (store negative | 2520 | * extent, we have to mark the cluster as used (store negative |
| 2505 | * cluster number in partial_cluster). | 2521 | * cluster number in partial_cluster). |
| 2506 | */ | 2522 | */ |
| 2507 | unaligned = pblk & (sbi->s_cluster_ratio - 1); | 2523 | unaligned = EXT4_PBLK_COFF(sbi, pblk); |
| 2508 | if (unaligned && (ee_len == num) && | 2524 | if (unaligned && (ee_len == num) && |
| 2509 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) | 2525 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) |
| 2510 | *partial_cluster = EXT4_B2C(sbi, pblk); | 2526 | *partial_cluster = EXT4_B2C(sbi, pblk); |
| @@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
| 2598 | * accidentally freeing it later on | 2614 | * accidentally freeing it later on |
| 2599 | */ | 2615 | */ |
| 2600 | pblk = ext4_ext_pblock(ex); | 2616 | pblk = ext4_ext_pblock(ex); |
| 2601 | if (pblk & (sbi->s_cluster_ratio - 1)) | 2617 | if (EXT4_PBLK_COFF(sbi, pblk)) |
| 2602 | *partial_cluster = | 2618 | *partial_cluster = |
| 2603 | -((long long)EXT4_B2C(sbi, pblk)); | 2619 | -((long long)EXT4_B2C(sbi, pblk)); |
| 2604 | ex--; | 2620 | ex--; |
| @@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) | |||
| 3753 | { | 3769 | { |
| 3754 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 3770 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 3755 | ext4_lblk_t lblk_start, lblk_end; | 3771 | ext4_lblk_t lblk_start, lblk_end; |
| 3756 | lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); | 3772 | lblk_start = EXT4_LBLK_CMASK(sbi, lblk); |
| 3757 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; | 3773 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; |
| 3758 | 3774 | ||
| 3759 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); | 3775 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); |
| @@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3812 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); | 3828 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); |
| 3813 | 3829 | ||
| 3814 | /* Check towards left side */ | 3830 | /* Check towards left side */ |
| 3815 | c_offset = lblk_start & (sbi->s_cluster_ratio - 1); | 3831 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start); |
| 3816 | if (c_offset) { | 3832 | if (c_offset) { |
| 3817 | lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); | 3833 | lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); |
| 3818 | lblk_to = lblk_from + c_offset - 1; | 3834 | lblk_to = lblk_from + c_offset - 1; |
| 3819 | 3835 | ||
| 3820 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) | 3836 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) |
| @@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3822 | } | 3838 | } |
| 3823 | 3839 | ||
| 3824 | /* Now check towards right. */ | 3840 | /* Now check towards right. */ |
| 3825 | c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); | 3841 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); |
| 3826 | if (allocated_clusters && c_offset) { | 3842 | if (allocated_clusters && c_offset) { |
| 3827 | lblk_from = lblk_start + num_blks; | 3843 | lblk_from = lblk_start + num_blks; |
| 3828 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; | 3844 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; |
| @@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4030 | struct ext4_ext_path *path) | 4046 | struct ext4_ext_path *path) |
| 4031 | { | 4047 | { |
| 4032 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4048 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 4033 | ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4049 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4034 | ext4_lblk_t ex_cluster_start, ex_cluster_end; | 4050 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
| 4035 | ext4_lblk_t rr_cluster_start; | 4051 | ext4_lblk_t rr_cluster_start; |
| 4036 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); | 4052 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
| @@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4048 | (rr_cluster_start == ex_cluster_start)) { | 4064 | (rr_cluster_start == ex_cluster_start)) { |
| 4049 | if (rr_cluster_start == ex_cluster_end) | 4065 | if (rr_cluster_start == ex_cluster_end) |
| 4050 | ee_start += ee_len - 1; | 4066 | ee_start += ee_len - 1; |
| 4051 | map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + | 4067 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
| 4052 | c_offset; | ||
| 4053 | map->m_len = min(map->m_len, | 4068 | map->m_len = min(map->m_len, |
| 4054 | (unsigned) sbi->s_cluster_ratio - c_offset); | 4069 | (unsigned) sbi->s_cluster_ratio - c_offset); |
| 4055 | /* | 4070 | /* |
| @@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4203 | */ | 4218 | */ |
| 4204 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; | 4219 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; |
| 4205 | newex.ee_block = cpu_to_le32(map->m_lblk); | 4220 | newex.ee_block = cpu_to_le32(map->m_lblk); |
| 4206 | cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4221 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4207 | 4222 | ||
| 4208 | /* | 4223 | /* |
| 4209 | * If we are doing bigalloc, check to see if the extent returned | 4224 | * If we are doing bigalloc, check to see if the extent returned |
| @@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4271 | * needed so that future calls to get_implied_cluster_alloc() | 4286 | * needed so that future calls to get_implied_cluster_alloc() |
| 4272 | * work correctly. | 4287 | * work correctly. |
| 4273 | */ | 4288 | */ |
| 4274 | offset = map->m_lblk & (sbi->s_cluster_ratio - 1); | 4289 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4275 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); | 4290 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
| 4276 | ar.goal -= offset; | 4291 | ar.goal -= offset; |
| 4277 | ar.logical -= offset; | 4292 | ar.logical -= offset; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 075763474118..61d49ff22c81 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file, | |||
| 1206 | */ | 1206 | */ |
| 1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | 1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) |
| 1208 | { | 1208 | { |
| 1209 | int retries = 0; | ||
| 1210 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1209 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1211 | struct ext4_inode_info *ei = EXT4_I(inode); | 1210 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 1212 | unsigned int md_needed; | 1211 | unsigned int md_needed; |
| @@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | |||
| 1218 | * in order to allocate nrblocks | 1217 | * in order to allocate nrblocks |
| 1219 | * worse case is one extent per block | 1218 | * worse case is one extent per block |
| 1220 | */ | 1219 | */ |
| 1221 | repeat: | ||
| 1222 | spin_lock(&ei->i_block_reservation_lock); | 1220 | spin_lock(&ei->i_block_reservation_lock); |
| 1223 | /* | 1221 | /* |
| 1224 | * ext4_calc_metadata_amount() has side effects, which we have | 1222 | * ext4_calc_metadata_amount() has side effects, which we have |
| @@ -1238,10 +1236,6 @@ repeat: | |||
| 1238 | ei->i_da_metadata_calc_len = save_len; | 1236 | ei->i_da_metadata_calc_len = save_len; |
| 1239 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1237 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
| 1240 | spin_unlock(&ei->i_block_reservation_lock); | 1238 | spin_unlock(&ei->i_block_reservation_lock); |
| 1241 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
| 1242 | cond_resched(); | ||
| 1243 | goto repeat; | ||
| 1244 | } | ||
| 1245 | return -ENOSPC; | 1239 | return -ENOSPC; |
| 1246 | } | 1240 | } |
| 1247 | ei->i_reserved_meta_blocks += md_needed; | 1241 | ei->i_reserved_meta_blocks += md_needed; |
| @@ -1255,7 +1249,6 @@ repeat: | |||
| 1255 | */ | 1249 | */ |
| 1256 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | 1250 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) |
| 1257 | { | 1251 | { |
| 1258 | int retries = 0; | ||
| 1259 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1252 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1260 | struct ext4_inode_info *ei = EXT4_I(inode); | 1253 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 1261 | unsigned int md_needed; | 1254 | unsigned int md_needed; |
| @@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | |||
| 1277 | * in order to allocate nrblocks | 1270 | * in order to allocate nrblocks |
| 1278 | * worse case is one extent per block | 1271 | * worse case is one extent per block |
| 1279 | */ | 1272 | */ |
| 1280 | repeat: | ||
| 1281 | spin_lock(&ei->i_block_reservation_lock); | 1273 | spin_lock(&ei->i_block_reservation_lock); |
| 1282 | /* | 1274 | /* |
| 1283 | * ext4_calc_metadata_amount() has side effects, which we have | 1275 | * ext4_calc_metadata_amount() has side effects, which we have |
| @@ -1297,10 +1289,6 @@ repeat: | |||
| 1297 | ei->i_da_metadata_calc_len = save_len; | 1289 | ei->i_da_metadata_calc_len = save_len; |
| 1298 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1290 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
| 1299 | spin_unlock(&ei->i_block_reservation_lock); | 1291 | spin_unlock(&ei->i_block_reservation_lock); |
| 1300 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
| 1301 | cond_resched(); | ||
| 1302 | goto repeat; | ||
| 1303 | } | ||
| 1304 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); | 1292 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); |
| 1305 | return -ENOSPC; | 1293 | return -ENOSPC; |
| 1306 | } | 1294 | } |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4d113efa024c..04a5c7504be9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head) | |||
| 3442 | { | 3442 | { |
| 3443 | struct ext4_prealloc_space *pa; | 3443 | struct ext4_prealloc_space *pa; |
| 3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); | 3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); |
| 3445 | |||
| 3446 | BUG_ON(atomic_read(&pa->pa_count)); | ||
| 3447 | BUG_ON(pa->pa_deleted == 0); | ||
| 3445 | kmem_cache_free(ext4_pspace_cachep, pa); | 3448 | kmem_cache_free(ext4_pspace_cachep, pa); |
| 3446 | } | 3449 | } |
| 3447 | 3450 | ||
| @@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |||
| 3455 | ext4_group_t grp; | 3458 | ext4_group_t grp; |
| 3456 | ext4_fsblk_t grp_blk; | 3459 | ext4_fsblk_t grp_blk; |
| 3457 | 3460 | ||
| 3458 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | ||
| 3459 | return; | ||
| 3460 | |||
| 3461 | /* in this short window concurrent discard can set pa_deleted */ | 3461 | /* in this short window concurrent discard can set pa_deleted */ |
| 3462 | spin_lock(&pa->pa_lock); | 3462 | spin_lock(&pa->pa_lock); |
| 3463 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { | ||
| 3464 | spin_unlock(&pa->pa_lock); | ||
| 3465 | return; | ||
| 3466 | } | ||
| 3467 | |||
| 3463 | if (pa->pa_deleted == 1) { | 3468 | if (pa->pa_deleted == 1) { |
| 3464 | spin_unlock(&pa->pa_lock); | 3469 | spin_unlock(&pa->pa_lock); |
| 3465 | return; | 3470 | return; |
| @@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |||
| 4121 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | 4126 | ext4_get_group_no_and_offset(sb, goal, &group, &block); |
| 4122 | 4127 | ||
| 4123 | /* set up allocation goals */ | 4128 | /* set up allocation goals */ |
| 4124 | ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); | 4129 | ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); |
| 4125 | ac->ac_status = AC_STATUS_CONTINUE; | 4130 | ac->ac_status = AC_STATUS_CONTINUE; |
| 4126 | ac->ac_sb = sb; | 4131 | ac->ac_sb = sb; |
| 4127 | ac->ac_inode = ar->inode; | 4132 | ac->ac_inode = ar->inode; |
| @@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4663 | * blocks at the beginning or the end unless we are explicitly | 4668 | * blocks at the beginning or the end unless we are explicitly |
| 4664 | * requested to avoid doing so. | 4669 | * requested to avoid doing so. |
| 4665 | */ | 4670 | */ |
| 4666 | overflow = block & (sbi->s_cluster_ratio - 1); | 4671 | overflow = EXT4_PBLK_COFF(sbi, block); |
| 4667 | if (overflow) { | 4672 | if (overflow) { |
| 4668 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { | 4673 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { |
| 4669 | overflow = sbi->s_cluster_ratio - overflow; | 4674 | overflow = sbi->s_cluster_ratio - overflow; |
| @@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4677 | count += overflow; | 4682 | count += overflow; |
| 4678 | } | 4683 | } |
| 4679 | } | 4684 | } |
| 4680 | overflow = count & (sbi->s_cluster_ratio - 1); | 4685 | overflow = EXT4_LBLK_COFF(sbi, count); |
| 4681 | if (overflow) { | 4686 | if (overflow) { |
| 4682 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { | 4687 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { |
| 4683 | if (count > overflow) | 4688 | if (count > overflow) |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c977f4e4e63b..1f7784de05b6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb) | |||
| 792 | } | 792 | } |
| 793 | 793 | ||
| 794 | ext4_es_unregister_shrinker(sbi); | 794 | ext4_es_unregister_shrinker(sbi); |
| 795 | del_timer(&sbi->s_err_report); | 795 | del_timer_sync(&sbi->s_err_report); |
| 796 | ext4_release_system_zone(sb); | 796 | ext4_release_system_zone(sb); |
| 797 | ext4_mb_release(sb); | 797 | ext4_mb_release(sb); |
| 798 | ext4_ext_release(sb); | 798 | ext4_ext_release(sb); |
| @@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb) | |||
| 3316 | } | 3316 | } |
| 3317 | 3317 | ||
| 3318 | 3318 | ||
| 3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | 3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb) |
| 3320 | { | 3320 | { |
| 3321 | ext4_fsblk_t resv_clusters; | 3321 | ext4_fsblk_t resv_clusters; |
| 3322 | 3322 | ||
| 3323 | /* | 3323 | /* |
| 3324 | * There's no need to reserve anything when we aren't using extents. | ||
| 3325 | * The space estimates are exact, there are no unwritten extents, | ||
| 3326 | * hole punching doesn't need new metadata... This is needed especially | ||
| 3327 | * to keep ext2/3 backward compatibility. | ||
| 3328 | */ | ||
| 3329 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) | ||
| 3330 | return 0; | ||
| 3331 | /* | ||
| 3324 | * By default we reserve 2% or 4096 clusters, whichever is smaller. | 3332 | * By default we reserve 2% or 4096 clusters, whichever is smaller. |
| 3325 | * This should cover the situations where we can not afford to run | 3333 | * This should cover the situations where we can not afford to run |
| 3326 | * out of space like for example punch hole, or converting | 3334 | * out of space like for example punch hole, or converting |
| @@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | |||
| 3328 | * allocation would require 1, or 2 blocks, higher numbers are | 3336 | * allocation would require 1, or 2 blocks, higher numbers are |
| 3329 | * very rare. | 3337 | * very rare. |
| 3330 | */ | 3338 | */ |
| 3331 | resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; | 3339 | resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >> |
| 3340 | EXT4_SB(sb)->s_cluster_bits; | ||
| 3332 | 3341 | ||
| 3333 | do_div(resv_clusters, 50); | 3342 | do_div(resv_clusters, 50); |
| 3334 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); | 3343 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); |
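Worked through the new helper: reserved clusters are min(total_clusters / 50, 4096), i.e. 2% capped at 4096 clusters. A 100 GiB filesystem with 4 KiB blocks and no bigalloc (cluster_bits = 0) has 26,214,400 clusters; 2% would be 524,288, so the 4096-cluster cap applies (16 MiB reserved). And since filesystems without the extents feature have exact space accounting and no unwritten extents to convert, the new early return reserves nothing on them, preserving ext2/ext3 behaviour.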
| @@ -4071,10 +4080,10 @@ no_journal: | |||
| 4071 | "available"); | 4080 | "available"); |
| 4072 | } | 4081 | } |
| 4073 | 4082 | ||
| 4074 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi)); | 4083 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb)); |
| 4075 | if (err) { | 4084 | if (err) { |
| 4076 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " | 4085 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " |
| 4077 | "reserved pool", ext4_calculate_resv_clusters(sbi)); | 4086 | "reserved pool", ext4_calculate_resv_clusters(sb)); |
| 4078 | goto failed_mount4a; | 4087 | goto failed_mount4a; |
| 4079 | } | 4088 | } |
| 4080 | 4089 | ||
| @@ -4184,7 +4193,7 @@ failed_mount_wq: | |||
| 4184 | } | 4193 | } |
| 4185 | failed_mount3: | 4194 | failed_mount3: |
| 4186 | ext4_es_unregister_shrinker(sbi); | 4195 | ext4_es_unregister_shrinker(sbi); |
| 4187 | del_timer(&sbi->s_err_report); | 4196 | del_timer_sync(&sbi->s_err_report); |
| 4188 | if (sbi->s_flex_groups) | 4197 | if (sbi->s_flex_groups) |
| 4189 | ext4_kvfree(sbi->s_flex_groups); | 4198 | ext4_kvfree(sbi->s_flex_groups); |
| 4190 | percpu_counter_destroy(&sbi->s_freeclusters_counter); | 4199 | percpu_counter_destroy(&sbi->s_freeclusters_counter); |
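Both del_timer() calls become del_timer_sync() because del_timer() only cancels a pending timer; if the s_err_report handler is already executing on another CPU, it keeps running while the teardown path frees the structures it touches. del_timer_sync() additionally waits for any in-flight handler to finish, which is the required form whenever the timer's payload is about to be destroyed.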
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 52032647dd4a..5fa344afb49a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
| 702 | read_lock(&journal->j_state_lock); | 702 | read_lock(&journal->j_state_lock); |
| 703 | #ifdef CONFIG_JBD2_DEBUG | 703 | #ifdef CONFIG_JBD2_DEBUG |
| 704 | if (!tid_geq(journal->j_commit_request, tid)) { | 704 | if (!tid_geq(journal->j_commit_request, tid)) { |
| 705 | printk(KERN_EMERG | 705 | printk(KERN_ERR |
| 706 | "%s: error: j_commit_request=%d, tid=%d\n", | 706 | "%s: error: j_commit_request=%d, tid=%d\n", |
| 707 | __func__, journal->j_commit_request, tid); | 707 | __func__, journal->j_commit_request, tid); |
| 708 | } | 708 | } |
| @@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
| 718 | } | 718 | } |
| 719 | read_unlock(&journal->j_state_lock); | 719 | read_unlock(&journal->j_state_lock); |
| 720 | 720 | ||
| 721 | if (unlikely(is_journal_aborted(journal))) { | 721 | if (unlikely(is_journal_aborted(journal))) |
| 722 | printk(KERN_EMERG "journal commit I/O error\n"); | ||
| 723 | err = -EIO; | 722 | err = -EIO; |
| 724 | } | ||
| 725 | return err; | 723 | return err; |
| 726 | } | 724 | } |
| 727 | 725 | ||
| @@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1527 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && | 1525 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && |
| 1528 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1526 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
| 1529 | /* Can't have checksum v1 and v2 on at the same time! */ | 1527 | /* Can't have checksum v1 and v2 on at the same time! */ |
| 1530 | printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " | 1528 | printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " |
| 1531 | "at the same time!\n"); | 1529 | "at the same time!\n"); |
| 1532 | goto out; | 1530 | goto out; |
| 1533 | } | 1531 | } |
| 1534 | 1532 | ||
| 1535 | if (!jbd2_verify_csum_type(journal, sb)) { | 1533 | if (!jbd2_verify_csum_type(journal, sb)) { |
| 1536 | printk(KERN_ERR "JBD: Unknown checksum type\n"); | 1534 | printk(KERN_ERR "JBD2: Unknown checksum type\n"); |
| 1537 | goto out; | 1535 | goto out; |
| 1538 | } | 1536 | } |
| 1539 | 1537 | ||
| @@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1541 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1539 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
| 1542 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); | 1540 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); |
| 1543 | if (IS_ERR(journal->j_chksum_driver)) { | 1541 | if (IS_ERR(journal->j_chksum_driver)) { |
| 1544 | printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); | 1542 | printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); |
| 1545 | err = PTR_ERR(journal->j_chksum_driver); | 1543 | err = PTR_ERR(journal->j_chksum_driver); |
| 1546 | journal->j_chksum_driver = NULL; | 1544 | journal->j_chksum_driver = NULL; |
| 1547 | goto out; | 1545 | goto out; |
| @@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1550 | 1548 | ||
| 1551 | /* Check superblock checksum */ | 1549 | /* Check superblock checksum */ |
| 1552 | if (!jbd2_superblock_csum_verify(journal, sb)) { | 1550 | if (!jbd2_superblock_csum_verify(journal, sb)) { |
| 1553 | printk(KERN_ERR "JBD: journal checksum error\n"); | 1551 | printk(KERN_ERR "JBD2: journal checksum error\n"); |
| 1554 | goto out; | 1552 | goto out; |
| 1555 | } | 1553 | } |
| 1556 | 1554 | ||
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
             journal->j_chksum_driver = crypto_alloc_shash("crc32c",
                                                           0, 0);
             if (IS_ERR(journal->j_chksum_driver)) {
-                printk(KERN_ERR "JBD: Cannot load crc32c "
+                printk(KERN_ERR "JBD2: Cannot load crc32c "
                        "driver.\n");
                 journal->j_chksum_driver = NULL;
                 return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
 #ifdef CONFIG_JBD2_DEBUG
     int n = atomic_read(&nr_journal_heads);
     if (n)
-        printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+        printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
 #endif
     jbd2_remove_jbd_stats_proc_entry();
     jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50428b1..3b6bb19d60b1 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
                         be32_to_cpu(tmp->h_sequence))) {
                         brelse(obh);
                         success = -EIO;
-                        printk(KERN_ERR "JBD: Invalid "
+                        printk(KERN_ERR "JBD2: Invalid "
                                "checksum recovering "
                                "block %llu in log\n",
                                blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32573bb..8360674c85bc 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
                 jbd2_alloc(jh2bh(jh)->b_size,
                            GFP_NOFS);
             if (!frozen_buffer) {
-                printk(KERN_EMERG
+                printk(KERN_ERR
                        "%s: OOM for frozen_buffer\n",
                        __func__);
                 JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
     if (!jh->b_committed_data) {
         committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
         if (!committed_data) {
-            printk(KERN_EMERG "%s: No memory for committed data\n",
+            printk(KERN_ERR "%s: No memory for committed data\n",
                    __func__);
             err = -ENOMEM;
             goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
          * once a transaction -bzzz
          */
         jh->b_modified = 1;
-        J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+        if (handle->h_buffer_credits <= 0) {
+            ret = -ENOSPC;
+            goto out_unlock_bh;
+        }
         handle->h_buffer_credits--;
     }
 
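Running out of buffer credits in jbd2_journal_dirty_metadata() now returns -ENOSPC instead of tripping J_ASSERT_JH(), and the WARN_ON(ret) at the exit label is dropped further down for the same reason: not every failure is a kernel bug anymore. A hedged sketch of how a caller might react (the retry policy here is hypothetical):

    ret = jbd2_journal_dirty_metadata(handle, bh);
    if (ret == -ENOSPC) {
        /* out of credits: try to grow the handle once, then retry */
        if (jbd2_journal_extend(handle, 1) == 0)
            ret = jbd2_journal_dirty_metadata(handle, bh);
    }
    if (ret)
        jbd2_journal_abort(handle->h_transaction->t_journal, ret);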
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
         JBUFFER_TRACE(jh, "fastpath");
         if (unlikely(jh->b_transaction !=
                      journal->j_running_transaction)) {
-            printk(KERN_EMERG "JBD: %s: "
+            printk(KERN_ERR "JBD2: %s: "
                    "jh->b_transaction (%llu, %p, %u) != "
                    "journal->j_running_transaction (%p, %u)",
                    journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
         JBUFFER_TRACE(jh, "already on other transaction");
         if (unlikely(jh->b_transaction !=
                      journal->j_committing_transaction)) {
-            printk(KERN_EMERG "JBD: %s: "
+            printk(KERN_ERR "JBD2: %s: "
                    "jh->b_transaction (%llu, %p, %u) != "
                    "journal->j_committing_transaction (%p, %u)",
                    journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
             ret = -EINVAL;
         }
         if (unlikely(jh->b_next_transaction != transaction)) {
-            printk(KERN_EMERG "JBD: %s: "
+            printk(KERN_ERR "JBD2: %s: "
                    "jh->b_next_transaction (%llu, %p, %u) != "
                    "transaction (%p, %u)",
                    journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
     jbd2_journal_put_journal_head(jh);
 out:
     JBUFFER_TRACE(jh, "exit");
-    WARN_ON(ret);    /* All errors are bugs, so dump the stack */
     return ret;
 }
 
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
 
 #include <uapi/linux/auxvec.h>
 
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
 #endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
     ATA_HORKAGE_DUMP_ID       = (1 << 16),  /* dump IDENTIFY data */
     ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17),  /* Set max sects to 65535 */
     ATA_HORKAGE_ATAPI_DMADIR  = (1 << 18),  /* device requires dmadir */
+    ATA_HORKAGE_NO_NCQ_TRIM   = (1 << 19),  /* don't use queued TRIM */
 
     /* DMA mask for user DMA control: User visible values; DO NOT
        renumber */
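ATA_HORKAGE_NO_NCQ_TRIM gives libata a per-device quirk bit for drives whose queued TRIM is broken. Presumably it is consulted before advertising queued TRIM, along the lines of this sketch (the test site is an assumption, not part of this hunk):

    /* sketch: horkage bits gate optional per-device features */
    if (ata_id_has_trim(dev->id) &&
        !(dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)) {
        /* safe to issue DATA SET MANAGEMENT as a queued command */
    }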
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
     __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;              \
     extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;      \
     __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;             \
+    extern __PCPU_ATTRS(sec) __typeof__(type) name;           \
     __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak           \
     __typeof__(type) name
 #else
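The added line gives the weak per-CPU definition a matching extern declaration immediately before it, apparently so that tools which insist on declare-before-define (sparse, for instance) see a consistent pair; treat that rationale as inferred, not stated. Reduced to ordinary C, with the section attributes stripped and a hypothetical variable name, the pattern is:

    extern __typeof__(int) my_pcpu_var;                 /* declaration */
    __attribute__((weak)) __typeof__(int) my_pcpu_var;  /* weak definition */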
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c627ee..61e1935c91b1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
 };
 
 struct ib_udata {
-    void __user *inbuf;
+    const void __user *inbuf;
     void __user *outbuf;
     size_t inlen;
     size_t outlen;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8b729c278b64..bc1dcabe9217 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
         struct cgroup *cgrp = dentry->d_fsdata;
 
         BUG_ON(!(cgroup_is_dead(cgrp)));
+
+        /*
+         * XXX: cgrp->id is only used to look up css's.  As cgroup
+         * and css's lifetimes will be decoupled, it should be made
+         * per-subsystem and moved to css->id so that lookups are
+         * successful until the target css is released.
+         */
+        idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+        cgrp->id = -1;
+
         call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
     } else {
         struct cfent *cfe = __d_cfe(dentry);
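Paired with the later hunk that deletes the same idr_remove() from cgroup_destroy_css_killed(), this moves id release from "cgroup marked dead" to "last dentry reference dropped", so a concurrent lookup can no longer find a cgroup whose RCU grace period has already begun. The lifetime rule in miniature (hypothetical names):

    /* sketch: drop the lookup id only at final release */
    static void obj_final_release(struct obj *o)
    {
        idr_remove(&obj_idr, o->id);           /* lookups miss from here on */
        o->id = -1;
        call_rcu(&o->rcu_head, obj_free_rcu);  /* memory freed after a grace period */
    }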
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
     struct cgroup_subsys_state *css =
         container_of(ref, struct cgroup_subsys_state, refcnt);
 
+    rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
     call_rcu(&css->rcu_head, css_free_rcu_fn);
 }
 
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
     list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
     root->number_of_cgroups++;
 
-    /* each css holds a ref to the cgroup's dentry and the parent css */
-    for_each_root_subsys(root, ss) {
-        struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-        dget(dentry);
-        css_get(css->parent);
-    }
-
     /* hold a ref to the parent's dentry */
     dget(parent->dentry);
 
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         if (err)
             goto err_destroy;
 
+        /* each css holds a ref to the cgroup's dentry and parent css */
+        dget(dentry);
+        css_get(css->parent);
+
+        /* mark it consumed for error path */
+        css_ar[ss->subsys_id] = NULL;
+
         if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
             parent->parent) {
             pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4491,6 +4501,14 @@ err_free_cgrp:
     return err;
 
 err_destroy:
+    for_each_root_subsys(root, ss) {
+        struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+        if (css) {
+            percpu_ref_cancel_init(&css->refcnt);
+            ss->css_free(css);
+        }
+    }
     cgroup_destroy_locked(cgrp);
     mutex_unlock(&cgroup_mutex);
     mutex_unlock(&dentry->d_inode->i_mutex);
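Taken together, the three cgroup_create() hunks establish one rule: the dentry and parent-css references are taken only after a css is successfully onlined, its css_ar[] slot is then set to NULL to mark it consumed, and err_destroy frees only the slots still non-NULL. The same consume-or-clean-up shape in a reduced sketch (hypothetical names):

    /* sketch: NULL each slot as the success path consumes it */
    for (i = 0; i < n; i++) {
        if (activate(objs[i]) < 0)
            goto err;
        objs[i] = NULL;           /* ownership passed to the active set */
    }
    return 0;
    err:
    for (i = 0; i < n; i++)
        if (objs[i])              /* never consumed, so free it here */
            release(objs[i]);
    return -EINVAL;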
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
      * will be invoked to perform the rest of destruction once the
      * percpu refs of all css's are confirmed to be killed.
      */
-    for_each_root_subsys(cgrp->root, ss)
-        kill_css(cgroup_css(cgrp, ss));
+    for_each_root_subsys(cgrp->root, ss) {
+        struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
+        if (css)
+            kill_css(css);
+    }
 
     /*
      * Mark @cgrp dead.  This prevents further task migration and child
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
     /* delete this cgroup from parent->children */
     list_del_rcu(&cgrp->sibling);
 
-    /*
-     * We should remove the cgroup object from idr before its grace
-     * period starts, so we won't be looking up a cgroup while the
-     * cgroup is being freed.
-     */
-    idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-    cgrp->id = -1;
-
     dput(d);
 
     set_bit(CGRP_RELEASABLE, &parent->flags);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa197517..aa6a8aadb911 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);
 
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa6736751..eacb8bd8cab4 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
     list_for_each_entry(tmp, &pm_vt_switch_list, head) {
         if (tmp->dev == dev) {
             list_del(&tmp->head);
+            kfree(tmp);
             break;
         }
     }
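This closes a leak: the entry is kmalloc()ed when a driver registers via pm_vt_switch_required(), and unregistering previously only unlinked it. The paired lifetime, condensed (not the literal functions):

    /* register: allocate and link */
    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry)
        return;
    list_add(&entry->head, &pm_vt_switch_list);

    /* unregister: unlink and free; the kfree() was the missing half */
    list_del(&entry->head);
    kfree(entry);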
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f53d0b4..2dcb37736d84 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
         kallsymopt="${kallsymopt} --all-symbols"
     fi
 
-    kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+    if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+        kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+    fi
 
     local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
                   ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 419491d8e7d2..6625699f497c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4334,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
         }
         err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
                            PEER__RECV, &ad);
-        if (err)
+        if (err) {
             selinux_netlbl_err(skb, err, 0);
+            return err;
+        }
     }
 
     if (secmark_active) {
@@ -5586,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p,
         /* Check for ptracing, and update the task SID if ok.
            Otherwise, leave SID unchanged and fail. */
         ptsid = 0;
-        task_lock(p);
+        rcu_read_lock();
         tracer = ptrace_parent(p);
         if (tracer)
             ptsid = task_sid(tracer);
-        task_unlock(p);
+        rcu_read_unlock();
 
         if (tracer) {
             error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
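ptrace_parent() follows an RCU-protected pointer, so an RCU read-side critical section, not task_lock(), is the appropriate protection here; the SID, a plain integer, is copied out while the pointer is still stable. The rule being applied (comments are explanatory, not from the patch):

    rcu_read_lock();
    tracer = ptrace_parent(p);     /* pointer stable only inside the section */
    if (tracer)
        ptsid = task_sid(tracer);  /* capture the value we need */
    rcu_read_unlock();
    /* after unlock, use ptsid; do not dereference tracer again */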
