124 files changed, 1207 insertions, 873 deletions
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
index 8eb88e974055..711f75e189eb 100644
--- a/Documentation/hwmon/tmp401
+++ b/Documentation/hwmon/tmp401
@@ -20,7 +20,7 @@ Supported chips:
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
   * Texas Instruments TMP435
     Prefix: 'tmp435'
-    Addresses scanned: I2C 0x37, 0x48 - 0x4f
+    Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
 
 Authors:
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
index 43e94ea6d2ca..263b907517ac 100644
--- a/Documentation/target/tcmu-design.txt
+++ b/Documentation/target/tcmu-design.txt
@@ -15,8 +15,7 @@ Contents:
 a) Discovering and configuring TCMU uio devices
 b) Waiting for events on the device(s)
 c) Managing the command ring
-3) Command filtering and pass_level
-4) A final note
+3) A final note
 
 
 TCM Userspace Design
@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
     /* Process events from cmd ring until we catch up with cmd_head */
     while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
 
-        if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+        if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
             uint8_t *cdb = (void *)mb + ent->req.cdb_off;
             bool success = true;
 
@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
                 ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
             }
         }
+        else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
+            /* Tell the kernel we didn't handle unknown opcodes */
+            ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
+        }
         else {
-            /* Do nothing for PAD entries */
+            /* Do nothing for PAD entries except update cmd_tail */
         }
 
         /* update cmd_tail */
@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
 
-Command filtering and pass_level
---------------------------------
-
-TCMU supports a "pass_level" option with valid values of 0 or 1. When
-the value is 0 (the default), nearly all SCSI commands received for
-the device are passed through to the handler. This allows maximum
-flexibility but increases the amount of code required by the handler,
-to support all mandatory SCSI commands. If pass_level is set to 1,
-then only IO-related commands are presented, and the rest are handled
-by LIO's in-kernel command emulation. The commands presented at level
-1 include all versions of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
-
 A final note
 ------------
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 1bbacdaf5cf3..e30871880fdb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2427,7 +2427,6 @@ L: linux-security-module@vger.kernel.org
 S: Supported
 F: include/linux/capability.h
 F: include/uapi/linux/capability.h
-F: security/capability.c
 F: security/commoncap.c
 F: kernel/capability.c
 
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 86217db2937a..992736b5229b 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
     imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
     imx25-karo-tx25.dtb \
     imx25-pdk.dtb
-dtb-$(CONFIG_SOC_IMX31) += \
+dtb-$(CONFIG_SOC_IMX27) += \
     imx27-apf27.dtb \
     imx27-apf27dev.dtb \
     imx27-eukrea-mbimxsd27-baseboard.dtb \
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 5c42d259fa68..901739fcb85a 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,7 +80,3 @@
         status = "okay";
     };
 };
-
-&rtc {
-    system-power-controller;
-};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 87fc7a35e802..156d05efcb70 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -654,7 +654,7 @@
     wlcore: wlcore@2 {
         compatible = "ti,wl1271";
         reg = <2>;
-        interrupt-parent = <&gpio1>;
+        interrupt-parent = <&gpio0>;
         interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
         ref-clock-frequency = <38400000>;
     };
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 173ffa479ad3..792394dd0f2a 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -736,7 +736,7 @@
 
         display-timings {
             timing-0 {
-                clock-frequency = <0>;
+                clock-frequency = <57153600>;
                 hactive = <720>;
                 vactive = <1280>;
                 hfront-porch = <5>;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 6951b66d1ab7..bc215e4b75fd 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -533,7 +533,7 @@
 
         fec: ethernet@1002b000 {
             compatible = "fsl,imx27-fec";
-            reg = <0x1002b000 0x4000>;
+            reg = <0x1002b000 0x1000>;
             interrupts = <50>;
             clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
                      <&clks IMX27_CLK_FEC_AHB_GATE>;
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index 134d3f27a8ec..921de6605f07 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -110,6 +110,8 @@
     nand@0,0 {
         reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
         nand-bus-width = <16>;
+        gpmc,device-width = <2>;
+        ti,nand-ecc-opt = "sw";
 
         gpmc,sync-clk-ps = <0>;
         gpmc,cs-on-ns = <0>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 0ca4a3eaf65d..fbbb1915c6a9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_ISP1760=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f8ccc21fa032..4e7f40c577e6 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -33,7 +33,9 @@ ret_fast_syscall:
  UNWIND(.fnstart )
  UNWIND(.cantunwind )
     disable_irq                 @ disable interrupts
-    ldr r1, [tsk, #TI_FLAGS]
+    ldr r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+    tst r1, #_TIF_SYSCALL_WORK
+    bne __sys_trace_return
     tst r1, #_TIF_WORK_MASK
     bne fast_work_pending
     asm_trace_hardirqs_on
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 213919ba326f..3b8c2833c537 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int of_pmu_irq_cfg(struct platform_device *pdev)
 {
     int i, irq;
-    int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-
-    if (!irqs)
-        return -ENOMEM;
+    int *irqs;
 
     /* Don't bother with PPIs; they're already affine */
     irq = platform_get_irq(pdev, 0);
     if (irq >= 0 && irq_is_percpu(irq))
         return 0;
 
+    irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+    if (!irqs)
+        return -ENOMEM;
+
     for (i = 0; i < pdev->num_resources; ++i) {
         struct device_node *dn;
         int cpu;
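The of_pmu_irq_cfg() hunk above moves the kcalloc() below the early PPI return, so the "already affine" path no longer leaks the freshly allocated array. A minimal userspace sketch of the same allocate-after-the-early-exit ordering, with made-up helper names standing in for the kernel calls:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Stand-in for irq_is_percpu(); purely illustrative. */
static bool irq_is_percpu_stub(int irq)
{
    return irq == 16;
}

/* Check the cheap early-exit condition first, then allocate, so the
 * "nothing to do" path never owns memory it would have to free. */
static int cfg_irqs(int first_irq, int nr, int **out)
{
    int *irqs;

    *out = NULL;
    if (first_irq >= 0 && irq_is_percpu_stub(first_irq))
        return 0;               /* early return, nothing allocated yet */

    irqs = calloc(nr, sizeof(*irqs));
    if (!irqs)
        return -1;              /* -ENOMEM in the kernel version */

    *out = irqs;                /* caller owns and frees the array */
    return 0;
}

int main(void)
{
    int *irqs;

    if (cfg_irqs(16, 4, &irqs) == 0 && !irqs)
        printf("per-cpu IRQ: skipped the allocation entirely\n");
    free(irqs);
    return 0;
}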
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 4d60005e9277..6d0893a3828e 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void)
     struct device_node *np;
 
     np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
-    if (WARN_ON(!np ||
-                !of_find_property(np, "interrupt-controller", NULL)))
-        pr_warn("Outdated DT detected, system is about to crash!!!\n");
+    if (WARN_ON(!np))
+        return;
+
+    if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+        pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+        /* map GPC, so that at least CPUidle and WARs keep working */
+        gpc_base = of_iomap(np, 0);
+    }
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev)
     struct regulator *pu_reg;
     int ret;
 
+    /* bail out if DT too old and doesn't provide the necessary info */
+    if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
+        return 0;
+
     pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
     if (PTR_ERR(pu_reg) == -ENODEV)
         pu_reg = NULL;
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index f1aeb54fabe3..2385052b0ce1 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev)
     struct resource *res;
     struct cplds *fpga;
     int ret;
-    unsigned int base_irq = 0;
+    int base_irq;
     unsigned long irqflags = 0;
 
     fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4e6ef896c619..7186382672b5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
         }
 
         /*
-         * Find the first non-section-aligned page, and point
+         * Find the first non-pmd-aligned page, and point
          * memblock_limit at it. This relies on rounding the
-         * limit down to be section-aligned, which happens at
-         * the end of this function.
+         * limit down to be pmd-aligned, which happens at the
+         * end of this function.
          *
         * With this algorithm, the start or end of almost any
-         * bank can be non-section-aligned. The only exception
-         * is that the start of the bank 0 must be section-
+         * bank can be non-pmd-aligned. The only exception is
+         * that the start of the bank 0 must be section-
          * aligned, since otherwise memory would need to be
          * allocated when mapping the start of bank 0, which
          * occurs before any free memory is mapped.
          */
         if (!memblock_limit) {
-            if (!IS_ALIGNED(block_start, SECTION_SIZE))
+            if (!IS_ALIGNED(block_start, PMD_SIZE))
                 memblock_limit = block_start;
-            else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+            else if (!IS_ALIGNED(block_end, PMD_SIZE))
                 memblock_limit = arm_lowmem_limit;
         }
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
     high_memory = __va(arm_lowmem_limit - 1) + 1;
 
     /*
-     * Round the memblock limit down to a section size. This
+     * Round the memblock limit down to a pmd size. This
      * helps to ensure that we will allocate memory from the
-     * last full section, which should be mapped.
+     * last full pmd, which should be mapped.
      */
     if (memblock_limit)
-        memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+        memblock_limit = round_down(memblock_limit, PMD_SIZE);
     if (!memblock_limit)
         memblock_limit = arm_lowmem_limit;
 
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d4e162d35b34..7cc3be9fa7c6 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-    struct pci_controller *controller = bridge->bus->sysdata;
-
-    ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+    /*
+     * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+     * here, pci_create_root_bus() has been called by someone else and
+     * sysdata is likely to be different from what we expect. Let it go in
+     * that case.
+     */
+    if (!bridge->dev.parent) {
+        struct pci_controller *controller = bridge->bus->sysdata;
+        ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+    }
     return 0;
 }
 
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
index e1fe63051136..597899ad5438 100644
--- a/arch/mips/ath79/prom.c
+++ b/arch/mips/ath79/prom.c
@@ -1,6 +1,7 @@
 /*
  * Atheros AR71XX/AR724X/AR913X specific prom routines
  *
+ * Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
  * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
@@ -25,12 +26,14 @@ void __init prom_init(void)
 {
     fw_init_cmdline();
 
+#ifdef CONFIG_BLK_DEV_INITRD
     /* Read the initrd address from the firmware environment */
     initrd_start = fw_getenvl("initrd_start");
     if (initrd_start) {
         initrd_start = KSEG0ADDR(initrd_start);
         initrd_end = initrd_start + fw_getenvl("initrd_size");
     }
+#endif
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 002680648dcb..b2a577ebce0b 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m
 CONFIG_USB_C67X00_HCD=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_ISP1760=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=m
 CONFIG_USB_R8A66597_HCD=m
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d2bfbc2e8995..51f57d841662 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -29,7 +29,7 @@
 int kgdb_early_setup;
 #endif
 
-static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+static DECLARE_BITMAP(irq_map, NR_IRQS);
 
 int allocate_irqno(void)
 {
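DECLARE_BITMAP(irq_map, NR_IRQS) reserves enough unsigned longs by rounding the bit count up, whereas the removed NR_IRQS / BITS_PER_LONG arithmetic truncates and can under-allocate whenever NR_IRQS is not a multiple of the word size. A small host-side sketch of that effect, with the macros re-declared locally (they are not taken from kernel headers here) and an illustrative NR_IRQS value:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG       (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n)    (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)  unsigned long name[BITS_TO_LONGS(bits)]

#define NR_IRQS 200     /* illustrative value, not any platform's real count */

static DECLARE_BITMAP(irq_map, NR_IRQS);

int main(void)
{
    /* On a 64-bit host: 200 bits round up to 4 longs, while the old
     * truncating 200 / 64 arithmetic would only have reserved 3. */
    printf("words reserved: %zu (truncating math gives %zu)\n",
           sizeof(irq_map) / sizeof(irq_map[0]),
           (size_t)(NR_IRQS / BITS_PER_LONG));
    return 0;
}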
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index fd528d7ea278..336708ae5c5b 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
 static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 {
     memcpy((void *)dst, start, end - start);
-    dma_cache_wback((unsigned long)start, end - start);
+    dma_cache_wback(dst, end - start);
     local_flush_icache_range(dst, dst + (end - start));
     instruction_hazard();
 }
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 7d12c0dded3d..77e64942f004 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm)
 FEXPORT(__strnlen_\func\()_nocheck_asm)
     move        v0, a0
     PTR_ADDU    a1, a0          # stop pointer
-1:  beq         v0, a1, 1f      # limit reached?
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+    .set        noat
+    li          AT, 1
+#endif
+    beq         v0, a1, 1f      # limit reached?
 .ifeqs "\func", "kernel"
     EX(lb, t0, (v0), .Lfault\@)
 .else
@@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
 .endif
     .set        noreorder
     bnez        t0, 1b
-1:  PTR_ADDIU   v0, 1
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+    PTR_ADDIU   v0, 1
+#else
+    PTR_ADDU    v0, AT
+    .set        at
+#endif
     .set        reorder
     PTR_SUBU    v0, a0
     jr          ra
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6e424d185d0..a6cfdabb6054 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,7 +24,8 @@ typedef struct {
     unsigned int icache_line_size;
     unsigned int ecache_size;
     unsigned int ecache_line_size;
-    int core_id;
+    unsigned short sock_id;
+    unsigned short core_id;
     int proc_id;
 } cpuinfo_sparc;
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index dc165ebdf05a..2a52c91d2c8a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
     " sllx %1, 32, %1\n"
     " or %0, %1, %0\n"
     " .previous\n"
+    " .section .sun_m7_2insn_patch, \"ax\"\n"
+    " .word 661b\n"
+    " sethi %%uhi(%4), %1\n"
+    " sethi %%hi(%4), %0\n"
+    " .word 662b\n"
+    " or %1, %%ulo(%4), %1\n"
+    " or %0, %%lo(%4), %0\n"
+    " .word 663b\n"
+    " sllx %1, 32, %1\n"
+    " or %0, %1, %0\n"
+    " .previous\n"
     : "=r" (mask), "=r" (tmp)
     : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
            _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
            _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
       "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
            _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
+           _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
+      "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+           _PAGE_CP_4V | _PAGE_E_4V |
            _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
     return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
     " andn %0, %4, %0\n"
     " or %0, %5, %0\n"
     " .previous\n"
+    " .section .sun_m7_2insn_patch, \"ax\"\n"
+    " .word 661b\n"
+    " andn %0, %6, %0\n"
+    " or %0, %5, %0\n"
+    " .previous\n"
     : "=r" (val)
     : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
-      "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+      "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
+      "i" (_PAGE_CP_4V));
 
     return __pgprot(val);
 }
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index ed8f071132e4..d1761df5cca6 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu) (cpu_data(cpu).core_id)
-#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_map[NR_CPUS];
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
     return &cpu_core_map[cpu];
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 6fd4436d32f0..ec9c04de3664 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry {
 };
 extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
     __sun4v_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
+    __sun_m7_2insn_patch_end;
 
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 07cc49e541f4..0f679421b468 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
                              struct sun4v_1insn_patch_entry *);
 void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
                              struct sun4v_2insn_patch_entry *);
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                              struct sun4v_2insn_patch_entry *);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 94e392bdee7d..814fb1729b12 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
         err = -ENOMEM;
         goto err1;
     }
-    memset(grpci2priv, 0, sizeof(*grpci2priv));
     priv->regs = regs;
     priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
     priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 26c80e18d7b1..6f80936e0eea 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
     }
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+                                 char *srch_val,
+                                 void (*func)(struct mdesc_handle *, u64, int),
+                                 u64 val, int depth)
 {
-    u64 a;
-
-    mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-        u64 t = mdesc_arc_target(hp, a);
-        const char *name;
-        const u64 *id;
+    u64 arc;
 
-        name = mdesc_node_name(hp, t);
-        if (!strcmp(name, "cpu")) {
-            id = mdesc_get_property(hp, t, "id", NULL);
-            if (*id < NR_CPUS)
-                cpu_data(*id).core_id = core_id;
-        } else {
-            u64 j;
+    /* Since we have an estimate of recursion depth, do a sanity check. */
+    if (depth == 0)
+        return;
 
-            mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-                u64 n = mdesc_arc_target(hp, j);
-                const char *n_name;
+    mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+        u64 n = mdesc_arc_target(hp, arc);
+        const char *name = mdesc_node_name(hp, n);
 
-                n_name = mdesc_node_name(hp, n);
-                if (strcmp(n_name, "cpu"))
-                    continue;
+        if (!strcmp(srch_val, name))
+            (*func)(hp, n, val);
 
-                id = mdesc_get_property(hp, n, "id", NULL);
-                if (*id < NR_CPUS)
-                    cpu_data(*id).core_id = core_id;
-            }
-        }
+        find_back_node_value(hp, n, srch_val, func, val, depth-1);
     }
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+                           int core_id)
+{
+    const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+    if (*id < num_possible_cpus())
+        cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+                           int sock_id)
+{
+    const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+    if (*id < num_possible_cpus())
+        cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+                          int core_id)
+{
+    find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+                          int sock_id)
+{
+    find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
     int idx;
     u64 mp;
 
     idx = 1;
+
+    /* Identify unique cores by looking for cpus backpointed to by
+     * level 1 instruction caches.
+     */
     mdesc_for_each_node_by_name(hp, mp, "cache") {
         const u64 *level;
         const char *type;
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
             continue;
 
         mark_core_ids(hp, mp, idx);
+        idx++;
+    }
+}
+
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+    u64 mp;
+    int idx = 1;
+    int fnd = 0;
+
+    /* Identify unique sockets by looking for cpus backpointed to by
+     * shared level n caches.
+     */
+    mdesc_for_each_node_by_name(hp, mp, "cache") {
+        const u64 *cur_lvl;
+
+        cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+        if (*cur_lvl != level)
+            continue;
+
+        mark_sock_ids(hp, mp, idx);
+        idx++;
+        fnd = 1;
+    }
+    return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+    int idx = 1;
 
+    mdesc_for_each_node_by_name(hp, mp, "socket") {
+        u64 a;
+
+        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+            u64 t = mdesc_arc_target(hp, a);
+            const char *name;
+            const u64 *id;
+
+            name = mdesc_node_name(hp, t);
+            if (strcmp(name, "cpu"))
+                continue;
+
+            id = mdesc_get_property(hp, t, "id", NULL);
+            if (*id < num_possible_cpus())
+                cpu_data(*id).sock_id = idx;
+        }
         idx++;
     }
 }
 
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+    u64 mp;
+
+    /* If machine description exposes sockets data use it.
+     * Otherwise fallback to use shared L3 or L2 caches.
+     */
+    mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+    if (mp != MDESC_NODE_NULL)
+        return set_sock_ids_by_socket(hp, mp);
+
+    if (!set_sock_ids_by_cache(hp, 3))
+        set_sock_ids_by_cache(hp, 2);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
     u64 a;
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
         continue;
 
         mark_proc_ids(hp, mp, idx);
-
         idx++;
     }
 }
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
     set_core_ids(hp);
     set_proc_ids(hp);
+    set_sock_ids(hp);
 
     mdesc_release(hp);
 
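The mdesc.c rework above replaces the hand-unrolled two-level loop with find_back_node_value(), a depth-limited recursive walk over MDESC_ARC_TYPE_BACK arcs that invokes a callback on every node whose name matches. A toy C sketch of that shape, using a hypothetical in-memory node type instead of the real machine-description API:

#include <stdio.h>
#include <string.h>

struct node {
    const char *name;
    int id;
    struct node *back[4];   /* back-arcs; NULL-terminated for the sketch */
};

/* Depth-limited walk over back-arcs: call func() on every node whose name
 * matches srch, then keep descending until the depth budget runs out. */
static void find_back_node_value(struct node *n, const char *srch,
                                 void (*func)(struct node *, int),
                                 int val, int depth)
{
    if (depth == 0)
        return;

    for (int i = 0; i < 4 && n->back[i]; i++) {
        struct node *t = n->back[i];

        if (!strcmp(srch, t->name))
            func(t, val);

        find_back_node_value(t, srch, func, val, depth - 1);
    }
}

static void mark_core(struct node *cpu, int core_id)
{
    printf("cpu%d -> core %d\n", cpu->id, core_id);
}

int main(void)
{
    struct node cpu0 = { .name = "cpu", .id = 0 };
    struct node l1i  = { .name = "cache", .back = { &cpu0 } };

    /* Same call shape as mark_core_ids(): search "cpu" with a budget of 10. */
    find_back_node_value(&l1i, "cpu", mark_core, 1, 10);
    return 0;
}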
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 6f7251fd2eab..c928bc64b4ba 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1002,6 +1002,38 @@
 subsys_initcall(pcibios_init);
 
 #ifdef CONFIG_SYSFS
+
+#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */
+
+static void pcie_bus_slot_names(struct pci_bus *pbus)
+{
+    struct pci_dev *pdev;
+    struct pci_bus *bus;
+
+    list_for_each_entry(pdev, &pbus->devices, bus_list) {
+        char name[SLOT_NAME_SIZE];
+        struct pci_slot *pci_slot;
+        const u32 *slot_num;
+        int len;
+
+        slot_num = of_get_property(pdev->dev.of_node,
+                                   "physical-slot#", &len);
+
+        if (slot_num == NULL || len != 4)
+            continue;
+
+        snprintf(name, sizeof(name), "%u", slot_num[0]);
+        pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
+
+        if (IS_ERR(pci_slot))
+            pr_err("PCI: pci_create_slot returned %ld.\n",
+                   PTR_ERR(pci_slot));
+    }
+
+    list_for_each_entry(bus, &pbus->children, node)
+        pcie_bus_slot_names(bus);
+}
+
 static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
 {
     const struct pci_slot_names {
@@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void)
 
     while ((pbus = pci_find_next_bus(pbus)) != NULL) {
         struct device_node *node;
+        struct pci_dev *pdev;
+
+        pdev = list_first_entry(&pbus->devices, struct pci_dev,
+                                bus_list);
 
-        if (pbus->self) {
-            /* PCI->PCI bridge */
-            node = pbus->self->dev.of_node;
+        if (pdev && pci_is_pcie(pdev)) {
+            pcie_bus_slot_names(pbus);
         } else {
-            struct pci_pbm_info *pbm = pbus->sysdata;
 
-            /* Host PCI controller */
-            node = pbm->op->dev.of_node;
-        }
+            if (pbus->self) {
+
+                /* PCI->PCI bridge */
+                node = pbus->self->dev.of_node;
+
+            } else {
+                struct pci_pbm_info *pbm = pbus->sysdata;
 
-        pci_bus_slot_names(node, pbus);
+                /* Host PCI controller */
+                node = pbm->op->dev.of_node;
+            }
+
+            pci_bus_slot_names(node, pbus);
+        }
     }
 
     return 0;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index c38d19fc27ba..f7b261749383 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
     }
 }
 
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                              struct sun4v_2insn_patch_entry *end)
+{
+    while (start < end) {
+        unsigned long addr = start->addr;
+
+        *(unsigned int *) (addr + 0) = start->insns[0];
+        wmb();
+        __asm__ __volatile__("flush %0" : : "r" (addr + 0));
+
+        *(unsigned int *) (addr + 4) = start->insns[1];
+        wmb();
+        __asm__ __volatile__("flush %0" : : "r" (addr + 4));
+
+        start++;
+    }
+}
+
 static void __init sun4v_patch(void)
 {
     extern void sun4v_hvapi_init(void);
@@ -267,6 +285,9 @@ static void __init sun4v_patch(void)
 
     sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                             &__sun4v_2insn_patch_end);
+    if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+        sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
+                                 &__sun_m7_2insn_patch_end);
 
     sun4v_hvapi_init();
 }
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 61139d9924ca..19cd08d18672 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
     { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+    [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
 
 static cpumask_t smp_commenced_mask;
 
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
         }
     }
 
+    for_each_present_cpu(i) {
+        unsigned int j;
+
+        for_each_present_cpu(j) {
+            if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+                cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+        }
+    }
+
     for_each_present_cpu(i) {
         unsigned int j;
 
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 09243057cb0b..f1a2f688b28a 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -138,6 +138,11 @@ SECTIONS
         *(.pause_3insn_patch)
         __pause_3insn_patch_end = .;
     }
+    .sun_m7_2insn_patch : {
+        __sun_m7_2insn_patch = .;
+        *(.sun_m7_2insn_patch)
+        __sun_m7_2insn_patch_end = .;
+    }
     PERCPU_SECTION(SMP_CACHE_BYTES)
 
     . = ALIGN(PAGE_SIZE);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4ca0d6ba5ec8..559cb744112c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -54,6 +54,7 @@
 #include "init_64.h"
 
 unsigned long kern_linear_pte_xor[4] __read_mostly;
+static unsigned long page_cache4v_flag;
 
 /* A bitmap, two bits for every 256MB of physical memory.  These two
  * bits determine what page size we use for kernel linear
@@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void)
 
 static void __init sun4v_linear_pte_xor_finalize(void)
 {
+    unsigned long pagecv_flag;
+
+    /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
+     * enables MCD error. Do not set bit 9 on M7 processor.
+     */
+    switch (sun4v_chip_type) {
+    case SUN4V_CHIP_SPARC_M7:
+        pagecv_flag = 0x00;
+        break;
+    default:
+        pagecv_flag = _PAGE_CV_4V;
+        break;
+    }
 #ifndef CONFIG_DEBUG_PAGEALLOC
     if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
         kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
             PAGE_OFFSET;
-        kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+        kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
                                    _PAGE_P_4V | _PAGE_W_4V);
     } else {
         kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
@@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
     if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
         kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
             PAGE_OFFSET;
-        kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+        kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
                                    _PAGE_P_4V | _PAGE_W_4V);
     } else {
         kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
@@ -1931,7 +1945,7 @@
     if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
         kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
             PAGE_OFFSET;
-        kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+        kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
                                    _PAGE_P_4V | _PAGE_W_4V);
     } else {
         kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
@@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void)
     return available;
 }
 
+#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
 /* We need to exclude reserved regions. This exclusion will include
  * vmlinux and initrd. To be more precise the initrd size could be used to
  * compute a new lower limit because it is freed later during initialization.
@@ -2034,6 +2055,25 @@ void __init paging_init(void)
     memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 #endif
 
+    /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
+     * bit on M7 processor. This is a conflicting usage of the same
+     * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
+     * Detection error on all pages and this will lead to problems
+     * later. Kernel does not run with MCD enabled and hence rest
+     * of the required steps to fully configure memory corruption
+     * detection are not taken. We need to ensure TTE.mcde is not
+     * set on M7 processor. Compute the value of cacheability
+     * flag for use later taking this into consideration.
+     */
+    switch (sun4v_chip_type) {
+    case SUN4V_CHIP_SPARC_M7:
+        page_cache4v_flag = _PAGE_CP_4V;
+        break;
+    default:
+        page_cache4v_flag = _PAGE_CACHE_4V;
+        break;
+    }
+
     if (tlb_type == hypervisor)
         sun4v_pgprot_init();
     else
@@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
-#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
-#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
-#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
-#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
-#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
-
 pgprot_t PAGE_KERNEL __read_mostly;
 EXPORT_SYMBOL(PAGE_KERNEL);
 
@@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                     _PAGE_P_4U | _PAGE_W_4U);
     if (tlb_type == hypervisor)
         pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                    _PAGE_CP_4V | _PAGE_CV_4V |
-                    _PAGE_P_4V | _PAGE_W_4V);
+                    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
 
     pte_base |= _PAGE_PMD_HUGE;
 
@@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void)
     int i;
 
     PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
-                            _PAGE_CACHE_4V | _PAGE_P_4V |
+                            page_cache4v_flag | _PAGE_P_4V |
                             __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                             _PAGE_EXEC_4V);
     PAGE_KERNEL_LOCKED = PAGE_KERNEL;
 
     _PAGE_IE = _PAGE_IE_4V;
     _PAGE_E = _PAGE_E_4V;
-    _PAGE_CACHE = _PAGE_CACHE_4V;
+    _PAGE_CACHE = page_cache4v_flag;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
     kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
@@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void)
     kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
         PAGE_OFFSET;
 #endif
-    kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
-                               _PAGE_P_4V | _PAGE_W_4V);
+    kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
+                               _PAGE_W_4V);
 
     for (i = 1; i < 4; i++)
         kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
@@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void)
              _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
             _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
 
-    page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
-    page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+    page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
+    page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                    __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
-    page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+    page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                  __ACCESS_BITS_4V | _PAGE_EXEC_4V);
-    page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+    page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                      __ACCESS_BITS_4V | _PAGE_EXEC_4V);
 
     page_exec_bit = _PAGE_EXEC_4V;
@@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr)
             _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
     if (tlb_type == hypervisor)
         val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+               page_cache4v_flag | _PAGE_P_4V |
                _PAGE_EXEC_4V | _PAGE_W_4V);
 
     return val | paddr;
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c469490db4a8..3c6bb342a48f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -140,6 +140,7 @@
 #define MSR_CORE_C3_RESIDENCY        0x000003fc
 #define MSR_CORE_C6_RESIDENCY        0x000003fd
 #define MSR_CORE_C7_RESIDENCY        0x000003fe
+#define MSR_KNL_CORE_C6_RESIDENCY    0x000003ff
 #define MSR_PKG_C2_RESIDENCY         0x0000060d
 #define MSR_PKG_C8_RESIDENCY         0x00000630
 #define MSR_PKG_C9_RESIDENCY         0x00000631
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index d93963340c3c..14a63ed6fe09 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
| @@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) | |||
| 482 | 482 | ||
| 483 | int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) | 483 | int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) |
| 484 | { | 484 | { |
| 485 | struct pci_sysdata *sd = bridge->bus->sysdata; | 485 | /* |
| 486 | 486 | * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL | |
| 487 | ACPI_COMPANION_SET(&bridge->dev, sd->companion); | 487 | * here, pci_create_root_bus() has been called by someone else and |
| 488 | * sysdata is likely to be different from what we expect. Let it go in | ||
| 489 | * that case. | ||
| 490 | */ | ||
| 491 | if (!bridge->dev.parent) { | ||
| 492 | struct pci_sysdata *sd = bridge->bus->sysdata; | ||
| 493 | ACPI_COMPANION_SET(&bridge->dev, sd->companion); | ||
| 494 | } | ||
| 488 | return 0; | 495 | return 0; |
| 489 | } | 496 | } |
| 490 | 497 | ||
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 172a02a6ad14..ba78ccf651e7 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h | |||
| @@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
| 185 | return -EINVAL; | 185 | return -EINVAL; |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
| 189 | dma_addr_t *dma_handle, gfp_t flag, | ||
| 190 | struct dma_attrs *attrs) | ||
| 191 | { | ||
| 192 | return NULL; | ||
| 193 | } | ||
| 194 | |||
| 195 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
| 196 | void *vaddr, dma_addr_t dma_handle, | ||
| 197 | struct dma_attrs *attrs) | ||
| 198 | { | ||
| 199 | } | ||
| 200 | |||
| 188 | #endif /* _XTENSA_DMA_MAPPING_H */ | 201 | #endif /* _XTENSA_DMA_MAPPING_H */ |
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 5bd792c68f9b..ab3bde16ecb4 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c | |||
| @@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type) | |||
| 453 | 453 | ||
| 454 | /* Look for a specific device type */ | 454 | /* Look for a specific device type */ |
| 455 | for (; drb < bus->drbs; drb += size + 1) { | 455 | for (; drb < bus->drbs; drb += size + 1) { |
| 456 | acsr = readl(cdmm + drb * CDMM_DRB_SIZE); | 456 | acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); |
| 457 | type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; | 457 | type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; |
| 458 | if (type == dev_type) | 458 | if (type == dev_type) |
| 459 | return cdmm + drb * CDMM_DRB_SIZE; | 459 | return cdmm + drb * CDMM_DRB_SIZE; |
| @@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) | |||
| 500 | bus->discovered = true; | 500 | bus->discovered = true; |
| 501 | pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); | 501 | pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); |
| 502 | for (; drb < bus->drbs; drb += size + 1) { | 502 | for (; drb < bus->drbs; drb += size + 1) { |
| 503 | acsr = readl(cdmm + drb * CDMM_DRB_SIZE); | 503 | acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); |
| 504 | type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; | 504 | type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; |
| 505 | size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; | 505 | size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; |
| 506 | rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT; | 506 | rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT; |
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 40c1db9ad7c3..2f0ed11024eb 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c | |||
| @@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane, | |||
| 465 | if (!crtc[i]) | 465 | if (!crtc[i]) |
| 466 | continue; | 466 | continue; |
| 467 | 467 | ||
| 468 | if (crtc[i]->cursor == plane) | ||
| 469 | continue; | ||
| 470 | |||
| 468 | /* There's no other way to figure out whether the crtc is running. */ | 471 | /* There's no other way to figure out whether the crtc is running. */ |
| 469 | ret = drm_crtc_vblank_get(crtc[i]); | 472 | ret = drm_crtc_vblank_get(crtc[i]); |
| 470 | if (ret == 0) { | 473 | if (ret == 0) { |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 0b5af0fe8659..64f8b2f687d2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #define FERMI_TWOD_A 0x0000902d | 15 | #define FERMI_TWOD_A 0x0000902d |
| 16 | 16 | ||
| 17 | #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d | 17 | #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 |
| 18 | 18 | ||
| 19 | #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 | 19 | #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 |
| 20 | #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 | 20 | #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c index 2f5eadd12a9b..fdb1dcf16a59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c | |||
| @@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object) | |||
| 329 | nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); | 329 | nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); |
| 330 | 330 | ||
| 331 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 331 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
| 332 | printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]); | ||
| 333 | for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) | 332 | for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) |
| 334 | nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); | 333 | nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); |
| 335 | nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); | 334 | nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c index e8778c67578e..c61102f70805 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | |||
| @@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit) | |||
| 90 | return disable; | 90 | return disable; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static int | 93 | int |
| 94 | gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 94 | gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
| 95 | struct nvkm_oclass *oclass, void *data, u32 size, | 95 | struct nvkm_oclass *oclass, void *data, u32 size, |
| 96 | struct nvkm_object **pobject) | 96 | struct nvkm_object **pobject) |
| 97 | { | 97 | { |
| 98 | struct nvkm_devinit_impl *impl = (void *)oclass; | ||
| 98 | struct nv50_devinit_priv *priv; | 99 | struct nv50_devinit_priv *priv; |
| 100 | u64 disable; | ||
| 99 | int ret; | 101 | int ret; |
| 100 | 102 | ||
| 101 | ret = nvkm_devinit_create(parent, engine, oclass, &priv); | 103 | ret = nvkm_devinit_create(parent, engine, oclass, &priv); |
| @@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
| 103 | if (ret) | 105 | if (ret) |
| 104 | return ret; | 106 | return ret; |
| 105 | 107 | ||
| 106 | if (nv_rd32(priv, 0x022500) & 0x00000001) | 108 | disable = impl->disable(&priv->base); |
| 109 | if (disable & (1ULL << NVDEV_ENGINE_DISP)) | ||
| 107 | priv->base.post = true; | 110 | priv->base.post = true; |
| 108 | 111 | ||
| 109 | return 0; | 112 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c index b345a53e881d..87ca0ece37b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | |||
| @@ -48,7 +48,7 @@ struct nvkm_oclass * | |||
| 48 | gm107_devinit_oclass = &(struct nvkm_devinit_impl) { | 48 | gm107_devinit_oclass = &(struct nvkm_devinit_impl) { |
| 49 | .base.handle = NV_SUBDEV(DEVINIT, 0x07), | 49 | .base.handle = NV_SUBDEV(DEVINIT, 0x07), |
| 50 | .base.ofuncs = &(struct nvkm_ofuncs) { | 50 | .base.ofuncs = &(struct nvkm_ofuncs) { |
| 51 | .ctor = nv50_devinit_ctor, | 51 | .ctor = gf100_devinit_ctor, |
| 52 | .dtor = _nvkm_devinit_dtor, | 52 | .dtor = _nvkm_devinit_dtor, |
| 53 | .init = nv50_devinit_init, | 53 | .init = nv50_devinit_init, |
| 54 | .fini = _nvkm_devinit_fini, | 54 | .fini = _nvkm_devinit_fini, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c index 535172c5f1ad..1076fcf0d716 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c | |||
| @@ -161,7 +161,7 @@ struct nvkm_oclass * | |||
| 161 | gm204_devinit_oclass = &(struct nvkm_devinit_impl) { | 161 | gm204_devinit_oclass = &(struct nvkm_devinit_impl) { |
| 162 | .base.handle = NV_SUBDEV(DEVINIT, 0x07), | 162 | .base.handle = NV_SUBDEV(DEVINIT, 0x07), |
| 163 | .base.ofuncs = &(struct nvkm_ofuncs) { | 163 | .base.ofuncs = &(struct nvkm_ofuncs) { |
| 164 | .ctor = nv50_devinit_ctor, | 164 | .ctor = gf100_devinit_ctor, |
| 165 | .dtor = _nvkm_devinit_dtor, | 165 | .dtor = _nvkm_devinit_dtor, |
| 166 | .init = nv50_devinit_init, | 166 | .init = nv50_devinit_init, |
| 167 | .fini = _nvkm_devinit_fini, | 167 | .fini = _nvkm_devinit_fini, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h index b882b65ff3cd..9243521c80ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h | |||
| @@ -15,6 +15,9 @@ int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32); | |||
| 15 | 15 | ||
| 16 | int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); | 16 | int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); |
| 17 | 17 | ||
| 18 | int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *, | ||
| 19 | struct nvkm_oclass *, void *, u32, | ||
| 20 | struct nvkm_object **); | ||
| 18 | int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); | 21 | int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); |
| 19 | 22 | ||
| 20 | u64 gm107_devinit_disable(struct nvkm_devinit *); | 23 | u64 gm107_devinit_disable(struct nvkm_devinit *); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 42b2ea3fdcf3..e597ffc26563 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -1798,7 +1798,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) | |||
| 1798 | if ((crtc->mode.clock == test_crtc->mode.clock) && | 1798 | if ((crtc->mode.clock == test_crtc->mode.clock) && |
| 1799 | (adjusted_clock == test_adjusted_clock) && | 1799 | (adjusted_clock == test_adjusted_clock) && |
| 1800 | (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && | 1800 | (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && |
| 1801 | (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) | 1801 | (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && |
| 1802 | (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == | ||
| 1803 | drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) | ||
| 1802 | return test_radeon_crtc->pll_id; | 1804 | return test_radeon_crtc->pll_id; |
| 1803 | } | 1805 | } |
| 1804 | } | 1806 | } |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a0c35bbc8546..ba50f3c1c2e0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 5822 | L2_CACHE_BIGK_FRAGMENT_SIZE(4)); | 5822 | L2_CACHE_BIGK_FRAGMENT_SIZE(4)); |
| 5823 | /* setup context0 */ | 5823 | /* setup context0 */ |
| 5824 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 5824 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 5825 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 5825 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 5826 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 5826 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 5827 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 5827 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
| 5828 | (u32)(rdev->dummy_page.addr >> 12)); | 5828 | (u32)(rdev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 05e6d6ef5963..f848acfd3fc8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
| 2485 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 2485 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
| 2486 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 2486 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
| 2487 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 2487 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 2488 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 2488 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 2489 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 2489 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 2490 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 2490 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 2491 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 2491 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 0926739c9fa7..9953356fe263 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 400 | if (enable) { | 400 | if (enable) { |
| 401 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 401 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 402 | 402 | ||
| 403 | if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { | 403 | if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
| 404 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, | 404 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
| 405 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ | 405 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ |
| 406 | HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ | 406 | HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ |
| @@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) | |||
| 438 | if (!dig || !dig->afmt) | 438 | if (!dig || !dig->afmt) |
| 439 | return; | 439 | return; |
| 440 | 440 | ||
| 441 | if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { | 441 | if (enable && connector && |
| 442 | drm_detect_monitor_audio(radeon_connector_edid(connector))) { | ||
| 442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 443 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 444 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 444 | struct radeon_connector_atom_dig *dig_connector; | 445 | struct radeon_connector_atom_dig *dig_connector; |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index aba2f428c0a8..64d3a771920d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) | |||
| 1282 | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); | 1282 | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); |
| 1283 | /* setup context0 */ | 1283 | /* setup context0 */ |
| 1284 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 1284 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 1285 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 1285 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 1286 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 1286 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 1287 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 1287 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
| 1288 | (u32)(rdev->dummy_page.addr >> 12)); | 1288 | (u32)(rdev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 25b4ac967742..8f6d862a1882 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
| 1112 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 1112 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 1113 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 1113 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 1114 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 1114 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 1115 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 1115 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 1116 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 1116 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 1117 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 1117 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 1118 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 1118 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index dcb779647c57..25191f126f3b 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
| @@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
| 460 | if (!connector || !connector->encoder) | 460 | if (!connector || !connector->encoder) |
| 461 | return; | 461 | return; |
| 462 | 462 | ||
| 463 | if (!radeon_encoder_is_digital(connector->encoder)) | ||
| 464 | return; | ||
| 465 | |||
| 466 | rdev = connector->encoder->dev->dev_private; | 463 | rdev = connector->encoder->dev->dev_private; |
| 467 | 464 | ||
| 468 | if (!radeon_audio_chipset_supported(rdev)) | 465 | if (!radeon_audio_chipset_supported(rdev)) |
| @@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
| 471 | radeon_encoder = to_radeon_encoder(connector->encoder); | 468 | radeon_encoder = to_radeon_encoder(connector->encoder); |
| 472 | dig = radeon_encoder->enc_priv; | 469 | dig = radeon_encoder->enc_priv; |
| 473 | 470 | ||
| 474 | if (!dig->afmt) | ||
| 475 | return; | ||
| 476 | |||
| 477 | if (status == connector_status_connected) { | 471 | if (status == connector_status_connected) { |
| 478 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 472 | struct radeon_connector *radeon_connector; |
| 473 | int sink_type; | ||
| 474 | |||
| 475 | if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { | ||
| 476 | radeon_encoder->audio = NULL; | ||
| 477 | return; | ||
| 478 | } | ||
| 479 | |||
| 480 | radeon_connector = to_radeon_connector(connector); | ||
| 481 | sink_type = radeon_dp_getsinktype(radeon_connector); | ||
| 479 | 482 | ||
| 480 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && | 483 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && |
| 481 | radeon_dp_getsinktype(radeon_connector) == | 484 | sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) |
| 482 | CONNECTOR_OBJECT_ID_DISPLAYPORT) | ||
| 483 | radeon_encoder->audio = rdev->audio.dp_funcs; | 485 | radeon_encoder->audio = rdev->audio.dp_funcs; |
| 484 | else | 486 | else |
| 485 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 487 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
| 486 | 488 | ||
| 487 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); | 489 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
| 488 | if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { | 490 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
| 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
| 490 | } else { | ||
| 491 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
| 492 | dig->afmt->pin = NULL; | ||
| 493 | } | ||
| 494 | } else { | 491 | } else { |
| 495 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 492 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
| 496 | dig->afmt->pin = NULL; | 493 | dig->afmt->pin = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index d17d251dbd4f..cebb65e07e1d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -1379,10 +1379,8 @@ out: | |||
| 1379 | /* updated in get modes as well since we need to know if it's analog or digital */ | 1379 | /* updated in get modes as well since we need to know if it's analog or digital */ |
| 1380 | radeon_connector_update_scratch_regs(connector, ret); | 1380 | radeon_connector_update_scratch_regs(connector, ret); |
| 1381 | 1381 | ||
| 1382 | if (radeon_audio != 0) { | 1382 | if (radeon_audio != 0) |
| 1383 | radeon_connector_get_edid(connector); | ||
| 1384 | radeon_audio_detect(connector, ret); | 1383 | radeon_audio_detect(connector, ret); |
| 1385 | } | ||
| 1386 | 1384 | ||
| 1387 | exit: | 1385 | exit: |
| 1388 | pm_runtime_mark_last_busy(connector->dev->dev); | 1386 | pm_runtime_mark_last_busy(connector->dev->dev); |
| @@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
| 1719 | 1717 | ||
| 1720 | radeon_connector_update_scratch_regs(connector, ret); | 1718 | radeon_connector_update_scratch_regs(connector, ret); |
| 1721 | 1719 | ||
| 1722 | if (radeon_audio != 0) { | 1720 | if (radeon_audio != 0) |
| 1723 | radeon_connector_get_edid(connector); | ||
| 1724 | radeon_audio_detect(connector, ret); | 1721 | radeon_audio_detect(connector, ret); |
| 1725 | } | ||
| 1726 | 1722 | ||
| 1727 | out: | 1723 | out: |
| 1728 | pm_runtime_mark_last_busy(connector->dev->dev); | 1724 | pm_runtime_mark_last_busy(connector->dev->dev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c54d6313a46d..01ee96acb398 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
| 921 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 921 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
| 922 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 922 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
| 923 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 923 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 924 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 924 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 925 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 925 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 926 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 926 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 927 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 927 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 5326f753e107..4c679b802bc8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) | |||
| 4303 | L2_CACHE_BIGK_FRAGMENT_SIZE(4)); | 4303 | L2_CACHE_BIGK_FRAGMENT_SIZE(4)); |
| 4304 | /* setup context0 */ | 4304 | /* setup context0 */ |
| 4305 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 4305 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 4306 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); | 4306 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 4307 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 4307 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 4308 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 4308 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
| 4309 | (u32)(rdev->dummy_page.addr >> 12)); | 4309 | (u32)(rdev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile index 1055cb79096c..3f4c7b842028 100644 --- a/drivers/gpu/drm/vgem/Makefile +++ b/drivers/gpu/drm/vgem/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | ccflags-y := -Iinclude/drm | 1 | ccflags-y := -Iinclude/drm |
| 2 | vgem-y := vgem_drv.o vgem_dma_buf.o | 2 | vgem-y := vgem_drv.o |
| 3 | 3 | ||
| 4 | obj-$(CONFIG_DRM_VGEM) += vgem.o | 4 | obj-$(CONFIG_DRM_VGEM) += vgem.o |
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c deleted file mode 100644 index 0254438ad1a6..000000000000 --- a/drivers/gpu/drm/vgem/vgem_dma_buf.c +++ /dev/null | |||
| @@ -1,94 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2012 Intel Corporation | ||
| 3 | * Copyright © 2014 The Chromium OS Authors | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice (including the next | ||
| 13 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 14 | * Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
| 22 | * IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: | ||
| 25 | * Ben Widawsky <ben@bwidawsk.net> | ||
| 26 | * | ||
| 27 | */ | ||
| 28 | |||
| 29 | #include <linux/dma-buf.h> | ||
| 30 | #include "vgem_drv.h" | ||
| 31 | |||
| 32 | struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj) | ||
| 33 | { | ||
| 34 | struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); | ||
| 35 | BUG_ON(obj->pages == NULL); | ||
| 36 | |||
| 37 | return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE); | ||
| 38 | } | ||
| 39 | |||
| 40 | int vgem_gem_prime_pin(struct drm_gem_object *gobj) | ||
| 41 | { | ||
| 42 | struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); | ||
| 43 | return vgem_gem_get_pages(obj); | ||
| 44 | } | ||
| 45 | |||
| 46 | void vgem_gem_prime_unpin(struct drm_gem_object *gobj) | ||
| 47 | { | ||
| 48 | struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); | ||
| 49 | vgem_gem_put_pages(obj); | ||
| 50 | } | ||
| 51 | |||
| 52 | void *vgem_gem_prime_vmap(struct drm_gem_object *gobj) | ||
| 53 | { | ||
| 54 | struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); | ||
| 55 | BUG_ON(obj->pages == NULL); | ||
| 56 | |||
| 57 | return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); | ||
| 58 | } | ||
| 59 | |||
| 60 | void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | ||
| 61 | { | ||
| 62 | vunmap(vaddr); | ||
| 63 | } | ||
| 64 | |||
| 65 | struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, | ||
| 66 | struct dma_buf *dma_buf) | ||
| 67 | { | ||
| 68 | struct drm_vgem_gem_object *obj = NULL; | ||
| 69 | int ret; | ||
| 70 | |||
| 71 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
| 72 | if (obj == NULL) { | ||
| 73 | ret = -ENOMEM; | ||
| 74 | goto fail; | ||
| 75 | } | ||
| 76 | |||
| 77 | ret = drm_gem_object_init(dev, &obj->base, dma_buf->size); | ||
| 78 | if (ret) { | ||
| 79 | ret = -ENOMEM; | ||
| 80 | goto fail_free; | ||
| 81 | } | ||
| 82 | |||
| 83 | get_dma_buf(dma_buf); | ||
| 84 | |||
| 85 | obj->base.dma_buf = dma_buf; | ||
| 86 | obj->use_dma_buf = true; | ||
| 87 | |||
| 88 | return &obj->base; | ||
| 89 | |||
| 90 | fail_free: | ||
| 91 | kfree(obj); | ||
| 92 | fail: | ||
| 93 | return ERR_PTR(ret); | ||
| 94 | } | ||
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index cb3b43525b2d..7a207ca547be 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c | |||
| @@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = { | |||
| 302 | }; | 302 | }; |
| 303 | 303 | ||
| 304 | static struct drm_driver vgem_driver = { | 304 | static struct drm_driver vgem_driver = { |
| 305 | .driver_features = DRIVER_GEM | DRIVER_PRIME, | 305 | .driver_features = DRIVER_GEM, |
| 306 | .gem_free_object = vgem_gem_free_object, | 306 | .gem_free_object = vgem_gem_free_object, |
| 307 | .gem_vm_ops = &vgem_gem_vm_ops, | 307 | .gem_vm_ops = &vgem_gem_vm_ops, |
| 308 | .ioctls = vgem_ioctls, | 308 | .ioctls = vgem_ioctls, |
| 309 | .fops = &vgem_driver_fops, | 309 | .fops = &vgem_driver_fops, |
| 310 | .dumb_create = vgem_gem_dumb_create, | 310 | .dumb_create = vgem_gem_dumb_create, |
| 311 | .dumb_map_offset = vgem_gem_dumb_map, | 311 | .dumb_map_offset = vgem_gem_dumb_map, |
| 312 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 313 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 314 | .gem_prime_export = drm_gem_prime_export, | ||
| 315 | .gem_prime_import = vgem_gem_prime_import, | ||
| 316 | .gem_prime_pin = vgem_gem_prime_pin, | ||
| 317 | .gem_prime_unpin = vgem_gem_prime_unpin, | ||
| 318 | .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table, | ||
| 319 | .gem_prime_vmap = vgem_gem_prime_vmap, | ||
| 320 | .gem_prime_vunmap = vgem_gem_prime_vunmap, | ||
| 321 | .name = DRIVER_NAME, | 312 | .name = DRIVER_NAME, |
| 322 | .desc = DRIVER_DESC, | 313 | .desc = DRIVER_DESC, |
| 323 | .date = DRIVER_DATE, | 314 | .date = DRIVER_DATE, |
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 57ab4d8f41f9..e9f92f7ee275 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h | |||
| @@ -43,15 +43,4 @@ struct drm_vgem_gem_object { | |||
| 43 | extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); | 43 | extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); |
| 44 | extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); | 44 | extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); |
| 45 | 45 | ||
| 46 | /* vgem_dma_buf.c */ | ||
| 47 | extern struct sg_table *vgem_gem_prime_get_sg_table( | ||
| 48 | struct drm_gem_object *gobj); | ||
| 49 | extern int vgem_gem_prime_pin(struct drm_gem_object *gobj); | ||
| 50 | extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj); | ||
| 51 | extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj); | ||
| 52 | extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | ||
| 53 | extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, | ||
| 54 | struct dma_buf *dma_buf); | ||
| 55 | |||
| 56 | |||
| 57 | #endif | 46 | #endif |
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index f3830db02d46..37f01702d081 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c | |||
| @@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, | |||
| 439 | (*t)->dev_attr.attr.name, tg->base + i); | 439 | (*t)->dev_attr.attr.name, tg->base + i); |
| 440 | if ((*t)->s2) { | 440 | if ((*t)->s2) { |
| 441 | a2 = &su->u.a2; | 441 | a2 = &su->u.a2; |
| 442 | sysfs_attr_init(&a2->dev_attr.attr); | ||
| 442 | a2->dev_attr.attr.name = su->name; | 443 | a2->dev_attr.attr.name = su->name; |
| 443 | a2->nr = (*t)->u.s.nr + i; | 444 | a2->nr = (*t)->u.s.nr + i; |
| 444 | a2->index = (*t)->u.s.index; | 445 | a2->index = (*t)->u.s.index; |
| @@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, | |||
| 449 | *attrs = &a2->dev_attr.attr; | 450 | *attrs = &a2->dev_attr.attr; |
| 450 | } else { | 451 | } else { |
| 451 | a = &su->u.a1; | 452 | a = &su->u.a1; |
| 453 | sysfs_attr_init(&a->dev_attr.attr); | ||
| 452 | a->dev_attr.attr.name = su->name; | 454 | a->dev_attr.attr.name = su->name; |
| 453 | a->index = (*t)->u.index + i; | 455 | a->index = (*t)->u.index + i; |
| 454 | a->dev_attr.attr.mode = | 456 | a->dev_attr.attr.mode = |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4fcb48103299..bd1c99deac71 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, | |||
| 995 | (*t)->dev_attr.attr.name, tg->base + i); | 995 | (*t)->dev_attr.attr.name, tg->base + i); |
| 996 | if ((*t)->s2) { | 996 | if ((*t)->s2) { |
| 997 | a2 = &su->u.a2; | 997 | a2 = &su->u.a2; |
| 998 | sysfs_attr_init(&a2->dev_attr.attr); | ||
| 998 | a2->dev_attr.attr.name = su->name; | 999 | a2->dev_attr.attr.name = su->name; |
| 999 | a2->nr = (*t)->u.s.nr + i; | 1000 | a2->nr = (*t)->u.s.nr + i; |
| 1000 | a2->index = (*t)->u.s.index; | 1001 | a2->index = (*t)->u.s.index; |
| @@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, | |||
| 1005 | *attrs = &a2->dev_attr.attr; | 1006 | *attrs = &a2->dev_attr.attr; |
| 1006 | } else { | 1007 | } else { |
| 1007 | a = &su->u.a1; | 1008 | a = &su->u.a1; |
| 1009 | sysfs_attr_init(&a->dev_attr.attr); | ||
| 1008 | a->dev_attr.attr.name = su->name; | 1010 | a->dev_attr.attr.name = su->name; |
| 1009 | a->index = (*t)->u.index + i; | 1011 | a->index = (*t)->u.index + i; |
| 1010 | a->dev_attr.attr.mode = | 1012 | a->dev_attr.attr.mode = |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 112e4d45e4a0..68800115876b 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
| @@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data * | |||
| 239 | ntc_thermistor_parse_dt(struct platform_device *pdev) | 239 | ntc_thermistor_parse_dt(struct platform_device *pdev) |
| 240 | { | 240 | { |
| 241 | struct iio_channel *chan; | 241 | struct iio_channel *chan; |
| 242 | enum iio_chan_type type; | ||
| 242 | struct device_node *np = pdev->dev.of_node; | 243 | struct device_node *np = pdev->dev.of_node; |
| 243 | struct ntc_thermistor_platform_data *pdata; | 244 | struct ntc_thermistor_platform_data *pdata; |
| 245 | int ret; | ||
| 244 | 246 | ||
| 245 | if (!np) | 247 | if (!np) |
| 246 | return NULL; | 248 | return NULL; |
| @@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) | |||
| 253 | if (IS_ERR(chan)) | 255 | if (IS_ERR(chan)) |
| 254 | return ERR_CAST(chan); | 256 | return ERR_CAST(chan); |
| 255 | 257 | ||
| 258 | ret = iio_get_channel_type(chan, &type); | ||
| 259 | if (ret < 0) | ||
| 260 | return ERR_PTR(ret); | ||
| 261 | |||
| 262 | if (type != IIO_VOLTAGE) | ||
| 263 | return ERR_PTR(-EINVAL); | ||
| 264 | |||
| 256 | if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) | 265 | if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) |
| 257 | return ERR_PTR(-ENODEV); | 266 | return ERR_PTR(-ENODEV); |
| 258 | if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) | 267 | if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) |
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 99664ebc738d..ccf4cffe0ee1 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | #include <linux/sysfs.h> | 44 | #include <linux/sysfs.h> |
| 45 | 45 | ||
| 46 | /* Addresses to scan */ | 46 | /* Addresses to scan */ |
| 47 | static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, | 47 | static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, |
| 48 | 0x4e, 0x4f, I2C_CLIENT_END }; | 48 | 0x4e, 0x4f, I2C_CLIENT_END }; |
| 49 | 49 | ||
| 50 | enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; | 50 | enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 327529ee85eb..3f40319a55da 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
| @@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, | |||
| 547 | return 0; | 547 | return 0; |
| 548 | 548 | ||
| 549 | err_prot_mr: | 549 | err_prot_mr: |
| 550 | ib_dereg_mr(desc->pi_ctx->prot_mr); | 550 | ib_dereg_mr(pi_ctx->prot_mr); |
| 551 | err_prot_frpl: | 551 | err_prot_frpl: |
| 552 | ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); | 552 | ib_free_fast_reg_page_list(pi_ctx->prot_frpl); |
| 553 | err_pi_ctx: | 553 | err_pi_ctx: |
| 554 | kfree(desc->pi_ctx); | 554 | kfree(pi_ctx); |
| 555 | 555 | ||
| 556 | return ret; | 556 | return ret; |
| 557 | } | 557 | } |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 63953477a07c..eff7bdd7731d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, | |||
| 429 | /* blk-mq request-based interface */ | 429 | /* blk-mq request-based interface */ |
| 430 | *__clone = blk_get_request(bdev_get_queue(bdev), | 430 | *__clone = blk_get_request(bdev_get_queue(bdev), |
| 431 | rq_data_dir(rq), GFP_ATOMIC); | 431 | rq_data_dir(rq), GFP_ATOMIC); |
| 432 | if (IS_ERR(*__clone)) | 432 | if (IS_ERR(*__clone)) { |
| 433 | /* ENOMEM, requeue */ | 433 | /* ENOMEM, requeue */ |
| 434 | clear_mapinfo(m, map_context); | ||
| 434 | return r; | 435 | return r; |
| 436 | } | ||
| 435 | (*__clone)->bio = (*__clone)->biotail = NULL; | 437 | (*__clone)->bio = (*__clone)->biotail = NULL; |
| 436 | (*__clone)->rq_disk = bdev->bd_disk; | 438 | (*__clone)->rq_disk = bdev->bd_disk; |
| 437 | (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; | 439 | (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d9b00b8565c6..16ba55ad7089 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
| @@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args) | |||
| 820 | } | 820 | } |
| 821 | EXPORT_SYMBOL(dm_consume_args); | 821 | EXPORT_SYMBOL(dm_consume_args); |
| 822 | 822 | ||
| 823 | static bool __table_type_request_based(unsigned table_type) | ||
| 824 | { | ||
| 825 | return (table_type == DM_TYPE_REQUEST_BASED || | ||
| 826 | table_type == DM_TYPE_MQ_REQUEST_BASED); | ||
| 827 | } | ||
| 828 | |||
| 823 | static int dm_table_set_type(struct dm_table *t) | 829 | static int dm_table_set_type(struct dm_table *t) |
| 824 | { | 830 | { |
| 825 | unsigned i; | 831 | unsigned i; |
| @@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t) | |||
| 852 | * Determine the type from the live device. | 858 | * Determine the type from the live device. |
| 853 | * Default to bio-based if device is new. | 859 | * Default to bio-based if device is new. |
| 854 | */ | 860 | */ |
| 855 | if (live_md_type == DM_TYPE_REQUEST_BASED || | 861 | if (__table_type_request_based(live_md_type)) |
| 856 | live_md_type == DM_TYPE_MQ_REQUEST_BASED) | ||
| 857 | request_based = 1; | 862 | request_based = 1; |
| 858 | else | 863 | else |
| 859 | bio_based = 1; | 864 | bio_based = 1; |
| @@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t) | |||
| 903 | } | 908 | } |
| 904 | t->type = DM_TYPE_MQ_REQUEST_BASED; | 909 | t->type = DM_TYPE_MQ_REQUEST_BASED; |
| 905 | 910 | ||
| 906 | } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { | 911 | } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { |
| 907 | /* inherit live MD type */ | 912 | /* inherit live MD type */ |
| 908 | t->type = live_md_type; | 913 | t->type = live_md_type; |
| 909 | 914 | ||
| @@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) | |||
| 925 | 930 | ||
| 926 | bool dm_table_request_based(struct dm_table *t) | 931 | bool dm_table_request_based(struct dm_table *t) |
| 927 | { | 932 | { |
| 928 | unsigned table_type = dm_table_get_type(t); | 933 | return __table_type_request_based(dm_table_get_type(t)); |
| 929 | |||
| 930 | return (table_type == DM_TYPE_REQUEST_BASED || | ||
| 931 | table_type == DM_TYPE_MQ_REQUEST_BASED); | ||
| 932 | } | 934 | } |
| 933 | 935 | ||
| 934 | bool dm_table_mq_request_based(struct dm_table *t) | 936 | bool dm_table_mq_request_based(struct dm_table *t) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a930b72314ac..2caf492890d6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) | |||
| 1082 | dm_put(md); | 1082 | dm_put(md); |
| 1083 | } | 1083 | } |
| 1084 | 1084 | ||
| 1085 | static void free_rq_clone(struct request *clone, bool must_be_mapped) | 1085 | static void free_rq_clone(struct request *clone) |
| 1086 | { | 1086 | { |
| 1087 | struct dm_rq_target_io *tio = clone->end_io_data; | 1087 | struct dm_rq_target_io *tio = clone->end_io_data; |
| 1088 | struct mapped_device *md = tio->md; | 1088 | struct mapped_device *md = tio->md; |
| 1089 | 1089 | ||
| 1090 | WARN_ON_ONCE(must_be_mapped && !clone->q); | ||
| 1091 | |||
| 1092 | blk_rq_unprep_clone(clone); | 1090 | blk_rq_unprep_clone(clone); |
| 1093 | 1091 | ||
| 1094 | if (md->type == DM_TYPE_MQ_REQUEST_BASED) | 1092 | if (md->type == DM_TYPE_MQ_REQUEST_BASED) |
| @@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error) | |||
| 1132 | rq->sense_len = clone->sense_len; | 1130 | rq->sense_len = clone->sense_len; |
| 1133 | } | 1131 | } |
| 1134 | 1132 | ||
| 1135 | free_rq_clone(clone, true); | 1133 | free_rq_clone(clone); |
| 1136 | if (!rq->q->mq_ops) | 1134 | if (!rq->q->mq_ops) |
| 1137 | blk_end_request_all(rq, error); | 1135 | blk_end_request_all(rq, error); |
| 1138 | else | 1136 | else |
| @@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq) | |||
| 1151 | } | 1149 | } |
| 1152 | 1150 | ||
| 1153 | if (clone) | 1151 | if (clone) |
| 1154 | free_rq_clone(clone, false); | 1152 | free_rq_clone(clone); |
| 1155 | } | 1153 | } |
| 1156 | 1154 | ||
| 1157 | /* | 1155 | /* |
| @@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq) | |||
| 1164 | 1162 | ||
| 1165 | spin_lock_irqsave(q->queue_lock, flags); | 1163 | spin_lock_irqsave(q->queue_lock, flags); |
| 1166 | blk_requeue_request(q, rq); | 1164 | blk_requeue_request(q, rq); |
| 1165 | blk_run_queue_async(q); | ||
| 1167 | spin_unlock_irqrestore(q->queue_lock, flags); | 1166 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 1168 | } | 1167 | } |
| 1169 | 1168 | ||
| @@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q, | |||
| 1724 | struct mapped_device *md = q->queuedata; | 1723 | struct mapped_device *md = q->queuedata; |
| 1725 | struct dm_table *map = dm_get_live_table_fast(md); | 1724 | struct dm_table *map = dm_get_live_table_fast(md); |
| 1726 | struct dm_target *ti; | 1725 | struct dm_target *ti; |
| 1727 | sector_t max_sectors; | 1726 | sector_t max_sectors, max_size = 0; |
| 1728 | int max_size = 0; | ||
| 1729 | 1727 | ||
| 1730 | if (unlikely(!map)) | 1728 | if (unlikely(!map)) |
| 1731 | goto out; | 1729 | goto out; |
| @@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q, | |||
| 1740 | max_sectors = min(max_io_len(bvm->bi_sector, ti), | 1738 | max_sectors = min(max_io_len(bvm->bi_sector, ti), |
| 1741 | (sector_t) queue_max_sectors(q)); | 1739 | (sector_t) queue_max_sectors(q)); |
| 1742 | max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; | 1740 | max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; |
| 1743 | if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ | 1741 | |
| 1744 | max_size = 0; | 1742 | /* |
| 1743 | * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t | ||
| 1744 | * to the targets' merge function since it holds sectors not bytes). | ||
| 1745 | * Just doing this as an interim fix for stable@ because the more | ||
| 1746 | * comprehensive cleanup of switching to sector_t will impact every | ||
| 1747 | * DM target that implements a ->merge hook. | ||
| 1748 | */ | ||
| 1749 | if (max_size > INT_MAX) | ||
| 1750 | max_size = INT_MAX; | ||
| 1745 | 1751 | ||
| 1746 | /* | 1752 | /* |
| 1747 | * merge_bvec_fn() returns number of bytes | 1753 | * merge_bvec_fn() returns number of bytes |
| @@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q, | |||
| 1749 | * max is precomputed maximal io size | 1755 | * max is precomputed maximal io size |
| 1750 | */ | 1756 | */ |
| 1751 | if (max_size && ti->type->merge) | 1757 | if (max_size && ti->type->merge) |
| 1752 | max_size = ti->type->merge(ti, bvm, biovec, max_size); | 1758 | max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); |
| 1753 | /* | 1759 | /* |
| 1754 | * If the target doesn't support merge method and some of the devices | 1760 | * If the target doesn't support merge method and some of the devices |
| 1755 | * provided their merge_bvec method (we know this by looking for the | 1761 | * provided their merge_bvec method (we know this by looking for the |
| @@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq, | |||
| 1971 | dm_kill_unmapped_request(rq, r); | 1977 | dm_kill_unmapped_request(rq, r); |
| 1972 | return r; | 1978 | return r; |
| 1973 | } | 1979 | } |
| 1974 | if (IS_ERR(clone)) | 1980 | if (r != DM_MAPIO_REMAPPED) |
| 1975 | return DM_MAPIO_REQUEUE; | 1981 | return r; |
| 1976 | if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { | 1982 | if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { |
| 1977 | /* -ENOMEM */ | 1983 | /* -ENOMEM */ |
| 1978 | ti->type->release_clone_rq(clone); | 1984 | ti->type->release_clone_rq(clone); |
| @@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 2753 | if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { | 2759 | if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { |
| 2754 | /* clone request is allocated at the end of the pdu */ | 2760 | /* clone request is allocated at the end of the pdu */ |
| 2755 | tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); | 2761 | tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); |
| 2756 | if (!clone_rq(rq, md, tio, GFP_ATOMIC)) | 2762 | (void) clone_rq(rq, md, tio, GFP_ATOMIC); |
| 2757 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
| 2758 | queue_kthread_work(&md->kworker, &tio->work); | 2763 | queue_kthread_work(&md->kworker, &tio->work); |
| 2759 | } else { | 2764 | } else { |
| 2760 | /* Direct call is fine since .queue_rq allows allocations */ | 2765 | /* Direct call is fine since .queue_rq allows allocations */ |
| 2761 | if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) | 2766 | if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { |
| 2762 | dm_requeue_unmapped_original_request(md, rq); | 2767 | /* Undo dm_start_request() before requeuing */ |
| 2768 | rq_completed(md, rq_data_dir(rq), false); | ||
| 2769 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
| 2770 | } | ||
| 2763 | } | 2771 | } |
| 2764 | 2772 | ||
| 2765 | return BLK_MQ_RQ_QUEUE_OK; | 2773 | return BLK_MQ_RQ_QUEUE_OK; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 593a02476c78..27506302eb7a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -4211,12 +4211,12 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
| 4211 | if (!mddev->pers || !mddev->pers->sync_request) | 4211 | if (!mddev->pers || !mddev->pers->sync_request) |
| 4212 | return -EINVAL; | 4212 | return -EINVAL; |
| 4213 | 4213 | ||
| 4214 | if (cmd_match(page, "frozen")) | ||
| 4215 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4216 | else | ||
| 4217 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4218 | 4214 | ||
| 4219 | if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { | 4215 | if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { |
| 4216 | if (cmd_match(page, "frozen")) | ||
| 4217 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4218 | else | ||
| 4219 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4220 | flush_workqueue(md_misc_wq); | 4220 | flush_workqueue(md_misc_wq); |
| 4221 | if (mddev->sync_thread) { | 4221 | if (mddev->sync_thread) { |
| 4222 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 4222 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| @@ -4229,16 +4229,17 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
| 4229 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | 4229 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) |
| 4230 | return -EBUSY; | 4230 | return -EBUSY; |
| 4231 | else if (cmd_match(page, "resync")) | 4231 | else if (cmd_match(page, "resync")) |
| 4232 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4232 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
| 4233 | else if (cmd_match(page, "recover")) { | 4233 | else if (cmd_match(page, "recover")) { |
| 4234 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4234 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); | 4235 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
| 4235 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | ||
| 4236 | } else if (cmd_match(page, "reshape")) { | 4236 | } else if (cmd_match(page, "reshape")) { |
| 4237 | int err; | 4237 | int err; |
| 4238 | if (mddev->pers->start_reshape == NULL) | 4238 | if (mddev->pers->start_reshape == NULL) |
| 4239 | return -EINVAL; | 4239 | return -EINVAL; |
| 4240 | err = mddev_lock(mddev); | 4240 | err = mddev_lock(mddev); |
| 4241 | if (!err) { | 4241 | if (!err) { |
| 4242 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4242 | err = mddev->pers->start_reshape(mddev); | 4243 | err = mddev->pers->start_reshape(mddev); |
| 4243 | mddev_unlock(mddev); | 4244 | mddev_unlock(mddev); |
| 4244 | } | 4245 | } |
| @@ -4250,6 +4251,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
| 4250 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 4251 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 4251 | else if (!cmd_match(page, "repair")) | 4252 | else if (!cmd_match(page, "repair")) |
| 4252 | return -EINVAL; | 4253 | return -EINVAL; |
| 4254 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 4253 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | 4255 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); |
| 4254 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 4256 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 4255 | } | 4257 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b9f2b9cc6060..553d54b87052 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) | |||
| 749 | static bool stripe_can_batch(struct stripe_head *sh) | 749 | static bool stripe_can_batch(struct stripe_head *sh) |
| 750 | { | 750 | { |
| 751 | return test_bit(STRIPE_BATCH_READY, &sh->state) && | 751 | return test_bit(STRIPE_BATCH_READY, &sh->state) && |
| 752 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && | ||
| 752 | is_full_stripe_write(sh); | 753 | is_full_stripe_write(sh); |
| 753 | } | 754 | } |
| 754 | 755 | ||
| @@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
| 837 | < IO_THRESHOLD) | 838 | < IO_THRESHOLD) |
| 838 | md_wakeup_thread(conf->mddev->thread); | 839 | md_wakeup_thread(conf->mddev->thread); |
| 839 | 840 | ||
| 841 | if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { | ||
| 842 | int seq = sh->bm_seq; | ||
| 843 | if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && | ||
| 844 | sh->batch_head->bm_seq > seq) | ||
| 845 | seq = sh->batch_head->bm_seq; | ||
| 846 | set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); | ||
| 847 | sh->batch_head->bm_seq = seq; | ||
| 848 | } | ||
| 849 | |||
| 840 | atomic_inc(&sh->count); | 850 | atomic_inc(&sh->count); |
| 841 | unlock_out: | 851 | unlock_out: |
| 842 | unlock_two_stripes(head, sh); | 852 | unlock_two_stripes(head, sh); |
| @@ -2987,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, | |||
| 2987 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", | 2997 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
| 2988 | (unsigned long long)(*bip)->bi_iter.bi_sector, | 2998 | (unsigned long long)(*bip)->bi_iter.bi_sector, |
| 2989 | (unsigned long long)sh->sector, dd_idx); | 2999 | (unsigned long long)sh->sector, dd_idx); |
| 2990 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2991 | 3000 | ||
| 2992 | if (conf->mddev->bitmap && firstwrite) { | 3001 | if (conf->mddev->bitmap && firstwrite) { |
| 3002 | /* Cannot hold spinlock over bitmap_startwrite, | ||
| 3003 | * but must ensure this isn't added to a batch until | ||
| 3004 | * we have added to the bitmap and set bm_seq. | ||
| 3005 | * So set STRIPE_BITMAP_PENDING to prevent | ||
| 3006 | * batching. | ||
| 3007 | * If multiple add_stripe_bio() calls race here they | ||
| 3008 | * much all set STRIPE_BITMAP_PENDING. So only the first one | ||
| 3009 | * to complete "bitmap_startwrite" gets to set | ||
| 3010 | * STRIPE_BIT_DELAY. This is important as once a stripe | ||
| 3011 | * is added to a batch, STRIPE_BIT_DELAY cannot be changed | ||
| 3012 | * any more. | ||
| 3013 | */ | ||
| 3014 | set_bit(STRIPE_BITMAP_PENDING, &sh->state); | ||
| 3015 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2993 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, | 3016 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, |
| 2994 | STRIPE_SECTORS, 0); | 3017 | STRIPE_SECTORS, 0); |
| 2995 | sh->bm_seq = conf->seq_flush+1; | 3018 | spin_lock_irq(&sh->stripe_lock); |
| 2996 | set_bit(STRIPE_BIT_DELAY, &sh->state); | 3019 | clear_bit(STRIPE_BITMAP_PENDING, &sh->state); |
| 3020 | if (!sh->batch_head) { | ||
| 3021 | sh->bm_seq = conf->seq_flush+1; | ||
| 3022 | set_bit(STRIPE_BIT_DELAY, &sh->state); | ||
| 3023 | } | ||
| 2997 | } | 3024 | } |
| 3025 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2998 | 3026 | ||
| 2999 | if (stripe_can_batch(sh)) | 3027 | if (stripe_can_batch(sh)) |
| 3000 | stripe_add_to_batch_list(conf, sh); | 3028 | stripe_add_to_batch_list(conf, sh); |
| @@ -3392,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
| 3392 | set_bit(STRIPE_HANDLE, &sh->state); | 3420 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3393 | } | 3421 | } |
| 3394 | 3422 | ||
| 3423 | static void break_stripe_batch_list(struct stripe_head *head_sh, | ||
| 3424 | unsigned long handle_flags); | ||
| 3395 | /* handle_stripe_clean_event | 3425 | /* handle_stripe_clean_event |
| 3396 | * any written block on an uptodate or failed drive can be returned. | 3426 | * any written block on an uptodate or failed drive can be returned. |
| 3397 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but | 3427 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
| @@ -3405,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
| 3405 | int discard_pending = 0; | 3435 | int discard_pending = 0; |
| 3406 | struct stripe_head *head_sh = sh; | 3436 | struct stripe_head *head_sh = sh; |
| 3407 | bool do_endio = false; | 3437 | bool do_endio = false; |
| 3408 | int wakeup_nr = 0; | ||
| 3409 | 3438 | ||
| 3410 | for (i = disks; i--; ) | 3439 | for (i = disks; i--; ) |
| 3411 | if (sh->dev[i].written) { | 3440 | if (sh->dev[i].written) { |
| @@ -3494,44 +3523,8 @@ unhash: | |||
| 3494 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 3523 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 3495 | md_wakeup_thread(conf->mddev->thread); | 3524 | md_wakeup_thread(conf->mddev->thread); |
| 3496 | 3525 | ||
| 3497 | if (!head_sh->batch_head || !do_endio) | 3526 | if (head_sh->batch_head && do_endio) |
| 3498 | return; | 3527 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); |
| 3499 | for (i = 0; i < head_sh->disks; i++) { | ||
| 3500 | if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) | ||
| 3501 | wakeup_nr++; | ||
| 3502 | } | ||
| 3503 | while (!list_empty(&head_sh->batch_list)) { | ||
| 3504 | int i; | ||
| 3505 | sh = list_first_entry(&head_sh->batch_list, | ||
| 3506 | struct stripe_head, batch_list); | ||
| 3507 | list_del_init(&sh->batch_list); | ||
| 3508 | |||
| 3509 | set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, | ||
| 3510 | head_sh->state & ~((1 << STRIPE_ACTIVE) | | ||
| 3511 | (1 << STRIPE_PREREAD_ACTIVE) | | ||
| 3512 | STRIPE_EXPAND_SYNC_FLAG)); | ||
| 3513 | sh->check_state = head_sh->check_state; | ||
| 3514 | sh->reconstruct_state = head_sh->reconstruct_state; | ||
| 3515 | for (i = 0; i < sh->disks; i++) { | ||
| 3516 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
| 3517 | wakeup_nr++; | ||
| 3518 | sh->dev[i].flags = head_sh->dev[i].flags; | ||
| 3519 | } | ||
| 3520 | |||
| 3521 | spin_lock_irq(&sh->stripe_lock); | ||
| 3522 | sh->batch_head = NULL; | ||
| 3523 | spin_unlock_irq(&sh->stripe_lock); | ||
| 3524 | if (sh->state & STRIPE_EXPAND_SYNC_FLAG) | ||
| 3525 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 3526 | release_stripe(sh); | ||
| 3527 | } | ||
| 3528 | |||
| 3529 | spin_lock_irq(&head_sh->stripe_lock); | ||
| 3530 | head_sh->batch_head = NULL; | ||
| 3531 | spin_unlock_irq(&head_sh->stripe_lock); | ||
| 3532 | wake_up_nr(&conf->wait_for_overlap, wakeup_nr); | ||
| 3533 | if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) | ||
| 3534 | set_bit(STRIPE_HANDLE, &head_sh->state); | ||
| 3535 | } | 3528 | } |
| 3536 | 3529 | ||
| 3537 | static void handle_stripe_dirtying(struct r5conf *conf, | 3530 | static void handle_stripe_dirtying(struct r5conf *conf, |
| @@ -4172,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
| 4172 | 4165 | ||
| 4173 | static int clear_batch_ready(struct stripe_head *sh) | 4166 | static int clear_batch_ready(struct stripe_head *sh) |
| 4174 | { | 4167 | { |
| 4168 | /* Return '1' if this is a member of a batch, or | ||
| 4169 | * '0' if it is a lone stripe or a head which can now be | ||
| 4170 | * handled. | ||
| 4171 | */ | ||
| 4175 | struct stripe_head *tmp; | 4172 | struct stripe_head *tmp; |
| 4176 | if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) | 4173 | if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) |
| 4177 | return 0; | 4174 | return (sh->batch_head && sh->batch_head != sh); |
| 4178 | spin_lock(&sh->stripe_lock); | 4175 | spin_lock(&sh->stripe_lock); |
| 4179 | if (!sh->batch_head) { | 4176 | if (!sh->batch_head) { |
| 4180 | spin_unlock(&sh->stripe_lock); | 4177 | spin_unlock(&sh->stripe_lock); |
| @@ -4202,38 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh) | |||
| 4202 | return 0; | 4199 | return 0; |
| 4203 | } | 4200 | } |
| 4204 | 4201 | ||
| 4205 | static void check_break_stripe_batch_list(struct stripe_head *sh) | 4202 | static void break_stripe_batch_list(struct stripe_head *head_sh, |
| 4203 | unsigned long handle_flags) | ||
| 4206 | { | 4204 | { |
| 4207 | struct stripe_head *head_sh, *next; | 4205 | struct stripe_head *sh, *next; |
| 4208 | int i; | 4206 | int i; |
| 4209 | 4207 | int do_wakeup = 0; | |
| 4210 | if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) | ||
| 4211 | return; | ||
| 4212 | |||
| 4213 | head_sh = sh; | ||
| 4214 | 4208 | ||
| 4215 | list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { | 4209 | list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { |
| 4216 | 4210 | ||
| 4217 | list_del_init(&sh->batch_list); | 4211 | list_del_init(&sh->batch_list); |
| 4218 | 4212 | ||
| 4219 | set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, | 4213 | WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | |
| 4220 | head_sh->state & ~((1 << STRIPE_ACTIVE) | | 4214 | (1 << STRIPE_SYNCING) | |
| 4221 | (1 << STRIPE_PREREAD_ACTIVE) | | 4215 | (1 << STRIPE_REPLACED) | |
| 4222 | (1 << STRIPE_DEGRADED) | | 4216 | (1 << STRIPE_PREREAD_ACTIVE) | |
| 4223 | STRIPE_EXPAND_SYNC_FLAG)); | 4217 | (1 << STRIPE_DELAYED) | |
| 4218 | (1 << STRIPE_BIT_DELAY) | | ||
| 4219 | (1 << STRIPE_FULL_WRITE) | | ||
| 4220 | (1 << STRIPE_BIOFILL_RUN) | | ||
| 4221 | (1 << STRIPE_COMPUTE_RUN) | | ||
| 4222 | (1 << STRIPE_OPS_REQ_PENDING) | | ||
| 4223 | (1 << STRIPE_DISCARD) | | ||
| 4224 | (1 << STRIPE_BATCH_READY) | | ||
| 4225 | (1 << STRIPE_BATCH_ERR) | | ||
| 4226 | (1 << STRIPE_BITMAP_PENDING))); | ||
| 4227 | WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | | ||
| 4228 | (1 << STRIPE_REPLACED))); | ||
| 4229 | |||
| 4230 | set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | | ||
| 4231 | (1 << STRIPE_DEGRADED)), | ||
| 4232 | head_sh->state & (1 << STRIPE_INSYNC)); | ||
| 4233 | |||
| 4224 | sh->check_state = head_sh->check_state; | 4234 | sh->check_state = head_sh->check_state; |
| 4225 | sh->reconstruct_state = head_sh->reconstruct_state; | 4235 | sh->reconstruct_state = head_sh->reconstruct_state; |
| 4226 | for (i = 0; i < sh->disks; i++) | 4236 | for (i = 0; i < sh->disks; i++) { |
| 4237 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
| 4238 | do_wakeup = 1; | ||
| 4227 | sh->dev[i].flags = head_sh->dev[i].flags & | 4239 | sh->dev[i].flags = head_sh->dev[i].flags & |
| 4228 | (~((1 << R5_WriteError) | (1 << R5_Overlap))); | 4240 | (~((1 << R5_WriteError) | (1 << R5_Overlap))); |
| 4229 | 4241 | } | |
| 4230 | spin_lock_irq(&sh->stripe_lock); | 4242 | spin_lock_irq(&sh->stripe_lock); |
| 4231 | sh->batch_head = NULL; | 4243 | sh->batch_head = NULL; |
| 4232 | spin_unlock_irq(&sh->stripe_lock); | 4244 | spin_unlock_irq(&sh->stripe_lock); |
| 4233 | 4245 | if (handle_flags == 0 || | |
| 4234 | set_bit(STRIPE_HANDLE, &sh->state); | 4246 | sh->state & handle_flags) |
| 4247 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 4235 | release_stripe(sh); | 4248 | release_stripe(sh); |
| 4236 | } | 4249 | } |
| 4250 | spin_lock_irq(&head_sh->stripe_lock); | ||
| 4251 | head_sh->batch_head = NULL; | ||
| 4252 | spin_unlock_irq(&head_sh->stripe_lock); | ||
| 4253 | for (i = 0; i < head_sh->disks; i++) | ||
| 4254 | if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) | ||
| 4255 | do_wakeup = 1; | ||
| 4256 | if (head_sh->state & handle_flags) | ||
| 4257 | set_bit(STRIPE_HANDLE, &head_sh->state); | ||
| 4258 | |||
| 4259 | if (do_wakeup) | ||
| 4260 | wake_up(&head_sh->raid_conf->wait_for_overlap); | ||
| 4237 | } | 4261 | } |
| 4238 | 4262 | ||
| 4239 | static void handle_stripe(struct stripe_head *sh) | 4263 | static void handle_stripe(struct stripe_head *sh) |
| @@ -4258,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 4258 | return; | 4282 | return; |
| 4259 | } | 4283 | } |
| 4260 | 4284 | ||
| 4261 | check_break_stripe_batch_list(sh); | 4285 | if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) |
| 4286 | break_stripe_batch_list(sh, 0); | ||
| 4262 | 4287 | ||
| 4263 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { | 4288 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { |
| 4264 | spin_lock(&sh->stripe_lock); | 4289 | spin_lock(&sh->stripe_lock); |
| @@ -4312,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 4312 | if (s.failed > conf->max_degraded) { | 4337 | if (s.failed > conf->max_degraded) { |
| 4313 | sh->check_state = 0; | 4338 | sh->check_state = 0; |
| 4314 | sh->reconstruct_state = 0; | 4339 | sh->reconstruct_state = 0; |
| 4340 | break_stripe_batch_list(sh, 0); | ||
| 4315 | if (s.to_read+s.to_write+s.written) | 4341 | if (s.to_read+s.to_write+s.written) |
| 4316 | handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); | 4342 | handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); |
| 4317 | if (s.syncing + s.replacing) | 4343 | if (s.syncing + s.replacing) |
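break_stripe_batch_list() is now called with different handle_flags depending on the path: the STRIPE_BATCH_ERR and failure paths pass 0, so every member of the broken batch is queued for another pass through handle_stripe(), while handle_stripe_clean_event() passes STRIPE_EXPAND_SYNC_FLAGS so only members with expand or sync work outstanding are re-handled. Condensed from the hunks above:

	/* error / failure paths: re-handle every batch member */
	if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
		break_stripe_batch_list(sh, 0);

	/* clean completion: only re-handle members whose state still
	 * intersects the expand/sync mask */
	if (head_sh->batch_head && do_endio)
		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);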
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 7dc0dd86074b..896d603ad0da 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
| @@ -337,9 +337,12 @@ enum { | |||
| 337 | STRIPE_ON_RELEASE_LIST, | 337 | STRIPE_ON_RELEASE_LIST, |
| 338 | STRIPE_BATCH_READY, | 338 | STRIPE_BATCH_READY, |
| 339 | STRIPE_BATCH_ERR, | 339 | STRIPE_BATCH_ERR, |
| 340 | STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add | ||
| 341 | * to batch yet. | ||
| 342 | */ | ||
| 340 | }; | 343 | }; |
| 341 | 344 | ||
| 342 | #define STRIPE_EXPAND_SYNC_FLAG \ | 345 | #define STRIPE_EXPAND_SYNC_FLAGS \ |
| 343 | ((1 << STRIPE_EXPAND_SOURCE) |\ | 346 | ((1 << STRIPE_EXPAND_SOURCE) |\ |
| 344 | (1 << STRIPE_EXPAND_READY) |\ | 347 | (1 << STRIPE_EXPAND_READY) |\ |
| 345 | (1 << STRIPE_EXPANDING) |\ | 348 | (1 << STRIPE_EXPANDING) |\ |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a6dcbf850c1f..6f9ffb9026cd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter) | |||
| 2358 | adapter->cfg_num_qs); | 2358 | adapter->cfg_num_qs); |
| 2359 | 2359 | ||
| 2360 | for_all_evt_queues(adapter, eqo, i) { | 2360 | for_all_evt_queues(adapter, eqo, i) { |
| 2361 | int numa_node = dev_to_node(&adapter->pdev->dev); | ||
| 2361 | if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) | 2362 | if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) |
| 2362 | return -ENOMEM; | 2363 | return -ENOMEM; |
| 2363 | cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), | 2364 | cpumask_set_cpu(cpumask_local_spread(i, numa_node), |
| 2364 | eqo->affinity_mask); | 2365 | eqo->affinity_mask); |
| 2365 | |||
| 2366 | netif_napi_add(adapter->netdev, &eqo->napi, be_poll, | 2366 | netif_napi_add(adapter->netdev, &eqo->napi, be_poll, |
| 2367 | BE_NAPI_WEIGHT); | 2367 | BE_NAPI_WEIGHT); |
| 2368 | napi_hash_add(&eqo->napi); | 2368 | napi_hash_add(&eqo->napi); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 32f5ec737472..cf467a9f6cc7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) | |||
| 1501 | { | 1501 | { |
| 1502 | struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; | 1502 | struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; |
| 1503 | int numa_node = priv->mdev->dev->numa_node; | 1503 | int numa_node = priv->mdev->dev->numa_node; |
| 1504 | int ret = 0; | ||
| 1505 | 1504 | ||
| 1506 | if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) | 1505 | if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) |
| 1507 | return -ENOMEM; | 1506 | return -ENOMEM; |
| 1508 | 1507 | ||
| 1509 | ret = cpumask_set_cpu_local_first(ring_idx, numa_node, | 1508 | cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), |
| 1510 | ring->affinity_mask); | 1509 | ring->affinity_mask); |
| 1511 | if (ret) | 1510 | return 0; |
| 1512 | free_cpumask_var(ring->affinity_mask); | ||
| 1513 | |||
| 1514 | return ret; | ||
| 1515 | } | 1511 | } |
| 1516 | 1512 | ||
| 1517 | static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) | 1513 | static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f7bf312fb443..7bed3a88579f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
| @@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
| 144 | ring->queue_index = queue_index; | 144 | ring->queue_index = queue_index; |
| 145 | 145 | ||
| 146 | if (queue_index < priv->num_tx_rings_p_up) | 146 | if (queue_index < priv->num_tx_rings_p_up) |
| 147 | cpumask_set_cpu_local_first(queue_index, | 147 | cpumask_set_cpu(cpumask_local_spread(queue_index, |
| 148 | priv->mdev->dev->numa_node, | 148 | priv->mdev->dev->numa_node), |
| 149 | &ring->affinity_mask); | 149 | &ring->affinity_mask); |
| 150 | 150 | ||
| 151 | *pring = ring; | 151 | *pring = ring; |
| 152 | return 0; | 152 | return 0; |
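The three hunks above (be2net and the two mlx4_en paths) make the same conversion: cpumask_set_cpu_local_first(), which could fail and needed its return value checked, is replaced by cpumask_set_cpu(cpumask_local_spread(i, node), mask). cpumask_local_spread() always returns a valid online CPU, preferring CPUs on the given NUMA node, so the call cannot fail and the error handling disappears. A minimal sketch of the new shape (the helper name is illustrative):

/* Pin the i-th queue to a CPU chosen near the device's NUMA node. */
static int init_queue_affinity(cpumask_var_t *affinity_mask, int i,
			       int numa_node)
{
	if (!zalloc_cpumask_var(affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	/* cpumask_local_spread() maps index i onto an online CPU,
	 * preferring CPUs local to numa_node; it never fails. */
	cpumask_set_cpu(cpumask_local_spread(i, numa_node), *affinity_mask);
	return 0;
}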
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9bb9ad6d4a1b..28f328136f0d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev, | |||
| 2897 | return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); | 2897 | return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); |
| 2898 | } | 2898 | } |
| 2899 | 2899 | ||
| 2900 | static DEVICE_ATTR_RO(hotkey_wakeup_reason); | 2900 | static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); |
| 2901 | 2901 | ||
| 2902 | static void hotkey_wakeup_reason_notify_change(void) | 2902 | static void hotkey_wakeup_reason_notify_change(void) |
| 2903 | { | 2903 | { |
| @@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev, | |||
| 2913 | return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); | 2913 | return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); |
| 2914 | } | 2914 | } |
| 2915 | 2915 | ||
| 2916 | static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete); | 2916 | static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO, |
| 2917 | hotkey_wakeup_hotunplug_complete_show, NULL); | ||
| 2917 | 2918 | ||
| 2918 | static void hotkey_wakeup_hotunplug_complete_notify_change(void) | 2919 | static void hotkey_wakeup_hotunplug_complete_notify_change(void) |
| 2919 | { | 2920 | { |
| @@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = { | |||
| 2978 | &dev_attr_hotkey_enable.attr, | 2979 | &dev_attr_hotkey_enable.attr, |
| 2979 | &dev_attr_hotkey_bios_enabled.attr, | 2980 | &dev_attr_hotkey_bios_enabled.attr, |
| 2980 | &dev_attr_hotkey_bios_mask.attr, | 2981 | &dev_attr_hotkey_bios_mask.attr, |
| 2981 | &dev_attr_hotkey_wakeup_reason.attr, | 2982 | &dev_attr_wakeup_reason.attr, |
| 2982 | &dev_attr_hotkey_wakeup_hotunplug_complete.attr, | 2983 | &dev_attr_wakeup_hotunplug_complete.attr, |
| 2983 | &dev_attr_hotkey_mask.attr, | 2984 | &dev_attr_hotkey_mask.attr, |
| 2984 | &dev_attr_hotkey_all_mask.attr, | 2985 | &dev_attr_hotkey_all_mask.attr, |
| 2985 | &dev_attr_hotkey_recommended_mask.attr, | 2986 | &dev_attr_hotkey_recommended_mask.attr, |
| @@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev, | |||
| 4393 | attr, buf, count); | 4394 | attr, buf, count); |
| 4394 | } | 4395 | } |
| 4395 | 4396 | ||
| 4396 | static DEVICE_ATTR_RW(wan_enable); | 4397 | static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO, |
| 4398 | wan_enable_show, wan_enable_store); | ||
| 4397 | 4399 | ||
| 4398 | /* --------------------------------------------------------------------- */ | 4400 | /* --------------------------------------------------------------------- */ |
| 4399 | 4401 | ||
| 4400 | static struct attribute *wan_attributes[] = { | 4402 | static struct attribute *wan_attributes[] = { |
| 4401 | &dev_attr_wan_enable.attr, | 4403 | &dev_attr_wwan_enable.attr, |
| 4402 | NULL | 4404 | NULL |
| 4403 | }; | 4405 | }; |
| 4404 | 4406 | ||
| @@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev, | |||
| 8138 | return count; | 8140 | return count; |
| 8139 | } | 8141 | } |
| 8140 | 8142 | ||
| 8141 | static DEVICE_ATTR_RW(fan_pwm1_enable); | 8143 | static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, |
| 8144 | fan_pwm1_enable_show, fan_pwm1_enable_store); | ||
| 8142 | 8145 | ||
| 8143 | /* sysfs fan pwm1 ------------------------------------------------------ */ | 8146 | /* sysfs fan pwm1 ------------------------------------------------------ */ |
| 8144 | static ssize_t fan_pwm1_show(struct device *dev, | 8147 | static ssize_t fan_pwm1_show(struct device *dev, |
| @@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev, | |||
| 8198 | return (rc) ? rc : count; | 8201 | return (rc) ? rc : count; |
| 8199 | } | 8202 | } |
| 8200 | 8203 | ||
| 8201 | static DEVICE_ATTR_RW(fan_pwm1); | 8204 | static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store); |
| 8202 | 8205 | ||
| 8203 | /* sysfs fan fan1_input ------------------------------------------------ */ | 8206 | /* sysfs fan fan1_input ------------------------------------------------ */ |
| 8204 | static ssize_t fan_fan1_input_show(struct device *dev, | 8207 | static ssize_t fan_fan1_input_show(struct device *dev, |
| @@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev, | |||
| 8215 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); | 8218 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); |
| 8216 | } | 8219 | } |
| 8217 | 8220 | ||
| 8218 | static DEVICE_ATTR_RO(fan_fan1_input); | 8221 | static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); |
| 8219 | 8222 | ||
| 8220 | /* sysfs fan fan2_input ------------------------------------------------ */ | 8223 | /* sysfs fan fan2_input ------------------------------------------------ */ |
| 8221 | static ssize_t fan_fan2_input_show(struct device *dev, | 8224 | static ssize_t fan_fan2_input_show(struct device *dev, |
| @@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev, | |||
| 8232 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); | 8235 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); |
| 8233 | } | 8236 | } |
| 8234 | 8237 | ||
| 8235 | static DEVICE_ATTR_RO(fan_fan2_input); | 8238 | static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); |
| 8236 | 8239 | ||
| 8237 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ | 8240 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ |
| 8238 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, | 8241 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, |
| @@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO, | |||
| 8265 | 8268 | ||
| 8266 | /* --------------------------------------------------------------------- */ | 8269 | /* --------------------------------------------------------------------- */ |
| 8267 | static struct attribute *fan_attributes[] = { | 8270 | static struct attribute *fan_attributes[] = { |
| 8268 | &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, | 8271 | &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr, |
| 8269 | &dev_attr_fan_fan1_input.attr, | 8272 | &dev_attr_fan1_input.attr, |
| 8270 | NULL, /* for fan2_input */ | 8273 | NULL, /* for fan2_input */ |
| 8271 | NULL | 8274 | NULL |
| 8272 | }; | 8275 | }; |
| @@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
| 8400 | if (tp_features.second_fan) { | 8403 | if (tp_features.second_fan) { |
| 8401 | /* attach second fan tachometer */ | 8404 | /* attach second fan tachometer */ |
| 8402 | fan_attributes[ARRAY_SIZE(fan_attributes)-2] = | 8405 | fan_attributes[ARRAY_SIZE(fan_attributes)-2] = |
| 8403 | &dev_attr_fan_fan2_input.attr; | 8406 | &dev_attr_fan2_input.attr; |
| 8404 | } | 8407 | } |
| 8405 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, | 8408 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, |
| 8406 | &fan_attr_group); | 8409 | &fan_attr_group); |
| @@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, | |||
| 8848 | return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); | 8851 | return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); |
| 8849 | } | 8852 | } |
| 8850 | 8853 | ||
| 8851 | static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name); | 8854 | static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL); |
| 8852 | 8855 | ||
| 8853 | /* --------------------------------------------------------------------- */ | 8856 | /* --------------------------------------------------------------------- */ |
| 8854 | 8857 | ||
| @@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void) | |||
| 9390 | hwmon_device_unregister(tpacpi_hwmon); | 9393 | hwmon_device_unregister(tpacpi_hwmon); |
| 9391 | 9394 | ||
| 9392 | if (tp_features.sensors_pdev_attrs_registered) | 9395 | if (tp_features.sensors_pdev_attrs_registered) |
| 9393 | device_remove_file(&tpacpi_sensors_pdev->dev, | 9396 | device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); |
| 9394 | &dev_attr_thinkpad_acpi_pdev_name); | ||
| 9395 | if (tpacpi_sensors_pdev) | 9397 | if (tpacpi_sensors_pdev) |
| 9396 | platform_device_unregister(tpacpi_sensors_pdev); | 9398 | platform_device_unregister(tpacpi_sensors_pdev); |
| 9397 | if (tpacpi_pdev) | 9399 | if (tpacpi_pdev) |
| @@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void) | |||
| 9512 | thinkpad_acpi_module_exit(); | 9514 | thinkpad_acpi_module_exit(); |
| 9513 | return ret; | 9515 | return ret; |
| 9514 | } | 9516 | } |
| 9515 | ret = device_create_file(&tpacpi_sensors_pdev->dev, | 9517 | ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); |
| 9516 | &dev_attr_thinkpad_acpi_pdev_name); | ||
| 9517 | if (ret) { | 9518 | if (ret) { |
| 9518 | pr_err("unable to create sysfs hwmon device attributes\n"); | 9519 | pr_err("unable to create sysfs hwmon device attributes\n"); |
| 9519 | thinkpad_acpi_module_exit(); | 9520 | thinkpad_acpi_module_exit(); |
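All of the thinkpad_acpi hunks above move from DEVICE_ATTR_RO()/DEVICE_ATTR_RW() back to the explicit DEVICE_ATTR() form. The _RO/_RW helpers derive both the sysfs file name and the dev_attr_<name> variable from the C identifier, which here had tied the user-visible names to the driver's internal function prefixes (hotkey_wakeup_reason, fan_pwm1, ...). The explicit macro keeps the long-standing sysfs names (wakeup_reason, pwm1, ...) while still using the prefixed show functions. Roughly, with an illustrative foo/bar attribute:

/* DEVICE_ATTR_RO(foo) expects foo_show(), creates dev_attr_foo and a
 * sysfs file literally named "foo".  The explicit form below creates
 * dev_attr_bar and a file named "bar" while reusing foo_show().
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR(bar, S_IRUGO, foo_show, NULL);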
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68c2002e78bf..5c9e680aa375 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work) | |||
| 1020 | struct se_portal_group *se_tpg = &base_tpg->se_tpg; | 1020 | struct se_portal_group *se_tpg = &base_tpg->se_tpg; |
| 1021 | struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; | 1021 | struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; |
| 1022 | 1022 | ||
| 1023 | if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, | 1023 | if (!target_depend_item(&se_tpg->tpg_group.cg_item)) { |
| 1024 | &se_tpg->tpg_group.cg_item)) { | ||
| 1025 | atomic_set(&base_tpg->lport_tpg_enabled, 1); | 1024 | atomic_set(&base_tpg->lport_tpg_enabled, 1); |
| 1026 | qlt_enable_vha(base_vha); | 1025 | qlt_enable_vha(base_vha); |
| 1027 | } | 1026 | } |
| @@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) | |||
| 1037 | 1036 | ||
| 1038 | if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { | 1037 | if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { |
| 1039 | atomic_set(&base_tpg->lport_tpg_enabled, 0); | 1038 | atomic_set(&base_tpg->lport_tpg_enabled, 0); |
| 1040 | configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, | 1039 | target_undepend_item(&se_tpg->tpg_group.cg_item); |
| 1041 | &se_tpg->tpg_group.cg_item); | ||
| 1042 | } | 1040 | } |
| 1043 | complete(&base_tpg->tpg_base_comp); | 1041 | complete(&base_tpg->tpg_base_comp); |
| 1044 | } | 1042 | } |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 34871a628b11..74e6114ff18f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) | |||
| 230 | * Here we serialize access across the TIQN+TPG Tuple. | 230 | * Here we serialize access across the TIQN+TPG Tuple. |
| 231 | */ | 231 | */ |
| 232 | ret = down_interruptible(&tpg->np_login_sem); | 232 | ret = down_interruptible(&tpg->np_login_sem); |
| 233 | if ((ret != 0) || signal_pending(current)) | 233 | if (ret != 0) |
| 234 | return -1; | 234 | return -1; |
| 235 | 235 | ||
| 236 | spin_lock_bh(&tpg->tpg_state_lock); | 236 | spin_lock_bh(&tpg->tpg_state_lock); |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8ce94ff744e6..70d799dfab03 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1( | |||
| 346 | if (IS_ERR(sess->se_sess)) { | 346 | if (IS_ERR(sess->se_sess)) { |
| 347 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | 347 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
| 348 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | 348 | ISCSI_LOGIN_STATUS_NO_RESOURCES); |
| 349 | kfree(sess->sess_ops); | ||
| 349 | kfree(sess); | 350 | kfree(sess); |
| 350 | return -ENOMEM; | 351 | return -ENOMEM; |
| 351 | } | 352 | } |
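The one-line addition above plugs a leak: sess->sess_ops is allocated earlier in iscsi_login_zero_tsih_s1(), so the error path that frees sess has to free it as well. The usual way to keep such paths balanced is to unwind in reverse allocation order; a generic sketch with illustrative types and an illustrative alloc_backend_session() helper:

static int setup_session(void)
{
	struct sess {
		void *sess_ops;
		void *se_sess;
	} *sess;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return -ENOMEM;

	sess->sess_ops = kzalloc(64, GFP_KERNEL);
	if (!sess->sess_ops)
		goto free_sess;

	sess->se_sess = alloc_backend_session();	/* illustrative helper */
	if (!sess->se_sess)
		goto free_ops;				/* was leaking sess_ops */

	return 0;

free_ops:
	kfree(sess->sess_ops);
free_sess:
	kfree(sess);
	return -ENOMEM;
}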
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index e8a240818353..5e3295fe404d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
| @@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( | |||
| 161 | int iscsit_get_tpg( | 161 | int iscsit_get_tpg( |
| 162 | struct iscsi_portal_group *tpg) | 162 | struct iscsi_portal_group *tpg) |
| 163 | { | 163 | { |
| 164 | int ret; | 164 | return mutex_lock_interruptible(&tpg->tpg_access_lock); |
| 165 | |||
| 166 | ret = mutex_lock_interruptible(&tpg->tpg_access_lock); | ||
| 167 | return ((ret != 0) || signal_pending(current)) ? -1 : 0; | ||
| 168 | } | 165 | } |
| 169 | 166 | ||
| 170 | void iscsit_put_tpg(struct iscsi_portal_group *tpg) | 167 | void iscsit_put_tpg(struct iscsi_portal_group *tpg) |
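Both hunks above drop a redundant signal_pending(current) test after an interruptible acquisition. down_interruptible() and mutex_lock_interruptible() already return non-zero (-EINTR) when the wait is interrupted; when they return 0 the semaphore or mutex is held, so bailing out because a signal happens to be pending afterwards returns an error with the lock still held and the matching up()/mutex_unlock() never reached. The corrected shape, condensed from the iscsit_access_np() hunk, is simply to trust the return value:

	ret = down_interruptible(&tpg->np_login_sem);
	if (ret != 0)
		return -1;	/* interrupted: semaphore not taken, nothing to undo */

	/* ... serialized section ... */

	up(&tpg->np_login_sem);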
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 75cbde1f7c5b..4f8d4d459aa4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
| @@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd) | |||
| 704 | 704 | ||
| 705 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) | 705 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) |
| 706 | return 0; | 706 | return 0; |
| 707 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 707 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 708 | return 0; | 708 | return 0; |
| 709 | 709 | ||
| 710 | if (!port) | 710 | if (!port) |
| @@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata( | |||
| 2377 | 2377 | ||
| 2378 | int core_setup_alua(struct se_device *dev) | 2378 | int core_setup_alua(struct se_device *dev) |
| 2379 | { | 2379 | { |
| 2380 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && | 2380 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && |
| 2381 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { | 2381 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { |
| 2382 | struct t10_alua_lu_gp_member *lu_gp_mem; | 2382 | struct t10_alua_lu_gp_member *lu_gp_mem; |
| 2383 | 2383 | ||
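This file and the matching hunks in target_core_configfs.c, target_core_device.c, target_core_pr.c and target_core_transport.c below replace the transport_type == TRANSPORT_PLUGIN_PHBA_PDEV comparison with a capability test on transport_flags. A flag can be set by any backend that wants the core to skip ALUA, persistent-reservation and task-attribute emulation, instead of the core enumerating plugin types. Condensed from the hunks in this diff:

/* Backend declares the capability ... */
static struct se_subsystem_api pscsi_template = {
	.name			= "pscsi",
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	/* ... */
};

/* ... and core paths only test the bit: */
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
	return 0;	/* passthrough backend handles this itself */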
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index ddaf76a4ac2a..e7b0430a0575 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
| @@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric( | |||
| 212 | 212 | ||
| 213 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" | 213 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" |
| 214 | " %s\n", tf->tf_group.cg_item.ci_name); | 214 | " %s\n", tf->tf_group.cg_item.ci_name); |
| 215 | /* | ||
| 216 | * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() | ||
| 217 | */ | ||
| 218 | tf->tf_ops.tf_subsys = tf->tf_subsys; | ||
| 219 | tf->tf_fabric = &tf->tf_group.cg_item; | 215 | tf->tf_fabric = &tf->tf_group.cg_item; |
| 220 | pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" | 216 | pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" |
| 221 | " for %s\n", name); | 217 | " for %s\n", name); |
| @@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = { | |||
| 291 | }, | 287 | }, |
| 292 | }; | 288 | }; |
| 293 | 289 | ||
| 294 | struct configfs_subsystem *target_core_subsystem[] = { | 290 | int target_depend_item(struct config_item *item) |
| 295 | &target_core_fabrics, | 291 | { |
| 296 | NULL, | 292 | return configfs_depend_item(&target_core_fabrics, item); |
| 297 | }; | 293 | } |
| 294 | EXPORT_SYMBOL(target_depend_item); | ||
| 295 | |||
| 296 | void target_undepend_item(struct config_item *item) | ||
| 297 | { | ||
| 298 | return configfs_undepend_item(&target_core_fabrics, item); | ||
| 299 | } | ||
| 300 | EXPORT_SYMBOL(target_undepend_item); | ||
| 298 | 301 | ||
| 299 | /*############################################################################## | 302 | /*############################################################################## |
| 300 | // Start functions called by external Target Fabrics Modules | 303 | // Start functions called by external Target Fabrics Modules |
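The new exports wrap configfs_depend_item()/configfs_undepend_item() around the single target_core_fabrics subsystem, so callers no longer need the tf_subsys pointer that the following hunks remove. A caller just pins and later unpins a config item; condensed from the tcm_qla2xxx conversion above:

	if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
		/* TPG cannot be removed via configfs while we depend on it */
		qlt_enable_vha(base_vha);
	}
	/* later, when the dependency is no longer needed: */
	target_undepend_item(&se_tpg->tpg_group.cg_item);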
| @@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo) | |||
| 467 | * struct target_fabric_configfs->tf_cit_tmpl | 470 | * struct target_fabric_configfs->tf_cit_tmpl |
| 468 | */ | 471 | */ |
| 469 | tf->tf_module = fo->module; | 472 | tf->tf_module = fo->module; |
| 470 | tf->tf_subsys = target_core_subsystem[0]; | ||
| 471 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); | 473 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); |
| 472 | 474 | ||
| 473 | tf->tf_ops = *fo; | 475 | tf->tf_ops = *fo; |
| @@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, | |||
| 809 | { | 811 | { |
| 810 | int ret; | 812 | int ret; |
| 811 | 813 | ||
| 812 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 814 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 813 | return sprintf(page, "Passthrough\n"); | 815 | return sprintf(page, "Passthrough\n"); |
| 814 | 816 | ||
| 815 | spin_lock(&dev->dev_reservation_lock); | 817 | spin_lock(&dev->dev_reservation_lock); |
| @@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type); | |||
| 960 | static ssize_t target_core_dev_pr_show_attr_res_type( | 962 | static ssize_t target_core_dev_pr_show_attr_res_type( |
| 961 | struct se_device *dev, char *page) | 963 | struct se_device *dev, char *page) |
| 962 | { | 964 | { |
| 963 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 965 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 964 | return sprintf(page, "SPC_PASSTHROUGH\n"); | 966 | return sprintf(page, "SPC_PASSTHROUGH\n"); |
| 965 | else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | 967 | else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) |
| 966 | return sprintf(page, "SPC2_RESERVATIONS\n"); | 968 | return sprintf(page, "SPC2_RESERVATIONS\n"); |
| @@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type); | |||
| 973 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( | 975 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( |
| 974 | struct se_device *dev, char *page) | 976 | struct se_device *dev, char *page) |
| 975 | { | 977 | { |
| 976 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 978 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 977 | return 0; | 979 | return 0; |
| 978 | 980 | ||
| 979 | return sprintf(page, "APTPL Bit Status: %s\n", | 981 | return sprintf(page, "APTPL Bit Status: %s\n", |
| @@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active); | |||
| 988 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( | 990 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( |
| 989 | struct se_device *dev, char *page) | 991 | struct se_device *dev, char *page) |
| 990 | { | 992 | { |
| 991 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 993 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 992 | return 0; | 994 | return 0; |
| 993 | 995 | ||
| 994 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); | 996 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); |
| @@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
| 1035 | u16 port_rpti = 0, tpgt = 0; | 1037 | u16 port_rpti = 0, tpgt = 0; |
| 1036 | u8 type = 0, scope; | 1038 | u8 type = 0, scope; |
| 1037 | 1039 | ||
| 1038 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 1040 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 1039 | return 0; | 1041 | return 0; |
| 1040 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | 1042 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) |
| 1041 | return 0; | 1043 | return 0; |
| @@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void) | |||
| 2870 | { | 2872 | { |
| 2871 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 2873 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
| 2872 | struct config_group *lu_gp_cg = NULL; | 2874 | struct config_group *lu_gp_cg = NULL; |
| 2873 | struct configfs_subsystem *subsys; | 2875 | struct configfs_subsystem *subsys = &target_core_fabrics; |
| 2874 | struct t10_alua_lu_gp *lu_gp; | 2876 | struct t10_alua_lu_gp *lu_gp; |
| 2875 | int ret; | 2877 | int ret; |
| 2876 | 2878 | ||
| @@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void) | |||
| 2878 | " Engine: %s on %s/%s on "UTS_RELEASE"\n", | 2880 | " Engine: %s on %s/%s on "UTS_RELEASE"\n", |
| 2879 | TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); | 2881 | TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); |
| 2880 | 2882 | ||
| 2881 | subsys = target_core_subsystem[0]; | ||
| 2882 | config_group_init(&subsys->su_group); | 2883 | config_group_init(&subsys->su_group); |
| 2883 | mutex_init(&subsys->su_mutex); | 2884 | mutex_init(&subsys->su_mutex); |
| 2884 | 2885 | ||
| @@ -3008,13 +3009,10 @@ out_global: | |||
| 3008 | 3009 | ||
| 3009 | static void __exit target_core_exit_configfs(void) | 3010 | static void __exit target_core_exit_configfs(void) |
| 3010 | { | 3011 | { |
| 3011 | struct configfs_subsystem *subsys; | ||
| 3012 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; | 3012 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; |
| 3013 | struct config_item *item; | 3013 | struct config_item *item; |
| 3014 | int i; | 3014 | int i; |
| 3015 | 3015 | ||
| 3016 | subsys = target_core_subsystem[0]; | ||
| 3017 | |||
| 3018 | lu_gp_cg = &alua_lu_gps_group; | 3016 | lu_gp_cg = &alua_lu_gps_group; |
| 3019 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { | 3017 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { |
| 3020 | item = &lu_gp_cg->default_groups[i]->cg_item; | 3018 | item = &lu_gp_cg->default_groups[i]->cg_item; |
| @@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void) | |||
| 3045 | * We expect subsys->su_group.default_groups to be released | 3043 | * We expect subsys->su_group.default_groups to be released |
| 3046 | * by configfs subsystem provider logic.. | 3044 | * by configfs subsystem provider logic.. |
| 3047 | */ | 3045 | */ |
| 3048 | configfs_unregister_subsystem(subsys); | 3046 | configfs_unregister_subsystem(&target_core_fabrics); |
| 3049 | kfree(subsys->su_group.default_groups); | 3047 | kfree(target_core_fabrics.su_group.default_groups); |
| 3050 | 3048 | ||
| 3051 | core_alua_free_lu_gp(default_lu_gp); | 3049 | core_alua_free_lu_gp(default_lu_gp); |
| 3052 | default_lu_gp = NULL; | 3050 | default_lu_gp = NULL; |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7faa6aef9a4d..ce5f768181ff 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/kthread.h> | 33 | #include <linux/kthread.h> |
| 34 | #include <linux/in.h> | 34 | #include <linux/in.h> |
| 35 | #include <linux/export.h> | 35 | #include <linux/export.h> |
| 36 | #include <asm/unaligned.h> | ||
| 36 | #include <net/sock.h> | 37 | #include <net/sock.h> |
| 37 | #include <net/tcp.h> | 38 | #include <net/tcp.h> |
| 38 | #include <scsi/scsi.h> | 39 | #include <scsi/scsi.h> |
| @@ -527,7 +528,7 @@ static void core_export_port( | |||
| 527 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | 528 | list_add_tail(&port->sep_list, &dev->dev_sep_list); |
| 528 | spin_unlock(&dev->se_port_lock); | 529 | spin_unlock(&dev->se_port_lock); |
| 529 | 530 | ||
| 530 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && | 531 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && |
| 531 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { | 532 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { |
| 532 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | 533 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); |
| 533 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | 534 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { |
| @@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev) | |||
| 1603 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | 1604 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI |
| 1604 | * passthrough because this is being provided by the backend LLD. | 1605 | * passthrough because this is being provided by the backend LLD. |
| 1605 | */ | 1606 | */ |
| 1606 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | 1607 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) { |
| 1607 | strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); | 1608 | strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
| 1608 | strncpy(&dev->t10_wwn.model[0], | 1609 | strncpy(&dev->t10_wwn.model[0], |
| 1609 | dev->transport->inquiry_prod, 16); | 1610 | dev->transport->inquiry_prod, 16); |
| @@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void) | |||
| 1707 | target_free_device(g_lun0_dev); | 1708 | target_free_device(g_lun0_dev); |
| 1708 | core_delete_hba(hba); | 1709 | core_delete_hba(hba); |
| 1709 | } | 1710 | } |
| 1711 | |||
| 1712 | /* | ||
| 1713 | * Common CDB parsing for kernel and user passthrough. | ||
| 1714 | */ | ||
| 1715 | sense_reason_t | ||
| 1716 | passthrough_parse_cdb(struct se_cmd *cmd, | ||
| 1717 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) | ||
| 1718 | { | ||
| 1719 | unsigned char *cdb = cmd->t_task_cdb; | ||
| 1720 | |||
| 1721 | /* | ||
| 1722 | * Clear a lun set in the cdb if the initiator talking to us spoke | ||
| 1723 | * an old standards version, as we can't assume the underlying device | ||
| 1724 | * won't choke up on it. | ||
| 1725 | */ | ||
| 1726 | switch (cdb[0]) { | ||
| 1727 | case READ_10: /* SBC - RDProtect */ | ||
| 1728 | case READ_12: /* SBC - RDProtect */ | ||
| 1729 | case READ_16: /* SBC - RDProtect */ | ||
| 1730 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | ||
| 1731 | case VERIFY: /* SBC - VRProtect */ | ||
| 1732 | case VERIFY_16: /* SBC - VRProtect */ | ||
| 1733 | case WRITE_VERIFY: /* SBC - VRProtect */ | ||
| 1734 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | ||
| 1735 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ | ||
| 1736 | break; | ||
| 1737 | default: | ||
| 1738 | cdb[1] &= 0x1f; /* clear logical unit number */ | ||
| 1739 | break; | ||
| 1740 | } | ||
| 1741 | |||
| 1742 | /* | ||
| 1743 | * For REPORT LUNS we always need to emulate the response, for everything | ||
| 1744 | * else, pass it up. | ||
| 1745 | */ | ||
| 1746 | if (cdb[0] == REPORT_LUNS) { | ||
| 1747 | cmd->execute_cmd = spc_emulate_report_luns; | ||
| 1748 | return TCM_NO_SENSE; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | /* Set DATA_CDB flag for ops that should have it */ | ||
| 1752 | switch (cdb[0]) { | ||
| 1753 | case READ_6: | ||
| 1754 | case READ_10: | ||
| 1755 | case READ_12: | ||
| 1756 | case READ_16: | ||
| 1757 | case WRITE_6: | ||
| 1758 | case WRITE_10: | ||
| 1759 | case WRITE_12: | ||
| 1760 | case WRITE_16: | ||
| 1761 | case WRITE_VERIFY: | ||
| 1762 | case WRITE_VERIFY_12: | ||
| 1763 | case 0x8e: /* WRITE_VERIFY_16 */ | ||
| 1764 | case COMPARE_AND_WRITE: | ||
| 1765 | case XDWRITEREAD_10: | ||
| 1766 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
| 1767 | break; | ||
| 1768 | case VARIABLE_LENGTH_CMD: | ||
| 1769 | switch (get_unaligned_be16(&cdb[8])) { | ||
| 1770 | case READ_32: | ||
| 1771 | case WRITE_32: | ||
| 1772 | case 0x0c: /* WRITE_VERIFY_32 */ | ||
| 1773 | case XDWRITEREAD_32: | ||
| 1774 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
| 1775 | break; | ||
| 1776 | } | ||
| 1777 | } | ||
| 1778 | |||
| 1779 | cmd->execute_cmd = exec_cmd; | ||
| 1780 | |||
| 1781 | return TCM_NO_SENSE; | ||
| 1782 | } | ||
| 1783 | EXPORT_SYMBOL(passthrough_parse_cdb); | ||
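With the CDB handling for passthrough backends factored into the core, a backend's parse_cdb hook shrinks to any backend-specific checks plus a call into the helper with its own execute function; the pSCSI conversion further down in this diff does exactly that. A sketch, with illustrative my_* names:

static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_BIDI)	/* backend limitation */
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	return passthrough_parse_cdb(cmd, my_execute_cmd);
}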
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index f7e6e51aed36..3f27bfd816d8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
| @@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = { | |||
| 958 | .inquiry_prod = "FILEIO", | 958 | .inquiry_prod = "FILEIO", |
| 959 | .inquiry_rev = FD_VERSION, | 959 | .inquiry_rev = FD_VERSION, |
| 960 | .owner = THIS_MODULE, | 960 | .owner = THIS_MODULE, |
| 961 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | ||
| 962 | .attach_hba = fd_attach_hba, | 961 | .attach_hba = fd_attach_hba, |
| 963 | .detach_hba = fd_detach_hba, | 962 | .detach_hba = fd_detach_hba, |
| 964 | .alloc_device = fd_alloc_device, | 963 | .alloc_device = fd_alloc_device, |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 1b7947c2510f..8c965683789f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
| @@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = { | |||
| 904 | .inquiry_prod = "IBLOCK", | 904 | .inquiry_prod = "IBLOCK", |
| 905 | .inquiry_rev = IBLOCK_VERSION, | 905 | .inquiry_rev = IBLOCK_VERSION, |
| 906 | .owner = THIS_MODULE, | 906 | .owner = THIS_MODULE, |
| 907 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | ||
| 908 | .attach_hba = iblock_attach_hba, | 907 | .attach_hba = iblock_attach_hba, |
| 909 | .detach_hba = iblock_detach_hba, | 908 | .detach_hba = iblock_detach_hba, |
| 910 | .alloc_device = iblock_alloc_device, | 909 | .alloc_device = iblock_alloc_device, |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 874a9bc988d8..68bd7f5d9f73 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
| @@ -4,9 +4,6 @@ | |||
| 4 | /* target_core_alua.c */ | 4 | /* target_core_alua.c */ |
| 5 | extern struct t10_alua_lu_gp *default_lu_gp; | 5 | extern struct t10_alua_lu_gp *default_lu_gp; |
| 6 | 6 | ||
| 7 | /* target_core_configfs.c */ | ||
| 8 | extern struct configfs_subsystem *target_core_subsystem[]; | ||
| 9 | |||
| 10 | /* target_core_device.c */ | 7 | /* target_core_device.c */ |
| 11 | extern struct mutex g_device_mutex; | 8 | extern struct mutex g_device_mutex; |
| 12 | extern struct list_head g_device_list; | 9 | extern struct list_head g_device_list; |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c1aa9655e96e..a15411c79ae9 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
| @@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations( | |||
| 1367 | 1367 | ||
| 1368 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) | 1368 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) |
| 1369 | { | 1369 | { |
| 1370 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, | 1370 | return target_depend_item(&tpg->tpg_group.cg_item); |
| 1371 | &tpg->tpg_group.cg_item); | ||
| 1372 | } | 1371 | } |
| 1373 | 1372 | ||
| 1374 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) | 1373 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) |
| 1375 | { | 1374 | { |
| 1376 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, | 1375 | target_undepend_item(&tpg->tpg_group.cg_item); |
| 1377 | &tpg->tpg_group.cg_item); | ||
| 1378 | |||
| 1379 | atomic_dec_mb(&tpg->tpg_pr_ref_count); | 1376 | atomic_dec_mb(&tpg->tpg_pr_ref_count); |
| 1380 | } | 1377 | } |
| 1381 | 1378 | ||
| 1382 | static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) | 1379 | static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) |
| 1383 | { | 1380 | { |
| 1384 | struct se_portal_group *tpg = nacl->se_tpg; | ||
| 1385 | |||
| 1386 | if (nacl->dynamic_node_acl) | 1381 | if (nacl->dynamic_node_acl) |
| 1387 | return 0; | 1382 | return 0; |
| 1388 | 1383 | return target_depend_item(&nacl->acl_group.cg_item); | |
| 1389 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, | ||
| 1390 | &nacl->acl_group.cg_item); | ||
| 1391 | } | 1384 | } |
| 1392 | 1385 | ||
| 1393 | static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) | 1386 | static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) |
| 1394 | { | 1387 | { |
| 1395 | struct se_portal_group *tpg = nacl->se_tpg; | 1388 | if (!nacl->dynamic_node_acl) |
| 1396 | 1389 | target_undepend_item(&nacl->acl_group.cg_item); | |
| 1397 | if (nacl->dynamic_node_acl) { | ||
| 1398 | atomic_dec_mb(&nacl->acl_pr_ref_count); | ||
| 1399 | return; | ||
| 1400 | } | ||
| 1401 | |||
| 1402 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, | ||
| 1403 | &nacl->acl_group.cg_item); | ||
| 1404 | |||
| 1405 | atomic_dec_mb(&nacl->acl_pr_ref_count); | 1390 | atomic_dec_mb(&nacl->acl_pr_ref_count); |
| 1406 | } | 1391 | } |
| 1407 | 1392 | ||
| @@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | |||
| 1419 | nacl = lun_acl->se_lun_nacl; | 1404 | nacl = lun_acl->se_lun_nacl; |
| 1420 | tpg = nacl->se_tpg; | 1405 | tpg = nacl->se_tpg; |
| 1421 | 1406 | ||
| 1422 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, | 1407 | return target_depend_item(&lun_acl->se_lun_group.cg_item); |
| 1423 | &lun_acl->se_lun_group.cg_item); | ||
| 1424 | } | 1408 | } |
| 1425 | 1409 | ||
| 1426 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | 1410 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) |
| @@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | |||
| 1438 | nacl = lun_acl->se_lun_nacl; | 1422 | nacl = lun_acl->se_lun_nacl; |
| 1439 | tpg = nacl->se_tpg; | 1423 | tpg = nacl->se_tpg; |
| 1440 | 1424 | ||
| 1441 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, | 1425 | target_undepend_item(&lun_acl->se_lun_group.cg_item); |
| 1442 | &lun_acl->se_lun_group.cg_item); | ||
| 1443 | |||
| 1444 | atomic_dec_mb(&se_deve->pr_ref_count); | 1426 | atomic_dec_mb(&se_deve->pr_ref_count); |
| 1445 | } | 1427 | } |
| 1446 | 1428 | ||
| @@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd) | |||
| 4111 | return 0; | 4093 | return 0; |
| 4112 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) | 4094 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) |
| 4113 | return 0; | 4095 | return 0; |
| 4114 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 4096 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 4115 | return 0; | 4097 | return 0; |
| 4116 | 4098 | ||
| 4117 | spin_lock(&dev->dev_reservation_lock); | 4099 | spin_lock(&dev->dev_reservation_lock); |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index f6c954c4635f..ecc5eaef13d6 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
| @@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) | |||
| 521 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 521 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
| 522 | return -EINVAL; | 522 | return -EINVAL; |
| 523 | } | 523 | } |
| 524 | pdv->pdv_lld_host = sh; | ||
| 524 | } | 525 | } |
| 525 | } else { | 526 | } else { |
| 526 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { | 527 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { |
| @@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev) | |||
| 603 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && | 604 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && |
| 604 | (phv->phv_lld_host != NULL)) | 605 | (phv->phv_lld_host != NULL)) |
| 605 | scsi_host_put(phv->phv_lld_host); | 606 | scsi_host_put(phv->phv_lld_host); |
| 607 | else if (pdv->pdv_lld_host) | ||
| 608 | scsi_host_put(pdv->pdv_lld_host); | ||
| 606 | 609 | ||
| 607 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) | 610 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
| 608 | scsi_device_put(sd); | 611 | scsi_device_put(sd); |
| @@ -970,64 +973,13 @@ fail: | |||
| 970 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 973 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
| 971 | } | 974 | } |
| 972 | 975 | ||
| 973 | /* | ||
| 974 | * Clear a lun set in the cdb if the initiator talking to use spoke | ||
| 975 | * and old standards version, as we can't assume the underlying device | ||
| 976 | * won't choke up on it. | ||
| 977 | */ | ||
| 978 | static inline void pscsi_clear_cdb_lun(unsigned char *cdb) | ||
| 979 | { | ||
| 980 | switch (cdb[0]) { | ||
| 981 | case READ_10: /* SBC - RDProtect */ | ||
| 982 | case READ_12: /* SBC - RDProtect */ | ||
| 983 | case READ_16: /* SBC - RDProtect */ | ||
| 984 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | ||
| 985 | case VERIFY: /* SBC - VRProtect */ | ||
| 986 | case VERIFY_16: /* SBC - VRProtect */ | ||
| 987 | case WRITE_VERIFY: /* SBC - VRProtect */ | ||
| 988 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | ||
| 989 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ | ||
| 990 | break; | ||
| 991 | default: | ||
| 992 | cdb[1] &= 0x1f; /* clear logical unit number */ | ||
| 993 | break; | ||
| 994 | } | ||
| 995 | } | ||
| 996 | |||
| 997 | static sense_reason_t | 976 | static sense_reason_t |
| 998 | pscsi_parse_cdb(struct se_cmd *cmd) | 977 | pscsi_parse_cdb(struct se_cmd *cmd) |
| 999 | { | 978 | { |
| 1000 | unsigned char *cdb = cmd->t_task_cdb; | ||
| 1001 | |||
| 1002 | if (cmd->se_cmd_flags & SCF_BIDI) | 979 | if (cmd->se_cmd_flags & SCF_BIDI) |
| 1003 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 980 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
| 1004 | 981 | ||
| 1005 | pscsi_clear_cdb_lun(cdb); | 982 | return passthrough_parse_cdb(cmd, pscsi_execute_cmd); |
| 1006 | |||
| 1007 | /* | ||
| 1008 | * For REPORT LUNS we always need to emulate the response, for everything | ||
| 1009 | * else the default for pSCSI is to pass the command to the underlying | ||
| 1010 | * LLD / physical hardware. | ||
| 1011 | */ | ||
| 1012 | switch (cdb[0]) { | ||
| 1013 | case REPORT_LUNS: | ||
| 1014 | cmd->execute_cmd = spc_emulate_report_luns; | ||
| 1015 | return 0; | ||
| 1016 | case READ_6: | ||
| 1017 | case READ_10: | ||
| 1018 | case READ_12: | ||
| 1019 | case READ_16: | ||
| 1020 | case WRITE_6: | ||
| 1021 | case WRITE_10: | ||
| 1022 | case WRITE_12: | ||
| 1023 | case WRITE_16: | ||
| 1024 | case WRITE_VERIFY: | ||
| 1025 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
| 1026 | /* FALLTHROUGH*/ | ||
| 1027 | default: | ||
| 1028 | cmd->execute_cmd = pscsi_execute_cmd; | ||
| 1029 | return 0; | ||
| 1030 | } | ||
| 1031 | } | 983 | } |
| 1032 | 984 | ||
| 1033 | static sense_reason_t | 985 | static sense_reason_t |
| @@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = { | |||
| 1189 | static struct se_subsystem_api pscsi_template = { | 1141 | static struct se_subsystem_api pscsi_template = { |
| 1190 | .name = "pscsi", | 1142 | .name = "pscsi", |
| 1191 | .owner = THIS_MODULE, | 1143 | .owner = THIS_MODULE, |
| 1192 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | 1144 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
| 1193 | .attach_hba = pscsi_attach_hba, | 1145 | .attach_hba = pscsi_attach_hba, |
| 1194 | .detach_hba = pscsi_detach_hba, | 1146 | .detach_hba = pscsi_detach_hba, |
| 1195 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1147 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 1bd757dff8ee..820d3052b775 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h | |||
| @@ -45,6 +45,7 @@ struct pscsi_dev_virt { | |||
| 45 | int pdv_lun_id; | 45 | int pdv_lun_id; |
| 46 | struct block_device *pdv_bd; | 46 | struct block_device *pdv_bd; |
| 47 | struct scsi_device *pdv_sd; | 47 | struct scsi_device *pdv_sd; |
| 48 | struct Scsi_Host *pdv_lld_host; | ||
| 48 | } ____cacheline_aligned; | 49 | } ____cacheline_aligned; |
| 49 | 50 | ||
| 50 | typedef enum phv_modes { | 51 | typedef enum phv_modes { |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index a263bf5fab8d..d16489b6a1a4 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
| @@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = { | |||
| 733 | .name = "rd_mcp", | 733 | .name = "rd_mcp", |
| 734 | .inquiry_prod = "RAMDISK-MCP", | 734 | .inquiry_prod = "RAMDISK-MCP", |
| 735 | .inquiry_rev = RD_MCP_VERSION, | 735 | .inquiry_rev = RD_MCP_VERSION, |
| 736 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | ||
| 737 | .attach_hba = rd_attach_hba, | 736 | .attach_hba = rd_attach_hba, |
| 738 | .detach_hba = rd_detach_hba, | 737 | .detach_hba = rd_detach_hba, |
| 739 | .alloc_device = rd_alloc_device, | 738 | .alloc_device = rd_alloc_device, |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 8855781ac653..733824e3825f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
| @@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd) | |||
| 568 | * comparision using SGLs at cmd->t_bidi_data_sg.. | 568 | * comparision using SGLs at cmd->t_bidi_data_sg.. |
| 569 | */ | 569 | */ |
| 570 | rc = down_interruptible(&dev->caw_sem); | 570 | rc = down_interruptible(&dev->caw_sem); |
| 571 | if ((rc != 0) || signal_pending(current)) { | 571 | if (rc != 0) { |
| 572 | cmd->transport_complete_callback = NULL; | 572 | cmd->transport_complete_callback = NULL; |
| 573 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 573 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
| 574 | } | 574 | } |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3fe5cb240b6f..675f2d9d1f14 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) | |||
| 1196 | * Check if SAM Task Attribute emulation is enabled for this | 1196 | * Check if SAM Task Attribute emulation is enabled for this |
| 1197 | * struct se_device storage object | 1197 | * struct se_device storage object |
| 1198 | */ | 1198 | */ |
| 1199 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 1199 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 1200 | return 0; | 1200 | return 0; |
| 1201 | 1201 | ||
| 1202 | if (cmd->sam_task_attr == TCM_ACA_TAG) { | 1202 | if (cmd->sam_task_attr == TCM_ACA_TAG) { |
| @@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd) | |||
| 1770 | sectors, 0, NULL, 0); | 1770 | sectors, 0, NULL, 0); |
| 1771 | if (unlikely(cmd->pi_err)) { | 1771 | if (unlikely(cmd->pi_err)) { |
| 1772 | spin_lock_irq(&cmd->t_state_lock); | 1772 | spin_lock_irq(&cmd->t_state_lock); |
| 1773 | cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; | 1773 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
| 1774 | spin_unlock_irq(&cmd->t_state_lock); | 1774 | spin_unlock_irq(&cmd->t_state_lock); |
| 1775 | transport_generic_request_failure(cmd, cmd->pi_err); | 1775 | transport_generic_request_failure(cmd, cmd->pi_err); |
| 1776 | return -1; | 1776 | return -1; |
| @@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) | |||
| 1787 | { | 1787 | { |
| 1788 | struct se_device *dev = cmd->se_dev; | 1788 | struct se_device *dev = cmd->se_dev; |
| 1789 | 1789 | ||
| 1790 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 1790 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 1791 | return false; | 1791 | return false; |
| 1792 | 1792 | ||
| 1793 | /* | 1793 | /* |
| @@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd) | |||
| 1868 | 1868 | ||
| 1869 | if (target_handle_task_attr(cmd)) { | 1869 | if (target_handle_task_attr(cmd)) { |
| 1870 | spin_lock_irq(&cmd->t_state_lock); | 1870 | spin_lock_irq(&cmd->t_state_lock); |
| 1871 | cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; | 1871 | cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); |
| 1872 | spin_unlock_irq(&cmd->t_state_lock); | 1872 | spin_unlock_irq(&cmd->t_state_lock); |
| 1873 | return; | 1873 | return; |
| 1874 | } | 1874 | } |
| @@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
| 1912 | { | 1912 | { |
| 1913 | struct se_device *dev = cmd->se_dev; | 1913 | struct se_device *dev = cmd->se_dev; |
| 1914 | 1914 | ||
| 1915 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 1915 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
| 1916 | return; | 1916 | return; |
| 1917 | 1917 | ||
| 1918 | if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { | 1918 | if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { |
| @@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd) | |||
| 1957 | case DMA_TO_DEVICE: | 1957 | case DMA_TO_DEVICE: |
| 1958 | if (cmd->se_cmd_flags & SCF_BIDI) { | 1958 | if (cmd->se_cmd_flags & SCF_BIDI) { |
| 1959 | ret = cmd->se_tfo->queue_data_in(cmd); | 1959 | ret = cmd->se_tfo->queue_data_in(cmd); |
| 1960 | if (ret < 0) | 1960 | break; |
| 1961 | break; | ||
| 1962 | } | 1961 | } |
| 1963 | /* Fall through for DMA_TO_DEVICE */ | 1962 | /* Fall through for DMA_TO_DEVICE */ |
| 1964 | case DMA_NONE: | 1963 | case DMA_NONE: |
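
Two of the transport hunks above also fix a precedence bug when clearing the command state flags: ~ binds tighter than |, so `transport_state &= ~CMD_T_BUSY|CMD_T_SENT` masks with (~CMD_T_BUSY)|CMD_T_SENT, which clears only CMD_T_BUSY and leaves CMD_T_SENT set. A tiny user-space illustration (the flag values here are made up; only the operator behaviour matters):

#include <assert.h>

#define CMD_T_BUSY      (1u << 0)       /* illustrative values only */
#define CMD_T_SENT      (1u << 1)

int main(void)
{
        unsigned int buggy = CMD_T_BUSY | CMD_T_SENT;
        unsigned int fixed = CMD_T_BUSY | CMD_T_SENT;

        buggy &= ~CMD_T_BUSY | CMD_T_SENT;   /* == buggy & ((~CMD_T_BUSY) | CMD_T_SENT) */
        fixed &= ~(CMD_T_BUSY | CMD_T_SENT);

        assert(buggy == CMD_T_SENT);         /* CMD_T_SENT survived: wrong */
        assert(fixed == 0);                  /* both flags cleared: intended */
        return 0;
}
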
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dbc872a6c981..07d2996d8c1f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -71,13 +71,6 @@ struct tcmu_hba { | |||
| 71 | u32 host_id; | 71 | u32 host_id; |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | /* User wants all cmds or just some */ | ||
| 75 | enum passthru_level { | ||
| 76 | TCMU_PASS_ALL = 0, | ||
| 77 | TCMU_PASS_IO, | ||
| 78 | TCMU_PASS_INVALID, | ||
| 79 | }; | ||
| 80 | |||
| 81 | #define TCMU_CONFIG_LEN 256 | 74 | #define TCMU_CONFIG_LEN 256 |
| 82 | 75 | ||
| 83 | struct tcmu_dev { | 76 | struct tcmu_dev { |
| @@ -89,7 +82,6 @@ struct tcmu_dev { | |||
| 89 | #define TCMU_DEV_BIT_OPEN 0 | 82 | #define TCMU_DEV_BIT_OPEN 0 |
| 90 | #define TCMU_DEV_BIT_BROKEN 1 | 83 | #define TCMU_DEV_BIT_BROKEN 1 |
| 91 | unsigned long flags; | 84 | unsigned long flags; |
| 92 | enum passthru_level pass_level; | ||
| 93 | 85 | ||
| 94 | struct uio_info uio_info; | 86 | struct uio_info uio_info; |
| 95 | 87 | ||
| @@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 683 | setup_timer(&udev->timeout, tcmu_device_timedout, | 675 | setup_timer(&udev->timeout, tcmu_device_timedout, |
| 684 | (unsigned long)udev); | 676 | (unsigned long)udev); |
| 685 | 677 | ||
| 686 | udev->pass_level = TCMU_PASS_ALL; | ||
| 687 | |||
| 688 | return &udev->se_dev; | 678 | return &udev->se_dev; |
| 689 | } | 679 | } |
| 690 | 680 | ||
| @@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev) | |||
| 948 | } | 938 | } |
| 949 | 939 | ||
| 950 | enum { | 940 | enum { |
| 951 | Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, | 941 | Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, |
| 952 | }; | 942 | }; |
| 953 | 943 | ||
| 954 | static match_table_t tokens = { | 944 | static match_table_t tokens = { |
| 955 | {Opt_dev_config, "dev_config=%s"}, | 945 | {Opt_dev_config, "dev_config=%s"}, |
| 956 | {Opt_dev_size, "dev_size=%u"}, | 946 | {Opt_dev_size, "dev_size=%u"}, |
| 957 | {Opt_pass_level, "pass_level=%u"}, | 947 | {Opt_hw_block_size, "hw_block_size=%u"}, |
| 958 | {Opt_err, NULL} | 948 | {Opt_err, NULL} |
| 959 | }; | 949 | }; |
| 960 | 950 | ||
| @@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
| 965 | char *orig, *ptr, *opts, *arg_p; | 955 | char *orig, *ptr, *opts, *arg_p; |
| 966 | substring_t args[MAX_OPT_ARGS]; | 956 | substring_t args[MAX_OPT_ARGS]; |
| 967 | int ret = 0, token; | 957 | int ret = 0, token; |
| 968 | int arg; | 958 | unsigned long tmp_ul; |
| 969 | 959 | ||
| 970 | opts = kstrdup(page, GFP_KERNEL); | 960 | opts = kstrdup(page, GFP_KERNEL); |
| 971 | if (!opts) | 961 | if (!opts) |
| @@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
| 998 | if (ret < 0) | 988 | if (ret < 0) |
| 999 | pr_err("kstrtoul() failed for dev_size=\n"); | 989 | pr_err("kstrtoul() failed for dev_size=\n"); |
| 1000 | break; | 990 | break; |
| 1001 | case Opt_pass_level: | 991 | case Opt_hw_block_size: |
| 1002 | match_int(args, &arg); | 992 | arg_p = match_strdup(&args[0]); |
| 1003 | if (arg >= TCMU_PASS_INVALID) { | 993 | if (!arg_p) { |
| 1004 | pr_warn("TCMU: Invalid pass_level: %d\n", arg); | 994 | ret = -ENOMEM; |
| 1005 | break; | 995 | break; |
| 1006 | } | 996 | } |
| 1007 | 997 | ret = kstrtoul(arg_p, 0, &tmp_ul); | |
| 1008 | pr_debug("TCMU: Setting pass_level to %d\n", arg); | 998 | kfree(arg_p); |
| 1009 | udev->pass_level = arg; | 999 | if (ret < 0) { |
| 1000 | pr_err("kstrtoul() failed for hw_block_size=\n"); | ||
| 1001 | break; | ||
| 1002 | } | ||
| 1003 | if (!tmp_ul) { | ||
| 1004 | pr_err("hw_block_size must be nonzero\n"); | ||
| 1005 | break; | ||
| 1006 | } | ||
| 1007 | dev->dev_attrib.hw_block_size = tmp_ul; | ||
| 1010 | break; | 1008 | break; |
| 1011 | default: | 1009 | default: |
| 1012 | break; | 1010 | break; |
| @@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) | |||
| 1024 | 1022 | ||
| 1025 | bl = sprintf(b + bl, "Config: %s ", | 1023 | bl = sprintf(b + bl, "Config: %s ", |
| 1026 | udev->dev_config[0] ? udev->dev_config : "NULL"); | 1024 | udev->dev_config[0] ? udev->dev_config : "NULL"); |
| 1027 | bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", | 1025 | bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); |
| 1028 | udev->dev_size, udev->pass_level); | ||
| 1029 | 1026 | ||
| 1030 | return bl; | 1027 | return bl; |
| 1031 | } | 1028 | } |
| @@ -1039,20 +1036,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev) | |||
| 1039 | } | 1036 | } |
| 1040 | 1037 | ||
| 1041 | static sense_reason_t | 1038 | static sense_reason_t |
| 1042 | tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, | ||
| 1043 | enum dma_data_direction data_direction) | ||
| 1044 | { | ||
| 1045 | int ret; | ||
| 1046 | |||
| 1047 | ret = tcmu_queue_cmd(se_cmd); | ||
| 1048 | |||
| 1049 | if (ret != 0) | ||
| 1050 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
| 1051 | else | ||
| 1052 | return TCM_NO_SENSE; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | static sense_reason_t | ||
| 1056 | tcmu_pass_op(struct se_cmd *se_cmd) | 1039 | tcmu_pass_op(struct se_cmd *se_cmd) |
| 1057 | { | 1040 | { |
| 1058 | int ret = tcmu_queue_cmd(se_cmd); | 1041 | int ret = tcmu_queue_cmd(se_cmd); |
| @@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd) | |||
| 1063 | return TCM_NO_SENSE; | 1046 | return TCM_NO_SENSE; |
| 1064 | } | 1047 | } |
| 1065 | 1048 | ||
| 1066 | static struct sbc_ops tcmu_sbc_ops = { | ||
| 1067 | .execute_rw = tcmu_execute_rw, | ||
| 1068 | .execute_sync_cache = tcmu_pass_op, | ||
| 1069 | .execute_write_same = tcmu_pass_op, | ||
| 1070 | .execute_write_same_unmap = tcmu_pass_op, | ||
| 1071 | .execute_unmap = tcmu_pass_op, | ||
| 1072 | }; | ||
| 1073 | |||
| 1074 | static sense_reason_t | 1049 | static sense_reason_t |
| 1075 | tcmu_parse_cdb(struct se_cmd *cmd) | 1050 | tcmu_parse_cdb(struct se_cmd *cmd) |
| 1076 | { | 1051 | { |
| 1077 | unsigned char *cdb = cmd->t_task_cdb; | 1052 | return passthrough_parse_cdb(cmd, tcmu_pass_op); |
| 1078 | struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev); | ||
| 1079 | sense_reason_t ret; | ||
| 1080 | |||
| 1081 | switch (udev->pass_level) { | ||
| 1082 | case TCMU_PASS_ALL: | ||
| 1083 | /* We're just like pscsi, then */ | ||
| 1084 | /* | ||
| 1085 | * For REPORT LUNS we always need to emulate the response, for everything | ||
| 1086 | * else, pass it up. | ||
| 1087 | */ | ||
| 1088 | switch (cdb[0]) { | ||
| 1089 | case REPORT_LUNS: | ||
| 1090 | cmd->execute_cmd = spc_emulate_report_luns; | ||
| 1091 | break; | ||
| 1092 | case READ_6: | ||
| 1093 | case READ_10: | ||
| 1094 | case READ_12: | ||
| 1095 | case READ_16: | ||
| 1096 | case WRITE_6: | ||
| 1097 | case WRITE_10: | ||
| 1098 | case WRITE_12: | ||
| 1099 | case WRITE_16: | ||
| 1100 | case WRITE_VERIFY: | ||
| 1101 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
| 1102 | /* FALLTHROUGH */ | ||
| 1103 | default: | ||
| 1104 | cmd->execute_cmd = tcmu_pass_op; | ||
| 1105 | } | ||
| 1106 | ret = TCM_NO_SENSE; | ||
| 1107 | break; | ||
| 1108 | case TCMU_PASS_IO: | ||
| 1109 | ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops); | ||
| 1110 | break; | ||
| 1111 | default: | ||
| 1112 | pr_err("Unknown tcm-user pass level %d\n", udev->pass_level); | ||
| 1113 | ret = TCM_CHECK_CONDITION_ABORT_CMD; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | return ret; | ||
| 1117 | } | 1053 | } |
| 1118 | 1054 | ||
| 1119 | DEF_TB_DEFAULT_ATTRIBS(tcmu); | 1055 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); |
| 1056 | TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); | ||
| 1057 | |||
| 1058 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); | ||
| 1059 | TB_DEV_ATTR_RO(tcmu, hw_block_size); | ||
| 1060 | |||
| 1061 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); | ||
| 1062 | TB_DEV_ATTR_RO(tcmu, hw_max_sectors); | ||
| 1063 | |||
| 1064 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); | ||
| 1065 | TB_DEV_ATTR_RO(tcmu, hw_queue_depth); | ||
| 1120 | 1066 | ||
| 1121 | static struct configfs_attribute *tcmu_backend_dev_attrs[] = { | 1067 | static struct configfs_attribute *tcmu_backend_dev_attrs[] = { |
| 1122 | &tcmu_dev_attrib_emulate_model_alias.attr, | ||
| 1123 | &tcmu_dev_attrib_emulate_dpo.attr, | ||
| 1124 | &tcmu_dev_attrib_emulate_fua_write.attr, | ||
| 1125 | &tcmu_dev_attrib_emulate_fua_read.attr, | ||
| 1126 | &tcmu_dev_attrib_emulate_write_cache.attr, | ||
| 1127 | &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
| 1128 | &tcmu_dev_attrib_emulate_tas.attr, | ||
| 1129 | &tcmu_dev_attrib_emulate_tpu.attr, | ||
| 1130 | &tcmu_dev_attrib_emulate_tpws.attr, | ||
| 1131 | &tcmu_dev_attrib_emulate_caw.attr, | ||
| 1132 | &tcmu_dev_attrib_emulate_3pc.attr, | ||
| 1133 | &tcmu_dev_attrib_pi_prot_type.attr, | ||
| 1134 | &tcmu_dev_attrib_hw_pi_prot_type.attr, | 1068 | &tcmu_dev_attrib_hw_pi_prot_type.attr, |
| 1135 | &tcmu_dev_attrib_pi_prot_format.attr, | ||
| 1136 | &tcmu_dev_attrib_enforce_pr_isids.attr, | ||
| 1137 | &tcmu_dev_attrib_is_nonrot.attr, | ||
| 1138 | &tcmu_dev_attrib_emulate_rest_reord.attr, | ||
| 1139 | &tcmu_dev_attrib_force_pr_aptpl.attr, | ||
| 1140 | &tcmu_dev_attrib_hw_block_size.attr, | 1069 | &tcmu_dev_attrib_hw_block_size.attr, |
| 1141 | &tcmu_dev_attrib_block_size.attr, | ||
| 1142 | &tcmu_dev_attrib_hw_max_sectors.attr, | 1070 | &tcmu_dev_attrib_hw_max_sectors.attr, |
| 1143 | &tcmu_dev_attrib_optimal_sectors.attr, | ||
| 1144 | &tcmu_dev_attrib_hw_queue_depth.attr, | 1071 | &tcmu_dev_attrib_hw_queue_depth.attr, |
| 1145 | &tcmu_dev_attrib_queue_depth.attr, | ||
| 1146 | &tcmu_dev_attrib_max_unmap_lba_count.attr, | ||
| 1147 | &tcmu_dev_attrib_max_unmap_block_desc_count.attr, | ||
| 1148 | &tcmu_dev_attrib_unmap_granularity.attr, | ||
| 1149 | &tcmu_dev_attrib_unmap_granularity_alignment.attr, | ||
| 1150 | &tcmu_dev_attrib_max_write_same_len.attr, | ||
| 1151 | NULL, | 1072 | NULL, |
| 1152 | }; | 1073 | }; |
| 1153 | 1074 | ||
| @@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = { | |||
| 1156 | .inquiry_prod = "USER", | 1077 | .inquiry_prod = "USER", |
| 1157 | .inquiry_rev = TCMU_VERSION, | 1078 | .inquiry_rev = TCMU_VERSION, |
| 1158 | .owner = THIS_MODULE, | 1079 | .owner = THIS_MODULE, |
| 1159 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | 1080 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
| 1160 | .attach_hba = tcmu_attach_hba, | 1081 | .attach_hba = tcmu_attach_hba, |
| 1161 | .detach_hba = tcmu_detach_hba, | 1082 | .detach_hba = tcmu_detach_hba, |
| 1162 | .alloc_device = tcmu_alloc_device, | 1083 | .alloc_device = tcmu_alloc_device, |
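
The target_core_user.c hunks remove the pass_level machinery entirely: with TRANSPORT_FLAG_PASSTHROUGH set, the core skips SAM task attribute emulation (see the target_core_transport.c hunks above) and the backend simply hands every CDB to the new shared passthrough_parse_cdb() helper. A rough sketch of what a passthrough backend looks like after this series; the my_* names are hypothetical stand-ins for the tcmu functions shown above, and callbacks not relevant here are elided:

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

/* Backend-specific queuing; tcmu puts the command on its mmap'd ring.
 * Stubbed here because the mechanism is not the point of the sketch. */
static int my_queue_cmd(struct se_cmd *se_cmd)
{
        return 0;
}

static sense_reason_t my_pass_op(struct se_cmd *se_cmd)
{
        if (my_queue_cmd(se_cmd) != 0)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        return TCM_NO_SENSE;
}

static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
{
        /* The core still emulates REPORT LUNS and a few others; everything
         * else ends up in my_pass_op(). */
        return passthrough_parse_cdb(cmd, my_pass_op);
}

static struct se_subsystem_api my_template = {
        .name            = "my_backend",
        .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
        .parse_cdb       = my_parse_cdb,
        /* .attach_hba, .detach_hba, .alloc_device, ... as usual */
};
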
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index a600ff15dcfd..8fd680ac941b 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
| @@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op | |||
| 58 | bool src) | 58 | bool src) |
| 59 | { | 59 | { |
| 60 | struct se_device *se_dev; | 60 | struct se_device *se_dev; |
| 61 | struct configfs_subsystem *subsys = target_core_subsystem[0]; | ||
| 62 | unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; | 61 | unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; |
| 63 | int rc; | 62 | int rc; |
| 64 | 63 | ||
| @@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op | |||
| 90 | " se_dev\n", xop->src_dev); | 89 | " se_dev\n", xop->src_dev); |
| 91 | } | 90 | } |
| 92 | 91 | ||
| 93 | rc = configfs_depend_item(subsys, | 92 | rc = target_depend_item(&se_dev->dev_group.cg_item); |
| 94 | &se_dev->dev_group.cg_item); | ||
| 95 | if (rc != 0) { | 93 | if (rc != 0) { |
| 96 | pr_err("configfs_depend_item attempt failed:" | 94 | pr_err("configfs_depend_item attempt failed:" |
| 97 | " %d for se_dev: %p\n", rc, se_dev); | 95 | " %d for se_dev: %p\n", rc, se_dev); |
| @@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op | |||
| 99 | return rc; | 97 | return rc; |
| 100 | } | 98 | } |
| 101 | 99 | ||
| 102 | pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" | 100 | pr_debug("Called configfs_depend_item for se_dev: %p" |
| 103 | " se_dev->se_dev_group: %p\n", subsys, se_dev, | 101 | " se_dev->se_dev_group: %p\n", se_dev, |
| 104 | &se_dev->dev_group); | 102 | &se_dev->dev_group); |
| 105 | 103 | ||
| 106 | mutex_unlock(&g_device_mutex); | 104 | mutex_unlock(&g_device_mutex); |
| @@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) | |||
| 373 | 371 | ||
| 374 | static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) | 372 | static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) |
| 375 | { | 373 | { |
| 376 | struct configfs_subsystem *subsys = target_core_subsystem[0]; | ||
| 377 | struct se_device *remote_dev; | 374 | struct se_device *remote_dev; |
| 378 | 375 | ||
| 379 | if (xop->op_origin == XCOL_SOURCE_RECV_OP) | 376 | if (xop->op_origin == XCOL_SOURCE_RECV_OP) |
| @@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) | |||
| 381 | else | 378 | else |
| 382 | remote_dev = xop->src_dev; | 379 | remote_dev = xop->src_dev; |
| 383 | 380 | ||
| 384 | pr_debug("Calling configfs_undepend_item for subsys: %p" | 381 | pr_debug("Calling configfs_undepend_item for" |
| 385 | " remote_dev: %p remote_dev->dev_group: %p\n", | 382 | " remote_dev: %p remote_dev->dev_group: %p\n", |
| 386 | subsys, remote_dev, &remote_dev->dev_group.cg_item); | 383 | remote_dev, &remote_dev->dev_group.cg_item); |
| 387 | 384 | ||
| 388 | configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); | 385 | target_undepend_item(&remote_dev->dev_group.cg_item); |
| 389 | } | 386 | } |
| 390 | 387 | ||
| 391 | static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) | 388 | static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) |
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 04d9e23d1ee1..358323c83b4f 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c | |||
| @@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty { | |||
| 174 | static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, | 174 | static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, |
| 175 | unsigned int offs, unsigned int data) | 175 | unsigned int offs, unsigned int data) |
| 176 | { | 176 | { |
| 177 | iowrite32(data, priv->reg + offs); | 177 | __raw_writel(data, priv->reg + offs); |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, | 180 | static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, |
| 181 | unsigned int offs) | 181 | unsigned int offs) |
| 182 | { | 182 | { |
| 183 | return ioread32(priv->reg + offs); | 183 | return __raw_readl(priv->reg + offs); |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | /* Encoding of byte stream in FDC words */ | 186 | /* Encoding of byte stream in FDC words */ |
| @@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s, | |||
| 347 | s += inc[word.bytes - 1]; | 347 | s += inc[word.bytes - 1]; |
| 348 | 348 | ||
| 349 | /* Busy wait until there's space in fifo */ | 349 | /* Busy wait until there's space in fifo */ |
| 350 | while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) | 350 | while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) |
| 351 | ; | 351 | ; |
| 352 | iowrite32(word.word, regs + REG_FDTX(c->index)); | 352 | __raw_writel(word.word, regs + REG_FDTX(c->index)); |
| 353 | } | 353 | } |
| 354 | out: | 354 | out: |
| 355 | local_irq_restore(flags); | 355 | local_irq_restore(flags); |
| @@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void) | |||
| 1227 | 1227 | ||
| 1228 | /* Read next word from KGDB channel */ | 1228 | /* Read next word from KGDB channel */ |
| 1229 | do { | 1229 | do { |
| 1230 | stat = ioread32(regs + REG_FDSTAT); | 1230 | stat = __raw_readl(regs + REG_FDSTAT); |
| 1231 | 1231 | ||
| 1232 | /* No data waiting? */ | 1232 | /* No data waiting? */ |
| 1233 | if (stat & REG_FDSTAT_RXE) | 1233 | if (stat & REG_FDSTAT_RXE) |
| @@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void) | |||
| 1236 | /* Read next word */ | 1236 | /* Read next word */ |
| 1237 | channel = (stat & REG_FDSTAT_RXCHAN) >> | 1237 | channel = (stat & REG_FDSTAT_RXCHAN) >> |
| 1238 | REG_FDSTAT_RXCHAN_SHIFT; | 1238 | REG_FDSTAT_RXCHAN_SHIFT; |
| 1239 | data = ioread32(regs + REG_FDRX); | 1239 | data = __raw_readl(regs + REG_FDRX); |
| 1240 | } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); | 1240 | } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); |
| 1241 | 1241 | ||
| 1242 | /* Decode into rbuf */ | 1242 | /* Decode into rbuf */ |
| @@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void) | |||
| 1266 | return; | 1266 | return; |
| 1267 | 1267 | ||
| 1268 | /* Busy wait until there's space in fifo */ | 1268 | /* Busy wait until there's space in fifo */ |
| 1269 | while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) | 1269 | while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) |
| 1270 | ; | 1270 | ; |
| 1271 | iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); | 1271 | __raw_writel(word.word, |
| 1272 | regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); | ||
| 1272 | } | 1273 | } |
| 1273 | 1274 | ||
| 1274 | /* flush the whole write buffer to the TX FIFO */ | 1275 | /* flush the whole write buffer to the TX FIFO */ |
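
The MIPS EJTAG FDC change swaps ioread32()/iowrite32() for the __raw_*() accessors because the FDC registers are in CPU byte order: iowrite32() assumes a little-endian device and would byte-swap the data on big-endian kernels, and it also implies ordering barriers that this polled debug channel does not need. Roughly (a simplified model; the exact definitions are per-architecture and the my_* names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

static inline void my_iowrite32(u32 value, void __iomem *addr)
{
        /* Little-endian device assumed: swap on big-endian CPUs.
         * Real iowrite32()/writel() also order the access with barriers;
         * sparse annotations are omitted here. */
        __raw_writel((u32)cpu_to_le32(value), addr);
}

static inline void my_raw_write32(u32 value, void __iomem *addr)
{
        /* Native byte order, no ordering guarantees beyond the access. */
        __raw_writel(value, addr);
}
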
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 5e19bb53b3a9..ea32b386797f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
| @@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, | |||
| 1409 | * dependency now. | 1409 | * dependency now. |
| 1410 | */ | 1410 | */ |
| 1411 | se_tpg = &tpg->se_tpg; | 1411 | se_tpg = &tpg->se_tpg; |
| 1412 | ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, | 1412 | ret = target_depend_item(&se_tpg->tpg_group.cg_item); |
| 1413 | &se_tpg->tpg_group.cg_item); | ||
| 1414 | if (ret) { | 1413 | if (ret) { |
| 1415 | pr_warn("configfs_depend_item() failed: %d\n", ret); | 1414 | pr_warn("configfs_depend_item() failed: %d\n", ret); |
| 1416 | kfree(vs_tpg); | 1415 | kfree(vs_tpg); |
| @@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, | |||
| 1513 | * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. | 1512 | * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. |
| 1514 | */ | 1513 | */ |
| 1515 | se_tpg = &tpg->se_tpg; | 1514 | se_tpg = &tpg->se_tpg; |
| 1516 | configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, | 1515 | target_undepend_item(&se_tpg->tpg_group.cg_item); |
| 1517 | &se_tpg->tpg_group.cg_item); | ||
| 1518 | } | 1516 | } |
| 1519 | if (match) { | 1517 | if (match) { |
| 1520 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | 1518 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
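
Both the xcopy and vhost-scsi hunks replace the two-argument configfs_depend_item()/configfs_undepend_item() calls with the new target_depend_item()/target_undepend_item() helpers, so outside callers no longer need a pointer to the target core's configfs subsystem (which is why tf_subsys can be dropped from the fabric structures further down). The wrappers are presumably thin; a sketch under that assumption, with the subsystem variable name invented for illustration:

#include <linux/configfs.h>
#include <linux/export.h>

/* Assumed: the target core's private configfs_subsystem instance. */
static struct configfs_subsystem *my_target_subsys;

int target_depend_item(struct config_item *item)
{
        return configfs_depend_item(my_target_subsys, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
        configfs_undepend_item(my_target_subsys, item);
}
EXPORT_SYMBOL(target_undepend_item);
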
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 241ef68d2893..cd46e4158830 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm) | |||
| 918 | total_size = total_mapping_size(elf_phdata, | 918 | total_size = total_mapping_size(elf_phdata, |
| 919 | loc->elf_ex.e_phnum); | 919 | loc->elf_ex.e_phnum); |
| 920 | if (!total_size) { | 920 | if (!total_size) { |
| 921 | error = -EINVAL; | 921 | retval = -EINVAL; |
| 922 | goto out_free_dentry; | 922 | goto out_free_dentry; |
| 923 | } | 923 | } |
| 924 | } | 924 | } |
diff --git a/fs/dcache.c b/fs/dcache.c index 656ce522a218..37b5afdaf698 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -1239,13 +1239,13 @@ ascend: | |||
| 1239 | /* might go back up the wrong parent if we have had a rename. */ | 1239 | /* might go back up the wrong parent if we have had a rename. */ |
| 1240 | if (need_seqretry(&rename_lock, seq)) | 1240 | if (need_seqretry(&rename_lock, seq)) |
| 1241 | goto rename_retry; | 1241 | goto rename_retry; |
| 1242 | next = child->d_child.next; | 1242 | /* go into the first sibling still alive */ |
| 1243 | while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { | 1243 | do { |
| 1244 | next = child->d_child.next; | ||
| 1244 | if (next == &this_parent->d_subdirs) | 1245 | if (next == &this_parent->d_subdirs) |
| 1245 | goto ascend; | 1246 | goto ascend; |
| 1246 | child = list_entry(next, struct dentry, d_child); | 1247 | child = list_entry(next, struct dentry, d_child); |
| 1247 | next = next->next; | 1248 | } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); |
| 1248 | } | ||
| 1249 | rcu_read_unlock(); | 1249 | rcu_read_unlock(); |
| 1250 | goto resume; | 1250 | goto resume; |
| 1251 | } | 1251 | } |
diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c index 082234581d05..83f4e76511c2 100644 --- a/fs/omfs/bitmap.c +++ b/fs/omfs/bitmap.c | |||
| @@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb, | |||
| 159 | goto out; | 159 | goto out; |
| 160 | 160 | ||
| 161 | found: | 161 | found: |
| 162 | *return_block = i * bits_per_entry + bit; | 162 | *return_block = (u64) i * bits_per_entry + bit; |
| 163 | *return_size = run; | 163 | *return_size = run; |
| 164 | ret = set_run(sb, i, bits_per_entry, bit, run, 1); | 164 | ret = set_run(sb, i, bits_per_entry, bit, run, 1); |
| 165 | 165 | ||
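
The omfs_allocate_range() fix is an integer-width issue: i and bits_per_entry are 32-bit, so the multiplication was done in 32-bit arithmetic and could wrap before being assigned to the 64-bit *return_block on large volumes. Casting one operand to u64 first widens the whole expression. A stand-alone illustration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int i = 600000;                /* hypothetical bitmap index */
        unsigned int bits_per_entry = 8 * 2048; /* bits in one 2 KiB block  */
        unsigned int bit = 5;

        uint64_t wrong = i * bits_per_entry + bit;            /* 32-bit math, wraps */
        uint64_t right = (uint64_t)i * bits_per_entry + bit;  /* widened first      */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}
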
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 138321b0c6c2..3d935c81789a 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c | |||
| @@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = { | |||
| 306 | */ | 306 | */ |
| 307 | static int omfs_get_imap(struct super_block *sb) | 307 | static int omfs_get_imap(struct super_block *sb) |
| 308 | { | 308 | { |
| 309 | unsigned int bitmap_size, count, array_size; | 309 | unsigned int bitmap_size, array_size; |
| 310 | int count; | ||
| 310 | struct omfs_sb_info *sbi = OMFS_SB(sb); | 311 | struct omfs_sb_info *sbi = OMFS_SB(sb); |
| 311 | struct buffer_head *bh; | 312 | struct buffer_head *bh; |
| 312 | unsigned long **ptr; | 313 | unsigned long **ptr; |
| @@ -359,7 +360,7 @@ nomem: | |||
| 359 | } | 360 | } |
| 360 | 361 | ||
| 361 | enum { | 362 | enum { |
| 362 | Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask | 363 | Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err |
| 363 | }; | 364 | }; |
| 364 | 365 | ||
| 365 | static const match_table_t tokens = { | 366 | static const match_table_t tokens = { |
| @@ -368,6 +369,7 @@ static const match_table_t tokens = { | |||
| 368 | {Opt_umask, "umask=%o"}, | 369 | {Opt_umask, "umask=%o"}, |
| 369 | {Opt_dmask, "dmask=%o"}, | 370 | {Opt_dmask, "dmask=%o"}, |
| 370 | {Opt_fmask, "fmask=%o"}, | 371 | {Opt_fmask, "fmask=%o"}, |
| 372 | {Opt_err, NULL}, | ||
| 371 | }; | 373 | }; |
| 372 | 374 | ||
| 373 | static int parse_options(char *options, struct omfs_sb_info *sbi) | 375 | static int parse_options(char *options, struct omfs_sb_info *sbi) |
| @@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 548 | } | 550 | } |
| 549 | 551 | ||
| 550 | sb->s_root = d_make_root(root); | 552 | sb->s_root = d_make_root(root); |
| 551 | if (!sb->s_root) | 553 | if (!sb->s_root) { |
| 554 | ret = -ENOMEM; | ||
| 552 | goto out_brelse_bh2; | 555 | goto out_brelse_bh2; |
| 556 | } | ||
| 553 | printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); | 557 | printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); |
| 554 | 558 | ||
| 555 | ret = 0; | 559 | ret = 0; |
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 04e79d57bca6..e9d401ce93bb 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c | |||
| @@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) | |||
| 574 | * After the last attribute is removed revert to original inode format, | 574 | * After the last attribute is removed revert to original inode format, |
| 575 | * making all literal area available to the data fork once more. | 575 | * making all literal area available to the data fork once more. |
| 576 | */ | 576 | */ |
| 577 | STATIC void | 577 | void |
| 578 | xfs_attr_fork_reset( | 578 | xfs_attr_fork_remove( |
| 579 | struct xfs_inode *ip, | 579 | struct xfs_inode *ip, |
| 580 | struct xfs_trans *tp) | 580 | struct xfs_trans *tp) |
| 581 | { | 581 | { |
| @@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
| 641 | (mp->m_flags & XFS_MOUNT_ATTR2) && | 641 | (mp->m_flags & XFS_MOUNT_ATTR2) && |
| 642 | (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && | 642 | (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && |
| 643 | !(args->op_flags & XFS_DA_OP_ADDNAME)) { | 643 | !(args->op_flags & XFS_DA_OP_ADDNAME)) { |
| 644 | xfs_attr_fork_reset(dp, args->trans); | 644 | xfs_attr_fork_remove(dp, args->trans); |
| 645 | } else { | 645 | } else { |
| 646 | xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); | 646 | xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); |
| 647 | dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); | 647 | dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); |
| @@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform( | |||
| 905 | if (forkoff == -1) { | 905 | if (forkoff == -1) { |
| 906 | ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); | 906 | ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); |
| 907 | ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); | 907 | ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); |
| 908 | xfs_attr_fork_reset(dp, args->trans); | 908 | xfs_attr_fork_remove(dp, args->trans); |
| 909 | goto out; | 909 | goto out; |
| 910 | } | 910 | } |
| 911 | 911 | ||
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index 025c4b820c03..882c8d338891 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h | |||
| @@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args); | |||
| 53 | int xfs_attr_shortform_list(struct xfs_attr_list_context *context); | 53 | int xfs_attr_shortform_list(struct xfs_attr_list_context *context); |
| 54 | int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); | 54 | int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); |
| 55 | int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); | 55 | int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); |
| 56 | 56 | void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp); | |
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * Internal routines when attribute fork size == XFS_LBSIZE(mp). | 59 | * Internal routines when attribute fork size == XFS_LBSIZE(mp). |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index aeffeaaac0ec..f1026e86dabc 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align( | |||
| 3224 | align_alen += temp; | 3224 | align_alen += temp; |
| 3225 | align_off -= temp; | 3225 | align_off -= temp; |
| 3226 | } | 3226 | } |
| 3227 | |||
| 3228 | /* Same adjustment for the end of the requested area. */ | ||
| 3229 | temp = (align_alen % extsz); | ||
| 3230 | if (temp) | ||
| 3231 | align_alen += extsz - temp; | ||
| 3232 | |||
| 3227 | /* | 3233 | /* |
| 3228 | * Same adjustment for the end of the requested area. | 3234 | * For large extent hint sizes, the aligned extent might be larger than |
| 3235 | * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls | ||
| 3236 | * the length back under MAXEXTLEN. The outer allocation loops handle | ||
| 3237 | * short allocation just fine, so it is safe to do this. We only want to | ||
| 3238 | * do it when we are forced to, though, because it means more allocation | ||
| 3239 | * operations are required. | ||
| 3229 | */ | 3240 | */ |
| 3230 | if ((temp = (align_alen % extsz))) { | 3241 | while (align_alen > MAXEXTLEN) |
| 3231 | align_alen += extsz - temp; | 3242 | align_alen -= extsz; |
| 3232 | } | 3243 | ASSERT(align_alen <= MAXEXTLEN); |
| 3244 | |||
| 3233 | /* | 3245 | /* |
| 3234 | * If the previous block overlaps with this proposed allocation | 3246 | * If the previous block overlaps with this proposed allocation |
| 3235 | * then move the start forward without adjusting the length. | 3247 | * then move the start forward without adjusting the length. |
| @@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align( | |||
| 3318 | return -EINVAL; | 3330 | return -EINVAL; |
| 3319 | } else { | 3331 | } else { |
| 3320 | ASSERT(orig_off >= align_off); | 3332 | ASSERT(orig_off >= align_off); |
| 3321 | ASSERT(orig_end <= align_off + align_alen); | 3333 | /* see MAXEXTLEN handling above */ |
| 3334 | ASSERT(orig_end <= align_off + align_alen || | ||
| 3335 | align_alen + extsz > MAXEXTLEN); | ||
| 3322 | } | 3336 | } |
| 3323 | 3337 | ||
| 3324 | #ifdef DEBUG | 3338 | #ifdef DEBUG |
| @@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc( | |||
| 4099 | /* Figure out the extent size, adjust alen */ | 4113 | /* Figure out the extent size, adjust alen */ |
| 4100 | extsz = xfs_get_extsz_hint(ip); | 4114 | extsz = xfs_get_extsz_hint(ip); |
| 4101 | if (extsz) { | 4115 | if (extsz) { |
| 4102 | /* | ||
| 4103 | * Make sure we don't exceed a single extent length when we | ||
| 4104 | * align the extent by reducing length we are going to | ||
| 4105 | * allocate by the maximum amount extent size aligment may | ||
| 4106 | * require. | ||
| 4107 | */ | ||
| 4108 | alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1)); | ||
| 4109 | error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, | 4116 | error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, |
| 4110 | 1, 0, &aoff, &alen); | 4117 | 1, 0, &aoff, &alen); |
| 4111 | ASSERT(!error); | 4118 | ASSERT(!error); |
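
The rework of xfs_bmap_extsize_align() above moves the MAXEXTLEN handling out of xfs_bmapi_reserve_delalloc() and into the alignment code itself: after rounding the end of the request up to the extent size hint, the length is walked back down in whole extsz units until it fits under the on-disk extent length limit, which keeps the start alignment intact. With concrete, hypothetical numbers (a 1 GB extent size hint on a 4 KiB-block filesystem):

#include <assert.h>

#define MAXEXTLEN       ((1UL << 21) - 1)       /* 2097151 blocks, on-disk limit */

int main(void)
{
        unsigned long extsz = 262144;           /* 1 GB / 4 KiB              */
        unsigned long align_alen = 8 * extsz;   /* 2097152: one block over   */

        while (align_alen > MAXEXTLEN)
                align_alen -= extsz;            /* one pass -> 1835008       */

        assert(align_alen <= MAXEXTLEN);
        assert(align_alen % extsz == 0);        /* still whole hint units here */
        return 0;
}
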
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 07349a183a11..1c9e75521250 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
| @@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc( | |||
| 376 | */ | 376 | */ |
| 377 | newlen = args.mp->m_ialloc_inos; | 377 | newlen = args.mp->m_ialloc_inos; |
| 378 | if (args.mp->m_maxicount && | 378 | if (args.mp->m_maxicount && |
| 379 | percpu_counter_read(&args.mp->m_icount) + newlen > | 379 | percpu_counter_read_positive(&args.mp->m_icount) + newlen > |
| 380 | args.mp->m_maxicount) | 380 | args.mp->m_maxicount) |
| 381 | return -ENOSPC; | 381 | return -ENOSPC; |
| 382 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; | 382 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; |
| @@ -1339,10 +1339,13 @@ xfs_dialloc( | |||
| 1339 | * If we have already hit the ceiling of inode blocks then clear | 1339 | * If we have already hit the ceiling of inode blocks then clear |
| 1340 | * okalloc so we scan all available agi structures for a free | 1340 | * okalloc so we scan all available agi structures for a free |
| 1341 | * inode. | 1341 | * inode. |
| 1342 | * | ||
| 1343 | * Read rough value of mp->m_icount by percpu_counter_read_positive, | ||
| 1344 | * which will sacrifice the preciseness but improve the performance. | ||
| 1342 | */ | 1345 | */ |
| 1343 | if (mp->m_maxicount && | 1346 | if (mp->m_maxicount && |
| 1344 | percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > | 1347 | percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos |
| 1345 | mp->m_maxicount) { | 1348 | > mp->m_maxicount) { |
| 1346 | noroom = 1; | 1349 | noroom = 1; |
| 1347 | okalloc = 0; | 1350 | okalloc = 0; |
| 1348 | } | 1351 | } |
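
The xfs_ialloc.c hunks switch the unlocked inode-count checks to percpu_counter_read_positive(). The plain read just returns the central count, which lags behind in-flight per-CPU deltas and can even be transiently negative; the _positive variant clamps that to zero, so the subsequent "+ newlen > m_maxicount" arithmetic never starts from a bogus negative value while still avoiding the expensive exact sum. Approximately (see linux/percpu_counter.h for the real definitions; the my_* names are illustrative):

#include <linux/percpu_counter.h>

/* Approximate behaviour of the two unlocked readers; neither takes the
 * counter lock, so both are cheap but only roughly accurate. */
static inline s64 my_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;              /* may lag, may be negative */
}

static inline s64 my_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();                      /* don't re-read fbc->count */
        return ret < 0 ? 0 : ret;       /* clamp transient negatives */
}
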
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index f9c1c64782d3..3fbf167cfb4c 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c | |||
| @@ -380,23 +380,31 @@ xfs_attr3_root_inactive( | |||
| 380 | return error; | 380 | return error; |
| 381 | } | 381 | } |
| 382 | 382 | ||
| 383 | /* | ||
| 384 | * xfs_attr_inactive kills all traces of an attribute fork on an inode. It | ||
| 385 | * removes both the on-disk and in-memory inode fork. Note that this also has to | ||
| 386 | * handle the condition of inodes without attributes but with an attribute fork | ||
| 387 | * configured, so we can't use xfs_inode_hasattr() here. | ||
| 388 | * | ||
| 389 | * The in-memory attribute fork is removed even on error. | ||
| 390 | */ | ||
| 383 | int | 391 | int |
| 384 | xfs_attr_inactive(xfs_inode_t *dp) | 392 | xfs_attr_inactive( |
| 393 | struct xfs_inode *dp) | ||
| 385 | { | 394 | { |
| 386 | xfs_trans_t *trans; | 395 | struct xfs_trans *trans; |
| 387 | xfs_mount_t *mp; | 396 | struct xfs_mount *mp; |
| 388 | int error; | 397 | int cancel_flags = 0; |
| 398 | int lock_mode = XFS_ILOCK_SHARED; | ||
| 399 | int error = 0; | ||
| 389 | 400 | ||
| 390 | mp = dp->i_mount; | 401 | mp = dp->i_mount; |
| 391 | ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); | 402 | ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); |
| 392 | 403 | ||
| 393 | xfs_ilock(dp, XFS_ILOCK_SHARED); | 404 | xfs_ilock(dp, lock_mode); |
| 394 | if (!xfs_inode_hasattr(dp) || | 405 | if (!XFS_IFORK_Q(dp)) |
| 395 | dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { | 406 | goto out_destroy_fork; |
| 396 | xfs_iunlock(dp, XFS_ILOCK_SHARED); | 407 | xfs_iunlock(dp, lock_mode); |
| 397 | return 0; | ||
| 398 | } | ||
| 399 | xfs_iunlock(dp, XFS_ILOCK_SHARED); | ||
| 400 | 408 | ||
| 401 | /* | 409 | /* |
| 402 | * Start our first transaction of the day. | 410 | * Start our first transaction of the day. |
| @@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp) | |||
| 408 | * the inode in every transaction to let it float upward through | 416 | * the inode in every transaction to let it float upward through |
| 409 | * the log. | 417 | * the log. |
| 410 | */ | 418 | */ |
| 419 | lock_mode = 0; | ||
| 411 | trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); | 420 | trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); |
| 412 | error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); | 421 | error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); |
| 413 | if (error) { | 422 | if (error) |
| 414 | xfs_trans_cancel(trans, 0); | 423 | goto out_cancel; |
| 415 | return error; | 424 | |
| 416 | } | 425 | lock_mode = XFS_ILOCK_EXCL; |
| 417 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 426 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT; |
| 427 | xfs_ilock(dp, lock_mode); | ||
| 428 | |||
| 429 | if (!XFS_IFORK_Q(dp)) | ||
| 430 | goto out_cancel; | ||
| 418 | 431 | ||
| 419 | /* | 432 | /* |
| 420 | * No need to make quota reservations here. We expect to release some | 433 | * No need to make quota reservations here. We expect to release some |
| @@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp) | |||
| 422 | */ | 435 | */ |
| 423 | xfs_trans_ijoin(trans, dp, 0); | 436 | xfs_trans_ijoin(trans, dp, 0); |
| 424 | 437 | ||
| 425 | /* | 438 | /* invalidate and truncate the attribute fork extents */ |
| 426 | * Decide on what work routines to call based on the inode size. | 439 | if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { |
| 427 | */ | 440 | error = xfs_attr3_root_inactive(&trans, dp); |
| 428 | if (!xfs_inode_hasattr(dp) || | 441 | if (error) |
| 429 | dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { | 442 | goto out_cancel; |
| 430 | error = 0; | 443 | |
| 431 | goto out; | 444 | error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); |
| 445 | if (error) | ||
| 446 | goto out_cancel; | ||
| 432 | } | 447 | } |
| 433 | error = xfs_attr3_root_inactive(&trans, dp); | ||
| 434 | if (error) | ||
| 435 | goto out; | ||
| 436 | 448 | ||
| 437 | error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); | 449 | /* Reset the attribute fork - this also destroys the in-core fork */ |
| 438 | if (error) | 450 | xfs_attr_fork_remove(dp, trans); |
| 439 | goto out; | ||
| 440 | 451 | ||
| 441 | error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); | 452 | error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); |
| 442 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 453 | xfs_iunlock(dp, lock_mode); |
| 443 | |||
| 444 | return error; | 454 | return error; |
| 445 | 455 | ||
| 446 | out: | 456 | out_cancel: |
| 447 | xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); | 457 | xfs_trans_cancel(trans, cancel_flags); |
| 448 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 458 | out_destroy_fork: |
| 459 | /* kill the in-core attr fork before we drop the inode lock */ | ||
| 460 | if (dp->i_afp) | ||
| 461 | xfs_idestroy_fork(dp, XFS_ATTR_FORK); | ||
| 462 | if (lock_mode) | ||
| 463 | xfs_iunlock(dp, lock_mode); | ||
| 449 | return error; | 464 | return error; |
| 450 | } | 465 | } |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 8121e75352ee..3b7591224f4a 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -124,7 +124,7 @@ xfs_iozero( | |||
| 124 | status = 0; | 124 | status = 0; |
| 125 | } while (count); | 125 | } while (count); |
| 126 | 126 | ||
| 127 | return (-status); | 127 | return status; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | int | 130 | int |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d6ebc85192b7..539a85fddbc2 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -1946,21 +1946,17 @@ xfs_inactive( | |||
| 1946 | /* | 1946 | /* |
| 1947 | * If there are attributes associated with the file then blow them away | 1947 | * If there are attributes associated with the file then blow them away |
| 1948 | * now. The code calls a routine that recursively deconstructs the | 1948 | * now. The code calls a routine that recursively deconstructs the |
| 1948 | * attribute fork. We need to just commit the current transaction | 1949 | * attribute fork. It also blows away the in-core attribute fork. |
| 1950 | * because we can't use it for xfs_attr_inactive(). | ||
| 1951 | */ | 1950 | */ |
| 1952 | if (ip->i_d.di_anextents > 0) { | 1951 | if (XFS_IFORK_Q(ip)) { |
| 1953 | ASSERT(ip->i_d.di_forkoff != 0); | ||
| 1954 | |||
| 1955 | error = xfs_attr_inactive(ip); | 1952 | error = xfs_attr_inactive(ip); |
| 1956 | if (error) | 1953 | if (error) |
| 1957 | return; | 1954 | return; |
| 1958 | } | 1955 | } |
| 1959 | 1956 | ||
| 1960 | if (ip->i_afp) | 1957 | ASSERT(!ip->i_afp); |
| 1961 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | ||
| 1962 | |||
| 1963 | ASSERT(ip->i_d.di_anextents == 0); | 1958 | ASSERT(ip->i_d.di_anextents == 0); |
| 1959 | ASSERT(ip->i_d.di_forkoff == 0); | ||
| 1964 | 1960 | ||
| 1965 | /* | 1961 | /* |
| 1966 | * Free the inode. | 1962 | * Free the inode. |
| @@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout( | |||
| 2883 | if (error) | 2879 | if (error) |
| 2884 | return error; | 2880 | return error; |
| 2885 | 2881 | ||
| 2886 | /* Satisfy xfs_bumplink that this is a real tmpfile */ | 2882 | /* |
| 2883 | * Prepare the tmpfile inode as if it were created through the VFS. | ||
| 2884 | * Otherwise, the link increment paths will complain about nlink 0->1. | ||
| 2885 | * Drop the link count as done by d_tmpfile(), complete the inode setup | ||
| 2886 | * and flag it as linkable. | ||
| 2887 | */ | ||
| 2888 | drop_nlink(VFS_I(tmpfile)); | ||
| 2887 | xfs_finish_inode_setup(tmpfile); | 2889 | xfs_finish_inode_setup(tmpfile); |
| 2888 | VFS_I(tmpfile)->i_state |= I_LINKABLE; | 2890 | VFS_I(tmpfile)->i_state |= I_LINKABLE; |
| 2889 | 2891 | ||
| @@ -3151,7 +3153,7 @@ xfs_rename( | |||
| 3151 | * intermediate state on disk. | 3153 | * intermediate state on disk. |
| 3152 | */ | 3154 | */ |
| 3153 | if (wip) { | 3155 | if (wip) { |
| 3154 | ASSERT(wip->i_d.di_nlink == 0); | 3156 | ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0); |
| 3155 | error = xfs_bumplink(tp, wip); | 3157 | error = xfs_bumplink(tp, wip); |
| 3156 | if (error) | 3158 | if (error) |
| 3157 | goto out_trans_abort; | 3159 | goto out_trans_abort; |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2ce7ee3b4ec1..6f23fbdfb365 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
| @@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp) | |||
| 1084 | return xfs_sync_sb(mp, true); | 1084 | return xfs_sync_sb(mp, true); |
| 1085 | } | 1085 | } |
| 1086 | 1086 | ||
| 1087 | /* | ||
| 1088 | * Deltas for the inode count are +/-64, hence we use a large batch size | ||
| 1089 | * of 128 so we don't need to take the counter lock on every update. | ||
| 1090 | */ | ||
| 1091 | #define XFS_ICOUNT_BATCH 128 | ||
| 1087 | int | 1092 | int |
| 1088 | xfs_mod_icount( | 1093 | xfs_mod_icount( |
| 1089 | struct xfs_mount *mp, | 1094 | struct xfs_mount *mp, |
| 1090 | int64_t delta) | 1095 | int64_t delta) |
| 1091 | { | 1096 | { |
| 1092 | /* deltas are +/-64, hence the large batch size of 128. */ | 1097 | __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH); |
| 1093 | __percpu_counter_add(&mp->m_icount, delta, 128); | 1098 | if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { |
| 1094 | if (percpu_counter_compare(&mp->m_icount, 0) < 0) { | ||
| 1095 | ASSERT(0); | 1099 | ASSERT(0); |
| 1096 | percpu_counter_add(&mp->m_icount, -delta); | 1100 | percpu_counter_add(&mp->m_icount, -delta); |
| 1097 | return -EINVAL; | 1101 | return -EINVAL; |
| @@ -1113,6 +1117,14 @@ xfs_mod_ifree( | |||
| 1113 | return 0; | 1117 | return 0; |
| 1114 | } | 1118 | } |
| 1115 | 1119 | ||
| 1120 | /* | ||
| 1121 | * Deltas for the block count can vary from 1 to very large, but lock contention | ||
| 1122 | * only occurs on frequent small block count updates such as in the delayed | ||
| 1123 | * allocation path for buffered writes (page a time updates). Hence we set | ||
| 1124 | * a large batch count (1024) to minimise global counter updates except when | ||
| 1125 | * we get near to ENOSPC and we have to be very accurate with our updates. | ||
| 1126 | */ | ||
| 1127 | #define XFS_FDBLOCKS_BATCH 1024 | ||
| 1116 | int | 1128 | int |
| 1117 | xfs_mod_fdblocks( | 1129 | xfs_mod_fdblocks( |
| 1118 | struct xfs_mount *mp, | 1130 | struct xfs_mount *mp, |
| @@ -1151,25 +1163,19 @@ xfs_mod_fdblocks( | |||
| 1151 | * Taking blocks away, need to be more accurate the closer we | 1163 | * Taking blocks away, need to be more accurate the closer we |
| 1152 | * are to zero. | 1164 | * are to zero. |
| 1153 | * | 1165 | * |
| 1154 | * batch size is set to a maximum of 1024 blocks - if we are | ||
| 1155 | * allocating of freeing extents larger than this then we aren't | ||
| 1156 | * going to be hammering the counter lock so a lock per update | ||
| 1157 | * is not a problem. | ||
| 1158 | * | ||
| 1159 | * If the counter has a value of less than 2 * max batch size, | 1166 | * If the counter has a value of less than 2 * max batch size, |
| 1160 | * then make everything serialise as we are real close to | 1167 | * then make everything serialise as we are real close to |
| 1161 | * ENOSPC. | 1168 | * ENOSPC. |
| 1162 | */ | 1169 | */ |
| 1163 | #define __BATCH 1024 | 1170 | if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH, |
| 1164 | if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) | 1171 | XFS_FDBLOCKS_BATCH) < 0) |
| 1165 | batch = 1; | 1172 | batch = 1; |
| 1166 | else | 1173 | else |
| 1167 | batch = __BATCH; | 1174 | batch = XFS_FDBLOCKS_BATCH; |
| 1168 | #undef __BATCH | ||
| 1169 | 1175 | ||
| 1170 | __percpu_counter_add(&mp->m_fdblocks, delta, batch); | 1176 | __percpu_counter_add(&mp->m_fdblocks, delta, batch); |
| 1171 | if (percpu_counter_compare(&mp->m_fdblocks, | 1177 | if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp), |
| 1172 | XFS_ALLOC_SET_ASIDE(mp)) >= 0) { | 1178 | XFS_FDBLOCKS_BATCH) >= 0) { |
| 1173 | /* we had space! */ | 1179 | /* we had space! */ |
| 1174 | return 0; | 1180 | return 0; |
| 1175 | } | 1181 | } |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 27e285b92b5f..59915ea5373c 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
| @@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, | |||
| 151 | return 1; | 151 | return 1; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) | 154 | static inline unsigned int cpumask_local_spread(unsigned int i, int node) |
| 155 | { | 155 | { |
| 156 | set_bit(0, cpumask_bits(dstp)); | ||
| 157 | |||
| 158 | return 0; | 156 | return 0; |
| 159 | } | 157 | } |
| 160 | 158 | ||
| @@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) | |||
| 208 | 206 | ||
| 209 | int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); | 207 | int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); |
| 210 | int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); | 208 | int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); |
| 211 | int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); | 209 | unsigned int cpumask_local_spread(unsigned int i, int node); |
| 212 | 210 | ||
| 213 | /** | 211 | /** |
| 214 | * for_each_cpu - iterate over every cpu in a mask | 212 | * for_each_cpu - iterate over every cpu in a mask |
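
cpumask_local_spread() changes the calling convention as well as the name: instead of filling a caller-provided mask and returning an error code, it returns the i'th online CPU directly, preferring CPUs on the given NUMA node (the UP stub above simply returns 0). Callers that used to build a temporary cpumask can now do something like the following; the driver function name is hypothetical:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical example: pin per-queue IRQs to CPUs near the device's NUMA
 * node, one call per queue, no temporary cpumask_var_t needed. */
static int my_set_queue_affinity(unsigned int irq, unsigned int queue, int node)
{
        unsigned int cpu = cpumask_local_spread(queue, node);

        return irq_set_affinity_hint(irq, cpumask_of(cpu));
}
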
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 50e50095c8d1..84a109449610 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
| @@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc); | |||
| 41 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); | 41 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
| 42 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); | 42 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
| 43 | s64 __percpu_counter_sum(struct percpu_counter *fbc); | 43 | s64 __percpu_counter_sum(struct percpu_counter *fbc); |
| 44 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); | 44 | int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); |
| 45 | |||
| 46 | static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | ||
| 47 | { | ||
| 48 | return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); | ||
| 49 | } | ||
| 45 | 50 | ||
| 46 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 51 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 47 | { | 52 | { |
| @@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | |||
| 116 | return 0; | 121 | return 0; |
| 117 | } | 122 | } |
| 118 | 123 | ||
| 124 | static inline int | ||
| 125 | __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) | ||
| 126 | { | ||
| 127 | return percpu_counter_compare(fbc, rhs); | ||
| 128 | } | ||
| 129 | |||
| 119 | static inline void | 130 | static inline void |
| 120 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 131 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 121 | { | 132 | { |
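
The batch-aware compare added here is what the xfs_mount.c hunks above rely on: a counter updated with __percpu_counter_add(fbc, delta, batch) can drift from the true value by up to batch * num_online_cpus(), so a comparison must either stay outside that error margin or fall back to the exact, slow sum. A sketch of the expected shape of the helper; the real body lives in lib/percpu_counter.c and may differ in detail:

#include <linux/percpu_counter.h>
#include <linux/cpumask.h>

int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        s64 count = percpu_counter_read(fbc);

        /* Far enough from rhs that per-CPU error cannot change the answer. */
        if (abs(count - rhs) > batch * num_online_cpus())
                return count > rhs ? 1 : -1;

        /* Too close to call: fold in the per-CPU deltas and compare exactly. */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        if (count < rhs)
                return -1;
        return 0;
}
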
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index d61be7297b2c..5f1225706993 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
| @@ -1,9 +1,7 @@ | |||
| 1 | #ifndef TARGET_CORE_BACKEND_H | 1 | #ifndef TARGET_CORE_BACKEND_H |
| 2 | #define TARGET_CORE_BACKEND_H | 2 | #define TARGET_CORE_BACKEND_H |
| 3 | 3 | ||
| 4 | #define TRANSPORT_PLUGIN_PHBA_PDEV 1 | 4 | #define TRANSPORT_FLAG_PASSTHROUGH 1 |
| 5 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 | ||
| 6 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 | ||
| 7 | 5 | ||
| 8 | struct target_backend_cits { | 6 | struct target_backend_cits { |
| 9 | struct config_item_type tb_dev_cit; | 7 | struct config_item_type tb_dev_cit; |
| @@ -22,7 +20,7 @@ struct se_subsystem_api { | |||
| 22 | char inquiry_rev[4]; | 20 | char inquiry_rev[4]; |
| 23 | struct module *owner; | 21 | struct module *owner; |
| 24 | 22 | ||
| 25 | u8 transport_type; | 23 | u8 transport_flags; |
| 26 | 24 | ||
| 27 | int (*attach_hba)(struct se_hba *, u32); | 25 | int (*attach_hba)(struct se_hba *, u32); |
| 28 | void (*detach_hba)(struct se_hba *); | 26 | void (*detach_hba)(struct se_hba *); |
| @@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32); | |||
| 138 | int se_dev_set_max_sectors(struct se_device *, u32); | 136 | int se_dev_set_max_sectors(struct se_device *, u32); |
| 139 | int se_dev_set_optimal_sectors(struct se_device *, u32); | 137 | int se_dev_set_optimal_sectors(struct se_device *, u32); |
| 140 | int se_dev_set_block_size(struct se_device *, u32); | 138 | int se_dev_set_block_size(struct se_device *, u32); |
| 139 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, | ||
| 140 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); | ||
| 141 | 141 | ||
| 142 | #endif /* TARGET_CORE_BACKEND_H */ | 142 | #endif /* TARGET_CORE_BACKEND_H */ |
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index 25bb04c4209e..b99c01170392 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h | |||
| @@ -40,8 +40,6 @@ struct target_fabric_configfs { | |||
| 40 | struct config_item *tf_fabric; | 40 | struct config_item *tf_fabric; |
| 41 | /* Passed from fabric modules */ | 41 | /* Passed from fabric modules */ |
| 42 | struct config_item_type *tf_fabric_cit; | 42 | struct config_item_type *tf_fabric_cit; |
| 43 | /* Pointer to target core subsystem */ | ||
| 44 | struct configfs_subsystem *tf_subsys; | ||
| 45 | /* Pointer to fabric's struct module */ | 43 | /* Pointer to fabric's struct module */ |
| 46 | struct module *tf_module; | 44 | struct module *tf_module; |
| 47 | struct target_core_fabric_ops tf_ops; | 45 | struct target_core_fabric_ops tf_ops; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 17c7f5ac7ea0..0f4dc3768587 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | struct target_core_fabric_ops { | 4 | struct target_core_fabric_ops { |
| 5 | struct module *module; | 5 | struct module *module; |
| 6 | const char *name; | 6 | const char *name; |
| 7 | struct configfs_subsystem *tf_subsys; | ||
| 8 | char *(*get_fabric_name)(void); | 7 | char *(*get_fabric_name)(void); |
| 9 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | 8 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); |
| 10 | char *(*tpg_get_wwn)(struct se_portal_group *); | 9 | char *(*tpg_get_wwn)(struct se_portal_group *); |
| @@ -109,6 +108,9 @@ struct target_core_fabric_ops { | |||
| 109 | int target_register_template(const struct target_core_fabric_ops *fo); | 108 | int target_register_template(const struct target_core_fabric_ops *fo); |
| 110 | void target_unregister_template(const struct target_core_fabric_ops *fo); | 109 | void target_unregister_template(const struct target_core_fabric_ops *fo); |
| 111 | 110 | ||
| 111 | int target_depend_item(struct config_item *item); | ||
| 112 | void target_undepend_item(struct config_item *item); | ||
| 113 | |||
| 112 | struct se_session *transport_init_session(enum target_prot_op); | 114 | struct se_session *transport_init_session(enum target_prot_op); |
| 113 | int transport_alloc_session_tags(struct se_session *, unsigned int, | 115 | int transport_alloc_session_tags(struct se_session *, unsigned int, |
| 114 | unsigned int); | 116 | unsigned int); |
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 81ea59812117..f7554fd7fc62 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h | |||
| @@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree, | |||
| 140 | TP_ARGS(call_site, ptr) | 140 | TP_ARGS(call_site, ptr) |
| 141 | ); | 141 | ); |
| 142 | 142 | ||
| 143 | DEFINE_EVENT(kmem_free, kmem_cache_free, | 143 | DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, |
| 144 | 144 | ||
| 145 | TP_PROTO(unsigned long call_site, const void *ptr), | 145 | TP_PROTO(unsigned long call_site, const void *ptr), |
| 146 | 146 | ||
| 147 | TP_ARGS(call_site, ptr) | 147 | TP_ARGS(call_site, ptr), |
| 148 | |||
| 149 | /* | ||
| 150 | * This trace can be potentially called from an offlined cpu. | ||
| 151 | * Since trace points use RCU and RCU should not be used from | ||
| 152 | * offline cpus, filter such calls out. | ||
| 153 | * While this trace can be called from a preemptable section, | ||
| 154 | * it has no impact on the condition since tasks can migrate | ||
| 155 | * only from online cpus to other online cpus. Thus it's safe | ||
| 156 | * to use raw_smp_processor_id. | ||
| 157 | */ | ||
| 158 | TP_CONDITION(cpu_online(raw_smp_processor_id())) | ||
| 148 | ); | 159 | ); |
| 149 | 160 | ||
| 150 | TRACE_EVENT(mm_page_free, | 161 | TRACE_EVENT_CONDITION(mm_page_free, |
| 151 | 162 | ||
| 152 | TP_PROTO(struct page *page, unsigned int order), | 163 | TP_PROTO(struct page *page, unsigned int order), |
| 153 | 164 | ||
| 154 | TP_ARGS(page, order), | 165 | TP_ARGS(page, order), |
| 155 | 166 | ||
| 167 | |||
| 168 | /* | ||
| 169 | * This trace can be potentially called from an offlined cpu. | ||
| 170 | * Since trace points use RCU and RCU should not be used from | ||
| 171 | * offline cpus, filter such calls out. | ||
| 172 | * While this trace can be called from a preemptable section, | ||
| 173 | * it has no impact on the condition since tasks can migrate | ||
| 174 | * only from online cpus to other online cpus. Thus it's safe | ||
| 175 | * to use raw_smp_processor_id. | ||
| 176 | */ | ||
| 177 | TP_CONDITION(cpu_online(raw_smp_processor_id())), | ||
| 178 | |||
| 156 | TP_STRUCT__entry( | 179 | TP_STRUCT__entry( |
| 157 | __field( unsigned long, pfn ) | 180 | __field( unsigned long, pfn ) |
| 158 | __field( unsigned int, order ) | 181 | __field( unsigned int, order ) |
| @@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, | |||
| 253 | TP_ARGS(page, order, migratetype) | 276 | TP_ARGS(page, order, migratetype) |
| 254 | ); | 277 | ); |
| 255 | 278 | ||
| 256 | DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, | 279 | TRACE_EVENT_CONDITION(mm_page_pcpu_drain, |
| 257 | 280 | ||
| 258 | TP_PROTO(struct page *page, unsigned int order, int migratetype), | 281 | TP_PROTO(struct page *page, unsigned int order, int migratetype), |
| 259 | 282 | ||
| 260 | TP_ARGS(page, order, migratetype), | 283 | TP_ARGS(page, order, migratetype), |
| 261 | 284 | ||
| 285 | /* | ||
| 286 | * This trace can be potentially called from an offlined cpu. | ||
| 287 | * Since trace points use RCU and RCU should not be used from | ||
| 288 | * offline cpus, filter such calls out. | ||
| 289 | * While this trace can be called from a preemptable section, | ||
| 290 | * it has no impact on the condition since tasks can migrate | ||
| 291 | * only from online cpus to other online cpus. Thus it's safe | ||
| 292 | * to use raw_smp_processor_id. | ||
| 293 | */ | ||
| 294 | TP_CONDITION(cpu_online(raw_smp_processor_id())), | ||
| 295 | |||
| 296 | TP_STRUCT__entry( | ||
| 297 | __field( unsigned long, pfn ) | ||
| 298 | __field( unsigned int, order ) | ||
| 299 | __field( int, migratetype ) | ||
| 300 | ), | ||
| 301 | |||
| 302 | TP_fast_assign( | ||
| 303 | __entry->pfn = page ? page_to_pfn(page) : -1UL; | ||
| 304 | __entry->order = order; | ||
| 305 | __entry->migratetype = migratetype; | ||
| 306 | ), | ||
| 307 | |||
| 262 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", | 308 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", |
| 263 | pfn_to_page(__entry->pfn), __entry->pfn, | 309 | pfn_to_page(__entry->pfn), __entry->pfn, |
| 264 | __entry->order, __entry->migratetype) | 310 | __entry->order, __entry->migratetype) |
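The three TP_CONDITION() additions above attach the same cpu_online() guard to each event. Conceptually, the effect at a call site is the sketch below; the real check is generated by the TRACE_EVENT_CONDITION()/DEFINE_EVENT_CONDITION() macros, and __emit_mm_page_free() is a made-up stand-in for the generated tracepoint body, not a real kernel symbol.

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/mm_types.h>

/* Hypothetical stand-in for the macro-generated tracepoint body. */
void __emit_mm_page_free(struct page *page, unsigned int order);

/* Hand-written approximation of the added TP_CONDITION guard. */
static inline void trace_mm_page_free_sketch(struct page *page,
                                             unsigned int order)
{
        /* RCU must not be used on an offlined CPU, so skip the event. */
        if (!cpu_online(raw_smp_processor_id()))
                return;

        __emit_mm_page_free(page, order);
}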
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 984169a819ee..d7f1cbc3766c 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 | * SUCH DAMAGE. */ | 27 | * SUCH DAMAGE. */ |
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/virtio_types.h> | ||
| 29 | #include <linux/virtio_ids.h> | 30 | #include <linux/virtio_ids.h> |
| 30 | #include <linux/virtio_config.h> | 31 | #include <linux/virtio_config.h> |
| 31 | 32 | ||
diff --git a/kernel/module.c b/kernel/module.c index 42a1d2afb217..cfc9e843a924 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3370 | module_bug_cleanup(mod); | 3370 | module_bug_cleanup(mod); |
| 3371 | mutex_unlock(&module_mutex); | 3371 | mutex_unlock(&module_mutex); |
| 3372 | 3372 | ||
| 3373 | blocking_notifier_call_chain(&module_notify_list, | ||
| 3374 | MODULE_STATE_GOING, mod); | ||
| 3375 | |||
| 3373 | /* we can't deallocate the module until we clear memory protection */ | 3376 | /* we can't deallocate the module until we clear memory protection */ |
| 3374 | unset_module_init_ro_nx(mod); | 3377 | unset_module_init_ro_nx(mod); |
| 3375 | unset_module_core_ro_nx(mod); | 3378 | unset_module_core_ro_nx(mod); |
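The added blocking_notifier_call_chain() means a load that fails after the MODULE_STATE_COMING notification now also delivers MODULE_STATE_GOING, so notifier users can tear down whatever they set up for the module. A minimal sketch of such a listener, assuming only the standard register_module_notifier() API (the callback name and message are illustrative):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Illustrative listener; with this change it also sees failed loads. */
static int example_module_cb(struct notifier_block *nb,
                             unsigned long state, void *data)
{
        struct module *mod = data;

        if (state == MODULE_STATE_GOING)
                pr_info("module %s going away (unload or failed load)\n",
                        mod->name);
        return NOTIFY_OK;
}

static struct notifier_block example_module_nb = {
        .notifier_call = example_module_cb,
};

static int __init example_init(void)
{
        return register_module_notifier(&example_module_nb);
}

static void __exit example_exit(void)
{
        unregister_module_notifier(&example_module_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");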
diff --git a/lib/cpumask.c b/lib/cpumask.c index 830dd5dec40f..5f627084f2e9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
| @@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) | |||
| 139 | #endif | 139 | #endif |
| 140 | 140 | ||
| 141 | /** | 141 | /** |
| 142 | * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first | 142 | * cpumask_local_spread - select the i'th cpu with local numa cpu's first |
| 143 | * | ||
| 144 | * @i: index number | 143 | * @i: index number |
| 145 | * @numa_node: local numa_node | 144 | * @node: local numa_node |
| 146 | * @dstp: cpumask with the relevant cpu bit set according to the policy | ||
| 147 | * | 145 | * |
| 148 | * This function sets the cpumask according to a numa aware policy. | 146 | * This function selects an online CPU according to a numa aware policy; |
| 149 | * cpumask could be used as an affinity hint for the IRQ related to a | 147 | * local cpus are returned first, followed by non-local ones, then it |
| 150 | * queue. When the policy is to spread queues across cores - local cores | 148 | * wraps around. |
| 151 | * first. | ||
| 152 | * | 149 | * |
| 153 | * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set | 150 | * It's not very efficient, but useful for setup. |
| 154 | * the cpu bit and need to re-call the function. | ||
| 155 | */ | 151 | */ |
| 156 | int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) | 152 | unsigned int cpumask_local_spread(unsigned int i, int node) |
| 157 | { | 153 | { |
| 158 | cpumask_var_t mask; | ||
| 159 | int cpu; | 154 | int cpu; |
| 160 | int ret = 0; | ||
| 161 | |||
| 162 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
| 163 | return -ENOMEM; | ||
| 164 | 155 | ||
| 156 | /* Wrap: we always want a cpu. */ | ||
| 165 | i %= num_online_cpus(); | 157 | i %= num_online_cpus(); |
| 166 | 158 | ||
| 167 | if (numa_node == -1 || !cpumask_of_node(numa_node)) { | 159 | if (node == -1) { |
| 168 | /* Use all online cpu's for non numa aware system */ | 160 | for_each_cpu(cpu, cpu_online_mask) |
| 169 | cpumask_copy(mask, cpu_online_mask); | 161 | if (i-- == 0) |
| 162 | return cpu; | ||
| 170 | } else { | 163 | } else { |
| 171 | int n; | 164 | /* NUMA first. */ |
| 172 | 165 | for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) | |
| 173 | cpumask_and(mask, | 166 | if (i-- == 0) |
| 174 | cpumask_of_node(numa_node), cpu_online_mask); | 167 | return cpu; |
| 175 | 168 | ||
| 176 | n = cpumask_weight(mask); | 169 | for_each_cpu(cpu, cpu_online_mask) { |
| 177 | if (i >= n) { | 170 | /* Skip NUMA nodes, done above. */ |
| 178 | i -= n; | 171 | if (cpumask_test_cpu(cpu, cpumask_of_node(node))) |
| 179 | 172 | continue; | |
| 180 | /* If index > number of local cpu's, mask out local | 173 | |
| 181 | * cpu's | 174 | if (i-- == 0) |
| 182 | */ | 175 | return cpu; |
| 183 | cpumask_andnot(mask, cpu_online_mask, mask); | ||
| 184 | } | 176 | } |
| 185 | } | 177 | } |
| 186 | 178 | BUG(); | |
| 187 | for_each_cpu(cpu, mask) { | ||
| 188 | if (--i < 0) | ||
| 189 | goto out; | ||
| 190 | } | ||
| 191 | |||
| 192 | ret = -EAGAIN; | ||
| 193 | |||
| 194 | out: | ||
| 195 | free_cpumask_var(mask); | ||
| 196 | |||
| 197 | if (!ret) | ||
| 198 | cpumask_set_cpu(cpu, dstp); | ||
| 199 | |||
| 200 | return ret; | ||
| 201 | } | 179 | } |
| 202 | EXPORT_SYMBOL(cpumask_set_cpu_local_first); | 180 | EXPORT_SYMBOL(cpumask_local_spread); |
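Callers of the removed cpumask_set_cpu_local_first() had to allocate a mask and handle -ENOMEM/-EAGAIN; the replacement simply returns an online CPU. A sketch of how a multi-queue driver might use it for IRQ affinity hints; irq_set_affinity_hint() is a real kernel API, while nr_queues and queue_irq[] are invented for the example:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Spread per-queue IRQs across online CPUs, local NUMA node first. */
static void example_spread_irq_hints(int nr_queues, const int *queue_irq,
                                     int node)
{
        int i;

        for (i = 0; i < nr_queues; i++) {
                /* Always succeeds and always returns an online CPU,
                 * unlike the old -ENOMEM/-EAGAIN calling convention. */
                unsigned int cpu = cpumask_local_spread(i, node);

                irq_set_affinity_hint(queue_irq[i], cpumask_of(cpu));
        }
}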
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 48144cdae819..f051d69f0910 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
| @@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
| 197 | * Compare counter against given value. | 197 | * Compare counter against given value. |
| 198 | * Return 1 if greater, 0 if equal and -1 if less | 198 | * Return 1 if greater, 0 if equal and -1 if less |
| 199 | */ | 199 | */ |
| 200 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | 200 | int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) |
| 201 | { | 201 | { |
| 202 | s64 count; | 202 | s64 count; |
| 203 | 203 | ||
| 204 | count = percpu_counter_read(fbc); | 204 | count = percpu_counter_read(fbc); |
| 205 | /* Check to see if rough count will be sufficient for comparison */ | 205 | /* Check to see if rough count will be sufficient for comparison */ |
| 206 | if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { | 206 | if (abs(count - rhs) > (batch * num_online_cpus())) { |
| 207 | if (count > rhs) | 207 | if (count > rhs) |
| 208 | return 1; | 208 | return 1; |
| 209 | else | 209 | else |
| @@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | |||
| 218 | else | 218 | else |
| 219 | return 0; | 219 | return 0; |
| 220 | } | 220 | } |
| 221 | EXPORT_SYMBOL(percpu_counter_compare); | 221 | EXPORT_SYMBOL(__percpu_counter_compare); |
| 222 | 222 | ||
| 223 | static int __init percpu_counter_startup(void) | 223 | static int __init percpu_counter_startup(void) |
| 224 | { | 224 | { |
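The header side of this rename is not part of the hunk; presumably percpu_counter_compare() remains available as a thin wrapper that passes the default batch, roughly as sketched below (an assumption, not shown in this diff):

/* Assumed compatibility wrapper in include/linux/percpu_counter.h;
 * not part of the hunk above. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}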
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py index a1504c4f1900..25db8cff44a2 100644 --- a/scripts/gdb/linux/modules.py +++ b/scripts/gdb/linux/modules.py | |||
| @@ -73,18 +73,11 @@ class LxLsmod(gdb.Command): | |||
| 73 | " " if utils.get_long_type().sizeof == 8 else "")) | 73 | " " if utils.get_long_type().sizeof == 8 else "")) |
| 74 | 74 | ||
| 75 | for module in module_list(): | 75 | for module in module_list(): |
| 76 | ref = 0 | ||
| 77 | module_refptr = module['refptr'] | ||
| 78 | for cpu in cpus.cpu_list("cpu_possible_mask"): | ||
| 79 | refptr = cpus.per_cpu(module_refptr, cpu) | ||
| 80 | ref += refptr['incs'] | ||
| 81 | ref -= refptr['decs'] | ||
| 82 | |||
| 83 | gdb.write("{address} {name:<19} {size:>8} {ref}".format( | 76 | gdb.write("{address} {name:<19} {size:>8} {ref}".format( |
| 84 | address=str(module['module_core']).split()[0], | 77 | address=str(module['module_core']).split()[0], |
| 85 | name=module['name'].string(), | 78 | name=module['name'].string(), |
| 86 | size=str(module['core_size']), | 79 | size=str(module['core_size']), |
| 87 | ref=str(ref))) | 80 | ref=str(module['refcnt']['counter']))) |
| 88 | 81 | ||
| 89 | source_list = module['source_list'] | 82 | source_list = module['source_list'] |
| 90 | t = self._module_use_type.get_type().pointer() | 83 | t = self._module_use_type.get_type().pointer() |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 1c8678775f40..ac0db1679f09 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
| @@ -4926,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, | |||
| 4926 | dig_only: | 4926 | dig_only: |
| 4927 | parse_digital(codec); | 4927 | parse_digital(codec); |
| 4928 | 4928 | ||
| 4929 | if (spec->power_down_unused || codec->power_save_node) | 4929 | if (spec->power_down_unused || codec->power_save_node) { |
| 4930 | if (!codec->power_filter) | 4930 | if (!codec->power_filter) |
| 4931 | codec->power_filter = snd_hda_gen_path_power_filter; | 4931 | codec->power_filter = snd_hda_gen_path_power_filter; |
| 4932 | if (!codec->patch_ops.stream_pm) | ||
| 4933 | codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; | ||
| 4934 | } | ||
| 4932 | 4935 | ||
| 4933 | if (!spec->no_analog && spec->beep_nid) { | 4936 | if (!spec->no_analog && spec->beep_nid) { |
| 4934 | err = snd_hda_attach_beep_device(codec, spec->beep_nid); | 4937 | err = snd_hda_attach_beep_device(codec, spec->beep_nid); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 34040d26c94f..fea198c58196 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2089,6 +2089,8 @@ static const struct pci_device_id azx_ids[] = { | |||
| 2089 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2089 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
| 2090 | { PCI_DEVICE(0x1002, 0xaab0), | 2090 | { PCI_DEVICE(0x1002, 0xaab0), |
| 2091 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2091 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
| 2092 | { PCI_DEVICE(0x1002, 0xaac8), | ||
| 2093 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
| 2092 | /* VIA VT8251/VT8237A */ | 2094 | /* VIA VT8251/VT8237A */ |
| 2093 | { PCI_DEVICE(0x1106, 0x3288), | 2095 | { PCI_DEVICE(0x1106, 0x3288), |
| 2094 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, | 2096 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 31f8f13be907..464168426465 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -884,6 +884,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { | |||
| 884 | { 0x10ec0275, 0x1028, 0, "ALC3260" }, | 884 | { 0x10ec0275, 0x1028, 0, "ALC3260" }, |
| 885 | { 0x10ec0899, 0x1028, 0, "ALC3861" }, | 885 | { 0x10ec0899, 0x1028, 0, "ALC3861" }, |
| 886 | { 0x10ec0298, 0x1028, 0, "ALC3266" }, | 886 | { 0x10ec0298, 0x1028, 0, "ALC3266" }, |
| 887 | { 0x10ec0256, 0x1028, 0, "ALC3246" }, | ||
| 887 | { 0x10ec0670, 0x1025, 0, "ALC669X" }, | 888 | { 0x10ec0670, 0x1025, 0, "ALC669X" }, |
| 888 | { 0x10ec0676, 0x1025, 0, "ALC679X" }, | 889 | { 0x10ec0676, 0x1025, 0, "ALC679X" }, |
| 889 | { 0x10ec0282, 0x1043, 0, "ALC3229" }, | 890 | { 0x10ec0282, 0x1043, 0, "ALC3229" }, |
| @@ -4227,6 +4228,11 @@ static void alc_fixup_headset_mode_alc662(struct hda_codec *codec, | |||
| 4227 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | 4228 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
| 4228 | spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; | 4229 | spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; |
| 4229 | spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */ | 4230 | spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */ |
| 4231 | |||
| 4232 | /* Disable boost for mic-in permanently. (This code is only called | ||
| 4233 | from quirks that guarantee that the headphone is at NID 0x1b.) */ | ||
| 4234 | snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000); | ||
| 4235 | snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP); | ||
| 4230 | } else | 4236 | } else |
| 4231 | alc_fixup_headset_mode(codec, fix, action); | 4237 | alc_fixup_headset_mode(codec, fix, action); |
| 4232 | } | 4238 | } |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 43c99ce4a520..6833c74ed6ff 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -4403,7 +4403,6 @@ static const struct hda_codec_ops stac_patch_ops = { | |||
| 4403 | #ifdef CONFIG_PM | 4403 | #ifdef CONFIG_PM |
| 4404 | .suspend = stac_suspend, | 4404 | .suspend = stac_suspend, |
| 4405 | #endif | 4405 | #endif |
| 4406 | .stream_pm = snd_hda_gen_stream_pm, | ||
| 4407 | .reboot_notify = stac_shutup, | 4406 | .reboot_notify = stac_shutup, |
| 4408 | }; | 4407 | }; |
| 4409 | 4408 | ||
| @@ -4697,7 +4696,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) | |||
| 4697 | return err; | 4696 | return err; |
| 4698 | 4697 | ||
| 4699 | spec = codec->spec; | 4698 | spec = codec->spec; |
| 4700 | codec->power_save_node = 1; | 4699 | /* disabled power_save_node since it causes noises on a Dell machine */ |
| 4700 | /* codec->power_save_node = 1; */ | ||
| 4701 | spec->linear_tone_beep = 0; | 4701 | spec->linear_tone_beep = 0; |
| 4702 | spec->gen.own_eapd_ctl = 1; | 4702 | spec->gen.own_eapd_ctl = 1; |
| 4703 | spec->gen.power_down_unused = 1; | 4703 | spec->gen.power_down_unused = 1; |
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index d51703e30523..0a4ad5feb82e 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c | |||
| @@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, | |||
| 72 | if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { | 72 | if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { |
| 73 | old_vmaster_hook = spec->vmaster_mute.hook; | 73 | old_vmaster_hook = spec->vmaster_mute.hook; |
| 74 | spec->vmaster_mute.hook = update_tpacpi_mute_led; | 74 | spec->vmaster_mute.hook = update_tpacpi_mute_led; |
| 75 | spec->vmaster_mute_enum = 1; | ||
| 76 | removefunc = false; | 75 | removefunc = false; |
| 77 | } | 76 | } |
| 78 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { | 77 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 46facfc9aec1..29175346cc4f 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -1118,6 +1118,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
| 1118 | case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ | 1118 | case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ |
| 1119 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ | 1119 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ |
| 1120 | case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ | 1120 | case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ |
| 1121 | case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ | ||
| 1121 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1122 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
| 1122 | return true; | 1123 | return true; |
| 1123 | } | 1124 | } |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bac98ca3d4ca..323b65edfc97 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -52,6 +52,7 @@ unsigned int skip_c0; | |||
| 52 | unsigned int skip_c1; | 52 | unsigned int skip_c1; |
| 53 | unsigned int do_nhm_cstates; | 53 | unsigned int do_nhm_cstates; |
| 54 | unsigned int do_snb_cstates; | 54 | unsigned int do_snb_cstates; |
| 55 | unsigned int do_knl_cstates; | ||
| 55 | unsigned int do_pc2; | 56 | unsigned int do_pc2; |
| 56 | unsigned int do_pc3; | 57 | unsigned int do_pc3; |
| 57 | unsigned int do_pc6; | 58 | unsigned int do_pc6; |
| @@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons; | |||
| 91 | unsigned int do_ring_perf_limit_reasons; | 92 | unsigned int do_ring_perf_limit_reasons; |
| 92 | unsigned int crystal_hz; | 93 | unsigned int crystal_hz; |
| 93 | unsigned long long tsc_hz; | 94 | unsigned long long tsc_hz; |
| 95 | int base_cpu; | ||
| 94 | 96 | ||
| 95 | #define RAPL_PKG (1 << 0) | 97 | #define RAPL_PKG (1 << 0) |
| 96 | /* 0x610 MSR_PKG_POWER_LIMIT */ | 98 | /* 0x610 MSR_PKG_POWER_LIMIT */ |
| @@ -316,7 +318,7 @@ void print_header(void) | |||
| 316 | 318 | ||
| 317 | if (do_nhm_cstates) | 319 | if (do_nhm_cstates) |
| 318 | outp += sprintf(outp, " CPU%%c1"); | 320 | outp += sprintf(outp, " CPU%%c1"); |
| 319 | if (do_nhm_cstates && !do_slm_cstates) | 321 | if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) |
| 320 | outp += sprintf(outp, " CPU%%c3"); | 322 | outp += sprintf(outp, " CPU%%c3"); |
| 321 | if (do_nhm_cstates) | 323 | if (do_nhm_cstates) |
| 322 | outp += sprintf(outp, " CPU%%c6"); | 324 | outp += sprintf(outp, " CPU%%c6"); |
| @@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 546 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 548 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 547 | goto done; | 549 | goto done; |
| 548 | 550 | ||
| 549 | if (do_nhm_cstates && !do_slm_cstates) | 551 | if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) |
| 550 | outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); | 552 | outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); |
| 551 | if (do_nhm_cstates) | 553 | if (do_nhm_cstates) |
| 552 | outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); | 554 | outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); |
| @@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 1018 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 1020 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 1019 | return 0; | 1021 | return 0; |
| 1020 | 1022 | ||
| 1021 | if (do_nhm_cstates && !do_slm_cstates) { | 1023 | if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { |
| 1022 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) | 1024 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) |
| 1023 | return -6; | 1025 | return -6; |
| 1024 | } | 1026 | } |
| 1025 | 1027 | ||
| 1026 | if (do_nhm_cstates) { | 1028 | if (do_nhm_cstates && !do_knl_cstates) { |
| 1027 | if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) | 1029 | if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) |
| 1028 | return -7; | 1030 | return -7; |
| 1031 | } else if (do_knl_cstates) { | ||
| 1032 | if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) | ||
| 1033 | return -7; | ||
| 1029 | } | 1034 | } |
| 1030 | 1035 | ||
| 1031 | if (do_snb_cstates) | 1036 | if (do_snb_cstates) |
| @@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void) | |||
| 1150 | unsigned long long msr; | 1155 | unsigned long long msr; |
| 1151 | unsigned int ratio; | 1156 | unsigned int ratio; |
| 1152 | 1157 | ||
| 1153 | get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); | 1158 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); |
| 1154 | 1159 | ||
| 1155 | fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); | 1160 | fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); |
| 1156 | 1161 | ||
| @@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void) | |||
| 1162 | fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", | 1167 | fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", |
| 1163 | ratio, bclk, ratio * bclk); | 1168 | ratio, bclk, ratio * bclk); |
| 1164 | 1169 | ||
| 1165 | get_msr(0, MSR_IA32_POWER_CTL, &msr); | 1170 | get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); |
| 1166 | fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", | 1171 | fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", |
| 1167 | msr, msr & 0x2 ? "EN" : "DIS"); | 1172 | msr, msr & 0x2 ? "EN" : "DIS"); |
| 1168 | 1173 | ||
| @@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void) | |||
| 1175 | unsigned long long msr; | 1180 | unsigned long long msr; |
| 1176 | unsigned int ratio; | 1181 | unsigned int ratio; |
| 1177 | 1182 | ||
| 1178 | get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr); | 1183 | get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); |
| 1179 | 1184 | ||
| 1180 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); | 1185 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); |
| 1181 | 1186 | ||
| @@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void) | |||
| 1197 | unsigned long long msr; | 1202 | unsigned long long msr; |
| 1198 | unsigned int ratio; | 1203 | unsigned int ratio; |
| 1199 | 1204 | ||
| 1200 | get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr); | 1205 | get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); |
| 1201 | 1206 | ||
| 1202 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); | 1207 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); |
| 1203 | 1208 | ||
| @@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void) | |||
| 1249 | unsigned long long msr; | 1254 | unsigned long long msr; |
| 1250 | unsigned int ratio; | 1255 | unsigned int ratio; |
| 1251 | 1256 | ||
| 1252 | get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr); | 1257 | get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); |
| 1253 | 1258 | ||
| 1254 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); | 1259 | fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); |
| 1255 | 1260 | ||
| @@ -1296,11 +1301,72 @@ dump_nhm_turbo_ratio_limits(void) | |||
| 1296 | } | 1301 | } |
| 1297 | 1302 | ||
| 1298 | static void | 1303 | static void |
| 1304 | dump_knl_turbo_ratio_limits(void) | ||
| 1305 | { | ||
| 1306 | int cores; | ||
| 1307 | unsigned int ratio; | ||
| 1308 | unsigned long long msr; | ||
| 1309 | int delta_cores; | ||
| 1310 | int delta_ratio; | ||
| 1311 | int i; | ||
| 1312 | |||
| 1313 | get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); | ||
| 1314 | |||
| 1315 | fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", | ||
| 1316 | msr); | ||
| 1317 | |||
| 1318 | /** | ||
| 1319 | * Turbo encoding in KNL is as follows: | ||
| 1320 | * [7:0] -- Base value of number of active cores of bucket 1. | ||
| 1321 | * [15:8] -- Base value of freq ratio of bucket 1. | ||
| 1322 | * [20:16] -- +ve delta of number of active cores of bucket 2. | ||
| 1323 | * i.e. active cores of bucket 2 = | ||
| 1324 | * active cores of bucket 1 + delta | ||
| 1325 | * [23:21] -- Negative delta of freq ratio of bucket 2. | ||
| 1326 | * i.e. freq ratio of bucket 2 = | ||
| 1327 | * freq ratio of bucket 1 - delta | ||
| 1328 | * [28:24] -- +ve delta of number of active cores of bucket 3. | ||
| 1329 | * [31:29] -- -ve delta of freq ratio of bucket 3. | ||
| 1330 | * [36:32] -- +ve delta of number of active cores of bucket 4. | ||
| 1331 | * [39:37] -- -ve delta of freq ratio of bucket 4. | ||
| 1332 | * [44:40] -- +ve delta of number of active cores of bucket 5. | ||
| 1333 | * [47:45] -- -ve delta of freq ratio of bucket 5. | ||
| 1334 | * [52:48] -- +ve delta of number of active cores of bucket 6. | ||
| 1335 | * [55:53] -- -ve delta of freq ratio of bucket 6. | ||
| 1336 | * [60:56] -- +ve delta of number of active cores of bucket 7. | ||
| 1337 | * [63:61] -- -ve delta of freq ratio of bucket 7. | ||
| 1338 | */ | ||
| 1339 | cores = msr & 0xFF; | ||
| 1340 | ratio = (msr >> 8) & 0xFF; | ||
| 1341 | if (ratio > 0) | ||
| 1342 | fprintf(stderr, | ||
| 1343 | "%d * %.0f = %.0f MHz max turbo %d active cores\n", | ||
| 1344 | ratio, bclk, ratio * bclk, cores); | ||
| 1345 | |||
| 1346 | for (i = 16; i < 64; i = i + 8) { | ||
| 1347 | delta_cores = (msr >> i) & 0x1F; | ||
| 1348 | delta_ratio = (msr >> (i + 5)) & 0x7; | ||
| 1349 | if (!delta_cores || !delta_ratio) | ||
| 1350 | return; | ||
| 1351 | cores = cores + delta_cores; | ||
| 1352 | ratio = ratio - delta_ratio; | ||
| 1353 | |||
| 1354 | /** -ve ratios will make successive ratio calculations | ||
| 1355 | * negative. Hence return instead of carrying on. | ||
| 1356 | */ | ||
| 1357 | if (ratio > 0) | ||
| 1358 | fprintf(stderr, | ||
| 1359 | "%d * %.0f = %.0f MHz max turbo %d active cores\n", | ||
| 1360 | ratio, bclk, ratio * bclk, cores); | ||
| 1361 | } | ||
| 1362 | } | ||
| 1363 | |||
| 1364 | static void | ||
| 1299 | dump_nhm_cst_cfg(void) | 1365 | dump_nhm_cst_cfg(void) |
| 1300 | { | 1366 | { |
| 1301 | unsigned long long msr; | 1367 | unsigned long long msr; |
| 1302 | 1368 | ||
| 1303 | get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); | 1369 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); |
| 1304 | 1370 | ||
| 1305 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) | 1371 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
| 1306 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) | 1372 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
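To make the bucket encoding described in the comment concrete, here is a standalone decode of a made-up MSR value (0x241E08 is invented, not read from hardware, and a 100 MHz base clock is assumed), following the same bit fields as dump_knl_turbo_ratio_limits():

#include <stdio.h>

int main(void)
{
        unsigned long long msr = 0x241E08ULL;   /* hypothetical value */
        double bclk = 100.0;                    /* assumed base clock, MHz */
        int cores = msr & 0xFF;                 /* bucket 1: 8 cores */
        int ratio = (msr >> 8) & 0xFF;          /* bucket 1: ratio 30 */
        int i;

        printf("%d active cores: %.0f MHz\n", cores, ratio * bclk);

        for (i = 16; i < 64; i += 8) {
                int delta_cores = (msr >> i) & 0x1F;
                int delta_ratio = (msr >> (i + 5)) & 0x7;

                if (!delta_cores || !delta_ratio)
                        break;
                cores += delta_cores;           /* bucket 2: 8 + 4 = 12 */
                ratio -= delta_ratio;           /* bucket 2: 30 - 1 = 29 */
                printf("%d active cores: %.0f MHz\n", cores, ratio * bclk);
        }
        return 0;
}

Compiled as ordinary userspace C this prints "8 active cores: 3000 MHz" and "12 active cores: 2900 MHz"; bucket 3 has zero deltas, so the loop stops there.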
| @@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...) | |||
| 1381 | } | 1447 | } |
| 1382 | 1448 | ||
| 1383 | /* | 1449 | /* |
| 1384 | * cpu_is_first_sibling_in_core(cpu) | 1450 | * get_cpu_position_in_core(cpu) |
| 1385 | * return 1 if given CPU is 1st HT sibling in the core | 1451 | * return the position of the CPU among its HT siblings in the core |
| 1452 | * return -1 if the sibling is not in list | ||
| 1386 | */ | 1453 | */ |
| 1387 | int cpu_is_first_sibling_in_core(int cpu) | 1454 | int get_cpu_position_in_core(int cpu) |
| 1388 | { | 1455 | { |
| 1389 | return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); | 1456 | char path[64]; |
| 1457 | FILE *filep; | ||
| 1458 | int this_cpu; | ||
| 1459 | char character; | ||
| 1460 | int i; | ||
| 1461 | |||
| 1462 | sprintf(path, | ||
| 1463 | "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", | ||
| 1464 | cpu); | ||
| 1465 | filep = fopen(path, "r"); | ||
| 1466 | if (filep == NULL) { | ||
| 1467 | perror(path); | ||
| 1468 | exit(1); | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | for (i = 0; i < topo.num_threads_per_core; i++) { | ||
| 1472 | fscanf(filep, "%d", &this_cpu); | ||
| 1473 | if (this_cpu == cpu) { | ||
| 1474 | fclose(filep); | ||
| 1475 | return i; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | /* Account for no separator after last thread */ | ||
| 1479 | if (i != (topo.num_threads_per_core - 1)) | ||
| 1480 | fscanf(filep, "%c", &character); | ||
| 1481 | } | ||
| 1482 | |||
| 1483 | fclose(filep); | ||
| 1484 | return -1; | ||
| 1390 | } | 1485 | } |
| 1391 | 1486 | ||
| 1392 | /* | 1487 | /* |
| @@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu) | |||
| 1412 | { | 1507 | { |
| 1413 | char path[80]; | 1508 | char path[80]; |
| 1414 | FILE *filep; | 1509 | FILE *filep; |
| 1415 | int sib1, sib2; | 1510 | int sib1; |
| 1416 | int matches; | 1511 | int matches = 0; |
| 1417 | char character; | 1512 | char character; |
| 1513 | char str[100]; | ||
| 1514 | char *ch; | ||
| 1418 | 1515 | ||
| 1419 | sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); | 1516 | sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); |
| 1420 | filep = fopen_or_die(path, "r"); | 1517 | filep = fopen_or_die(path, "r"); |
| 1518 | |||
| 1421 | /* | 1519 | /* |
| 1422 | * file format: | 1520 | * file format: |
| 1423 | * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) | 1521 | * A ',' separated or '-' separated set of numbers |
| 1424 | * otherwinse 1 sibling (self). | 1522 | * (eg 1-2 or 1,3,4,5) |
| 1425 | */ | 1523 | */ |
| 1426 | matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); | 1524 | fscanf(filep, "%d%c\n", &sib1, &character); |
| 1525 | fseek(filep, 0, SEEK_SET); | ||
| 1526 | fgets(str, 100, filep); | ||
| 1527 | ch = strchr(str, character); | ||
| 1528 | while (ch != NULL) { | ||
| 1529 | matches++; | ||
| 1530 | ch = strchr(ch+1, character); | ||
| 1531 | } | ||
| 1427 | 1532 | ||
| 1428 | fclose(filep); | 1533 | fclose(filep); |
| 1429 | 1534 | return matches+1; | |
| 1430 | if (matches == 3) | ||
| 1431 | return 2; | ||
| 1432 | else | ||
| 1433 | return 1; | ||
| 1434 | } | 1535 | } |
| 1435 | 1536 | ||
| 1436 | /* | 1537 | /* |
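Both new helpers parse /sys/devices/system/cpu/cpuN/topology/thread_siblings_list. A userspace sketch of the separator-counting idea behind get_num_ht_siblings(), run against a hypothetical sibling list string instead of the sysfs file:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *str = "1,65,129,193";       /* hypothetical 4-way SMT core */
        int matches = 0;
        const char *ch = strchr(str, ',');      /* count the separators */

        while (ch != NULL) {
                matches++;
                ch = strchr(ch + 1, ',');
        }
        printf("%d HT siblings\n", matches + 1);        /* prints: 4 HT siblings */
        return 0;
}

For the same hypothetical list, get_cpu_position_in_core(129) would return 2, since 129 is the third entry.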
| @@ -1594,8 +1695,10 @@ restart: | |||
| 1594 | void check_dev_msr() | 1695 | void check_dev_msr() |
| 1595 | { | 1696 | { |
| 1596 | struct stat sb; | 1697 | struct stat sb; |
| 1698 | char pathname[32]; | ||
| 1597 | 1699 | ||
| 1598 | if (stat("/dev/cpu/0/msr", &sb)) | 1700 | sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); |
| 1701 | if (stat(pathname, &sb)) | ||
| 1599 | if (system("/sbin/modprobe msr > /dev/null 2>&1")) | 1702 | if (system("/sbin/modprobe msr > /dev/null 2>&1")) |
| 1600 | err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); | 1703 | err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); |
| 1601 | } | 1704 | } |
| @@ -1608,6 +1711,7 @@ void check_permissions() | |||
| 1608 | cap_user_data_t cap_data = &cap_data_data; | 1711 | cap_user_data_t cap_data = &cap_data_data; |
| 1609 | extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); | 1712 | extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); |
| 1610 | int do_exit = 0; | 1713 | int do_exit = 0; |
| 1714 | char pathname[32]; | ||
| 1611 | 1715 | ||
| 1612 | /* check for CAP_SYS_RAWIO */ | 1716 | /* check for CAP_SYS_RAWIO */ |
| 1613 | cap_header->pid = getpid(); | 1717 | cap_header->pid = getpid(); |
| @@ -1622,7 +1726,8 @@ void check_permissions() | |||
| 1622 | } | 1726 | } |
| 1623 | 1727 | ||
| 1624 | /* test file permissions */ | 1728 | /* test file permissions */ |
| 1625 | if (euidaccess("/dev/cpu/0/msr", R_OK)) { | 1729 | sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); |
| 1730 | if (euidaccess(pathname, R_OK)) { | ||
| 1626 | do_exit++; | 1731 | do_exit++; |
| 1627 | warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); | 1732 | warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); |
| 1628 | } | 1733 | } |
| @@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) | |||
| 1704 | default: | 1809 | default: |
| 1705 | return 0; | 1810 | return 0; |
| 1706 | } | 1811 | } |
| 1707 | get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); | 1812 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); |
| 1708 | 1813 | ||
| 1709 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; | 1814 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; |
| 1710 | 1815 | ||
| @@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) | |||
| 1753 | } | 1858 | } |
| 1754 | } | 1859 | } |
| 1755 | 1860 | ||
| 1861 | int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) | ||
| 1862 | { | ||
| 1863 | if (!genuine_intel) | ||
| 1864 | return 0; | ||
| 1865 | |||
| 1866 | if (family != 6) | ||
| 1867 | return 0; | ||
| 1868 | |||
| 1869 | switch (model) { | ||
| 1870 | case 0x57: /* Knights Landing */ | ||
| 1871 | return 1; | ||
| 1872 | default: | ||
| 1873 | return 0; | ||
| 1874 | } | ||
| 1875 | } | ||
| 1756 | static void | 1876 | static void |
| 1757 | dump_cstate_pstate_config_info(family, model) | 1877 | dump_cstate_pstate_config_info(family, model) |
| 1758 | { | 1878 | { |
| @@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model) | |||
| 1770 | if (has_nhm_turbo_ratio_limit(family, model)) | 1890 | if (has_nhm_turbo_ratio_limit(family, model)) |
| 1771 | dump_nhm_turbo_ratio_limits(); | 1891 | dump_nhm_turbo_ratio_limits(); |
| 1772 | 1892 | ||
| 1893 | if (has_knl_turbo_ratio_limit(family, model)) | ||
| 1894 | dump_knl_turbo_ratio_limits(); | ||
| 1895 | |||
| 1773 | dump_nhm_cst_cfg(); | 1896 | dump_nhm_cst_cfg(); |
| 1774 | } | 1897 | } |
| 1775 | 1898 | ||
| @@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 1801 | if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) | 1924 | if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) |
| 1802 | return 0; | 1925 | return 0; |
| 1803 | 1926 | ||
| 1804 | switch (msr & 0x7) { | 1927 | switch (msr & 0xF) { |
| 1805 | case ENERGY_PERF_BIAS_PERFORMANCE: | 1928 | case ENERGY_PERF_BIAS_PERFORMANCE: |
| 1806 | epb_string = "performance"; | 1929 | epb_string = "performance"; |
| 1807 | break; | 1930 | break; |
| @@ -1925,7 +2048,7 @@ double get_tdp(model) | |||
| 1925 | unsigned long long msr; | 2048 | unsigned long long msr; |
| 1926 | 2049 | ||
| 1927 | if (do_rapl & RAPL_PKG_POWER_INFO) | 2050 | if (do_rapl & RAPL_PKG_POWER_INFO) |
| 1928 | if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) | 2051 | if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) |
| 1929 | return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; | 2052 | return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; |
| 1930 | 2053 | ||
| 1931 | switch (model) { | 2054 | switch (model) { |
| @@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) | |||
| 1950 | case 0x3F: /* HSX */ | 2073 | case 0x3F: /* HSX */ |
| 1951 | case 0x4F: /* BDX */ | 2074 | case 0x4F: /* BDX */ |
| 1952 | case 0x56: /* BDX-DE */ | 2075 | case 0x56: /* BDX-DE */ |
| 2076 | case 0x57: /* KNL */ | ||
| 1953 | return (rapl_dram_energy_units = 15.3 / 1000000); | 2077 | return (rapl_dram_energy_units = 15.3 / 1000000); |
| 1954 | default: | 2078 | default: |
| 1955 | return (rapl_energy_units); | 2079 | return (rapl_energy_units); |
| @@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model) | |||
| 1991 | case 0x3F: /* HSX */ | 2115 | case 0x3F: /* HSX */ |
| 1992 | case 0x4F: /* BDX */ | 2116 | case 0x4F: /* BDX */ |
| 1993 | case 0x56: /* BDX-DE */ | 2117 | case 0x56: /* BDX-DE */ |
| 2118 | case 0x57: /* KNL */ | ||
| 1994 | do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; | 2119 | do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; |
| 1995 | break; | 2120 | break; |
| 1996 | case 0x2D: | 2121 | case 0x2D: |
| @@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model) | |||
| 2006 | } | 2131 | } |
| 2007 | 2132 | ||
| 2008 | /* units on package 0, verify later other packages match */ | 2133 | /* units on package 0, verify later other packages match */ |
| 2009 | if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) | 2134 | if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) |
| 2010 | return; | 2135 | return; |
| 2011 | 2136 | ||
| 2012 | rapl_power_units = 1.0 / (1 << (msr & 0xF)); | 2137 | rapl_power_units = 1.0 / (1 << (msr & 0xF)); |
| @@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model) | |||
| 2331 | return 0; | 2456 | return 0; |
| 2332 | } | 2457 | } |
| 2333 | 2458 | ||
| 2459 | int is_knl(unsigned int family, unsigned int model) | ||
| 2460 | { | ||
| 2461 | if (!genuine_intel) | ||
| 2462 | return 0; | ||
| 2463 | switch (model) { | ||
| 2464 | case 0x57: /* KNL */ | ||
| 2465 | return 1; | ||
| 2466 | } | ||
| 2467 | return 0; | ||
| 2468 | } | ||
| 2469 | |||
| 2334 | #define SLM_BCLK_FREQS 5 | 2470 | #define SLM_BCLK_FREQS 5 |
| 2335 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; | 2471 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; |
| 2336 | 2472 | ||
| @@ -2340,7 +2476,7 @@ double slm_bclk(void) | |||
| 2340 | unsigned int i; | 2476 | unsigned int i; |
| 2341 | double freq; | 2477 | double freq; |
| 2342 | 2478 | ||
| 2343 | if (get_msr(0, MSR_FSB_FREQ, &msr)) | 2479 | if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) |
| 2344 | fprintf(stderr, "SLM BCLK: unknown\n"); | 2480 | fprintf(stderr, "SLM BCLK: unknown\n"); |
| 2345 | 2481 | ||
| 2346 | i = msr & 0xf; | 2482 | i = msr & 0xf; |
| @@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk | |||
| 2408 | if (!do_nhm_platform_info) | 2544 | if (!do_nhm_platform_info) |
| 2409 | goto guess; | 2545 | goto guess; |
| 2410 | 2546 | ||
| 2411 | if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) | 2547 | if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) |
| 2412 | goto guess; | 2548 | goto guess; |
| 2413 | 2549 | ||
| 2414 | target_c_local = (msr >> 16) & 0xFF; | 2550 | target_c_local = (msr >> 16) & 0xFF; |
| @@ -2541,6 +2677,7 @@ void process_cpuid() | |||
| 2541 | do_c8_c9_c10 = has_hsw_msrs(family, model); | 2677 | do_c8_c9_c10 = has_hsw_msrs(family, model); |
| 2542 | do_skl_residency = has_skl_msrs(family, model); | 2678 | do_skl_residency = has_skl_msrs(family, model); |
| 2543 | do_slm_cstates = is_slm(family, model); | 2679 | do_slm_cstates = is_slm(family, model); |
| 2680 | do_knl_cstates = is_knl(family, model); | ||
| 2544 | bclk = discover_bclk(family, model); | 2681 | bclk = discover_bclk(family, model); |
| 2545 | 2682 | ||
| 2546 | rapl_probe(family, model); | 2683 | rapl_probe(family, model); |
| @@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id) | |||
| 2755 | 2892 | ||
| 2756 | my_package_id = get_physical_package_id(cpu_id); | 2893 | my_package_id = get_physical_package_id(cpu_id); |
| 2757 | my_core_id = get_core_id(cpu_id); | 2894 | my_core_id = get_core_id(cpu_id); |
| 2758 | 2895 | my_thread_id = get_cpu_position_in_core(cpu_id); | |
| 2759 | if (cpu_is_first_sibling_in_core(cpu_id)) { | 2896 | if (!my_thread_id) |
| 2760 | my_thread_id = 0; | ||
| 2761 | topo.num_cores++; | 2897 | topo.num_cores++; |
| 2762 | } else { | ||
| 2763 | my_thread_id = 1; | ||
| 2764 | } | ||
| 2765 | 2898 | ||
| 2766 | init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); | 2899 | init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); |
| 2767 | init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); | 2900 | init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); |
| @@ -2785,13 +2918,24 @@ void setup_all_buffers(void) | |||
| 2785 | for_all_proc_cpus(initialize_counters); | 2918 | for_all_proc_cpus(initialize_counters); |
| 2786 | } | 2919 | } |
| 2787 | 2920 | ||
| 2921 | void set_base_cpu(void) | ||
| 2922 | { | ||
| 2923 | base_cpu = sched_getcpu(); | ||
| 2924 | if (base_cpu < 0) | ||
| 2925 | err(-ENODEV, "No valid cpus found"); | ||
| 2926 | |||
| 2927 | if (debug > 1) | ||
| 2928 | fprintf(stderr, "base_cpu = %d\n", base_cpu); | ||
| 2929 | } | ||
| 2930 | |||
| 2788 | void turbostat_init() | 2931 | void turbostat_init() |
| 2789 | { | 2932 | { |
| 2933 | setup_all_buffers(); | ||
| 2934 | set_base_cpu(); | ||
| 2790 | check_dev_msr(); | 2935 | check_dev_msr(); |
| 2791 | check_permissions(); | 2936 | check_permissions(); |
| 2792 | process_cpuid(); | 2937 | process_cpuid(); |
| 2793 | 2938 | ||
| 2794 | setup_all_buffers(); | ||
| 2795 | 2939 | ||
| 2796 | if (debug) | 2940 | if (debug) |
| 2797 | for_all_cpus(print_epb, ODD_COUNTERS); | 2941 | for_all_cpus(print_epb, ODD_COUNTERS); |
| @@ -2870,7 +3014,7 @@ int get_and_dump_counters(void) | |||
| 2870 | } | 3014 | } |
| 2871 | 3015 | ||
| 2872 | void print_version() { | 3016 | void print_version() { |
| 2873 | fprintf(stderr, "turbostat version 4.5 2 Apr, 2015" | 3017 | fprintf(stderr, "turbostat version 4.7 27-May, 2015" |
| 2874 | " - Len Brown <lenb@kernel.org>\n"); | 3018 | " - Len Brown <lenb@kernel.org>\n"); |
| 2875 | } | 3019 | } |
| 2876 | 3020 | ||
