diff options
149 files changed, 1378 insertions, 496 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 20bb1d00098c..a306795a7450 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1004,6 +1004,7 @@ N: meson | |||
1004 | ARM/Annapurna Labs ALPINE ARCHITECTURE | 1004 | ARM/Annapurna Labs ALPINE ARCHITECTURE |
1005 | M: Tsahee Zidenberg <tsahee@annapurnalabs.com> | 1005 | M: Tsahee Zidenberg <tsahee@annapurnalabs.com> |
1006 | M: Antoine Tenart <antoine.tenart@free-electrons.com> | 1006 | M: Antoine Tenart <antoine.tenart@free-electrons.com> |
1007 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
1007 | S: Maintained | 1008 | S: Maintained |
1008 | F: arch/arm/mach-alpine/ | 1009 | F: arch/arm/mach-alpine/ |
1009 | F: arch/arm/boot/dts/alpine* | 1010 | F: arch/arm/boot/dts/alpine* |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 56ea5c60b318..61f6ccc19cfa 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -260,12 +260,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y)) | |||
260 | platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) | 260 | platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) |
261 | 261 | ||
262 | ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) | 262 | ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) |
263 | ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y) | ||
263 | ifeq ($(KBUILD_SRC),) | 264 | ifeq ($(KBUILD_SRC),) |
264 | KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) | 265 | KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) |
265 | else | 266 | else |
266 | KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) | 267 | KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) |
267 | endif | 268 | endif |
268 | endif | 269 | endif |
270 | endif | ||
269 | 271 | ||
270 | export TEXT_OFFSET GZFLAGS MMUEXT | 272 | export TEXT_OFFSET GZFLAGS MMUEXT |
271 | 273 | ||
diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts index db808f92dd79..90d00b407f85 100644 --- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts +++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts | |||
@@ -70,13 +70,12 @@ | |||
70 | * associativity as these may be erroneously set | 70 | * associativity as these may be erroneously set |
71 | * up by boot loader(s). | 71 | * up by boot loader(s). |
72 | */ | 72 | */ |
73 | cache-size = <1048576>; // 1MB | 73 | cache-size = <131072>; // 128KB |
74 | cache-sets = <4096>; | 74 | cache-sets = <512>; |
75 | cache-line-size = <32>; | 75 | cache-line-size = <32>; |
76 | arm,parity-disable; | 76 | arm,parity-disable; |
77 | arm,tag-latency = <1>; | 77 | arm,tag-latency = <1 1 1>; |
78 | arm,data-latency = <1 1>; | 78 | arm,data-latency = <1 1 1>; |
79 | arm,dirty-latency = <1>; | ||
80 | }; | 79 | }; |
81 | 80 | ||
82 | scu: scu@1f000000 { | 81 | scu: scu@1f000000 { |
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts index cf06e32ee108..4b34b54e09a1 100644 --- a/arch/arm/boot/dts/integratorap.dts +++ b/arch/arm/boot/dts/integratorap.dts | |||
@@ -42,7 +42,7 @@ | |||
42 | }; | 42 | }; |
43 | 43 | ||
44 | syscon { | 44 | syscon { |
45 | compatible = "arm,integrator-ap-syscon"; | 45 | compatible = "arm,integrator-ap-syscon", "syscon"; |
46 | reg = <0x11000000 0x100>; | 46 | reg = <0x11000000 0x100>; |
47 | interrupt-parent = <&pic>; | 47 | interrupt-parent = <&pic>; |
48 | /* These are the logical module IRQs */ | 48 | /* These are the logical module IRQs */ |
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts index d43f15b4f79a..79430fbfec3b 100644 --- a/arch/arm/boot/dts/integratorcp.dts +++ b/arch/arm/boot/dts/integratorcp.dts | |||
@@ -94,7 +94,7 @@ | |||
94 | }; | 94 | }; |
95 | 95 | ||
96 | syscon { | 96 | syscon { |
97 | compatible = "arm,integrator-cp-syscon"; | 97 | compatible = "arm,integrator-cp-syscon", "syscon"; |
98 | reg = <0xcb000000 0x100>; | 98 | reg = <0xcb000000 0x100>; |
99 | }; | 99 | }; |
100 | 100 | ||
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi index 00cb314d5e4d..e23f46d15c80 100644 --- a/arch/arm/boot/dts/keystone.dtsi +++ b/arch/arm/boot/dts/keystone.dtsi | |||
@@ -70,14 +70,6 @@ | |||
70 | cpu_on = <0x84000003>; | 70 | cpu_on = <0x84000003>; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | psci { | ||
74 | compatible = "arm,psci"; | ||
75 | method = "smc"; | ||
76 | cpu_suspend = <0x84000001>; | ||
77 | cpu_off = <0x84000002>; | ||
78 | cpu_on = <0x84000003>; | ||
79 | }; | ||
80 | |||
81 | soc { | 73 | soc { |
82 | #address-cells = <1>; | 74 | #address-cells = <1>; |
83 | #size-cells = <1>; | 75 | #size-cells = <1>; |
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index e52b82449a79..6403e0de540e 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts | |||
@@ -1382,7 +1382,7 @@ | |||
1382 | * Pin 41: BR_UART1_TXD | 1382 | * Pin 41: BR_UART1_TXD |
1383 | * Pin 44: BR_UART1_RXD | 1383 | * Pin 44: BR_UART1_RXD |
1384 | */ | 1384 | */ |
1385 | serial@70006000 { | 1385 | serial@0,70006000 { |
1386 | compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; | 1386 | compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; |
1387 | status = "okay"; | 1387 | status = "okay"; |
1388 | }; | 1388 | }; |
@@ -1394,7 +1394,7 @@ | |||
1394 | * Pin 71: UART2_CTS_L | 1394 | * Pin 71: UART2_CTS_L |
1395 | * Pin 74: UART2_RTS_L | 1395 | * Pin 74: UART2_RTS_L |
1396 | */ | 1396 | */ |
1397 | serial@70006040 { | 1397 | serial@0,70006040 { |
1398 | compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; | 1398 | compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; |
1399 | status = "okay"; | 1399 | status = "okay"; |
1400 | }; | 1400 | }; |
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig index b6e54ee9bdbd..ca39c04fec6b 100644 --- a/arch/arm/configs/aspeed_g4_defconfig +++ b/arch/arm/configs/aspeed_g4_defconfig | |||
@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y | |||
58 | # CONFIG_IOMMU_SUPPORT is not set | 58 | # CONFIG_IOMMU_SUPPORT is not set |
59 | CONFIG_FIRMWARE_MEMMAP=y | 59 | CONFIG_FIRMWARE_MEMMAP=y |
60 | CONFIG_FANOTIFY=y | 60 | CONFIG_FANOTIFY=y |
61 | CONFIG_PRINTK_TIME=1 | 61 | CONFIG_PRINTK_TIME=y |
62 | CONFIG_DYNAMIC_DEBUG=y | 62 | CONFIG_DYNAMIC_DEBUG=y |
63 | CONFIG_STRIP_ASM_SYMS=y | 63 | CONFIG_STRIP_ASM_SYMS=y |
64 | CONFIG_PAGE_POISONING=y | 64 | CONFIG_PAGE_POISONING=y |
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig index 892605167357..4f366b0370e9 100644 --- a/arch/arm/configs/aspeed_g5_defconfig +++ b/arch/arm/configs/aspeed_g5_defconfig | |||
@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y | |||
59 | # CONFIG_IOMMU_SUPPORT is not set | 59 | # CONFIG_IOMMU_SUPPORT is not set |
60 | CONFIG_FIRMWARE_MEMMAP=y | 60 | CONFIG_FIRMWARE_MEMMAP=y |
61 | CONFIG_FANOTIFY=y | 61 | CONFIG_FANOTIFY=y |
62 | CONFIG_PRINTK_TIME=1 | 62 | CONFIG_PRINTK_TIME=y |
63 | CONFIG_DYNAMIC_DEBUG=y | 63 | CONFIG_DYNAMIC_DEBUG=y |
64 | CONFIG_STRIP_ASM_SYMS=y | 64 | CONFIG_STRIP_ASM_SYMS=y |
65 | CONFIG_PAGE_POISONING=y | 65 | CONFIG_PAGE_POISONING=y |
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 087acb569b63..5f221acd21ae 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c | |||
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, | |||
279 | mm_segment_t fs; | 279 | mm_segment_t fs; |
280 | long ret, err, i; | 280 | long ret, err, i; |
281 | 281 | ||
282 | if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) | 282 | if (maxevents <= 0 || |
283 | maxevents > (INT_MAX/sizeof(*kbuf)) || | ||
284 | maxevents > (INT_MAX/sizeof(*events))) | ||
283 | return -EINVAL; | 285 | return -EINVAL; |
286 | if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) | ||
287 | return -EFAULT; | ||
284 | kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); | 288 | kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); |
285 | if (!kbuf) | 289 | if (!kbuf) |
286 | return -ENOMEM; | 290 | return -ENOMEM; |
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid, | |||
317 | 321 | ||
318 | if (nsops < 1 || nsops > SEMOPM) | 322 | if (nsops < 1 || nsops > SEMOPM) |
319 | return -EINVAL; | 323 | return -EINVAL; |
324 | if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) | ||
325 | return -EFAULT; | ||
320 | sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); | 326 | sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); |
321 | if (!sops) | 327 | if (!sops) |
322 | return -ENOMEM; | 328 | return -ENOMEM; |
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig index dc7c6edeab39..61284b9389cf 100644 --- a/arch/arm/mach-clps711x/Kconfig +++ b/arch/arm/mach-clps711x/Kconfig | |||
@@ -1,13 +1,13 @@ | |||
1 | menuconfig ARCH_CLPS711X | 1 | menuconfig ARCH_CLPS711X |
2 | bool "Cirrus Logic EP721x/EP731x-based" | 2 | bool "Cirrus Logic EP721x/EP731x-based" |
3 | depends on ARCH_MULTI_V4T | 3 | depends on ARCH_MULTI_V4T |
4 | select ARCH_REQUIRE_GPIOLIB | ||
5 | select AUTO_ZRELADDR | 4 | select AUTO_ZRELADDR |
6 | select CLKSRC_OF | 5 | select CLKSRC_OF |
7 | select CLPS711X_TIMER | 6 | select CLPS711X_TIMER |
8 | select COMMON_CLK | 7 | select COMMON_CLK |
9 | select CPU_ARM720T | 8 | select CPU_ARM720T |
10 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select GPIOLIB | ||
11 | select MFD_SYSCON | 11 | select MFD_SYSCON |
12 | select OF_IRQ | 12 | select OF_IRQ |
13 | select USE_OF | 13 | select USE_OF |
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile index e53c6cfcab51..6c6497e80a7b 100644 --- a/arch/arm/mach-mvebu/Makefile +++ b/arch/arm/mach-mvebu/Makefile | |||
@@ -1,5 +1,4 @@ | |||
1 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ | 1 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include |
2 | -I$(srctree)/arch/arm/plat-orion/include | ||
3 | 2 | ||
4 | AFLAGS_coherency_ll.o := -Wa,-march=armv7-a | 3 | AFLAGS_coherency_ll.o := -Wa,-march=armv7-a |
5 | CFLAGS_pmsu.o := -march=armv7-a | 4 | CFLAGS_pmsu.o := -march=armv7-a |
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig index 567496bd250a..29100beb2e7f 100644 --- a/arch/arm/mach-oxnas/Kconfig +++ b/arch/arm/mach-oxnas/Kconfig | |||
@@ -11,11 +11,13 @@ if ARCH_OXNAS | |||
11 | 11 | ||
12 | config MACH_OX810SE | 12 | config MACH_OX810SE |
13 | bool "Support OX810SE Based Products" | 13 | bool "Support OX810SE Based Products" |
14 | select ARCH_HAS_RESET_CONTROLLER | ||
14 | select COMMON_CLK_OXNAS | 15 | select COMMON_CLK_OXNAS |
15 | select CPU_ARM926T | 16 | select CPU_ARM926T |
16 | select MFD_SYSCON | 17 | select MFD_SYSCON |
17 | select OXNAS_RPS_TIMER | 18 | select OXNAS_RPS_TIMER |
18 | select PINCTRL_OXNAS | 19 | select PINCTRL_OXNAS |
20 | select RESET_CONTROLLER | ||
19 | select RESET_OXNAS | 21 | select RESET_OXNAS |
20 | select VERSATILE_FPGA_IRQ | 22 | select VERSATILE_FPGA_IRQ |
21 | help | 23 | help |
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index dc109dc3a622..10bfdb169366 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/module.h> /* symbol_get ; symbol_put */ | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
18 | #include <linux/major.h> | 19 | #include <linux/major.h> |
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 1080580b1343..2c150bfc0cd5 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/module.h> /* symbol_get ; symbol_put */ | ||
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
18 | #include <linux/gpio_keys.h> | 19 | #include <linux/gpio_keys.h> |
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile index dae8d86ef4cc..404882130956 100644 --- a/arch/arm/mach-realview/Makefile +++ b/arch/arm/mach-realview/Makefile | |||
@@ -1,8 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ | 4 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include |
5 | -I$(srctree)/arch/arm/plat-versatile/include | ||
6 | 5 | ||
7 | obj-y := core.o | 6 | obj-y := core.o |
8 | obj-$(CONFIG_REALVIEW_DT) += realview-dt.o | 7 | obj-$(CONFIG_REALVIEW_DT) += realview-dt.o |
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile index 72b9e9671507..fa7fb716e388 100644 --- a/arch/arm/mach-s5pv210/Makefile +++ b/arch/arm/mach-s5pv210/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | # | 5 | # |
6 | # Licensed under GPLv2 | 6 | # Licensed under GPLv2 |
7 | 7 | ||
8 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include | 8 | ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include |
9 | 9 | ||
10 | # Core | 10 | # Core |
11 | 11 | ||
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3dba6f356e2..02e21bceb085 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c | |||
@@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu) | |||
40 | bool __init shmobile_smp_init_fallback_ops(void) | 40 | bool __init shmobile_smp_init_fallback_ops(void) |
41 | { | 41 | { |
42 | /* fallback on PSCI/smp_ops if no other DT based method is detected */ | 42 | /* fallback on PSCI/smp_ops if no other DT based method is detected */ |
43 | if (!IS_ENABLED(CONFIG_SMP)) | ||
44 | return false; | ||
45 | |||
43 | return platform_can_secondary_boot() ? true : false; | 46 | return platform_can_secondary_boot() ? true : false; |
44 | } | 47 | } |
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index bb2616b16157..be5d824ebdba 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms | |||
@@ -8,7 +8,7 @@ config ARCH_SUNXI | |||
8 | 8 | ||
9 | config ARCH_ALPINE | 9 | config ARCH_ALPINE |
10 | bool "Annapurna Labs Alpine platform" | 10 | bool "Annapurna Labs Alpine platform" |
11 | select ALPINE_MSI | 11 | select ALPINE_MSI if PCI |
12 | help | 12 | help |
13 | This enables support for the Annapurna Labs Alpine | 13 | This enables support for the Annapurna Labs Alpine |
14 | Soc family. | 14 | Soc family. |
@@ -66,7 +66,7 @@ config ARCH_LG1K | |||
66 | config ARCH_HISI | 66 | config ARCH_HISI |
67 | bool "Hisilicon SoC Family" | 67 | bool "Hisilicon SoC Family" |
68 | select ARM_TIMER_SP804 | 68 | select ARM_TIMER_SP804 |
69 | select HISILICON_IRQ_MBIGEN | 69 | select HISILICON_IRQ_MBIGEN if PCI |
70 | help | 70 | help |
71 | This enables support for Hisilicon ARMv8 SoC family | 71 | This enables support for Hisilicon ARMv8 SoC family |
72 | 72 | ||
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 299f3ce969ab..c528dd52ba2d 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts | |||
@@ -12,6 +12,7 @@ | |||
12 | /dts-v1/; | 12 | /dts-v1/; |
13 | #include "exynos7.dtsi" | 13 | #include "exynos7.dtsi" |
14 | #include <dt-bindings/interrupt-controller/irq.h> | 14 | #include <dt-bindings/interrupt-controller/irq.h> |
15 | #include <dt-bindings/clock/samsung,s2mps11.h> | ||
15 | 16 | ||
16 | / { | 17 | / { |
17 | model = "Samsung Exynos7 Espresso board based on EXYNOS7"; | 18 | model = "Samsung Exynos7 Espresso board based on EXYNOS7"; |
@@ -43,6 +44,8 @@ | |||
43 | 44 | ||
44 | &rtc { | 45 | &rtc { |
45 | status = "okay"; | 46 | status = "okay"; |
47 | clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>; | ||
48 | clock-names = "rtc", "rtc_src"; | ||
46 | }; | 49 | }; |
47 | 50 | ||
48 | &watchdog { | 51 | &watchdog { |
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 11fa51c89617..c0ec116b3993 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c | |||
@@ -390,7 +390,6 @@ void __init mem_init(void) | |||
390 | 390 | ||
391 | free_all_bootmem(); | 391 | free_all_bootmem(); |
392 | mem_init_print_info(NULL); | 392 | mem_init_print_info(NULL); |
393 | show_mem(0); | ||
394 | } | 393 | } |
395 | 394 | ||
396 | void free_initmem(void) | 395 | void free_initmem(void) |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index ca254546cd05..1934707bf321 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
@@ -66,29 +66,28 @@ endif | |||
66 | UTS_MACHINE := $(OLDARCH) | 66 | UTS_MACHINE := $(OLDARCH) |
67 | 67 | ||
68 | ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) | 68 | ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) |
69 | override CC += -mlittle-endian | ||
70 | ifneq ($(cc-name),clang) | ||
71 | override CC += -mno-strict-align | ||
72 | endif | ||
73 | override AS += -mlittle-endian | ||
74 | override LD += -EL | 69 | override LD += -EL |
75 | override CROSS32CC += -mlittle-endian | ||
76 | override CROSS32AS += -mlittle-endian | 70 | override CROSS32AS += -mlittle-endian |
77 | LDEMULATION := lppc | 71 | LDEMULATION := lppc |
78 | GNUTARGET := powerpcle | 72 | GNUTARGET := powerpcle |
79 | MULTIPLEWORD := -mno-multiple | 73 | MULTIPLEWORD := -mno-multiple |
80 | KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) | 74 | KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) |
81 | else | 75 | else |
82 | ifeq ($(call cc-option-yn,-mbig-endian),y) | ||
83 | override CC += -mbig-endian | ||
84 | override AS += -mbig-endian | ||
85 | endif | ||
86 | override LD += -EB | 76 | override LD += -EB |
87 | LDEMULATION := ppc | 77 | LDEMULATION := ppc |
88 | GNUTARGET := powerpc | 78 | GNUTARGET := powerpc |
89 | MULTIPLEWORD := -mmultiple | 79 | MULTIPLEWORD := -mmultiple |
90 | endif | 80 | endif |
91 | 81 | ||
82 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) | ||
83 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian | ||
84 | ifneq ($(cc-name),clang) | ||
85 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align | ||
86 | endif | ||
87 | |||
88 | aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) | ||
89 | aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian | ||
90 | |||
92 | ifeq ($(HAS_BIARCH),y) | 91 | ifeq ($(HAS_BIARCH),y) |
93 | override AS += -a$(CONFIG_WORD_SIZE) | 92 | override AS += -a$(CONFIG_WORD_SIZE) |
94 | override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION) | 93 | override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION) |
@@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200) += -Wa,-me200 | |||
232 | KBUILD_AFLAGS += $(cpu-as-y) | 231 | KBUILD_AFLAGS += $(cpu-as-y) |
233 | KBUILD_CFLAGS += $(cpu-as-y) | 232 | KBUILD_CFLAGS += $(cpu-as-y) |
234 | 233 | ||
234 | KBUILD_AFLAGS += $(aflags-y) | ||
235 | KBUILD_CFLAGS += $(cflags-y) | ||
236 | |||
235 | head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o | 237 | head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o |
236 | head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o | 238 | head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o |
237 | head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o | 239 | head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o |
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index bfe3d37a24ef..9fa046d56eba 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/cpufeature.h> | ||
7 | #include <asm/switch_to.h> | 8 | #include <asm/switch_to.h> |
8 | 9 | ||
9 | #define CHKSUM_BLOCK_SIZE 1 | 10 | #define CHKSUM_BLOCK_SIZE 1 |
@@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void) | |||
157 | crypto_unregister_shash(&alg); | 158 | crypto_unregister_shash(&alg); |
158 | } | 159 | } |
159 | 160 | ||
160 | module_init(crc32c_vpmsum_mod_init); | 161 | module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init); |
161 | module_exit(crc32c_vpmsum_mod_fini); | 162 | module_exit(crc32c_vpmsum_mod_fini); |
162 | 163 | ||
163 | MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); | 164 | MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); |
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index 3d7fc06532a1..01b8a13f0224 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h | |||
@@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state; | |||
19 | 19 | ||
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | /* Idle state entry routines */ | ||
23 | #ifdef CONFIG_PPC_P7_NAP | ||
24 | #define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ | ||
25 | /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ | ||
26 | std r0,0(r1); \ | ||
27 | ptesync; \ | ||
28 | ld r0,0(r1); \ | ||
29 | 1: cmp cr0,r0,r0; \ | ||
30 | bne 1b; \ | ||
31 | IDLE_INST; \ | ||
32 | b . | ||
33 | #endif /* CONFIG_PPC_P7_NAP */ | ||
34 | |||
22 | #endif | 35 | #endif |
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 57fec8ac7b92..ddf54f5bbdd1 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h | |||
@@ -186,6 +186,7 @@ label##3: \ | |||
186 | 186 | ||
187 | #ifndef __ASSEMBLY__ | 187 | #ifndef __ASSEMBLY__ |
188 | void apply_feature_fixups(void); | 188 | void apply_feature_fixups(void); |
189 | void setup_feature_keys(void); | ||
189 | #endif | 190 | #endif |
190 | 191 | ||
191 | #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ | 192 | #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ |
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 0a74ebe934e1..17c8380673a6 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void) | |||
75 | static inline void __giveup_spe(struct task_struct *t) { } | 75 | static inline void __giveup_spe(struct task_struct *t) { } |
76 | #endif | 76 | #endif |
77 | 77 | ||
78 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
79 | extern void flush_tmregs_to_thread(struct task_struct *); | ||
80 | #else | ||
81 | static inline void flush_tmregs_to_thread(struct task_struct *t) | ||
82 | { | ||
83 | } | ||
84 | #endif | ||
85 | |||
86 | static inline void clear_task_ebb(struct task_struct *t) | 78 | static inline void clear_task_ebb(struct task_struct *t) |
87 | { | 79 | { |
88 | #ifdef CONFIG_PPC_BOOK3S_64 | 80 | #ifdef CONFIG_PPC_BOOK3S_64 |
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index f5f729c11578..f0b238516e9b 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h | |||
@@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void); | |||
159 | extern void xics_kexec_teardown_cpu(int secondary); | 159 | extern void xics_kexec_teardown_cpu(int secondary); |
160 | extern void xics_migrate_irqs_away(void); | 160 | extern void xics_migrate_irqs_away(void); |
161 | extern void icp_native_eoi(struct irq_data *d); | 161 | extern void icp_native_eoi(struct irq_data *d); |
162 | extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type); | ||
163 | extern int xics_retrigger(struct irq_data *data); | ||
162 | #ifdef CONFIG_SMP | 164 | #ifdef CONFIG_SMP |
163 | extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, | 165 | extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, |
164 | unsigned int strict_check); | 166 | unsigned int strict_check); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index c9bc78e9c610..7429556eb8df 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
168 | int n = 0, l = 0; | 168 | int n = 0, l = 0; |
169 | char buffer[128]; | 169 | char buffer[128]; |
170 | 170 | ||
171 | n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", | 171 | n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", |
172 | edev->phb->global_number, pdn->busno, | 172 | edev->phb->global_number, pdn->busno, |
173 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | 173 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); |
174 | pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", | 174 | pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n", |
175 | edev->phb->global_number, pdn->busno, | 175 | edev->phb->global_number, pdn->busno, |
176 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | 176 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); |
177 | 177 | ||
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 41091fdf9bd8..df6d45eb4115 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -144,29 +144,14 @@ machine_check_pSeries_1: | |||
144 | * vector | 144 | * vector |
145 | */ | 145 | */ |
146 | SET_SCRATCH0(r13) /* save r13 */ | 146 | SET_SCRATCH0(r13) /* save r13 */ |
147 | #ifdef CONFIG_PPC_P7_NAP | 147 | /* |
148 | BEGIN_FTR_SECTION | 148 | * Running native on arch 2.06 or later, we may wakeup from winkle |
149 | /* Running native on arch 2.06 or later, check if we are | 149 | * inside machine check. If yes, then last bit of HSPGR0 would be set |
150 | * waking up from nap. We only handle no state loss and | 150 | * to 1. Hence clear it unconditionally. |
151 | * supervisor state loss. We do -not- handle hypervisor | ||
152 | * state loss at this time. | ||
153 | */ | 151 | */ |
154 | mfspr r13,SPRN_SRR1 | 152 | GET_PACA(r13) |
155 | rlwinm. r13,r13,47-31,30,31 | 153 | clrrdi r13,r13,1 |
156 | OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) | 154 | SET_PACA(r13) |
157 | beq 9f | ||
158 | |||
159 | mfspr r13,SPRN_SRR1 | ||
160 | rlwinm. r13,r13,47-31,30,31 | ||
161 | /* waking up from powersave (nap) state */ | ||
162 | cmpwi cr1,r13,2 | ||
163 | /* Total loss of HV state is fatal. let's just stay stuck here */ | ||
164 | OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) | ||
165 | bgt cr1,. | ||
166 | 9: | ||
167 | OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) | ||
168 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) | ||
169 | #endif /* CONFIG_PPC_P7_NAP */ | ||
170 | EXCEPTION_PROLOG_0(PACA_EXMC) | 155 | EXCEPTION_PROLOG_0(PACA_EXMC) |
171 | BEGIN_FTR_SECTION | 156 | BEGIN_FTR_SECTION |
172 | b machine_check_powernv_early | 157 | b machine_check_powernv_early |
@@ -1273,25 +1258,51 @@ machine_check_handle_early: | |||
1273 | * Check if thread was in power saving mode. We come here when any | 1258 | * Check if thread was in power saving mode. We come here when any |
1274 | * of the following is true: | 1259 | * of the following is true: |
1275 | * a. thread wasn't in power saving mode | 1260 | * a. thread wasn't in power saving mode |
1276 | * b. thread was in power saving mode with no state loss or | 1261 | * b. thread was in power saving mode with no state loss, |
1277 | * supervisor state loss | 1262 | * supervisor state loss or hypervisor state loss. |
1278 | * | 1263 | * |
1279 | * Go back to nap again if (b) is true. | 1264 | * Go back to nap/sleep/winkle mode again if (b) is true. |
1280 | */ | 1265 | */ |
1281 | rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ | 1266 | rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ |
1282 | beq 4f /* No, it wasn;t */ | 1267 | beq 4f /* No, it wasn;t */ |
1283 | /* Thread was in power saving mode. Go back to nap again. */ | 1268 | /* Thread was in power saving mode. Go back to nap again. */ |
1284 | cmpwi r11,2 | 1269 | cmpwi r11,2 |
1285 | bne 3f | 1270 | blt 3f |
1286 | /* Supervisor state loss */ | 1271 | /* Supervisor/Hypervisor state loss */ |
1287 | li r0,1 | 1272 | li r0,1 |
1288 | stb r0,PACA_NAPSTATELOST(r13) | 1273 | stb r0,PACA_NAPSTATELOST(r13) |
1289 | 3: bl machine_check_queue_event | 1274 | 3: bl machine_check_queue_event |
1290 | MACHINE_CHECK_HANDLER_WINDUP | 1275 | MACHINE_CHECK_HANDLER_WINDUP |
1291 | GET_PACA(r13) | 1276 | GET_PACA(r13) |
1292 | ld r1,PACAR1(r13) | 1277 | ld r1,PACAR1(r13) |
1293 | li r3,PNV_THREAD_NAP | 1278 | /* |
1294 | b pnv_enter_arch207_idle_mode | 1279 | * Check what idle state this CPU was in and go back to same mode |
1280 | * again. | ||
1281 | */ | ||
1282 | lbz r3,PACA_THREAD_IDLE_STATE(r13) | ||
1283 | cmpwi r3,PNV_THREAD_NAP | ||
1284 | bgt 10f | ||
1285 | IDLE_STATE_ENTER_SEQ(PPC_NAP) | ||
1286 | /* No return */ | ||
1287 | 10: | ||
1288 | cmpwi r3,PNV_THREAD_SLEEP | ||
1289 | bgt 2f | ||
1290 | IDLE_STATE_ENTER_SEQ(PPC_SLEEP) | ||
1291 | /* No return */ | ||
1292 | |||
1293 | 2: | ||
1294 | /* | ||
1295 | * Go back to winkle. Please note that this thread was woken up in | ||
1296 | * machine check from winkle and have not restored the per-subcore | ||
1297 | * state. Hence before going back to winkle, set last bit of HSPGR0 | ||
1298 | * to 1. This will make sure that if this thread gets woken up | ||
1299 | * again at reset vector 0x100 then it will get chance to restore | ||
1300 | * the subcore state. | ||
1301 | */ | ||
1302 | ori r13,r13,1 | ||
1303 | SET_PACA(r13) | ||
1304 | IDLE_STATE_ENTER_SEQ(PPC_WINKLE) | ||
1305 | /* No return */ | ||
1295 | 4: | 1306 | 4: |
1296 | #endif | 1307 | #endif |
1297 | /* | 1308 | /* |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index ba79d15f4ddd..2265c6398a17 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
@@ -44,18 +44,6 @@ | |||
44 | PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ | 44 | PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ |
45 | PSSCR_MTL_MASK | 45 | PSSCR_MTL_MASK |
46 | 46 | ||
47 | /* Idle state entry routines */ | ||
48 | |||
49 | #define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ | ||
50 | /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ | ||
51 | std r0,0(r1); \ | ||
52 | ptesync; \ | ||
53 | ld r0,0(r1); \ | ||
54 | 1: cmp cr0,r0,r0; \ | ||
55 | bne 1b; \ | ||
56 | IDLE_INST; \ | ||
57 | b . | ||
58 | |||
59 | .text | 47 | .text |
60 | 48 | ||
61 | /* | 49 | /* |
@@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop) | |||
363 | * cr3 - set to gt if waking up with partial/complete hypervisor state loss | 351 | * cr3 - set to gt if waking up with partial/complete hypervisor state loss |
364 | */ | 352 | */ |
365 | _GLOBAL(pnv_restore_hyp_resource) | 353 | _GLOBAL(pnv_restore_hyp_resource) |
366 | ld r2,PACATOC(r13); | ||
367 | BEGIN_FTR_SECTION | 354 | BEGIN_FTR_SECTION |
355 | ld r2,PACATOC(r13); | ||
368 | /* | 356 | /* |
369 | * POWER ISA 3. Use PSSCR to determine if we | 357 | * POWER ISA 3. Use PSSCR to determine if we |
370 | * are waking up from deep idle state | 358 | * are waking up from deep idle state |
@@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
395 | */ | 383 | */ |
396 | clrldi r5,r13,63 | 384 | clrldi r5,r13,63 |
397 | clrrdi r13,r13,1 | 385 | clrrdi r13,r13,1 |
386 | |||
387 | /* Now that we are sure r13 is corrected, load TOC */ | ||
388 | ld r2,PACATOC(r13); | ||
398 | cmpwi cr4,r5,1 | 389 | cmpwi cr4,r5,1 |
399 | mtspr SPRN_HSPRG0,r13 | 390 | mtspr SPRN_HSPRG0,r13 |
400 | 391 | ||
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index ef267fd9dd22..5e7ece0fda9f 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c | |||
@@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled, | |||
92 | mce->in_use = 1; | 92 | mce->in_use = 1; |
93 | 93 | ||
94 | mce->initiator = MCE_INITIATOR_CPU; | 94 | mce->initiator = MCE_INITIATOR_CPU; |
95 | if (handled) | 95 | /* Mark it recovered if we have handled it and MSR(RI=1). */ |
96 | if (handled && (regs->msr & MSR_RI)) | ||
96 | mce->disposition = MCE_DISPOSITION_RECOVERED; | 97 | mce->disposition = MCE_DISPOSITION_RECOVERED; |
97 | else | 98 | else |
98 | mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; | 99 | mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a5c0153ede37..7fdf324d5b51 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops); | |||
78 | static int get_phb_number(struct device_node *dn) | 78 | static int get_phb_number(struct device_node *dn) |
79 | { | 79 | { |
80 | int ret, phb_id = -1; | 80 | int ret, phb_id = -1; |
81 | u32 prop_32; | ||
81 | u64 prop; | 82 | u64 prop; |
82 | 83 | ||
83 | /* | 84 | /* |
@@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn) | |||
86 | * reading "ibm,opal-phbid", only present in OPAL environment. | 87 | * reading "ibm,opal-phbid", only present in OPAL environment. |
87 | */ | 88 | */ |
88 | ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); | 89 | ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); |
89 | if (ret) | 90 | if (ret) { |
90 | ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop); | 91 | ret = of_property_read_u32_index(dn, "reg", 1, &prop_32); |
92 | prop = prop_32; | ||
93 | } | ||
91 | 94 | ||
92 | if (!ret) | 95 | if (!ret) |
93 | phb_id = (int)(prop & (MAX_PHBS - 1)); | 96 | phb_id = (int)(prop & (MAX_PHBS - 1)); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 58ccf86415b4..9ee2623e0f67 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread, | |||
1074 | #endif | 1074 | #endif |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
1078 | void flush_tmregs_to_thread(struct task_struct *tsk) | ||
1079 | { | ||
1080 | /* | ||
1081 | * Process self tracing is not yet supported through | ||
1082 | * ptrace interface. Ptrace generic code should have | ||
1083 | * prevented this from happening in the first place. | ||
1084 | * Warn once here with the message, if some how it | ||
1085 | * is attempted. | ||
1086 | */ | ||
1087 | WARN_ONCE(tsk == current, | ||
1088 | "Not expecting ptrace on self: TM regs may be incorrect\n"); | ||
1089 | |||
1090 | /* | ||
1091 | * If task is not current, it should have been flushed | ||
1092 | * already to it's thread_struct during __switch_to(). | ||
1093 | */ | ||
1094 | } | ||
1095 | #endif | ||
1096 | |||
1097 | struct task_struct *__switch_to(struct task_struct *prev, | 1077 | struct task_struct *__switch_to(struct task_struct *prev, |
1098 | struct task_struct *new) | 1078 | struct task_struct *new) |
1099 | { | 1079 | { |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 6ee4b72cda42..4e74fc588a3f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -2940,7 +2940,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
2940 | 2940 | ||
2941 | /* Don't print anything after quiesce under OPAL, it crashes OFW */ | 2941 | /* Don't print anything after quiesce under OPAL, it crashes OFW */ |
2942 | if (of_platform != PLATFORM_OPAL) { | 2942 | if (of_platform != PLATFORM_OPAL) { |
2943 | prom_printf("Booting Linux via __start() ...\n"); | 2943 | prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); |
2944 | prom_debug("->dt_header_start=0x%x\n", hdr); | 2944 | prom_debug("->dt_header_start=0x%x\n", hdr); |
2945 | } | 2945 | } |
2946 | 2946 | ||
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 4f3c5756cc09..bf91658a8a40 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/page.h> | 38 | #include <asm/page.h> |
39 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
40 | #include <asm/switch_to.h> | 40 | #include <asm/switch_to.h> |
41 | #include <asm/tm.h> | ||
41 | 42 | ||
42 | #define CREATE_TRACE_POINTS | 43 | #define CREATE_TRACE_POINTS |
43 | #include <trace/events/syscalls.h> | 44 | #include <trace/events/syscalls.h> |
@@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = { | |||
118 | REG_OFFSET_END, | 119 | REG_OFFSET_END, |
119 | }; | 120 | }; |
120 | 121 | ||
122 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
123 | static void flush_tmregs_to_thread(struct task_struct *tsk) | ||
124 | { | ||
125 | /* | ||
126 | * If task is not current, it will have been flushed already to | ||
127 | * it's thread_struct during __switch_to(). | ||
128 | * | ||
129 | * A reclaim flushes ALL the state. | ||
130 | */ | ||
131 | |||
132 | if (tsk == current && MSR_TM_SUSPENDED(mfmsr())) | ||
133 | tm_reclaim_current(TM_CAUSE_SIGNAL); | ||
134 | |||
135 | } | ||
136 | #else | ||
137 | static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } | ||
138 | #endif | ||
139 | |||
121 | /** | 140 | /** |
122 | * regs_query_register_offset() - query register offset from its name | 141 | * regs_query_register_offset() - query register offset from its name |
123 | * @name: the name of a register | 142 | * @name: the name of a register |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index c3e861df4b20..24ec3ea4b3a2 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) | |||
93 | * and we are running with enough of the MMU enabled to have our | 93 | * and we are running with enough of the MMU enabled to have our |
94 | * proper kernel virtual addresses | 94 | * proper kernel virtual addresses |
95 | * | 95 | * |
96 | * Find out what kind of machine we're on and save any data we need | 96 | * We do the initial parsing of the flat device-tree and prepares |
97 | * from the early boot process (devtree is copied on pmac by prom_init()). | 97 | * for the MMU to be fully initialized. |
98 | * This is called very early on the boot process, after a minimal | ||
99 | * MMU environment has been set up but before MMU_init is called. | ||
100 | */ | 98 | */ |
101 | extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ | 99 | extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ |
102 | 100 | ||
103 | notrace void __init machine_init(u64 dt_ptr) | 101 | notrace void __init machine_init(u64 dt_ptr) |
104 | { | 102 | { |
103 | /* Configure static keys first, now that we're relocated. */ | ||
104 | setup_feature_keys(); | ||
105 | |||
105 | /* Enable early debugging if any specified (see udbg.h) */ | 106 | /* Enable early debugging if any specified (see udbg.h) */ |
106 | udbg_early_init(); | 107 | udbg_early_init(); |
107 | 108 | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index eafb9a79e011..7ac8e6eaab5b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr) | |||
300 | 300 | ||
301 | /* Apply all the dynamic patching */ | 301 | /* Apply all the dynamic patching */ |
302 | apply_feature_fixups(); | 302 | apply_feature_fixups(); |
303 | setup_feature_keys(); | ||
303 | 304 | ||
304 | /* Initialize the hash table or TLB handling */ | 305 | /* Initialize the hash table or TLB handling */ |
305 | early_init_mmu(); | 306 | early_init_mmu(); |
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 6767605ea8da..4111d30badfa 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
24 | 24 | ||
25 | #include <asm/cpu_has_feature.h> | ||
25 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
27 | #include <asm/mmu.h> | 28 | #include <asm/mmu.h> |
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index cbabd143acae..78a7449bf489 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile | |||
@@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc | |||
30 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so | 30 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so |
31 | 31 | ||
32 | # link rule for the .so file, .lds has to be first | 32 | # link rule for the .so file, .lds has to be first |
33 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) | 33 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE |
34 | $(call if_changed,vdso32ld) | 34 | $(call if_changed,vdso32ld) |
35 | 35 | ||
36 | # strip rule for the .so file | 36 | # strip rule for the .so file |
@@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
39 | $(call if_changed,objcopy) | 39 | $(call if_changed,objcopy) |
40 | 40 | ||
41 | # assembly rules for the .S files | 41 | # assembly rules for the .S files |
42 | $(obj-vdso32): %.o: %.S | 42 | $(obj-vdso32): %.o: %.S FORCE |
43 | $(call if_changed_dep,vdso32as) | 43 | $(call if_changed_dep,vdso32as) |
44 | 44 | ||
45 | # actual build commands | 45 | # actual build commands |
46 | quiet_cmd_vdso32ld = VDSO32L $@ | 46 | quiet_cmd_vdso32ld = VDSO32L $@ |
47 | cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ | 47 | cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) |
48 | quiet_cmd_vdso32as = VDSO32A $@ | 48 | quiet_cmd_vdso32as = VDSO32A $@ |
49 | cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< | 49 | cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< |
50 | 50 | ||
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index c710802b8fb6..366ae09b14c1 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile | |||
@@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) | |||
23 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so | 23 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so |
24 | 24 | ||
25 | # link rule for the .so file, .lds has to be first | 25 | # link rule for the .so file, .lds has to be first |
26 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) | 26 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE |
27 | $(call if_changed,vdso64ld) | 27 | $(call if_changed,vdso64ld) |
28 | 28 | ||
29 | # strip rule for the .so file | 29 | # strip rule for the .so file |
@@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
32 | $(call if_changed,objcopy) | 32 | $(call if_changed,objcopy) |
33 | 33 | ||
34 | # assembly rules for the .S files | 34 | # assembly rules for the .S files |
35 | $(obj-vdso64): %.o: %.S | 35 | $(obj-vdso64): %.o: %.S FORCE |
36 | $(call if_changed_dep,vdso64as) | 36 | $(call if_changed_dep,vdso64as) |
37 | 37 | ||
38 | # actual build commands | 38 | # actual build commands |
39 | quiet_cmd_vdso64ld = VDSO64L $@ | 39 | quiet_cmd_vdso64ld = VDSO64L $@ |
40 | cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | 40 | cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) |
41 | quiet_cmd_vdso64as = VDSO64A $@ | 41 | quiet_cmd_vdso64as = VDSO64A $@ |
42 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< | 42 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< |
43 | 43 | ||
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index d90870a66b60..0a57fe6d49cc 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S | |||
@@ -127,8 +127,9 @@ _GLOBAL(csum_partial_copy_generic) | |||
127 | stw r7,12(r1) | 127 | stw r7,12(r1) |
128 | stw r8,8(r1) | 128 | stw r8,8(r1) |
129 | 129 | ||
130 | andi. r0,r4,1 /* is destination address even ? */ | 130 | rlwinm r0,r4,3,0x8 |
131 | cmplwi cr7,r0,0 | 131 | rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */ |
132 | cmplwi cr7,r0,0 /* is destination address even ? */ | ||
132 | addic r12,r6,0 | 133 | addic r12,r6,0 |
133 | addi r6,r4,-4 | 134 | addi r6,r4,-4 |
134 | neg r0,r4 | 135 | neg r0,r4 |
@@ -237,7 +238,7 @@ _GLOBAL(csum_partial_copy_generic) | |||
237 | 66: addze r3,r12 | 238 | 66: addze r3,r12 |
238 | addi r1,r1,16 | 239 | addi r1,r1,16 |
239 | beqlr+ cr7 | 240 | beqlr+ cr7 |
240 | rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */ | 241 | rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ |
241 | blr | 242 | blr |
242 | 243 | ||
243 | /* read fault */ | 244 | /* read fault */ |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 74145f02ad41..043415f0bdb1 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
@@ -188,7 +188,10 @@ void __init apply_feature_fixups(void) | |||
188 | &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); | 188 | &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); |
189 | #endif | 189 | #endif |
190 | do_final_fixups(); | 190 | do_final_fixups(); |
191 | } | ||
191 | 192 | ||
193 | void __init setup_feature_keys(void) | ||
194 | { | ||
192 | /* | 195 | /* |
193 | * Initialise jump label. This causes all the cpu/mmu_has_feature() | 196 | * Initialise jump label. This causes all the cpu/mmu_has_feature() |
194 | * checks to take on their correct polarity based on the current set of | 197 | * checks to take on their correct polarity based on the current set of |
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5be15cff758d..2975754c65ea 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
496 | gang = alloc_spu_gang(); | 496 | gang = alloc_spu_gang(); |
497 | SPUFS_I(inode)->i_ctx = NULL; | 497 | SPUFS_I(inode)->i_ctx = NULL; |
498 | SPUFS_I(inode)->i_gang = gang; | 498 | SPUFS_I(inode)->i_gang = gang; |
499 | if (!gang) | 499 | if (!gang) { |
500 | ret = -ENOMEM; | ||
500 | goto out_iput; | 501 | goto out_iput; |
502 | } | ||
501 | 503 | ||
502 | inode->i_op = &simple_dir_inode_operations; | 504 | inode->i_op = &simple_dir_inode_operations; |
503 | inode->i_fop = &simple_dir_operations; | 505 | inode->i_fop = &simple_dir_operations; |
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 309d9ccccd50..c61667e8bb06 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) | |||
187 | if (dev->vendor == 0x1959 && dev->device == 0xa007 && | 187 | if (dev->vendor == 0x1959 && dev->device == 0xa007 && |
188 | !firmware_has_feature(FW_FEATURE_LPAR)) { | 188 | !firmware_has_feature(FW_FEATURE_LPAR)) { |
189 | dev->dev.archdata.dma_ops = &dma_direct_ops; | 189 | dev->dev.archdata.dma_ops = &dma_direct_ops; |
190 | /* | ||
191 | * Set the coherent DMA mask to prevent the iommu | ||
192 | * being used unnecessarily | ||
193 | */ | ||
194 | dev->dev.coherent_dma_mask = DMA_BIT_MASK(44); | ||
190 | return; | 195 | return; |
191 | } | 196 | } |
192 | #endif | 197 | #endif |
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index e505223b4ec5..ed8bba68a162 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c | |||
@@ -228,7 +228,8 @@ int __init opal_event_init(void) | |||
228 | } | 228 | } |
229 | 229 | ||
230 | /* Install interrupt handler */ | 230 | /* Install interrupt handler */ |
231 | rc = request_irq(virq, opal_interrupt, 0, "opal", NULL); | 231 | rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW, |
232 | "opal", NULL); | ||
232 | if (rc) { | 233 | if (rc) { |
233 | irq_dispose_mapping(virq); | 234 | irq_dispose_mapping(virq); |
234 | pr_warn("Error %d requesting irq %d (0x%x)\n", | 235 | pr_warn("Error %d requesting irq %d (0x%x)\n", |
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 8b4fc68cebcb..6c9a65b52e63 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
@@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs, | |||
399 | 399 | ||
400 | if (!(regs->msr & MSR_RI)) { | 400 | if (!(regs->msr & MSR_RI)) { |
401 | /* If MSR_RI isn't set, we cannot recover */ | 401 | /* If MSR_RI isn't set, we cannot recover */ |
402 | pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); | ||
402 | recovered = 0; | 403 | recovered = 0; |
403 | } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { | 404 | } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { |
404 | /* Platform corrected itself */ | 405 | /* Platform corrected itself */ |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6b9528307f62..fd9444f9fb0c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -111,10 +111,17 @@ static int __init iommu_setup(char *str) | |||
111 | } | 111 | } |
112 | early_param("iommu", iommu_setup); | 112 | early_param("iommu", iommu_setup); |
113 | 113 | ||
114 | static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) | 114 | static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r) |
115 | { | 115 | { |
116 | return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == | 116 | /* |
117 | (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); | 117 | * WARNING: We cannot rely on the resource flags. The Linux PCI |
118 | * allocation code sometimes decides to put a 64-bit prefetchable | ||
119 | * BAR in the 32-bit window, so we have to compare the addresses. | ||
120 | * | ||
121 | * For simplicity we only test resource start. | ||
122 | */ | ||
123 | return (r->start >= phb->ioda.m64_base && | ||
124 | r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); | ||
118 | } | 125 | } |
119 | 126 | ||
120 | static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) | 127 | static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) |
@@ -229,7 +236,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, | |||
229 | sgsz = phb->ioda.m64_segsize; | 236 | sgsz = phb->ioda.m64_segsize; |
230 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | 237 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { |
231 | r = &pdev->resource[i]; | 238 | r = &pdev->resource[i]; |
232 | if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags)) | 239 | if (!r->parent || !pnv_pci_is_m64(phb, r)) |
233 | continue; | 240 | continue; |
234 | 241 | ||
235 | start = _ALIGN_DOWN(r->start - base, sgsz); | 242 | start = _ALIGN_DOWN(r->start - base, sgsz); |
@@ -1877,7 +1884,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, | |||
1877 | unsigned shift, unsigned long index, | 1884 | unsigned shift, unsigned long index, |
1878 | unsigned long npages) | 1885 | unsigned long npages) |
1879 | { | 1886 | { |
1880 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); | 1887 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); |
1881 | unsigned long start, end, inc; | 1888 | unsigned long start, end, inc; |
1882 | 1889 | ||
1883 | /* We'll invalidate DMA address in PE scope */ | 1890 | /* We'll invalidate DMA address in PE scope */ |
@@ -2863,7 +2870,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) | |||
2863 | res = &pdev->resource[i + PCI_IOV_RESOURCES]; | 2870 | res = &pdev->resource[i + PCI_IOV_RESOURCES]; |
2864 | if (!res->flags || res->parent) | 2871 | if (!res->flags || res->parent) |
2865 | continue; | 2872 | continue; |
2866 | if (!pnv_pci_is_mem_pref_64(res->flags)) { | 2873 | if (!pnv_pci_is_m64(phb, res)) { |
2867 | dev_warn(&pdev->dev, "Don't support SR-IOV with" | 2874 | dev_warn(&pdev->dev, "Don't support SR-IOV with" |
2868 | " non M64 VF BAR%d: %pR. \n", | 2875 | " non M64 VF BAR%d: %pR. \n", |
2869 | i, res); | 2876 | i, res); |
@@ -2958,7 +2965,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, | |||
2958 | index++; | 2965 | index++; |
2959 | } | 2966 | } |
2960 | } else if ((res->flags & IORESOURCE_MEM) && | 2967 | } else if ((res->flags & IORESOURCE_MEM) && |
2961 | !pnv_pci_is_mem_pref_64(res->flags)) { | 2968 | !pnv_pci_is_m64(phb, res)) { |
2962 | region.start = res->start - | 2969 | region.start = res->start - |
2963 | phb->hose->mem_offset[0] - | 2970 | phb->hose->mem_offset[0] - |
2964 | phb->ioda.m32_pci_base; | 2971 | phb->ioda.m32_pci_base; |
@@ -3083,9 +3090,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, | |||
3083 | bridge = bridge->bus->self; | 3090 | bridge = bridge->bus->self; |
3084 | } | 3091 | } |
3085 | 3092 | ||
3086 | /* We fail back to M32 if M64 isn't supported */ | 3093 | /* |
3087 | if (phb->ioda.m64_segsize && | 3094 | * We fall back to M32 if M64 isn't supported. We enforce the M64 |
3088 | pnv_pci_is_mem_pref_64(type)) | 3095 | * alignment for any 64-bit resource, PCIe doesn't care and |
3096 | * bridges only do 64-bit prefetchable anyway. | ||
3097 | */ | ||
3098 | if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64)) | ||
3089 | return phb->ioda.m64_segsize; | 3099 | return phb->ioda.m64_segsize; |
3090 | if (type & IORESOURCE_MEM) | 3100 | if (type & IORESOURCE_MEM) |
3091 | return phb->ioda.m32_segsize; | 3101 | return phb->ioda.m32_segsize; |
@@ -3125,7 +3135,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, | |||
3125 | w = NULL; | 3135 | w = NULL; |
3126 | if (r->flags & type & IORESOURCE_IO) | 3136 | if (r->flags & type & IORESOURCE_IO) |
3127 | w = &hose->io_resource; | 3137 | w = &hose->io_resource; |
3128 | else if (pnv_pci_is_mem_pref_64(r->flags) && | 3138 | else if (pnv_pci_is_m64(phb, r) && |
3129 | (type & IORESOURCE_PREFETCH) && | 3139 | (type & IORESOURCE_PREFETCH) && |
3130 | phb->ioda.m64_segsize) | 3140 | phb->ioda.m64_segsize) |
3131 | w = &hose->mem_resources[1]; | 3141 | w = &hose->mem_resources[1]; |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 43f7beb2902d..76ec104e88be 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb) | |||
320 | return dlpar_update_device_tree_lmb(lmb); | 320 | return dlpar_update_device_tree_lmb(lmb); |
321 | } | 321 | } |
322 | 322 | ||
323 | static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) | ||
324 | { | ||
325 | unsigned long section_nr; | ||
326 | struct mem_section *mem_sect; | ||
327 | struct memory_block *mem_block; | ||
328 | |||
329 | section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); | ||
330 | mem_sect = __nr_to_section(section_nr); | ||
331 | |||
332 | mem_block = find_memory_block(mem_sect); | ||
333 | return mem_block; | ||
334 | } | ||
335 | |||
336 | #ifdef CONFIG_MEMORY_HOTREMOVE | 323 | #ifdef CONFIG_MEMORY_HOTREMOVE |
337 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) | 324 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) |
338 | { | 325 | { |
@@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) | |||
420 | 407 | ||
421 | static int dlpar_add_lmb(struct of_drconf_cell *); | 408 | static int dlpar_add_lmb(struct of_drconf_cell *); |
422 | 409 | ||
410 | static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) | ||
411 | { | ||
412 | unsigned long section_nr; | ||
413 | struct mem_section *mem_sect; | ||
414 | struct memory_block *mem_block; | ||
415 | |||
416 | section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); | ||
417 | mem_sect = __nr_to_section(section_nr); | ||
418 | |||
419 | mem_block = find_memory_block(mem_sect); | ||
420 | return mem_block; | ||
421 | } | ||
422 | |||
423 | static int dlpar_remove_lmb(struct of_drconf_cell *lmb) | 423 | static int dlpar_remove_lmb(struct of_drconf_cell *lmb) |
424 | { | 424 | { |
425 | struct memory_block *mem_block; | 425 | struct memory_block *mem_block; |
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig index 0031eda320c3..385e7aa9e273 100644 --- a/arch/powerpc/sysdev/xics/Kconfig +++ b/arch/powerpc/sysdev/xics/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config PPC_XICS | 1 | config PPC_XICS |
2 | def_bool n | 2 | def_bool n |
3 | select PPC_SMP_MUXED_IPI | 3 | select PPC_SMP_MUXED_IPI |
4 | select HARDIRQS_SW_RESEND | ||
4 | 5 | ||
5 | config PPC_ICP_NATIVE | 6 | config PPC_ICP_NATIVE |
6 | def_bool n | 7 | def_bool n |
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 27c936c080a6..1c6bf4b66f56 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c | |||
@@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = { | |||
156 | .irq_mask = ics_opal_mask_irq, | 156 | .irq_mask = ics_opal_mask_irq, |
157 | .irq_unmask = ics_opal_unmask_irq, | 157 | .irq_unmask = ics_opal_unmask_irq, |
158 | .irq_eoi = NULL, /* Patched at init time */ | 158 | .irq_eoi = NULL, /* Patched at init time */ |
159 | .irq_set_affinity = ics_opal_set_affinity | 159 | .irq_set_affinity = ics_opal_set_affinity, |
160 | .irq_set_type = xics_set_irq_type, | ||
161 | .irq_retrigger = xics_retrigger, | ||
160 | }; | 162 | }; |
161 | 163 | ||
162 | static int ics_opal_map(struct ics *ics, unsigned int virq); | 164 | static int ics_opal_map(struct ics *ics, unsigned int virq); |
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 3854dd41558d..78ee5c778ef8 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c | |||
@@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = { | |||
163 | .irq_mask = ics_rtas_mask_irq, | 163 | .irq_mask = ics_rtas_mask_irq, |
164 | .irq_unmask = ics_rtas_unmask_irq, | 164 | .irq_unmask = ics_rtas_unmask_irq, |
165 | .irq_eoi = NULL, /* Patched at init time */ | 165 | .irq_eoi = NULL, /* Patched at init time */ |
166 | .irq_set_affinity = ics_rtas_set_affinity | 166 | .irq_set_affinity = ics_rtas_set_affinity, |
167 | .irq_set_type = xics_set_irq_type, | ||
168 | .irq_retrigger = xics_retrigger, | ||
167 | }; | 169 | }; |
168 | 170 | ||
169 | static int ics_rtas_map(struct ics *ics, unsigned int virq) | 171 | static int ics_rtas_map(struct ics *ics, unsigned int virq) |
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index a795a5f0301c..9d530f479588 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c | |||
@@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq, | |||
328 | 328 | ||
329 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | 329 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); |
330 | 330 | ||
331 | /* They aren't all level sensitive but we just don't really know */ | 331 | /* |
332 | irq_set_status_flags(virq, IRQ_LEVEL); | 332 | * Mark interrupts as edge sensitive by default so that resend |
333 | * actually works. The device-tree parsing will turn the LSIs | ||
334 | * back to level. | ||
335 | */ | ||
336 | irq_clear_status_flags(virq, IRQ_LEVEL); | ||
333 | 337 | ||
334 | /* Don't call into ICS for IPIs */ | 338 | /* Don't call into ICS for IPIs */ |
335 | if (hw == XICS_IPI) { | 339 | if (hw == XICS_IPI) { |
@@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, | |||
351 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | 355 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) |
352 | 356 | ||
353 | { | 357 | { |
354 | /* Current xics implementation translates everything | ||
355 | * to level. It is not technically right for MSIs but this | ||
356 | * is irrelevant at this point. We might get smarter in the future | ||
357 | */ | ||
358 | *out_hwirq = intspec[0]; | 358 | *out_hwirq = intspec[0]; |
359 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
360 | 359 | ||
360 | /* | ||
361 | * If intsize is at least 2, we look for the type in the second cell, | ||
362 | * we assume the LSB indicates a level interrupt. | ||
363 | */ | ||
364 | if (intsize > 1) { | ||
365 | if (intspec[1] & 1) | ||
366 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
367 | else | ||
368 | *out_flags = IRQ_TYPE_EDGE_RISING; | ||
369 | } else | ||
370 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | int xics_set_irq_type(struct irq_data *d, unsigned int flow_type) | ||
376 | { | ||
377 | /* | ||
378 | * We only support these. This has really no effect other than setting | ||
379 | * the corresponding descriptor bits mind you but those will in turn | ||
380 | * affect the resend function when re-enabling an edge interrupt. | ||
381 | * | ||
382 | * Set set the default to edge as explained in map(). | ||
383 | */ | ||
384 | if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) | ||
385 | flow_type = IRQ_TYPE_EDGE_RISING; | ||
386 | |||
387 | if (flow_type != IRQ_TYPE_EDGE_RISING && | ||
388 | flow_type != IRQ_TYPE_LEVEL_LOW) | ||
389 | return -EINVAL; | ||
390 | |||
391 | irqd_set_trigger_type(d, flow_type); | ||
392 | |||
393 | return IRQ_SET_MASK_OK_NOCOPY; | ||
394 | } | ||
395 | |||
396 | int xics_retrigger(struct irq_data *data) | ||
397 | { | ||
398 | /* | ||
399 | * We need to push a dummy CPPR when retriggering, since the subsequent | ||
400 | * EOI will try to pop it. Passing 0 works, as the function hard codes | ||
401 | * the priority value anyway. | ||
402 | */ | ||
403 | xics_push_cppr(0); | ||
404 | |||
405 | /* Tell the core to do a soft retrigger */ | ||
361 | return 0; | 406 | return 0; |
362 | } | 407 | } |
363 | 408 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0e348781327b..e751fe25d6ab 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -872,4 +872,17 @@ config S390_GUEST | |||
872 | Select this option if you want to run the kernel as a guest under | 872 | Select this option if you want to run the kernel as a guest under |
873 | the KVM hypervisor. | 873 | the KVM hypervisor. |
874 | 874 | ||
875 | config S390_GUEST_OLD_TRANSPORT | ||
876 | def_bool y | ||
877 | prompt "Guest support for old s390 virtio transport (DEPRECATED)" | ||
878 | depends on S390_GUEST | ||
879 | help | ||
880 | Enable this option to add support for the old s390-virtio | ||
881 | transport (i.e. virtio devices NOT based on virtio-ccw). This | ||
882 | type of virtio devices is only available on the experimental | ||
883 | kuli userspace or with old (< 2.6) qemu. If you are running | ||
884 | with a modern version of qemu (which supports virtio-ccw since | ||
885 | 1.4 and uses it by default since version 2.4), you probably won't | ||
886 | need this. | ||
887 | |||
875 | endmenu | 888 | endmenu |
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 97a69dbba649..9d35ec0cb8fc 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c | |||
@@ -100,6 +100,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box) | |||
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
104 | { | ||
105 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, | ||
106 | SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); | ||
107 | } | ||
108 | |||
103 | static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) | 109 | static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) |
104 | { | 110 | { |
105 | if (box->pmu->pmu_idx == 0) | 111 | if (box->pmu->pmu_idx == 0) |
@@ -127,6 +133,7 @@ static struct attribute_group snb_uncore_format_group = { | |||
127 | 133 | ||
128 | static struct intel_uncore_ops snb_uncore_msr_ops = { | 134 | static struct intel_uncore_ops snb_uncore_msr_ops = { |
129 | .init_box = snb_uncore_msr_init_box, | 135 | .init_box = snb_uncore_msr_init_box, |
136 | .enable_box = snb_uncore_msr_enable_box, | ||
130 | .exit_box = snb_uncore_msr_exit_box, | 137 | .exit_box = snb_uncore_msr_exit_box, |
131 | .disable_event = snb_uncore_msr_disable_event, | 138 | .disable_event = snb_uncore_msr_disable_event, |
132 | .enable_event = snb_uncore_msr_enable_event, | 139 | .enable_event = snb_uncore_msr_enable_event, |
@@ -192,6 +199,12 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box) | |||
192 | } | 199 | } |
193 | } | 200 | } |
194 | 201 | ||
202 | static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
203 | { | ||
204 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, | ||
205 | SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); | ||
206 | } | ||
207 | |||
195 | static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) | 208 | static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) |
196 | { | 209 | { |
197 | if (box->pmu->pmu_idx == 0) | 210 | if (box->pmu->pmu_idx == 0) |
@@ -200,6 +213,7 @@ static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) | |||
200 | 213 | ||
201 | static struct intel_uncore_ops skl_uncore_msr_ops = { | 214 | static struct intel_uncore_ops skl_uncore_msr_ops = { |
202 | .init_box = skl_uncore_msr_init_box, | 215 | .init_box = skl_uncore_msr_init_box, |
216 | .enable_box = skl_uncore_msr_enable_box, | ||
203 | .exit_box = skl_uncore_msr_exit_box, | 217 | .exit_box = skl_uncore_msr_exit_box, |
204 | .disable_event = snb_uncore_msr_disable_event, | 218 | .disable_event = snb_uncore_msr_disable_event, |
205 | .enable_event = snb_uncore_msr_enable_event, | 219 | .enable_event = snb_uncore_msr_enable_event, |
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 824e54086e07..8aee83bcf71f 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -2626,7 +2626,7 @@ void hswep_uncore_cpu_init(void) | |||
2626 | 2626 | ||
2627 | static struct intel_uncore_type hswep_uncore_ha = { | 2627 | static struct intel_uncore_type hswep_uncore_ha = { |
2628 | .name = "ha", | 2628 | .name = "ha", |
2629 | .num_counters = 5, | 2629 | .num_counters = 4, |
2630 | .num_boxes = 2, | 2630 | .num_boxes = 2, |
2631 | .perf_ctr_bits = 48, | 2631 | .perf_ctr_bits = 48, |
2632 | SNBEP_UNCORE_PCI_COMMON_INIT(), | 2632 | SNBEP_UNCORE_PCI_COMMON_INIT(), |
@@ -2645,7 +2645,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = { | |||
2645 | 2645 | ||
2646 | static struct intel_uncore_type hswep_uncore_imc = { | 2646 | static struct intel_uncore_type hswep_uncore_imc = { |
2647 | .name = "imc", | 2647 | .name = "imc", |
2648 | .num_counters = 5, | 2648 | .num_counters = 4, |
2649 | .num_boxes = 8, | 2649 | .num_boxes = 8, |
2650 | .perf_ctr_bits = 48, | 2650 | .perf_ctr_bits = 48, |
2651 | .fixed_ctr_bits = 48, | 2651 | .fixed_ctr_bits = 48, |
@@ -2691,7 +2691,7 @@ static struct intel_uncore_type hswep_uncore_irp = { | |||
2691 | 2691 | ||
2692 | static struct intel_uncore_type hswep_uncore_qpi = { | 2692 | static struct intel_uncore_type hswep_uncore_qpi = { |
2693 | .name = "qpi", | 2693 | .name = "qpi", |
2694 | .num_counters = 5, | 2694 | .num_counters = 4, |
2695 | .num_boxes = 3, | 2695 | .num_boxes = 3, |
2696 | .perf_ctr_bits = 48, | 2696 | .perf_ctr_bits = 48, |
2697 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | 2697 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
@@ -2773,7 +2773,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = { | |||
2773 | 2773 | ||
2774 | static struct intel_uncore_type hswep_uncore_r3qpi = { | 2774 | static struct intel_uncore_type hswep_uncore_r3qpi = { |
2775 | .name = "r3qpi", | 2775 | .name = "r3qpi", |
2776 | .num_counters = 4, | 2776 | .num_counters = 3, |
2777 | .num_boxes = 3, | 2777 | .num_boxes = 3, |
2778 | .perf_ctr_bits = 44, | 2778 | .perf_ctr_bits = 44, |
2779 | .constraints = hswep_uncore_r3qpi_constraints, | 2779 | .constraints = hswep_uncore_r3qpi_constraints, |
@@ -2972,7 +2972,7 @@ static struct intel_uncore_type bdx_uncore_ha = { | |||
2972 | 2972 | ||
2973 | static struct intel_uncore_type bdx_uncore_imc = { | 2973 | static struct intel_uncore_type bdx_uncore_imc = { |
2974 | .name = "imc", | 2974 | .name = "imc", |
2975 | .num_counters = 5, | 2975 | .num_counters = 4, |
2976 | .num_boxes = 8, | 2976 | .num_boxes = 8, |
2977 | .perf_ctr_bits = 48, | 2977 | .perf_ctr_bits = 48, |
2978 | .fixed_ctr_bits = 48, | 2978 | .fixed_ctr_bits = 48, |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index f5befd4945f2..124357773ffa 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -135,6 +135,7 @@ extern void init_apic_mappings(void); | |||
135 | void register_lapic_address(unsigned long address); | 135 | void register_lapic_address(unsigned long address); |
136 | extern void setup_boot_APIC_clock(void); | 136 | extern void setup_boot_APIC_clock(void); |
137 | extern void setup_secondary_APIC_clock(void); | 137 | extern void setup_secondary_APIC_clock(void); |
138 | extern void lapic_update_tsc_freq(void); | ||
138 | extern int APIC_init_uniprocessor(void); | 139 | extern int APIC_init_uniprocessor(void); |
139 | 140 | ||
140 | #ifdef CONFIG_X86_64 | 141 | #ifdef CONFIG_X86_64 |
@@ -170,6 +171,7 @@ static inline void init_apic_mappings(void) { } | |||
170 | static inline void disable_local_APIC(void) { } | 171 | static inline void disable_local_APIC(void) { } |
171 | # define setup_boot_APIC_clock x86_init_noop | 172 | # define setup_boot_APIC_clock x86_init_noop |
172 | # define setup_secondary_APIC_clock x86_init_noop | 173 | # define setup_secondary_APIC_clock x86_init_noop |
174 | static inline void lapic_update_tsc_freq(void) { } | ||
173 | #endif /* !CONFIG_X86_LOCAL_APIC */ | 175 | #endif /* !CONFIG_X86_LOCAL_APIC */ |
174 | 176 | ||
175 | #ifdef CONFIG_X86_X2APIC | 177 | #ifdef CONFIG_X86_X2APIC |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 20abd912f0e4..cea4fc19e844 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -313,7 +313,7 @@ int lapic_get_maxlvt(void) | |||
313 | 313 | ||
314 | /* Clock divisor */ | 314 | /* Clock divisor */ |
315 | #define APIC_DIVISOR 16 | 315 | #define APIC_DIVISOR 16 |
316 | #define TSC_DIVISOR 32 | 316 | #define TSC_DIVISOR 8 |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * This function sets up the local APIC timer, with a timeout of | 319 | * This function sets up the local APIC timer, with a timeout of |
@@ -565,13 +565,37 @@ static void setup_APIC_timer(void) | |||
565 | CLOCK_EVT_FEAT_DUMMY); | 565 | CLOCK_EVT_FEAT_DUMMY); |
566 | levt->set_next_event = lapic_next_deadline; | 566 | levt->set_next_event = lapic_next_deadline; |
567 | clockevents_config_and_register(levt, | 567 | clockevents_config_and_register(levt, |
568 | (tsc_khz / TSC_DIVISOR) * 1000, | 568 | tsc_khz * (1000 / TSC_DIVISOR), |
569 | 0xF, ~0UL); | 569 | 0xF, ~0UL); |
570 | } else | 570 | } else |
571 | clockevents_register_device(levt); | 571 | clockevents_register_device(levt); |
572 | } | 572 | } |
573 | 573 | ||
574 | /* | 574 | /* |
575 | * Install the updated TSC frequency from recalibration at the TSC | ||
576 | * deadline clockevent devices. | ||
577 | */ | ||
578 | static void __lapic_update_tsc_freq(void *info) | ||
579 | { | ||
580 | struct clock_event_device *levt = this_cpu_ptr(&lapic_events); | ||
581 | |||
582 | if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) | ||
583 | return; | ||
584 | |||
585 | clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR)); | ||
586 | } | ||
587 | |||
588 | void lapic_update_tsc_freq(void) | ||
589 | { | ||
590 | /* | ||
591 | * The clockevent device's ->mult and ->shift can both be | ||
592 | * changed. In order to avoid races, schedule the frequency | ||
593 | * update code on each CPU. | ||
594 | */ | ||
595 | on_each_cpu(__lapic_update_tsc_freq, NULL, 0); | ||
596 | } | ||
597 | |||
598 | /* | ||
575 | * In this functions we calibrate APIC bus clocks to the external timer. | 599 | * In this functions we calibrate APIC bus clocks to the external timer. |
576 | * | 600 | * |
577 | * We want to do the calibration only once since we want to have local timer | 601 | * We want to do the calibration only once since we want to have local timer |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ed16e58658a4..c6dfd801df97 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -1242,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | |||
1242 | memset(&curr_time, 0, sizeof(struct rtc_time)); | 1242 | memset(&curr_time, 0, sizeof(struct rtc_time)); |
1243 | 1243 | ||
1244 | if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) | 1244 | if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) |
1245 | mc146818_set_time(&curr_time); | 1245 | mc146818_get_time(&curr_time); |
1246 | 1246 | ||
1247 | if (hpet_rtc_flags & RTC_UIE && | 1247 | if (hpet_rtc_flags & RTC_UIE && |
1248 | curr_time.tm_sec != hpet_prev_update_sec) { | 1248 | curr_time.tm_sec != hpet_prev_update_sec) { |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 1ef87e887051..78b9cb5a26af 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/nmi.h> | 22 | #include <asm/nmi.h> |
23 | #include <asm/x86_init.h> | 23 | #include <asm/x86_init.h> |
24 | #include <asm/geode.h> | 24 | #include <asm/geode.h> |
25 | #include <asm/apic.h> | ||
25 | 26 | ||
26 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ | 27 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ |
27 | EXPORT_SYMBOL(cpu_khz); | 28 | EXPORT_SYMBOL(cpu_khz); |
@@ -1249,6 +1250,9 @@ static void tsc_refine_calibration_work(struct work_struct *work) | |||
1249 | (unsigned long)tsc_khz / 1000, | 1250 | (unsigned long)tsc_khz / 1000, |
1250 | (unsigned long)tsc_khz % 1000); | 1251 | (unsigned long)tsc_khz % 1000); |
1251 | 1252 | ||
1253 | /* Inform the TSC deadline clockevent devices about the recalibration */ | ||
1254 | lapic_update_tsc_freq(); | ||
1255 | |||
1252 | out: | 1256 | out: |
1253 | if (boot_cpu_has(X86_FEATURE_ART)) | 1257 | if (boot_cpu_has(X86_FEATURE_ART)) |
1254 | art_related_clocksource = &clocksource_tsc; | 1258 | art_related_clocksource = &clocksource_tsc; |
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 6c1ff31d99ff..495c776de4b4 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c | |||
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) | |||
357 | *cursor &= 0xfe; | 357 | *cursor &= 0xfe; |
358 | } | 358 | } |
359 | /* | 359 | /* |
360 | * Similar treatment for VEX3 prefix. | 360 | * Similar treatment for VEX3/EVEX prefix. |
361 | * TODO: add XOP/EVEX treatment when insn decoder supports them | 361 | * TODO: add XOP treatment when insn decoder supports them |
362 | */ | 362 | */ |
363 | if (insn->vex_prefix.nbytes == 3) { | 363 | if (insn->vex_prefix.nbytes >= 3) { |
364 | /* | 364 | /* |
365 | * vex2: c5 rvvvvLpp (has no b bit) | 365 | * vex2: c5 rvvvvLpp (has no b bit) |
366 | * vex3/xop: c4/8f rxbmmmmm wvvvvLpp | 366 | * vex3/xop: c4/8f rxbmmmmm wvvvvLpp |
367 | * evex: 62 rxbR00mm wvvvv1pp zllBVaaa | 367 | * evex: 62 rxbR00mm wvvvv1pp zllBVaaa |
368 | * (evex will need setting of both b and x since | 368 | * Setting VEX3.b (setting because it has inverted meaning). |
369 | * in non-sib encoding evex.x is 4th bit of MODRM.rm) | 369 | * Setting EVEX.x since (in non-SIB encoding) EVEX.x |
370 | * Setting VEX3.b (setting because it has inverted meaning): | 370 | * is the 4th bit of MODRM.rm, and needs the same treatment. |
371 | * For VEX3-encoded insns, VEX3.x value has no effect in | ||
372 | * non-SIB encoding, the change is superfluous but harmless. | ||
371 | */ | 373 | */ |
372 | cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; | 374 | cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; |
373 | *cursor |= 0x20; | 375 | *cursor |= 0x60; |
374 | } | 376 | } |
375 | 377 | ||
376 | /* | 378 | /* |
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) | |||
415 | 417 | ||
416 | reg = MODRM_REG(insn); /* Fetch modrm.reg */ | 418 | reg = MODRM_REG(insn); /* Fetch modrm.reg */ |
417 | reg2 = 0xff; /* Fetch vex.vvvv */ | 419 | reg2 = 0xff; /* Fetch vex.vvvv */ |
418 | if (insn->vex_prefix.nbytes == 2) | 420 | if (insn->vex_prefix.nbytes) |
419 | reg2 = insn->vex_prefix.bytes[1]; | ||
420 | else if (insn->vex_prefix.nbytes == 3) | ||
421 | reg2 = insn->vex_prefix.bytes[2]; | 421 | reg2 = insn->vex_prefix.bytes[2]; |
422 | /* | 422 | /* |
423 | * TODO: add XOP, EXEV vvvv reading. | 423 | * TODO: add XOP vvvv reading. |
424 | * | 424 | * |
425 | * vex.vvvv field is in bits 6-3, bits are inverted. | 425 | * vex.vvvv field is in bits 6-3, bits are inverted. |
426 | * But in 32-bit mode, high-order bit may be ignored. | 426 | * But in 32-bit mode, high-order bit may be ignored. |
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4e9fd1378aec..23f2f3e41c7f 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -187,7 +187,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target); | |||
187 | void uv_bios_init(void) | 187 | void uv_bios_init(void) |
188 | { | 188 | { |
189 | uv_systab = NULL; | 189 | uv_systab = NULL; |
190 | if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) { | 190 | if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || |
191 | !efi.uv_systab || efi_runtime_disabled()) { | ||
191 | pr_crit("UV: UVsystab: missing\n"); | 192 | pr_crit("UV: UVsystab: missing\n"); |
192 | return; | 193 | return; |
193 | } | 194 | } |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 8c234dd9b8bc..80cc7c089a15 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) | |||
1527 | { | 1527 | { |
1528 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; | 1528 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; |
1529 | u64 offset = nfit_blk->stat_offset + mmio->size * bw; | 1529 | u64 offset = nfit_blk->stat_offset + mmio->size * bw; |
1530 | const u32 STATUS_MASK = 0x80000037; | ||
1530 | 1531 | ||
1531 | if (mmio->num_lines) | 1532 | if (mmio->num_lines) |
1532 | offset = to_interleave_offset(offset, mmio); | 1533 | offset = to_interleave_offset(offset, mmio); |
1533 | 1534 | ||
1534 | return readl(mmio->addr.base + offset); | 1535 | return readl(mmio->addr.base + offset) & STATUS_MASK; |
1535 | } | 1536 | } |
1536 | 1537 | ||
1537 | static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, | 1538 | static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1a04af6d2421..6c6519f6492a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev) | |||
3950 | bool need_put = !!rbd_dev->opts; | 3950 | bool need_put = !!rbd_dev->opts; |
3951 | 3951 | ||
3952 | ceph_oid_destroy(&rbd_dev->header_oid); | 3952 | ceph_oid_destroy(&rbd_dev->header_oid); |
3953 | ceph_oloc_destroy(&rbd_dev->header_oloc); | ||
3953 | 3954 | ||
3954 | rbd_put_client(rbd_dev->rbd_client); | 3955 | rbd_put_client(rbd_dev->rbd_client); |
3955 | rbd_spec_put(rbd_dev->spec); | 3956 | rbd_spec_put(rbd_dev->spec); |
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus, | |||
5336 | } | 5337 | } |
5337 | spec->pool_id = (u64)rc; | 5338 | spec->pool_id = (u64)rc; |
5338 | 5339 | ||
5339 | /* The ceph file layout needs to fit pool id in 32 bits */ | ||
5340 | |||
5341 | if (spec->pool_id > (u64)U32_MAX) { | ||
5342 | rbd_warn(NULL, "pool id too large (%llu > %u)", | ||
5343 | (unsigned long long)spec->pool_id, U32_MAX); | ||
5344 | rc = -EIO; | ||
5345 | goto err_out_client; | ||
5346 | } | ||
5347 | |||
5348 | rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); | 5340 | rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); |
5349 | if (!rbd_dev) { | 5341 | if (!rbd_dev) { |
5350 | rc = -ENOMEM; | 5342 | rc = -ENOMEM; |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1523e05c46fc..93b1aaa5ba3b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk) | |||
391 | num_vqs = 1; | 391 | num_vqs = 1; |
392 | 392 | ||
393 | vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); | 393 | vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); |
394 | if (!vblk->vqs) { | 394 | if (!vblk->vqs) |
395 | err = -ENOMEM; | 395 | return -ENOMEM; |
396 | goto out; | ||
397 | } | ||
398 | 396 | ||
399 | names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); | 397 | names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); |
400 | if (!names) | ||
401 | goto err_names; | ||
402 | |||
403 | callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); | 398 | callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); |
404 | if (!callbacks) | ||
405 | goto err_callbacks; | ||
406 | |||
407 | vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); | 399 | vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); |
408 | if (!vqs) | 400 | if (!names || !callbacks || !vqs) { |
409 | goto err_vqs; | 401 | err = -ENOMEM; |
402 | goto out; | ||
403 | } | ||
410 | 404 | ||
411 | for (i = 0; i < num_vqs; i++) { | 405 | for (i = 0; i < num_vqs; i++) { |
412 | callbacks[i] = virtblk_done; | 406 | callbacks[i] = virtblk_done; |
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk) | |||
417 | /* Discover virtqueues and write information to configuration. */ | 411 | /* Discover virtqueues and write information to configuration. */ |
418 | err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); | 412 | err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); |
419 | if (err) | 413 | if (err) |
420 | goto err_find_vqs; | 414 | goto out; |
421 | 415 | ||
422 | for (i = 0; i < num_vqs; i++) { | 416 | for (i = 0; i < num_vqs; i++) { |
423 | spin_lock_init(&vblk->vqs[i].lock); | 417 | spin_lock_init(&vblk->vqs[i].lock); |
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk) | |||
425 | } | 419 | } |
426 | vblk->num_vqs = num_vqs; | 420 | vblk->num_vqs = num_vqs; |
427 | 421 | ||
428 | err_find_vqs: | 422 | out: |
429 | kfree(vqs); | 423 | kfree(vqs); |
430 | err_vqs: | ||
431 | kfree(callbacks); | 424 | kfree(callbacks); |
432 | err_callbacks: | ||
433 | kfree(names); | 425 | kfree(names); |
434 | err_names: | ||
435 | if (err) | 426 | if (err) |
436 | kfree(vblk->vqs); | 427 | kfree(vblk->vqs); |
437 | out: | ||
438 | return err; | 428 | return err; |
439 | } | 429 | } |
440 | 430 | ||
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 28bce3f4f81d..57700541f951 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | |||
12 | #define pr_fmt(fmt) "arm_arch_timer: " fmt | ||
13 | |||
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
12 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
13 | #include <linux/device.h> | 16 | #include <linux/device.h> |
@@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void) | |||
370 | arch_timer_ppi[PHYS_NONSECURE_PPI]); | 373 | arch_timer_ppi[PHYS_NONSECURE_PPI]); |
371 | } | 374 | } |
372 | 375 | ||
376 | static u32 check_ppi_trigger(int irq) | ||
377 | { | ||
378 | u32 flags = irq_get_trigger_type(irq); | ||
379 | |||
380 | if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) { | ||
381 | pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq); | ||
382 | pr_warn("WARNING: Please fix your firmware\n"); | ||
383 | flags = IRQF_TRIGGER_LOW; | ||
384 | } | ||
385 | |||
386 | return flags; | ||
387 | } | ||
388 | |||
373 | static int arch_timer_starting_cpu(unsigned int cpu) | 389 | static int arch_timer_starting_cpu(unsigned int cpu) |
374 | { | 390 | { |
375 | struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); | 391 | struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); |
392 | u32 flags; | ||
376 | 393 | ||
377 | __arch_timer_setup(ARCH_CP15_TIMER, clk); | 394 | __arch_timer_setup(ARCH_CP15_TIMER, clk); |
378 | 395 | ||
379 | enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); | 396 | flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); |
397 | enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); | ||
380 | 398 | ||
381 | if (arch_timer_has_nonsecure_ppi()) | 399 | if (arch_timer_has_nonsecure_ppi()) { |
382 | enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); | 400 | flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]); |
401 | enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags); | ||
402 | } | ||
383 | 403 | ||
384 | arch_counter_set_user_access(); | 404 | arch_counter_set_user_access(); |
385 | if (evtstrm_enable) | 405 | if (evtstrm_enable) |
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index c99c24bc79b0..9ae6c116c474 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/efi.h> | 18 | #include <linux/efi.h> |
19 | #include <linux/vmalloc.h> | ||
19 | 20 | ||
20 | #define NO_FURTHER_WRITE_ACTION -1 | 21 | #define NO_FURTHER_WRITE_ACTION -1 |
21 | 22 | ||
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) | |||
108 | int ret; | 109 | int ret; |
109 | void *cap_hdr_temp; | 110 | void *cap_hdr_temp; |
110 | 111 | ||
111 | cap_hdr_temp = kmap(cap_info->pages[0]); | 112 | cap_hdr_temp = vmap(cap_info->pages, cap_info->index, |
113 | VM_MAP, PAGE_KERNEL); | ||
112 | if (!cap_hdr_temp) { | 114 | if (!cap_hdr_temp) { |
113 | pr_debug("%s: kmap() failed\n", __func__); | 115 | pr_debug("%s: vmap() failed\n", __func__); |
114 | return -EFAULT; | 116 | return -EFAULT; |
115 | } | 117 | } |
116 | 118 | ||
117 | ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); | 119 | ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); |
118 | kunmap(cap_info->pages[0]); | 120 | vunmap(cap_hdr_temp); |
119 | if (ret) { | 121 | if (ret) { |
120 | pr_err("%s: efi_capsule_update() failed\n", __func__); | 122 | pr_err("%s: efi_capsule_update() failed\n", __func__); |
121 | return ret; | 123 | return ret; |
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c index 53b9fd2293ee..6eedff45e6d7 100644 --- a/drivers/firmware/efi/capsule.c +++ b/drivers/firmware/efi/capsule.c | |||
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule, | |||
190 | * map the capsule described by @capsule with its data in @pages and | 190 | * map the capsule described by @capsule with its data in @pages and |
191 | * send it to the firmware via the UpdateCapsule() runtime service. | 191 | * send it to the firmware via the UpdateCapsule() runtime service. |
192 | * | 192 | * |
193 | * @capsule must be a virtual mapping of the first page in @pages | 193 | * @capsule must be a virtual mapping of the complete capsule update in the |
194 | * (@pages[0]) in the kernel address space. That is, a | 194 | * kernel address space, as the capsule can be consumed immediately. |
195 | * capsule_header_t that describes the entire contents of the capsule | 195 | * A capsule_header_t that describes the entire contents of the capsule |
196 | * must be at the start of the first data page. | 196 | * must be at the start of the first data page. |
197 | * | 197 | * |
198 | * Even though this function will validate that the firmware supports | 198 | * Even though this function will validate that the firmware supports |
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index bdee9a01ef35..c466ee2b0c97 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, | |||
90 | */ | 90 | */ |
91 | mutex_lock(&afu->contexts_lock); | 91 | mutex_lock(&afu->contexts_lock); |
92 | idr_preload(GFP_KERNEL); | 92 | idr_preload(GFP_KERNEL); |
93 | i = idr_alloc(&ctx->afu->contexts_idr, ctx, | 93 | i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe, |
94 | ctx->afu->adapter->native->sl_ops->min_pe, | ||
95 | ctx->afu->num_procs, GFP_NOWAIT); | 94 | ctx->afu->num_procs, GFP_NOWAIT); |
96 | idr_preload_end(); | 95 | idr_preload_end(); |
97 | mutex_unlock(&afu->contexts_lock); | 96 | mutex_unlock(&afu->contexts_lock); |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index de090533f18c..344a0ff8f8c7 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops { | |||
561 | u64 (*timebase_read)(struct cxl *adapter); | 561 | u64 (*timebase_read)(struct cxl *adapter); |
562 | int capi_mode; | 562 | int capi_mode; |
563 | bool needs_reset_before_disable; | 563 | bool needs_reset_before_disable; |
564 | int min_pe; | ||
565 | }; | 564 | }; |
566 | 565 | ||
567 | struct cxl_native { | 566 | struct cxl_native { |
@@ -603,6 +602,7 @@ struct cxl { | |||
603 | struct bin_attribute cxl_attr; | 602 | struct bin_attribute cxl_attr; |
604 | int adapter_num; | 603 | int adapter_num; |
605 | int user_irqs; | 604 | int user_irqs; |
605 | int min_pe; | ||
606 | u64 ps_size; | 606 | u64 ps_size; |
607 | u16 psl_rev; | 607 | u16 psl_rev; |
608 | u16 base_image; | 608 | u16 base_image; |
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 3bcdaee11ba1..e606fdc4bc9c 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data) | |||
924 | return fail_psl_irq(afu, &irq_info); | 924 | return fail_psl_irq(afu, &irq_info); |
925 | } | 925 | } |
926 | 926 | ||
927 | void native_irq_wait(struct cxl_context *ctx) | 927 | static void native_irq_wait(struct cxl_context *ctx) |
928 | { | 928 | { |
929 | u64 dsisr; | 929 | u64 dsisr; |
930 | int timeout = 1000; | 930 | int timeout = 1000; |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d152e2de8c93..6f0c4ac4b649 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id | |||
379 | 379 | ||
380 | static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) | 380 | static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) |
381 | { | 381 | { |
382 | u64 psl_dsnctl; | 382 | u64 psl_dsnctl, psl_fircntl; |
383 | u64 chipid; | 383 | u64 chipid; |
384 | u64 capp_unit_id; | 384 | u64 capp_unit_id; |
385 | int rc; | 385 | int rc; |
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_ | |||
398 | cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); | 398 | cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); |
399 | /* snoop write mask */ | 399 | /* snoop write mask */ |
400 | cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); | 400 | cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); |
401 | /* set fir_accum */ | 401 | /* set fir_cntl to recommended value for production env */ |
402 | cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); | 402 | psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ |
403 | psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ | ||
404 | psl_fircntl |= 0x1ULL; /* ce_thresh */ | ||
405 | cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl); | ||
403 | /* for debugging with trace arrays */ | 406 | /* for debugging with trace arrays */ |
404 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); | 407 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); |
405 | 408 | ||
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = { | |||
1521 | .write_timebase_ctrl = write_timebase_ctrl_xsl, | 1524 | .write_timebase_ctrl = write_timebase_ctrl_xsl, |
1522 | .timebase_read = timebase_read_xsl, | 1525 | .timebase_read = timebase_read_xsl, |
1523 | .capi_mode = OPAL_PHB_CAPI_MODE_DMA, | 1526 | .capi_mode = OPAL_PHB_CAPI_MODE_DMA, |
1524 | .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ | ||
1525 | }; | 1527 | }; |
1526 | 1528 | ||
1527 | static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) | 1529 | static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) |
1528 | { | 1530 | { |
1529 | if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { | 1531 | if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { |
1532 | /* Mellanox CX-4 */ | ||
1530 | dev_info(&adapter->dev, "Device uses an XSL\n"); | 1533 | dev_info(&adapter->dev, "Device uses an XSL\n"); |
1531 | adapter->native->sl_ops = &xsl_ops; | 1534 | adapter->native->sl_ops = &xsl_ops; |
1535 | adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ | ||
1532 | } else { | 1536 | } else { |
1533 | dev_info(&adapter->dev, "Device uses a PSL\n"); | 1537 | dev_info(&adapter->dev, "Device uses a PSL\n"); |
1534 | adapter->native->sl_ops = &psl_ops; | 1538 | adapter->native->sl_ops = &psl_ops; |
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index dee8def1c193..7ada5f1b7bb6 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c | |||
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu) | |||
221 | /* Setup the PHB using arch provided callback */ | 221 | /* Setup the PHB using arch provided callback */ |
222 | phb->ops = &cxl_pcie_pci_ops; | 222 | phb->ops = &cxl_pcie_pci_ops; |
223 | phb->cfg_addr = NULL; | 223 | phb->cfg_addr = NULL; |
224 | phb->cfg_data = 0; | 224 | phb->cfg_data = NULL; |
225 | phb->private_data = afu; | 225 | phb->private_data = afu; |
226 | phb->controller_ops = cxl_pci_controller_ops; | 226 | phb->controller_ops = cxl_pci_controller_ops; |
227 | 227 | ||
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 88e91666f145..368795aad5c9 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
@@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt) | |||
1269 | } | 1269 | } |
1270 | } | 1270 | } |
1271 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); | 1271 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); |
1272 | btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; | ||
1272 | revalidate_disk(btt->btt_disk); | 1273 | revalidate_disk(btt->btt_disk); |
1273 | 1274 | ||
1274 | return 0; | 1275 | return 0; |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 3fa7919f94a8..97dd2925ed6e 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev, | |||
140 | } | 140 | } |
141 | static DEVICE_ATTR_RW(namespace); | 141 | static DEVICE_ATTR_RW(namespace); |
142 | 142 | ||
143 | static ssize_t size_show(struct device *dev, | ||
144 | struct device_attribute *attr, char *buf) | ||
145 | { | ||
146 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
147 | ssize_t rc; | ||
148 | |||
149 | device_lock(dev); | ||
150 | if (dev->driver) | ||
151 | rc = sprintf(buf, "%llu\n", nd_btt->size); | ||
152 | else { | ||
153 | /* no size to convey if the btt instance is disabled */ | ||
154 | rc = -ENXIO; | ||
155 | } | ||
156 | device_unlock(dev); | ||
157 | |||
158 | return rc; | ||
159 | } | ||
160 | static DEVICE_ATTR_RO(size); | ||
161 | |||
143 | static struct attribute *nd_btt_attributes[] = { | 162 | static struct attribute *nd_btt_attributes[] = { |
144 | &dev_attr_sector_size.attr, | 163 | &dev_attr_sector_size.attr, |
145 | &dev_attr_namespace.attr, | 164 | &dev_attr_namespace.attr, |
146 | &dev_attr_uuid.attr, | 165 | &dev_attr_uuid.attr, |
166 | &dev_attr_size.attr, | ||
147 | NULL, | 167 | NULL, |
148 | }; | 168 | }; |
149 | 169 | ||
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 40476399d227..8024a0ef86d3 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
@@ -143,6 +143,7 @@ struct nd_btt { | |||
143 | struct nd_namespace_common *ndns; | 143 | struct nd_namespace_common *ndns; |
144 | struct btt *btt; | 144 | struct btt *btt; |
145 | unsigned long lbasize; | 145 | unsigned long lbasize; |
146 | u64 size; | ||
146 | u8 *uuid; | 147 | u8 *uuid; |
147 | int id; | 148 | int id; |
148 | }; | 149 | }; |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index a02981efdad5..eafa6138a6b8 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1411,6 +1411,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, | |||
1411 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 1411 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
1412 | pci_msi_domain_update_chip_ops(info); | 1412 | pci_msi_domain_update_chip_ops(info); |
1413 | 1413 | ||
1414 | info->flags |= MSI_FLAG_ACTIVATE_EARLY; | ||
1415 | |||
1414 | domain = msi_create_irq_domain(fwnode, info, parent); | 1416 | domain = msi_create_irq_domain(fwnode, info, parent); |
1415 | if (!domain) | 1417 | if (!domain) |
1416 | return NULL; | 1418 | return NULL; |
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index cecc15a880de..3fa17ac8df54 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c | |||
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch) | |||
1080 | static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | 1080 | static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, |
1081 | long timeout) | 1081 | long timeout) |
1082 | { | 1082 | { |
1083 | struct rio_channel *ch = NULL; | 1083 | struct rio_channel *ch; |
1084 | struct rio_channel *new_ch = NULL; | 1084 | struct rio_channel *new_ch; |
1085 | struct conn_req *req; | 1085 | struct conn_req *req; |
1086 | struct cm_peer *peer; | 1086 | struct cm_peer *peer; |
1087 | int found = 0; | 1087 | int found = 0; |
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1155 | 1155 | ||
1156 | spin_unlock_bh(&ch->lock); | 1156 | spin_unlock_bh(&ch->lock); |
1157 | riocm_put_channel(ch); | 1157 | riocm_put_channel(ch); |
1158 | ch = NULL; | ||
1158 | kfree(req); | 1159 | kfree(req); |
1159 | 1160 | ||
1160 | down_read(&rdev_sem); | 1161 | down_read(&rdev_sem); |
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1172 | if (!found) { | 1173 | if (!found) { |
1173 | /* If peer device object not found, simply ignore the request */ | 1174 | /* If peer device object not found, simply ignore the request */ |
1174 | err = -ENODEV; | 1175 | err = -ENODEV; |
1175 | goto err_nodev; | 1176 | goto err_put_new_ch; |
1176 | } | 1177 | } |
1177 | 1178 | ||
1178 | new_ch->rdev = peer->rdev; | 1179 | new_ch->rdev = peer->rdev; |
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1184 | 1185 | ||
1185 | *new_ch_id = new_ch->id; | 1186 | *new_ch_id = new_ch->id; |
1186 | return new_ch; | 1187 | return new_ch; |
1188 | |||
1189 | err_put_new_ch: | ||
1190 | spin_lock_bh(&idr_lock); | ||
1191 | idr_remove(&ch_idr, new_ch->id); | ||
1192 | spin_unlock_bh(&idr_lock); | ||
1193 | riocm_put_channel(new_ch); | ||
1194 | |||
1187 | err_put: | 1195 | err_put: |
1188 | riocm_put_channel(ch); | 1196 | if (ch) |
1189 | err_nodev: | 1197 | riocm_put_channel(ch); |
1190 | if (new_ch) { | ||
1191 | spin_lock_bh(&idr_lock); | ||
1192 | idr_remove(&ch_idr, new_ch->id); | ||
1193 | spin_unlock_bh(&idr_lock); | ||
1194 | riocm_put_channel(new_ch); | ||
1195 | } | ||
1196 | *new_ch_id = 0; | 1198 | *new_ch_id = 0; |
1197 | return ERR_PTR(err); | 1199 | return ERR_PTR(err); |
1198 | } | 1200 | } |
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..df40692a9011 100644 --- a/drivers/s390/virtio/Makefile +++ b/drivers/s390/virtio/Makefile | |||
@@ -6,4 +6,8 @@ | |||
6 | # it under the terms of the GNU General Public License (version 2 only) | 6 | # it under the terms of the GNU General Public License (version 2 only) |
7 | # as published by the Free Software Foundation. | 7 | # as published by the Free Software Foundation. |
8 | 8 | ||
9 | obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o | 9 | s390-virtio-objs := virtio_ccw.o |
10 | ifdef CONFIG_S390_GUEST_OLD_TRANSPORT | ||
11 | s390-virtio-objs += kvm_virtio.o | ||
12 | endif | ||
13 | obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs) | ||
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 1d060fd293a3..5e5c11f37b24 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c | |||
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void) | |||
458 | if (test_devices_support(total_memory_size) < 0) | 458 | if (test_devices_support(total_memory_size) < 0) |
459 | return -ENODEV; | 459 | return -ENODEV; |
460 | 460 | ||
461 | pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n"); | ||
462 | |||
461 | rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); | 463 | rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); |
462 | if (rc) | 464 | if (rc) |
463 | return rc; | 465 | return rc; |
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void) | |||
482 | } | 484 | } |
483 | 485 | ||
484 | /* code for early console output with virtio_console */ | 486 | /* code for early console output with virtio_console */ |
485 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 487 | static int early_put_chars(u32 vtermno, const char *buf, int count) |
486 | { | 488 | { |
487 | char scratch[17]; | 489 | char scratch[17]; |
488 | unsigned int len = count; | 490 | unsigned int len = count; |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 0ddf3a2dbfc4..e3b30ea9ece5 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
@@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) | |||
307 | 307 | ||
308 | vhost_disable_notify(&vsock->dev, vq); | 308 | vhost_disable_notify(&vsock->dev, vq); |
309 | for (;;) { | 309 | for (;;) { |
310 | u32 len; | ||
311 | |||
310 | if (!vhost_vsock_more_replies(vsock)) { | 312 | if (!vhost_vsock_more_replies(vsock)) { |
311 | /* Stop tx until the device processes already | 313 | /* Stop tx until the device processes already |
312 | * pending replies. Leave tx virtqueue | 314 | * pending replies. Leave tx virtqueue |
@@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) | |||
334 | continue; | 336 | continue; |
335 | } | 337 | } |
336 | 338 | ||
339 | len = pkt->len; | ||
340 | |||
337 | /* Only accept correctly addressed packets */ | 341 | /* Only accept correctly addressed packets */ |
338 | if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) | 342 | if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) |
339 | virtio_transport_recv_pkt(pkt); | 343 | virtio_transport_recv_pkt(pkt); |
340 | else | 344 | else |
341 | virtio_transport_free_pkt(pkt); | 345 | virtio_transport_free_pkt(pkt); |
342 | 346 | ||
343 | vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); | 347 | vhost_add_used(vq, head, sizeof(pkt->hdr) + len); |
344 | added = true; | 348 | added = true; |
345 | } | 349 | } |
346 | 350 | ||
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 114a0c88afb8..e383ecdaca59 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, | |||
327 | * host should service the ring ASAP. */ | 327 | * host should service the ring ASAP. */ |
328 | if (out_sgs) | 328 | if (out_sgs) |
329 | vq->notify(&vq->vq); | 329 | vq->notify(&vq->vq); |
330 | if (indirect) | ||
331 | kfree(desc); | ||
330 | END_USE(vq); | 332 | END_USE(vq); |
331 | return -ENOSPC; | 333 | return -ENOSPC; |
332 | } | 334 | } |
@@ -426,6 +428,7 @@ unmap_release: | |||
426 | if (indirect) | 428 | if (indirect) |
427 | kfree(desc); | 429 | kfree(desc); |
428 | 430 | ||
431 | END_USE(vq); | ||
429 | return -EIO; | 432 | return -EIO; |
430 | } | 433 | } |
431 | 434 | ||
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index b6d210e7a993..d9ddcfc18c91 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c | |||
@@ -862,33 +862,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, | |||
862 | return 0; | 862 | return 0; |
863 | } | 863 | } |
864 | 864 | ||
865 | int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, | ||
866 | struct btrfs_trans_handle *trans, | ||
867 | u64 ref_root, u64 bytenr, u64 num_bytes) | ||
868 | { | ||
869 | struct btrfs_delayed_ref_root *delayed_refs; | ||
870 | struct btrfs_delayed_ref_head *ref_head; | ||
871 | int ret = 0; | ||
872 | |||
873 | if (!fs_info->quota_enabled || !is_fstree(ref_root)) | ||
874 | return 0; | ||
875 | |||
876 | delayed_refs = &trans->transaction->delayed_refs; | ||
877 | |||
878 | spin_lock(&delayed_refs->lock); | ||
879 | ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0); | ||
880 | if (!ref_head) { | ||
881 | ret = -ENOENT; | ||
882 | goto out; | ||
883 | } | ||
884 | WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root); | ||
885 | ref_head->qgroup_ref_root = ref_root; | ||
886 | ref_head->qgroup_reserved = num_bytes; | ||
887 | out: | ||
888 | spin_unlock(&delayed_refs->lock); | ||
889 | return ret; | ||
890 | } | ||
891 | |||
892 | int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, | 865 | int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, |
893 | struct btrfs_trans_handle *trans, | 866 | struct btrfs_trans_handle *trans, |
894 | u64 bytenr, u64 num_bytes, | 867 | u64 bytenr, u64 num_bytes, |
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 5fca9534a271..43f3629760e9 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, | |||
250 | u64 parent, u64 ref_root, | 250 | u64 parent, u64 ref_root, |
251 | u64 owner, u64 offset, u64 reserved, int action, | 251 | u64 owner, u64 offset, u64 reserved, int action, |
252 | struct btrfs_delayed_extent_op *extent_op); | 252 | struct btrfs_delayed_extent_op *extent_op); |
253 | int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, | ||
254 | struct btrfs_trans_handle *trans, | ||
255 | u64 ref_root, u64 bytenr, u64 num_bytes); | ||
256 | int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, | 253 | int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, |
257 | struct btrfs_trans_handle *trans, | 254 | struct btrfs_trans_handle *trans, |
258 | u64 bytenr, u64 num_bytes, | 255 | u64 bytenr, u64 num_bytes, |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9404121fd5f7..5842423f8f47 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
2033 | */ | 2033 | */ |
2034 | clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | 2034 | clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
2035 | &BTRFS_I(inode)->runtime_flags); | 2035 | &BTRFS_I(inode)->runtime_flags); |
2036 | /* | ||
2037 | * An ordered extent might have started before and completed | ||
2038 | * already with io errors, in which case the inode was not | ||
2039 | * updated and we end up here. So check the inode's mapping | ||
2040 | * flags for any errors that might have happened while doing | ||
2041 | * writeback of file data. | ||
2042 | */ | ||
2043 | ret = btrfs_inode_check_errors(inode); | ||
2036 | inode_unlock(inode); | 2044 | inode_unlock(inode); |
2037 | goto out; | 2045 | goto out; |
2038 | } | 2046 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2f5975954ccf..08dfc57e2270 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3435,10 +3435,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) | |||
3435 | found_key.offset = 0; | 3435 | found_key.offset = 0; |
3436 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); | 3436 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); |
3437 | ret = PTR_ERR_OR_ZERO(inode); | 3437 | ret = PTR_ERR_OR_ZERO(inode); |
3438 | if (ret && ret != -ESTALE) | 3438 | if (ret && ret != -ENOENT) |
3439 | goto out; | 3439 | goto out; |
3440 | 3440 | ||
3441 | if (ret == -ESTALE && root == root->fs_info->tree_root) { | 3441 | if (ret == -ENOENT && root == root->fs_info->tree_root) { |
3442 | struct btrfs_root *dead_root; | 3442 | struct btrfs_root *dead_root; |
3443 | struct btrfs_fs_info *fs_info = root->fs_info; | 3443 | struct btrfs_fs_info *fs_info = root->fs_info; |
3444 | int is_dead_root = 0; | 3444 | int is_dead_root = 0; |
@@ -3474,7 +3474,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) | |||
3474 | * Inode is already gone but the orphan item is still there, | 3474 | * Inode is already gone but the orphan item is still there, |
3475 | * kill the orphan item. | 3475 | * kill the orphan item. |
3476 | */ | 3476 | */ |
3477 | if (ret == -ESTALE) { | 3477 | if (ret == -ENOENT) { |
3478 | trans = btrfs_start_transaction(root, 1); | 3478 | trans = btrfs_start_transaction(root, 1); |
3479 | if (IS_ERR(trans)) { | 3479 | if (IS_ERR(trans)) { |
3480 | ret = PTR_ERR(trans); | 3480 | ret = PTR_ERR(trans); |
@@ -3633,7 +3633,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf, | |||
3633 | /* | 3633 | /* |
3634 | * read an inode from the btree into the in-memory inode | 3634 | * read an inode from the btree into the in-memory inode |
3635 | */ | 3635 | */ |
3636 | static void btrfs_read_locked_inode(struct inode *inode) | 3636 | static int btrfs_read_locked_inode(struct inode *inode) |
3637 | { | 3637 | { |
3638 | struct btrfs_path *path; | 3638 | struct btrfs_path *path; |
3639 | struct extent_buffer *leaf; | 3639 | struct extent_buffer *leaf; |
@@ -3652,14 +3652,19 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
3652 | filled = true; | 3652 | filled = true; |
3653 | 3653 | ||
3654 | path = btrfs_alloc_path(); | 3654 | path = btrfs_alloc_path(); |
3655 | if (!path) | 3655 | if (!path) { |
3656 | ret = -ENOMEM; | ||
3656 | goto make_bad; | 3657 | goto make_bad; |
3658 | } | ||
3657 | 3659 | ||
3658 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); | 3660 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); |
3659 | 3661 | ||
3660 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); | 3662 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); |
3661 | if (ret) | 3663 | if (ret) { |
3664 | if (ret > 0) | ||
3665 | ret = -ENOENT; | ||
3662 | goto make_bad; | 3666 | goto make_bad; |
3667 | } | ||
3663 | 3668 | ||
3664 | leaf = path->nodes[0]; | 3669 | leaf = path->nodes[0]; |
3665 | 3670 | ||
@@ -3812,11 +3817,12 @@ cache_acl: | |||
3812 | } | 3817 | } |
3813 | 3818 | ||
3814 | btrfs_update_iflags(inode); | 3819 | btrfs_update_iflags(inode); |
3815 | return; | 3820 | return 0; |
3816 | 3821 | ||
3817 | make_bad: | 3822 | make_bad: |
3818 | btrfs_free_path(path); | 3823 | btrfs_free_path(path); |
3819 | make_bad_inode(inode); | 3824 | make_bad_inode(inode); |
3825 | return ret; | ||
3820 | } | 3826 | } |
3821 | 3827 | ||
3822 | /* | 3828 | /* |
@@ -4204,6 +4210,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
4204 | int err = 0; | 4210 | int err = 0; |
4205 | struct btrfs_root *root = BTRFS_I(dir)->root; | 4211 | struct btrfs_root *root = BTRFS_I(dir)->root; |
4206 | struct btrfs_trans_handle *trans; | 4212 | struct btrfs_trans_handle *trans; |
4213 | u64 last_unlink_trans; | ||
4207 | 4214 | ||
4208 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) | 4215 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) |
4209 | return -ENOTEMPTY; | 4216 | return -ENOTEMPTY; |
@@ -4226,11 +4233,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
4226 | if (err) | 4233 | if (err) |
4227 | goto out; | 4234 | goto out; |
4228 | 4235 | ||
4236 | last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; | ||
4237 | |||
4229 | /* now the directory is empty */ | 4238 | /* now the directory is empty */ |
4230 | err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), | 4239 | err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), |
4231 | dentry->d_name.name, dentry->d_name.len); | 4240 | dentry->d_name.name, dentry->d_name.len); |
4232 | if (!err) | 4241 | if (!err) { |
4233 | btrfs_i_size_write(inode, 0); | 4242 | btrfs_i_size_write(inode, 0); |
4243 | /* | ||
4244 | * Propagate the last_unlink_trans value of the deleted dir to | ||
4245 | * its parent directory. This is to prevent an unrecoverable | ||
4246 | * log tree in the case we do something like this: | ||
4247 | * 1) create dir foo | ||
4248 | * 2) create snapshot under dir foo | ||
4249 | * 3) delete the snapshot | ||
4250 | * 4) rmdir foo | ||
4251 | * 5) mkdir foo | ||
4252 | * 6) fsync foo or some file inside foo | ||
4253 | */ | ||
4254 | if (last_unlink_trans >= trans->transid) | ||
4255 | BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; | ||
4256 | } | ||
4234 | out: | 4257 | out: |
4235 | btrfs_end_transaction(trans, root); | 4258 | btrfs_end_transaction(trans, root); |
4236 | btrfs_btree_balance_dirty(root); | 4259 | btrfs_btree_balance_dirty(root); |
@@ -5606,7 +5629,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |||
5606 | return ERR_PTR(-ENOMEM); | 5629 | return ERR_PTR(-ENOMEM); |
5607 | 5630 | ||
5608 | if (inode->i_state & I_NEW) { | 5631 | if (inode->i_state & I_NEW) { |
5609 | btrfs_read_locked_inode(inode); | 5632 | int ret; |
5633 | |||
5634 | ret = btrfs_read_locked_inode(inode); | ||
5610 | if (!is_bad_inode(inode)) { | 5635 | if (!is_bad_inode(inode)) { |
5611 | inode_tree_add(inode); | 5636 | inode_tree_add(inode); |
5612 | unlock_new_inode(inode); | 5637 | unlock_new_inode(inode); |
@@ -5615,7 +5640,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |||
5615 | } else { | 5640 | } else { |
5616 | unlock_new_inode(inode); | 5641 | unlock_new_inode(inode); |
5617 | iput(inode); | 5642 | iput(inode); |
5618 | inode = ERR_PTR(-ESTALE); | 5643 | ASSERT(ret < 0); |
5644 | inode = ERR_PTR(ret < 0 ? ret : -ESTALE); | ||
5619 | } | 5645 | } |
5620 | } | 5646 | } |
5621 | 5647 | ||
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index b71dd298385c..efe129fe2678 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -231,7 +231,6 @@ struct pending_dir_move { | |||
231 | u64 parent_ino; | 231 | u64 parent_ino; |
232 | u64 ino; | 232 | u64 ino; |
233 | u64 gen; | 233 | u64 gen; |
234 | bool is_orphan; | ||
235 | struct list_head update_refs; | 234 | struct list_head update_refs; |
236 | }; | 235 | }; |
237 | 236 | ||
@@ -274,6 +273,39 @@ struct name_cache_entry { | |||
274 | char name[]; | 273 | char name[]; |
275 | }; | 274 | }; |
276 | 275 | ||
276 | static void inconsistent_snapshot_error(struct send_ctx *sctx, | ||
277 | enum btrfs_compare_tree_result result, | ||
278 | const char *what) | ||
279 | { | ||
280 | const char *result_string; | ||
281 | |||
282 | switch (result) { | ||
283 | case BTRFS_COMPARE_TREE_NEW: | ||
284 | result_string = "new"; | ||
285 | break; | ||
286 | case BTRFS_COMPARE_TREE_DELETED: | ||
287 | result_string = "deleted"; | ||
288 | break; | ||
289 | case BTRFS_COMPARE_TREE_CHANGED: | ||
290 | result_string = "updated"; | ||
291 | break; | ||
292 | case BTRFS_COMPARE_TREE_SAME: | ||
293 | ASSERT(0); | ||
294 | result_string = "unchanged"; | ||
295 | break; | ||
296 | default: | ||
297 | ASSERT(0); | ||
298 | result_string = "unexpected"; | ||
299 | } | ||
300 | |||
301 | btrfs_err(sctx->send_root->fs_info, | ||
302 | "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu", | ||
303 | result_string, what, sctx->cmp_key->objectid, | ||
304 | sctx->send_root->root_key.objectid, | ||
305 | (sctx->parent_root ? | ||
306 | sctx->parent_root->root_key.objectid : 0)); | ||
307 | } | ||
308 | |||
277 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); | 309 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); |
278 | 310 | ||
279 | static struct waiting_dir_move * | 311 | static struct waiting_dir_move * |
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | |||
1861 | * was already unlinked/moved, so we can safely assume that we will not | 1893 | * was already unlinked/moved, so we can safely assume that we will not |
1862 | * overwrite anything at this point in time. | 1894 | * overwrite anything at this point in time. |
1863 | */ | 1895 | */ |
1864 | if (other_inode > sctx->send_progress) { | 1896 | if (other_inode > sctx->send_progress || |
1897 | is_waiting_for_move(sctx, other_inode)) { | ||
1865 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, | 1898 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, |
1866 | who_gen, NULL, NULL, NULL, NULL); | 1899 | who_gen, NULL, NULL, NULL, NULL); |
1867 | if (ret < 0) | 1900 | if (ret < 0) |
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino); | |||
2502 | key.type = BTRFS_INODE_ITEM_KEY; | 2535 | key.type = BTRFS_INODE_ITEM_KEY; |
2503 | key.offset = 0; | 2536 | key.offset = 0; |
2504 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | 2537 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); |
2538 | if (ret > 0) | ||
2539 | ret = -ENOENT; | ||
2505 | if (ret < 0) | 2540 | if (ret < 0) |
2506 | goto out; | 2541 | goto out; |
2507 | 2542 | ||
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, | |||
2947 | } | 2982 | } |
2948 | 2983 | ||
2949 | if (loc.objectid > send_progress) { | 2984 | if (loc.objectid > send_progress) { |
2985 | struct orphan_dir_info *odi; | ||
2986 | |||
2987 | odi = get_orphan_dir_info(sctx, dir); | ||
2988 | free_orphan_dir_info(sctx, odi); | ||
2950 | ret = 0; | 2989 | ret = 0; |
2951 | goto out; | 2990 | goto out; |
2952 | } | 2991 | } |
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx, | |||
3047 | pm->parent_ino = parent_ino; | 3086 | pm->parent_ino = parent_ino; |
3048 | pm->ino = ino; | 3087 | pm->ino = ino; |
3049 | pm->gen = ino_gen; | 3088 | pm->gen = ino_gen; |
3050 | pm->is_orphan = is_orphan; | ||
3051 | INIT_LIST_HEAD(&pm->list); | 3089 | INIT_LIST_HEAD(&pm->list); |
3052 | INIT_LIST_HEAD(&pm->update_refs); | 3090 | INIT_LIST_HEAD(&pm->update_refs); |
3053 | RB_CLEAR_NODE(&pm->node); | 3091 | RB_CLEAR_NODE(&pm->node); |
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, | |||
3113 | return NULL; | 3151 | return NULL; |
3114 | } | 3152 | } |
3115 | 3153 | ||
3154 | static int path_loop(struct send_ctx *sctx, struct fs_path *name, | ||
3155 | u64 ino, u64 gen, u64 *ancestor_ino) | ||
3156 | { | ||
3157 | int ret = 0; | ||
3158 | u64 parent_inode = 0; | ||
3159 | u64 parent_gen = 0; | ||
3160 | u64 start_ino = ino; | ||
3161 | |||
3162 | *ancestor_ino = 0; | ||
3163 | while (ino != BTRFS_FIRST_FREE_OBJECTID) { | ||
3164 | fs_path_reset(name); | ||
3165 | |||
3166 | if (is_waiting_for_rm(sctx, ino)) | ||
3167 | break; | ||
3168 | if (is_waiting_for_move(sctx, ino)) { | ||
3169 | if (*ancestor_ino == 0) | ||
3170 | *ancestor_ino = ino; | ||
3171 | ret = get_first_ref(sctx->parent_root, ino, | ||
3172 | &parent_inode, &parent_gen, name); | ||
3173 | } else { | ||
3174 | ret = __get_cur_name_and_parent(sctx, ino, gen, | ||
3175 | &parent_inode, | ||
3176 | &parent_gen, name); | ||
3177 | if (ret > 0) { | ||
3178 | ret = 0; | ||
3179 | break; | ||
3180 | } | ||
3181 | } | ||
3182 | if (ret < 0) | ||
3183 | break; | ||
3184 | if (parent_inode == start_ino) { | ||
3185 | ret = 1; | ||
3186 | if (*ancestor_ino == 0) | ||
3187 | *ancestor_ino = ino; | ||
3188 | break; | ||
3189 | } | ||
3190 | ino = parent_inode; | ||
3191 | gen = parent_gen; | ||
3192 | } | ||
3193 | return ret; | ||
3194 | } | ||
3195 | |||
3116 | static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | 3196 | static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) |
3117 | { | 3197 | { |
3118 | struct fs_path *from_path = NULL; | 3198 | struct fs_path *from_path = NULL; |
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3123 | u64 parent_ino, parent_gen; | 3203 | u64 parent_ino, parent_gen; |
3124 | struct waiting_dir_move *dm = NULL; | 3204 | struct waiting_dir_move *dm = NULL; |
3125 | u64 rmdir_ino = 0; | 3205 | u64 rmdir_ino = 0; |
3206 | u64 ancestor; | ||
3207 | bool is_orphan; | ||
3126 | int ret; | 3208 | int ret; |
3127 | 3209 | ||
3128 | name = fs_path_alloc(); | 3210 | name = fs_path_alloc(); |
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3135 | dm = get_waiting_dir_move(sctx, pm->ino); | 3217 | dm = get_waiting_dir_move(sctx, pm->ino); |
3136 | ASSERT(dm); | 3218 | ASSERT(dm); |
3137 | rmdir_ino = dm->rmdir_ino; | 3219 | rmdir_ino = dm->rmdir_ino; |
3220 | is_orphan = dm->orphanized; | ||
3138 | free_waiting_dir_move(sctx, dm); | 3221 | free_waiting_dir_move(sctx, dm); |
3139 | 3222 | ||
3140 | if (pm->is_orphan) { | 3223 | if (is_orphan) { |
3141 | ret = gen_unique_name(sctx, pm->ino, | 3224 | ret = gen_unique_name(sctx, pm->ino, |
3142 | pm->gen, from_path); | 3225 | pm->gen, from_path); |
3143 | } else { | 3226 | } else { |
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3155 | goto out; | 3238 | goto out; |
3156 | 3239 | ||
3157 | sctx->send_progress = sctx->cur_ino + 1; | 3240 | sctx->send_progress = sctx->cur_ino + 1; |
3241 | ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); | ||
3242 | if (ret < 0) | ||
3243 | goto out; | ||
3244 | if (ret) { | ||
3245 | LIST_HEAD(deleted_refs); | ||
3246 | ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); | ||
3247 | ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, | ||
3248 | &pm->update_refs, &deleted_refs, | ||
3249 | is_orphan); | ||
3250 | if (ret < 0) | ||
3251 | goto out; | ||
3252 | if (rmdir_ino) { | ||
3253 | dm = get_waiting_dir_move(sctx, pm->ino); | ||
3254 | ASSERT(dm); | ||
3255 | dm->rmdir_ino = rmdir_ino; | ||
3256 | } | ||
3257 | goto out; | ||
3258 | } | ||
3158 | fs_path_reset(name); | 3259 | fs_path_reset(name); |
3159 | to_path = name; | 3260 | to_path = name; |
3160 | name = NULL; | 3261 | name = NULL; |
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3174 | /* already deleted */ | 3275 | /* already deleted */ |
3175 | goto finish; | 3276 | goto finish; |
3176 | } | 3277 | } |
3177 | ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); | 3278 | ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino); |
3178 | if (ret < 0) | 3279 | if (ret < 0) |
3179 | goto out; | 3280 | goto out; |
3180 | if (!ret) | 3281 | if (!ret) |
@@ -3204,8 +3305,18 @@ finish: | |||
3204 | * and old parent(s). | 3305 | * and old parent(s). |
3205 | */ | 3306 | */ |
3206 | list_for_each_entry(cur, &pm->update_refs, list) { | 3307 | list_for_each_entry(cur, &pm->update_refs, list) { |
3207 | if (cur->dir == rmdir_ino) | 3308 | /* |
3309 | * The parent inode might have been deleted in the send snapshot | ||
3310 | */ | ||
3311 | ret = get_inode_info(sctx->send_root, cur->dir, NULL, | ||
3312 | NULL, NULL, NULL, NULL, NULL); | ||
3313 | if (ret == -ENOENT) { | ||
3314 | ret = 0; | ||
3208 | continue; | 3315 | continue; |
3316 | } | ||
3317 | if (ret < 0) | ||
3318 | goto out; | ||
3319 | |||
3209 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); | 3320 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); |
3210 | if (ret < 0) | 3321 | if (ret < 0) |
3211 | goto out; | 3322 | goto out; |
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, | |||
3325 | u64 left_gen; | 3436 | u64 left_gen; |
3326 | u64 right_gen; | 3437 | u64 right_gen; |
3327 | int ret = 0; | 3438 | int ret = 0; |
3439 | struct waiting_dir_move *wdm; | ||
3328 | 3440 | ||
3329 | if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) | 3441 | if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) |
3330 | return 0; | 3442 | return 0; |
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, | |||
3383 | goto out; | 3495 | goto out; |
3384 | } | 3496 | } |
3385 | 3497 | ||
3386 | if (is_waiting_for_move(sctx, di_key.objectid)) { | 3498 | wdm = get_waiting_dir_move(sctx, di_key.objectid); |
3499 | if (wdm && !wdm->orphanized) { | ||
3387 | ret = add_pending_dir_move(sctx, | 3500 | ret = add_pending_dir_move(sctx, |
3388 | sctx->cur_ino, | 3501 | sctx->cur_ino, |
3389 | sctx->cur_inode_gen, | 3502 | sctx->cur_inode_gen, |
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3470 | ret = is_ancestor(sctx->parent_root, | 3583 | ret = is_ancestor(sctx->parent_root, |
3471 | sctx->cur_ino, sctx->cur_inode_gen, | 3584 | sctx->cur_ino, sctx->cur_inode_gen, |
3472 | ino, path_before); | 3585 | ino, path_before); |
3473 | break; | 3586 | if (ret) |
3587 | break; | ||
3474 | } | 3588 | } |
3475 | 3589 | ||
3476 | fs_path_reset(path_before); | 3590 | fs_path_reset(path_before); |
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3643 | goto out; | 3757 | goto out; |
3644 | if (ret) { | 3758 | if (ret) { |
3645 | struct name_cache_entry *nce; | 3759 | struct name_cache_entry *nce; |
3760 | struct waiting_dir_move *wdm; | ||
3646 | 3761 | ||
3647 | ret = orphanize_inode(sctx, ow_inode, ow_gen, | 3762 | ret = orphanize_inode(sctx, ow_inode, ow_gen, |
3648 | cur->full_path); | 3763 | cur->full_path); |
3649 | if (ret < 0) | 3764 | if (ret < 0) |
3650 | goto out; | 3765 | goto out; |
3766 | |||
3767 | /* | ||
3768 | * If ow_inode has its rename operation delayed | ||
3769 | * make sure that its orphanized name is used in | ||
3770 | * the source path when performing its rename | ||
3771 | * operation. | ||
3772 | */ | ||
3773 | if (is_waiting_for_move(sctx, ow_inode)) { | ||
3774 | wdm = get_waiting_dir_move(sctx, | ||
3775 | ow_inode); | ||
3776 | ASSERT(wdm); | ||
3777 | wdm->orphanized = true; | ||
3778 | } | ||
3779 | |||
3651 | /* | 3780 | /* |
3652 | * Make sure we clear our orphanized inode's | 3781 | * Make sure we clear our orphanized inode's |
3653 | * name from the name cache. This is because the | 3782 | * name from the name cache. This is because the |
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3663 | name_cache_delete(sctx, nce); | 3792 | name_cache_delete(sctx, nce); |
3664 | kfree(nce); | 3793 | kfree(nce); |
3665 | } | 3794 | } |
3795 | |||
3796 | /* | ||
3797 | * ow_inode might currently be an ancestor of | ||
3798 | * cur_ino, therefore compute valid_path (the | ||
3799 | * current path of cur_ino) again because it | ||
3800 | * might contain the pre-orphanization name of | ||
3801 | * ow_inode, which is no longer valid. | ||
3802 | */ | ||
3803 | fs_path_reset(valid_path); | ||
3804 | ret = get_cur_path(sctx, sctx->cur_ino, | ||
3805 | sctx->cur_inode_gen, valid_path); | ||
3806 | if (ret < 0) | ||
3807 | goto out; | ||
3666 | } else { | 3808 | } else { |
3667 | ret = send_unlink(sctx, cur->full_path); | 3809 | ret = send_unlink(sctx, cur->full_path); |
3668 | if (ret < 0) | 3810 | if (ret < 0) |
@@ -5602,7 +5744,10 @@ static int changed_ref(struct send_ctx *sctx, | |||
5602 | { | 5744 | { |
5603 | int ret = 0; | 5745 | int ret = 0; |
5604 | 5746 | ||
5605 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 5747 | if (sctx->cur_ino != sctx->cmp_key->objectid) { |
5748 | inconsistent_snapshot_error(sctx, result, "reference"); | ||
5749 | return -EIO; | ||
5750 | } | ||
5606 | 5751 | ||
5607 | if (!sctx->cur_inode_new_gen && | 5752 | if (!sctx->cur_inode_new_gen && |
5608 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { | 5753 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { |
@@ -5627,7 +5772,10 @@ static int changed_xattr(struct send_ctx *sctx, | |||
5627 | { | 5772 | { |
5628 | int ret = 0; | 5773 | int ret = 0; |
5629 | 5774 | ||
5630 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 5775 | if (sctx->cur_ino != sctx->cmp_key->objectid) { |
5776 | inconsistent_snapshot_error(sctx, result, "xattr"); | ||
5777 | return -EIO; | ||
5778 | } | ||
5631 | 5779 | ||
5632 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 5780 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { |
5633 | if (result == BTRFS_COMPARE_TREE_NEW) | 5781 | if (result == BTRFS_COMPARE_TREE_NEW) |
@@ -5651,7 +5799,10 @@ static int changed_extent(struct send_ctx *sctx, | |||
5651 | { | 5799 | { |
5652 | int ret = 0; | 5800 | int ret = 0; |
5653 | 5801 | ||
5654 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 5802 | if (sctx->cur_ino != sctx->cmp_key->objectid) { |
5803 | inconsistent_snapshot_error(sctx, result, "extent"); | ||
5804 | return -EIO; | ||
5805 | } | ||
5655 | 5806 | ||
5656 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 5807 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { |
5657 | if (result != BTRFS_COMPARE_TREE_DELETED) | 5808 | if (result != BTRFS_COMPARE_TREE_DELETED) |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d31a0c4f56be..fff3f3efa436 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -4469,7 +4469,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, | |||
4469 | static int btrfs_check_ref_name_override(struct extent_buffer *eb, | 4469 | static int btrfs_check_ref_name_override(struct extent_buffer *eb, |
4470 | const int slot, | 4470 | const int slot, |
4471 | const struct btrfs_key *key, | 4471 | const struct btrfs_key *key, |
4472 | struct inode *inode) | 4472 | struct inode *inode, |
4473 | u64 *other_ino) | ||
4473 | { | 4474 | { |
4474 | int ret; | 4475 | int ret; |
4475 | struct btrfs_path *search_path; | 4476 | struct btrfs_path *search_path; |
@@ -4528,7 +4529,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb, | |||
4528 | search_path, parent, | 4529 | search_path, parent, |
4529 | name, this_name_len, 0); | 4530 | name, this_name_len, 0); |
4530 | if (di && !IS_ERR(di)) { | 4531 | if (di && !IS_ERR(di)) { |
4531 | ret = 1; | 4532 | struct btrfs_key di_key; |
4533 | |||
4534 | btrfs_dir_item_key_to_cpu(search_path->nodes[0], | ||
4535 | di, &di_key); | ||
4536 | if (di_key.type == BTRFS_INODE_ITEM_KEY) { | ||
4537 | ret = 1; | ||
4538 | *other_ino = di_key.objectid; | ||
4539 | } else { | ||
4540 | ret = -EAGAIN; | ||
4541 | } | ||
4532 | goto out; | 4542 | goto out; |
4533 | } else if (IS_ERR(di)) { | 4543 | } else if (IS_ERR(di)) { |
4534 | ret = PTR_ERR(di); | 4544 | ret = PTR_ERR(di); |
@@ -4722,16 +4732,71 @@ again: | |||
4722 | if ((min_key.type == BTRFS_INODE_REF_KEY || | 4732 | if ((min_key.type == BTRFS_INODE_REF_KEY || |
4723 | min_key.type == BTRFS_INODE_EXTREF_KEY) && | 4733 | min_key.type == BTRFS_INODE_EXTREF_KEY) && |
4724 | BTRFS_I(inode)->generation == trans->transid) { | 4734 | BTRFS_I(inode)->generation == trans->transid) { |
4735 | u64 other_ino = 0; | ||
4736 | |||
4725 | ret = btrfs_check_ref_name_override(path->nodes[0], | 4737 | ret = btrfs_check_ref_name_override(path->nodes[0], |
4726 | path->slots[0], | 4738 | path->slots[0], |
4727 | &min_key, inode); | 4739 | &min_key, inode, |
4740 | &other_ino); | ||
4728 | if (ret < 0) { | 4741 | if (ret < 0) { |
4729 | err = ret; | 4742 | err = ret; |
4730 | goto out_unlock; | 4743 | goto out_unlock; |
4731 | } else if (ret > 0) { | 4744 | } else if (ret > 0) { |
4732 | err = 1; | 4745 | struct btrfs_key inode_key; |
4733 | btrfs_set_log_full_commit(root->fs_info, trans); | 4746 | struct inode *other_inode; |
4734 | goto out_unlock; | 4747 | |
4748 | if (ins_nr > 0) { | ||
4749 | ins_nr++; | ||
4750 | } else { | ||
4751 | ins_nr = 1; | ||
4752 | ins_start_slot = path->slots[0]; | ||
4753 | } | ||
4754 | ret = copy_items(trans, inode, dst_path, path, | ||
4755 | &last_extent, ins_start_slot, | ||
4756 | ins_nr, inode_only, | ||
4757 | logged_isize); | ||
4758 | if (ret < 0) { | ||
4759 | err = ret; | ||
4760 | goto out_unlock; | ||
4761 | } | ||
4762 | ins_nr = 0; | ||
4763 | btrfs_release_path(path); | ||
4764 | inode_key.objectid = other_ino; | ||
4765 | inode_key.type = BTRFS_INODE_ITEM_KEY; | ||
4766 | inode_key.offset = 0; | ||
4767 | other_inode = btrfs_iget(root->fs_info->sb, | ||
4768 | &inode_key, root, | ||
4769 | NULL); | ||
4770 | /* | ||
4771 | * If the other inode that had a conflicting dir | ||
4772 | * entry was deleted in the current transaction, | ||
4773 | * we don't need to do more work nor fallback to | ||
4774 | * a transaction commit. | ||
4775 | */ | ||
4776 | if (IS_ERR(other_inode) && | ||
4777 | PTR_ERR(other_inode) == -ENOENT) { | ||
4778 | goto next_key; | ||
4779 | } else if (IS_ERR(other_inode)) { | ||
4780 | err = PTR_ERR(other_inode); | ||
4781 | goto out_unlock; | ||
4782 | } | ||
4783 | /* | ||
4784 | * We are safe logging the other inode without | ||
4785 | * acquiring its i_mutex as long as we log with | ||
4786 | * the LOG_INODE_EXISTS mode. We're safe against | ||
4787 | * concurrent renames of the other inode as well | ||
4788 | * because during a rename we pin the log and | ||
4789 | * update the log with the new name before we | ||
4790 | * unpin it. | ||
4791 | */ | ||
4792 | err = btrfs_log_inode(trans, root, other_inode, | ||
4793 | LOG_INODE_EXISTS, | ||
4794 | 0, LLONG_MAX, ctx); | ||
4795 | iput(other_inode); | ||
4796 | if (err) | ||
4797 | goto out_unlock; | ||
4798 | else | ||
4799 | goto next_key; | ||
4735 | } | 4800 | } |
4736 | } | 4801 | } |
4737 | 4802 | ||
@@ -4799,7 +4864,7 @@ next_slot: | |||
4799 | ins_nr = 0; | 4864 | ins_nr = 0; |
4800 | } | 4865 | } |
4801 | btrfs_release_path(path); | 4866 | btrfs_release_path(path); |
4802 | 4867 | next_key: | |
4803 | if (min_key.offset < (u64)-1) { | 4868 | if (min_key.offset < (u64)-1) { |
4804 | min_key.offset++; | 4869 | min_key.offset++; |
4805 | } else if (min_key.type < max_key.type) { | 4870 | } else if (min_key.type < max_key.type) { |
@@ -4993,8 +5058,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, | |||
4993 | if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) | 5058 | if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) |
4994 | break; | 5059 | break; |
4995 | 5060 | ||
4996 | if (IS_ROOT(parent)) | 5061 | if (IS_ROOT(parent)) { |
5062 | inode = d_inode(parent); | ||
5063 | if (btrfs_must_commit_transaction(trans, inode)) | ||
5064 | ret = 1; | ||
4997 | break; | 5065 | break; |
5066 | } | ||
4998 | 5067 | ||
4999 | parent = dget_parent(parent); | 5068 | parent = dget_parent(parent); |
5000 | dput(old_parent); | 5069 | dput(old_parent); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 99115cae1652..16e6ded0b7f2 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci, | |||
1347 | { | 1347 | { |
1348 | struct inode *inode = &ci->vfs_inode; | 1348 | struct inode *inode = &ci->vfs_inode; |
1349 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; | 1349 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; |
1350 | struct ceph_mds_session *session = *psession; | 1350 | struct ceph_mds_session *session = NULL; |
1351 | int mds; | 1351 | int mds; |
1352 | |||
1352 | dout("ceph_flush_snaps %p\n", inode); | 1353 | dout("ceph_flush_snaps %p\n", inode); |
1354 | if (psession) | ||
1355 | session = *psession; | ||
1353 | retry: | 1356 | retry: |
1354 | spin_lock(&ci->i_ceph_lock); | 1357 | spin_lock(&ci->i_ceph_lock); |
1355 | if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { | 1358 | if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fa59a85226b2..f72d4ae303b2 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, | |||
2759 | } else { | 2759 | } else { |
2760 | path = NULL; | 2760 | path = NULL; |
2761 | pathlen = 0; | 2761 | pathlen = 0; |
2762 | pathbase = 0; | ||
2762 | } | 2763 | } |
2763 | 2764 | ||
2764 | spin_lock(&ci->i_ceph_lock); | 2765 | spin_lock(&ci->i_ceph_lock); |
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 33da841a21bb..6f4752734804 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c | |||
@@ -338,6 +338,8 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata) | |||
338 | case 0: | 338 | case 0: |
339 | break; | 339 | break; |
340 | case -NFS4ERR_EXPIRED: | 340 | case -NFS4ERR_EXPIRED: |
341 | case -NFS4ERR_ADMIN_REVOKED: | ||
342 | case -NFS4ERR_DELEG_REVOKED: | ||
341 | case -NFS4ERR_STALE_STATEID: | 343 | case -NFS4ERR_STALE_STATEID: |
342 | case -NFS4ERR_OLD_STATEID: | 344 | case -NFS4ERR_OLD_STATEID: |
343 | case -NFS4ERR_BAD_STATEID: | 345 | case -NFS4ERR_BAD_STATEID: |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 324bfdc21250..9bf64eacba5b 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -396,6 +396,10 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *); | |||
396 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); | 396 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); |
397 | extern void nfs4_kill_renewd(struct nfs_client *); | 397 | extern void nfs4_kill_renewd(struct nfs_client *); |
398 | extern void nfs4_renew_state(struct work_struct *); | 398 | extern void nfs4_renew_state(struct work_struct *); |
399 | extern void nfs4_set_lease_period(struct nfs_client *clp, | ||
400 | unsigned long lease, | ||
401 | unsigned long lastrenewed); | ||
402 | |||
399 | 403 | ||
400 | /* nfs4state.c */ | 404 | /* nfs4state.c */ |
401 | struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp); | 405 | struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a036e93bdf96..1949bbd806eb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -4237,12 +4237,9 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str | |||
4237 | err = _nfs4_do_fsinfo(server, fhandle, fsinfo); | 4237 | err = _nfs4_do_fsinfo(server, fhandle, fsinfo); |
4238 | trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); | 4238 | trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); |
4239 | if (err == 0) { | 4239 | if (err == 0) { |
4240 | struct nfs_client *clp = server->nfs_client; | 4240 | nfs4_set_lease_period(server->nfs_client, |
4241 | 4241 | fsinfo->lease_time * HZ, | |
4242 | spin_lock(&clp->cl_lock); | 4242 | now); |
4243 | clp->cl_lease_time = fsinfo->lease_time * HZ; | ||
4244 | clp->cl_last_renewal = now; | ||
4245 | spin_unlock(&clp->cl_lock); | ||
4246 | break; | 4243 | break; |
4247 | } | 4244 | } |
4248 | err = nfs4_handle_exception(server, err, &exception); | 4245 | err = nfs4_handle_exception(server, err, &exception); |
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index e1ba58c3d1ad..82e77198d17e 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
@@ -136,6 +136,26 @@ nfs4_kill_renewd(struct nfs_client *clp) | |||
136 | cancel_delayed_work_sync(&clp->cl_renewd); | 136 | cancel_delayed_work_sync(&clp->cl_renewd); |
137 | } | 137 | } |
138 | 138 | ||
139 | /** | ||
140 | * nfs4_set_lease_period - Sets the lease period on a nfs_client | ||
141 | * | ||
142 | * @clp: pointer to nfs_client | ||
143 | * @lease: new value for lease period | ||
144 | * @lastrenewed: time at which lease was last renewed | ||
145 | */ | ||
146 | void nfs4_set_lease_period(struct nfs_client *clp, | ||
147 | unsigned long lease, | ||
148 | unsigned long lastrenewed) | ||
149 | { | ||
150 | spin_lock(&clp->cl_lock); | ||
151 | clp->cl_lease_time = lease; | ||
152 | clp->cl_last_renewal = lastrenewed; | ||
153 | spin_unlock(&clp->cl_lock); | ||
154 | |||
155 | /* Cap maximum reconnect timeout at 1/2 lease period */ | ||
156 | rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1); | ||
157 | } | ||
158 | |||
139 | /* | 159 | /* |
140 | * Local variables: | 160 | * Local variables: |
141 | * c-basic-offset: 8 | 161 | * c-basic-offset: 8 |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 834b875900d6..cada00aa5096 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -277,20 +277,17 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp) | |||
277 | { | 277 | { |
278 | int status; | 278 | int status; |
279 | struct nfs_fsinfo fsinfo; | 279 | struct nfs_fsinfo fsinfo; |
280 | unsigned long now; | ||
280 | 281 | ||
281 | if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { | 282 | if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { |
282 | nfs4_schedule_state_renewal(clp); | 283 | nfs4_schedule_state_renewal(clp); |
283 | return 0; | 284 | return 0; |
284 | } | 285 | } |
285 | 286 | ||
287 | now = jiffies; | ||
286 | status = nfs4_proc_get_lease_time(clp, &fsinfo); | 288 | status = nfs4_proc_get_lease_time(clp, &fsinfo); |
287 | if (status == 0) { | 289 | if (status == 0) { |
288 | /* Update lease time and schedule renewal */ | 290 | nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now); |
289 | spin_lock(&clp->cl_lock); | ||
290 | clp->cl_lease_time = fsinfo.lease_time * HZ; | ||
291 | clp->cl_last_renewal = jiffies; | ||
292 | spin_unlock(&clp->cl_lock); | ||
293 | |||
294 | nfs4_schedule_state_renewal(clp); | 291 | nfs4_schedule_state_renewal(clp); |
295 | } | 292 | } |
296 | 293 | ||
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 09e18fdf61e5..b9a8c813e5e6 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
46 | cached = 0; | 46 | cached = 0; |
47 | 47 | ||
48 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) | 48 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) |
49 | pages[lru] = global_page_state(NR_LRU_BASE + lru); | 49 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
50 | 50 | ||
51 | available = si_mem_available(); | 51 | available = si_mem_available(); |
52 | 52 | ||
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 54a8e65e18b6..7d026bf27713 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h | |||
@@ -25,7 +25,20 @@ | |||
25 | #include <asm-generic/qrwlock_types.h> | 25 | #include <asm-generic/qrwlock_types.h> |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Writer states & reader shift and bias | 28 | * Writer states & reader shift and bias. |
29 | * | ||
30 | * | +0 | +1 | +2 | +3 | | ||
31 | * ----+----+----+----+----+ | ||
32 | * LE | 78 | 56 | 34 | 12 | 0x12345678 | ||
33 | * ----+----+----+----+----+ | ||
34 | * | wr | rd | | ||
35 | * +----+----+----+----+ | ||
36 | * | ||
37 | * ----+----+----+----+----+ | ||
38 | * BE | 12 | 34 | 56 | 78 | 0x12345678 | ||
39 | * ----+----+----+----+----+ | ||
40 | * | rd | wr | | ||
41 | * +----+----+----+----+ | ||
29 | */ | 42 | */ |
30 | #define _QW_WAITING 1 /* A writer is waiting */ | 43 | #define _QW_WAITING 1 /* A writer is waiting */ |
31 | #define _QW_LOCKED 0xff /* A writer holds the lock */ | 44 | #define _QW_LOCKED 0xff /* A writer holds the lock */ |
@@ -134,12 +147,22 @@ static inline void queued_read_unlock(struct qrwlock *lock) | |||
134 | } | 147 | } |
135 | 148 | ||
136 | /** | 149 | /** |
150 | * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock | ||
151 | * @lock : Pointer to queue rwlock structure | ||
152 | * Return: the write byte address of a queue rwlock | ||
153 | */ | ||
154 | static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) | ||
155 | { | ||
156 | return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); | ||
157 | } | ||
158 | |||
159 | /** | ||
137 | * queued_write_unlock - release write lock of a queue rwlock | 160 | * queued_write_unlock - release write lock of a queue rwlock |
138 | * @lock : Pointer to queue rwlock structure | 161 | * @lock : Pointer to queue rwlock structure |
139 | */ | 162 | */ |
140 | static inline void queued_write_unlock(struct qrwlock *lock) | 163 | static inline void queued_write_unlock(struct qrwlock *lock) |
141 | { | 164 | { |
142 | smp_store_release((u8 *)&lock->cnts, 0); | 165 | smp_store_release(__qrwlock_write_byte(lock), 0); |
143 | } | 166 | } |
144 | 167 | ||
145 | /* | 168 | /* |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 4f0bfe5912b2..e8c81fbd5f9c 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -270,6 +270,8 @@ enum { | |||
270 | MSI_FLAG_MULTI_PCI_MSI = (1 << 2), | 270 | MSI_FLAG_MULTI_PCI_MSI = (1 << 2), |
271 | /* Support PCI MSIX interrupts */ | 271 | /* Support PCI MSIX interrupts */ |
272 | MSI_FLAG_PCI_MSIX = (1 << 3), | 272 | MSI_FLAG_PCI_MSIX = (1 << 3), |
273 | /* Needs early activate, required for PCI */ | ||
274 | MSI_FLAG_ACTIVATE_EARLY = (1 << 4), | ||
273 | }; | 275 | }; |
274 | 276 | ||
275 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, | 277 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8ed4326164cc..2b6b43cc0dd5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -743,7 +743,9 @@ struct perf_event_context { | |||
743 | u64 parent_gen; | 743 | u64 parent_gen; |
744 | u64 generation; | 744 | u64 generation; |
745 | int pin_count; | 745 | int pin_count; |
746 | #ifdef CONFIG_CGROUP_PERF | ||
746 | int nr_cgroups; /* cgroup evts */ | 747 | int nr_cgroups; /* cgroup evts */ |
748 | #endif | ||
747 | void *task_ctx_data; /* pmu specific data */ | 749 | void *task_ctx_data; /* pmu specific data */ |
748 | struct rcu_head rcu_head; | 750 | struct rcu_head rcu_head; |
749 | }; | 751 | }; |
@@ -769,7 +771,9 @@ struct perf_cpu_context { | |||
769 | unsigned int hrtimer_active; | 771 | unsigned int hrtimer_active; |
770 | 772 | ||
771 | struct pmu *unique_pmu; | 773 | struct pmu *unique_pmu; |
774 | #ifdef CONFIG_CGROUP_PERF | ||
772 | struct perf_cgroup *cgrp; | 775 | struct perf_cgroup *cgrp; |
776 | #endif | ||
773 | }; | 777 | }; |
774 | 778 | ||
775 | struct perf_output_handle { | 779 | struct perf_output_handle { |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index b6810c92b8bb..5c02b0691587 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -195,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, | |||
195 | struct rpc_xprt *, | 195 | struct rpc_xprt *, |
196 | void *), | 196 | void *), |
197 | void *data); | 197 | void *data); |
198 | void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, | ||
199 | unsigned long timeo); | ||
198 | 200 | ||
199 | const char *rpc_proc_name(const struct rpc_task *task); | 201 | const char *rpc_proc_name(const struct rpc_task *task); |
200 | #endif /* __KERNEL__ */ | 202 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 5e3e1b63dbb3..a16070dd03ee 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -218,7 +218,8 @@ struct rpc_xprt { | |||
218 | struct work_struct task_cleanup; | 218 | struct work_struct task_cleanup; |
219 | struct timer_list timer; | 219 | struct timer_list timer; |
220 | unsigned long last_used, | 220 | unsigned long last_used, |
221 | idle_timeout; | 221 | idle_timeout, |
222 | max_reconnect_timeout; | ||
222 | 223 | ||
223 | /* | 224 | /* |
224 | * Send stuff | 225 | * Send stuff |
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h index 6b011c19b50f..1d57ed3d84d2 100644 --- a/include/uapi/linux/virtio_vsock.h +++ b/include/uapi/linux/virtio_vsock.h | |||
@@ -32,7 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H | 34 | #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H |
35 | #define _UAPI_LINUX_VIRTIO_VOSCK_H | 35 | #define _UAPI_LINUX_VIRTIO_VSOCK_H |
36 | 36 | ||
37 | #include <linux/types.h> | 37 | #include <linux/types.h> |
38 | #include <linux/virtio_ids.h> | 38 | #include <linux/virtio_ids.h> |
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h index cbae529b7ce0..180d526a55c3 100644 --- a/include/uapi/misc/cxl.h +++ b/include/uapi/misc/cxl.h | |||
@@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved { | |||
136 | * | 136 | * |
137 | * Of course the contents will be ABI, but that's up the AFU driver. | 137 | * Of course the contents will be ABI, but that's up the AFU driver. |
138 | */ | 138 | */ |
139 | size_t data_size; | 139 | __u32 data_size; |
140 | u8 data[]; | 140 | __u8 data[]; |
141 | }; | 141 | }; |
142 | 142 | ||
143 | struct cxl_event { | 143 | struct cxl_event { |
diff --git a/kernel/events/core.c b/kernel/events/core.c index a19550d80ab1..1903b8f3a705 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event, | |||
843 | } | 843 | } |
844 | } | 844 | } |
845 | } | 845 | } |
846 | |||
847 | /* | ||
848 | * Update cpuctx->cgrp so that it is set when first cgroup event is added and | ||
849 | * cleared when last cgroup event is removed. | ||
850 | */ | ||
851 | static inline void | ||
852 | list_update_cgroup_event(struct perf_event *event, | ||
853 | struct perf_event_context *ctx, bool add) | ||
854 | { | ||
855 | struct perf_cpu_context *cpuctx; | ||
856 | |||
857 | if (!is_cgroup_event(event)) | ||
858 | return; | ||
859 | |||
860 | if (add && ctx->nr_cgroups++) | ||
861 | return; | ||
862 | else if (!add && --ctx->nr_cgroups) | ||
863 | return; | ||
864 | /* | ||
865 | * Because cgroup events are always per-cpu events, | ||
866 | * this will always be called from the right CPU. | ||
867 | */ | ||
868 | cpuctx = __get_cpu_context(ctx); | ||
869 | cpuctx->cgrp = add ? event->cgrp : NULL; | ||
870 | } | ||
871 | |||
846 | #else /* !CONFIG_CGROUP_PERF */ | 872 | #else /* !CONFIG_CGROUP_PERF */ |
847 | 873 | ||
848 | static inline bool | 874 | static inline bool |
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event, | |||
920 | struct perf_event_context *ctx) | 946 | struct perf_event_context *ctx) |
921 | { | 947 | { |
922 | } | 948 | } |
949 | |||
950 | static inline void | ||
951 | list_update_cgroup_event(struct perf_event *event, | ||
952 | struct perf_event_context *ctx, bool add) | ||
953 | { | ||
954 | } | ||
955 | |||
923 | #endif | 956 | #endif |
924 | 957 | ||
925 | /* | 958 | /* |
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) | |||
1392 | static void | 1425 | static void |
1393 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) | 1426 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
1394 | { | 1427 | { |
1428 | |||
1395 | lockdep_assert_held(&ctx->lock); | 1429 | lockdep_assert_held(&ctx->lock); |
1396 | 1430 | ||
1397 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); | 1431 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) | |||
1412 | list_add_tail(&event->group_entry, list); | 1446 | list_add_tail(&event->group_entry, list); |
1413 | } | 1447 | } |
1414 | 1448 | ||
1415 | if (is_cgroup_event(event)) | 1449 | list_update_cgroup_event(event, ctx, true); |
1416 | ctx->nr_cgroups++; | ||
1417 | 1450 | ||
1418 | list_add_rcu(&event->event_entry, &ctx->event_list); | 1451 | list_add_rcu(&event->event_entry, &ctx->event_list); |
1419 | ctx->nr_events++; | 1452 | ctx->nr_events++; |
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event) | |||
1581 | static void | 1614 | static void |
1582 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) | 1615 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
1583 | { | 1616 | { |
1584 | struct perf_cpu_context *cpuctx; | ||
1585 | |||
1586 | WARN_ON_ONCE(event->ctx != ctx); | 1617 | WARN_ON_ONCE(event->ctx != ctx); |
1587 | lockdep_assert_held(&ctx->lock); | 1618 | lockdep_assert_held(&ctx->lock); |
1588 | 1619 | ||
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) | |||
1594 | 1625 | ||
1595 | event->attach_state &= ~PERF_ATTACH_CONTEXT; | 1626 | event->attach_state &= ~PERF_ATTACH_CONTEXT; |
1596 | 1627 | ||
1597 | if (is_cgroup_event(event)) { | 1628 | list_update_cgroup_event(event, ctx, false); |
1598 | ctx->nr_cgroups--; | ||
1599 | /* | ||
1600 | * Because cgroup events are always per-cpu events, this will | ||
1601 | * always be called from the right CPU. | ||
1602 | */ | ||
1603 | cpuctx = __get_cpu_context(ctx); | ||
1604 | /* | ||
1605 | * If there are no more cgroup events then clear cgrp to avoid | ||
1606 | * stale pointer in update_cgrp_time_from_cpuctx(). | ||
1607 | */ | ||
1608 | if (!ctx->nr_cgroups) | ||
1609 | cpuctx->cgrp = NULL; | ||
1610 | } | ||
1611 | 1629 | ||
1612 | ctx->nr_events--; | 1630 | ctx->nr_events--; |
1613 | if (event->attr.inherit_stat) | 1631 | if (event->attr.inherit_stat) |
@@ -1716,8 +1734,8 @@ static inline int pmu_filter_match(struct perf_event *event) | |||
1716 | static inline int | 1734 | static inline int |
1717 | event_filter_match(struct perf_event *event) | 1735 | event_filter_match(struct perf_event *event) |
1718 | { | 1736 | { |
1719 | return (event->cpu == -1 || event->cpu == smp_processor_id()) | 1737 | return (event->cpu == -1 || event->cpu == smp_processor_id()) && |
1720 | && perf_cgroup_match(event) && pmu_filter_match(event); | 1738 | perf_cgroup_match(event) && pmu_filter_match(event); |
1721 | } | 1739 | } |
1722 | 1740 | ||
1723 | static void | 1741 | static void |
@@ -1737,8 +1755,8 @@ event_sched_out(struct perf_event *event, | |||
1737 | * maintained, otherwise bogus information is return | 1755 | * maintained, otherwise bogus information is return |
1738 | * via read() for time_enabled, time_running: | 1756 | * via read() for time_enabled, time_running: |
1739 | */ | 1757 | */ |
1740 | if (event->state == PERF_EVENT_STATE_INACTIVE | 1758 | if (event->state == PERF_EVENT_STATE_INACTIVE && |
1741 | && !event_filter_match(event)) { | 1759 | !event_filter_match(event)) { |
1742 | delta = tstamp - event->tstamp_stopped; | 1760 | delta = tstamp - event->tstamp_stopped; |
1743 | event->tstamp_running += delta; | 1761 | event->tstamp_running += delta; |
1744 | event->tstamp_stopped = tstamp; | 1762 | event->tstamp_stopped = tstamp; |
@@ -2236,10 +2254,15 @@ perf_install_in_context(struct perf_event_context *ctx, | |||
2236 | 2254 | ||
2237 | lockdep_assert_held(&ctx->mutex); | 2255 | lockdep_assert_held(&ctx->mutex); |
2238 | 2256 | ||
2239 | event->ctx = ctx; | ||
2240 | if (event->cpu != -1) | 2257 | if (event->cpu != -1) |
2241 | event->cpu = cpu; | 2258 | event->cpu = cpu; |
2242 | 2259 | ||
2260 | /* | ||
2261 | * Ensures that if we can observe event->ctx, both the event and ctx | ||
2262 | * will be 'complete'. See perf_iterate_sb_cpu(). | ||
2263 | */ | ||
2264 | smp_store_release(&event->ctx, ctx); | ||
2265 | |||
2243 | if (!task) { | 2266 | if (!task) { |
2244 | cpu_function_call(cpu, __perf_install_in_context, event); | 2267 | cpu_function_call(cpu, __perf_install_in_context, event); |
2245 | return; | 2268 | return; |
@@ -5969,6 +5992,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) | |||
5969 | struct perf_event *event; | 5992 | struct perf_event *event; |
5970 | 5993 | ||
5971 | list_for_each_entry_rcu(event, &pel->list, sb_list) { | 5994 | list_for_each_entry_rcu(event, &pel->list, sb_list) { |
5995 | /* | ||
5996 | * Skip events that are not fully formed yet; ensure that | ||
5997 | * if we observe event->ctx, both event and ctx will be | ||
5998 | * complete enough. See perf_install_in_context(). | ||
5999 | */ | ||
6000 | if (!smp_load_acquire(&event->ctx)) | ||
6001 | continue; | ||
6002 | |||
5972 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 6003 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
5973 | continue; | 6004 | continue; |
5974 | if (!event_filter_match(event)) | 6005 | if (!event_filter_match(event)) |
diff --git a/kernel/futex.c b/kernel/futex.c index 33664f70e2d2..46cb3a301bc1 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled; | |||
179 | * Futex flags used to encode options to functions and preserve them across | 179 | * Futex flags used to encode options to functions and preserve them across |
180 | * restarts. | 180 | * restarts. |
181 | */ | 181 | */ |
182 | #define FLAGS_SHARED 0x01 | 182 | #ifdef CONFIG_MMU |
183 | # define FLAGS_SHARED 0x01 | ||
184 | #else | ||
185 | /* | ||
186 | * NOMMU does not have per process address space. Let the compiler optimize | ||
187 | * code away. | ||
188 | */ | ||
189 | # define FLAGS_SHARED 0x00 | ||
190 | #endif | ||
183 | #define FLAGS_CLOCKRT 0x02 | 191 | #define FLAGS_CLOCKRT 0x02 |
184 | #define FLAGS_HAS_TIMEOUT 0x04 | 192 | #define FLAGS_HAS_TIMEOUT 0x04 |
185 | 193 | ||
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key) | |||
405 | if (!key->both.ptr) | 413 | if (!key->both.ptr) |
406 | return; | 414 | return; |
407 | 415 | ||
416 | /* | ||
417 | * On MMU less systems futexes are always "private" as there is no per | ||
418 | * process address space. We need the smp wmb nevertheless - yes, | ||
419 | * arch/blackfin has MMU less SMP ... | ||
420 | */ | ||
421 | if (!IS_ENABLED(CONFIG_MMU)) { | ||
422 | smp_mb(); /* explicit smp_mb(); (B) */ | ||
423 | return; | ||
424 | } | ||
425 | |||
408 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { | 426 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
409 | case FUT_OFF_INODE: | 427 | case FUT_OFF_INODE: |
410 | ihold(key->shared.inode); /* implies smp_mb(); (B) */ | 428 | ihold(key->shared.inode); /* implies smp_mb(); (B) */ |
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key) | |||
436 | return; | 454 | return; |
437 | } | 455 | } |
438 | 456 | ||
457 | if (!IS_ENABLED(CONFIG_MMU)) | ||
458 | return; | ||
459 | |||
439 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { | 460 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
440 | case FUT_OFF_INODE: | 461 | case FUT_OFF_INODE: |
441 | iput(key->shared.inode); | 462 | iput(key->shared.inode); |
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 54999350162c..19e9dfbe97fa 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c | |||
@@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | |||
359 | else | 359 | else |
360 | dev_dbg(dev, "irq [%d-%d] for MSI\n", | 360 | dev_dbg(dev, "irq [%d-%d] for MSI\n", |
361 | virq, virq + desc->nvec_used - 1); | 361 | virq, virq + desc->nvec_used - 1); |
362 | /* | ||
363 | * This flag is set by the PCI layer as we need to activate | ||
364 | * the MSI entries before the PCI layer enables MSI in the | ||
365 | * card. Otherwise the card latches a random msi message. | ||
366 | */ | ||
367 | if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { | ||
368 | struct irq_data *irq_data; | ||
369 | |||
370 | irq_data = irq_domain_get_irq_data(domain, desc->irq); | ||
371 | irq_domain_activate_irq(irq_data); | ||
372 | } | ||
362 | } | 373 | } |
363 | 374 | ||
364 | return 0; | 375 | return 0; |
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 37649e69056c..8a99abf58080 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h | |||
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) | |||
450 | goto gotlock; | 450 | goto gotlock; |
451 | } | 451 | } |
452 | } | 452 | } |
453 | WRITE_ONCE(pn->state, vcpu_halted); | 453 | WRITE_ONCE(pn->state, vcpu_hashed); |
454 | qstat_inc(qstat_pv_wait_head, true); | 454 | qstat_inc(qstat_pv_wait_head, true); |
455 | qstat_inc(qstat_pv_wait_again, waitcnt); | 455 | qstat_inc(qstat_pv_wait_again, waitcnt); |
456 | pv_wait(&l->locked, _Q_SLOW_VAL); | 456 | pv_wait(&l->locked, _Q_SLOW_VAL); |
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index 22e025309845..b9d031516254 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h | |||
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf, | |||
153 | */ | 153 | */ |
154 | if ((counter == qstat_pv_latency_kick) || | 154 | if ((counter == qstat_pv_latency_kick) || |
155 | (counter == qstat_pv_latency_wake)) { | 155 | (counter == qstat_pv_latency_wake)) { |
156 | stat = 0; | ||
157 | if (kicks) | 156 | if (kicks) |
158 | stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); | 157 | stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); |
159 | } | 158 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5c883fe8e440..2a906f20fba7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/context_tracking.h> | 74 | #include <linux/context_tracking.h> |
75 | #include <linux/compiler.h> | 75 | #include <linux/compiler.h> |
76 | #include <linux/frame.h> | 76 | #include <linux/frame.h> |
77 | #include <linux/prefetch.h> | ||
77 | 78 | ||
78 | #include <asm/switch_to.h> | 79 | #include <asm/switch_to.h> |
79 | #include <asm/tlb.h> | 80 | #include <asm/tlb.h> |
@@ -2972,6 +2973,23 @@ EXPORT_PER_CPU_SYMBOL(kstat); | |||
2972 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); | 2973 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); |
2973 | 2974 | ||
2974 | /* | 2975 | /* |
2976 | * The function fair_sched_class.update_curr accesses the struct curr | ||
2977 | * and its field curr->exec_start; when called from task_sched_runtime(), | ||
2978 | * we observe a high rate of cache misses in practice. | ||
2979 | * Prefetching this data results in improved performance. | ||
2980 | */ | ||
2981 | static inline void prefetch_curr_exec_start(struct task_struct *p) | ||
2982 | { | ||
2983 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
2984 | struct sched_entity *curr = (&p->se)->cfs_rq->curr; | ||
2985 | #else | ||
2986 | struct sched_entity *curr = (&task_rq(p)->cfs)->curr; | ||
2987 | #endif | ||
2988 | prefetch(curr); | ||
2989 | prefetch(&curr->exec_start); | ||
2990 | } | ||
2991 | |||
2992 | /* | ||
2975 | * Return accounted runtime for the task. | 2993 | * Return accounted runtime for the task. |
2976 | * In case the task is currently running, return the runtime plus current's | 2994 | * In case the task is currently running, return the runtime plus current's |
2977 | * pending runtime that have not been accounted yet. | 2995 | * pending runtime that have not been accounted yet. |
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3005 | * thread, breaking clock_gettime(). | 3023 | * thread, breaking clock_gettime(). |
3006 | */ | 3024 | */ |
3007 | if (task_current(rq, p) && task_on_rq_queued(p)) { | 3025 | if (task_current(rq, p) && task_on_rq_queued(p)) { |
3026 | prefetch_curr_exec_start(p); | ||
3008 | update_rq_clock(rq); | 3027 | update_rq_clock(rq); |
3009 | p->sched_class->update_curr(rq); | 3028 | p->sched_class->update_curr(rq); |
3010 | } | 3029 | } |
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5be58820465c..d4184498c9f5 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c | |||
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) | |||
168 | 168 | ||
169 | if (old_idx == IDX_INVALID) { | 169 | if (old_idx == IDX_INVALID) { |
170 | cp->size++; | 170 | cp->size++; |
171 | cp->elements[cp->size - 1].dl = 0; | 171 | cp->elements[cp->size - 1].dl = dl; |
172 | cp->elements[cp->size - 1].cpu = cpu; | 172 | cp->elements[cp->size - 1].cpu = cpu; |
173 | cp->elements[cpu].idx = cp->size - 1; | 173 | cp->elements[cpu].idx = cp->size - 1; |
174 | cpudl_change_key(cp, cp->size - 1, dl); | 174 | cpudl_change_key(cp, cp->size - 1, dl); |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 1934f658c036..9858266fb0b3 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick) | |||
508 | */ | 508 | */ |
509 | void account_idle_ticks(unsigned long ticks) | 509 | void account_idle_ticks(unsigned long ticks) |
510 | { | 510 | { |
511 | cputime_t cputime, steal; | ||
511 | 512 | ||
512 | if (sched_clock_irqtime) { | 513 | if (sched_clock_irqtime) { |
513 | irqtime_account_idle_ticks(ticks); | 514 | irqtime_account_idle_ticks(ticks); |
514 | return; | 515 | return; |
515 | } | 516 | } |
516 | 517 | ||
517 | account_idle_time(jiffies_to_cputime(ticks)); | 518 | cputime = jiffies_to_cputime(ticks); |
519 | steal = steal_account_process_time(cputime); | ||
520 | |||
521 | if (steal >= cputime) | ||
522 | return; | ||
523 | |||
524 | cputime -= steal; | ||
525 | account_idle_time(cputime); | ||
518 | } | 526 | } |
519 | 527 | ||
520 | /* | 528 | /* |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fcb7f0217ff4..1ce8867283dc 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) | |||
658 | * | 658 | * |
659 | * XXX figure out if select_task_rq_dl() deals with offline cpus. | 659 | * XXX figure out if select_task_rq_dl() deals with offline cpus. |
660 | */ | 660 | */ |
661 | if (unlikely(!rq->online)) | 661 | if (unlikely(!rq->online)) { |
662 | lockdep_unpin_lock(&rq->lock, rf.cookie); | ||
662 | rq = dl_task_offline_migration(rq, p); | 663 | rq = dl_task_offline_migration(rq, p); |
664 | rf.cookie = lockdep_pin_lock(&rq->lock); | ||
665 | } | ||
663 | 666 | ||
664 | /* | 667 | /* |
665 | * Queueing this task back might have overloaded rq, check if we need | 668 | * Queueing this task back might have overloaded rq, check if we need |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4088eedea763..039de34f1521 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu) | |||
4269 | pcfs_rq = tg->parent->cfs_rq[cpu]; | 4269 | pcfs_rq = tg->parent->cfs_rq[cpu]; |
4270 | 4270 | ||
4271 | cfs_rq->throttle_count = pcfs_rq->throttle_count; | 4271 | cfs_rq->throttle_count = pcfs_rq->throttle_count; |
4272 | pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); | 4272 | cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); |
4273 | } | 4273 | } |
4274 | 4274 | ||
4275 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ | 4275 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ |
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 555670a5143c..32bf6f75a8fe 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) | |||
1496 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | 1496 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); |
1497 | u64 expires = KTIME_MAX; | 1497 | u64 expires = KTIME_MAX; |
1498 | unsigned long nextevt; | 1498 | unsigned long nextevt; |
1499 | bool is_max_delta; | ||
1499 | 1500 | ||
1500 | /* | 1501 | /* |
1501 | * Pretend that there is no timer pending if the cpu is offline. | 1502 | * Pretend that there is no timer pending if the cpu is offline. |
@@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) | |||
1506 | 1507 | ||
1507 | spin_lock(&base->lock); | 1508 | spin_lock(&base->lock); |
1508 | nextevt = __next_timer_interrupt(base); | 1509 | nextevt = __next_timer_interrupt(base); |
1510 | is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | ||
1509 | base->next_expiry = nextevt; | 1511 | base->next_expiry = nextevt; |
1510 | /* | 1512 | /* |
1511 | * We have a fresh next event. Check whether we can forward the base: | 1513 | * We have a fresh next event. Check whether we can forward the base: |
@@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) | |||
1519 | expires = basem; | 1521 | expires = basem; |
1520 | base->is_idle = false; | 1522 | base->is_idle = false; |
1521 | } else { | 1523 | } else { |
1522 | expires = basem + (nextevt - basej) * TICK_NSEC; | 1524 | if (!is_max_delta) |
1525 | expires = basem + (nextevt - basej) * TICK_NSEC; | ||
1523 | /* | 1526 | /* |
1524 | * If we expect to sleep more than a tick, mark the base idle: | 1527 | * If we expect to sleep more than a tick, mark the base idle: |
1525 | */ | 1528 | */ |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b9aa1b0b38b0..87e11d8ad536 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page) | |||
1448 | list_del(&page->lru); | 1448 | list_del(&page->lru); |
1449 | h->free_huge_pages--; | 1449 | h->free_huge_pages--; |
1450 | h->free_huge_pages_node[nid]--; | 1450 | h->free_huge_pages_node[nid]--; |
1451 | h->max_huge_pages--; | ||
1451 | update_and_free_page(h, page); | 1452 | update_and_free_page(h, page); |
1452 | } | 1453 | } |
1453 | spin_unlock(&hugetlb_lock); | 1454 | spin_unlock(&hugetlb_lock); |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index b6728a33a4ac..baabaad4a4aa 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
@@ -217,11 +217,8 @@ void quarantine_reduce(void) | |||
217 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / | 217 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / |
218 | QUARANTINE_FRACTION; | 218 | QUARANTINE_FRACTION; |
219 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); | 219 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); |
220 | if (WARN_ONCE(new_quarantine_size < percpu_quarantines, | 220 | new_quarantine_size = (new_quarantine_size < percpu_quarantines) ? |
221 | "Too little memory, disabling global KASAN quarantine.\n")) | 221 | 0 : new_quarantine_size - percpu_quarantines; |
222 | new_quarantine_size = 0; | ||
223 | else | ||
224 | new_quarantine_size -= percpu_quarantines; | ||
225 | WRITE_ONCE(quarantine_size, new_quarantine_size); | 222 | WRITE_ONCE(quarantine_size, new_quarantine_size); |
226 | 223 | ||
227 | last = global_quarantine.head; | 224 | last = global_quarantine.head; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e74d7080ec9e..2ff0289ad061 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -4077,14 +4077,32 @@ static struct cftype mem_cgroup_legacy_files[] = { | |||
4077 | 4077 | ||
4078 | static DEFINE_IDR(mem_cgroup_idr); | 4078 | static DEFINE_IDR(mem_cgroup_idr); |
4079 | 4079 | ||
4080 | static void mem_cgroup_id_get(struct mem_cgroup *memcg) | 4080 | static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) |
4081 | { | 4081 | { |
4082 | atomic_inc(&memcg->id.ref); | 4082 | atomic_add(n, &memcg->id.ref); |
4083 | } | 4083 | } |
4084 | 4084 | ||
4085 | static void mem_cgroup_id_put(struct mem_cgroup *memcg) | 4085 | static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) |
4086 | { | 4086 | { |
4087 | if (atomic_dec_and_test(&memcg->id.ref)) { | 4087 | while (!atomic_inc_not_zero(&memcg->id.ref)) { |
4088 | /* | ||
4089 | * The root cgroup cannot be destroyed, so it's refcount must | ||
4090 | * always be >= 1. | ||
4091 | */ | ||
4092 | if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { | ||
4093 | VM_BUG_ON(1); | ||
4094 | break; | ||
4095 | } | ||
4096 | memcg = parent_mem_cgroup(memcg); | ||
4097 | if (!memcg) | ||
4098 | memcg = root_mem_cgroup; | ||
4099 | } | ||
4100 | return memcg; | ||
4101 | } | ||
4102 | |||
4103 | static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) | ||
4104 | { | ||
4105 | if (atomic_sub_and_test(n, &memcg->id.ref)) { | ||
4088 | idr_remove(&mem_cgroup_idr, memcg->id.id); | 4106 | idr_remove(&mem_cgroup_idr, memcg->id.id); |
4089 | memcg->id.id = 0; | 4107 | memcg->id.id = 0; |
4090 | 4108 | ||
@@ -4093,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg) | |||
4093 | } | 4111 | } |
4094 | } | 4112 | } |
4095 | 4113 | ||
4114 | static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) | ||
4115 | { | ||
4116 | mem_cgroup_id_get_many(memcg, 1); | ||
4117 | } | ||
4118 | |||
4119 | static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) | ||
4120 | { | ||
4121 | mem_cgroup_id_put_many(memcg, 1); | ||
4122 | } | ||
4123 | |||
4096 | /** | 4124 | /** |
4097 | * mem_cgroup_from_id - look up a memcg from a memcg id | 4125 | * mem_cgroup_from_id - look up a memcg from a memcg id |
4098 | * @id: the memcg id to look up | 4126 | * @id: the memcg id to look up |
@@ -4727,6 +4755,8 @@ static void __mem_cgroup_clear_mc(void) | |||
4727 | if (!mem_cgroup_is_root(mc.from)) | 4755 | if (!mem_cgroup_is_root(mc.from)) |
4728 | page_counter_uncharge(&mc.from->memsw, mc.moved_swap); | 4756 | page_counter_uncharge(&mc.from->memsw, mc.moved_swap); |
4729 | 4757 | ||
4758 | mem_cgroup_id_put_many(mc.from, mc.moved_swap); | ||
4759 | |||
4730 | /* | 4760 | /* |
4731 | * we charged both to->memory and to->memsw, so we | 4761 | * we charged both to->memory and to->memsw, so we |
4732 | * should uncharge to->memory. | 4762 | * should uncharge to->memory. |
@@ -4734,9 +4764,9 @@ static void __mem_cgroup_clear_mc(void) | |||
4734 | if (!mem_cgroup_is_root(mc.to)) | 4764 | if (!mem_cgroup_is_root(mc.to)) |
4735 | page_counter_uncharge(&mc.to->memory, mc.moved_swap); | 4765 | page_counter_uncharge(&mc.to->memory, mc.moved_swap); |
4736 | 4766 | ||
4737 | css_put_many(&mc.from->css, mc.moved_swap); | 4767 | mem_cgroup_id_get_many(mc.to, mc.moved_swap); |
4768 | css_put_many(&mc.to->css, mc.moved_swap); | ||
4738 | 4769 | ||
4739 | /* we've already done css_get(mc.to) */ | ||
4740 | mc.moved_swap = 0; | 4770 | mc.moved_swap = 0; |
4741 | } | 4771 | } |
4742 | memcg_oom_recover(from); | 4772 | memcg_oom_recover(from); |
@@ -5800,7 +5830,7 @@ subsys_initcall(mem_cgroup_init); | |||
5800 | */ | 5830 | */ |
5801 | void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | 5831 | void mem_cgroup_swapout(struct page *page, swp_entry_t entry) |
5802 | { | 5832 | { |
5803 | struct mem_cgroup *memcg; | 5833 | struct mem_cgroup *memcg, *swap_memcg; |
5804 | unsigned short oldid; | 5834 | unsigned short oldid; |
5805 | 5835 | ||
5806 | VM_BUG_ON_PAGE(PageLRU(page), page); | 5836 | VM_BUG_ON_PAGE(PageLRU(page), page); |
@@ -5815,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | |||
5815 | if (!memcg) | 5845 | if (!memcg) |
5816 | return; | 5846 | return; |
5817 | 5847 | ||
5818 | mem_cgroup_id_get(memcg); | 5848 | /* |
5819 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); | 5849 | * In case the memcg owning these pages has been offlined and doesn't |
5850 | * have an ID allocated to it anymore, charge the closest online | ||
5851 | * ancestor for the swap instead and transfer the memory+swap charge. | ||
5852 | */ | ||
5853 | swap_memcg = mem_cgroup_id_get_online(memcg); | ||
5854 | oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); | ||
5820 | VM_BUG_ON_PAGE(oldid, page); | 5855 | VM_BUG_ON_PAGE(oldid, page); |
5821 | mem_cgroup_swap_statistics(memcg, true); | 5856 | mem_cgroup_swap_statistics(swap_memcg, true); |
5822 | 5857 | ||
5823 | page->mem_cgroup = NULL; | 5858 | page->mem_cgroup = NULL; |
5824 | 5859 | ||
5825 | if (!mem_cgroup_is_root(memcg)) | 5860 | if (!mem_cgroup_is_root(memcg)) |
5826 | page_counter_uncharge(&memcg->memory, 1); | 5861 | page_counter_uncharge(&memcg->memory, 1); |
5827 | 5862 | ||
5863 | if (memcg != swap_memcg) { | ||
5864 | if (!mem_cgroup_is_root(swap_memcg)) | ||
5865 | page_counter_charge(&swap_memcg->memsw, 1); | ||
5866 | page_counter_uncharge(&memcg->memsw, 1); | ||
5867 | } | ||
5868 | |||
5828 | /* | 5869 | /* |
5829 | * Interrupts should be disabled here because the caller holds the | 5870 | * Interrupts should be disabled here because the caller holds the |
5830 | * mapping->tree_lock lock which is taken with interrupts-off. It is | 5871 | * mapping->tree_lock lock which is taken with interrupts-off. It is |
@@ -5863,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) | |||
5863 | if (!memcg) | 5904 | if (!memcg) |
5864 | return 0; | 5905 | return 0; |
5865 | 5906 | ||
5907 | memcg = mem_cgroup_id_get_online(memcg); | ||
5908 | |||
5866 | if (!mem_cgroup_is_root(memcg) && | 5909 | if (!mem_cgroup_is_root(memcg) && |
5867 | !page_counter_try_charge(&memcg->swap, 1, &counter)) | 5910 | !page_counter_try_charge(&memcg->swap, 1, &counter)) { |
5911 | mem_cgroup_id_put(memcg); | ||
5868 | return -ENOMEM; | 5912 | return -ENOMEM; |
5913 | } | ||
5869 | 5914 | ||
5870 | mem_cgroup_id_get(memcg); | ||
5871 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); | 5915 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); |
5872 | VM_BUG_ON_PAGE(oldid, page); | 5916 | VM_BUG_ON_PAGE(oldid, page); |
5873 | mem_cgroup_swap_statistics(memcg, true); | 5917 | mem_cgroup_swap_statistics(memcg, true); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3894b65b1555..41266dc29f33 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1219 | 1219 | ||
1220 | /* init node's zones as empty zones, we don't have any present pages.*/ | 1220 | /* init node's zones as empty zones, we don't have any present pages.*/ |
1221 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); | 1221 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); |
1222 | pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); | ||
1222 | 1223 | ||
1223 | /* | 1224 | /* |
1224 | * The node we allocated has no zone fallback lists. For avoiding | 1225 | * The node we allocated has no zone fallback lists. For avoiding |
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1249 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) | 1250 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) |
1250 | { | 1251 | { |
1251 | arch_refresh_nodedata(nid, NULL); | 1252 | arch_refresh_nodedata(nid, NULL); |
1253 | free_percpu(pgdat->per_cpu_nodestats); | ||
1252 | arch_free_nodedata(pgdat); | 1254 | arch_free_nodedata(pgdat); |
1253 | return; | 1255 | return; |
1254 | } | 1256 | } |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7d0a275df822..d53a9aa00977 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task) | |||
764 | { | 764 | { |
765 | struct mm_struct *mm = task->mm; | 765 | struct mm_struct *mm = task->mm; |
766 | struct task_struct *p; | 766 | struct task_struct *p; |
767 | bool ret; | 767 | bool ret = true; |
768 | 768 | ||
769 | /* | 769 | /* |
770 | * Skip tasks without mm because it might have passed its exit_mm and | 770 | * Skip tasks without mm because it might have passed its exit_mm and |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee744fa3b93d..3fbe73a6fe4b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4060,7 +4060,7 @@ long si_mem_available(void) | |||
4060 | int lru; | 4060 | int lru; |
4061 | 4061 | ||
4062 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) | 4062 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) |
4063 | pages[lru] = global_page_state(NR_LRU_BASE + lru); | 4063 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
4064 | 4064 | ||
4065 | for_each_zone(zone) | 4065 | for_each_zone(zone) |
4066 | wmark_low += zone->watermark[WMARK_LOW]; | 4066 | wmark_low += zone->watermark[WMARK_LOW]; |
@@ -4757,6 +4757,8 @@ int local_memory_node(int node) | |||
4757 | } | 4757 | } |
4758 | #endif | 4758 | #endif |
4759 | 4759 | ||
4760 | static void setup_min_unmapped_ratio(void); | ||
4761 | static void setup_min_slab_ratio(void); | ||
4760 | #else /* CONFIG_NUMA */ | 4762 | #else /* CONFIG_NUMA */ |
4761 | 4763 | ||
4762 | static void set_zonelist_order(void) | 4764 | static void set_zonelist_order(void) |
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
5878 | zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; | 5880 | zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; |
5879 | #ifdef CONFIG_NUMA | 5881 | #ifdef CONFIG_NUMA |
5880 | zone->node = nid; | 5882 | zone->node = nid; |
5881 | pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio) | ||
5882 | / 100; | ||
5883 | pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100; | ||
5884 | #endif | 5883 | #endif |
5885 | zone->name = zone_names[j]; | 5884 | zone->name = zone_names[j]; |
5886 | zone->zone_pgdat = pgdat; | 5885 | zone->zone_pgdat = pgdat; |
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void) | |||
6801 | setup_per_zone_wmarks(); | 6800 | setup_per_zone_wmarks(); |
6802 | refresh_zone_stat_thresholds(); | 6801 | refresh_zone_stat_thresholds(); |
6803 | setup_per_zone_lowmem_reserve(); | 6802 | setup_per_zone_lowmem_reserve(); |
6803 | |||
6804 | #ifdef CONFIG_NUMA | ||
6805 | setup_min_unmapped_ratio(); | ||
6806 | setup_min_slab_ratio(); | ||
6807 | #endif | ||
6808 | |||
6804 | return 0; | 6809 | return 0; |
6805 | } | 6810 | } |
6806 | core_initcall(init_per_zone_wmark_min) | 6811 | core_initcall(init_per_zone_wmark_min) |
@@ -6842,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, | |||
6842 | } | 6847 | } |
6843 | 6848 | ||
6844 | #ifdef CONFIG_NUMA | 6849 | #ifdef CONFIG_NUMA |
6850 | static void setup_min_unmapped_ratio(void) | ||
6851 | { | ||
6852 | pg_data_t *pgdat; | ||
6853 | struct zone *zone; | ||
6854 | |||
6855 | for_each_online_pgdat(pgdat) | ||
6856 | pgdat->min_unmapped_pages = 0; | ||
6857 | |||
6858 | for_each_zone(zone) | ||
6859 | zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * | ||
6860 | sysctl_min_unmapped_ratio) / 100; | ||
6861 | } | ||
6862 | |||
6863 | |||
6845 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, | 6864 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, |
6846 | void __user *buffer, size_t *length, loff_t *ppos) | 6865 | void __user *buffer, size_t *length, loff_t *ppos) |
6847 | { | 6866 | { |
6848 | struct pglist_data *pgdat; | ||
6849 | struct zone *zone; | ||
6850 | int rc; | 6867 | int rc; |
6851 | 6868 | ||
6852 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); | 6869 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
6853 | if (rc) | 6870 | if (rc) |
6854 | return rc; | 6871 | return rc; |
6855 | 6872 | ||
6873 | setup_min_unmapped_ratio(); | ||
6874 | |||
6875 | return 0; | ||
6876 | } | ||
6877 | |||
6878 | static void setup_min_slab_ratio(void) | ||
6879 | { | ||
6880 | pg_data_t *pgdat; | ||
6881 | struct zone *zone; | ||
6882 | |||
6856 | for_each_online_pgdat(pgdat) | 6883 | for_each_online_pgdat(pgdat) |
6857 | pgdat->min_slab_pages = 0; | 6884 | pgdat->min_slab_pages = 0; |
6858 | 6885 | ||
6859 | for_each_zone(zone) | 6886 | for_each_zone(zone) |
6860 | zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * | 6887 | zone->zone_pgdat->min_slab_pages += (zone->managed_pages * |
6861 | sysctl_min_unmapped_ratio) / 100; | 6888 | sysctl_min_slab_ratio) / 100; |
6862 | return 0; | ||
6863 | } | 6889 | } |
6864 | 6890 | ||
6865 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, | 6891 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, |
6866 | void __user *buffer, size_t *length, loff_t *ppos) | 6892 | void __user *buffer, size_t *length, loff_t *ppos) |
6867 | { | 6893 | { |
6868 | struct pglist_data *pgdat; | ||
6869 | struct zone *zone; | ||
6870 | int rc; | 6894 | int rc; |
6871 | 6895 | ||
6872 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); | 6896 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
6873 | if (rc) | 6897 | if (rc) |
6874 | return rc; | 6898 | return rc; |
6875 | 6899 | ||
6876 | for_each_online_pgdat(pgdat) | 6900 | setup_min_slab_ratio(); |
6877 | pgdat->min_slab_pages = 0; | ||
6878 | 6901 | ||
6879 | for_each_zone(zone) | ||
6880 | zone->zone_pgdat->min_slab_pages += (zone->managed_pages * | ||
6881 | sysctl_min_slab_ratio) / 100; | ||
6882 | return 0; | 6902 | return 0; |
6883 | } | 6903 | } |
6884 | #endif | 6904 | #endif |
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound) | |||
1284 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); | 1284 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
1285 | __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); | 1285 | __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); |
1286 | } else { | 1286 | } else { |
1287 | if (PageTransCompound(page)) { | 1287 | if (PageTransCompound(page) && page_mapping(page)) { |
1288 | VM_BUG_ON_PAGE(!PageLocked(page), page); | 1288 | VM_WARN_ON_ONCE(!PageLocked(page)); |
1289 | |||
1289 | SetPageDoubleMap(compound_head(page)); | 1290 | SetPageDoubleMap(compound_head(page)); |
1290 | if (PageMlocked(page)) | 1291 | if (PageMlocked(page)) |
1291 | clear_page_mlock(compound_head(page)); | 1292 | clear_page_mlock(compound_head(page)); |
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound) | |||
1303 | { | 1304 | { |
1304 | int i, nr = 1; | 1305 | int i, nr = 1; |
1305 | 1306 | ||
1306 | VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); | 1307 | VM_BUG_ON_PAGE(compound && !PageHead(page), page); |
1307 | lock_page_memcg(page); | 1308 | lock_page_memcg(page); |
1308 | 1309 | ||
1309 | /* Hugepages are not counted in NR_FILE_MAPPED for now. */ | 1310 | /* Hugepages are not counted in NR_FILE_MAPPED for now. */ |
diff --git a/mm/shmem.c b/mm/shmem.c index 7f7748a0f9e1..fd8b2b5741b1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, | |||
3975 | 3975 | ||
3976 | struct kobj_attribute shmem_enabled_attr = | 3976 | struct kobj_attribute shmem_enabled_attr = |
3977 | __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); | 3977 | __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); |
3978 | #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ | ||
3978 | 3979 | ||
3980 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE | ||
3979 | bool shmem_huge_enabled(struct vm_area_struct *vma) | 3981 | bool shmem_huge_enabled(struct vm_area_struct *vma) |
3980 | { | 3982 | { |
3981 | struct inode *inode = file_inode(vma->vm_file); | 3983 | struct inode *inode = file_inode(vma->vm_file); |
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) | |||
4006 | return false; | 4008 | return false; |
4007 | } | 4009 | } |
4008 | } | 4010 | } |
4009 | #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ | 4011 | #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ |
4010 | 4012 | ||
4011 | #else /* !CONFIG_SHMEM */ | 4013 | #else /* !CONFIG_SHMEM */ |
4012 | 4014 | ||
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, | |||
3629 | */ | 3629 | */ |
3630 | static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) | 3630 | static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) |
3631 | { | 3631 | { |
3632 | LIST_HEAD(discard); | ||
3632 | struct page *page, *h; | 3633 | struct page *page, *h; |
3633 | 3634 | ||
3634 | BUG_ON(irqs_disabled()); | 3635 | BUG_ON(irqs_disabled()); |
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) | |||
3636 | list_for_each_entry_safe(page, h, &n->partial, lru) { | 3637 | list_for_each_entry_safe(page, h, &n->partial, lru) { |
3637 | if (!page->inuse) { | 3638 | if (!page->inuse) { |
3638 | remove_partial(n, page); | 3639 | remove_partial(n, page); |
3639 | discard_slab(s, page); | 3640 | list_add(&page->lru, &discard); |
3640 | } else { | 3641 | } else { |
3641 | list_slab_objects(s, page, | 3642 | list_slab_objects(s, page, |
3642 | "Objects remaining in %s on __kmem_cache_shutdown()"); | 3643 | "Objects remaining in %s on __kmem_cache_shutdown()"); |
3643 | } | 3644 | } |
3644 | } | 3645 | } |
3645 | spin_unlock_irq(&n->list_lock); | 3646 | spin_unlock_irq(&n->list_lock); |
3647 | |||
3648 | list_for_each_entry_safe(page, h, &discard, lru) | ||
3649 | discard_slab(s, page); | ||
3646 | } | 3650 | } |
3647 | 3651 | ||
3648 | /* | 3652 | /* |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 4acb1d5417aa..f24b25c25106 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -507,8 +507,8 @@ err_out: | |||
507 | /* wakeup anybody waiting for slots to pin pages */ | 507 | /* wakeup anybody waiting for slots to pin pages */ |
508 | wake_up(&vp_wq); | 508 | wake_up(&vp_wq); |
509 | } | 509 | } |
510 | kfree(in_pages); | 510 | kvfree(in_pages); |
511 | kfree(out_pages); | 511 | kvfree(out_pages); |
512 | return err; | 512 | return err; |
513 | } | 513 | } |
514 | 514 | ||
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index c83326c5ba58..ef34a02719d7 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req) | |||
574 | put_generic_request(req); | 574 | put_generic_request(req); |
575 | } | 575 | } |
576 | 576 | ||
577 | void cancel_generic_request(struct ceph_mon_generic_request *req) | 577 | static void cancel_generic_request(struct ceph_mon_generic_request *req) |
578 | { | 578 | { |
579 | struct ceph_mon_client *monc = req->monc; | 579 | struct ceph_mon_client *monc = req->monc; |
580 | struct ceph_mon_generic_request *lookup_req; | 580 | struct ceph_mon_generic_request *lookup_req; |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b5ec09612ff7..a97e7b506612 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) | |||
4220 | 4220 | ||
4221 | pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), | 4221 | pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), |
4222 | GFP_NOIO); | 4222 | GFP_NOIO); |
4223 | if (!pages) { | 4223 | if (IS_ERR(pages)) { |
4224 | ceph_msg_put(m); | 4224 | ceph_msg_put(m); |
4225 | return NULL; | 4225 | return NULL; |
4226 | } | 4226 | } |
diff --git a/net/ceph/string_table.c b/net/ceph/string_table.c index ca53c8319209..22fb96efcf34 100644 --- a/net/ceph/string_table.c +++ b/net/ceph/string_table.c | |||
@@ -84,12 +84,6 @@ retry: | |||
84 | } | 84 | } |
85 | EXPORT_SYMBOL(ceph_find_or_create_string); | 85 | EXPORT_SYMBOL(ceph_find_or_create_string); |
86 | 86 | ||
87 | static void ceph_free_string(struct rcu_head *head) | ||
88 | { | ||
89 | struct ceph_string *cs = container_of(head, struct ceph_string, rcu); | ||
90 | kfree(cs); | ||
91 | } | ||
92 | |||
93 | void ceph_release_string(struct kref *ref) | 87 | void ceph_release_string(struct kref *ref) |
94 | { | 88 | { |
95 | struct ceph_string *cs = container_of(ref, struct ceph_string, kref); | 89 | struct ceph_string *cs = container_of(ref, struct ceph_string, kref); |
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref) | |||
101 | } | 95 | } |
102 | spin_unlock(&string_tree_lock); | 96 | spin_unlock(&string_tree_lock); |
103 | 97 | ||
104 | call_rcu(&cs->rcu, ceph_free_string); | 98 | kfree_rcu(cs, rcu); |
105 | } | 99 | } |
106 | EXPORT_SYMBOL(ceph_release_string); | 100 | EXPORT_SYMBOL(ceph_release_string); |
107 | 101 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 23c8e7c39656..976c7812bbd5 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) | |||
340 | } | 340 | } |
341 | 341 | ||
342 | static struct gss_upcall_msg * | 342 | static struct gss_upcall_msg * |
343 | __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) | 343 | __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth) |
344 | { | 344 | { |
345 | struct gss_upcall_msg *pos; | 345 | struct gss_upcall_msg *pos; |
346 | list_for_each_entry(pos, &pipe->in_downcall, list) { | 346 | list_for_each_entry(pos, &pipe->in_downcall, list) { |
347 | if (!uid_eq(pos->uid, uid)) | 347 | if (!uid_eq(pos->uid, uid)) |
348 | continue; | 348 | continue; |
349 | if (auth && pos->auth->service != auth->service) | ||
350 | continue; | ||
349 | atomic_inc(&pos->count); | 351 | atomic_inc(&pos->count); |
350 | dprintk("RPC: %s found msg %p\n", __func__, pos); | 352 | dprintk("RPC: %s found msg %p\n", __func__, pos); |
351 | return pos; | 353 | return pos; |
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg) | |||
365 | struct gss_upcall_msg *old; | 367 | struct gss_upcall_msg *old; |
366 | 368 | ||
367 | spin_lock(&pipe->lock); | 369 | spin_lock(&pipe->lock); |
368 | old = __gss_find_upcall(pipe, gss_msg->uid); | 370 | old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth); |
369 | if (old == NULL) { | 371 | if (old == NULL) { |
370 | atomic_inc(&gss_msg->count); | 372 | atomic_inc(&gss_msg->count); |
371 | list_add(&gss_msg->list, &pipe->in_downcall); | 373 | list_add(&gss_msg->list, &pipe->in_downcall); |
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
714 | err = -ENOENT; | 716 | err = -ENOENT; |
715 | /* Find a matching upcall */ | 717 | /* Find a matching upcall */ |
716 | spin_lock(&pipe->lock); | 718 | spin_lock(&pipe->lock); |
717 | gss_msg = __gss_find_upcall(pipe, uid); | 719 | gss_msg = __gss_find_upcall(pipe, uid, NULL); |
718 | if (gss_msg == NULL) { | 720 | if (gss_msg == NULL) { |
719 | spin_unlock(&pipe->lock); | 721 | spin_unlock(&pipe->lock); |
720 | goto err_put_ctx; | 722 | goto err_put_ctx; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cb49898a5a58..7f79fb7dc6a0 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2638 | { | 2638 | { |
2639 | struct rpc_xprt_switch *xps; | 2639 | struct rpc_xprt_switch *xps; |
2640 | struct rpc_xprt *xprt; | 2640 | struct rpc_xprt *xprt; |
2641 | unsigned long reconnect_timeout; | ||
2641 | unsigned char resvport; | 2642 | unsigned char resvport; |
2642 | int ret = 0; | 2643 | int ret = 0; |
2643 | 2644 | ||
@@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2649 | return -EAGAIN; | 2650 | return -EAGAIN; |
2650 | } | 2651 | } |
2651 | resvport = xprt->resvport; | 2652 | resvport = xprt->resvport; |
2653 | reconnect_timeout = xprt->max_reconnect_timeout; | ||
2652 | rcu_read_unlock(); | 2654 | rcu_read_unlock(); |
2653 | 2655 | ||
2654 | xprt = xprt_create_transport(xprtargs); | 2656 | xprt = xprt_create_transport(xprtargs); |
@@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2657 | goto out_put_switch; | 2659 | goto out_put_switch; |
2658 | } | 2660 | } |
2659 | xprt->resvport = resvport; | 2661 | xprt->resvport = resvport; |
2662 | xprt->max_reconnect_timeout = reconnect_timeout; | ||
2660 | 2663 | ||
2661 | rpc_xprt_switch_set_roundrobin(xps); | 2664 | rpc_xprt_switch_set_roundrobin(xps); |
2662 | if (setup) { | 2665 | if (setup) { |
@@ -2673,6 +2676,27 @@ out_put_switch: | |||
2673 | } | 2676 | } |
2674 | EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); | 2677 | EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); |
2675 | 2678 | ||
2679 | static int | ||
2680 | rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt, | ||
2681 | struct rpc_xprt *xprt, | ||
2682 | void *data) | ||
2683 | { | ||
2684 | unsigned long timeout = *((unsigned long *)data); | ||
2685 | |||
2686 | if (timeout < xprt->max_reconnect_timeout) | ||
2687 | xprt->max_reconnect_timeout = timeout; | ||
2688 | return 0; | ||
2689 | } | ||
2690 | |||
2691 | void | ||
2692 | rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo) | ||
2693 | { | ||
2694 | rpc_clnt_iterate_for_each_xprt(clnt, | ||
2695 | rpc_xprt_cap_max_reconnect_timeout, | ||
2696 | &timeo); | ||
2697 | } | ||
2698 | EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); | ||
2699 | |||
2676 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 2700 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
2677 | static void rpc_show_header(void) | 2701 | static void rpc_show_header(void) |
2678 | { | 2702 | { |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 8313960cac52..ea244b29138b 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -680,6 +680,20 @@ out: | |||
680 | spin_unlock_bh(&xprt->transport_lock); | 680 | spin_unlock_bh(&xprt->transport_lock); |
681 | } | 681 | } |
682 | 682 | ||
683 | static bool | ||
684 | xprt_has_timer(const struct rpc_xprt *xprt) | ||
685 | { | ||
686 | return xprt->idle_timeout != 0; | ||
687 | } | ||
688 | |||
689 | static void | ||
690 | xprt_schedule_autodisconnect(struct rpc_xprt *xprt) | ||
691 | __must_hold(&xprt->transport_lock) | ||
692 | { | ||
693 | if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) | ||
694 | mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); | ||
695 | } | ||
696 | |||
683 | static void | 697 | static void |
684 | xprt_init_autodisconnect(unsigned long data) | 698 | xprt_init_autodisconnect(unsigned long data) |
685 | { | 699 | { |
@@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data) | |||
688 | spin_lock(&xprt->transport_lock); | 702 | spin_lock(&xprt->transport_lock); |
689 | if (!list_empty(&xprt->recv)) | 703 | if (!list_empty(&xprt->recv)) |
690 | goto out_abort; | 704 | goto out_abort; |
705 | /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ | ||
706 | xprt->last_used = jiffies; | ||
691 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 707 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
692 | goto out_abort; | 708 | goto out_abort; |
693 | spin_unlock(&xprt->transport_lock); | 709 | spin_unlock(&xprt->transport_lock); |
@@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) | |||
725 | goto out; | 741 | goto out; |
726 | xprt->snd_task =NULL; | 742 | xprt->snd_task =NULL; |
727 | xprt->ops->release_xprt(xprt, NULL); | 743 | xprt->ops->release_xprt(xprt, NULL); |
744 | xprt_schedule_autodisconnect(xprt); | ||
728 | out: | 745 | out: |
729 | spin_unlock_bh(&xprt->transport_lock); | 746 | spin_unlock_bh(&xprt->transport_lock); |
730 | wake_up_bit(&xprt->state, XPRT_LOCKED); | 747 | wake_up_bit(&xprt->state, XPRT_LOCKED); |
@@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task) | |||
888 | spin_unlock_bh(&xprt->transport_lock); | 905 | spin_unlock_bh(&xprt->transport_lock); |
889 | } | 906 | } |
890 | 907 | ||
891 | static inline int xprt_has_timer(struct rpc_xprt *xprt) | ||
892 | { | ||
893 | return xprt->idle_timeout != 0; | ||
894 | } | ||
895 | |||
896 | /** | 908 | /** |
897 | * xprt_prepare_transmit - reserve the transport before sending a request | 909 | * xprt_prepare_transmit - reserve the transport before sending a request |
898 | * @task: RPC task about to send a request | 910 | * @task: RPC task about to send a request |
@@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task) | |||
1280 | if (!list_empty(&req->rq_list)) | 1292 | if (!list_empty(&req->rq_list)) |
1281 | list_del(&req->rq_list); | 1293 | list_del(&req->rq_list); |
1282 | xprt->last_used = jiffies; | 1294 | xprt->last_used = jiffies; |
1283 | if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) | 1295 | xprt_schedule_autodisconnect(xprt); |
1284 | mod_timer(&xprt->timer, | ||
1285 | xprt->last_used + xprt->idle_timeout); | ||
1286 | spin_unlock_bh(&xprt->transport_lock); | 1296 | spin_unlock_bh(&xprt->transport_lock); |
1287 | if (req->rq_buffer) | 1297 | if (req->rq_buffer) |
1288 | xprt->ops->buf_free(req->rq_buffer); | 1298 | xprt->ops->buf_free(req->rq_buffer); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 111767ab124a..8ede3bc52481 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = { | |||
177 | * increase over time if the server is down or not responding. | 177 | * increase over time if the server is down or not responding. |
178 | */ | 178 | */ |
179 | #define XS_TCP_INIT_REEST_TO (3U * HZ) | 179 | #define XS_TCP_INIT_REEST_TO (3U * HZ) |
180 | #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) | ||
181 | 180 | ||
182 | /* | 181 | /* |
183 | * TCP idle timeout; client drops the transport socket if it is idle | 182 | * TCP idle timeout; client drops the transport socket if it is idle |
@@ -2173,6 +2172,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2173 | write_unlock_bh(&sk->sk_callback_lock); | 2172 | write_unlock_bh(&sk->sk_callback_lock); |
2174 | } | 2173 | } |
2175 | xs_udp_do_set_buffer_size(xprt); | 2174 | xs_udp_do_set_buffer_size(xprt); |
2175 | |||
2176 | xprt->stat.connect_start = jiffies; | ||
2176 | } | 2177 | } |
2177 | 2178 | ||
2178 | static void xs_udp_setup_socket(struct work_struct *work) | 2179 | static void xs_udp_setup_socket(struct work_struct *work) |
@@ -2236,6 +2237,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2236 | unsigned int keepcnt = xprt->timeout->to_retries + 1; | 2237 | unsigned int keepcnt = xprt->timeout->to_retries + 1; |
2237 | unsigned int opt_on = 1; | 2238 | unsigned int opt_on = 1; |
2238 | unsigned int timeo; | 2239 | unsigned int timeo; |
2240 | unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; | ||
2239 | 2241 | ||
2240 | /* TCP Keepalive options */ | 2242 | /* TCP Keepalive options */ |
2241 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, | 2243 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, |
@@ -2247,6 +2249,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2247 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, | 2249 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, |
2248 | (char *)&keepcnt, sizeof(keepcnt)); | 2250 | (char *)&keepcnt, sizeof(keepcnt)); |
2249 | 2251 | ||
2252 | /* Avoid temporary address, they are bad for long-lived | ||
2253 | * connections such as NFS mounts. | ||
2254 | * RFC4941, section 3.6 suggests that: | ||
2255 | * Individual applications, which have specific | ||
2256 | * knowledge about the normal duration of connections, | ||
2257 | * MAY override this as appropriate. | ||
2258 | */ | ||
2259 | kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, | ||
2260 | (char *)&addr_pref, sizeof(addr_pref)); | ||
2261 | |||
2250 | /* TCP user timeout (see RFC5482) */ | 2262 | /* TCP user timeout (see RFC5482) */ |
2251 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * | 2263 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * |
2252 | (xprt->timeout->to_retries + 1); | 2264 | (xprt->timeout->to_retries + 1); |
@@ -2295,6 +2307,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2295 | /* SYN_SENT! */ | 2307 | /* SYN_SENT! */ |
2296 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) | 2308 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) |
2297 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 2309 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
2310 | break; | ||
2311 | case -EADDRNOTAVAIL: | ||
2312 | /* Source port number is unavailable. Try a new one! */ | ||
2313 | transport->srcport = 0; | ||
2298 | } | 2314 | } |
2299 | out: | 2315 | out: |
2300 | return ret; | 2316 | return ret; |
@@ -2369,6 +2385,25 @@ out: | |||
2369 | xprt_wake_pending_tasks(xprt, status); | 2385 | xprt_wake_pending_tasks(xprt, status); |
2370 | } | 2386 | } |
2371 | 2387 | ||
2388 | static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt) | ||
2389 | { | ||
2390 | unsigned long start, now = jiffies; | ||
2391 | |||
2392 | start = xprt->stat.connect_start + xprt->reestablish_timeout; | ||
2393 | if (time_after(start, now)) | ||
2394 | return start - now; | ||
2395 | return 0; | ||
2396 | } | ||
2397 | |||
2398 | static void xs_reconnect_backoff(struct rpc_xprt *xprt) | ||
2399 | { | ||
2400 | xprt->reestablish_timeout <<= 1; | ||
2401 | if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) | ||
2402 | xprt->reestablish_timeout = xprt->max_reconnect_timeout; | ||
2403 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) | ||
2404 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | ||
2405 | } | ||
2406 | |||
2372 | /** | 2407 | /** |
2373 | * xs_connect - connect a socket to a remote endpoint | 2408 | * xs_connect - connect a socket to a remote endpoint |
2374 | * @xprt: pointer to transport structure | 2409 | * @xprt: pointer to transport structure |
@@ -2386,6 +2421,7 @@ out: | |||
2386 | static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) | 2421 | static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) |
2387 | { | 2422 | { |
2388 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 2423 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
2424 | unsigned long delay = 0; | ||
2389 | 2425 | ||
2390 | WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); | 2426 | WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); |
2391 | 2427 | ||
@@ -2397,19 +2433,15 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) | |||
2397 | /* Start by resetting any existing state */ | 2433 | /* Start by resetting any existing state */ |
2398 | xs_reset_transport(transport); | 2434 | xs_reset_transport(transport); |
2399 | 2435 | ||
2400 | queue_delayed_work(xprtiod_workqueue, | 2436 | delay = xs_reconnect_delay(xprt); |
2401 | &transport->connect_worker, | 2437 | xs_reconnect_backoff(xprt); |
2402 | xprt->reestablish_timeout); | 2438 | |
2403 | xprt->reestablish_timeout <<= 1; | 2439 | } else |
2404 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) | ||
2405 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | ||
2406 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | ||
2407 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | ||
2408 | } else { | ||
2409 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 2440 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
2410 | queue_delayed_work(xprtiod_workqueue, | 2441 | |
2411 | &transport->connect_worker, 0); | 2442 | queue_delayed_work(xprtiod_workqueue, |
2412 | } | 2443 | &transport->connect_worker, |
2444 | delay); | ||
2413 | } | 2445 | } |
2414 | 2446 | ||
2415 | /** | 2447 | /** |
@@ -2961,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2961 | xprt->ops = &xs_tcp_ops; | 2993 | xprt->ops = &xs_tcp_ops; |
2962 | xprt->timeout = &xs_tcp_default_timeout; | 2994 | xprt->timeout = &xs_tcp_default_timeout; |
2963 | 2995 | ||
2996 | xprt->max_reconnect_timeout = xprt->timeout->to_maxval; | ||
2997 | |||
2964 | INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); | 2998 | INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); |
2965 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); | 2999 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); |
2966 | 3000 | ||
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 122fcdaf42c8..49a00d54b835 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -432,7 +432,7 @@ foreach my $file (@ARGV) { | |||
432 | die "$P: file '${file}' not found\n"; | 432 | die "$P: file '${file}' not found\n"; |
433 | } | 433 | } |
434 | } | 434 | } |
435 | if ($from_filename || vcs_file_exists($file)) { | 435 | if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) { |
436 | $file =~ s/^\Q${cur_path}\E//; #strip any absolute path | 436 | $file =~ s/^\Q${cur_path}\E//; #strip any absolute path |
437 | $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree | 437 | $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree |
438 | push(@files, $file); | 438 | push(@files, $file); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 89dacf9b4e6c..160c7f713722 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -906,20 +906,23 @@ static int azx_resume(struct device *dev) | |||
906 | struct snd_card *card = dev_get_drvdata(dev); | 906 | struct snd_card *card = dev_get_drvdata(dev); |
907 | struct azx *chip; | 907 | struct azx *chip; |
908 | struct hda_intel *hda; | 908 | struct hda_intel *hda; |
909 | struct hdac_bus *bus; | ||
909 | 910 | ||
910 | if (!card) | 911 | if (!card) |
911 | return 0; | 912 | return 0; |
912 | 913 | ||
913 | chip = card->private_data; | 914 | chip = card->private_data; |
914 | hda = container_of(chip, struct hda_intel, chip); | 915 | hda = container_of(chip, struct hda_intel, chip); |
916 | bus = azx_bus(chip); | ||
915 | if (chip->disabled || hda->init_failed || !chip->running) | 917 | if (chip->disabled || hda->init_failed || !chip->running) |
916 | return 0; | 918 | return 0; |
917 | 919 | ||
918 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL | 920 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { |
919 | && hda->need_i915_power) { | 921 | snd_hdac_display_power(bus, true); |
920 | snd_hdac_display_power(azx_bus(chip), true); | 922 | if (hda->need_i915_power) |
921 | snd_hdac_i915_set_bclk(azx_bus(chip)); | 923 | snd_hdac_i915_set_bclk(bus); |
922 | } | 924 | } |
925 | |||
923 | if (chip->msi) | 926 | if (chip->msi) |
924 | if (pci_enable_msi(pci) < 0) | 927 | if (pci_enable_msi(pci) < 0) |
925 | chip->msi = 0; | 928 | chip->msi = 0; |
@@ -929,6 +932,11 @@ static int azx_resume(struct device *dev) | |||
929 | 932 | ||
930 | hda_intel_init_chip(chip, true); | 933 | hda_intel_init_chip(chip, true); |
931 | 934 | ||
935 | /* power down again for link-controlled chips */ | ||
936 | if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && | ||
937 | !hda->need_i915_power) | ||
938 | snd_hdac_display_power(bus, false); | ||
939 | |||
932 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); | 940 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); |
933 | 941 | ||
934 | trace_azx_resume(chip); | 942 | trace_azx_resume(chip); |
@@ -1008,6 +1016,7 @@ static int azx_runtime_resume(struct device *dev) | |||
1008 | 1016 | ||
1009 | chip = card->private_data; | 1017 | chip = card->private_data; |
1010 | hda = container_of(chip, struct hda_intel, chip); | 1018 | hda = container_of(chip, struct hda_intel, chip); |
1019 | bus = azx_bus(chip); | ||
1011 | if (chip->disabled || hda->init_failed) | 1020 | if (chip->disabled || hda->init_failed) |
1012 | return 0; | 1021 | return 0; |
1013 | 1022 | ||
@@ -1015,15 +1024,9 @@ static int azx_runtime_resume(struct device *dev) | |||
1015 | return 0; | 1024 | return 0; |
1016 | 1025 | ||
1017 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { | 1026 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { |
1018 | bus = azx_bus(chip); | 1027 | snd_hdac_display_power(bus, true); |
1019 | if (hda->need_i915_power) { | 1028 | if (hda->need_i915_power) |
1020 | snd_hdac_display_power(bus, true); | ||
1021 | snd_hdac_i915_set_bclk(bus); | 1029 | snd_hdac_i915_set_bclk(bus); |
1022 | } else { | ||
1023 | /* toggle codec wakeup bit for STATESTS read */ | ||
1024 | snd_hdac_set_codec_wakeup(bus, true); | ||
1025 | snd_hdac_set_codec_wakeup(bus, false); | ||
1026 | } | ||
1027 | } | 1030 | } |
1028 | 1031 | ||
1029 | /* Read STATESTS before controller reset */ | 1032 | /* Read STATESTS before controller reset */ |
@@ -1043,6 +1046,11 @@ static int azx_runtime_resume(struct device *dev) | |||
1043 | azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & | 1046 | azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & |
1044 | ~STATESTS_INT_MASK); | 1047 | ~STATESTS_INT_MASK); |
1045 | 1048 | ||
1049 | /* power down again for link-controlled chips */ | ||
1050 | if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && | ||
1051 | !hda->need_i915_power) | ||
1052 | snd_hdac_display_power(bus, false); | ||
1053 | |||
1046 | trace_azx_runtime_resume(chip); | 1054 | trace_azx_runtime_resume(chip); |
1047 | return 0; | 1055 | return 0; |
1048 | } | 1056 | } |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 6adde457b602..6cf1f3597455 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1128,6 +1128,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
1128 | { | 1128 | { |
1129 | /* devices which do not support reading the sample rate. */ | 1129 | /* devices which do not support reading the sample rate. */ |
1130 | switch (chip->usb_id) { | 1130 | switch (chip->usb_id) { |
1131 | case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */ | ||
1131 | case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ | 1132 | case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ |
1132 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ | 1133 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ |
1133 | case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */ | 1134 | case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */ |
@@ -1138,6 +1139,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
1138 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ | 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ |
1139 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1140 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
1140 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ | 1141 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ |
1142 | case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ | ||
1141 | case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ | 1143 | case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ |
1142 | case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ | 1144 | case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ |
1143 | case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ | 1145 | case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 4a413485f9eb..92a8308b96f6 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -225,7 +225,6 @@ | |||
225 | #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ | 225 | #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ |
226 | #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ | 226 | #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ |
227 | #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ | 227 | #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ |
228 | #define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ | ||
229 | #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ | 228 | #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ |
230 | #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ | 229 | #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ |
231 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ | 230 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
@@ -301,10 +300,6 @@ | |||
301 | #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ | 300 | #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ |
302 | #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ | 301 | #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ |
303 | #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ | 302 | #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ |
304 | #define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */ | ||
305 | #define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */ | ||
306 | |||
307 | |||
308 | #ifdef CONFIG_X86_32 | 303 | #ifdef CONFIG_X86_32 |
309 | /* | 304 | /* |
310 | * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional | 305 | * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional |
@@ -312,5 +307,7 @@ | |||
312 | */ | 307 | */ |
313 | #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ | 308 | #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ |
314 | #endif | 309 | #endif |
315 | 310 | #define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ | |
311 | #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ | ||
312 | #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ | ||
316 | #endif /* _ASM_X86_CPUFEATURES_H */ | 313 | #endif /* _ASM_X86_CPUFEATURES_H */ |
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 911e9358ceb1..85599ad4d024 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h | |||
@@ -56,5 +56,7 @@ | |||
56 | #define DISABLED_MASK14 0 | 56 | #define DISABLED_MASK14 0 |
57 | #define DISABLED_MASK15 0 | 57 | #define DISABLED_MASK15 0 |
58 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) | 58 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) |
59 | #define DISABLED_MASK17 0 | ||
60 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | ||
59 | 61 | ||
60 | #endif /* _ASM_X86_DISABLED_FEATURES_H */ | 62 | #endif /* _ASM_X86_DISABLED_FEATURES_H */ |
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 4916144e3c42..fac9a5c0abe9 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h | |||
@@ -99,5 +99,7 @@ | |||
99 | #define REQUIRED_MASK14 0 | 99 | #define REQUIRED_MASK14 0 |
100 | #define REQUIRED_MASK15 0 | 100 | #define REQUIRED_MASK15 0 |
101 | #define REQUIRED_MASK16 0 | 101 | #define REQUIRED_MASK16 0 |
102 | #define REQUIRED_MASK17 0 | ||
103 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | ||
102 | 104 | ||
103 | #endif /* _ASM_X86_REQUIRED_FEATURES_H */ | 105 | #endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index 5b15d94a33f8..37fee272618f 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h | |||
@@ -78,7 +78,6 @@ | |||
78 | #define EXIT_REASON_PML_FULL 62 | 78 | #define EXIT_REASON_PML_FULL 62 |
79 | #define EXIT_REASON_XSAVES 63 | 79 | #define EXIT_REASON_XSAVES 63 |
80 | #define EXIT_REASON_XRSTORS 64 | 80 | #define EXIT_REASON_XRSTORS 64 |
81 | #define EXIT_REASON_PCOMMIT 65 | ||
82 | 81 | ||
83 | #define VMX_EXIT_REASONS \ | 82 | #define VMX_EXIT_REASONS \ |
84 | { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ | 83 | { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ |
@@ -127,8 +126,7 @@ | |||
127 | { EXIT_REASON_INVVPID, "INVVPID" }, \ | 126 | { EXIT_REASON_INVVPID, "INVVPID" }, \ |
128 | { EXIT_REASON_INVPCID, "INVPCID" }, \ | 127 | { EXIT_REASON_INVPCID, "INVPCID" }, \ |
129 | { EXIT_REASON_XSAVES, "XSAVES" }, \ | 128 | { EXIT_REASON_XSAVES, "XSAVES" }, \ |
130 | { EXIT_REASON_XRSTORS, "XRSTORS" }, \ | 129 | { EXIT_REASON_XRSTORS, "XRSTORS" } |
131 | { EXIT_REASON_PCOMMIT, "PCOMMIT" } | ||
132 | 130 | ||
133 | #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 | 131 | #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 |
134 | #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 | 132 | #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 406459b935a2..da218fec6056 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -84,6 +84,7 @@ enum bpf_map_type { | |||
84 | BPF_MAP_TYPE_PERCPU_HASH, | 84 | BPF_MAP_TYPE_PERCPU_HASH, |
85 | BPF_MAP_TYPE_PERCPU_ARRAY, | 85 | BPF_MAP_TYPE_PERCPU_ARRAY, |
86 | BPF_MAP_TYPE_STACK_TRACE, | 86 | BPF_MAP_TYPE_STACK_TRACE, |
87 | BPF_MAP_TYPE_CGROUP_ARRAY, | ||
87 | }; | 88 | }; |
88 | 89 | ||
89 | enum bpf_prog_type { | 90 | enum bpf_prog_type { |
@@ -93,6 +94,7 @@ enum bpf_prog_type { | |||
93 | BPF_PROG_TYPE_SCHED_CLS, | 94 | BPF_PROG_TYPE_SCHED_CLS, |
94 | BPF_PROG_TYPE_SCHED_ACT, | 95 | BPF_PROG_TYPE_SCHED_ACT, |
95 | BPF_PROG_TYPE_TRACEPOINT, | 96 | BPF_PROG_TYPE_TRACEPOINT, |
97 | BPF_PROG_TYPE_XDP, | ||
96 | }; | 98 | }; |
97 | 99 | ||
98 | #define BPF_PSEUDO_MAP_FD 1 | 100 | #define BPF_PSEUDO_MAP_FD 1 |
@@ -313,6 +315,66 @@ enum bpf_func_id { | |||
313 | */ | 315 | */ |
314 | BPF_FUNC_skb_get_tunnel_opt, | 316 | BPF_FUNC_skb_get_tunnel_opt, |
315 | BPF_FUNC_skb_set_tunnel_opt, | 317 | BPF_FUNC_skb_set_tunnel_opt, |
318 | |||
319 | /** | ||
320 | * bpf_skb_change_proto(skb, proto, flags) | ||
321 | * Change protocol of the skb. Currently supported is | ||
322 | * v4 -> v6, v6 -> v4 transitions. The helper will also | ||
323 | * resize the skb. eBPF program is expected to fill the | ||
324 | * new headers via skb_store_bytes and lX_csum_replace. | ||
325 | * @skb: pointer to skb | ||
326 | * @proto: new skb->protocol type | ||
327 | * @flags: reserved | ||
328 | * Return: 0 on success or negative error | ||
329 | */ | ||
330 | BPF_FUNC_skb_change_proto, | ||
331 | |||
332 | /** | ||
333 | * bpf_skb_change_type(skb, type) | ||
334 | * Change packet type of skb. | ||
335 | * @skb: pointer to skb | ||
336 | * @type: new skb->pkt_type type | ||
337 | * Return: 0 on success or negative error | ||
338 | */ | ||
339 | BPF_FUNC_skb_change_type, | ||
340 | |||
341 | /** | ||
342 | * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb | ||
343 | * @skb: pointer to skb | ||
344 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | ||
345 | * @index: index of the cgroup in the bpf_map | ||
346 | * Return: | ||
347 | * == 0 skb failed the cgroup2 descendant test | ||
348 | * == 1 skb succeeded the cgroup2 descendant test | ||
349 | * < 0 error | ||
350 | */ | ||
351 | BPF_FUNC_skb_in_cgroup, | ||
352 | |||
353 | /** | ||
354 | * bpf_get_hash_recalc(skb) | ||
355 | * Retrieve and possibly recalculate skb->hash. | ||
356 | * @skb: pointer to skb | ||
357 | * Return: hash | ||
358 | */ | ||
359 | BPF_FUNC_get_hash_recalc, | ||
360 | |||
361 | /** | ||
362 | * u64 bpf_get_current_task(void) | ||
363 | * Returns current task_struct | ||
364 | * Return: current | ||
365 | */ | ||
366 | BPF_FUNC_get_current_task, | ||
367 | |||
368 | /** | ||
369 | * bpf_probe_write_user(void *dst, void *src, int len) | ||
370 | * safely attempt to write to a location | ||
371 | * @dst: destination address in userspace | ||
372 | * @src: source address on stack | ||
373 | * @len: number of bytes to copy | ||
374 | * Return: 0 on success or negative error | ||
375 | */ | ||
376 | BPF_FUNC_probe_write_user, | ||
377 | |||
316 | __BPF_FUNC_MAX_ID, | 378 | __BPF_FUNC_MAX_ID, |
317 | }; | 379 | }; |
318 | 380 | ||
@@ -347,9 +409,11 @@ enum bpf_func_id { | |||
347 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) | 409 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) |
348 | #define BPF_F_DONT_FRAGMENT (1ULL << 2) | 410 | #define BPF_F_DONT_FRAGMENT (1ULL << 2) |
349 | 411 | ||
350 | /* BPF_FUNC_perf_event_output flags. */ | 412 | /* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */ |
351 | #define BPF_F_INDEX_MASK 0xffffffffULL | 413 | #define BPF_F_INDEX_MASK 0xffffffffULL |
352 | #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK | 414 | #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK |
415 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ | ||
416 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) | ||
353 | 417 | ||
354 | /* user accessible mirror of in-kernel sk_buff. | 418 | /* user accessible mirror of in-kernel sk_buff. |
355 | * new fields can only be added to the end of this structure | 419 | * new fields can only be added to the end of this structure |
@@ -386,4 +450,24 @@ struct bpf_tunnel_key { | |||
386 | __u32 tunnel_label; | 450 | __u32 tunnel_label; |
387 | }; | 451 | }; |
388 | 452 | ||
453 | /* User return codes for XDP prog type. | ||
454 | * A valid XDP program must return one of these defined values. All other | ||
455 | * return codes are reserved for future use. Unknown return codes will result | ||
456 | * in packet drop. | ||
457 | */ | ||
458 | enum xdp_action { | ||
459 | XDP_ABORTED = 0, | ||
460 | XDP_DROP, | ||
461 | XDP_PASS, | ||
462 | XDP_TX, | ||
463 | }; | ||
464 | |||
465 | /* user accessible metadata for XDP packet hook | ||
466 | * new fields must be added to the end of this structure | ||
467 | */ | ||
468 | struct xdp_md { | ||
469 | __u32 data; | ||
470 | __u32 data_end; | ||
471 | }; | ||
472 | |||
389 | #endif /* _UAPI__LINUX_BPF_H__ */ | 473 | #endif /* _UAPI__LINUX_BPF_H__ */ |
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 736da44596e4..b303bcdd8ed1 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt | |||
@@ -176,10 +176,18 @@ Each probe argument follows below syntax. | |||
176 | 176 | ||
177 | 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.) | 177 | 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.) |
178 | '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters. | 178 | '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters. |
179 | 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. | 179 | 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for detail) |
180 | 180 | ||
181 | On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid. | 181 | On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid. |
182 | 182 | ||
183 | TYPES | ||
184 | ----- | ||
185 | Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. Prefix 's' and 'u' means those types are signed and unsigned respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only signedness and leave its size auto-detected by perf probe. | ||
186 | String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. | ||
187 | Bitfield is another special type, which takes 3 parameters, bit-width, bit-offset, and container-size (usually 32). The syntax is; | ||
188 | |||
189 | b<bit-width>@<bit-offset>/<container-size> | ||
190 | |||
183 | LINE SYNTAX | 191 | LINE SYNTAX |
184 | ----------- | 192 | ----------- |
185 | Line range is described by following syntax. | 193 | Line range is described by following syntax. |
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 1f6c70594f0f..053bbbd84ece 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -116,8 +116,8 @@ OPTIONS | |||
116 | --fields:: | 116 | --fields:: |
117 | Comma separated list of fields to print. Options are: | 117 | Comma separated list of fields to print. Options are: |
118 | comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, | 118 | comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, |
119 | srcline, period, iregs, brstack, brstacksym, flags. | 119 | srcline, period, iregs, brstack, brstacksym, flags, bpf-output, |
120 | Field list can be prepended with the type, trace, sw or hw, | 120 | callindent. Field list can be prepended with the type, trace, sw or hw, |
121 | to indicate to which event type the field list applies. | 121 | to indicate to which event type the field list applies. |
122 | e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace | 122 | e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace |
123 | 123 | ||
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index c6d0f91731a1..8d4dc97d80ba 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c | |||
@@ -54,10 +54,6 @@ int arch__compare_symbol_names(const char *namea, const char *nameb) | |||
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | 56 | #if defined(_CALL_ELF) && _CALL_ELF == 2 |
57 | bool arch__prefers_symtab(void) | ||
58 | { | ||
59 | return true; | ||
60 | } | ||
61 | 57 | ||
62 | #ifdef HAVE_LIBELF_SUPPORT | 58 | #ifdef HAVE_LIBELF_SUPPORT |
63 | void arch__sym_update(struct symbol *s, GElf_Sym *sym) | 59 | void arch__sym_update(struct symbol *s, GElf_Sym *sym) |
@@ -100,4 +96,27 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev, | |||
100 | tev->point.offset += lep_offset; | 96 | tev->point.offset += lep_offset; |
101 | } | 97 | } |
102 | } | 98 | } |
99 | |||
100 | void arch__post_process_probe_trace_events(struct perf_probe_event *pev, | ||
101 | int ntevs) | ||
102 | { | ||
103 | struct probe_trace_event *tev; | ||
104 | struct map *map; | ||
105 | struct symbol *sym = NULL; | ||
106 | struct rb_node *tmp; | ||
107 | int i = 0; | ||
108 | |||
109 | map = get_target_map(pev->target, pev->uprobes); | ||
110 | if (!map || map__load(map, NULL) < 0) | ||
111 | return; | ||
112 | |||
113 | for (i = 0; i < ntevs; i++) { | ||
114 | tev = &pev->tevs[i]; | ||
115 | map__for_each_symbol(map, sym, tmp) { | ||
116 | if (map->unmap_ip(map, sym->start) == tev->point.address) | ||
117 | arch__fix_tev_from_maps(pev, tev, map, sym); | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | |||
103 | #endif | 122 | #endif |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 971ff91b16cb..9c640a8081c7 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -2116,7 +2116,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) | |||
2116 | "Valid types: hw,sw,trace,raw. " | 2116 | "Valid types: hw,sw,trace,raw. " |
2117 | "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," | 2117 | "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," |
2118 | "addr,symoff,period,iregs,brstack,brstacksym,flags," | 2118 | "addr,symoff,period,iregs,brstack,brstacksym,flags," |
2119 | "callindent", parse_output_fields), | 2119 | "bpf-output,callindent", parse_output_fields), |
2120 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 2120 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
2121 | "system-wide collection from all CPUs"), | 2121 | "system-wide collection from all CPUs"), |
2122 | OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", | 2122 | OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0c16d20d7e32..3c7452b39f57 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter) | |||
331 | return 0; | 331 | return 0; |
332 | } | 332 | } |
333 | 333 | ||
334 | static void read_counters(bool close_counters) | 334 | static void read_counters(void) |
335 | { | 335 | { |
336 | struct perf_evsel *counter; | 336 | struct perf_evsel *counter; |
337 | 337 | ||
@@ -341,11 +341,6 @@ static void read_counters(bool close_counters) | |||
341 | 341 | ||
342 | if (perf_stat_process_counter(&stat_config, counter)) | 342 | if (perf_stat_process_counter(&stat_config, counter)) |
343 | pr_warning("failed to process counter %s\n", counter->name); | 343 | pr_warning("failed to process counter %s\n", counter->name); |
344 | |||
345 | if (close_counters) { | ||
346 | perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), | ||
347 | thread_map__nr(evsel_list->threads)); | ||
348 | } | ||
349 | } | 344 | } |
350 | } | 345 | } |
351 | 346 | ||
@@ -353,7 +348,7 @@ static void process_interval(void) | |||
353 | { | 348 | { |
354 | struct timespec ts, rs; | 349 | struct timespec ts, rs; |
355 | 350 | ||
356 | read_counters(false); | 351 | read_counters(); |
357 | 352 | ||
358 | clock_gettime(CLOCK_MONOTONIC, &ts); | 353 | clock_gettime(CLOCK_MONOTONIC, &ts); |
359 | diff_timespec(&rs, &ts, &ref_time); | 354 | diff_timespec(&rs, &ts, &ref_time); |
@@ -380,6 +375,17 @@ static void enable_counters(void) | |||
380 | perf_evlist__enable(evsel_list); | 375 | perf_evlist__enable(evsel_list); |
381 | } | 376 | } |
382 | 377 | ||
378 | static void disable_counters(void) | ||
379 | { | ||
380 | /* | ||
381 | * If we don't have tracee (attaching to task or cpu), counters may | ||
382 | * still be running. To get accurate group ratios, we must stop groups | ||
383 | * from counting before reading their constituent counters. | ||
384 | */ | ||
385 | if (!target__none(&target)) | ||
386 | perf_evlist__disable(evsel_list); | ||
387 | } | ||
388 | |||
383 | static volatile int workload_exec_errno; | 389 | static volatile int workload_exec_errno; |
384 | 390 | ||
385 | /* | 391 | /* |
@@ -657,11 +663,20 @@ try_again: | |||
657 | } | 663 | } |
658 | } | 664 | } |
659 | 665 | ||
666 | disable_counters(); | ||
667 | |||
660 | t1 = rdclock(); | 668 | t1 = rdclock(); |
661 | 669 | ||
662 | update_stats(&walltime_nsecs_stats, t1 - t0); | 670 | update_stats(&walltime_nsecs_stats, t1 - t0); |
663 | 671 | ||
664 | read_counters(true); | 672 | /* |
673 | * Closing a group leader splits the group, and as we only disable | ||
674 | * group leaders, results in remaining events becoming enabled. To | ||
675 | * avoid arbitrary skew, we must read all counters before closing any | ||
676 | * group leaders. | ||
677 | */ | ||
678 | read_counters(); | ||
679 | perf_evlist__close(evsel_list); | ||
665 | 680 | ||
666 | return WEXITSTATUS(status); | 681 | return WEXITSTATUS(status); |
667 | } | 682 | } |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 953dc1ab2ed7..28733962cd80 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -170,15 +170,17 @@ static struct map *kernel_get_module_map(const char *module) | |||
170 | module = "kernel"; | 170 | module = "kernel"; |
171 | 171 | ||
172 | for (pos = maps__first(maps); pos; pos = map__next(pos)) { | 172 | for (pos = maps__first(maps); pos; pos = map__next(pos)) { |
173 | /* short_name is "[module]" */ | ||
173 | if (strncmp(pos->dso->short_name + 1, module, | 174 | if (strncmp(pos->dso->short_name + 1, module, |
174 | pos->dso->short_name_len - 2) == 0) { | 175 | pos->dso->short_name_len - 2) == 0 && |
176 | module[pos->dso->short_name_len - 2] == '\0') { | ||
175 | return pos; | 177 | return pos; |
176 | } | 178 | } |
177 | } | 179 | } |
178 | return NULL; | 180 | return NULL; |
179 | } | 181 | } |
180 | 182 | ||
181 | static struct map *get_target_map(const char *target, bool user) | 183 | struct map *get_target_map(const char *target, bool user) |
182 | { | 184 | { |
183 | /* Init maps of given executable or kernel */ | 185 | /* Init maps of given executable or kernel */ |
184 | if (user) | 186 | if (user) |
@@ -385,7 +387,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo, | |||
385 | if (uprobes) | 387 | if (uprobes) |
386 | address = sym->start; | 388 | address = sym->start; |
387 | else | 389 | else |
388 | address = map->unmap_ip(map, sym->start); | 390 | address = map->unmap_ip(map, sym->start) - map->reloc; |
389 | break; | 391 | break; |
390 | } | 392 | } |
391 | if (!address) { | 393 | if (!address) { |
@@ -664,22 +666,14 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, | |||
664 | return ret; | 666 | return ret; |
665 | } | 667 | } |
666 | 668 | ||
667 | /* Post processing the probe events */ | 669 | static int |
668 | static int post_process_probe_trace_events(struct probe_trace_event *tevs, | 670 | post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, |
669 | int ntevs, const char *module, | 671 | int ntevs) |
670 | bool uprobe) | ||
671 | { | 672 | { |
672 | struct ref_reloc_sym *reloc_sym; | 673 | struct ref_reloc_sym *reloc_sym; |
673 | char *tmp; | 674 | char *tmp; |
674 | int i, skipped = 0; | 675 | int i, skipped = 0; |
675 | 676 | ||
676 | if (uprobe) | ||
677 | return add_exec_to_probe_trace_events(tevs, ntevs, module); | ||
678 | |||
679 | /* Note that currently ref_reloc_sym based probe is not for drivers */ | ||
680 | if (module) | ||
681 | return add_module_to_probe_trace_events(tevs, ntevs, module); | ||
682 | |||
683 | reloc_sym = kernel_get_ref_reloc_sym(); | 677 | reloc_sym = kernel_get_ref_reloc_sym(); |
684 | if (!reloc_sym) { | 678 | if (!reloc_sym) { |
685 | pr_warning("Relocated base symbol is not found!\n"); | 679 | pr_warning("Relocated base symbol is not found!\n"); |
@@ -711,6 +705,34 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, | |||
711 | return skipped; | 705 | return skipped; |
712 | } | 706 | } |
713 | 707 | ||
708 | void __weak | ||
709 | arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused, | ||
710 | int ntevs __maybe_unused) | ||
711 | { | ||
712 | } | ||
713 | |||
714 | /* Post processing the probe events */ | ||
715 | static int post_process_probe_trace_events(struct perf_probe_event *pev, | ||
716 | struct probe_trace_event *tevs, | ||
717 | int ntevs, const char *module, | ||
718 | bool uprobe) | ||
719 | { | ||
720 | int ret; | ||
721 | |||
722 | if (uprobe) | ||
723 | ret = add_exec_to_probe_trace_events(tevs, ntevs, module); | ||
724 | else if (module) | ||
725 | /* Currently ref_reloc_sym based probe is not for drivers */ | ||
726 | ret = add_module_to_probe_trace_events(tevs, ntevs, module); | ||
727 | else | ||
728 | ret = post_process_kernel_probe_trace_events(tevs, ntevs); | ||
729 | |||
730 | if (ret >= 0) | ||
731 | arch__post_process_probe_trace_events(pev, ntevs); | ||
732 | |||
733 | return ret; | ||
734 | } | ||
735 | |||
714 | /* Try to find perf_probe_event with debuginfo */ | 736 | /* Try to find perf_probe_event with debuginfo */ |
715 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | 737 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, |
716 | struct probe_trace_event **tevs) | 738 | struct probe_trace_event **tevs) |
@@ -749,7 +771,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
749 | 771 | ||
750 | if (ntevs > 0) { /* Succeeded to find trace events */ | 772 | if (ntevs > 0) { /* Succeeded to find trace events */ |
751 | pr_debug("Found %d probe_trace_events.\n", ntevs); | 773 | pr_debug("Found %d probe_trace_events.\n", ntevs); |
752 | ret = post_process_probe_trace_events(*tevs, ntevs, | 774 | ret = post_process_probe_trace_events(pev, *tevs, ntevs, |
753 | pev->target, pev->uprobes); | 775 | pev->target, pev->uprobes); |
754 | if (ret < 0 || ret == ntevs) { | 776 | if (ret < 0 || ret == ntevs) { |
755 | clear_probe_trace_events(*tevs, ntevs); | 777 | clear_probe_trace_events(*tevs, ntevs); |
@@ -2936,8 +2958,6 @@ errout: | |||
2936 | return err; | 2958 | return err; |
2937 | } | 2959 | } |
2938 | 2960 | ||
2939 | bool __weak arch__prefers_symtab(void) { return false; } | ||
2940 | |||
2941 | /* Concatinate two arrays */ | 2961 | /* Concatinate two arrays */ |
2942 | static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b) | 2962 | static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b) |
2943 | { | 2963 | { |
@@ -3158,12 +3178,6 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, | |||
3158 | if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */ | 3178 | if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */ |
3159 | return ret == 0 ? -ENOENT : ret; /* Found in probe cache */ | 3179 | return ret == 0 ? -ENOENT : ret; /* Found in probe cache */ |
3160 | 3180 | ||
3161 | if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) { | ||
3162 | ret = find_probe_trace_events_from_map(pev, tevs); | ||
3163 | if (ret > 0) | ||
3164 | return ret; /* Found in symbol table */ | ||
3165 | } | ||
3166 | |||
3167 | /* Convert perf_probe_event with debuginfo */ | 3181 | /* Convert perf_probe_event with debuginfo */ |
3168 | ret = try_to_find_probe_trace_events(pev, tevs); | 3182 | ret = try_to_find_probe_trace_events(pev, tevs); |
3169 | if (ret != 0) | 3183 | if (ret != 0) |
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index e18ea9fe6385..f4f45db77c1c 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h | |||
@@ -158,7 +158,6 @@ int show_line_range(struct line_range *lr, const char *module, bool user); | |||
158 | int show_available_vars(struct perf_probe_event *pevs, int npevs, | 158 | int show_available_vars(struct perf_probe_event *pevs, int npevs, |
159 | struct strfilter *filter); | 159 | struct strfilter *filter); |
160 | int show_available_funcs(const char *module, struct strfilter *filter, bool user); | 160 | int show_available_funcs(const char *module, struct strfilter *filter, bool user); |
161 | bool arch__prefers_symtab(void); | ||
162 | void arch__fix_tev_from_maps(struct perf_probe_event *pev, | 161 | void arch__fix_tev_from_maps(struct perf_probe_event *pev, |
163 | struct probe_trace_event *tev, struct map *map, | 162 | struct probe_trace_event *tev, struct map *map, |
164 | struct symbol *sym); | 163 | struct symbol *sym); |
@@ -173,4 +172,9 @@ int e_snprintf(char *str, size_t size, const char *format, ...) | |||
173 | int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, | 172 | int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, |
174 | struct perf_probe_arg *pvar); | 173 | struct perf_probe_arg *pvar); |
175 | 174 | ||
175 | struct map *get_target_map(const char *target, bool user); | ||
176 | |||
177 | void arch__post_process_probe_trace_events(struct perf_probe_event *pev, | ||
178 | int ntevs); | ||
179 | |||
176 | #endif /*_PROBE_EVENT_H */ | 180 | #endif /*_PROBE_EVENT_H */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index f2d9ff064e2d..5c290c682afe 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -297,10 +297,13 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
297 | char sbuf[STRERR_BUFSIZE]; | 297 | char sbuf[STRERR_BUFSIZE]; |
298 | int bsize, boffs, total; | 298 | int bsize, boffs, total; |
299 | int ret; | 299 | int ret; |
300 | char sign; | ||
300 | 301 | ||
301 | /* TODO: check all types */ | 302 | /* TODO: check all types */ |
302 | if (cast && strcmp(cast, "string") != 0) { | 303 | if (cast && strcmp(cast, "string") != 0 && |
304 | strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) { | ||
303 | /* Non string type is OK */ | 305 | /* Non string type is OK */ |
306 | /* and respect signedness cast */ | ||
304 | tvar->type = strdup(cast); | 307 | tvar->type = strdup(cast); |
305 | return (tvar->type == NULL) ? -ENOMEM : 0; | 308 | return (tvar->type == NULL) ? -ENOMEM : 0; |
306 | } | 309 | } |
@@ -361,6 +364,13 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
361 | return (tvar->type == NULL) ? -ENOMEM : 0; | 364 | return (tvar->type == NULL) ? -ENOMEM : 0; |
362 | } | 365 | } |
363 | 366 | ||
367 | if (cast && (strcmp(cast, "u") == 0)) | ||
368 | sign = 'u'; | ||
369 | else if (cast && (strcmp(cast, "s") == 0)) | ||
370 | sign = 's'; | ||
371 | else | ||
372 | sign = die_is_signed_type(&type) ? 's' : 'u'; | ||
373 | |||
364 | ret = dwarf_bytesize(&type); | 374 | ret = dwarf_bytesize(&type); |
365 | if (ret <= 0) | 375 | if (ret <= 0) |
366 | /* No size ... try to use default type */ | 376 | /* No size ... try to use default type */ |
@@ -373,8 +383,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
373 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); | 383 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); |
374 | ret = MAX_BASIC_TYPE_BITS; | 384 | ret = MAX_BASIC_TYPE_BITS; |
375 | } | 385 | } |
376 | ret = snprintf(buf, 16, "%c%d", | 386 | ret = snprintf(buf, 16, "%c%d", sign, ret); |
377 | die_is_signed_type(&type) ? 's' : 'u', ret); | ||
378 | 387 | ||
379 | formatted: | 388 | formatted: |
380 | if (ret < 0 || ret >= 16) { | 389 | if (ret < 0 || ret >= 16) { |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 947d21f38398..3d3cb8392c86 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -588,7 +588,11 @@ static char *get_trace_output(struct hist_entry *he) | |||
588 | } else { | 588 | } else { |
589 | pevent_event_info(&seq, evsel->tp_format, &rec); | 589 | pevent_event_info(&seq, evsel->tp_format, &rec); |
590 | } | 590 | } |
591 | return seq.buffer; | 591 | /* |
592 | * Trim the buffer, it starts at 4KB and we're not going to | ||
593 | * add anything more to this buffer. | ||
594 | */ | ||
595 | return realloc(seq.buffer, seq.len + 1); | ||
592 | } | 596 | } |
593 | 597 | ||
594 | static int64_t | 598 | static int64_t |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 5404efa578a3..dd48f421844c 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/workqueue.h> | ||
16 | #include <linux/libnvdimm.h> | 17 | #include <linux/libnvdimm.h> |
17 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
18 | #include <linux/device.h> | 19 | #include <linux/device.h> |
@@ -1474,6 +1475,7 @@ static int nfit_test_probe(struct platform_device *pdev) | |||
1474 | if (nfit_test->setup != nfit_test0_setup) | 1475 | if (nfit_test->setup != nfit_test0_setup) |
1475 | return 0; | 1476 | return 0; |
1476 | 1477 | ||
1478 | flush_work(&acpi_desc->work); | ||
1477 | nfit_test->setup_hotplug = 1; | 1479 | nfit_test->setup_hotplug = 1; |
1478 | nfit_test->setup(nfit_test); | 1480 | nfit_test->setup(nfit_test); |
1479 | 1481 | ||
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 3c40c9d0e6c7..1cc6d64c39b7 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile | |||
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc) | |||
8 | 8 | ||
9 | GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") | 9 | GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") |
10 | 10 | ||
11 | CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) | 11 | CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) |
12 | 12 | ||
13 | export CFLAGS | 13 | export CFLAGS |
14 | 14 | ||