diff options
576 files changed, 7572 insertions, 7901 deletions
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index 31d1d658827f..c0d8788e75d3 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt | |||
| @@ -587,7 +587,7 @@ used to control it: | |||
| 587 | 587 | ||
| 588 | modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type> | 588 | modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type> |
| 589 | preaction=<preaction type> preop=<preop type> start_now=x | 589 | preaction=<preaction type> preop=<preop type> start_now=x |
| 590 | nowayout=x ifnum_to_use=n | 590 | nowayout=x ifnum_to_use=n panic_wdt_timeout=<t> |
| 591 | 591 | ||
| 592 | ifnum_to_use specifies which interface the watchdog timer should use. | 592 | ifnum_to_use specifies which interface the watchdog timer should use. |
| 593 | The default is -1, which means to pick the first one registered. | 593 | The default is -1, which means to pick the first one registered. |
| @@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will | |||
| 597 | occur (if pretimeout is zero, then pretimeout will not be enabled). Note | 597 | occur (if pretimeout is zero, then pretimeout will not be enabled). Note |
| 598 | that the pretimeout is the time before the final timeout. So if the | 598 | that the pretimeout is the time before the final timeout. So if the |
| 599 | timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout | 599 | timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout |
| 600 | will occur in 40 second (10 seconds before the timeout). | 600 | will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout |
| 601 | is the value of timeout which is set on kernel panic, in order to let actions | ||
| 602 | such as kdump to occur during panic. | ||
| 601 | 603 | ||
| 602 | The action may be "reset", "power_cycle", or "power_off", and | 604 | The action may be "reset", "power_cycle", or "power_off", and |
| 603 | specifies what to do when the timer times out, and defaults to | 605 | specifies what to do when the timer times out, and defaults to |
| @@ -634,6 +636,7 @@ for configuring the watchdog: | |||
| 634 | ipmi_watchdog.preop=<preop type> | 636 | ipmi_watchdog.preop=<preop type> |
| 635 | ipmi_watchdog.start_now=x | 637 | ipmi_watchdog.start_now=x |
| 636 | ipmi_watchdog.nowayout=x | 638 | ipmi_watchdog.nowayout=x |
| 639 | ipmi_watchdog.panic_wdt_timeout=<t> | ||
| 637 | 640 | ||
| 638 | The options are the same as the module parameter options. | 641 | The options are the same as the module parameter options. |
| 639 | 642 | ||
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt index f17bc4c9dff9..400c0c270d2e 100644 --- a/Documentation/arm/keystone/Overview.txt +++ b/Documentation/arm/keystone/Overview.txt | |||
| @@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:- | |||
| 49 | The device tree documentation for the keystone machines are located at | 49 | The device tree documentation for the keystone machines are located at |
| 50 | Documentation/devicetree/bindings/arm/keystone/keystone.txt | 50 | Documentation/devicetree/bindings/arm/keystone/keystone.txt |
| 51 | 51 | ||
| 52 | Known issues & workaround | ||
| 53 | ------------------------- | ||
| 54 | |||
| 55 | Some of the device drivers used on keystone are re-used from that from | ||
| 56 | DaVinci and other TI SoCs. These device drivers may use clock APIs directly. | ||
| 57 | Some of the keystone specific drivers such as netcp uses run time power | ||
| 58 | management API instead to enable clock. As this API has limitations on | ||
| 59 | keystone, following workaround is needed to boot Linux. | ||
| 60 | |||
| 61 | Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise | ||
| 62 | clock frameworks will try to disable clocks that are unused and disable | ||
| 63 | the hardware. This is because netcp related power domain and clock | ||
| 64 | domains are enabled in u-boot as run time power management API currently | ||
| 65 | doesn't enable clocks for netcp due to a limitation. This workaround is | ||
| 66 | expected to be removed in the future when proper API support becomes | ||
| 67 | available. Until then, this work around is needed. | ||
| 68 | |||
| 69 | |||
| 70 | Document Author | 52 | Document Author |
| 71 | --------------- | 53 | --------------- |
| 72 | Murali Karicheri <m-karicheri2@ti.com> | 54 | Murali Karicheri <m-karicheri2@ti.com> |
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index 2f6c6ff7161d..d8880ca30af4 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt | |||
| @@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0 | |||
| 70 | parameter. | 70 | parameter. |
| 71 | 1: The multi-queue block layer is instantiated with a hardware dispatch | 71 | 1: The multi-queue block layer is instantiated with a hardware dispatch |
| 72 | queue for each CPU node in the system. | 72 | queue for each CPU node in the system. |
| 73 | |||
| 74 | use_lightnvm=[0/1]: Default: 0 | ||
| 75 | Register device with LightNVM. Requires blk-mq to be used. | ||
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt index b38200d2583a..0dfa60d88dd3 100644 --- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt | |||
| @@ -1,7 +1,9 @@ | |||
| 1 | * Temperature Sensor ADC (TSADC) on rockchip SoCs | 1 | * Temperature Sensor ADC (TSADC) on rockchip SoCs |
| 2 | 2 | ||
| 3 | Required properties: | 3 | Required properties: |
| 4 | - compatible : "rockchip,rk3288-tsadc" | 4 | - compatible : should be "rockchip,<name>-tsadc" |
| 5 | "rockchip,rk3288-tsadc": found on RK3288 SoCs | ||
| 6 | "rockchip,rk3368-tsadc": found on RK3368 SoCs | ||
| 5 | - reg : physical base address of the controller and length of memory mapped | 7 | - reg : physical base address of the controller and length of memory mapped |
| 6 | region. | 8 | region. |
| 7 | - interrupts : The interrupt number to the cpu. The interrupt specifier format | 9 | - interrupts : The interrupt number to the cpu. The interrupt specifier format |
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index 6a4b1af724f8..1bba38dd2637 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 | |||
| @@ -32,6 +32,7 @@ Supported adapters: | |||
| 32 | * Intel Sunrise Point-LP (PCH) | 32 | * Intel Sunrise Point-LP (PCH) |
| 33 | * Intel DNV (SOC) | 33 | * Intel DNV (SOC) |
| 34 | * Intel Broxton (SOC) | 34 | * Intel Broxton (SOC) |
| 35 | * Intel Lewisburg (PCH) | ||
| 35 | Datasheets: Publicly available at the Intel website | 36 | Datasheets: Publicly available at the Intel website |
| 36 | 37 | ||
| 37 | On Intel Patsburg and later chipsets, both the normal host SMBus controller | 38 | On Intel Patsburg and later chipsets, both the normal host SMBus controller |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f8aae632f02f..742f69d18fc8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
| 1583 | hwp_only | 1583 | hwp_only |
| 1584 | Only load intel_pstate on systems which support | 1584 | Only load intel_pstate on systems which support |
| 1585 | hardware P state control (HWP) if available. | 1585 | hardware P state control (HWP) if available. |
| 1586 | no_acpi | ||
| 1587 | Don't use ACPI processor performance control objects | ||
| 1588 | _PSS and _PPC specified limits. | ||
| 1589 | 1586 | ||
| 1590 | intremap= [X86-64, Intel-IOMMU] | 1587 | intremap= [X86-64, Intel-IOMMU] |
| 1591 | on enable Interrupt Remapping (default) | 1588 | on enable Interrupt Remapping (default) |
diff --git a/MAINTAINERS b/MAINTAINERS index e9caa4b28828..cba790b42f23 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1931,7 +1931,7 @@ S: Supported | |||
| 1931 | F: drivers/i2c/busses/i2c-at91.c | 1931 | F: drivers/i2c/busses/i2c-at91.c |
| 1932 | 1932 | ||
| 1933 | ATMEL ISI DRIVER | 1933 | ATMEL ISI DRIVER |
| 1934 | M: Josh Wu <josh.wu@atmel.com> | 1934 | M: Ludovic Desroches <ludovic.desroches@atmel.com> |
| 1935 | L: linux-media@vger.kernel.org | 1935 | L: linux-media@vger.kernel.org |
| 1936 | S: Supported | 1936 | S: Supported |
| 1937 | F: drivers/media/platform/soc_camera/atmel-isi.c | 1937 | F: drivers/media/platform/soc_camera/atmel-isi.c |
| @@ -1950,7 +1950,8 @@ S: Supported | |||
| 1950 | F: drivers/net/ethernet/cadence/ | 1950 | F: drivers/net/ethernet/cadence/ |
| 1951 | 1951 | ||
| 1952 | ATMEL NAND DRIVER | 1952 | ATMEL NAND DRIVER |
| 1953 | M: Josh Wu <josh.wu@atmel.com> | 1953 | M: Wenyou Yang <wenyou.yang@atmel.com> |
| 1954 | M: Josh Wu <rainyfeeling@outlook.com> | ||
| 1954 | L: linux-mtd@lists.infradead.org | 1955 | L: linux-mtd@lists.infradead.org |
| 1955 | S: Supported | 1956 | S: Supported |
| 1956 | F: drivers/mtd/nand/atmel_nand* | 1957 | F: drivers/mtd/nand/atmel_nand* |
| @@ -2449,7 +2450,9 @@ F: drivers/firmware/broadcom/* | |||
| 2449 | 2450 | ||
| 2450 | BROADCOM STB NAND FLASH DRIVER | 2451 | BROADCOM STB NAND FLASH DRIVER |
| 2451 | M: Brian Norris <computersforpeace@gmail.com> | 2452 | M: Brian Norris <computersforpeace@gmail.com> |
| 2453 | M: Kamal Dasu <kdasu.kdev@gmail.com> | ||
| 2452 | L: linux-mtd@lists.infradead.org | 2454 | L: linux-mtd@lists.infradead.org |
| 2455 | L: bcm-kernel-feedback-list@broadcom.com | ||
| 2453 | S: Maintained | 2456 | S: Maintained |
| 2454 | F: drivers/mtd/nand/brcmnand/ | 2457 | F: drivers/mtd/nand/brcmnand/ |
| 2455 | 2458 | ||
| @@ -2546,7 +2549,7 @@ F: arch/c6x/ | |||
| 2546 | 2549 | ||
| 2547 | CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS | 2550 | CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS |
| 2548 | M: David Howells <dhowells@redhat.com> | 2551 | M: David Howells <dhowells@redhat.com> |
| 2549 | L: linux-cachefs@redhat.com | 2552 | L: linux-cachefs@redhat.com (moderated for non-subscribers) |
| 2550 | S: Supported | 2553 | S: Supported |
| 2551 | F: Documentation/filesystems/caching/cachefiles.txt | 2554 | F: Documentation/filesystems/caching/cachefiles.txt |
| 2552 | F: fs/cachefiles/ | 2555 | F: fs/cachefiles/ |
| @@ -2929,10 +2932,9 @@ S: Maintained | |||
| 2929 | F: drivers/platform/x86/compal-laptop.c | 2932 | F: drivers/platform/x86/compal-laptop.c |
| 2930 | 2933 | ||
| 2931 | CONEXANT ACCESSRUNNER USB DRIVER | 2934 | CONEXANT ACCESSRUNNER USB DRIVER |
| 2932 | M: Simon Arlott <cxacru@fire.lp0.eu> | ||
| 2933 | L: accessrunner-general@lists.sourceforge.net | 2935 | L: accessrunner-general@lists.sourceforge.net |
| 2934 | W: http://accessrunner.sourceforge.net/ | 2936 | W: http://accessrunner.sourceforge.net/ |
| 2935 | S: Maintained | 2937 | S: Orphan |
| 2936 | F: drivers/usb/atm/cxacru.c | 2938 | F: drivers/usb/atm/cxacru.c |
| 2937 | 2939 | ||
| 2938 | CONFIGFS | 2940 | CONFIGFS |
| @@ -4409,6 +4411,7 @@ K: fmc_d.*register | |||
| 4409 | 4411 | ||
| 4410 | FPGA MANAGER FRAMEWORK | 4412 | FPGA MANAGER FRAMEWORK |
| 4411 | M: Alan Tull <atull@opensource.altera.com> | 4413 | M: Alan Tull <atull@opensource.altera.com> |
| 4414 | R: Moritz Fischer <moritz.fischer@ettus.com> | ||
| 4412 | S: Maintained | 4415 | S: Maintained |
| 4413 | F: drivers/fpga/ | 4416 | F: drivers/fpga/ |
| 4414 | F: include/linux/fpga/fpga-mgr.h | 4417 | F: include/linux/fpga/fpga-mgr.h |
| @@ -4559,7 +4562,7 @@ F: include/linux/frontswap.h | |||
| 4559 | 4562 | ||
| 4560 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS | 4563 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS |
| 4561 | M: David Howells <dhowells@redhat.com> | 4564 | M: David Howells <dhowells@redhat.com> |
| 4562 | L: linux-cachefs@redhat.com | 4565 | L: linux-cachefs@redhat.com (moderated for non-subscribers) |
| 4563 | S: Supported | 4566 | S: Supported |
| 4564 | F: Documentation/filesystems/caching/ | 4567 | F: Documentation/filesystems/caching/ |
| 4565 | F: fs/fscache/ | 4568 | F: fs/fscache/ |
| @@ -5711,13 +5714,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> | |||
| 5711 | S: Maintained | 5714 | S: Maintained |
| 5712 | F: net/ipv4/netfilter/ipt_MASQUERADE.c | 5715 | F: net/ipv4/netfilter/ipt_MASQUERADE.c |
| 5713 | 5716 | ||
| 5714 | IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER | ||
| 5715 | M: Francois Romieu <romieu@fr.zoreil.com> | ||
| 5716 | M: Sorbica Shieh <sorbica@icplus.com.tw> | ||
| 5717 | L: netdev@vger.kernel.org | ||
| 5718 | S: Maintained | ||
| 5719 | F: drivers/net/ethernet/icplus/ipg.* | ||
| 5720 | |||
| 5721 | IPATH DRIVER | 5717 | IPATH DRIVER |
| 5722 | M: Mike Marciniszyn <infinipath@intel.com> | 5718 | M: Mike Marciniszyn <infinipath@intel.com> |
| 5723 | L: linux-rdma@vger.kernel.org | 5719 | L: linux-rdma@vger.kernel.org |
| @@ -6371,6 +6367,7 @@ F: arch/*/include/asm/pmem.h | |||
| 6371 | LIGHTNVM PLATFORM SUPPORT | 6367 | LIGHTNVM PLATFORM SUPPORT |
| 6372 | M: Matias Bjorling <mb@lightnvm.io> | 6368 | M: Matias Bjorling <mb@lightnvm.io> |
| 6373 | W: http://github/OpenChannelSSD | 6369 | W: http://github/OpenChannelSSD |
| 6370 | L: linux-block@vger.kernel.org | ||
| 6374 | S: Maintained | 6371 | S: Maintained |
| 6375 | F: drivers/lightnvm/ | 6372 | F: drivers/lightnvm/ |
| 6376 | F: include/linux/lightnvm.h | 6373 | F: include/linux/lightnvm.h |
| @@ -6923,13 +6920,21 @@ F: drivers/scsi/megaraid.* | |||
| 6923 | F: drivers/scsi/megaraid/ | 6920 | F: drivers/scsi/megaraid/ |
| 6924 | 6921 | ||
| 6925 | MELLANOX ETHERNET DRIVER (mlx4_en) | 6922 | MELLANOX ETHERNET DRIVER (mlx4_en) |
| 6926 | M: Amir Vadai <amirv@mellanox.com> | 6923 | M: Eugenia Emantayev <eugenia@mellanox.com> |
| 6927 | L: netdev@vger.kernel.org | 6924 | L: netdev@vger.kernel.org |
| 6928 | S: Supported | 6925 | S: Supported |
| 6929 | W: http://www.mellanox.com | 6926 | W: http://www.mellanox.com |
| 6930 | Q: http://patchwork.ozlabs.org/project/netdev/list/ | 6927 | Q: http://patchwork.ozlabs.org/project/netdev/list/ |
| 6931 | F: drivers/net/ethernet/mellanox/mlx4/en_* | 6928 | F: drivers/net/ethernet/mellanox/mlx4/en_* |
| 6932 | 6929 | ||
| 6930 | MELLANOX ETHERNET DRIVER (mlx5e) | ||
| 6931 | M: Saeed Mahameed <saeedm@mellanox.com> | ||
| 6932 | L: netdev@vger.kernel.org | ||
| 6933 | S: Supported | ||
| 6934 | W: http://www.mellanox.com | ||
| 6935 | Q: http://patchwork.ozlabs.org/project/netdev/list/ | ||
| 6936 | F: drivers/net/ethernet/mellanox/mlx5/core/en_* | ||
| 6937 | |||
| 6933 | MELLANOX ETHERNET SWITCH DRIVERS | 6938 | MELLANOX ETHERNET SWITCH DRIVERS |
| 6934 | M: Jiri Pirko <jiri@mellanox.com> | 6939 | M: Jiri Pirko <jiri@mellanox.com> |
| 6935 | M: Ido Schimmel <idosch@mellanox.com> | 6940 | M: Ido Schimmel <idosch@mellanox.com> |
| @@ -7901,6 +7906,18 @@ S: Maintained | |||
| 7901 | F: net/openvswitch/ | 7906 | F: net/openvswitch/ |
| 7902 | F: include/uapi/linux/openvswitch.h | 7907 | F: include/uapi/linux/openvswitch.h |
| 7903 | 7908 | ||
| 7909 | OPERATING PERFORMANCE POINTS (OPP) | ||
| 7910 | M: Viresh Kumar <vireshk@kernel.org> | ||
| 7911 | M: Nishanth Menon <nm@ti.com> | ||
| 7912 | M: Stephen Boyd <sboyd@codeaurora.org> | ||
| 7913 | L: linux-pm@vger.kernel.org | ||
| 7914 | S: Maintained | ||
| 7915 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git | ||
| 7916 | F: drivers/base/power/opp/ | ||
| 7917 | F: include/linux/pm_opp.h | ||
| 7918 | F: Documentation/power/opp.txt | ||
| 7919 | F: Documentation/devicetree/bindings/opp/ | ||
| 7920 | |||
| 7904 | OPL4 DRIVER | 7921 | OPL4 DRIVER |
| 7905 | M: Clemens Ladisch <clemens@ladisch.de> | 7922 | M: Clemens Ladisch <clemens@ladisch.de> |
| 7906 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 7923 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
| @@ -9314,7 +9331,6 @@ F: drivers/i2c/busses/i2c-designware-* | |||
| 9314 | F: include/linux/platform_data/i2c-designware.h | 9331 | F: include/linux/platform_data/i2c-designware.h |
| 9315 | 9332 | ||
| 9316 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER | 9333 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER |
| 9317 | M: Seungwon Jeon <tgih.jun@samsung.com> | ||
| 9318 | M: Jaehoon Chung <jh80.chung@samsung.com> | 9334 | M: Jaehoon Chung <jh80.chung@samsung.com> |
| 9319 | L: linux-mmc@vger.kernel.org | 9335 | L: linux-mmc@vger.kernel.org |
| 9320 | S: Maintained | 9336 | S: Maintained |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 4 | 1 | VERSION = 4 |
| 2 | PATCHLEVEL = 4 | 2 | PATCHLEVEL = 4 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
| 5 | NAME = Blurry Fish Butt | 5 | NAME = Blurry Fish Butt |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index c92c0ef1e9d2..f1ac9818b751 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 3 | # CONFIG_SWAP is not set | 3 | # CONFIG_SWAP is not set |
| 4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index cfac24e0e7b6..323486d6ee83 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 3 | # CONFIG_SWAP is not set | 3 | # CONFIG_SWAP is not set |
| 4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 9922a118a15a..66191cd0447e 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 3 | # CONFIG_SWAP is not set | 3 | # CONFIG_SWAP is not set |
| 4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index f761a7c70761..f68838e8068a 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | # CONFIG_LOCALVERSION_AUTO is not set | 2 | # CONFIG_LOCALVERSION_AUTO is not set |
| 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 4 | # CONFIG_SWAP is not set | 4 | # CONFIG_SWAP is not set |
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index dc6f74f41283..96bd1c20fb0b 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | # CONFIG_LOCALVERSION_AUTO is not set | 2 | # CONFIG_LOCALVERSION_AUTO is not set |
| 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 4 | # CONFIG_SWAP is not set | 4 | # CONFIG_SWAP is not set |
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 3fef0a210c56..fcae66683ca0 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | # CONFIG_LOCALVERSION_AUTO is not set | 2 | # CONFIG_LOCALVERSION_AUTO is not set |
| 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 4 | # CONFIG_SWAP is not set | 4 | # CONFIG_SWAP is not set |
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 51784837daae..b01b659168ea 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 3 | # CONFIG_SWAP is not set | 3 | # CONFIG_SWAP is not set |
| 4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index ef35ef3923dd..a07f20de221b 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | # CONFIG_LOCALVERSION_AUTO is not set | 2 | # CONFIG_LOCALVERSION_AUTO is not set |
| 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 634509e5e572..f36c047b33ca 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="arc-linux-uclibc-" | 1 | CONFIG_CROSS_COMPILE="arc-linux-" |
| 2 | # CONFIG_LOCALVERSION_AUTO is not set | 2 | # CONFIG_LOCALVERSION_AUTO is not set |
| 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | 3 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" |
| 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h index ad481c24070d..258b0e5ad332 100644 --- a/arch/arc/include/asm/irqflags-arcv2.h +++ b/arch/arc/include/asm/irqflags-arcv2.h | |||
| @@ -37,6 +37,9 @@ | |||
| 37 | #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ | 37 | #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ |
| 38 | (ARCV2_IRQ_DEF_PRIO << 1)) | 38 | (ARCV2_IRQ_DEF_PRIO << 1)) |
| 39 | 39 | ||
| 40 | /* SLEEP needs default irq priority (<=) which can interrupt the doze */ | ||
| 41 | #define ISA_SLEEP_ARG (0x10 | ARCV2_IRQ_DEF_PRIO) | ||
| 42 | |||
| 40 | #ifndef __ASSEMBLY__ | 43 | #ifndef __ASSEMBLY__ |
| 41 | 44 | ||
| 42 | /* | 45 | /* |
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h index d8c608174617..c1d36458bfb7 100644 --- a/arch/arc/include/asm/irqflags-compact.h +++ b/arch/arc/include/asm/irqflags-compact.h | |||
| @@ -43,6 +43,8 @@ | |||
| 43 | 43 | ||
| 44 | #define ISA_INIT_STATUS_BITS STATUS_IE_MASK | 44 | #define ISA_INIT_STATUS_BITS STATUS_IE_MASK |
| 45 | 45 | ||
| 46 | #define ISA_SLEEP_ARG 0x3 | ||
| 47 | |||
| 46 | #ifndef __ASSEMBLY__ | 48 | #ifndef __ASSEMBLY__ |
| 47 | 49 | ||
| 48 | /****************************************************************** | 50 | /****************************************************************** |
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index c14a5bea0c76..5d446df2c413 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c | |||
| @@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) | |||
| 58 | "st sp, [r24] \n\t" | 58 | "st sp, [r24] \n\t" |
| 59 | #endif | 59 | #endif |
| 60 | 60 | ||
| 61 | "sync \n\t" | ||
| 62 | |||
| 63 | /* | 61 | /* |
| 64 | * setup _current_task with incoming tsk. | 62 | * setup _current_task with incoming tsk. |
| 65 | * optionally, set r25 to that as well | 63 | * optionally, set r25 to that as well |
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S index e248594097e7..e6890b1f8650 100644 --- a/arch/arc/kernel/ctx_sw_asm.S +++ b/arch/arc/kernel/ctx_sw_asm.S | |||
| @@ -44,9 +44,6 @@ __switch_to: | |||
| 44 | * don't need to do anything special to return it | 44 | * don't need to do anything special to return it |
| 45 | */ | 45 | */ |
| 46 | 46 | ||
| 47 | /* hardware memory barrier */ | ||
| 48 | sync | ||
| 49 | |||
| 50 | /* | 47 | /* |
| 51 | * switch to new task, contained in r1 | 48 | * switch to new task, contained in r1 |
| 52 | * Temp reg r3 is required to get the ptr to store val | 49 | * Temp reg r3 is required to get the ptr to store val |
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 91d5a0f1f3f7..a3f750e76b68 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c | |||
| @@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls) | |||
| 44 | void arch_cpu_idle(void) | 44 | void arch_cpu_idle(void) |
| 45 | { | 45 | { |
| 46 | /* sleep, but enable all interrupts before committing */ | 46 | /* sleep, but enable all interrupts before committing */ |
| 47 | if (is_isa_arcompact()) { | 47 | __asm__ __volatile__( |
| 48 | __asm__("sleep 0x3"); | 48 | "sleep %0 \n" |
| 49 | } else { | 49 | : |
| 50 | __asm__("sleep 0x10"); | 50 | :"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */ |
| 51 | } | ||
| 52 | } | 51 | } |
| 53 | 52 | ||
| 54 | asmlinkage void ret_from_fork(void); | 53 | asmlinkage void ret_from_fork(void); |
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 93c6ea52b671..7352475451f6 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c | |||
| @@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame) | |||
| 986 | (const u8 *)(fde + | 986 | (const u8 *)(fde + |
| 987 | 1) + | 987 | 1) + |
| 988 | *fde, ptrType); | 988 | *fde, ptrType); |
| 989 | if (pc >= endLoc) | 989 | if (pc >= endLoc) { |
| 990 | fde = NULL; | 990 | fde = NULL; |
| 991 | } else | ||
| 992 | fde = NULL; | ||
| 993 | } | ||
| 994 | if (fde == NULL) { | ||
| 995 | for (fde = table->address, tableSize = table->size; | ||
| 996 | cie = NULL, tableSize > sizeof(*fde) | ||
| 997 | && tableSize - sizeof(*fde) >= *fde; | ||
| 998 | tableSize -= sizeof(*fde) + *fde, | ||
| 999 | fde += 1 + *fde / sizeof(*fde)) { | ||
| 1000 | cie = cie_for_fde(fde, table); | ||
| 1001 | if (cie == &bad_cie) { | ||
| 1002 | cie = NULL; | 991 | cie = NULL; |
| 1003 | break; | ||
| 1004 | } | 992 | } |
| 1005 | if (cie == NULL | 993 | } else { |
| 1006 | || cie == ¬_fde | 994 | fde = NULL; |
| 1007 | || (ptrType = fde_pointer_type(cie)) < 0) | 995 | cie = NULL; |
| 1008 | continue; | ||
| 1009 | ptr = (const u8 *)(fde + 2); | ||
| 1010 | startLoc = read_pointer(&ptr, | ||
| 1011 | (const u8 *)(fde + 1) + | ||
| 1012 | *fde, ptrType); | ||
| 1013 | if (!startLoc) | ||
| 1014 | continue; | ||
| 1015 | if (!(ptrType & DW_EH_PE_indirect)) | ||
| 1016 | ptrType &= | ||
| 1017 | DW_EH_PE_FORM | DW_EH_PE_signed; | ||
| 1018 | endLoc = | ||
| 1019 | startLoc + read_pointer(&ptr, | ||
| 1020 | (const u8 *)(fde + | ||
| 1021 | 1) + | ||
| 1022 | *fde, ptrType); | ||
| 1023 | if (pc >= startLoc && pc < endLoc) | ||
| 1024 | break; | ||
| 1025 | } | 996 | } |
| 1026 | } | 997 | } |
| 1027 | } | 998 | } |
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 0ee739846847..daf2bf52b984 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c | |||
| @@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, | |||
| 619 | 619 | ||
| 620 | int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); | 620 | int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); |
| 621 | if (dirty) { | 621 | if (dirty) { |
| 622 | /* wback + inv dcache lines */ | 622 | /* wback + inv dcache lines (K-mapping) */ |
| 623 | __flush_dcache_page(paddr, paddr); | 623 | __flush_dcache_page(paddr, paddr); |
| 624 | 624 | ||
| 625 | /* invalidate any existing icache lines */ | 625 | /* invalidate any existing icache lines (U-mapping) */ |
| 626 | if (vma->vm_flags & VM_EXEC) | 626 | if (vma->vm_flags & VM_EXEC) |
| 627 | __inv_icache_page(paddr, vaddr); | 627 | __inv_icache_page(paddr, vaddr); |
| 628 | } | 628 | } |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0365cbbc9179..34e1569a11ee 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -76,6 +76,8 @@ config ARM | |||
| 76 | select IRQ_FORCED_THREADING | 76 | select IRQ_FORCED_THREADING |
| 77 | select MODULES_USE_ELF_REL | 77 | select MODULES_USE_ELF_REL |
| 78 | select NO_BOOTMEM | 78 | select NO_BOOTMEM |
| 79 | select OF_EARLY_FLATTREE if OF | ||
| 80 | select OF_RESERVED_MEM if OF | ||
| 79 | select OLD_SIGACTION | 81 | select OLD_SIGACTION |
| 80 | select OLD_SIGSUSPEND3 | 82 | select OLD_SIGSUSPEND3 |
| 81 | select PERF_USE_VMALLOC | 83 | select PERF_USE_VMALLOC |
| @@ -1822,8 +1824,6 @@ config USE_OF | |||
| 1822 | bool "Flattened Device Tree support" | 1824 | bool "Flattened Device Tree support" |
| 1823 | select IRQ_DOMAIN | 1825 | select IRQ_DOMAIN |
| 1824 | select OF | 1826 | select OF |
| 1825 | select OF_EARLY_FLATTREE | ||
| 1826 | select OF_RESERVED_MEM | ||
| 1827 | help | 1827 | help |
| 1828 | Include support for flattened device tree machine descriptions. | 1828 | Include support for flattened device tree machine descriptions. |
| 1829 | 1829 | ||
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index d9ba6b879fc1..00352e761b8c 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts | |||
| @@ -604,6 +604,7 @@ | |||
| 604 | reg = <0x6f>; | 604 | reg = <0x6f>; |
| 605 | interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, | 605 | interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, |
| 606 | <&dra7_pmx_core 0x424>; | 606 | <&dra7_pmx_core 0x424>; |
| 607 | interrupt-names = "irq", "wakeup"; | ||
| 607 | 608 | ||
| 608 | pinctrl-names = "default"; | 609 | pinctrl-names = "default"; |
| 609 | pinctrl-0 = <&mcp79410_pins_default>; | 610 | pinctrl-0 = <&mcp79410_pins_default>; |
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts index 4e0ad3b82796..0962f2fa3f6e 100644 --- a/arch/arm/boot/dts/animeo_ip.dts +++ b/arch/arm/boot/dts/animeo_ip.dts | |||
| @@ -155,21 +155,21 @@ | |||
| 155 | label = "keyswitch_in"; | 155 | label = "keyswitch_in"; |
| 156 | gpios = <&pioB 1 GPIO_ACTIVE_HIGH>; | 156 | gpios = <&pioB 1 GPIO_ACTIVE_HIGH>; |
| 157 | linux,code = <28>; | 157 | linux,code = <28>; |
| 158 | gpio-key,wakeup; | 158 | wakeup-source; |
| 159 | }; | 159 | }; |
| 160 | 160 | ||
| 161 | error_in { | 161 | error_in { |
| 162 | label = "error_in"; | 162 | label = "error_in"; |
| 163 | gpios = <&pioB 2 GPIO_ACTIVE_HIGH>; | 163 | gpios = <&pioB 2 GPIO_ACTIVE_HIGH>; |
| 164 | linux,code = <29>; | 164 | linux,code = <29>; |
| 165 | gpio-key,wakeup; | 165 | wakeup-source; |
| 166 | }; | 166 | }; |
| 167 | 167 | ||
| 168 | btn { | 168 | btn { |
| 169 | label = "btn"; | 169 | label = "btn"; |
| 170 | gpios = <&pioC 23 GPIO_ACTIVE_HIGH>; | 170 | gpios = <&pioC 23 GPIO_ACTIVE_HIGH>; |
| 171 | linux,code = <31>; | 171 | linux,code = <31>; |
| 172 | gpio-key,wakeup; | 172 | wakeup-source; |
| 173 | }; | 173 | }; |
| 174 | }; | 174 | }; |
| 175 | }; | 175 | }; |
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts index f89598af4c2b..6bf873e7d96c 100644 --- a/arch/arm/boot/dts/at91-foxg20.dts +++ b/arch/arm/boot/dts/at91-foxg20.dts | |||
| @@ -159,7 +159,7 @@ | |||
| 159 | label = "Button"; | 159 | label = "Button"; |
| 160 | gpios = <&pioC 4 GPIO_ACTIVE_LOW>; | 160 | gpios = <&pioC 4 GPIO_ACTIVE_LOW>; |
| 161 | linux,code = <0x103>; | 161 | linux,code = <0x103>; |
| 162 | gpio-key,wakeup; | 162 | wakeup-source; |
| 163 | }; | 163 | }; |
| 164 | }; | 164 | }; |
| 165 | }; | 165 | }; |
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts index bf18ece0c027..229e989eb60d 100644 --- a/arch/arm/boot/dts/at91-kizbox.dts +++ b/arch/arm/boot/dts/at91-kizbox.dts | |||
| @@ -24,15 +24,6 @@ | |||
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | clocks { | 26 | clocks { |
| 27 | #address-cells = <1>; | ||
| 28 | #size-cells = <1>; | ||
| 29 | ranges; | ||
| 30 | |||
| 31 | main_clock: clock@0 { | ||
| 32 | compatible = "atmel,osc", "fixed-clock"; | ||
| 33 | clock-frequency = <18432000>; | ||
| 34 | }; | ||
| 35 | |||
| 36 | main_xtal { | 27 | main_xtal { |
| 37 | clock-frequency = <18432000>; | 28 | clock-frequency = <18432000>; |
| 38 | }; | 29 | }; |
| @@ -94,14 +85,14 @@ | |||
| 94 | label = "PB_RST"; | 85 | label = "PB_RST"; |
| 95 | gpios = <&pioB 30 GPIO_ACTIVE_HIGH>; | 86 | gpios = <&pioB 30 GPIO_ACTIVE_HIGH>; |
| 96 | linux,code = <0x100>; | 87 | linux,code = <0x100>; |
| 97 | gpio-key,wakeup; | 88 | wakeup-source; |
| 98 | }; | 89 | }; |
| 99 | 90 | ||
| 100 | user { | 91 | user { |
| 101 | label = "PB_USER"; | 92 | label = "PB_USER"; |
| 102 | gpios = <&pioB 31 GPIO_ACTIVE_HIGH>; | 93 | gpios = <&pioB 31 GPIO_ACTIVE_HIGH>; |
| 103 | linux,code = <0x101>; | 94 | linux,code = <0x101>; |
| 104 | gpio-key,wakeup; | 95 | wakeup-source; |
| 105 | }; | 96 | }; |
| 106 | }; | 97 | }; |
| 107 | 98 | ||
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts index f0b1563cb3f1..50a14568f094 100644 --- a/arch/arm/boot/dts/at91-kizbox2.dts +++ b/arch/arm/boot/dts/at91-kizbox2.dts | |||
| @@ -171,21 +171,21 @@ | |||
| 171 | label = "PB_PROG"; | 171 | label = "PB_PROG"; |
| 172 | gpios = <&pioE 27 GPIO_ACTIVE_LOW>; | 172 | gpios = <&pioE 27 GPIO_ACTIVE_LOW>; |
| 173 | linux,code = <0x102>; | 173 | linux,code = <0x102>; |
| 174 | gpio-key,wakeup; | 174 | wakeup-source; |
| 175 | }; | 175 | }; |
| 176 | 176 | ||
| 177 | reset { | 177 | reset { |
| 178 | label = "PB_RST"; | 178 | label = "PB_RST"; |
| 179 | gpios = <&pioE 29 GPIO_ACTIVE_LOW>; | 179 | gpios = <&pioE 29 GPIO_ACTIVE_LOW>; |
| 180 | linux,code = <0x100>; | 180 | linux,code = <0x100>; |
| 181 | gpio-key,wakeup; | 181 | wakeup-source; |
| 182 | }; | 182 | }; |
| 183 | 183 | ||
| 184 | user { | 184 | user { |
| 185 | label = "PB_USER"; | 185 | label = "PB_USER"; |
| 186 | gpios = <&pioE 31 GPIO_ACTIVE_HIGH>; | 186 | gpios = <&pioE 31 GPIO_ACTIVE_HIGH>; |
| 187 | linux,code = <0x101>; | 187 | linux,code = <0x101>; |
| 188 | gpio-key,wakeup; | 188 | wakeup-source; |
| 189 | }; | 189 | }; |
| 190 | }; | 190 | }; |
| 191 | 191 | ||
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts index 9f72b4932634..9682d105d4d8 100644 --- a/arch/arm/boot/dts/at91-kizboxmini.dts +++ b/arch/arm/boot/dts/at91-kizboxmini.dts | |||
| @@ -98,14 +98,14 @@ | |||
| 98 | label = "PB_PROG"; | 98 | label = "PB_PROG"; |
| 99 | gpios = <&pioC 17 GPIO_ACTIVE_LOW>; | 99 | gpios = <&pioC 17 GPIO_ACTIVE_LOW>; |
| 100 | linux,code = <0x102>; | 100 | linux,code = <0x102>; |
| 101 | gpio-key,wakeup; | 101 | wakeup-source; |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | reset { | 104 | reset { |
| 105 | label = "PB_RST"; | 105 | label = "PB_RST"; |
| 106 | gpios = <&pioC 16 GPIO_ACTIVE_LOW>; | 106 | gpios = <&pioC 16 GPIO_ACTIVE_LOW>; |
| 107 | linux,code = <0x100>; | 107 | linux,code = <0x100>; |
| 108 | gpio-key,wakeup; | 108 | wakeup-source; |
| 109 | }; | 109 | }; |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts index a9aef53ab764..4f2eebf4a560 100644 --- a/arch/arm/boot/dts/at91-qil_a9260.dts +++ b/arch/arm/boot/dts/at91-qil_a9260.dts | |||
| @@ -183,7 +183,7 @@ | |||
| 183 | label = "user_pb"; | 183 | label = "user_pb"; |
| 184 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; | 184 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; |
| 185 | linux,code = <28>; | 185 | linux,code = <28>; |
| 186 | gpio-key,wakeup; | 186 | wakeup-source; |
| 187 | }; | 187 | }; |
| 188 | }; | 188 | }; |
| 189 | 189 | ||
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index e07c2b206beb..ad6de73ed5a5 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | /dts-v1/; | 45 | /dts-v1/; |
| 46 | #include "sama5d2.dtsi" | 46 | #include "sama5d2.dtsi" |
| 47 | #include "sama5d2-pinfunc.h" | 47 | #include "sama5d2-pinfunc.h" |
| 48 | #include <dt-bindings/mfd/atmel-flexcom.h> | ||
| 48 | 49 | ||
| 49 | / { | 50 | / { |
| 50 | model = "Atmel SAMA5D2 Xplained"; | 51 | model = "Atmel SAMA5D2 Xplained"; |
| @@ -59,15 +60,6 @@ | |||
| 59 | }; | 60 | }; |
| 60 | 61 | ||
| 61 | clocks { | 62 | clocks { |
| 62 | #address-cells = <1>; | ||
| 63 | #size-cells = <1>; | ||
| 64 | ranges; | ||
| 65 | |||
| 66 | main_clock: clock@0 { | ||
| 67 | compatible = "atmel,osc", "fixed-clock"; | ||
| 68 | clock-frequency = <12000000>; | ||
| 69 | }; | ||
| 70 | |||
| 71 | slow_xtal { | 63 | slow_xtal { |
| 72 | clock-frequency = <32768>; | 64 | clock-frequency = <32768>; |
| 73 | }; | 65 | }; |
| @@ -91,6 +83,22 @@ | |||
| 91 | status = "okay"; | 83 | status = "okay"; |
| 92 | }; | 84 | }; |
| 93 | 85 | ||
| 86 | sdmmc0: sdio-host@a0000000 { | ||
| 87 | bus-width = <8>; | ||
| 88 | pinctrl-names = "default"; | ||
| 89 | pinctrl-0 = <&pinctrl_sdmmc0_default>; | ||
| 90 | non-removable; | ||
| 91 | mmc-ddr-1_8v; | ||
| 92 | status = "okay"; | ||
| 93 | }; | ||
| 94 | |||
| 95 | sdmmc1: sdio-host@b0000000 { | ||
| 96 | bus-width = <4>; | ||
| 97 | pinctrl-names = "default"; | ||
| 98 | pinctrl-0 = <&pinctrl_sdmmc1_default>; | ||
| 99 | status = "okay"; /* conflict with qspi0 */ | ||
| 100 | }; | ||
| 101 | |||
| 94 | apb { | 102 | apb { |
| 95 | spi0: spi@f8000000 { | 103 | spi0: spi@f8000000 { |
| 96 | pinctrl-names = "default"; | 104 | pinctrl-names = "default"; |
| @@ -181,12 +189,49 @@ | |||
| 181 | }; | 189 | }; |
| 182 | }; | 190 | }; |
| 183 | 191 | ||
| 192 | flx0: flexcom@f8034000 { | ||
| 193 | atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>; | ||
| 194 | status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */ | ||
| 195 | |||
| 196 | uart5: serial@200 { | ||
| 197 | compatible = "atmel,at91sam9260-usart"; | ||
| 198 | reg = <0x200 0x200>; | ||
| 199 | interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>; | ||
| 200 | clocks = <&flx0_clk>; | ||
| 201 | clock-names = "usart"; | ||
| 202 | pinctrl-names = "default"; | ||
| 203 | pinctrl-0 = <&pinctrl_flx0_default>; | ||
| 204 | atmel,fifo-size = <32>; | ||
| 205 | status = "okay"; | ||
| 206 | }; | ||
| 207 | }; | ||
| 208 | |||
| 184 | uart3: serial@fc008000 { | 209 | uart3: serial@fc008000 { |
| 185 | pinctrl-names = "default"; | 210 | pinctrl-names = "default"; |
| 186 | pinctrl-0 = <&pinctrl_uart3_default>; | 211 | pinctrl-0 = <&pinctrl_uart3_default>; |
| 187 | status = "okay"; | 212 | status = "okay"; |
| 188 | }; | 213 | }; |
| 189 | 214 | ||
| 215 | flx4: flexcom@fc018000 { | ||
| 216 | atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>; | ||
| 217 | status = "okay"; | ||
| 218 | |||
| 219 | i2c2: i2c@600 { | ||
| 220 | compatible = "atmel,sama5d2-i2c"; | ||
| 221 | reg = <0x600 0x200>; | ||
| 222 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>; | ||
| 223 | dmas = <0>, <0>; | ||
| 224 | dma-names = "tx", "rx"; | ||
| 225 | #address-cells = <1>; | ||
| 226 | #size-cells = <0>; | ||
| 227 | clocks = <&flx4_clk>; | ||
| 228 | pinctrl-names = "default"; | ||
| 229 | pinctrl-0 = <&pinctrl_flx4_default>; | ||
| 230 | atmel,fifo-size = <16>; | ||
| 231 | status = "okay"; | ||
| 232 | }; | ||
| 233 | }; | ||
| 234 | |||
| 190 | i2c1: i2c@fc028000 { | 235 | i2c1: i2c@fc028000 { |
| 191 | dmas = <0>, <0>; | 236 | dmas = <0>, <0>; |
| 192 | pinctrl-names = "default"; | 237 | pinctrl-names = "default"; |
| @@ -201,6 +246,18 @@ | |||
| 201 | }; | 246 | }; |
| 202 | 247 | ||
| 203 | pinctrl@fc038000 { | 248 | pinctrl@fc038000 { |
| 249 | pinctrl_flx0_default: flx0_default { | ||
| 250 | pinmux = <PIN_PB28__FLEXCOM0_IO0>, | ||
| 251 | <PIN_PB29__FLEXCOM0_IO1>; | ||
| 252 | bias-disable; | ||
| 253 | }; | ||
| 254 | |||
| 255 | pinctrl_flx4_default: flx4_default { | ||
| 256 | pinmux = <PIN_PD12__FLEXCOM4_IO0>, | ||
| 257 | <PIN_PD13__FLEXCOM4_IO1>; | ||
| 258 | bias-disable; | ||
| 259 | }; | ||
| 260 | |||
| 204 | pinctrl_i2c0_default: i2c0_default { | 261 | pinctrl_i2c0_default: i2c0_default { |
| 205 | pinmux = <PIN_PD21__TWD0>, | 262 | pinmux = <PIN_PD21__TWD0>, |
| 206 | <PIN_PD22__TWCK0>; | 263 | <PIN_PD22__TWCK0>; |
| @@ -227,6 +284,46 @@ | |||
| 227 | bias-disable; | 284 | bias-disable; |
| 228 | }; | 285 | }; |
| 229 | 286 | ||
| 287 | pinctrl_sdmmc0_default: sdmmc0_default { | ||
| 288 | cmd_data { | ||
| 289 | pinmux = <PIN_PA1__SDMMC0_CMD>, | ||
| 290 | <PIN_PA2__SDMMC0_DAT0>, | ||
| 291 | <PIN_PA3__SDMMC0_DAT1>, | ||
| 292 | <PIN_PA4__SDMMC0_DAT2>, | ||
| 293 | <PIN_PA5__SDMMC0_DAT3>, | ||
| 294 | <PIN_PA6__SDMMC0_DAT4>, | ||
| 295 | <PIN_PA7__SDMMC0_DAT5>, | ||
| 296 | <PIN_PA8__SDMMC0_DAT6>, | ||
| 297 | <PIN_PA9__SDMMC0_DAT7>; | ||
| 298 | bias-pull-up; | ||
| 299 | }; | ||
| 300 | |||
| 301 | ck_cd_rstn_vddsel { | ||
| 302 | pinmux = <PIN_PA0__SDMMC0_CK>, | ||
| 303 | <PIN_PA10__SDMMC0_RSTN>, | ||
| 304 | <PIN_PA11__SDMMC0_VDDSEL>, | ||
| 305 | <PIN_PA13__SDMMC0_CD>; | ||
| 306 | bias-disable; | ||
| 307 | }; | ||
| 308 | }; | ||
| 309 | |||
| 310 | pinctrl_sdmmc1_default: sdmmc1_default { | ||
| 311 | cmd_data { | ||
| 312 | pinmux = <PIN_PA28__SDMMC1_CMD>, | ||
| 313 | <PIN_PA18__SDMMC1_DAT0>, | ||
| 314 | <PIN_PA19__SDMMC1_DAT1>, | ||
| 315 | <PIN_PA20__SDMMC1_DAT2>, | ||
| 316 | <PIN_PA21__SDMMC1_DAT3>; | ||
| 317 | bias-pull-up; | ||
| 318 | }; | ||
| 319 | |||
| 320 | conf-ck_cd { | ||
| 321 | pinmux = <PIN_PA22__SDMMC1_CK>, | ||
| 322 | <PIN_PA30__SDMMC1_CD>; | ||
| 323 | bias-disable; | ||
| 324 | }; | ||
| 325 | }; | ||
| 326 | |||
| 230 | pinctrl_spi0_default: spi0_default { | 327 | pinctrl_spi0_default: spi0_default { |
| 231 | pinmux = <PIN_PA14__SPI0_SPCK>, | 328 | pinmux = <PIN_PA14__SPI0_SPCK>, |
| 232 | <PIN_PA15__SPI0_MOSI>, | 329 | <PIN_PA15__SPI0_MOSI>, |
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index 8488ac53d22d..ff888d21c786 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts | |||
| @@ -315,7 +315,7 @@ | |||
| 315 | label = "PB_USER"; | 315 | label = "PB_USER"; |
| 316 | gpios = <&pioE 29 GPIO_ACTIVE_LOW>; | 316 | gpios = <&pioE 29 GPIO_ACTIVE_LOW>; |
| 317 | linux,code = <0x104>; | 317 | linux,code = <0x104>; |
| 318 | gpio-key,wakeup; | 318 | wakeup-source; |
| 319 | }; | 319 | }; |
| 320 | }; | 320 | }; |
| 321 | 321 | ||
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index 45371a1b61b3..131614f28e75 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts | |||
| @@ -50,7 +50,6 @@ | |||
| 50 | compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5"; | 50 | compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5"; |
| 51 | 51 | ||
| 52 | chosen { | 52 | chosen { |
| 53 | bootargs = "ignore_loglevel earlyprintk"; | ||
| 54 | stdout-path = "serial0:115200n8"; | 53 | stdout-path = "serial0:115200n8"; |
| 55 | }; | 54 | }; |
| 56 | 55 | ||
| @@ -59,15 +58,6 @@ | |||
| 59 | }; | 58 | }; |
| 60 | 59 | ||
| 61 | clocks { | 60 | clocks { |
| 62 | #address-cells = <1>; | ||
| 63 | #size-cells = <1>; | ||
| 64 | ranges; | ||
| 65 | |||
| 66 | main_clock: clock@0 { | ||
| 67 | compatible = "atmel,osc", "fixed-clock"; | ||
| 68 | clock-frequency = <12000000>; | ||
| 69 | }; | ||
| 70 | |||
| 71 | slow_xtal { | 61 | slow_xtal { |
| 72 | clock-frequency = <32768>; | 62 | clock-frequency = <32768>; |
| 73 | }; | 63 | }; |
| @@ -235,7 +225,7 @@ | |||
| 235 | label = "pb_user1"; | 225 | label = "pb_user1"; |
| 236 | gpios = <&pioE 8 GPIO_ACTIVE_HIGH>; | 226 | gpios = <&pioE 8 GPIO_ACTIVE_HIGH>; |
| 237 | linux,code = <0x100>; | 227 | linux,code = <0x100>; |
| 238 | gpio-key,wakeup; | 228 | wakeup-source; |
| 239 | }; | 229 | }; |
| 240 | }; | 230 | }; |
| 241 | 231 | ||
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts index 6d272c0125e3..2d4a33100af6 100644 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts | |||
| @@ -50,7 +50,6 @@ | |||
| 50 | compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5"; | 50 | compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5"; |
| 51 | 51 | ||
| 52 | chosen { | 52 | chosen { |
| 53 | bootargs = "ignore_loglevel earlyprintk"; | ||
| 54 | stdout-path = "serial0:115200n8"; | 53 | stdout-path = "serial0:115200n8"; |
| 55 | }; | 54 | }; |
| 56 | 55 | ||
| @@ -59,15 +58,6 @@ | |||
| 59 | }; | 58 | }; |
| 60 | 59 | ||
| 61 | clocks { | 60 | clocks { |
| 62 | #address-cells = <1>; | ||
| 63 | #size-cells = <1>; | ||
| 64 | ranges; | ||
| 65 | |||
| 66 | main_clock: clock@0 { | ||
| 67 | compatible = "atmel,osc", "fixed-clock"; | ||
| 68 | clock-frequency = <12000000>; | ||
| 69 | }; | ||
| 70 | |||
| 71 | slow_xtal { | 61 | slow_xtal { |
| 72 | clock-frequency = <32768>; | 62 | clock-frequency = <32768>; |
| 73 | }; | 63 | }; |
| @@ -304,7 +294,7 @@ | |||
| 304 | label = "pb_user1"; | 294 | label = "pb_user1"; |
| 305 | gpios = <&pioE 13 GPIO_ACTIVE_HIGH>; | 295 | gpios = <&pioE 13 GPIO_ACTIVE_HIGH>; |
| 306 | linux,code = <0x100>; | 296 | linux,code = <0x100>; |
| 307 | gpio-key,wakeup; | 297 | wakeup-source; |
| 308 | }; | 298 | }; |
| 309 | }; | 299 | }; |
| 310 | 300 | ||
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts index 8dab4b75ca97..f90e1c2d3caa 100644 --- a/arch/arm/boot/dts/at91rm9200ek.dts +++ b/arch/arm/boot/dts/at91rm9200ek.dts | |||
| @@ -21,15 +21,6 @@ | |||
| 21 | }; | 21 | }; |
| 22 | 22 | ||
| 23 | clocks { | 23 | clocks { |
| 24 | #address-cells = <1>; | ||
| 25 | #size-cells = <1>; | ||
| 26 | ranges; | ||
| 27 | |||
| 28 | main_clock: clock@0 { | ||
| 29 | compatible = "atmel,osc", "fixed-clock"; | ||
| 30 | clock-frequency = <18432000>; | ||
| 31 | }; | ||
| 32 | |||
| 33 | slow_xtal { | 24 | slow_xtal { |
| 34 | clock-frequency = <32768>; | 25 | clock-frequency = <32768>; |
| 35 | }; | 26 | }; |
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts index 2e92ac020f23..55bd51f07fa6 100644 --- a/arch/arm/boot/dts/at91sam9261ek.dts +++ b/arch/arm/boot/dts/at91sam9261ek.dts | |||
| @@ -22,15 +22,6 @@ | |||
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | clocks { | 24 | clocks { |
| 25 | #address-cells = <1>; | ||
| 26 | #size-cells = <1>; | ||
| 27 | ranges; | ||
| 28 | |||
| 29 | main_clock: clock@0 { | ||
| 30 | compatible = "atmel,osc", "fixed-clock"; | ||
| 31 | clock-frequency = <18432000>; | ||
| 32 | }; | ||
| 33 | |||
| 34 | slow_xtal { | 25 | slow_xtal { |
| 35 | clock-frequency = <32768>; | 26 | clock-frequency = <32768>; |
| 36 | }; | 27 | }; |
| @@ -149,7 +140,7 @@ | |||
| 149 | ti,debounce-tol = /bits/ 16 <65535>; | 140 | ti,debounce-tol = /bits/ 16 <65535>; |
| 150 | ti,debounce-max = /bits/ 16 <1>; | 141 | ti,debounce-max = /bits/ 16 <1>; |
| 151 | 142 | ||
| 152 | linux,wakeup; | 143 | wakeup-source; |
| 153 | }; | 144 | }; |
| 154 | }; | 145 | }; |
| 155 | 146 | ||
| @@ -193,28 +184,28 @@ | |||
| 193 | label = "button_0"; | 184 | label = "button_0"; |
| 194 | gpios = <&pioA 27 GPIO_ACTIVE_LOW>; | 185 | gpios = <&pioA 27 GPIO_ACTIVE_LOW>; |
| 195 | linux,code = <256>; | 186 | linux,code = <256>; |
| 196 | gpio-key,wakeup; | 187 | wakeup-source; |
| 197 | }; | 188 | }; |
| 198 | 189 | ||
| 199 | button_1 { | 190 | button_1 { |
| 200 | label = "button_1"; | 191 | label = "button_1"; |
| 201 | gpios = <&pioA 26 GPIO_ACTIVE_LOW>; | 192 | gpios = <&pioA 26 GPIO_ACTIVE_LOW>; |
| 202 | linux,code = <257>; | 193 | linux,code = <257>; |
| 203 | gpio-key,wakeup; | 194 | wakeup-source; |
| 204 | }; | 195 | }; |
| 205 | 196 | ||
| 206 | button_2 { | 197 | button_2 { |
| 207 | label = "button_2"; | 198 | label = "button_2"; |
| 208 | gpios = <&pioA 25 GPIO_ACTIVE_LOW>; | 199 | gpios = <&pioA 25 GPIO_ACTIVE_LOW>; |
| 209 | linux,code = <258>; | 200 | linux,code = <258>; |
| 210 | gpio-key,wakeup; | 201 | wakeup-source; |
| 211 | }; | 202 | }; |
| 212 | 203 | ||
| 213 | button_3 { | 204 | button_3 { |
| 214 | label = "button_3"; | 205 | label = "button_3"; |
| 215 | gpios = <&pioA 24 GPIO_ACTIVE_LOW>; | 206 | gpios = <&pioA 24 GPIO_ACTIVE_LOW>; |
| 216 | linux,code = <259>; | 207 | linux,code = <259>; |
| 217 | gpio-key,wakeup; | 208 | wakeup-source; |
| 218 | }; | 209 | }; |
| 219 | }; | 210 | }; |
| 220 | }; | 211 | }; |
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts index 23381276ffb8..59df9d73d276 100644 --- a/arch/arm/boot/dts/at91sam9263ek.dts +++ b/arch/arm/boot/dts/at91sam9263ek.dts | |||
| @@ -22,15 +22,6 @@ | |||
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | clocks { | 24 | clocks { |
| 25 | #address-cells = <1>; | ||
| 26 | #size-cells = <1>; | ||
| 27 | ranges; | ||
| 28 | |||
| 29 | main_clock: clock@0 { | ||
| 30 | compatible = "atmel,osc", "fixed-clock"; | ||
| 31 | clock-frequency = <16367660>; | ||
| 32 | }; | ||
| 33 | |||
| 34 | slow_xtal { | 25 | slow_xtal { |
| 35 | clock-frequency = <32768>; | 26 | clock-frequency = <32768>; |
| 36 | }; | 27 | }; |
| @@ -213,14 +204,14 @@ | |||
| 213 | label = "left_click"; | 204 | label = "left_click"; |
| 214 | gpios = <&pioC 5 GPIO_ACTIVE_LOW>; | 205 | gpios = <&pioC 5 GPIO_ACTIVE_LOW>; |
| 215 | linux,code = <272>; | 206 | linux,code = <272>; |
| 216 | gpio-key,wakeup; | 207 | wakeup-source; |
| 217 | }; | 208 | }; |
| 218 | 209 | ||
| 219 | right_click { | 210 | right_click { |
| 220 | label = "right_click"; | 211 | label = "right_click"; |
| 221 | gpios = <&pioC 4 GPIO_ACTIVE_LOW>; | 212 | gpios = <&pioC 4 GPIO_ACTIVE_LOW>; |
| 222 | linux,code = <273>; | 213 | linux,code = <273>; |
| 223 | gpio-key,wakeup; | 214 | wakeup-source; |
| 224 | }; | 215 | }; |
| 225 | }; | 216 | }; |
| 226 | 217 | ||
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 57548a2c5a1e..e9cc99b6353a 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi | |||
| @@ -19,15 +19,6 @@ | |||
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | clocks { | 21 | clocks { |
| 22 | #address-cells = <1>; | ||
| 23 | #size-cells = <1>; | ||
| 24 | ranges; | ||
| 25 | |||
| 26 | main_clock: clock@0 { | ||
| 27 | compatible = "atmel,osc", "fixed-clock"; | ||
| 28 | clock-frequency = <18432000>; | ||
| 29 | }; | ||
| 30 | |||
| 31 | slow_xtal { | 22 | slow_xtal { |
| 32 | clock-frequency = <32768>; | 23 | clock-frequency = <32768>; |
| 33 | }; | 24 | }; |
| @@ -206,14 +197,14 @@ | |||
| 206 | label = "Button 3"; | 197 | label = "Button 3"; |
| 207 | gpios = <&pioA 30 GPIO_ACTIVE_LOW>; | 198 | gpios = <&pioA 30 GPIO_ACTIVE_LOW>; |
| 208 | linux,code = <0x103>; | 199 | linux,code = <0x103>; |
| 209 | gpio-key,wakeup; | 200 | wakeup-source; |
| 210 | }; | 201 | }; |
| 211 | 202 | ||
| 212 | btn4 { | 203 | btn4 { |
| 213 | label = "Button 4"; | 204 | label = "Button 4"; |
| 214 | gpios = <&pioA 31 GPIO_ACTIVE_LOW>; | 205 | gpios = <&pioA 31 GPIO_ACTIVE_LOW>; |
| 215 | linux,code = <0x104>; | 206 | linux,code = <0x104>; |
| 216 | gpio-key,wakeup; | 207 | wakeup-source; |
| 217 | }; | 208 | }; |
| 218 | }; | 209 | }; |
| 219 | 210 | ||
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index 9d16ef8453c5..2400c99134f7 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts | |||
| @@ -24,15 +24,6 @@ | |||
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | clocks { | 26 | clocks { |
| 27 | #address-cells = <1>; | ||
| 28 | #size-cells = <1>; | ||
| 29 | ranges; | ||
| 30 | |||
| 31 | main_clock: clock@0 { | ||
| 32 | compatible = "atmel,osc", "fixed-clock"; | ||
| 33 | clock-frequency = <12000000>; | ||
| 34 | }; | ||
| 35 | |||
| 36 | slow_xtal { | 27 | slow_xtal { |
| 37 | clock-frequency = <32768>; | 28 | clock-frequency = <32768>; |
| 38 | }; | 29 | }; |
| @@ -323,14 +314,14 @@ | |||
| 323 | label = "left_click"; | 314 | label = "left_click"; |
| 324 | gpios = <&pioB 6 GPIO_ACTIVE_LOW>; | 315 | gpios = <&pioB 6 GPIO_ACTIVE_LOW>; |
| 325 | linux,code = <272>; | 316 | linux,code = <272>; |
| 326 | gpio-key,wakeup; | 317 | wakeup-source; |
| 327 | }; | 318 | }; |
| 328 | 319 | ||
| 329 | right_click { | 320 | right_click { |
| 330 | label = "right_click"; | 321 | label = "right_click"; |
| 331 | gpios = <&pioB 7 GPIO_ACTIVE_LOW>; | 322 | gpios = <&pioB 7 GPIO_ACTIVE_LOW>; |
| 332 | linux,code = <273>; | 323 | linux,code = <273>; |
| 333 | gpio-key,wakeup; | 324 | wakeup-source; |
| 334 | }; | 325 | }; |
| 335 | 326 | ||
| 336 | left { | 327 | left { |
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index acf3451a332d..ca4ddf86817a 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts | |||
| @@ -23,15 +23,6 @@ | |||
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | clocks { | 25 | clocks { |
| 26 | #address-cells = <1>; | ||
| 27 | #size-cells = <1>; | ||
| 28 | ranges; | ||
| 29 | |||
| 30 | main_clock: clock@0 { | ||
| 31 | compatible = "atmel,osc", "fixed-clock"; | ||
| 32 | clock-frequency = <16000000>; | ||
| 33 | }; | ||
| 34 | |||
| 35 | slow_xtal { | 26 | slow_xtal { |
| 36 | clock-frequency = <32768>; | 27 | clock-frequency = <32768>; |
| 37 | }; | 28 | }; |
| @@ -219,7 +210,7 @@ | |||
| 219 | label = "Enter"; | 210 | label = "Enter"; |
| 220 | gpios = <&pioB 3 GPIO_ACTIVE_LOW>; | 211 | gpios = <&pioB 3 GPIO_ACTIVE_LOW>; |
| 221 | linux,code = <28>; | 212 | linux,code = <28>; |
| 222 | gpio-key,wakeup; | 213 | wakeup-source; |
| 223 | }; | 214 | }; |
| 224 | }; | 215 | }; |
| 225 | 216 | ||
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts index 558c9f220bed..f10566f759cd 100644 --- a/arch/arm/boot/dts/at91sam9rlek.dts +++ b/arch/arm/boot/dts/at91sam9rlek.dts | |||
| @@ -22,15 +22,6 @@ | |||
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | clocks { | 24 | clocks { |
| 25 | #address-cells = <1>; | ||
| 26 | #size-cells = <1>; | ||
| 27 | ranges; | ||
| 28 | |||
| 29 | main_clock: clock { | ||
| 30 | compatible = "atmel,osc", "fixed-clock"; | ||
| 31 | clock-frequency = <12000000>; | ||
| 32 | }; | ||
| 33 | |||
| 34 | slow_xtal { | 25 | slow_xtal { |
| 35 | clock-frequency = <32768>; | 26 | clock-frequency = <32768>; |
| 36 | }; | 27 | }; |
| @@ -225,14 +216,14 @@ | |||
| 225 | label = "right_click"; | 216 | label = "right_click"; |
| 226 | gpios = <&pioB 0 GPIO_ACTIVE_LOW>; | 217 | gpios = <&pioB 0 GPIO_ACTIVE_LOW>; |
| 227 | linux,code = <273>; | 218 | linux,code = <273>; |
| 228 | gpio-key,wakeup; | 219 | wakeup-source; |
| 229 | }; | 220 | }; |
| 230 | 221 | ||
| 231 | left_click { | 222 | left_click { |
| 232 | label = "left_click"; | 223 | label = "left_click"; |
| 233 | gpios = <&pioB 1 GPIO_ACTIVE_LOW>; | 224 | gpios = <&pioB 1 GPIO_ACTIVE_LOW>; |
| 234 | linux,code = <272>; | 225 | linux,code = <272>; |
| 235 | gpio-key,wakeup; | 226 | wakeup-source; |
| 236 | }; | 227 | }; |
| 237 | }; | 228 | }; |
| 238 | 229 | ||
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi index 26112ebd15fc..b098ad8cd93a 100644 --- a/arch/arm/boot/dts/at91sam9x5cm.dtsi +++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi | |||
| @@ -13,17 +13,6 @@ | |||
| 13 | }; | 13 | }; |
| 14 | 14 | ||
| 15 | clocks { | 15 | clocks { |
| 16 | #address-cells = <1>; | ||
| 17 | #size-cells = <1>; | ||
| 18 | ranges; | ||
| 19 | |||
| 20 | main_clock: clock@0 { | ||
| 21 | compatible = "atmel,osc", "fixed-clock"; | ||
| 22 | clock-frequency = <12000000>; | ||
| 23 | }; | ||
| 24 | }; | ||
| 25 | |||
| 26 | clocks { | ||
| 27 | slow_xtal { | 16 | slow_xtal { |
| 28 | clock-frequency = <32768>; | 17 | clock-frequency = <32768>; |
| 29 | }; | 18 | }; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index bc672fb91466..fe99231cbde5 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
| @@ -1459,8 +1459,8 @@ | |||
| 1459 | interrupt-names = "tx", "rx"; | 1459 | interrupt-names = "tx", "rx"; |
| 1460 | dmas = <&sdma_xbar 133>, <&sdma_xbar 132>; | 1460 | dmas = <&sdma_xbar 133>, <&sdma_xbar 132>; |
| 1461 | dma-names = "tx", "rx"; | 1461 | dma-names = "tx", "rx"; |
| 1462 | clocks = <&mcasp3_ahclkx_mux>; | 1462 | clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>; |
| 1463 | clock-names = "fck"; | 1463 | clock-names = "fck", "ahclkx"; |
| 1464 | status = "disabled"; | 1464 | status = "disabled"; |
| 1465 | }; | 1465 | }; |
| 1466 | 1466 | ||
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index feb9d34b239c..f818ea483aeb 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi | |||
| @@ -486,7 +486,10 @@ | |||
| 486 | compatible = "fsl,imx27-usb"; | 486 | compatible = "fsl,imx27-usb"; |
| 487 | reg = <0x10024000 0x200>; | 487 | reg = <0x10024000 0x200>; |
| 488 | interrupts = <56>; | 488 | interrupts = <56>; |
| 489 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>; | 489 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>, |
| 490 | <&clks IMX27_CLK_USB_AHB_GATE>, | ||
| 491 | <&clks IMX27_CLK_USB_DIV>; | ||
| 492 | clock-names = "ipg", "ahb", "per"; | ||
| 490 | fsl,usbmisc = <&usbmisc 0>; | 493 | fsl,usbmisc = <&usbmisc 0>; |
| 491 | status = "disabled"; | 494 | status = "disabled"; |
| 492 | }; | 495 | }; |
| @@ -495,7 +498,10 @@ | |||
| 495 | compatible = "fsl,imx27-usb"; | 498 | compatible = "fsl,imx27-usb"; |
| 496 | reg = <0x10024200 0x200>; | 499 | reg = <0x10024200 0x200>; |
| 497 | interrupts = <54>; | 500 | interrupts = <54>; |
| 498 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>; | 501 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>, |
| 502 | <&clks IMX27_CLK_USB_AHB_GATE>, | ||
| 503 | <&clks IMX27_CLK_USB_DIV>; | ||
| 504 | clock-names = "ipg", "ahb", "per"; | ||
| 499 | fsl,usbmisc = <&usbmisc 1>; | 505 | fsl,usbmisc = <&usbmisc 1>; |
| 500 | dr_mode = "host"; | 506 | dr_mode = "host"; |
| 501 | status = "disabled"; | 507 | status = "disabled"; |
| @@ -505,7 +511,10 @@ | |||
| 505 | compatible = "fsl,imx27-usb"; | 511 | compatible = "fsl,imx27-usb"; |
| 506 | reg = <0x10024400 0x200>; | 512 | reg = <0x10024400 0x200>; |
| 507 | interrupts = <55>; | 513 | interrupts = <55>; |
| 508 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>; | 514 | clocks = <&clks IMX27_CLK_USB_IPG_GATE>, |
| 515 | <&clks IMX27_CLK_USB_AHB_GATE>, | ||
| 516 | <&clks IMX27_CLK_USB_DIV>; | ||
| 517 | clock-names = "ipg", "ahb", "per"; | ||
| 509 | fsl,usbmisc = <&usbmisc 2>; | 518 | fsl,usbmisc = <&usbmisc 2>; |
| 510 | dr_mode = "host"; | 519 | dr_mode = "host"; |
| 511 | status = "disabled"; | 520 | status = "disabled"; |
| @@ -515,7 +524,6 @@ | |||
| 515 | #index-cells = <1>; | 524 | #index-cells = <1>; |
| 516 | compatible = "fsl,imx27-usbmisc"; | 525 | compatible = "fsl,imx27-usbmisc"; |
| 517 | reg = <0x10024600 0x200>; | 526 | reg = <0x10024600 0x200>; |
| 518 | clocks = <&clks IMX27_CLK_USB_AHB_GATE>; | ||
| 519 | }; | 527 | }; |
| 520 | 528 | ||
| 521 | sahara2: sahara@10025000 { | 529 | sahara2: sahara@10025000 { |
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi index 01aef230773d..5acbd0dcc2ab 100644 --- a/arch/arm/boot/dts/k2l-netcp.dtsi +++ b/arch/arm/boot/dts/k2l-netcp.dtsi | |||
| @@ -137,7 +137,7 @@ netcp: netcp@26000000 { | |||
| 137 | /* NetCP address range */ | 137 | /* NetCP address range */ |
| 138 | ranges = <0 0x26000000 0x1000000>; | 138 | ranges = <0 0x26000000 0x1000000>; |
| 139 | 139 | ||
| 140 | clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; | 140 | clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>; |
| 141 | dma-coherent; | 141 | dma-coherent; |
| 142 | 142 | ||
| 143 | ti,navigator-dmas = <&dma_gbe 0>, | 143 | ti,navigator-dmas = <&dma_gbe 0>, |
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi index c56ab6bbfe3c..0e46560551f4 100644 --- a/arch/arm/boot/dts/kirkwood-ts219.dtsi +++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | }; | 40 | }; |
| 41 | poweroff@12100 { | 41 | poweroff@12100 { |
| 42 | compatible = "qnap,power-off"; | 42 | compatible = "qnap,power-off"; |
| 43 | reg = <0x12000 0x100>; | 43 | reg = <0x12100 0x100>; |
| 44 | clocks = <&gate_clk 7>; | 44 | clocks = <&gate_clk 7>; |
| 45 | }; | 45 | }; |
| 46 | spi@10600 { | 46 | spi@10600 { |
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts index 8fd8ef2c72da..85f0373df498 100644 --- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts +++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts | |||
| @@ -86,6 +86,10 @@ | |||
| 86 | }; | 86 | }; |
| 87 | }; | 87 | }; |
| 88 | 88 | ||
| 89 | &emmc { | ||
| 90 | /delete-property/mmc-hs200-1_8v; | ||
| 91 | }; | ||
| 92 | |||
| 89 | &gpio_keys { | 93 | &gpio_keys { |
| 90 | pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; | 94 | pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; |
| 91 | 95 | ||
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 6a79c9c526b8..04ea209f1737 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi | |||
| @@ -452,8 +452,10 @@ | |||
| 452 | clock-names = "tsadc", "apb_pclk"; | 452 | clock-names = "tsadc", "apb_pclk"; |
| 453 | resets = <&cru SRST_TSADC>; | 453 | resets = <&cru SRST_TSADC>; |
| 454 | reset-names = "tsadc-apb"; | 454 | reset-names = "tsadc-apb"; |
| 455 | pinctrl-names = "default"; | 455 | pinctrl-names = "init", "default", "sleep"; |
| 456 | pinctrl-0 = <&otp_out>; | 456 | pinctrl-0 = <&otp_gpio>; |
| 457 | pinctrl-1 = <&otp_out>; | ||
| 458 | pinctrl-2 = <&otp_gpio>; | ||
| 457 | #thermal-sensor-cells = <1>; | 459 | #thermal-sensor-cells = <1>; |
| 458 | rockchip,hw-tshut-temp = <95000>; | 460 | rockchip,hw-tshut-temp = <95000>; |
| 459 | status = "disabled"; | 461 | status = "disabled"; |
| @@ -1395,6 +1397,10 @@ | |||
| 1395 | }; | 1397 | }; |
| 1396 | 1398 | ||
| 1397 | tsadc { | 1399 | tsadc { |
| 1400 | otp_gpio: otp-gpio { | ||
| 1401 | rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>; | ||
| 1402 | }; | ||
| 1403 | |||
| 1398 | otp_out: otp-out { | 1404 | otp_out: otp-out { |
| 1399 | rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>; | 1405 | rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>; |
| 1400 | }; | 1406 | }; |
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts index d9a9aca1ccfd..e812f5c1bf70 100644 --- a/arch/arm/boot/dts/sama5d35ek.dts +++ b/arch/arm/boot/dts/sama5d35ek.dts | |||
| @@ -49,7 +49,7 @@ | |||
| 49 | label = "pb_user1"; | 49 | label = "pb_user1"; |
| 50 | gpios = <&pioE 27 GPIO_ACTIVE_HIGH>; | 50 | gpios = <&pioE 27 GPIO_ACTIVE_HIGH>; |
| 51 | linux,code = <0x100>; | 51 | linux,code = <0x100>; |
| 52 | gpio-key,wakeup; | 52 | wakeup-source; |
| 53 | }; | 53 | }; |
| 54 | }; | 54 | }; |
| 55 | }; | 55 | }; |
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 15bbaf690047..2193637b9cd2 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi | |||
| @@ -1300,7 +1300,7 @@ | |||
| 1300 | }; | 1300 | }; |
| 1301 | 1301 | ||
| 1302 | watchdog@fc068640 { | 1302 | watchdog@fc068640 { |
| 1303 | compatible = "atmel,at91sam9260-wdt"; | 1303 | compatible = "atmel,sama5d4-wdt"; |
| 1304 | reg = <0xfc068640 0x10>; | 1304 | reg = <0xfc068640 0x10>; |
| 1305 | clocks = <&clk32k>; | 1305 | clocks = <&clk32k>; |
| 1306 | status = "disabled"; | 1306 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi index 12edafefd44a..9beea8976584 100644 --- a/arch/arm/boot/dts/usb_a9260_common.dtsi +++ b/arch/arm/boot/dts/usb_a9260_common.dtsi | |||
| @@ -115,7 +115,7 @@ | |||
| 115 | label = "user_pb"; | 115 | label = "user_pb"; |
| 116 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; | 116 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; |
| 117 | linux,code = <28>; | 117 | linux,code = <28>; |
| 118 | gpio-key,wakeup; | 118 | wakeup-source; |
| 119 | }; | 119 | }; |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts index 68c0de36c339..8cc6edb29694 100644 --- a/arch/arm/boot/dts/usb_a9263.dts +++ b/arch/arm/boot/dts/usb_a9263.dts | |||
| @@ -143,7 +143,7 @@ | |||
| 143 | label = "user_pb"; | 143 | label = "user_pb"; |
| 144 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; | 144 | gpios = <&pioB 10 GPIO_ACTIVE_LOW>; |
| 145 | linux,code = <28>; | 145 | linux,code = <28>; |
| 146 | gpio-key,wakeup; | 146 | wakeup-source; |
| 147 | }; | 147 | }; |
| 148 | }; | 148 | }; |
| 149 | 149 | ||
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index 6736bae43a5b..0d5acc2cdc8e 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi | |||
| @@ -158,7 +158,7 @@ | |||
| 158 | interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; | 158 | interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; |
| 159 | clocks = <&clks VF610_CLK_DSPI0>; | 159 | clocks = <&clks VF610_CLK_DSPI0>; |
| 160 | clock-names = "dspi"; | 160 | clock-names = "dspi"; |
| 161 | spi-num-chipselects = <5>; | 161 | spi-num-chipselects = <6>; |
| 162 | status = "disabled"; | 162 | status = "disabled"; |
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| @@ -170,7 +170,7 @@ | |||
| 170 | interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; | 170 | interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; |
| 171 | clocks = <&clks VF610_CLK_DSPI1>; | 171 | clocks = <&clks VF610_CLK_DSPI1>; |
| 172 | clock-names = "dspi"; | 172 | clock-names = "dspi"; |
| 173 | spi-num-chipselects = <5>; | 173 | spi-num-chipselects = <4>; |
| 174 | status = "disabled"; | 174 | status = "disabled"; |
| 175 | }; | 175 | }; |
| 176 | 176 | ||
| @@ -461,6 +461,8 @@ | |||
| 461 | clock-names = "adc"; | 461 | clock-names = "adc"; |
| 462 | #io-channel-cells = <1>; | 462 | #io-channel-cells = <1>; |
| 463 | status = "disabled"; | 463 | status = "disabled"; |
| 464 | fsl,adck-max-frequency = <30000000>, <40000000>, | ||
| 465 | <20000000>; | ||
| 464 | }; | 466 | }; |
| 465 | 467 | ||
| 466 | esdhc0: esdhc@400b1000 { | 468 | esdhc0: esdhc@400b1000 { |
| @@ -472,8 +474,6 @@ | |||
| 472 | <&clks VF610_CLK_ESDHC0>; | 474 | <&clks VF610_CLK_ESDHC0>; |
| 473 | clock-names = "ipg", "ahb", "per"; | 475 | clock-names = "ipg", "ahb", "per"; |
| 474 | status = "disabled"; | 476 | status = "disabled"; |
| 475 | fsl,adck-max-frequency = <30000000>, <40000000>, | ||
| 476 | <20000000>; | ||
| 477 | }; | 477 | }; |
| 478 | 478 | ||
| 479 | esdhc1: esdhc@400b2000 { | 479 | esdhc1: esdhc@400b2000 { |
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index 1b1e5acd76e2..e4b1be66b3f5 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig | |||
| @@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y | |||
| 125 | # CONFIG_HWMON is not set | 125 | # CONFIG_HWMON is not set |
| 126 | CONFIG_WATCHDOG=y | 126 | CONFIG_WATCHDOG=y |
| 127 | CONFIG_AT91SAM9X_WATCHDOG=y | 127 | CONFIG_AT91SAM9X_WATCHDOG=y |
| 128 | CONFIG_SSB=m | ||
| 129 | CONFIG_MFD_ATMEL_HLCDC=y | 128 | CONFIG_MFD_ATMEL_HLCDC=y |
| 130 | CONFIG_REGULATOR=y | 129 | CONFIG_REGULATOR=y |
| 131 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 130 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index a0c57ac88b27..63f7e6ce649a 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig | |||
| @@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y | |||
| 129 | CONFIG_POWER_SUPPLY=y | 129 | CONFIG_POWER_SUPPLY=y |
| 130 | CONFIG_POWER_RESET=y | 130 | CONFIG_POWER_RESET=y |
| 131 | # CONFIG_HWMON is not set | 131 | # CONFIG_HWMON is not set |
| 132 | CONFIG_SSB=m | ||
| 133 | CONFIG_MFD_ATMEL_FLEXCOM=y | 132 | CONFIG_MFD_ATMEL_FLEXCOM=y |
| 134 | CONFIG_REGULATOR=y | 133 | CONFIG_REGULATOR=y |
| 135 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 134 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index be1d07d59ee9..1bd9510de1b9 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h | |||
| @@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool); | |||
| 40 | #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) | 40 | #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) |
| 41 | #endif | 41 | #endif |
| 42 | 42 | ||
| 43 | static inline int nr_legacy_irqs(void) | ||
| 44 | { | ||
| 45 | return NR_IRQS_LEGACY; | ||
| 46 | } | ||
| 47 | |||
| 43 | #endif | 48 | #endif |
| 44 | 49 | ||
| 45 | #endif | 50 | #endif |
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 7a2a32a1d5a8..ede692ffa32e 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h | |||
| @@ -416,6 +416,7 @@ | |||
| 416 | #define __NR_execveat (__NR_SYSCALL_BASE+387) | 416 | #define __NR_execveat (__NR_SYSCALL_BASE+387) |
| 417 | #define __NR_userfaultfd (__NR_SYSCALL_BASE+388) | 417 | #define __NR_userfaultfd (__NR_SYSCALL_BASE+388) |
| 418 | #define __NR_membarrier (__NR_SYSCALL_BASE+389) | 418 | #define __NR_membarrier (__NR_SYSCALL_BASE+389) |
| 419 | #define __NR_mlock2 (__NR_SYSCALL_BASE+390) | ||
| 419 | 420 | ||
| 420 | /* | 421 | /* |
| 421 | * The following SWIs are ARM private. | 422 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 6551d28c27e6..066f7f9ba411 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
| @@ -17,11 +17,6 @@ | |||
| 17 | #include <asm/mach/pci.h> | 17 | #include <asm/mach/pci.h> |
| 18 | 18 | ||
| 19 | static int debug_pci; | 19 | static int debug_pci; |
| 20 | static resource_size_t (*align_resource)(struct pci_dev *dev, | ||
| 21 | const struct resource *res, | ||
| 22 | resource_size_t start, | ||
| 23 | resource_size_t size, | ||
| 24 | resource_size_t align) = NULL; | ||
| 25 | 20 | ||
| 26 | /* | 21 | /* |
| 27 | * We can't use pci_get_device() here since we are | 22 | * We can't use pci_get_device() here since we are |
| @@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, | |||
| 461 | sys->busnr = busnr; | 456 | sys->busnr = busnr; |
| 462 | sys->swizzle = hw->swizzle; | 457 | sys->swizzle = hw->swizzle; |
| 463 | sys->map_irq = hw->map_irq; | 458 | sys->map_irq = hw->map_irq; |
| 464 | align_resource = hw->align_resource; | ||
| 465 | INIT_LIST_HEAD(&sys->resources); | 459 | INIT_LIST_HEAD(&sys->resources); |
| 466 | 460 | ||
| 467 | if (hw->private_data) | 461 | if (hw->private_data) |
| @@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, | |||
| 470 | ret = hw->setup(nr, sys); | 464 | ret = hw->setup(nr, sys); |
| 471 | 465 | ||
| 472 | if (ret > 0) { | 466 | if (ret > 0) { |
| 467 | struct pci_host_bridge *host_bridge; | ||
| 468 | |||
| 473 | ret = pcibios_init_resources(nr, sys); | 469 | ret = pcibios_init_resources(nr, sys); |
| 474 | if (ret) { | 470 | if (ret) { |
| 475 | kfree(sys); | 471 | kfree(sys); |
| @@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, | |||
| 491 | busnr = sys->bus->busn_res.end + 1; | 487 | busnr = sys->bus->busn_res.end + 1; |
| 492 | 488 | ||
| 493 | list_add(&sys->node, head); | 489 | list_add(&sys->node, head); |
| 490 | |||
| 491 | host_bridge = pci_find_host_bridge(sys->bus); | ||
| 492 | host_bridge->align_resource = hw->align_resource; | ||
| 494 | } else { | 493 | } else { |
| 495 | kfree(sys); | 494 | kfree(sys); |
| 496 | if (ret < 0) | 495 | if (ret < 0) |
| @@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
| 578 | { | 577 | { |
| 579 | struct pci_dev *dev = data; | 578 | struct pci_dev *dev = data; |
| 580 | resource_size_t start = res->start; | 579 | resource_size_t start = res->start; |
| 580 | struct pci_host_bridge *host_bridge; | ||
| 581 | 581 | ||
| 582 | if (res->flags & IORESOURCE_IO && start & 0x300) | 582 | if (res->flags & IORESOURCE_IO && start & 0x300) |
| 583 | start = (start + 0x3ff) & ~0x3ff; | 583 | start = (start + 0x3ff) & ~0x3ff; |
| 584 | 584 | ||
| 585 | start = (start + align - 1) & ~(align - 1); | 585 | start = (start + align - 1) & ~(align - 1); |
| 586 | 586 | ||
| 587 | if (align_resource) | 587 | host_bridge = pci_find_host_bridge(dev->bus); |
| 588 | return align_resource(dev, res, start, size, align); | 588 | |
| 589 | if (host_bridge->align_resource) | ||
| 590 | return host_bridge->align_resource(dev, res, | ||
| 591 | start, size, align); | ||
| 589 | 592 | ||
| 590 | return start; | 593 | return start; |
| 591 | } | 594 | } |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index fde6c88d560c..ac368bb068d1 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
| @@ -399,6 +399,7 @@ | |||
| 399 | CALL(sys_execveat) | 399 | CALL(sys_execveat) |
| 400 | CALL(sys_userfaultfd) | 400 | CALL(sys_userfaultfd) |
| 401 | CALL(sys_membarrier) | 401 | CALL(sys_membarrier) |
| 402 | CALL(sys_mlock2) | ||
| 402 | #ifndef syscalls_counted | 403 | #ifndef syscalls_counted |
| 403 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 404 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
| 404 | #define syscalls_counted | 405 | #define syscalls_counted |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index eab83b2435b8..e06fd299de08 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
| @@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 564 | vcpu_sleep(vcpu); | 564 | vcpu_sleep(vcpu); |
| 565 | 565 | ||
| 566 | /* | 566 | /* |
| 567 | * Disarming the background timer must be done in a | ||
| 568 | * preemptible context, as this call may sleep. | ||
| 569 | */ | ||
| 570 | kvm_timer_flush_hwstate(vcpu); | ||
| 571 | |||
| 572 | /* | ||
| 573 | * Preparing the interrupts to be injected also | 567 | * Preparing the interrupts to be injected also |
| 574 | * involves poking the GIC, which must be done in a | 568 | * involves poking the GIC, which must be done in a |
| 575 | * non-preemptible context. | 569 | * non-preemptible context. |
| 576 | */ | 570 | */ |
| 577 | preempt_disable(); | 571 | preempt_disable(); |
| 572 | kvm_timer_flush_hwstate(vcpu); | ||
| 578 | kvm_vgic_flush_hwstate(vcpu); | 573 | kvm_vgic_flush_hwstate(vcpu); |
| 579 | 574 | ||
| 580 | local_irq_disable(); | 575 | local_irq_disable(); |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 6984342da13d..7dace909d5cf 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
| @@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud) | |||
| 98 | __kvm_flush_dcache_pud(pud); | 98 | __kvm_flush_dcache_pud(pud); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static bool kvm_is_device_pfn(unsigned long pfn) | ||
| 102 | { | ||
| 103 | return !pfn_valid(pfn); | ||
| 104 | } | ||
| 105 | |||
| 101 | /** | 106 | /** |
| 102 | * stage2_dissolve_pmd() - clear and flush huge PMD entry | 107 | * stage2_dissolve_pmd() - clear and flush huge PMD entry |
| 103 | * @kvm: pointer to kvm structure. | 108 | * @kvm: pointer to kvm structure. |
| @@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, | |||
| 213 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 218 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
| 214 | 219 | ||
| 215 | /* No need to invalidate the cache for device mappings */ | 220 | /* No need to invalidate the cache for device mappings */ |
| 216 | if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) | 221 | if (!kvm_is_device_pfn(__phys_to_pfn(addr))) |
| 217 | kvm_flush_dcache_pte(old_pte); | 222 | kvm_flush_dcache_pte(old_pte); |
| 218 | 223 | ||
| 219 | put_page(virt_to_page(pte)); | 224 | put_page(virt_to_page(pte)); |
| @@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, | |||
| 305 | 310 | ||
| 306 | pte = pte_offset_kernel(pmd, addr); | 311 | pte = pte_offset_kernel(pmd, addr); |
| 307 | do { | 312 | do { |
| 308 | if (!pte_none(*pte) && | 313 | if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr))) |
| 309 | (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) | ||
| 310 | kvm_flush_dcache_pte(*pte); | 314 | kvm_flush_dcache_pte(*pte); |
| 311 | } while (pte++, addr += PAGE_SIZE, addr != end); | 315 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 312 | } | 316 | } |
| @@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) | |||
| 1037 | return kvm_vcpu_dabt_iswrite(vcpu); | 1041 | return kvm_vcpu_dabt_iswrite(vcpu); |
| 1038 | } | 1042 | } |
| 1039 | 1043 | ||
| 1040 | static bool kvm_is_device_pfn(unsigned long pfn) | ||
| 1041 | { | ||
| 1042 | return !pfn_valid(pfn); | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | /** | 1044 | /** |
| 1046 | * stage2_wp_ptes - write protect PMD range | 1045 | * stage2_wp_ptes - write protect PMD range |
| 1047 | * @pmd: pointer to pmd entry | 1046 | * @pmd: pointer to pmd entry |
diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S index 72d622baaad3..df1d44bdc375 100644 --- a/arch/arm/mach-dove/include/mach/entry-macro.S +++ b/arch/arm/mach-dove/include/mach/entry-macro.S | |||
| @@ -18,13 +18,13 @@ | |||
| 18 | @ check low interrupts | 18 | @ check low interrupts |
| 19 | ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] | 19 | ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] |
| 20 | ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] | 20 | ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] |
| 21 | mov \irqnr, #31 | 21 | mov \irqnr, #32 |
| 22 | ands \irqstat, \irqstat, \tmp | 22 | ands \irqstat, \irqstat, \tmp |
| 23 | 23 | ||
| 24 | @ if no low interrupts set, check high interrupts | 24 | @ if no low interrupts set, check high interrupts |
| 25 | ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] | 25 | ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] |
| 26 | ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] | 26 | ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] |
| 27 | moveq \irqnr, #63 | 27 | moveq \irqnr, #64 |
| 28 | andeqs \irqstat, \irqstat, \tmp | 28 | andeqs \irqstat, \irqstat, \tmp |
| 29 | 29 | ||
| 30 | @ find first active interrupt source | 30 | @ find first active interrupt source |
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 8e7976a4c3e7..cfc696b972f3 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c | |||
| @@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = { | |||
| 177 | .irq_unmask = imx_gpc_irq_unmask, | 177 | .irq_unmask = imx_gpc_irq_unmask, |
| 178 | .irq_retrigger = irq_chip_retrigger_hierarchy, | 178 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
| 179 | .irq_set_wake = imx_gpc_irq_set_wake, | 179 | .irq_set_wake = imx_gpc_irq_set_wake, |
| 180 | .irq_set_type = irq_chip_set_type_parent, | ||
| 180 | #ifdef CONFIG_SMP | 181 | #ifdef CONFIG_SMP |
| 181 | .irq_set_affinity = irq_chip_set_affinity_parent, | 182 | .irq_set_affinity = irq_chip_set_affinity_parent, |
| 182 | #endif | 183 | #endif |
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 5305ec7341ec..79e1f876d1c9 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
| @@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
| 143 | * Ensure that CPU power state is set to ON to avoid CPU | 143 | * Ensure that CPU power state is set to ON to avoid CPU |
| 144 | * powerdomain transition on wfi | 144 | * powerdomain transition on wfi |
| 145 | */ | 145 | */ |
| 146 | clkdm_wakeup(cpu1_clkdm); | 146 | clkdm_wakeup_nolock(cpu1_clkdm); |
| 147 | omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON); | 147 | pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON); |
| 148 | clkdm_allow_idle(cpu1_clkdm); | 148 | clkdm_allow_idle_nolock(cpu1_clkdm); |
| 149 | 149 | ||
| 150 | if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) { | 150 | if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) { |
| 151 | while (gic_dist_disabled()) { | 151 | while (gic_dist_disabled()) { |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index cc8a987149e2..48495ad82aba 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
| @@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh) | |||
| 890 | return ret; | 890 | return ret; |
| 891 | } | 891 | } |
| 892 | 892 | ||
| 893 | static void _enable_optional_clocks(struct omap_hwmod *oh) | ||
| 894 | { | ||
| 895 | struct omap_hwmod_opt_clk *oc; | ||
| 896 | int i; | ||
| 897 | |||
| 898 | pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); | ||
| 899 | |||
| 900 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
| 901 | if (oc->_clk) { | ||
| 902 | pr_debug("omap_hwmod: enable %s:%s\n", oc->role, | ||
| 903 | __clk_get_name(oc->_clk)); | ||
| 904 | clk_enable(oc->_clk); | ||
| 905 | } | ||
| 906 | } | ||
| 907 | |||
| 908 | static void _disable_optional_clocks(struct omap_hwmod *oh) | ||
| 909 | { | ||
| 910 | struct omap_hwmod_opt_clk *oc; | ||
| 911 | int i; | ||
| 912 | |||
| 913 | pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); | ||
| 914 | |||
| 915 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
| 916 | if (oc->_clk) { | ||
| 917 | pr_debug("omap_hwmod: disable %s:%s\n", oc->role, | ||
| 918 | __clk_get_name(oc->_clk)); | ||
| 919 | clk_disable(oc->_clk); | ||
| 920 | } | ||
| 921 | } | ||
| 922 | |||
| 893 | /** | 923 | /** |
| 894 | * _enable_clocks - enable hwmod main clock and interface clocks | 924 | * _enable_clocks - enable hwmod main clock and interface clocks |
| 895 | * @oh: struct omap_hwmod * | 925 | * @oh: struct omap_hwmod * |
| @@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh) | |||
| 917 | clk_enable(os->_clk); | 947 | clk_enable(os->_clk); |
| 918 | } | 948 | } |
| 919 | 949 | ||
| 950 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
| 951 | _enable_optional_clocks(oh); | ||
| 952 | |||
| 920 | /* The opt clocks are controlled by the device driver. */ | 953 | /* The opt clocks are controlled by the device driver. */ |
| 921 | 954 | ||
| 922 | return 0; | 955 | return 0; |
| @@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh) | |||
| 948 | clk_disable(os->_clk); | 981 | clk_disable(os->_clk); |
| 949 | } | 982 | } |
| 950 | 983 | ||
| 984 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
| 985 | _disable_optional_clocks(oh); | ||
| 986 | |||
| 951 | /* The opt clocks are controlled by the device driver. */ | 987 | /* The opt clocks are controlled by the device driver. */ |
| 952 | 988 | ||
| 953 | return 0; | 989 | return 0; |
| 954 | } | 990 | } |
| 955 | 991 | ||
| 956 | static void _enable_optional_clocks(struct omap_hwmod *oh) | ||
| 957 | { | ||
| 958 | struct omap_hwmod_opt_clk *oc; | ||
| 959 | int i; | ||
| 960 | |||
| 961 | pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); | ||
| 962 | |||
| 963 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
| 964 | if (oc->_clk) { | ||
| 965 | pr_debug("omap_hwmod: enable %s:%s\n", oc->role, | ||
| 966 | __clk_get_name(oc->_clk)); | ||
| 967 | clk_enable(oc->_clk); | ||
| 968 | } | ||
| 969 | } | ||
| 970 | |||
| 971 | static void _disable_optional_clocks(struct omap_hwmod *oh) | ||
| 972 | { | ||
| 973 | struct omap_hwmod_opt_clk *oc; | ||
| 974 | int i; | ||
| 975 | |||
| 976 | pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); | ||
| 977 | |||
| 978 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
| 979 | if (oc->_clk) { | ||
| 980 | pr_debug("omap_hwmod: disable %s:%s\n", oc->role, | ||
| 981 | __clk_get_name(oc->_clk)); | ||
| 982 | clk_disable(oc->_clk); | ||
| 983 | } | ||
| 984 | } | ||
| 985 | |||
| 986 | /** | 992 | /** |
| 987 | * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 | 993 | * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 |
| 988 | * @oh: struct omap_hwmod * | 994 | * @oh: struct omap_hwmod * |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index ca6df1a73475..76bce11c85a4 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
| @@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm { | |||
| 523 | * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up | 523 | * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up |
| 524 | * events by calling _reconfigure_io_chain() when a device is enabled | 524 | * events by calling _reconfigure_io_chain() when a device is enabled |
| 525 | * or idled. | 525 | * or idled. |
| 526 | * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to | ||
| 527 | * operate and they need to be handled at the same time as the main_clk. | ||
| 526 | */ | 528 | */ |
| 527 | #define HWMOD_SWSUP_SIDLE (1 << 0) | 529 | #define HWMOD_SWSUP_SIDLE (1 << 0) |
| 528 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) | 530 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) |
| @@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm { | |||
| 538 | #define HWMOD_FORCE_MSTANDBY (1 << 11) | 540 | #define HWMOD_FORCE_MSTANDBY (1 << 11) |
| 539 | #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) | 541 | #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) |
| 540 | #define HWMOD_RECONFIG_IO_CHAIN (1 << 13) | 542 | #define HWMOD_RECONFIG_IO_CHAIN (1 << 13) |
| 543 | #define HWMOD_OPT_CLKS_NEEDED (1 << 14) | ||
| 541 | 544 | ||
| 542 | /* | 545 | /* |
| 543 | * omap_hwmod._int_flags definitions | 546 | * omap_hwmod._int_flags definitions |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 51d1ecb384bd..ee4e04434a94 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
| @@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = { | |||
| 1298 | }; | 1298 | }; |
| 1299 | 1299 | ||
| 1300 | /* | 1300 | /* |
| 1301 | * 'mcasp' class | ||
| 1302 | * | ||
| 1303 | */ | ||
| 1304 | static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = { | ||
| 1305 | .sysc_offs = 0x0004, | ||
| 1306 | .sysc_flags = SYSC_HAS_SIDLEMODE, | ||
| 1307 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), | ||
| 1308 | .sysc_fields = &omap_hwmod_sysc_type3, | ||
| 1309 | }; | ||
| 1310 | |||
| 1311 | static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = { | ||
| 1312 | .name = "mcasp", | ||
| 1313 | .sysc = &dra7xx_mcasp_sysc, | ||
| 1314 | }; | ||
| 1315 | |||
| 1316 | /* mcasp3 */ | ||
| 1317 | static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = { | ||
| 1318 | { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" }, | ||
| 1319 | }; | ||
| 1320 | |||
| 1321 | static struct omap_hwmod dra7xx_mcasp3_hwmod = { | ||
| 1322 | .name = "mcasp3", | ||
| 1323 | .class = &dra7xx_mcasp_hwmod_class, | ||
| 1324 | .clkdm_name = "l4per2_clkdm", | ||
| 1325 | .main_clk = "mcasp3_aux_gfclk_mux", | ||
| 1326 | .flags = HWMOD_OPT_CLKS_NEEDED, | ||
| 1327 | .prcm = { | ||
| 1328 | .omap4 = { | ||
| 1329 | .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET, | ||
| 1330 | .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET, | ||
| 1331 | .modulemode = MODULEMODE_SWCTRL, | ||
| 1332 | }, | ||
| 1333 | }, | ||
| 1334 | .opt_clks = mcasp3_opt_clks, | ||
| 1335 | .opt_clks_cnt = ARRAY_SIZE(mcasp3_opt_clks), | ||
| 1336 | }; | ||
| 1337 | |||
| 1338 | /* | ||
| 1301 | * 'mmc' class | 1339 | * 'mmc' class |
| 1302 | * | 1340 | * |
| 1303 | */ | 1341 | */ |
| @@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = { | |||
| 2566 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2604 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
| 2567 | }; | 2605 | }; |
| 2568 | 2606 | ||
| 2607 | /* l4_per2 -> mcasp3 */ | ||
| 2608 | static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = { | ||
| 2609 | .master = &dra7xx_l4_per2_hwmod, | ||
| 2610 | .slave = &dra7xx_mcasp3_hwmod, | ||
| 2611 | .clk = "l4_root_clk_div", | ||
| 2612 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
| 2613 | }; | ||
| 2614 | |||
| 2615 | /* l3_main_1 -> mcasp3 */ | ||
| 2616 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = { | ||
| 2617 | .master = &dra7xx_l3_main_1_hwmod, | ||
| 2618 | .slave = &dra7xx_mcasp3_hwmod, | ||
| 2619 | .clk = "l3_iclk_div", | ||
| 2620 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
| 2621 | }; | ||
| 2622 | |||
| 2569 | /* l4_per1 -> elm */ | 2623 | /* l4_per1 -> elm */ |
| 2570 | static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { | 2624 | static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { |
| 2571 | .master = &dra7xx_l4_per1_hwmod, | 2625 | .master = &dra7xx_l4_per1_hwmod, |
| @@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { | |||
| 3308 | &dra7xx_l4_wkup__dcan1, | 3362 | &dra7xx_l4_wkup__dcan1, |
| 3309 | &dra7xx_l4_per2__dcan2, | 3363 | &dra7xx_l4_per2__dcan2, |
| 3310 | &dra7xx_l4_per2__cpgmac0, | 3364 | &dra7xx_l4_per2__cpgmac0, |
| 3365 | &dra7xx_l4_per2__mcasp3, | ||
| 3366 | &dra7xx_l3_main_1__mcasp3, | ||
| 3311 | &dra7xx_gmac__mdio, | 3367 | &dra7xx_gmac__mdio, |
| 3312 | &dra7xx_l4_cfg__dma_system, | 3368 | &dra7xx_l4_cfg__dma_system, |
| 3313 | &dra7xx_l3_main_1__dss, | 3369 | &dra7xx_l3_main_1__dss, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c index b1288f56d509..6256052893ec 100644 --- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c | |||
| @@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = { | |||
| 144 | .name = "l4_ls", | 144 | .name = "l4_ls", |
| 145 | .clkdm_name = "alwon_l3s_clkdm", | 145 | .clkdm_name = "alwon_l3s_clkdm", |
| 146 | .class = &l4_hwmod_class, | 146 | .class = &l4_hwmod_class, |
| 147 | .flags = HWMOD_NO_IDLEST, | ||
| 147 | }; | 148 | }; |
| 148 | 149 | ||
| 149 | /* | 150 | /* |
| @@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = { | |||
| 155 | .name = "l4_hs", | 156 | .name = "l4_hs", |
| 156 | .clkdm_name = "alwon_l3_med_clkdm", | 157 | .clkdm_name = "alwon_l3_med_clkdm", |
| 157 | .class = &l4_hwmod_class, | 158 | .class = &l4_hwmod_class, |
| 159 | .flags = HWMOD_NO_IDLEST, | ||
| 158 | }; | 160 | }; |
| 159 | 161 | ||
| 160 | /* L3 slow -> L4 ls peripheral interface running at 125MHz */ | 162 | /* L3 slow -> L4 ls peripheral interface running at 125MHz */ |
| @@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = { | |||
| 850 | .name = "emac0", | 852 | .name = "emac0", |
| 851 | .clkdm_name = "alwon_ethernet_clkdm", | 853 | .clkdm_name = "alwon_ethernet_clkdm", |
| 852 | .class = &dm816x_emac_hwmod_class, | 854 | .class = &dm816x_emac_hwmod_class, |
| 855 | .flags = HWMOD_NO_IDLEST, | ||
| 853 | }; | 856 | }; |
| 854 | 857 | ||
| 855 | static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { | 858 | static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { |
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 1dfe34654c43..58144779dec4 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c | |||
| @@ -24,9 +24,6 @@ | |||
| 24 | #include <linux/platform_data/iommu-omap.h> | 24 | #include <linux/platform_data/iommu-omap.h> |
| 25 | #include <linux/platform_data/wkup_m3.h> | 25 | #include <linux/platform_data/wkup_m3.h> |
| 26 | 26 | ||
| 27 | #include <asm/siginfo.h> | ||
| 28 | #include <asm/signal.h> | ||
| 29 | |||
| 30 | #include "common.h" | 27 | #include "common.h" |
| 31 | #include "common-board-devices.h" | 28 | #include "common-board-devices.h" |
| 32 | #include "dss-common.h" | 29 | #include "dss-common.h" |
| @@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void) | |||
| 385 | } | 382 | } |
| 386 | #endif /* CONFIG_ARCH_OMAP3 */ | 383 | #endif /* CONFIG_ARCH_OMAP3 */ |
| 387 | 384 | ||
| 388 | #ifdef CONFIG_SOC_TI81XX | ||
| 389 | static int fault_fixed_up; | ||
| 390 | |||
| 391 | static int t410_abort_handler(unsigned long addr, unsigned int fsr, | ||
| 392 | struct pt_regs *regs) | ||
| 393 | { | ||
| 394 | if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) { | ||
| 395 | pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n", | ||
| 396 | addr, fsr); | ||
| 397 | fault_fixed_up = 1; | ||
| 398 | return 0; | ||
| 399 | } | ||
| 400 | |||
| 401 | return 1; | ||
| 402 | } | ||
| 403 | |||
| 404 | static void __init t410_abort_init(void) | ||
| 405 | { | ||
| 406 | hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR, | ||
| 407 | "imprecise external abort"); | ||
| 408 | } | ||
| 409 | #endif | ||
| 410 | |||
| 411 | #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) | 385 | #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) |
| 412 | static struct iommu_platform_data omap4_iommu_pdata = { | 386 | static struct iommu_platform_data omap4_iommu_pdata = { |
| 413 | .reset_name = "mmu_cache", | 387 | .reset_name = "mmu_cache", |
| @@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = { | |||
| 536 | { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, | 510 | { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, |
| 537 | { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, | 511 | { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, |
| 538 | #endif | 512 | #endif |
| 539 | #ifdef CONFIG_SOC_TI81XX | ||
| 540 | { "hp,t410", t410_abort_init, }, | ||
| 541 | #endif | ||
| 542 | #ifdef CONFIG_SOC_OMAP5 | 513 | #ifdef CONFIG_SOC_OMAP5 |
| 543 | { "ti,omap5-uevm", omap5_uevm_legacy_init, }, | 514 | { "ti,omap5-uevm", omap5_uevm_legacy_init, }, |
| 544 | #endif | 515 | #endif |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 87b98bf92366..2dbd3785ee6f 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
| @@ -301,11 +301,11 @@ static void omap3_pm_idle(void) | |||
| 301 | if (omap_irq_pending()) | 301 | if (omap_irq_pending()) |
| 302 | return; | 302 | return; |
| 303 | 303 | ||
| 304 | trace_cpu_idle(1, smp_processor_id()); | 304 | trace_cpu_idle_rcuidle(1, smp_processor_id()); |
| 305 | 305 | ||
| 306 | omap_sram_idle(); | 306 | omap_sram_idle(); |
| 307 | 307 | ||
| 308 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); | 308 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | #ifdef CONFIG_SUSPEND | 311 | #ifdef CONFIG_SUSPEND |
diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S index 79eb502a1e64..73919a36b577 100644 --- a/arch/arm/mach-orion5x/include/mach/entry-macro.S +++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S | |||
| @@ -21,5 +21,5 @@ | |||
| 21 | @ find cause bits that are unmasked | 21 | @ find cause bits that are unmasked |
| 22 | ands \irqstat, \irqstat, \tmp @ clear Z flag if any | 22 | ands \irqstat, \irqstat, \tmp @ clear Z flag if any |
| 23 | clzne \irqnr, \irqstat @ calc irqnr | 23 | clzne \irqnr, \irqstat @ calc irqnr |
| 24 | rsbne \irqnr, \irqnr, #31 | 24 | rsbne \irqnr, \irqnr, #32 |
| 25 | .endm | 25 | .endm |
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 13eba2b26e0a..8fbfb10047ec 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
| @@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd) | |||
| 344 | { | 344 | { |
| 345 | palm_bl_power = bl; | 345 | palm_bl_power = bl; |
| 346 | palm_lcd_power = lcd; | 346 | palm_lcd_power = lcd; |
| 347 | pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); | 347 | pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); |
| 348 | platform_device_register(&palm27x_backlight); | 348 | platform_device_register(&palm27x_backlight); |
| 349 | } | 349 | } |
| 350 | #endif | 350 | #endif |
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c index aebf6de62468..0b5c3876720c 100644 --- a/arch/arm/mach-pxa/palmtc.c +++ b/arch/arm/mach-pxa/palmtc.c | |||
| @@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {} | |||
| 169 | #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) | 169 | #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) |
| 170 | static struct pwm_lookup palmtc_pwm_lookup[] = { | 170 | static struct pwm_lookup palmtc_pwm_lookup[] = { |
| 171 | PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, | 171 | PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, |
| 172 | PWM_PERIOD_NORMAL), | 172 | PWM_POLARITY_NORMAL), |
| 173 | }; | 173 | }; |
| 174 | 174 | ||
| 175 | static struct platform_pwm_backlight_data palmtc_backlight_data = { | 175 | static struct platform_pwm_backlight_data palmtc_backlight_data = { |
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c index 1d2825cb7a65..5fce87f7f254 100644 --- a/arch/arm/mach-shmobile/setup-r8a7793.c +++ b/arch/arm/mach-shmobile/setup-r8a7793.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include "common.h" | 19 | #include "common.h" |
| 20 | #include "rcar-gen2.h" | 20 | #include "rcar-gen2.h" |
| 21 | 21 | ||
| 22 | static const char *r8a7793_boards_compat_dt[] __initconst = { | 22 | static const char * const r8a7793_boards_compat_dt[] __initconst = { |
| 23 | "renesas,r8a7793", | 23 | "renesas,r8a7793", |
| 24 | NULL, | 24 | NULL, |
| 25 | }; | 25 | }; |
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig index 7fdc5bf24f9b..446334a25cf5 100644 --- a/arch/arm/mach-zx/Kconfig +++ b/arch/arm/mach-zx/Kconfig | |||
| @@ -13,7 +13,7 @@ config SOC_ZX296702 | |||
| 13 | select ARM_GLOBAL_TIMER | 13 | select ARM_GLOBAL_TIMER |
| 14 | select HAVE_ARM_SCU if SMP | 14 | select HAVE_ARM_SCU if SMP |
| 15 | select HAVE_ARM_TWD if SMP | 15 | select HAVE_ARM_TWD if SMP |
| 16 | select PM_GENERIC_DOMAINS | 16 | select PM_GENERIC_DOMAINS if PM |
| 17 | help | 17 | help |
| 18 | Support for ZTE ZX296702 SoC which is a dual core CortexA9MP | 18 | Support for ZTE ZX296702 SoC which is a dual core CortexA9MP |
| 19 | endif | 19 | endif |
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 2f4b14cfddb4..591f9db3bf40 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c | |||
| @@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
| 1061 | } | 1061 | } |
| 1062 | build_epilogue(&ctx); | 1062 | build_epilogue(&ctx); |
| 1063 | 1063 | ||
| 1064 | flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); | 1064 | flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); |
| 1065 | 1065 | ||
| 1066 | #if __LINUX_ARM_ARCH__ < 7 | 1066 | #if __LINUX_ARM_ARCH__ < 7 |
| 1067 | if (ctx.imm_count) | 1067 | if (ctx.imm_count) |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9ac16a482ff1..871f21783866 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -49,7 +49,7 @@ config ARM64 | |||
| 49 | select HAVE_ARCH_AUDITSYSCALL | 49 | select HAVE_ARCH_AUDITSYSCALL |
| 50 | select HAVE_ARCH_BITREVERSE | 50 | select HAVE_ARCH_BITREVERSE |
| 51 | select HAVE_ARCH_JUMP_LABEL | 51 | select HAVE_ARCH_JUMP_LABEL |
| 52 | select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP | 52 | select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) |
| 53 | select HAVE_ARCH_KGDB | 53 | select HAVE_ARCH_KGDB |
| 54 | select HAVE_ARCH_SECCOMP_FILTER | 54 | select HAVE_ARCH_SECCOMP_FILTER |
| 55 | select HAVE_ARCH_TRACEHOOK | 55 | select HAVE_ARCH_TRACEHOOK |
| @@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075 | |||
| 316 | 316 | ||
| 317 | If unsure, say Y. | 317 | If unsure, say Y. |
| 318 | 318 | ||
| 319 | config ARM64_ERRATUM_834220 | ||
| 320 | bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault" | ||
| 321 | depends on KVM | ||
| 322 | default y | ||
| 323 | help | ||
| 324 | This option adds an alternative code sequence to work around ARM | ||
| 325 | erratum 834220 on Cortex-A57 parts up to r1p2. | ||
| 326 | |||
| 327 | Affected Cortex-A57 parts might report a Stage 2 translation | ||
| 328 | fault as the result of a Stage 1 fault for load crossing a | ||
| 329 | page boundary when there is a permission or device memory | ||
| 330 | alignment fault at Stage 1 and a translation fault at Stage 2. | ||
| 331 | |||
| 332 | The workaround is to verify that the Stage 1 translation | ||
| 333 | doesn't generate a fault before handling the Stage 2 fault. | ||
| 334 | Please note that this does not necessarily enable the workaround, | ||
| 335 | as it depends on the alternative framework, which will only patch | ||
| 336 | the kernel if an affected CPU is detected. | ||
| 337 | |||
| 338 | If unsure, say Y. | ||
| 339 | |||
| 319 | config ARM64_ERRATUM_845719 | 340 | config ARM64_ERRATUM_845719 |
| 320 | bool "Cortex-A53: 845719: a load might read incorrect data" | 341 | bool "Cortex-A53: 845719: a load might read incorrect data" |
| 321 | depends on COMPAT | 342 | depends on COMPAT |
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c index ce47792a983d..f7bd9bf0bbb3 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-cipher.c | |||
| @@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey); | |||
| 237 | static struct crypto_alg aes_alg = { | 237 | static struct crypto_alg aes_alg = { |
| 238 | .cra_name = "aes", | 238 | .cra_name = "aes", |
| 239 | .cra_driver_name = "aes-ce", | 239 | .cra_driver_name = "aes-ce", |
| 240 | .cra_priority = 300, | 240 | .cra_priority = 250, |
| 241 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 241 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
| 242 | .cra_blocksize = AES_BLOCK_SIZE, | 242 | .cra_blocksize = AES_BLOCK_SIZE, |
| 243 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), | 243 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), |
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 624f9679f4b0..9622eb48f894 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h | |||
| @@ -64,27 +64,31 @@ do { \ | |||
| 64 | 64 | ||
| 65 | #define smp_load_acquire(p) \ | 65 | #define smp_load_acquire(p) \ |
| 66 | ({ \ | 66 | ({ \ |
| 67 | typeof(*p) ___p1; \ | 67 | union { typeof(*p) __val; char __c[1]; } __u; \ |
| 68 | compiletime_assert_atomic_type(*p); \ | 68 | compiletime_assert_atomic_type(*p); \ |
| 69 | switch (sizeof(*p)) { \ | 69 | switch (sizeof(*p)) { \ |
| 70 | case 1: \ | 70 | case 1: \ |
| 71 | asm volatile ("ldarb %w0, %1" \ | 71 | asm volatile ("ldarb %w0, %1" \ |
| 72 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | 72 | : "=r" (*(__u8 *)__u.__c) \ |
| 73 | : "Q" (*p) : "memory"); \ | ||
| 73 | break; \ | 74 | break; \ |
| 74 | case 2: \ | 75 | case 2: \ |
| 75 | asm volatile ("ldarh %w0, %1" \ | 76 | asm volatile ("ldarh %w0, %1" \ |
| 76 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | 77 | : "=r" (*(__u16 *)__u.__c) \ |
| 78 | : "Q" (*p) : "memory"); \ | ||
| 77 | break; \ | 79 | break; \ |
| 78 | case 4: \ | 80 | case 4: \ |
| 79 | asm volatile ("ldar %w0, %1" \ | 81 | asm volatile ("ldar %w0, %1" \ |
| 80 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | 82 | : "=r" (*(__u32 *)__u.__c) \ |
| 83 | : "Q" (*p) : "memory"); \ | ||
| 81 | break; \ | 84 | break; \ |
| 82 | case 8: \ | 85 | case 8: \ |
| 83 | asm volatile ("ldar %0, %1" \ | 86 | asm volatile ("ldar %0, %1" \ |
| 84 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | 87 | : "=r" (*(__u64 *)__u.__c) \ |
| 88 | : "Q" (*p) : "memory"); \ | ||
| 85 | break; \ | 89 | break; \ |
| 86 | } \ | 90 | } \ |
| 87 | ___p1; \ | 91 | __u.__val; \ |
| 88 | }) | 92 | }) |
| 89 | 93 | ||
| 90 | #define read_barrier_depends() do { } while(0) | 94 | #define read_barrier_depends() do { } while(0) |
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 7fbed6919b54..eb8432bb82b8 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
| 25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
| 26 | #include <linux/ptrace.h> | ||
| 27 | 26 | ||
| 28 | #define COMPAT_USER_HZ 100 | 27 | #define COMPAT_USER_HZ 100 |
| 29 | #ifdef __AARCH64EB__ | 28 | #ifdef __AARCH64EB__ |
| @@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
| 234 | return (u32)(unsigned long)uptr; | 233 | return (u32)(unsigned long)uptr; |
| 235 | } | 234 | } |
| 236 | 235 | ||
| 237 | #define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) | 236 | #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) |
| 238 | 237 | ||
| 239 | static inline void __user *arch_compat_alloc_user_space(long len) | 238 | static inline void __user *arch_compat_alloc_user_space(long len) |
| 240 | { | 239 | { |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 11d5bb0fdd54..8f271b83f910 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
| @@ -29,8 +29,9 @@ | |||
| 29 | #define ARM64_HAS_PAN 4 | 29 | #define ARM64_HAS_PAN 4 |
| 30 | #define ARM64_HAS_LSE_ATOMICS 5 | 30 | #define ARM64_HAS_LSE_ATOMICS 5 |
| 31 | #define ARM64_WORKAROUND_CAVIUM_23154 6 | 31 | #define ARM64_WORKAROUND_CAVIUM_23154 6 |
| 32 | #define ARM64_WORKAROUND_834220 7 | ||
| 32 | 33 | ||
| 33 | #define ARM64_NCAPS 7 | 34 | #define ARM64_NCAPS 8 |
| 34 | 35 | ||
| 35 | #ifndef __ASSEMBLY__ | 36 | #ifndef __ASSEMBLY__ |
| 36 | 37 | ||
| @@ -46,8 +47,12 @@ enum ftr_type { | |||
| 46 | #define FTR_STRICT true /* SANITY check strict matching required */ | 47 | #define FTR_STRICT true /* SANITY check strict matching required */ |
| 47 | #define FTR_NONSTRICT false /* SANITY check ignored */ | 48 | #define FTR_NONSTRICT false /* SANITY check ignored */ |
| 48 | 49 | ||
| 50 | #define FTR_SIGNED true /* Value should be treated as signed */ | ||
| 51 | #define FTR_UNSIGNED false /* Value should be treated as unsigned */ | ||
| 52 | |||
| 49 | struct arm64_ftr_bits { | 53 | struct arm64_ftr_bits { |
| 50 | bool strict; /* CPU Sanity check: strict matching required ? */ | 54 | bool sign; /* Value is signed ? */ |
| 55 | bool strict; /* CPU Sanity check: strict matching required ? */ | ||
| 51 | enum ftr_type type; | 56 | enum ftr_type type; |
| 52 | u8 shift; | 57 | u8 shift; |
| 53 | u8 width; | 58 | u8 width; |
| @@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field) | |||
| 123 | return cpuid_feature_extract_field_width(features, field, 4); | 128 | return cpuid_feature_extract_field_width(features, field, 4); |
| 124 | } | 129 | } |
| 125 | 130 | ||
| 131 | static inline unsigned int __attribute_const__ | ||
| 132 | cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) | ||
| 133 | { | ||
| 134 | return (u64)(features << (64 - width - field)) >> (64 - width); | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline unsigned int __attribute_const__ | ||
| 138 | cpuid_feature_extract_unsigned_field(u64 features, int field) | ||
| 139 | { | ||
| 140 | return cpuid_feature_extract_unsigned_field_width(features, field, 4); | ||
| 141 | } | ||
| 142 | |||
| 126 | static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) | 143 | static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) |
| 127 | { | 144 | { |
| 128 | return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); | 145 | return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); |
| @@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) | |||
| 130 | 147 | ||
| 131 | static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) | 148 | static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) |
| 132 | { | 149 | { |
| 133 | return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); | 150 | return ftrp->sign ? |
| 151 | cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) : | ||
| 152 | cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width); | ||
| 134 | } | 153 | } |
| 135 | 154 | ||
| 136 | static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) | 155 | static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) |
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 54d0ead41afc..61e08f360e31 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | 18 | ||
| 19 | #ifdef __KERNEL__ | 19 | #ifdef __KERNEL__ |
| 20 | 20 | ||
| 21 | #include <linux/acpi.h> | ||
| 22 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 23 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
| 24 | 23 | ||
| @@ -26,22 +25,16 @@ | |||
| 26 | #include <asm/xen/hypervisor.h> | 25 | #include <asm/xen/hypervisor.h> |
| 27 | 26 | ||
| 28 | #define DMA_ERROR_CODE (~(dma_addr_t)0) | 27 | #define DMA_ERROR_CODE (~(dma_addr_t)0) |
| 29 | extern struct dma_map_ops *dma_ops; | ||
| 30 | extern struct dma_map_ops dummy_dma_ops; | 28 | extern struct dma_map_ops dummy_dma_ops; |
| 31 | 29 | ||
| 32 | static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) | 30 | static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) |
| 33 | { | 31 | { |
| 34 | if (unlikely(!dev)) | 32 | if (dev && dev->archdata.dma_ops) |
| 35 | return dma_ops; | ||
| 36 | else if (dev->archdata.dma_ops) | ||
| 37 | return dev->archdata.dma_ops; | 33 | return dev->archdata.dma_ops; |
| 38 | else if (acpi_disabled) | ||
| 39 | return dma_ops; | ||
| 40 | 34 | ||
| 41 | /* | 35 | /* |
| 42 | * When ACPI is enabled, if arch_set_dma_ops is not called, | 36 | * We expect no ISA devices, and all other DMA masters are expected to |
| 43 | * we will disable device DMA capability by setting it | 37 | * have someone call arch_setup_dma_ops at device creation time. |
| 44 | * to dummy_dma_ops. | ||
| 45 | */ | 38 | */ |
| 46 | return &dummy_dma_ops; | 39 | return &dummy_dma_ops; |
| 47 | } | 40 | } |
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index e54415ec6935..9732908bfc8a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h | |||
| @@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp; | |||
| 138 | /* Determine number of BRP registers available. */ | 138 | /* Determine number of BRP registers available. */ |
| 139 | static inline int get_num_brps(void) | 139 | static inline int get_num_brps(void) |
| 140 | { | 140 | { |
| 141 | u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); | ||
| 141 | return 1 + | 142 | return 1 + |
| 142 | cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), | 143 | cpuid_feature_extract_unsigned_field(dfr0, |
| 143 | ID_AA64DFR0_BRPS_SHIFT); | 144 | ID_AA64DFR0_BRPS_SHIFT); |
| 144 | } | 145 | } |
| 145 | 146 | ||
| 146 | /* Determine number of WRP registers available. */ | 147 | /* Determine number of WRP registers available. */ |
| 147 | static inline int get_num_wrps(void) | 148 | static inline int get_num_wrps(void) |
| 148 | { | 149 | { |
| 150 | u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); | ||
| 149 | return 1 + | 151 | return 1 + |
| 150 | cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), | 152 | cpuid_feature_extract_unsigned_field(dfr0, |
| 151 | ID_AA64DFR0_WRPS_SHIFT); | 153 | ID_AA64DFR0_WRPS_SHIFT); |
| 152 | } | 154 | } |
| 153 | 155 | ||
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index 23eb450b820b..8e8d30684392 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h | |||
| @@ -7,4 +7,9 @@ struct pt_regs; | |||
| 7 | 7 | ||
| 8 | extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); | 8 | extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); |
| 9 | 9 | ||
| 10 | static inline int nr_legacy_irqs(void) | ||
| 11 | { | ||
| 12 | return 0; | ||
| 13 | } | ||
| 14 | |||
| 10 | #endif | 15 | #endif |
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 17e92f05b1fe..3ca894ecf699 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
| @@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | |||
| 99 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; | 99 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | /* | ||
| 103 | * vcpu_reg should always be passed a register number coming from a | ||
| 104 | * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32 | ||
| 105 | * with banked registers. | ||
| 106 | */ | ||
| 102 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) | 107 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) |
| 103 | { | 108 | { |
| 104 | if (vcpu_mode_is_32bit(vcpu)) | ||
| 105 | return vcpu_reg32(vcpu, reg_num); | ||
| 106 | |||
| 107 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; | 109 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; |
| 108 | } | 110 | } |
| 109 | 111 | ||
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index c0e87898ba96..24165784b803 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
| @@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void) | |||
| 101 | #define destroy_context(mm) do { } while(0) | 101 | #define destroy_context(mm) do { } while(0) |
| 102 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); | 102 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); |
| 103 | 103 | ||
| 104 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) | 104 | #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) |
| 105 | 105 | ||
| 106 | /* | 106 | /* |
| 107 | * This is called when "tsk" is about to enter lazy TLB mode. | 107 | * This is called when "tsk" is about to enter lazy TLB mode. |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9819a9426b69..7e074f93f383 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
| @@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
| 81 | 81 | ||
| 82 | #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 82 | #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
| 83 | #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) | 83 | #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) |
| 84 | #define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) | ||
| 84 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 85 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
| 85 | #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) | 86 | #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) |
| 86 | 87 | ||
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 24926f2504f7..feb6b4efa641 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
| @@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
| 75 | (1 << MIDR_VARIANT_SHIFT) | 2), | 75 | (1 << MIDR_VARIANT_SHIFT) | 2), |
| 76 | }, | 76 | }, |
| 77 | #endif | 77 | #endif |
| 78 | #ifdef CONFIG_ARM64_ERRATUM_834220 | ||
| 79 | { | ||
| 80 | /* Cortex-A57 r0p0 - r1p2 */ | ||
| 81 | .desc = "ARM erratum 834220", | ||
| 82 | .capability = ARM64_WORKAROUND_834220, | ||
| 83 | MIDR_RANGE(MIDR_CORTEX_A57, 0x00, | ||
| 84 | (1 << MIDR_VARIANT_SHIFT) | 2), | ||
| 85 | }, | ||
| 86 | #endif | ||
| 78 | #ifdef CONFIG_ARM64_ERRATUM_845719 | 87 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
| 79 | { | 88 | { |
| 80 | /* Cortex-A53 r0p[01234] */ | 89 | /* Cortex-A53 r0p[01234] */ |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c8cf89223b5a..0669c63281ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
| @@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly; | |||
| 44 | 44 | ||
| 45 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | 45 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); |
| 46 | 46 | ||
| 47 | #define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ | 47 | #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ |
| 48 | { \ | 48 | { \ |
| 49 | .sign = SIGNED, \ | ||
| 49 | .strict = STRICT, \ | 50 | .strict = STRICT, \ |
| 50 | .type = TYPE, \ | 51 | .type = TYPE, \ |
| 51 | .shift = SHIFT, \ | 52 | .shift = SHIFT, \ |
| @@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | |||
| 53 | .safe_val = SAFE_VAL, \ | 54 | .safe_val = SAFE_VAL, \ |
| 54 | } | 55 | } |
| 55 | 56 | ||
| 57 | /* Define a feature with signed values */ | ||
| 58 | #define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ | ||
| 59 | __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) | ||
| 60 | |||
| 61 | /* Define a feature with unsigned value */ | ||
| 62 | #define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ | ||
| 63 | __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) | ||
| 64 | |||
| 56 | #define ARM64_FTR_END \ | 65 | #define ARM64_FTR_END \ |
| 57 | { \ | 66 | { \ |
| 58 | .width = 0, \ | 67 | .width = 0, \ |
| @@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { | |||
| 99 | * Differing PARange is fine as long as all peripherals and memory are mapped | 108 | * Differing PARange is fine as long as all peripherals and memory are mapped |
| 100 | * within the minimum PARange of all CPUs | 109 | * within the minimum PARange of all CPUs |
| 101 | */ | 110 | */ |
| 102 | ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), | 111 | U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), |
| 103 | ARM64_FTR_END, | 112 | ARM64_FTR_END, |
| 104 | }; | 113 | }; |
| 105 | 114 | ||
| @@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { | |||
| 115 | }; | 124 | }; |
| 116 | 125 | ||
| 117 | static struct arm64_ftr_bits ftr_ctr[] = { | 126 | static struct arm64_ftr_bits ftr_ctr[] = { |
| 118 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ | 127 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ |
| 119 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), | 128 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), |
| 120 | ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ | 129 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ |
| 121 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ | 130 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ |
| 122 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ | 131 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ |
| 123 | /* | 132 | /* |
| 124 | * Linux can handle differing I-cache policies. Userspace JITs will | 133 | * Linux can handle differing I-cache policies. Userspace JITs will |
| 125 | * make use of *minLine | 134 | * make use of *minLine |
| 126 | */ | 135 | */ |
| 127 | ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ | 136 | U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ |
| 128 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ | 137 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ |
| 129 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ | 138 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ |
| 130 | ARM64_FTR_END, | 139 | ARM64_FTR_END, |
| 131 | }; | 140 | }; |
| 132 | 141 | ||
| @@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = { | |||
| 144 | 153 | ||
| 145 | static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { | 154 | static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { |
| 146 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), | 155 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), |
| 147 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), | 156 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), |
| 148 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), | 157 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), |
| 149 | ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), | 158 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), |
| 150 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), | 159 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), |
| 151 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), | 160 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), |
| 152 | ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), | 161 | U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), |
| 153 | ARM64_FTR_END, | 162 | ARM64_FTR_END, |
| 154 | }; | 163 | }; |
| 155 | 164 | ||
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 706679d0a0b4..212ae6361d8b 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
| 31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
| 32 | #include <linux/smp.h> | 32 | #include <linux/smp.h> |
| 33 | #include <linux/delay.h> | ||
| 33 | 34 | ||
| 34 | /* | 35 | /* |
| 35 | * In case the boot CPU is hotpluggable, we record its initial state and | 36 | * In case the boot CPU is hotpluggable, we record its initial state and |
| @@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v) | |||
| 112 | */ | 113 | */ |
| 113 | seq_printf(m, "processor\t: %d\n", i); | 114 | seq_printf(m, "processor\t: %d\n", i); |
| 114 | 115 | ||
| 116 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | ||
| 117 | loops_per_jiffy / (500000UL/HZ), | ||
| 118 | loops_per_jiffy / (5000UL/HZ) % 100); | ||
| 119 | |||
| 115 | /* | 120 | /* |
| 116 | * Dump out the common processor features in a single line. | 121 | * Dump out the common processor features in a single line. |
| 117 | * Userspace should read the hwcaps with getauxval(AT_HWCAP) | 122 | * Userspace should read the hwcaps with getauxval(AT_HWCAP) |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index de46b50f4cdf..4eeb17198cfa 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
| @@ -127,7 +127,11 @@ static int __init uefi_init(void) | |||
| 127 | table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; | 127 | table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; |
| 128 | config_tables = early_memremap(efi_to_phys(efi.systab->tables), | 128 | config_tables = early_memremap(efi_to_phys(efi.systab->tables), |
| 129 | table_size); | 129 | table_size); |
| 130 | 130 | if (config_tables == NULL) { | |
| 131 | pr_warn("Unable to map EFI config table array.\n"); | ||
| 132 | retval = -ENOMEM; | ||
| 133 | goto out; | ||
| 134 | } | ||
| 131 | retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, | 135 | retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, |
| 132 | sizeof(efi_config_table_64_t), NULL); | 136 | sizeof(efi_config_table_64_t), NULL); |
| 133 | 137 | ||
| @@ -209,6 +213,14 @@ void __init efi_init(void) | |||
| 209 | PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); | 213 | PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); |
| 210 | memmap.phys_map = params.mmap; | 214 | memmap.phys_map = params.mmap; |
| 211 | memmap.map = early_memremap(params.mmap, params.mmap_size); | 215 | memmap.map = early_memremap(params.mmap, params.mmap_size); |
| 216 | if (memmap.map == NULL) { | ||
| 217 | /* | ||
| 218 | * If we are booting via UEFI, the UEFI memory map is the only | ||
| 219 | * description of memory we have, so there is little point in | ||
| 220 | * proceeding if we cannot access it. | ||
| 221 | */ | ||
| 222 | panic("Unable to map EFI memory map.\n"); | ||
| 223 | } | ||
| 212 | memmap.map_end = memmap.map + params.mmap_size; | 224 | memmap.map_end = memmap.map + params.mmap_size; |
| 213 | memmap.desc_size = params.desc_size; | 225 | memmap.desc_size = params.desc_size; |
| 214 | memmap.desc_version = params.desc_ver; | 226 | memmap.desc_version = params.desc_ver; |
| @@ -224,8 +236,9 @@ static bool __init efi_virtmap_init(void) | |||
| 224 | { | 236 | { |
| 225 | efi_memory_desc_t *md; | 237 | efi_memory_desc_t *md; |
| 226 | 238 | ||
| 239 | init_new_context(NULL, &efi_mm); | ||
| 240 | |||
| 227 | for_each_efi_memory_desc(&memmap, md) { | 241 | for_each_efi_memory_desc(&memmap, md) { |
| 228 | u64 paddr, npages, size; | ||
| 229 | pgprot_t prot; | 242 | pgprot_t prot; |
| 230 | 243 | ||
| 231 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | 244 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
| @@ -233,11 +246,6 @@ static bool __init efi_virtmap_init(void) | |||
| 233 | if (md->virt_addr == 0) | 246 | if (md->virt_addr == 0) |
| 234 | return false; | 247 | return false; |
| 235 | 248 | ||
| 236 | paddr = md->phys_addr; | ||
| 237 | npages = md->num_pages; | ||
| 238 | memrange_efi_to_native(&paddr, &npages); | ||
| 239 | size = npages << PAGE_SHIFT; | ||
| 240 | |||
| 241 | pr_info(" EFI remap 0x%016llx => %p\n", | 249 | pr_info(" EFI remap 0x%016llx => %p\n", |
| 242 | md->phys_addr, (void *)md->virt_addr); | 250 | md->phys_addr, (void *)md->virt_addr); |
| 243 | 251 | ||
| @@ -254,7 +262,9 @@ static bool __init efi_virtmap_init(void) | |||
| 254 | else | 262 | else |
| 255 | prot = PAGE_KERNEL; | 263 | prot = PAGE_KERNEL; |
| 256 | 264 | ||
| 257 | create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); | 265 | create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, |
| 266 | md->num_pages << EFI_PAGE_SHIFT, | ||
| 267 | __pgprot(pgprot_val(prot) | PTE_NG)); | ||
| 258 | } | 268 | } |
| 259 | return true; | 269 | return true; |
| 260 | } | 270 | } |
| @@ -270,12 +280,12 @@ static int __init arm64_enable_runtime_services(void) | |||
| 270 | 280 | ||
| 271 | if (!efi_enabled(EFI_BOOT)) { | 281 | if (!efi_enabled(EFI_BOOT)) { |
| 272 | pr_info("EFI services will not be available.\n"); | 282 | pr_info("EFI services will not be available.\n"); |
| 273 | return -1; | 283 | return 0; |
| 274 | } | 284 | } |
| 275 | 285 | ||
| 276 | if (efi_runtime_disabled()) { | 286 | if (efi_runtime_disabled()) { |
| 277 | pr_info("EFI runtime services will be disabled.\n"); | 287 | pr_info("EFI runtime services will be disabled.\n"); |
| 278 | return -1; | 288 | return 0; |
| 279 | } | 289 | } |
| 280 | 290 | ||
| 281 | pr_info("Remapping and enabling EFI services.\n"); | 291 | pr_info("Remapping and enabling EFI services.\n"); |
| @@ -285,7 +295,7 @@ static int __init arm64_enable_runtime_services(void) | |||
| 285 | mapsize); | 295 | mapsize); |
| 286 | if (!memmap.map) { | 296 | if (!memmap.map) { |
| 287 | pr_err("Failed to remap EFI memory map\n"); | 297 | pr_err("Failed to remap EFI memory map\n"); |
| 288 | return -1; | 298 | return -ENOMEM; |
| 289 | } | 299 | } |
| 290 | memmap.map_end = memmap.map + mapsize; | 300 | memmap.map_end = memmap.map + mapsize; |
| 291 | efi.memmap = &memmap; | 301 | efi.memmap = &memmap; |
| @@ -294,13 +304,13 @@ static int __init arm64_enable_runtime_services(void) | |||
| 294 | sizeof(efi_system_table_t)); | 304 | sizeof(efi_system_table_t)); |
| 295 | if (!efi.systab) { | 305 | if (!efi.systab) { |
| 296 | pr_err("Failed to remap EFI System Table\n"); | 306 | pr_err("Failed to remap EFI System Table\n"); |
| 297 | return -1; | 307 | return -ENOMEM; |
| 298 | } | 308 | } |
| 299 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); | 309 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); |
| 300 | 310 | ||
| 301 | if (!efi_virtmap_init()) { | 311 | if (!efi_virtmap_init()) { |
| 302 | pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); | 312 | pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); |
| 303 | return -1; | 313 | return -ENOMEM; |
| 304 | } | 314 | } |
| 305 | 315 | ||
| 306 | /* Set up runtime services function pointers */ | 316 | /* Set up runtime services function pointers */ |
| @@ -329,14 +339,7 @@ core_initcall(arm64_dmi_init); | |||
| 329 | 339 | ||
| 330 | static void efi_set_pgd(struct mm_struct *mm) | 340 | static void efi_set_pgd(struct mm_struct *mm) |
| 331 | { | 341 | { |
| 332 | if (mm == &init_mm) | 342 | switch_mm(NULL, mm, NULL); |
| 333 | cpu_set_reserved_ttbr0(); | ||
| 334 | else | ||
| 335 | cpu_switch_mm(mm->pgd, mm); | ||
| 336 | |||
| 337 | local_flush_tlb_all(); | ||
| 338 | if (icache_is_aivivt()) | ||
| 339 | __local_flush_icache_all(); | ||
| 340 | } | 343 | } |
| 341 | 344 | ||
| 342 | void efi_virtmap_load(void) | 345 | void efi_virtmap_load(void) |
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index fce95e17cf7f..1095aa483a1c 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | #include <linux/ftrace.h> | ||
| 1 | #include <linux/percpu.h> | 2 | #include <linux/percpu.h> |
| 2 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
| 3 | #include <asm/cacheflush.h> | 4 | #include <asm/cacheflush.h> |
| @@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | |||
| 71 | local_dbg_save(flags); | 72 | local_dbg_save(flags); |
| 72 | 73 | ||
| 73 | /* | 74 | /* |
| 75 | * Function graph tracer state gets incosistent when the kernel | ||
| 76 | * calls functions that never return (aka suspend finishers) hence | ||
| 77 | * disable graph tracing during their execution. | ||
| 78 | */ | ||
| 79 | pause_graph_tracing(); | ||
| 80 | |||
| 81 | /* | ||
| 74 | * mm context saved on the stack, it will be restored when | 82 | * mm context saved on the stack, it will be restored when |
| 75 | * the cpu comes out of reset through the identity mapped | 83 | * the cpu comes out of reset through the identity mapped |
| 76 | * page tables, so that the thread address space is properly | 84 | * page tables, so that the thread address space is properly |
| @@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | |||
| 111 | hw_breakpoint_restore(NULL); | 119 | hw_breakpoint_restore(NULL); |
| 112 | } | 120 | } |
| 113 | 121 | ||
| 122 | unpause_graph_tracing(); | ||
| 123 | |||
| 114 | /* | 124 | /* |
| 115 | * Restore pstate flags. OS lock and mdscr have been already | 125 | * Restore pstate flags. OS lock and mdscr have been already |
| 116 | * restored, so from this point onwards, debugging is fully | 126 | * restored, so from this point onwards, debugging is fully |
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 1599701ef044..86c289832272 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
| @@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context) | |||
| 864 | ENDPROC(__kvm_flush_vm_context) | 864 | ENDPROC(__kvm_flush_vm_context) |
| 865 | 865 | ||
| 866 | __kvm_hyp_panic: | 866 | __kvm_hyp_panic: |
| 867 | // Stash PAR_EL1 before corrupting it in __restore_sysregs | ||
| 868 | mrs x0, par_el1 | ||
| 869 | push x0, xzr | ||
| 870 | |||
| 867 | // Guess the context by looking at VTTBR: | 871 | // Guess the context by looking at VTTBR: |
| 868 | // If zero, then we're already a host. | 872 | // If zero, then we're already a host. |
| 869 | // Otherwise restore a minimal host context before panicing. | 873 | // Otherwise restore a minimal host context before panicing. |
| @@ -898,7 +902,7 @@ __kvm_hyp_panic: | |||
| 898 | mrs x3, esr_el2 | 902 | mrs x3, esr_el2 |
| 899 | mrs x4, far_el2 | 903 | mrs x4, far_el2 |
| 900 | mrs x5, hpfar_el2 | 904 | mrs x5, hpfar_el2 |
| 901 | mrs x6, par_el1 | 905 | pop x6, xzr // active context PAR_EL1 |
| 902 | mrs x7, tpidr_el2 | 906 | mrs x7, tpidr_el2 |
| 903 | 907 | ||
| 904 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ | 908 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ |
| @@ -914,7 +918,7 @@ __kvm_hyp_panic: | |||
| 914 | ENDPROC(__kvm_hyp_panic) | 918 | ENDPROC(__kvm_hyp_panic) |
| 915 | 919 | ||
| 916 | __hyp_panic_str: | 920 | __hyp_panic_str: |
| 917 | .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" | 921 | .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" |
| 918 | 922 | ||
| 919 | .align 2 | 923 | .align 2 |
| 920 | 924 | ||
| @@ -1015,9 +1019,15 @@ el1_trap: | |||
| 1015 | b.ne 1f // Not an abort we care about | 1019 | b.ne 1f // Not an abort we care about |
| 1016 | 1020 | ||
| 1017 | /* This is an abort. Check for permission fault */ | 1021 | /* This is an abort. Check for permission fault */ |
| 1022 | alternative_if_not ARM64_WORKAROUND_834220 | ||
| 1018 | and x2, x1, #ESR_ELx_FSC_TYPE | 1023 | and x2, x1, #ESR_ELx_FSC_TYPE |
| 1019 | cmp x2, #FSC_PERM | 1024 | cmp x2, #FSC_PERM |
| 1020 | b.ne 1f // Not a permission fault | 1025 | b.ne 1f // Not a permission fault |
| 1026 | alternative_else | ||
| 1027 | nop // Use the permission fault path to | ||
| 1028 | nop // check for a valid S1 translation, | ||
| 1029 | nop // regardless of the ESR value. | ||
| 1030 | alternative_endif | ||
| 1021 | 1031 | ||
| 1022 | /* | 1032 | /* |
| 1023 | * Check for Stage-1 page table walk, which is guaranteed | 1033 | * Check for Stage-1 page table walk, which is guaranteed |
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 85c57158dcd9..648112e90ed5 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
| @@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | |||
| 48 | 48 | ||
| 49 | /* Note: These now point to the banked copies */ | 49 | /* Note: These now point to the banked copies */ |
| 50 | *vcpu_spsr(vcpu) = new_spsr_value; | 50 | *vcpu_spsr(vcpu) = new_spsr_value; |
| 51 | *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | 51 | *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; |
| 52 | 52 | ||
| 53 | /* Branch to exception vector */ | 53 | /* Branch to exception vector */ |
| 54 | if (sctlr & (1 << 13)) | 54 | if (sctlr & (1 << 13)) |
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index f636a2639f03..e87f53ff5f58 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c | |||
| @@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu) | |||
| 76 | __flush_icache_all(); | 76 | __flush_icache_all(); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static int is_reserved_asid(u64 asid) | 79 | static bool check_update_reserved_asid(u64 asid, u64 newasid) |
| 80 | { | 80 | { |
| 81 | int cpu; | 81 | int cpu; |
| 82 | for_each_possible_cpu(cpu) | 82 | bool hit = false; |
| 83 | if (per_cpu(reserved_asids, cpu) == asid) | 83 | |
| 84 | return 1; | 84 | /* |
| 85 | return 0; | 85 | * Iterate over the set of reserved ASIDs looking for a match. |
| 86 | * If we find one, then we can update our mm to use newasid | ||
| 87 | * (i.e. the same ASID in the current generation) but we can't | ||
| 88 | * exit the loop early, since we need to ensure that all copies | ||
| 89 | * of the old ASID are updated to reflect the mm. Failure to do | ||
| 90 | * so could result in us missing the reserved ASID in a future | ||
| 91 | * generation. | ||
| 92 | */ | ||
| 93 | for_each_possible_cpu(cpu) { | ||
| 94 | if (per_cpu(reserved_asids, cpu) == asid) { | ||
| 95 | hit = true; | ||
| 96 | per_cpu(reserved_asids, cpu) = newasid; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | return hit; | ||
| 86 | } | 101 | } |
| 87 | 102 | ||
| 88 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) | 103 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
| @@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 92 | u64 generation = atomic64_read(&asid_generation); | 107 | u64 generation = atomic64_read(&asid_generation); |
| 93 | 108 | ||
| 94 | if (asid != 0) { | 109 | if (asid != 0) { |
| 110 | u64 newasid = generation | (asid & ~ASID_MASK); | ||
| 111 | |||
| 95 | /* | 112 | /* |
| 96 | * If our current ASID was active during a rollover, we | 113 | * If our current ASID was active during a rollover, we |
| 97 | * can continue to use it and this was just a false alarm. | 114 | * can continue to use it and this was just a false alarm. |
| 98 | */ | 115 | */ |
| 99 | if (is_reserved_asid(asid)) | 116 | if (check_update_reserved_asid(asid, newasid)) |
| 100 | return generation | (asid & ~ASID_MASK); | 117 | return newasid; |
| 101 | 118 | ||
| 102 | /* | 119 | /* |
| 103 | * We had a valid ASID in a previous life, so try to re-use | 120 | * We had a valid ASID in a previous life, so try to re-use |
| @@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 105 | */ | 122 | */ |
| 106 | asid &= ~ASID_MASK; | 123 | asid &= ~ASID_MASK; |
| 107 | if (!__test_and_set_bit(asid, asid_map)) | 124 | if (!__test_and_set_bit(asid, asid_map)) |
| 108 | goto bump_gen; | 125 | return newasid; |
| 109 | } | 126 | } |
| 110 | 127 | ||
| 111 | /* | 128 | /* |
| @@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 129 | set_asid: | 146 | set_asid: |
| 130 | __set_bit(asid, asid_map); | 147 | __set_bit(asid, asid_map); |
| 131 | cur_idx = asid; | 148 | cur_idx = asid; |
| 132 | 149 | return asid | generation; | |
| 133 | bump_gen: | ||
| 134 | asid |= generation; | ||
| 135 | return asid; | ||
| 136 | } | 150 | } |
| 137 | 151 | ||
| 138 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) | 152 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 131a199114b4..7963aa4b5d28 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
| 21 | #include <linux/acpi.h> | ||
| 21 | #include <linux/export.h> | 22 | #include <linux/export.h> |
| 22 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 23 | #include <linux/genalloc.h> | 24 | #include <linux/genalloc.h> |
| @@ -28,9 +29,6 @@ | |||
| 28 | 29 | ||
| 29 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
| 30 | 31 | ||
| 31 | struct dma_map_ops *dma_ops; | ||
| 32 | EXPORT_SYMBOL(dma_ops); | ||
| 33 | |||
| 34 | static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, | 32 | static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, |
| 35 | bool coherent) | 33 | bool coherent) |
| 36 | { | 34 | { |
| @@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops); | |||
| 515 | 513 | ||
| 516 | static int __init arm64_dma_init(void) | 514 | static int __init arm64_dma_init(void) |
| 517 | { | 515 | { |
| 518 | int ret; | 516 | return atomic_pool_init(); |
| 519 | |||
| 520 | dma_ops = &swiotlb_dma_ops; | ||
| 521 | |||
| 522 | ret = atomic_pool_init(); | ||
| 523 | |||
| 524 | return ret; | ||
| 525 | } | 517 | } |
| 526 | arch_initcall(arm64_dma_init); | 518 | arch_initcall(arm64_dma_init); |
| 527 | 519 | ||
| @@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 552 | { | 544 | { |
| 553 | bool coherent = is_device_dma_coherent(dev); | 545 | bool coherent = is_device_dma_coherent(dev); |
| 554 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); | 546 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); |
| 547 | size_t iosize = size; | ||
| 555 | void *addr; | 548 | void *addr; |
| 556 | 549 | ||
| 557 | if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) | 550 | if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) |
| 558 | return NULL; | 551 | return NULL; |
| 552 | |||
| 553 | size = PAGE_ALIGN(size); | ||
| 554 | |||
| 559 | /* | 555 | /* |
| 560 | * Some drivers rely on this, and we probably don't want the | 556 | * Some drivers rely on this, and we probably don't want the |
| 561 | * possibility of stale kernel data being read by devices anyway. | 557 | * possibility of stale kernel data being read by devices anyway. |
| @@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 566 | struct page **pages; | 562 | struct page **pages; |
| 567 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); | 563 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); |
| 568 | 564 | ||
| 569 | pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle, | 565 | pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle, |
| 570 | flush_page); | 566 | flush_page); |
| 571 | if (!pages) | 567 | if (!pages) |
| 572 | return NULL; | 568 | return NULL; |
| @@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 574 | addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, | 570 | addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, |
| 575 | __builtin_return_address(0)); | 571 | __builtin_return_address(0)); |
| 576 | if (!addr) | 572 | if (!addr) |
| 577 | iommu_dma_free(dev, pages, size, handle); | 573 | iommu_dma_free(dev, pages, iosize, handle); |
| 578 | } else { | 574 | } else { |
| 579 | struct page *page; | 575 | struct page *page; |
| 580 | /* | 576 | /* |
| @@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 591 | if (!addr) | 587 | if (!addr) |
| 592 | return NULL; | 588 | return NULL; |
| 593 | 589 | ||
| 594 | *handle = iommu_dma_map_page(dev, page, 0, size, ioprot); | 590 | *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); |
| 595 | if (iommu_dma_mapping_error(dev, *handle)) { | 591 | if (iommu_dma_mapping_error(dev, *handle)) { |
| 596 | if (coherent) | 592 | if (coherent) |
| 597 | __free_pages(page, get_order(size)); | 593 | __free_pages(page, get_order(size)); |
| @@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 606 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 602 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
| 607 | dma_addr_t handle, struct dma_attrs *attrs) | 603 | dma_addr_t handle, struct dma_attrs *attrs) |
| 608 | { | 604 | { |
| 605 | size_t iosize = size; | ||
| 606 | |||
| 607 | size = PAGE_ALIGN(size); | ||
| 609 | /* | 608 | /* |
| 610 | * @cpu_addr will be one of 3 things depending on how it was allocated: | 609 | * @cpu_addr will be one of 3 things depending on how it was allocated: |
| 611 | * - A remapped array of pages from iommu_dma_alloc(), for all | 610 | * - A remapped array of pages from iommu_dma_alloc(), for all |
| @@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
| 617 | * Hence how dodgy the below logic looks... | 616 | * Hence how dodgy the below logic looks... |
| 618 | */ | 617 | */ |
| 619 | if (__in_atomic_pool(cpu_addr, size)) { | 618 | if (__in_atomic_pool(cpu_addr, size)) { |
| 620 | iommu_dma_unmap_page(dev, handle, size, 0, NULL); | 619 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); |
| 621 | __free_from_pool(cpu_addr, size); | 620 | __free_from_pool(cpu_addr, size); |
| 622 | } else if (is_vmalloc_addr(cpu_addr)){ | 621 | } else if (is_vmalloc_addr(cpu_addr)){ |
| 623 | struct vm_struct *area = find_vm_area(cpu_addr); | 622 | struct vm_struct *area = find_vm_area(cpu_addr); |
| 624 | 623 | ||
| 625 | if (WARN_ON(!area || !area->pages)) | 624 | if (WARN_ON(!area || !area->pages)) |
| 626 | return; | 625 | return; |
| 627 | iommu_dma_free(dev, area->pages, size, &handle); | 626 | iommu_dma_free(dev, area->pages, iosize, &handle); |
| 628 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); | 627 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); |
| 629 | } else { | 628 | } else { |
| 630 | iommu_dma_unmap_page(dev, handle, size, 0, NULL); | 629 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); |
| 631 | __free_pages(virt_to_page(cpu_addr), get_order(size)); | 630 | __free_pages(virt_to_page(cpu_addr), get_order(size)); |
| 632 | } | 631 | } |
| 633 | } | 632 | } |
| @@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
| 984 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | 983 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, |
| 985 | struct iommu_ops *iommu, bool coherent) | 984 | struct iommu_ops *iommu, bool coherent) |
| 986 | { | 985 | { |
| 987 | if (!acpi_disabled && !dev->archdata.dma_ops) | 986 | if (!dev->archdata.dma_ops) |
| 988 | dev->archdata.dma_ops = dma_ops; | 987 | dev->archdata.dma_ops = &swiotlb_dma_ops; |
| 989 | 988 | ||
| 990 | dev->archdata.dma_coherent = coherent; | 989 | dev->archdata.dma_coherent = coherent; |
| 991 | __iommu_setup_dma_ops(dev, dma_base, size, iommu); | 990 | __iommu_setup_dma_ops(dev, dma_base, size, iommu); |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 19211c4a8911..92ddac1e8ca2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -393,16 +393,16 @@ static struct fault_info { | |||
| 393 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | 393 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, |
| 394 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | 394 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, |
| 395 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | 395 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, |
| 396 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | 396 | { do_bad, SIGBUS, 0, "unknown 8" }, |
| 397 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | 397 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, |
| 398 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | 398 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, |
| 399 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | 399 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, |
| 400 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | 400 | { do_bad, SIGBUS, 0, "unknown 12" }, |
| 401 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | 401 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, |
| 402 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | 402 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, |
| 403 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | 403 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, |
| 404 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | 404 | { do_bad, SIGBUS, 0, "synchronous external abort" }, |
| 405 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | 405 | { do_bad, SIGBUS, 0, "unknown 17" }, |
| 406 | { do_bad, SIGBUS, 0, "unknown 18" }, | 406 | { do_bad, SIGBUS, 0, "unknown 18" }, |
| 407 | { do_bad, SIGBUS, 0, "unknown 19" }, | 407 | { do_bad, SIGBUS, 0, "unknown 19" }, |
| 408 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 408 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, |
| @@ -410,16 +410,16 @@ static struct fault_info { | |||
| 410 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 410 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, |
| 411 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 411 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, |
| 412 | { do_bad, SIGBUS, 0, "synchronous parity error" }, | 412 | { do_bad, SIGBUS, 0, "synchronous parity error" }, |
| 413 | { do_bad, SIGBUS, 0, "asynchronous parity error" }, | 413 | { do_bad, SIGBUS, 0, "unknown 25" }, |
| 414 | { do_bad, SIGBUS, 0, "unknown 26" }, | 414 | { do_bad, SIGBUS, 0, "unknown 26" }, |
| 415 | { do_bad, SIGBUS, 0, "unknown 27" }, | 415 | { do_bad, SIGBUS, 0, "unknown 27" }, |
| 416 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | 416 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, |
| 417 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | 417 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, |
| 418 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | 418 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, |
| 419 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | 419 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, |
| 420 | { do_bad, SIGBUS, 0, "unknown 32" }, | 420 | { do_bad, SIGBUS, 0, "unknown 32" }, |
| 421 | { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, | 421 | { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, |
| 422 | { do_bad, SIGBUS, 0, "debug event" }, | 422 | { do_bad, SIGBUS, 0, "unknown 34" }, |
| 423 | { do_bad, SIGBUS, 0, "unknown 35" }, | 423 | { do_bad, SIGBUS, 0, "unknown 35" }, |
| 424 | { do_bad, SIGBUS, 0, "unknown 36" }, | 424 | { do_bad, SIGBUS, 0, "unknown 36" }, |
| 425 | { do_bad, SIGBUS, 0, "unknown 37" }, | 425 | { do_bad, SIGBUS, 0, "unknown 37" }, |
| @@ -433,21 +433,21 @@ static struct fault_info { | |||
| 433 | { do_bad, SIGBUS, 0, "unknown 45" }, | 433 | { do_bad, SIGBUS, 0, "unknown 45" }, |
| 434 | { do_bad, SIGBUS, 0, "unknown 46" }, | 434 | { do_bad, SIGBUS, 0, "unknown 46" }, |
| 435 | { do_bad, SIGBUS, 0, "unknown 47" }, | 435 | { do_bad, SIGBUS, 0, "unknown 47" }, |
| 436 | { do_bad, SIGBUS, 0, "unknown 48" }, | 436 | { do_bad, SIGBUS, 0, "TLB conflict abort" }, |
| 437 | { do_bad, SIGBUS, 0, "unknown 49" }, | 437 | { do_bad, SIGBUS, 0, "unknown 49" }, |
| 438 | { do_bad, SIGBUS, 0, "unknown 50" }, | 438 | { do_bad, SIGBUS, 0, "unknown 50" }, |
| 439 | { do_bad, SIGBUS, 0, "unknown 51" }, | 439 | { do_bad, SIGBUS, 0, "unknown 51" }, |
| 440 | { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, | 440 | { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, |
| 441 | { do_bad, SIGBUS, 0, "unknown 53" }, | 441 | { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" }, |
| 442 | { do_bad, SIGBUS, 0, "unknown 54" }, | 442 | { do_bad, SIGBUS, 0, "unknown 54" }, |
| 443 | { do_bad, SIGBUS, 0, "unknown 55" }, | 443 | { do_bad, SIGBUS, 0, "unknown 55" }, |
| 444 | { do_bad, SIGBUS, 0, "unknown 56" }, | 444 | { do_bad, SIGBUS, 0, "unknown 56" }, |
| 445 | { do_bad, SIGBUS, 0, "unknown 57" }, | 445 | { do_bad, SIGBUS, 0, "unknown 57" }, |
| 446 | { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, | 446 | { do_bad, SIGBUS, 0, "unknown 58" }, |
| 447 | { do_bad, SIGBUS, 0, "unknown 59" }, | 447 | { do_bad, SIGBUS, 0, "unknown 59" }, |
| 448 | { do_bad, SIGBUS, 0, "unknown 60" }, | 448 | { do_bad, SIGBUS, 0, "unknown 60" }, |
| 449 | { do_bad, SIGBUS, 0, "unknown 61" }, | 449 | { do_bad, SIGBUS, 0, "section domain fault" }, |
| 450 | { do_bad, SIGBUS, 0, "unknown 62" }, | 450 | { do_bad, SIGBUS, 0, "page domain fault" }, |
| 451 | { do_bad, SIGBUS, 0, "unknown 63" }, | 451 | { do_bad, SIGBUS, 0, "unknown 63" }, |
| 452 | }; | 452 | }; |
| 453 | 453 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e3f563c81c48..873e363048c6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
| @@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot); | |||
| 64 | 64 | ||
| 65 | static void __init *early_alloc(unsigned long sz) | 65 | static void __init *early_alloc(unsigned long sz) |
| 66 | { | 66 | { |
| 67 | void *ptr = __va(memblock_alloc(sz, sz)); | 67 | phys_addr_t phys; |
| 68 | BUG_ON(!ptr); | 68 | void *ptr; |
| 69 | |||
| 70 | phys = memblock_alloc(sz, sz); | ||
| 71 | BUG_ON(!phys); | ||
| 72 | ptr = __va(phys); | ||
| 69 | memset(ptr, 0, sz); | 73 | memset(ptr, 0, sz); |
| 70 | return ptr; | 74 | return ptr; |
| 71 | } | 75 | } |
| @@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) | |||
| 81 | do { | 85 | do { |
| 82 | /* | 86 | /* |
| 83 | * Need to have the least restrictive permissions available | 87 | * Need to have the least restrictive permissions available |
| 84 | * permissions will be fixed up later. Default the new page | 88 | * permissions will be fixed up later |
| 85 | * range as contiguous ptes. | ||
| 86 | */ | 89 | */ |
| 87 | set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); | 90 | set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); |
| 88 | pfn++; | 91 | pfn++; |
| 89 | } while (pte++, i++, i < PTRS_PER_PTE); | 92 | } while (pte++, i++, i < PTRS_PER_PTE); |
| 90 | } | 93 | } |
| 91 | 94 | ||
| 92 | /* | ||
| 93 | * Given a PTE with the CONT bit set, determine where the CONT range | ||
| 94 | * starts, and clear the entire range of PTE CONT bits. | ||
| 95 | */ | ||
| 96 | static void clear_cont_pte_range(pte_t *pte, unsigned long addr) | ||
| 97 | { | ||
| 98 | int i; | ||
| 99 | |||
| 100 | pte -= CONT_RANGE_OFFSET(addr); | ||
| 101 | for (i = 0; i < CONT_PTES; i++) { | ||
| 102 | set_pte(pte, pte_mknoncont(*pte)); | ||
| 103 | pte++; | ||
| 104 | } | ||
| 105 | flush_tlb_all(); | ||
| 106 | } | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Given a range of PTEs set the pfn and provided page protection flags | ||
| 110 | */ | ||
| 111 | static void __populate_init_pte(pte_t *pte, unsigned long addr, | ||
| 112 | unsigned long end, phys_addr_t phys, | ||
| 113 | pgprot_t prot) | ||
| 114 | { | ||
| 115 | unsigned long pfn = __phys_to_pfn(phys); | ||
| 116 | |||
| 117 | do { | ||
| 118 | /* clear all the bits except the pfn, then apply the prot */ | ||
| 119 | set_pte(pte, pfn_pte(pfn, prot)); | ||
| 120 | pte++; | ||
| 121 | pfn++; | ||
| 122 | addr += PAGE_SIZE; | ||
| 123 | } while (addr != end); | ||
| 124 | } | ||
| 125 | |||
| 126 | static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | 95 | static void alloc_init_pte(pmd_t *pmd, unsigned long addr, |
| 127 | unsigned long end, phys_addr_t phys, | 96 | unsigned long end, unsigned long pfn, |
| 128 | pgprot_t prot, | 97 | pgprot_t prot, |
| 129 | void *(*alloc)(unsigned long size)) | 98 | void *(*alloc)(unsigned long size)) |
| 130 | { | 99 | { |
| 131 | pte_t *pte; | 100 | pte_t *pte; |
| 132 | unsigned long next; | ||
| 133 | 101 | ||
| 134 | if (pmd_none(*pmd) || pmd_sect(*pmd)) { | 102 | if (pmd_none(*pmd) || pmd_sect(*pmd)) { |
| 135 | pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); | 103 | pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); |
| @@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
| 142 | 110 | ||
| 143 | pte = pte_offset_kernel(pmd, addr); | 111 | pte = pte_offset_kernel(pmd, addr); |
| 144 | do { | 112 | do { |
| 145 | next = min(end, (addr + CONT_SIZE) & CONT_MASK); | 113 | set_pte(pte, pfn_pte(pfn, prot)); |
| 146 | if (((addr | next | phys) & ~CONT_MASK) == 0) { | 114 | pfn++; |
| 147 | /* a block of CONT_PTES */ | 115 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 148 | __populate_init_pte(pte, addr, next, phys, | ||
| 149 | __pgprot(pgprot_val(prot) | PTE_CONT)); | ||
| 150 | } else { | ||
| 151 | /* | ||
| 152 | * If the range being split is already inside of a | ||
| 153 | * contiguous range but this PTE isn't going to be | ||
| 154 | * contiguous, then we want to unmark the adjacent | ||
| 155 | * ranges, then update the portion of the range we | ||
| 156 | * are interrested in. | ||
| 157 | */ | ||
| 158 | clear_cont_pte_range(pte, addr); | ||
| 159 | __populate_init_pte(pte, addr, next, phys, prot); | ||
| 160 | } | ||
| 161 | |||
| 162 | pte += (next - addr) >> PAGE_SHIFT; | ||
| 163 | phys += next - addr; | ||
| 164 | addr = next; | ||
| 165 | } while (addr != end); | ||
| 166 | } | 116 | } |
| 167 | 117 | ||
| 168 | static void split_pud(pud_t *old_pud, pmd_t *pmd) | 118 | static void split_pud(pud_t *old_pud, pmd_t *pmd) |
| @@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, | |||
| 223 | } | 173 | } |
| 224 | } | 174 | } |
| 225 | } else { | 175 | } else { |
| 226 | alloc_init_pte(pmd, addr, next, phys, prot, alloc); | 176 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), |
| 177 | prot, alloc); | ||
| 227 | } | 178 | } |
| 228 | phys += next - addr; | 179 | phys += next - addr; |
| 229 | } while (pmd++, addr = next, addr != end); | 180 | } while (pmd++, addr = next, addr != end); |
| @@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end) | |||
| 362 | * for now. This will get more fine grained later once all memory | 313 | * for now. This will get more fine grained later once all memory |
| 363 | * is mapped | 314 | * is mapped |
| 364 | */ | 315 | */ |
| 365 | unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); | 316 | unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); |
| 366 | unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); | 317 | unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); |
| 367 | 318 | ||
| 368 | if (end < kernel_x_start) { | 319 | if (end < kernel_x_start) { |
| 369 | create_mapping(start, __phys_to_virt(start), | 320 | create_mapping(start, __phys_to_virt(start), |
| @@ -451,18 +402,18 @@ static void __init fixup_executable(void) | |||
| 451 | { | 402 | { |
| 452 | #ifdef CONFIG_DEBUG_RODATA | 403 | #ifdef CONFIG_DEBUG_RODATA |
| 453 | /* now that we are actually fully mapped, make the start/end more fine grained */ | 404 | /* now that we are actually fully mapped, make the start/end more fine grained */ |
| 454 | if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { | 405 | if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { |
| 455 | unsigned long aligned_start = round_down(__pa(_stext), | 406 | unsigned long aligned_start = round_down(__pa(_stext), |
| 456 | SECTION_SIZE); | 407 | SWAPPER_BLOCK_SIZE); |
| 457 | 408 | ||
| 458 | create_mapping(aligned_start, __phys_to_virt(aligned_start), | 409 | create_mapping(aligned_start, __phys_to_virt(aligned_start), |
| 459 | __pa(_stext) - aligned_start, | 410 | __pa(_stext) - aligned_start, |
| 460 | PAGE_KERNEL); | 411 | PAGE_KERNEL); |
| 461 | } | 412 | } |
| 462 | 413 | ||
| 463 | if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { | 414 | if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { |
| 464 | unsigned long aligned_end = round_up(__pa(__init_end), | 415 | unsigned long aligned_end = round_up(__pa(__init_end), |
| 465 | SECTION_SIZE); | 416 | SWAPPER_BLOCK_SIZE); |
| 466 | create_mapping(__pa(__init_end), (unsigned long)__init_end, | 417 | create_mapping(__pa(__init_end), (unsigned long)__init_end, |
| 467 | aligned_end - __pa(__init_end), | 418 | aligned_end - __pa(__init_end), |
| 468 | PAGE_KERNEL); | 419 | PAGE_KERNEL); |
| @@ -475,7 +426,7 @@ void mark_rodata_ro(void) | |||
| 475 | { | 426 | { |
| 476 | create_mapping_late(__pa(_stext), (unsigned long)_stext, | 427 | create_mapping_late(__pa(_stext), (unsigned long)_stext, |
| 477 | (unsigned long)_etext - (unsigned long)_stext, | 428 | (unsigned long)_etext - (unsigned long)_stext, |
| 478 | PAGE_KERNEL_EXEC | PTE_RDONLY); | 429 | PAGE_KERNEL_ROX); |
| 479 | 430 | ||
| 480 | } | 431 | } |
| 481 | #endif | 432 | #endif |
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index cf3c7d4a1b58..d6a53ef2350b 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
| @@ -50,7 +50,7 @@ static const int bpf2a64[] = { | |||
| 50 | [BPF_REG_8] = A64_R(21), | 50 | [BPF_REG_8] = A64_R(21), |
| 51 | [BPF_REG_9] = A64_R(22), | 51 | [BPF_REG_9] = A64_R(22), |
| 52 | /* read-only frame pointer to access stack */ | 52 | /* read-only frame pointer to access stack */ |
| 53 | [BPF_REG_FP] = A64_FP, | 53 | [BPF_REG_FP] = A64_R(25), |
| 54 | /* temporary register for internal BPF JIT */ | 54 | /* temporary register for internal BPF JIT */ |
| 55 | [TMP_REG_1] = A64_R(23), | 55 | [TMP_REG_1] = A64_R(23), |
| 56 | [TMP_REG_2] = A64_R(24), | 56 | [TMP_REG_2] = A64_R(24), |
| @@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx) | |||
| 155 | stack_size += 4; /* extra for skb_copy_bits buffer */ | 155 | stack_size += 4; /* extra for skb_copy_bits buffer */ |
| 156 | stack_size = STACK_ALIGN(stack_size); | 156 | stack_size = STACK_ALIGN(stack_size); |
| 157 | 157 | ||
| 158 | /* | ||
| 159 | * BPF prog stack layout | ||
| 160 | * | ||
| 161 | * high | ||
| 162 | * original A64_SP => 0:+-----+ BPF prologue | ||
| 163 | * |FP/LR| | ||
| 164 | * current A64_FP => -16:+-----+ | ||
| 165 | * | ... | callee saved registers | ||
| 166 | * +-----+ | ||
| 167 | * | | x25/x26 | ||
| 168 | * BPF fp register => -80:+-----+ | ||
| 169 | * | | | ||
| 170 | * | ... | BPF prog stack | ||
| 171 | * | | | ||
| 172 | * | | | ||
| 173 | * current A64_SP => +-----+ | ||
| 174 | * | | | ||
| 175 | * | ... | Function call stack | ||
| 176 | * | | | ||
| 177 | * +-----+ | ||
| 178 | * low | ||
| 179 | * | ||
| 180 | */ | ||
| 181 | |||
| 182 | /* Save FP and LR registers to stay align with ARM64 AAPCS */ | ||
| 183 | emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); | ||
| 184 | emit(A64_MOV(1, A64_FP, A64_SP), ctx); | ||
| 185 | |||
| 158 | /* Save callee-saved register */ | 186 | /* Save callee-saved register */ |
| 159 | emit(A64_PUSH(r6, r7, A64_SP), ctx); | 187 | emit(A64_PUSH(r6, r7, A64_SP), ctx); |
| 160 | emit(A64_PUSH(r8, r9, A64_SP), ctx); | 188 | emit(A64_PUSH(r8, r9, A64_SP), ctx); |
| 161 | if (ctx->tmp_used) | 189 | if (ctx->tmp_used) |
| 162 | emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); | 190 | emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); |
| 163 | 191 | ||
| 164 | /* Set up BPF stack */ | 192 | /* Save fp (x25) and x26. SP requires 16 bytes alignment */ |
| 165 | emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); | 193 | emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx); |
| 166 | 194 | ||
| 167 | /* Set up frame pointer */ | 195 | /* Set up BPF prog stack base register (x25) */ |
| 168 | emit(A64_MOV(1, fp, A64_SP), ctx); | 196 | emit(A64_MOV(1, fp, A64_SP), ctx); |
| 169 | 197 | ||
| 198 | /* Set up function call stack */ | ||
| 199 | emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); | ||
| 200 | |||
| 170 | /* Clear registers A and X */ | 201 | /* Clear registers A and X */ |
| 171 | emit_a64_mov_i64(ra, 0, ctx); | 202 | emit_a64_mov_i64(ra, 0, ctx); |
| 172 | emit_a64_mov_i64(rx, 0, ctx); | 203 | emit_a64_mov_i64(rx, 0, ctx); |
| @@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx) | |||
| 190 | /* We're done with BPF stack */ | 221 | /* We're done with BPF stack */ |
| 191 | emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); | 222 | emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); |
| 192 | 223 | ||
| 224 | /* Restore fs (x25) and x26 */ | ||
| 225 | emit(A64_POP(fp, A64_R(26), A64_SP), ctx); | ||
| 226 | |||
| 193 | /* Restore callee-saved register */ | 227 | /* Restore callee-saved register */ |
| 194 | if (ctx->tmp_used) | 228 | if (ctx->tmp_used) |
| 195 | emit(A64_POP(tmp1, tmp2, A64_SP), ctx); | 229 | emit(A64_POP(tmp1, tmp2, A64_SP), ctx); |
| 196 | emit(A64_POP(r8, r9, A64_SP), ctx); | 230 | emit(A64_POP(r8, r9, A64_SP), ctx); |
| 197 | emit(A64_POP(r6, r7, A64_SP), ctx); | 231 | emit(A64_POP(r6, r7, A64_SP), ctx); |
| 198 | 232 | ||
| 199 | /* Restore frame pointer */ | 233 | /* Restore FP/LR registers */ |
| 200 | emit(A64_MOV(1, fp, A64_SP), ctx); | 234 | emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); |
| 201 | 235 | ||
| 202 | /* Set return value */ | 236 | /* Set return value */ |
| 203 | emit(A64_MOV(1, A64_R(0), r0), ctx); | 237 | emit(A64_MOV(1, A64_R(0), r0), ctx); |
| @@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) | |||
| 758 | if (bpf_jit_enable > 1) | 792 | if (bpf_jit_enable > 1) |
| 759 | bpf_jit_dump(prog->len, image_size, 2, ctx.image); | 793 | bpf_jit_dump(prog->len, image_size, 2, ctx.image); |
| 760 | 794 | ||
| 761 | bpf_flush_icache(ctx.image, ctx.image + ctx.idx); | 795 | bpf_flush_icache(header, ctx.image + ctx.idx); |
| 762 | 796 | ||
| 763 | set_memory_ro((unsigned long)header, header->pages); | 797 | set_memory_ro((unsigned long)header, header->pages); |
| 764 | prog->bpf_func = (void *)ctx.image; | 798 | prog->bpf_func = (void *)ctx.image; |
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c index f7836c6a6b60..c32f76791f48 100644 --- a/arch/m68k/coldfire/m54xx.c +++ b/arch/m68k/coldfire/m54xx.c | |||
| @@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void) | |||
| 98 | memstart = PAGE_ALIGN(_ramstart); | 98 | memstart = PAGE_ALIGN(_ramstart); |
| 99 | min_low_pfn = PFN_DOWN(_rambase); | 99 | min_low_pfn = PFN_DOWN(_rambase); |
| 100 | start_pfn = PFN_DOWN(memstart); | 100 | start_pfn = PFN_DOWN(memstart); |
| 101 | max_low_pfn = PFN_DOWN(_ramend); | 101 | max_pfn = max_low_pfn = PFN_DOWN(_ramend); |
| 102 | high_memory = (void *)_ramend; | 102 | high_memory = (void *)_ramend; |
| 103 | 103 | ||
| 104 | m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; | 104 | m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; |
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 0793a7f17417..f9d96bf86910 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
| 5 | 5 | ||
| 6 | 6 | ||
| 7 | #define NR_syscalls 375 | 7 | #define NR_syscalls 376 |
| 8 | 8 | ||
| 9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
| 10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 5e6fae6c275f..36cf129de663 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
| @@ -380,5 +380,6 @@ | |||
| 380 | #define __NR_sendmmsg 372 | 380 | #define __NR_sendmmsg 372 |
| 381 | #define __NR_userfaultfd 373 | 381 | #define __NR_userfaultfd 373 |
| 382 | #define __NR_membarrier 374 | 382 | #define __NR_membarrier 374 |
| 383 | #define __NR_mlock2 375 | ||
| 383 | 384 | ||
| 384 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 385 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 88c27d94a721..76b9113f3092 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c | |||
| @@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p) | |||
| 238 | * Give all the memory to the bootmap allocator, tell it to put the | 238 | * Give all the memory to the bootmap allocator, tell it to put the |
| 239 | * boot mem_map at the start of memory. | 239 | * boot mem_map at the start of memory. |
| 240 | */ | 240 | */ |
| 241 | min_low_pfn = PFN_DOWN(memory_start); | ||
| 242 | max_pfn = max_low_pfn = PFN_DOWN(memory_end); | ||
| 243 | |||
| 241 | bootmap_size = init_bootmem_node( | 244 | bootmap_size = init_bootmem_node( |
| 242 | NODE_DATA(0), | 245 | NODE_DATA(0), |
| 243 | memory_start >> PAGE_SHIFT, /* map goes here */ | 246 | min_low_pfn, /* map goes here */ |
| 244 | PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ | 247 | PFN_DOWN(PAGE_OFFSET), |
| 245 | memory_end >> PAGE_SHIFT); | 248 | max_pfn); |
| 246 | /* | 249 | /* |
| 247 | * Free the usable memory, we have to make sure we do not free | 250 | * Free the usable memory, we have to make sure we do not free |
| 248 | * the bootmem bitmap so we then reserve it after freeing it :-) | 251 | * the bootmem bitmap so we then reserve it after freeing it :-) |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 5dd0e80042f5..282cd903f4c4 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
| @@ -395,3 +395,4 @@ ENTRY(sys_call_table) | |||
| 395 | .long sys_sendmmsg | 395 | .long sys_sendmmsg |
| 396 | .long sys_userfaultfd | 396 | .long sys_userfaultfd |
| 397 | .long sys_membarrier | 397 | .long sys_membarrier |
| 398 | .long sys_mlock2 /* 375 */ | ||
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index b958916e5eac..8f37fdd80be9 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c | |||
| @@ -250,7 +250,7 @@ void __init paging_init(void) | |||
| 250 | high_memory = phys_to_virt(max_addr); | 250 | high_memory = phys_to_virt(max_addr); |
| 251 | 251 | ||
| 252 | min_low_pfn = availmem >> PAGE_SHIFT; | 252 | min_low_pfn = availmem >> PAGE_SHIFT; |
| 253 | max_low_pfn = max_addr >> PAGE_SHIFT; | 253 | max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT; |
| 254 | 254 | ||
| 255 | for (i = 0; i < m68k_num_memory; i++) { | 255 | for (i = 0; i < m68k_num_memory; i++) { |
| 256 | addr = m68k_memory[i].addr; | 256 | addr = m68k_memory[i].addr; |
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c index a8b942bf7163..2a5f43a68ae3 100644 --- a/arch/m68k/sun3/config.c +++ b/arch/m68k/sun3/config.c | |||
| @@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start, | |||
| 118 | memory_end = memory_end & PAGE_MASK; | 118 | memory_end = memory_end & PAGE_MASK; |
| 119 | 119 | ||
| 120 | start_page = __pa(memory_start) >> PAGE_SHIFT; | 120 | start_page = __pa(memory_start) >> PAGE_SHIFT; |
| 121 | num_pages = __pa(memory_end) >> PAGE_SHIFT; | 121 | max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT; |
| 122 | 122 | ||
| 123 | high_memory = (void *)memory_end; | 123 | high_memory = (void *)memory_end; |
| 124 | availmem = memory_start; | 124 | availmem = memory_start; |
| 125 | 125 | ||
| 126 | m68k_setup_node(0); | 126 | m68k_setup_node(0); |
| 127 | availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); | 127 | availmem += init_bootmem(start_page, num_pages); |
| 128 | availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; | 128 | availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; |
| 129 | 129 | ||
| 130 | free_bootmem(__pa(availmem), memory_end - (availmem)); | 130 | free_bootmem(__pa(availmem), memory_end - (availmem)); |
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 1ba21204ebe0..8755d618e116 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c | |||
| @@ -216,9 +216,9 @@ void __init plat_mem_setup(void) | |||
| 216 | AR71XX_RESET_SIZE); | 216 | AR71XX_RESET_SIZE); |
| 217 | ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, | 217 | ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, |
| 218 | AR71XX_PLL_SIZE); | 218 | AR71XX_PLL_SIZE); |
| 219 | ath79_detect_sys_type(); | ||
| 219 | ath79_ddr_ctrl_init(); | 220 | ath79_ddr_ctrl_init(); |
| 220 | 221 | ||
| 221 | ath79_detect_sys_type(); | ||
| 222 | if (mips_machtype != ATH79_MACH_GENERIC_OF) | 222 | if (mips_machtype != ATH79_MACH_GENERIC_OF) |
| 223 | detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); | 223 | detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); |
| 224 | 224 | ||
| @@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC, | |||
| 281 | "Generic", | 281 | "Generic", |
| 282 | "Generic AR71XX/AR724X/AR913X based board", | 282 | "Generic AR71XX/AR724X/AR913X based board", |
| 283 | ath79_generic_init); | 283 | ath79_generic_init); |
| 284 | |||
| 285 | MIPS_MACHINE(ATH79_MACH_GENERIC_OF, | ||
| 286 | "DTB", | ||
| 287 | "Generic AR71XX/AR724X/AR913X based board (DT)", | ||
| 288 | NULL); | ||
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi index fb7734eadbf0..13d0439496a9 100644 --- a/arch/mips/boot/dts/qca/ar9132.dtsi +++ b/arch/mips/boot/dts/qca/ar9132.dtsi | |||
| @@ -107,7 +107,7 @@ | |||
| 107 | miscintc: interrupt-controller@18060010 { | 107 | miscintc: interrupt-controller@18060010 { |
| 108 | compatible = "qca,ar9132-misc-intc", | 108 | compatible = "qca,ar9132-misc-intc", |
| 109 | "qca,ar7100-misc-intc"; | 109 | "qca,ar7100-misc-intc"; |
| 110 | reg = <0x18060010 0x4>; | 110 | reg = <0x18060010 0x8>; |
| 111 | 111 | ||
| 112 | interrupt-parent = <&cpuintc>; | 112 | interrupt-parent = <&cpuintc>; |
| 113 | interrupts = <6>; | 113 | interrupts = <6>; |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index ad1fccdb8d13..2046c0230224 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
| @@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn) | |||
| 200 | { | 200 | { |
| 201 | /* avoid <linux/mm.h> include hell */ | 201 | /* avoid <linux/mm.h> include hell */ |
| 202 | extern unsigned long max_mapnr; | 202 | extern unsigned long max_mapnr; |
| 203 | unsigned long pfn_offset = ARCH_PFN_OFFSET; | ||
| 203 | 204 | ||
| 204 | return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; | 205 | return pfn >= pfn_offset && pfn < max_mapnr; |
| 205 | } | 206 | } |
| 206 | 207 | ||
| 207 | #elif defined(CONFIG_SPARSEMEM) | 208 | #elif defined(CONFIG_SPARSEMEM) |
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d5fa3eaf39a1..41b1b090f56f 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
| @@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | |||
| 1581 | 1581 | ||
| 1582 | base = (inst >> 21) & 0x1f; | 1582 | base = (inst >> 21) & 0x1f; |
| 1583 | op_inst = (inst >> 16) & 0x1f; | 1583 | op_inst = (inst >> 16) & 0x1f; |
| 1584 | offset = inst & 0xffff; | 1584 | offset = (int16_t)inst; |
| 1585 | cache = (inst >> 16) & 0x3; | 1585 | cache = (inst >> 16) & 0x3; |
| 1586 | op = (inst >> 18) & 0x7; | 1586 | op = (inst >> 18) & 0x7; |
| 1587 | 1587 | ||
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 7bab3a4e8f7d..7e2210846b8b 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S | |||
| @@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
| 157 | 157 | ||
| 158 | FEXPORT(__kvm_mips_load_asid) | 158 | FEXPORT(__kvm_mips_load_asid) |
| 159 | /* Set the ASID for the Guest Kernel */ | 159 | /* Set the ASID for the Guest Kernel */ |
| 160 | INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ | 160 | PTR_L t0, VCPU_COP0(k1) |
| 161 | /* addresses shift to 0x80000000 */ | 161 | LONG_L t0, COP0_STATUS(t0) |
| 162 | bltz t0, 1f /* If kernel */ | 162 | andi t0, KSU_USER | ST0_ERL | ST0_EXL |
| 163 | xori t0, KSU_USER | ||
| 164 | bnez t0, 1f /* If kernel */ | ||
| 163 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 165 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
| 164 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 166 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
| 165 | 1: | 167 | 1: |
| @@ -474,9 +476,11 @@ __kvm_mips_return_to_guest: | |||
| 474 | mtc0 t0, CP0_EPC | 476 | mtc0 t0, CP0_EPC |
| 475 | 477 | ||
| 476 | /* Set the ASID for the Guest Kernel */ | 478 | /* Set the ASID for the Guest Kernel */ |
| 477 | INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ | 479 | PTR_L t0, VCPU_COP0(k1) |
| 478 | /* addresses shift to 0x80000000 */ | 480 | LONG_L t0, COP0_STATUS(t0) |
| 479 | bltz t0, 1f /* If kernel */ | 481 | andi t0, KSU_USER | ST0_ERL | ST0_EXL |
| 482 | xori t0, KSU_USER | ||
| 483 | bnez t0, 1f /* If kernel */ | ||
| 480 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 484 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
| 481 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 485 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
| 482 | 1: | 486 | 1: |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 49ff3bfc007e..b9b803facdbf 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
| @@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
| 279 | 279 | ||
| 280 | if (!gebase) { | 280 | if (!gebase) { |
| 281 | err = -ENOMEM; | 281 | err = -ENOMEM; |
| 282 | goto out_free_cpu; | 282 | goto out_uninit_cpu; |
| 283 | } | 283 | } |
| 284 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", | 284 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", |
| 285 | ALIGN(size, PAGE_SIZE), gebase); | 285 | ALIGN(size, PAGE_SIZE), gebase); |
| @@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
| 343 | out_free_gebase: | 343 | out_free_gebase: |
| 344 | kfree(gebase); | 344 | kfree(gebase); |
| 345 | 345 | ||
| 346 | out_uninit_cpu: | ||
| 347 | kvm_vcpu_uninit(vcpu); | ||
| 348 | |||
| 346 | out_free_cpu: | 349 | out_free_cpu: |
| 347 | kfree(vcpu); | 350 | kfree(vcpu); |
| 348 | 351 | ||
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index 8a978022630b..dbbeccc3d714 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | * by the Free Software Foundation. | 11 | * by the Free Software Foundation. |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/delay.h> | ||
| 14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 15 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
| 16 | #include <linux/io.h> | 17 | #include <linux/io.h> |
| @@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev) | |||
| 232 | ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; | 233 | ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; |
| 233 | 234 | ||
| 234 | rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); | 235 | rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); |
| 235 | for (i = 0; i < 0xfffff; i++) | 236 | udelay(1); |
| 236 | ; | ||
| 237 | 237 | ||
| 238 | rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); | 238 | rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); |
| 239 | rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); | 239 | rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); |
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index 4f925e06c414..78b2ef49dbc7 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | * option) any later version. | 10 | * option) any later version. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/delay.h> | ||
| 14 | |||
| 13 | #include <asm/bootinfo.h> | 15 | #include <asm/bootinfo.h> |
| 14 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
| 15 | #include <asm/idle.h> | 17 | #include <asm/idle.h> |
| @@ -77,7 +79,7 @@ void msp7120_reset(void) | |||
| 77 | */ | 79 | */ |
| 78 | 80 | ||
| 79 | /* Wait a bit for the DDRC to settle */ | 81 | /* Wait a bit for the DDRC to settle */ |
| 80 | for (i = 0; i < 100000000; i++); | 82 | mdelay(125); |
| 81 | 83 | ||
| 82 | #if defined(CONFIG_PMC_MSP7120_GW) | 84 | #if defined(CONFIG_PMC_MSP7120_GW) |
| 83 | /* | 85 | /* |
diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c index 244f9427625b..db8f88b6a3af 100644 --- a/arch/mips/sni/reset.c +++ b/arch/mips/sni/reset.c | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | * | 3 | * |
| 4 | * Reset a SNI machine. | 4 | * Reset a SNI machine. |
| 5 | */ | 5 | */ |
| 6 | #include <linux/delay.h> | ||
| 7 | |||
| 6 | #include <asm/io.h> | 8 | #include <asm/io.h> |
| 7 | #include <asm/reboot.h> | 9 | #include <asm/reboot.h> |
| 8 | #include <asm/sni.h> | 10 | #include <asm/sni.h> |
| @@ -32,9 +34,9 @@ void sni_machine_restart(char *command) | |||
| 32 | for (;;) { | 34 | for (;;) { |
| 33 | for (i = 0; i < 100; i++) { | 35 | for (i = 0; i < 100; i++) { |
| 34 | kb_wait(); | 36 | kb_wait(); |
| 35 | for (j = 0; j < 100000 ; j++) | 37 | udelay(50); |
| 36 | /* nothing */; | ||
| 37 | outb_p(0xfe, 0x64); /* pulse reset low */ | 38 | outb_p(0xfe, 0x64); /* pulse reset low */ |
| 39 | udelay(50); | ||
| 38 | } | 40 | } |
| 39 | } | 41 | } |
| 40 | } | 42 | } |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 4434b54e1d87..78ae5552fdb8 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config MN10300 | 1 | config MN10300 |
| 2 | def_bool y | 2 | def_bool y |
| 3 | select HAVE_OPROFILE | 3 | select HAVE_OPROFILE |
| 4 | select HAVE_UID16 | ||
| 4 | select GENERIC_IRQ_SHOW | 5 | select GENERIC_IRQ_SHOW |
| 5 | select ARCH_WANT_IPC_PARSE_VERSION | 6 | select ARCH_WANT_IPC_PARSE_VERSION |
| 6 | select HAVE_ARCH_TRACEHOOK | 7 | select HAVE_ARCH_TRACEHOOK |
| @@ -37,9 +38,6 @@ config HIGHMEM | |||
| 37 | config NUMA | 38 | config NUMA |
| 38 | def_bool n | 39 | def_bool n |
| 39 | 40 | ||
| 40 | config UID16 | ||
| 41 | def_bool y | ||
| 42 | |||
| 43 | config RWSEM_GENERIC_SPINLOCK | 41 | config RWSEM_GENERIC_SPINLOCK |
| 44 | def_bool y | 42 | def_bool y |
| 45 | 43 | ||
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 223cdcc8203f..87bf88ed04c6 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c | |||
| @@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end) | |||
| 23 | end += (cpuinfo.dcache_line_size - 1); | 23 | end += (cpuinfo.dcache_line_size - 1); |
| 24 | end &= ~(cpuinfo.dcache_line_size - 1); | 24 | end &= ~(cpuinfo.dcache_line_size - 1); |
| 25 | 25 | ||
| 26 | for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { | ||
| 27 | __asm__ __volatile__ (" flushda 0(%0)\n" | ||
| 28 | : /* Outputs */ | ||
| 29 | : /* Inputs */ "r"(addr) | ||
| 30 | /* : No clobber */); | ||
| 31 | } | ||
| 32 | } | ||
| 33 | |||
| 34 | static void __flush_dcache_all(unsigned long start, unsigned long end) | ||
| 35 | { | ||
| 36 | unsigned long addr; | ||
| 37 | |||
| 38 | start &= ~(cpuinfo.dcache_line_size - 1); | ||
| 39 | end += (cpuinfo.dcache_line_size - 1); | ||
| 40 | end &= ~(cpuinfo.dcache_line_size - 1); | ||
| 41 | |||
| 42 | if (end > start + cpuinfo.dcache_size) | 26 | if (end > start + cpuinfo.dcache_size) |
| 43 | end = start + cpuinfo.dcache_size; | 27 | end = start + cpuinfo.dcache_size; |
| 44 | 28 | ||
| @@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page) | |||
| 112 | 96 | ||
| 113 | void flush_cache_all(void) | 97 | void flush_cache_all(void) |
| 114 | { | 98 | { |
| 115 | __flush_dcache_all(0, cpuinfo.dcache_size); | 99 | __flush_dcache(0, cpuinfo.dcache_size); |
| 116 | __flush_icache(0, cpuinfo.icache_size); | 100 | __flush_icache(0, cpuinfo.icache_size); |
| 117 | } | 101 | } |
| 118 | 102 | ||
| @@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
| 182 | */ | 166 | */ |
| 183 | unsigned long start = (unsigned long)page_address(page); | 167 | unsigned long start = (unsigned long)page_address(page); |
| 184 | 168 | ||
| 185 | __flush_dcache_all(start, start + PAGE_SIZE); | 169 | __flush_dcache(start, start + PAGE_SIZE); |
| 186 | } | 170 | } |
| 187 | 171 | ||
| 188 | void flush_dcache_page(struct page *page) | 172 | void flush_dcache_page(struct page *page) |
| @@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | |||
| 268 | { | 252 | { |
| 269 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); | 253 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); |
| 270 | memcpy(dst, src, len); | 254 | memcpy(dst, src, len); |
| 271 | __flush_dcache_all((unsigned long)src, (unsigned long)src + len); | 255 | __flush_dcache((unsigned long)src, (unsigned long)src + len); |
| 272 | if (vma->vm_flags & VM_EXEC) | 256 | if (vma->vm_flags & VM_EXEC) |
| 273 | __flush_icache((unsigned long)src, (unsigned long)src + len); | 257 | __flush_icache((unsigned long)src, (unsigned long)src + len); |
| 274 | } | 258 | } |
| @@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |||
| 279 | { | 263 | { |
| 280 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); | 264 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); |
| 281 | memcpy(dst, src, len); | 265 | memcpy(dst, src, len); |
| 282 | __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); | 266 | __flush_dcache((unsigned long)dst, (unsigned long)dst + len); |
| 283 | if (vma->vm_flags & VM_EXEC) | 267 | if (vma->vm_flags & VM_EXEC) |
| 284 | __flush_icache((unsigned long)dst, (unsigned long)dst + len); | 268 | __flush_icache((unsigned long)dst, (unsigned long)dst + len); |
| 285 | } | 269 | } |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index c36546959e86..729f89163bc3 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -108,6 +108,9 @@ config PGTABLE_LEVELS | |||
| 108 | default 3 if 64BIT && PARISC_PAGE_SIZE_4KB | 108 | default 3 if 64BIT && PARISC_PAGE_SIZE_4KB |
| 109 | default 2 | 109 | default 2 |
| 110 | 110 | ||
| 111 | config SYS_SUPPORTS_HUGETLBFS | ||
| 112 | def_bool y if PA20 | ||
| 113 | |||
| 111 | source "init/Kconfig" | 114 | source "init/Kconfig" |
| 112 | 115 | ||
| 113 | source "kernel/Kconfig.freezer" | 116 | source "kernel/Kconfig.freezer" |
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h new file mode 100644 index 000000000000..7d56a9ccb752 --- /dev/null +++ b/arch/parisc/include/asm/hugetlb.h | |||
| @@ -0,0 +1,85 @@ | |||
| 1 | #ifndef _ASM_PARISC64_HUGETLB_H | ||
| 2 | #define _ASM_PARISC64_HUGETLB_H | ||
| 3 | |||
| 4 | #include <asm/page.h> | ||
| 5 | #include <asm-generic/hugetlb.h> | ||
| 6 | |||
| 7 | |||
| 8 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
| 9 | pte_t *ptep, pte_t pte); | ||
| 10 | |||
| 11 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
| 12 | pte_t *ptep); | ||
| 13 | |||
| 14 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
| 15 | unsigned long addr, | ||
| 16 | unsigned long len) { | ||
| 17 | return 0; | ||
| 18 | } | ||
| 19 | |||
| 20 | /* | ||
| 21 | * If the arch doesn't supply something else, assume that hugepage | ||
| 22 | * size aligned regions are ok without further preparation. | ||
| 23 | */ | ||
| 24 | static inline int prepare_hugepage_range(struct file *file, | ||
| 25 | unsigned long addr, unsigned long len) | ||
| 26 | { | ||
| 27 | if (len & ~HPAGE_MASK) | ||
| 28 | return -EINVAL; | ||
| 29 | if (addr & ~HPAGE_MASK) | ||
| 30 | return -EINVAL; | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
| 35 | unsigned long addr, unsigned long end, | ||
| 36 | unsigned long floor, | ||
| 37 | unsigned long ceiling) | ||
| 38 | { | ||
| 39 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
| 43 | unsigned long addr, pte_t *ptep) | ||
| 44 | { | ||
| 45 | } | ||
| 46 | |||
| 47 | static inline int huge_pte_none(pte_t pte) | ||
| 48 | { | ||
| 49 | return pte_none(pte); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
| 53 | { | ||
| 54 | return pte_wrprotect(pte); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
| 58 | unsigned long addr, pte_t *ptep) | ||
| 59 | { | ||
| 60 | pte_t old_pte = *ptep; | ||
| 61 | set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
| 65 | unsigned long addr, pte_t *ptep, | ||
| 66 | pte_t pte, int dirty) | ||
| 67 | { | ||
| 68 | int changed = !pte_same(*ptep, pte); | ||
| 69 | if (changed) { | ||
| 70 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); | ||
| 71 | flush_tlb_page(vma, addr); | ||
| 72 | } | ||
| 73 | return changed; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
| 77 | { | ||
| 78 | return *ptep; | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
| 82 | { | ||
| 83 | } | ||
| 84 | |||
| 85 | #endif /* _ASM_PARISC64_HUGETLB_H */ | ||
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 60d5d174dfe4..80e742a1c162 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h | |||
| @@ -145,11 +145,22 @@ extern int npmem_ranges; | |||
| 145 | #endif /* CONFIG_DISCONTIGMEM */ | 145 | #endif /* CONFIG_DISCONTIGMEM */ |
| 146 | 146 | ||
| 147 | #ifdef CONFIG_HUGETLB_PAGE | 147 | #ifdef CONFIG_HUGETLB_PAGE |
| 148 | #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ | 148 | #define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */ |
| 149 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | 149 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) |
| 150 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | 150 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
| 151 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 151 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
| 152 | |||
| 153 | #if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
| 154 | # define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */ | ||
| 155 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M | ||
| 156 | #elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
| 157 | # define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */ | ||
| 158 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M | ||
| 159 | #else | ||
| 160 | # define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */ | ||
| 161 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M | ||
| 152 | #endif | 162 | #endif |
| 163 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
| 153 | 164 | ||
| 154 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 165 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| 155 | 166 | ||
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 3edbb9fc91b4..f2fd327dce2e 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
| @@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
| 35 | PxD_FLAG_VALID | | 35 | PxD_FLAG_VALID | |
| 36 | PxD_FLAG_ATTACHED) | 36 | PxD_FLAG_ATTACHED) |
| 37 | + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); | 37 | + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); |
| 38 | /* The first pmd entry also is marked with _PAGE_GATEWAY as | 38 | /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as |
| 39 | * a signal that this pmd may not be freed */ | 39 | * a signal that this pmd may not be freed */ |
| 40 | __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); | 40 | __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); |
| 41 | #endif | 41 | #endif |
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index f93c4a4e6580..d8534f95915a 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h | |||
| @@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 83 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) | 83 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) |
| 84 | 84 | ||
| 85 | /* This is the size of the initially mapped kernel memory */ | 85 | /* This is the size of the initially mapped kernel memory */ |
| 86 | #define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ | 86 | #ifdef CONFIG_64BIT |
| 87 | #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ | ||
| 88 | #else | ||
| 89 | #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ | ||
| 90 | #endif | ||
| 87 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) | 91 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) |
| 88 | 92 | ||
| 89 | #if CONFIG_PGTABLE_LEVELS == 3 | 93 | #if CONFIG_PGTABLE_LEVELS == 3 |
| @@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 167 | #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ | 171 | #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ |
| 168 | #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ | 172 | #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ |
| 169 | #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ | 173 | #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ |
| 170 | /* bit 21 was formerly the FLUSH bit but is now unused */ | 174 | #define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */ |
| 171 | #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ | 175 | #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ |
| 172 | 176 | ||
| 173 | /* N.B. The bits are defined in terms of a 32 bit word above, so the */ | 177 | /* N.B. The bits are defined in terms of a 32 bit word above, so the */ |
| @@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 194 | #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) | 198 | #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) |
| 195 | #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) | 199 | #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) |
| 196 | #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) | 200 | #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) |
| 201 | #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) | ||
| 197 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) | 202 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) |
| 198 | 203 | ||
| 199 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) | 204 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) |
| @@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 217 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) | 222 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) |
| 218 | #define PxD_FLAG_MASK (0xf) | 223 | #define PxD_FLAG_MASK (0xf) |
| 219 | #define PxD_FLAG_SHIFT (4) | 224 | #define PxD_FLAG_SHIFT (4) |
| 220 | #define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ | 225 | #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) |
| 221 | 226 | ||
| 222 | #ifndef __ASSEMBLY__ | 227 | #ifndef __ASSEMBLY__ |
| 223 | 228 | ||
| @@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return | |||
| 363 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 368 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
| 364 | 369 | ||
| 365 | /* | 370 | /* |
| 371 | * Huge pte definitions. | ||
| 372 | */ | ||
| 373 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 374 | #define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) | ||
| 375 | #define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE)) | ||
| 376 | #else | ||
| 377 | #define pte_huge(pte) (0) | ||
| 378 | #define pte_mkhuge(pte) (pte) | ||
| 379 | #endif | ||
| 380 | |||
| 381 | |||
| 382 | /* | ||
| 366 | * Conversion functions: convert a page and protection to a page entry, | 383 | * Conversion functions: convert a page and protection to a page entry, |
| 367 | * and a page entry and page directory to the page they refer to. | 384 | * and a page entry and page directory to the page they refer to. |
| 368 | */ | 385 | */ |
| @@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 410 | /* Find an entry in the second-level page table.. */ | 427 | /* Find an entry in the second-level page table.. */ |
| 411 | 428 | ||
| 412 | #if CONFIG_PGTABLE_LEVELS == 3 | 429 | #if CONFIG_PGTABLE_LEVELS == 3 |
| 430 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | ||
| 413 | #define pmd_offset(dir,address) \ | 431 | #define pmd_offset(dir,address) \ |
| 414 | ((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) | 432 | ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) |
| 415 | #else | 433 | #else |
| 416 | #define pmd_offset(dir,addr) ((pmd_t *) dir) | 434 | #define pmd_offset(dir,addr) ((pmd_t *) dir) |
| 417 | #endif | 435 | #endif |
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 54adb60c0a42..7e759ecb1343 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
| @@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack); | |||
| 192 | */ | 192 | */ |
| 193 | typedef unsigned int elf_caddr_t; | 193 | typedef unsigned int elf_caddr_t; |
| 194 | 194 | ||
| 195 | #define start_thread_som(regs, new_pc, new_sp) do { \ | ||
| 196 | unsigned long *sp = (unsigned long *)new_sp; \ | ||
| 197 | __u32 spaceid = (__u32)current->mm->context; \ | ||
| 198 | unsigned long pc = (unsigned long)new_pc; \ | ||
| 199 | /* offset pc for priv. level */ \ | ||
| 200 | pc |= 3; \ | ||
| 201 | \ | ||
| 202 | regs->iasq[0] = spaceid; \ | ||
| 203 | regs->iasq[1] = spaceid; \ | ||
| 204 | regs->iaoq[0] = pc; \ | ||
| 205 | regs->iaoq[1] = pc + 4; \ | ||
| 206 | regs->sr[2] = LINUX_GATEWAY_SPACE; \ | ||
| 207 | regs->sr[3] = 0xffff; \ | ||
| 208 | regs->sr[4] = spaceid; \ | ||
| 209 | regs->sr[5] = spaceid; \ | ||
| 210 | regs->sr[6] = spaceid; \ | ||
| 211 | regs->sr[7] = spaceid; \ | ||
| 212 | regs->gr[ 0] = USER_PSW; \ | ||
| 213 | regs->gr[30] = ((new_sp)+63)&~63; \ | ||
| 214 | regs->gr[31] = pc; \ | ||
| 215 | \ | ||
| 216 | get_user(regs->gr[26],&sp[0]); \ | ||
| 217 | get_user(regs->gr[25],&sp[-1]); \ | ||
| 218 | get_user(regs->gr[24],&sp[-2]); \ | ||
| 219 | get_user(regs->gr[23],&sp[-3]); \ | ||
| 220 | } while(0) | ||
| 221 | |||
| 222 | /* The ELF abi wants things done a "wee bit" differently than | 195 | /* The ELF abi wants things done a "wee bit" differently than |
| 223 | * som does. Supporting this behavior here avoids | 196 | * som does. Supporting this behavior here avoids |
| 224 | * having our own version of create_elf_tables. | 197 | * having our own version of create_elf_tables. |
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index ecc3ae1ca28e..dd4d1876a020 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h | |||
| @@ -49,16 +49,6 @@ | |||
| 49 | #define MADV_DONTFORK 10 /* don't inherit across fork */ | 49 | #define MADV_DONTFORK 10 /* don't inherit across fork */ |
| 50 | #define MADV_DOFORK 11 /* do inherit across fork */ | 50 | #define MADV_DOFORK 11 /* do inherit across fork */ |
| 51 | 51 | ||
| 52 | /* The range 12-64 is reserved for page size specification. */ | ||
| 53 | #define MADV_4K_PAGES 12 /* Use 4K pages */ | ||
| 54 | #define MADV_16K_PAGES 14 /* Use 16K pages */ | ||
| 55 | #define MADV_64K_PAGES 16 /* Use 64K pages */ | ||
| 56 | #define MADV_256K_PAGES 18 /* Use 256K pages */ | ||
| 57 | #define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */ | ||
| 58 | #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ | ||
| 59 | #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ | ||
| 60 | #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ | ||
| 61 | |||
| 62 | #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ | 52 | #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ |
| 63 | #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ | 53 | #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ |
| 64 | 54 | ||
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 59001cea13f9..d2f62570a7b1 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
| @@ -290,6 +290,14 @@ int main(void) | |||
| 290 | DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); | 290 | DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); |
| 291 | DEFINE(ASM_PT_INITIAL, PT_INITIAL); | 291 | DEFINE(ASM_PT_INITIAL, PT_INITIAL); |
| 292 | BLANK(); | 292 | BLANK(); |
| 293 | /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text | ||
| 294 | * and kernel data on physical huge pages */ | ||
| 295 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 296 | DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); | ||
| 297 | #else | ||
| 298 | DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); | ||
| 299 | #endif | ||
| 300 | BLANK(); | ||
| 293 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); | 301 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); |
| 294 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); | 302 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); |
| 295 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); | 303 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index c5ef4081b01d..623496c11756 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
| @@ -502,21 +502,38 @@ | |||
| 502 | STREG \pte,0(\ptp) | 502 | STREG \pte,0(\ptp) |
| 503 | .endm | 503 | .endm |
| 504 | 504 | ||
| 505 | /* We have (depending on the page size): | ||
| 506 | * - 38 to 52-bit Physical Page Number | ||
| 507 | * - 12 to 26-bit page offset | ||
| 508 | */ | ||
| 505 | /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) | 509 | /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) |
| 506 | * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ | 510 | * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ |
| 507 | #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) | 511 | #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) |
| 512 | #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) | ||
| 508 | 513 | ||
| 509 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ | 514 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
| 510 | .macro convert_for_tlb_insert20 pte | 515 | .macro convert_for_tlb_insert20 pte,tmp |
| 516 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 517 | copy \pte,\tmp | ||
| 518 | extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ | ||
| 519 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte | ||
| 520 | |||
| 521 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ | ||
| 522 | (63-58)+PAGE_ADD_SHIFT,\pte | ||
| 523 | extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 | ||
| 524 | depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ | ||
| 525 | (63-58)+PAGE_ADD_HUGE_SHIFT,\pte | ||
| 526 | #else /* Huge pages disabled */ | ||
| 511 | extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ | 527 | extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ |
| 512 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte | 528 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte |
| 513 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ | 529 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ |
| 514 | (63-58)+PAGE_ADD_SHIFT,\pte | 530 | (63-58)+PAGE_ADD_SHIFT,\pte |
| 531 | #endif | ||
| 515 | .endm | 532 | .endm |
| 516 | 533 | ||
| 517 | /* Convert the pte and prot to tlb insertion values. How | 534 | /* Convert the pte and prot to tlb insertion values. How |
| 518 | * this happens is quite subtle, read below */ | 535 | * this happens is quite subtle, read below */ |
| 519 | .macro make_insert_tlb spc,pte,prot | 536 | .macro make_insert_tlb spc,pte,prot,tmp |
| 520 | space_to_prot \spc \prot /* create prot id from space */ | 537 | space_to_prot \spc \prot /* create prot id from space */ |
| 521 | /* The following is the real subtlety. This is depositing | 538 | /* The following is the real subtlety. This is depositing |
| 522 | * T <-> _PAGE_REFTRAP | 539 | * T <-> _PAGE_REFTRAP |
| @@ -553,7 +570,7 @@ | |||
| 553 | depdi 1,12,1,\prot | 570 | depdi 1,12,1,\prot |
| 554 | 571 | ||
| 555 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ | 572 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
| 556 | convert_for_tlb_insert20 \pte | 573 | convert_for_tlb_insert20 \pte \tmp |
| 557 | .endm | 574 | .endm |
| 558 | 575 | ||
| 559 | /* Identical macro to make_insert_tlb above, except it | 576 | /* Identical macro to make_insert_tlb above, except it |
| @@ -646,17 +663,12 @@ | |||
| 646 | 663 | ||
| 647 | 664 | ||
| 648 | /* | 665 | /* |
| 649 | * Align fault_vector_20 on 4K boundary so that both | 666 | * Fault_vectors are architecturally required to be aligned on a 2K |
| 650 | * fault_vector_11 and fault_vector_20 are on the | 667 | * boundary |
| 651 | * same page. This is only necessary as long as we | ||
| 652 | * write protect the kernel text, which we may stop | ||
| 653 | * doing once we use large page translations to cover | ||
| 654 | * the static part of the kernel address space. | ||
| 655 | */ | 668 | */ |
| 656 | 669 | ||
| 657 | .text | 670 | .text |
| 658 | 671 | .align 2048 | |
| 659 | .align 4096 | ||
| 660 | 672 | ||
| 661 | ENTRY(fault_vector_20) | 673 | ENTRY(fault_vector_20) |
| 662 | /* First vector is invalid (0) */ | 674 | /* First vector is invalid (0) */ |
| @@ -1147,7 +1159,7 @@ dtlb_miss_20w: | |||
| 1147 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w | 1159 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w |
| 1148 | update_accessed ptp,pte,t0,t1 | 1160 | update_accessed ptp,pte,t0,t1 |
| 1149 | 1161 | ||
| 1150 | make_insert_tlb spc,pte,prot | 1162 | make_insert_tlb spc,pte,prot,t1 |
| 1151 | 1163 | ||
| 1152 | idtlbt pte,prot | 1164 | idtlbt pte,prot |
| 1153 | 1165 | ||
| @@ -1173,7 +1185,7 @@ nadtlb_miss_20w: | |||
| 1173 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w | 1185 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w |
| 1174 | update_accessed ptp,pte,t0,t1 | 1186 | update_accessed ptp,pte,t0,t1 |
| 1175 | 1187 | ||
| 1176 | make_insert_tlb spc,pte,prot | 1188 | make_insert_tlb spc,pte,prot,t1 |
| 1177 | 1189 | ||
| 1178 | idtlbt pte,prot | 1190 | idtlbt pte,prot |
| 1179 | 1191 | ||
| @@ -1267,7 +1279,7 @@ dtlb_miss_20: | |||
| 1267 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 | 1279 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 |
| 1268 | update_accessed ptp,pte,t0,t1 | 1280 | update_accessed ptp,pte,t0,t1 |
| 1269 | 1281 | ||
| 1270 | make_insert_tlb spc,pte,prot | 1282 | make_insert_tlb spc,pte,prot,t1 |
| 1271 | 1283 | ||
| 1272 | f_extend pte,t1 | 1284 | f_extend pte,t1 |
| 1273 | 1285 | ||
| @@ -1295,7 +1307,7 @@ nadtlb_miss_20: | |||
| 1295 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 | 1307 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 |
| 1296 | update_accessed ptp,pte,t0,t1 | 1308 | update_accessed ptp,pte,t0,t1 |
| 1297 | 1309 | ||
| 1298 | make_insert_tlb spc,pte,prot | 1310 | make_insert_tlb spc,pte,prot,t1 |
| 1299 | 1311 | ||
| 1300 | f_extend pte,t1 | 1312 | f_extend pte,t1 |
| 1301 | 1313 | ||
| @@ -1404,7 +1416,7 @@ itlb_miss_20w: | |||
| 1404 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault | 1416 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault |
| 1405 | update_accessed ptp,pte,t0,t1 | 1417 | update_accessed ptp,pte,t0,t1 |
| 1406 | 1418 | ||
| 1407 | make_insert_tlb spc,pte,prot | 1419 | make_insert_tlb spc,pte,prot,t1 |
| 1408 | 1420 | ||
| 1409 | iitlbt pte,prot | 1421 | iitlbt pte,prot |
| 1410 | 1422 | ||
| @@ -1428,7 +1440,7 @@ naitlb_miss_20w: | |||
| 1428 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w | 1440 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w |
| 1429 | update_accessed ptp,pte,t0,t1 | 1441 | update_accessed ptp,pte,t0,t1 |
| 1430 | 1442 | ||
| 1431 | make_insert_tlb spc,pte,prot | 1443 | make_insert_tlb spc,pte,prot,t1 |
| 1432 | 1444 | ||
| 1433 | iitlbt pte,prot | 1445 | iitlbt pte,prot |
| 1434 | 1446 | ||
| @@ -1514,7 +1526,7 @@ itlb_miss_20: | |||
| 1514 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault | 1526 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault |
| 1515 | update_accessed ptp,pte,t0,t1 | 1527 | update_accessed ptp,pte,t0,t1 |
| 1516 | 1528 | ||
| 1517 | make_insert_tlb spc,pte,prot | 1529 | make_insert_tlb spc,pte,prot,t1 |
| 1518 | 1530 | ||
| 1519 | f_extend pte,t1 | 1531 | f_extend pte,t1 |
| 1520 | 1532 | ||
| @@ -1534,7 +1546,7 @@ naitlb_miss_20: | |||
| 1534 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 | 1546 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 |
| 1535 | update_accessed ptp,pte,t0,t1 | 1547 | update_accessed ptp,pte,t0,t1 |
| 1536 | 1548 | ||
| 1537 | make_insert_tlb spc,pte,prot | 1549 | make_insert_tlb spc,pte,prot,t1 |
| 1538 | 1550 | ||
| 1539 | f_extend pte,t1 | 1551 | f_extend pte,t1 |
| 1540 | 1552 | ||
| @@ -1566,7 +1578,7 @@ dbit_trap_20w: | |||
| 1566 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault | 1578 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault |
| 1567 | update_dirty ptp,pte,t1 | 1579 | update_dirty ptp,pte,t1 |
| 1568 | 1580 | ||
| 1569 | make_insert_tlb spc,pte,prot | 1581 | make_insert_tlb spc,pte,prot,t1 |
| 1570 | 1582 | ||
| 1571 | idtlbt pte,prot | 1583 | idtlbt pte,prot |
| 1572 | 1584 | ||
| @@ -1610,7 +1622,7 @@ dbit_trap_20: | |||
| 1610 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault | 1622 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault |
| 1611 | update_dirty ptp,pte,t1 | 1623 | update_dirty ptp,pte,t1 |
| 1612 | 1624 | ||
| 1613 | make_insert_tlb spc,pte,prot | 1625 | make_insert_tlb spc,pte,prot,t1 |
| 1614 | 1626 | ||
| 1615 | f_extend pte,t1 | 1627 | f_extend pte,t1 |
| 1616 | 1628 | ||
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index e7d64527aff9..75aa0db9f69e 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S | |||
| @@ -69,7 +69,7 @@ $bss_loop: | |||
| 69 | stw,ma %arg2,4(%r1) | 69 | stw,ma %arg2,4(%r1) |
| 70 | stw,ma %arg3,4(%r1) | 70 | stw,ma %arg3,4(%r1) |
| 71 | 71 | ||
| 72 | /* Initialize startup VM. Just map first 8/16 MB of memory */ | 72 | /* Initialize startup VM. Just map first 16/32 MB of memory */ |
| 73 | load32 PA(swapper_pg_dir),%r4 | 73 | load32 PA(swapper_pg_dir),%r4 |
| 74 | mtctl %r4,%cr24 /* Initialize kernel root pointer */ | 74 | mtctl %r4,%cr24 /* Initialize kernel root pointer */ |
| 75 | mtctl %r4,%cr25 /* Initialize user root pointer */ | 75 | mtctl %r4,%cr25 /* Initialize user root pointer */ |
| @@ -107,7 +107,7 @@ $bss_loop: | |||
| 107 | /* Now initialize the PTEs themselves. We use RWX for | 107 | /* Now initialize the PTEs themselves. We use RWX for |
| 108 | * everything ... it will get remapped correctly later */ | 108 | * everything ... it will get remapped correctly later */ |
| 109 | ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ | 109 | ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ |
| 110 | ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ | 110 | load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ |
| 111 | load32 PA(pg0),%r1 | 111 | load32 PA(pg0),%r1 |
| 112 | 112 | ||
| 113 | $pgt_fill_loop: | 113 | $pgt_fill_loop: |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 72a3c658ad7b..f7ea626e29c9 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
| @@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p) | |||
| 130 | printk(KERN_INFO "The 32-bit Kernel has started...\n"); | 130 | printk(KERN_INFO "The 32-bit Kernel has started...\n"); |
| 131 | #endif | 131 | #endif |
| 132 | 132 | ||
| 133 | printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); | 133 | printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ", |
| 134 | (int)(PAGE_SIZE / 1024)); | ||
| 135 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 136 | printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size", | ||
| 137 | 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20)); | ||
| 138 | #else | ||
| 139 | printk(KERN_CONT "disabled"); | ||
| 140 | #endif | ||
| 141 | printk(KERN_CONT ".\n"); | ||
| 142 | |||
| 134 | 143 | ||
| 135 | pdc_console_init(); | 144 | pdc_console_init(); |
| 136 | 145 | ||
| @@ -377,6 +386,7 @@ arch_initcall(parisc_init); | |||
| 377 | void start_parisc(void) | 386 | void start_parisc(void) |
| 378 | { | 387 | { |
| 379 | extern void start_kernel(void); | 388 | extern void start_kernel(void); |
| 389 | extern void early_trap_init(void); | ||
| 380 | 390 | ||
| 381 | int ret, cpunum; | 391 | int ret, cpunum; |
| 382 | struct pdc_coproc_cfg coproc_cfg; | 392 | struct pdc_coproc_cfg coproc_cfg; |
| @@ -397,6 +407,8 @@ void start_parisc(void) | |||
| 397 | panic("must have an fpu to boot linux"); | 407 | panic("must have an fpu to boot linux"); |
| 398 | } | 408 | } |
| 399 | 409 | ||
| 410 | early_trap_init(); /* initialize checksum of fault_vector */ | ||
| 411 | |||
| 400 | start_kernel(); | 412 | start_kernel(); |
| 401 | // not reached | 413 | // not reached |
| 402 | } | 414 | } |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 0b8d26d3ba43..3fbd7252a4b2 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -369,7 +369,7 @@ tracesys_exit: | |||
| 369 | ldo -16(%r30),%r29 /* Reference param save area */ | 369 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 370 | #endif | 370 | #endif |
| 371 | ldo TASK_REGS(%r1),%r26 | 371 | ldo TASK_REGS(%r1),%r26 |
| 372 | bl do_syscall_trace_exit,%r2 | 372 | BL do_syscall_trace_exit,%r2 |
| 373 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ | 373 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ |
| 374 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 374 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 375 | LDREG TI_TASK(%r1), %r1 | 375 | LDREG TI_TASK(%r1), %r1 |
| @@ -390,7 +390,7 @@ tracesys_sigexit: | |||
| 390 | #ifdef CONFIG_64BIT | 390 | #ifdef CONFIG_64BIT |
| 391 | ldo -16(%r30),%r29 /* Reference param save area */ | 391 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 392 | #endif | 392 | #endif |
| 393 | bl do_syscall_trace_exit,%r2 | 393 | BL do_syscall_trace_exit,%r2 |
| 394 | ldo TASK_REGS(%r1),%r26 | 394 | ldo TASK_REGS(%r1),%r26 |
| 395 | 395 | ||
| 396 | ldil L%syscall_exit_rfi,%r1 | 396 | ldil L%syscall_exit_rfi,%r1 |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index b99b39f1da02..553b09855cfd 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c | |||
| @@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) | |||
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | 809 | ||
| 810 | int __init check_ivt(void *iva) | 810 | void __init initialize_ivt(const void *iva) |
| 811 | { | 811 | { |
| 812 | extern u32 os_hpmc_size; | 812 | extern u32 os_hpmc_size; |
| 813 | extern const u32 os_hpmc[]; | 813 | extern const u32 os_hpmc[]; |
| @@ -818,8 +818,8 @@ int __init check_ivt(void *iva) | |||
| 818 | u32 *hpmcp; | 818 | u32 *hpmcp; |
| 819 | u32 length; | 819 | u32 length; |
| 820 | 820 | ||
| 821 | if (strcmp((char *)iva, "cows can fly")) | 821 | if (strcmp((const char *)iva, "cows can fly")) |
| 822 | return -1; | 822 | panic("IVT invalid"); |
| 823 | 823 | ||
| 824 | ivap = (u32 *)iva; | 824 | ivap = (u32 *)iva; |
| 825 | 825 | ||
| @@ -839,28 +839,23 @@ int __init check_ivt(void *iva) | |||
| 839 | check += ivap[i]; | 839 | check += ivap[i]; |
| 840 | 840 | ||
| 841 | ivap[5] = -check; | 841 | ivap[5] = -check; |
| 842 | |||
| 843 | return 0; | ||
| 844 | } | 842 | } |
| 845 | 843 | ||
| 846 | #ifndef CONFIG_64BIT | ||
| 847 | extern const void fault_vector_11; | ||
| 848 | #endif | ||
| 849 | extern const void fault_vector_20; | ||
| 850 | 844 | ||
| 851 | void __init trap_init(void) | 845 | /* early_trap_init() is called before we set up kernel mappings and |
| 846 | * write-protect the kernel */ | ||
| 847 | void __init early_trap_init(void) | ||
| 852 | { | 848 | { |
| 853 | void *iva; | 849 | extern const void fault_vector_20; |
| 854 | 850 | ||
| 855 | if (boot_cpu_data.cpu_type >= pcxu) | 851 | #ifndef CONFIG_64BIT |
| 856 | iva = (void *) &fault_vector_20; | 852 | extern const void fault_vector_11; |
| 857 | else | 853 | initialize_ivt(&fault_vector_11); |
| 858 | #ifdef CONFIG_64BIT | ||
| 859 | panic("Can't boot 64-bit OS on PA1.1 processor!"); | ||
| 860 | #else | ||
| 861 | iva = (void *) &fault_vector_11; | ||
| 862 | #endif | 854 | #endif |
| 863 | 855 | ||
| 864 | if (check_ivt(iva)) | 856 | initialize_ivt(&fault_vector_20); |
| 865 | panic("IVT invalid"); | 857 | } |
| 858 | |||
| 859 | void __init trap_init(void) | ||
| 860 | { | ||
| 866 | } | 861 | } |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 0dacc5ca555a..308f29081d46 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
| @@ -60,7 +60,7 @@ SECTIONS | |||
| 60 | EXIT_DATA | 60 | EXIT_DATA |
| 61 | } | 61 | } |
| 62 | PERCPU_SECTION(8) | 62 | PERCPU_SECTION(8) |
| 63 | . = ALIGN(PAGE_SIZE); | 63 | . = ALIGN(HUGEPAGE_SIZE); |
| 64 | __init_end = .; | 64 | __init_end = .; |
| 65 | /* freed after init ends here */ | 65 | /* freed after init ends here */ |
| 66 | 66 | ||
| @@ -116,7 +116,7 @@ SECTIONS | |||
| 116 | * that we can properly leave these | 116 | * that we can properly leave these |
| 117 | * as writable | 117 | * as writable |
| 118 | */ | 118 | */ |
| 119 | . = ALIGN(PAGE_SIZE); | 119 | . = ALIGN(HUGEPAGE_SIZE); |
| 120 | data_start = .; | 120 | data_start = .; |
| 121 | 121 | ||
| 122 | EXCEPTION_TABLE(8) | 122 | EXCEPTION_TABLE(8) |
| @@ -135,8 +135,11 @@ SECTIONS | |||
| 135 | _edata = .; | 135 | _edata = .; |
| 136 | 136 | ||
| 137 | /* BSS */ | 137 | /* BSS */ |
| 138 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) | 138 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE) |
| 139 | |||
| 140 | /* bootmap is allocated in setup_bootmem() directly behind bss. */ | ||
| 139 | 141 | ||
| 142 | . = ALIGN(HUGEPAGE_SIZE); | ||
| 140 | _end = . ; | 143 | _end = . ; |
| 141 | 144 | ||
| 142 | STABS_DEBUG | 145 | STABS_DEBUG |
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile index 758ceefb373a..134393de69d2 100644 --- a/arch/parisc/mm/Makefile +++ b/arch/parisc/mm/Makefile | |||
| @@ -3,3 +3,4 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y := init.o fault.o ioremap.o | 5 | obj-y := init.o fault.o ioremap.o |
| 6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c new file mode 100644 index 000000000000..f6fdc77a72bd --- /dev/null +++ b/arch/parisc/mm/hugetlbpage.c | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * PARISC64 Huge TLB page support. | ||
| 3 | * | ||
| 4 | * This parisc implementation is heavily based on the SPARC and x86 code. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2015 Helge Deller <deller@gmx.de> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/fs.h> | ||
| 10 | #include <linux/mm.h> | ||
| 11 | #include <linux/hugetlb.h> | ||
| 12 | #include <linux/pagemap.h> | ||
| 13 | #include <linux/sysctl.h> | ||
| 14 | |||
| 15 | #include <asm/mman.h> | ||
| 16 | #include <asm/pgalloc.h> | ||
| 17 | #include <asm/tlb.h> | ||
| 18 | #include <asm/tlbflush.h> | ||
| 19 | #include <asm/cacheflush.h> | ||
| 20 | #include <asm/mmu_context.h> | ||
| 21 | |||
| 22 | |||
| 23 | unsigned long | ||
| 24 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
| 25 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
| 26 | { | ||
| 27 | struct hstate *h = hstate_file(file); | ||
| 28 | |||
| 29 | if (len & ~huge_page_mask(h)) | ||
| 30 | return -EINVAL; | ||
| 31 | if (len > TASK_SIZE) | ||
| 32 | return -ENOMEM; | ||
| 33 | |||
| 34 | if (flags & MAP_FIXED) | ||
| 35 | if (prepare_hugepage_range(file, addr, len)) | ||
| 36 | return -EINVAL; | ||
| 37 | |||
| 38 | if (addr) | ||
| 39 | addr = ALIGN(addr, huge_page_size(h)); | ||
| 40 | |||
| 41 | /* we need to make sure the colouring is OK */ | ||
| 42 | return arch_get_unmapped_area(file, addr, len, pgoff, flags); | ||
| 43 | } | ||
| 44 | |||
| 45 | |||
| 46 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
| 47 | unsigned long addr, unsigned long sz) | ||
| 48 | { | ||
| 49 | pgd_t *pgd; | ||
| 50 | pud_t *pud; | ||
| 51 | pmd_t *pmd; | ||
| 52 | pte_t *pte = NULL; | ||
| 53 | |||
| 54 | /* We must align the address, because our caller will run | ||
| 55 | * set_huge_pte_at() on whatever we return, which writes out | ||
| 56 | * all of the sub-ptes for the hugepage range. So we have | ||
| 57 | * to give it the first such sub-pte. | ||
| 58 | */ | ||
| 59 | addr &= HPAGE_MASK; | ||
| 60 | |||
| 61 | pgd = pgd_offset(mm, addr); | ||
| 62 | pud = pud_alloc(mm, pgd, addr); | ||
| 63 | if (pud) { | ||
| 64 | pmd = pmd_alloc(mm, pud, addr); | ||
| 65 | if (pmd) | ||
| 66 | pte = pte_alloc_map(mm, NULL, pmd, addr); | ||
| 67 | } | ||
| 68 | return pte; | ||
| 69 | } | ||
| 70 | |||
| 71 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
| 72 | { | ||
| 73 | pgd_t *pgd; | ||
| 74 | pud_t *pud; | ||
| 75 | pmd_t *pmd; | ||
| 76 | pte_t *pte = NULL; | ||
| 77 | |||
| 78 | addr &= HPAGE_MASK; | ||
| 79 | |||
| 80 | pgd = pgd_offset(mm, addr); | ||
| 81 | if (!pgd_none(*pgd)) { | ||
| 82 | pud = pud_offset(pgd, addr); | ||
| 83 | if (!pud_none(*pud)) { | ||
| 84 | pmd = pmd_offset(pud, addr); | ||
| 85 | if (!pmd_none(*pmd)) | ||
| 86 | pte = pte_offset_map(pmd, addr); | ||
| 87 | } | ||
| 88 | } | ||
| 89 | return pte; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* Purge data and instruction TLB entries. Must be called holding | ||
| 93 | * the pa_tlb_lock. The TLB purge instructions are slow on SMP | ||
| 94 | * machines since the purge must be broadcast to all CPUs. | ||
| 95 | */ | ||
| 96 | static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr) | ||
| 97 | { | ||
| 98 | int i; | ||
| 99 | |||
| 100 | /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate | ||
| 101 | * Linux standard huge pages (e.g. 2 MB) */ | ||
| 102 | BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT); | ||
| 103 | |||
| 104 | addr &= HPAGE_MASK; | ||
| 105 | addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT; | ||
| 106 | |||
| 107 | for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) { | ||
| 108 | mtsp(mm->context, 1); | ||
| 109 | pdtlb(addr); | ||
| 110 | if (unlikely(split_tlb)) | ||
| 111 | pitlb(addr); | ||
| 112 | addr += (1UL << REAL_HPAGE_SHIFT); | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 116 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
| 117 | pte_t *ptep, pte_t entry) | ||
| 118 | { | ||
| 119 | unsigned long addr_start; | ||
| 120 | int i; | ||
| 121 | |||
| 122 | addr &= HPAGE_MASK; | ||
| 123 | addr_start = addr; | ||
| 124 | |||
| 125 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
| 126 | /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry) | ||
| 127 | * instead, but then we get double locking on pa_tlb_lock. */ | ||
| 128 | *ptep = entry; | ||
| 129 | ptep++; | ||
| 130 | |||
| 131 | /* Drop the PAGE_SIZE/non-huge tlb entry */ | ||
| 132 | purge_tlb_entries(mm, addr); | ||
| 133 | |||
| 134 | addr += PAGE_SIZE; | ||
| 135 | pte_val(entry) += PAGE_SIZE; | ||
| 136 | } | ||
| 137 | |||
| 138 | purge_tlb_entries_huge(mm, addr_start); | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
| 143 | pte_t *ptep) | ||
| 144 | { | ||
| 145 | pte_t entry; | ||
| 146 | |||
| 147 | entry = *ptep; | ||
| 148 | set_huge_pte_at(mm, addr, ptep, __pte(0)); | ||
| 149 | |||
| 150 | return entry; | ||
| 151 | } | ||
| 152 | |||
| 153 | int pmd_huge(pmd_t pmd) | ||
| 154 | { | ||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | int pud_huge(pud_t pud) | ||
| 159 | { | ||
| 160 | return 0; | ||
| 161 | } | ||
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index c5fec4890fdf..1b366c477687 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr, | |||
| 409 | unsigned long vaddr; | 409 | unsigned long vaddr; |
| 410 | unsigned long ro_start; | 410 | unsigned long ro_start; |
| 411 | unsigned long ro_end; | 411 | unsigned long ro_end; |
| 412 | unsigned long fv_addr; | 412 | unsigned long kernel_end; |
| 413 | unsigned long gw_addr; | ||
| 414 | extern const unsigned long fault_vector_20; | ||
| 415 | extern void * const linux_gateway_page; | ||
| 416 | 413 | ||
| 417 | ro_start = __pa((unsigned long)_text); | 414 | ro_start = __pa((unsigned long)_text); |
| 418 | ro_end = __pa((unsigned long)&data_start); | 415 | ro_end = __pa((unsigned long)&data_start); |
| 419 | fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; | 416 | kernel_end = __pa((unsigned long)&_end); |
| 420 | gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; | ||
| 421 | 417 | ||
| 422 | end_paddr = start_paddr + size; | 418 | end_paddr = start_paddr + size; |
| 423 | 419 | ||
| @@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr, | |||
| 475 | for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { | 471 | for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { |
| 476 | pte_t pte; | 472 | pte_t pte; |
| 477 | 473 | ||
| 478 | /* | ||
| 479 | * Map the fault vector writable so we can | ||
| 480 | * write the HPMC checksum. | ||
| 481 | */ | ||
| 482 | if (force) | 474 | if (force) |
| 483 | pte = __mk_pte(address, pgprot); | 475 | pte = __mk_pte(address, pgprot); |
| 484 | else if (parisc_text_address(vaddr) && | 476 | else if (parisc_text_address(vaddr)) { |
| 485 | address != fv_addr) | ||
| 486 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); | 477 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); |
| 478 | if (address >= ro_start && address < kernel_end) | ||
| 479 | pte = pte_mkhuge(pte); | ||
| 480 | } | ||
| 487 | else | 481 | else |
| 488 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) | 482 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) |
| 489 | if (address >= ro_start && address < ro_end | 483 | if (address >= ro_start && address < ro_end) { |
| 490 | && address != fv_addr | 484 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); |
| 491 | && address != gw_addr) | 485 | pte = pte_mkhuge(pte); |
| 492 | pte = __mk_pte(address, PAGE_KERNEL_RO); | 486 | } else |
| 493 | else | ||
| 494 | #endif | 487 | #endif |
| 488 | { | ||
| 495 | pte = __mk_pte(address, pgprot); | 489 | pte = __mk_pte(address, pgprot); |
| 490 | if (address >= ro_start && address < kernel_end) | ||
| 491 | pte = pte_mkhuge(pte); | ||
| 492 | } | ||
| 496 | 493 | ||
| 497 | if (address >= end_paddr) { | 494 | if (address >= end_paddr) { |
| 498 | if (force) | 495 | if (force) |
| @@ -536,15 +533,12 @@ void free_initmem(void) | |||
| 536 | 533 | ||
| 537 | /* force the kernel to see the new TLB entries */ | 534 | /* force the kernel to see the new TLB entries */ |
| 538 | __flush_tlb_range(0, init_begin, init_end); | 535 | __flush_tlb_range(0, init_begin, init_end); |
| 539 | /* Attempt to catch anyone trying to execute code here | 536 | |
| 540 | * by filling the page with BRK insns. | ||
| 541 | */ | ||
| 542 | memset((void *)init_begin, 0x00, init_end - init_begin); | ||
| 543 | /* finally dump all the instructions which were cached, since the | 537 | /* finally dump all the instructions which were cached, since the |
| 544 | * pages are no-longer executable */ | 538 | * pages are no-longer executable */ |
| 545 | flush_icache_range(init_begin, init_end); | 539 | flush_icache_range(init_begin, init_end); |
| 546 | 540 | ||
| 547 | free_initmem_default(-1); | 541 | free_initmem_default(POISON_FREE_INITMEM); |
| 548 | 542 | ||
| 549 | /* set up a new led state on systems shipped LED State panel */ | 543 | /* set up a new led state on systems shipped LED State panel */ |
| 550 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); | 544 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); |
| @@ -728,8 +722,8 @@ static void __init pagetable_init(void) | |||
| 728 | unsigned long size; | 722 | unsigned long size; |
| 729 | 723 | ||
| 730 | start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; | 724 | start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; |
| 731 | end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT); | ||
| 732 | size = pmem_ranges[range].pages << PAGE_SHIFT; | 725 | size = pmem_ranges[range].pages << PAGE_SHIFT; |
| 726 | end_paddr = start_paddr + size; | ||
| 733 | 727 | ||
| 734 | map_pages((unsigned long)__va(start_paddr), start_paddr, | 728 | map_pages((unsigned long)__va(start_paddr), start_paddr, |
| 735 | size, PAGE_KERNEL, 0); | 729 | size, PAGE_KERNEL, 0); |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a908ada8e0a5..2220f7a60def 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
| @@ -108,6 +108,7 @@ | |||
| 108 | #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ | 108 | #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ |
| 109 | #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ | 109 | #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ |
| 110 | #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ | 110 | #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ |
| 111 | #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */ | ||
| 111 | #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) | 112 | #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) |
| 112 | #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) | 113 | #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) |
| 113 | 114 | ||
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index c9e26cb264f4..f2b0b1b0c72a 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
| @@ -382,3 +382,4 @@ COMPAT_SYS(shmat) | |||
| 382 | SYSCALL(shmdt) | 382 | SYSCALL(shmdt) |
| 383 | SYSCALL(shmget) | 383 | SYSCALL(shmget) |
| 384 | COMPAT_SYS(shmctl) | 384 | COMPAT_SYS(shmctl) |
| 385 | SYSCALL(mlock2) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 6d8f8023ac27..4b6b8ace18e0 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
| 13 | 13 | ||
| 14 | 14 | ||
| 15 | #define __NR_syscalls 378 | 15 | #define __NR_syscalls 379 |
| 16 | 16 | ||
| 17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
| 18 | #define NR_syscalls __NR_syscalls | 18 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 81579e93c659..1effea5193d6 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
| @@ -400,5 +400,6 @@ | |||
| 400 | #define __NR_shmdt 375 | 400 | #define __NR_shmdt 375 |
| 401 | #define __NR_shmget 376 | 401 | #define __NR_shmget 376 |
| 402 | #define __NR_shmctl 377 | 402 | #define __NR_shmctl 377 |
| 403 | #define __NR_mlock2 378 | ||
| 403 | 404 | ||
| 404 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 405 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 75b6676c1a0b..646bf4d222c1 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr, | |||
| 551 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; | 551 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | /* | ||
| 555 | * Use the current MSR TM suspended bit to track if we have | ||
| 556 | * checkpointed state outstanding. | ||
| 557 | * On signal delivery, we'd normally reclaim the checkpointed | ||
| 558 | * state to obtain stack pointer (see:get_tm_stackpointer()). | ||
| 559 | * This will then directly return to userspace without going | ||
| 560 | * through __switch_to(). However, if the stack frame is bad, | ||
| 561 | * we need to exit this thread which calls __switch_to() which | ||
| 562 | * will again attempt to reclaim the already saved tm state. | ||
| 563 | * Hence we need to check that we've not already reclaimed | ||
| 564 | * this state. | ||
| 565 | * We do this using the current MSR, rather tracking it in | ||
| 566 | * some specific thread_struct bit, as it has the additional | ||
| 567 | * benifit of checking for a potential TM bad thing exception. | ||
| 568 | */ | ||
| 569 | if (!MSR_TM_SUSPENDED(mfmsr())) | ||
| 570 | return; | ||
| 571 | |||
| 554 | tm_reclaim(thr, thr->regs->msr, cause); | 572 | tm_reclaim(thr, thr->regs->msr, cause); |
| 555 | 573 | ||
| 556 | /* Having done the reclaim, we now have the checkpointed | 574 | /* Having done the reclaim, we now have the checkpointed |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 0dbee465af7a..ef7c24e84a62 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
| @@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs, | |||
| 875 | return 1; | 875 | return 1; |
| 876 | #endif /* CONFIG_SPE */ | 876 | #endif /* CONFIG_SPE */ |
| 877 | 877 | ||
| 878 | /* Get the top half of the MSR from the user context */ | ||
| 879 | if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) | ||
| 880 | return 1; | ||
| 881 | msr_hi <<= 32; | ||
| 882 | /* If TM bits are set to the reserved value, it's an invalid context */ | ||
| 883 | if (MSR_TM_RESV(msr_hi)) | ||
| 884 | return 1; | ||
| 885 | /* Pull in the MSR TM bits from the user context */ | ||
| 886 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); | ||
| 878 | /* Now, recheckpoint. This loads up all of the checkpointed (older) | 887 | /* Now, recheckpoint. This loads up all of the checkpointed (older) |
| 879 | * registers, including FP and V[S]Rs. After recheckpointing, the | 888 | * registers, including FP and V[S]Rs. After recheckpointing, the |
| 880 | * transactional versions should be loaded. | 889 | * transactional versions should be loaded. |
| @@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, | |||
| 884 | current->thread.tm_texasr |= TEXASR_FS; | 893 | current->thread.tm_texasr |= TEXASR_FS; |
| 885 | /* This loads the checkpointed FP/VEC state, if used */ | 894 | /* This loads the checkpointed FP/VEC state, if used */ |
| 886 | tm_recheckpoint(¤t->thread, msr); | 895 | tm_recheckpoint(¤t->thread, msr); |
| 887 | /* Get the top half of the MSR */ | ||
| 888 | if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) | ||
| 889 | return 1; | ||
| 890 | /* Pull in MSR TM from user context */ | ||
| 891 | regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK); | ||
| 892 | 896 | ||
| 893 | /* This loads the speculative FP/VEC state, if used */ | 897 | /* This loads the speculative FP/VEC state, if used */ |
| 894 | if (msr & MSR_FP) { | 898 | if (msr & MSR_FP) { |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 20756dfb9f34..c676ecec0869 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
| @@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, | |||
| 438 | 438 | ||
| 439 | /* get MSR separately, transfer the LE bit if doing signal return */ | 439 | /* get MSR separately, transfer the LE bit if doing signal return */ |
| 440 | err |= __get_user(msr, &sc->gp_regs[PT_MSR]); | 440 | err |= __get_user(msr, &sc->gp_regs[PT_MSR]); |
| 441 | /* Don't allow reserved mode. */ | ||
| 442 | if (MSR_TM_RESV(msr)) | ||
| 443 | return -EINVAL; | ||
| 444 | |||
| 441 | /* pull in MSR TM from user context */ | 445 | /* pull in MSR TM from user context */ |
| 442 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); | 446 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); |
| 443 | 447 | ||
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 0c5d8ee657f0..d1e7b0a0feeb 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h | |||
| @@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void); | |||
| 312 | extern void reipl_ccw_dev(struct ccw_dev_id *id); | 312 | extern void reipl_ccw_dev(struct ccw_dev_id *id); |
| 313 | 313 | ||
| 314 | struct cio_iplinfo { | 314 | struct cio_iplinfo { |
| 315 | u8 ssid; | ||
| 315 | u16 devno; | 316 | u16 devno; |
| 316 | int is_qdio; | 317 | int is_qdio; |
| 317 | }; | 318 | }; |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 3ad48f22de78..bab6739a1154 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
| @@ -206,9 +206,16 @@ do { \ | |||
| 206 | } while (0) | 206 | } while (0) |
| 207 | #endif /* CONFIG_COMPAT */ | 207 | #endif /* CONFIG_COMPAT */ |
| 208 | 208 | ||
| 209 | extern unsigned long mmap_rnd_mask; | 209 | /* |
| 210 | 210 | * Cache aliasing on the latest machines calls for a mapping granularity | |
| 211 | #define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) | 211 | * of 512KB. For 64-bit processes use a 512KB alignment and a randomization |
| 212 | * of up to 1GB. For 31-bit processes the virtual address space is limited, | ||
| 213 | * use no alignment and limit the randomization to 8MB. | ||
| 214 | */ | ||
| 215 | #define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL) | ||
| 216 | #define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL) | ||
| 217 | #define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL) | ||
| 218 | #define STACK_RND_MASK MMAP_RND_MASK | ||
| 212 | 219 | ||
| 213 | #define ARCH_DLINFO \ | 220 | #define ARCH_DLINFO \ |
| 214 | do { \ | 221 | do { \ |
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 39ae6a359747..86634e71b69f 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
| @@ -64,7 +64,8 @@ struct ipl_block_fcp { | |||
| 64 | 64 | ||
| 65 | struct ipl_block_ccw { | 65 | struct ipl_block_ccw { |
| 66 | u8 reserved1[84]; | 66 | u8 reserved1[84]; |
| 67 | u8 reserved2[2]; | 67 | u16 reserved2 : 13; |
| 68 | u8 ssid : 3; | ||
| 68 | u16 devno; | 69 | u16 devno; |
| 69 | u8 vm_flags; | 70 | u8 vm_flags; |
| 70 | u8 reserved3[3]; | 71 | u8 reserved3[3]; |
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h index 7a7abf1a5537..1aac41e83ea1 100644 --- a/arch/s390/include/asm/pci_dma.h +++ b/arch/s390/include/asm/pci_dma.h | |||
| @@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *); | |||
| 195 | void dma_free_seg_table(unsigned long); | 195 | void dma_free_seg_table(unsigned long); |
| 196 | unsigned long *dma_alloc_cpu_table(void); | 196 | unsigned long *dma_alloc_cpu_table(void); |
| 197 | void dma_cleanup_tables(unsigned long *); | 197 | void dma_cleanup_tables(unsigned long *); |
| 198 | void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int); | 198 | unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr); |
| 199 | void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags); | ||
| 200 | |||
| 199 | #endif | 201 | #endif |
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h index 776f307960cc..cc6cfe7889da 100644 --- a/arch/s390/include/asm/trace/diag.h +++ b/arch/s390/include/asm/trace/diag.h | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #define TRACE_INCLUDE_PATH asm/trace | 19 | #define TRACE_INCLUDE_PATH asm/trace |
| 20 | #define TRACE_INCLUDE_FILE diag | 20 | #define TRACE_INCLUDE_FILE diag |
| 21 | 21 | ||
| 22 | TRACE_EVENT(diagnose, | 22 | TRACE_EVENT(s390_diagnose, |
| 23 | TP_PROTO(unsigned short nr), | 23 | TP_PROTO(unsigned short nr), |
| 24 | TP_ARGS(nr), | 24 | TP_ARGS(nr), |
| 25 | TP_STRUCT__entry( | 25 | TP_STRUCT__entry( |
| @@ -32,9 +32,9 @@ TRACE_EVENT(diagnose, | |||
| 32 | ); | 32 | ); |
| 33 | 33 | ||
| 34 | #ifdef CONFIG_TRACEPOINTS | 34 | #ifdef CONFIG_TRACEPOINTS |
| 35 | void trace_diagnose_norecursion(int diag_nr); | 35 | void trace_s390_diagnose_norecursion(int diag_nr); |
| 36 | #else | 36 | #else |
| 37 | static inline void trace_diagnose_norecursion(int diag_nr) { } | 37 | static inline void trace_s390_diagnose_norecursion(int diag_nr) { } |
| 38 | #endif | 38 | #endif |
| 39 | 39 | ||
| 40 | #endif /* _TRACE_S390_DIAG_H */ | 40 | #endif /* _TRACE_S390_DIAG_H */ |
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index a848adba1504..34ec202472c6 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h | |||
| @@ -192,14 +192,14 @@ | |||
| 192 | #define __NR_set_tid_address 252 | 192 | #define __NR_set_tid_address 252 |
| 193 | #define __NR_fadvise64 253 | 193 | #define __NR_fadvise64 253 |
| 194 | #define __NR_timer_create 254 | 194 | #define __NR_timer_create 254 |
| 195 | #define __NR_timer_settime (__NR_timer_create+1) | 195 | #define __NR_timer_settime 255 |
| 196 | #define __NR_timer_gettime (__NR_timer_create+2) | 196 | #define __NR_timer_gettime 256 |
| 197 | #define __NR_timer_getoverrun (__NR_timer_create+3) | 197 | #define __NR_timer_getoverrun 257 |
| 198 | #define __NR_timer_delete (__NR_timer_create+4) | 198 | #define __NR_timer_delete 258 |
| 199 | #define __NR_clock_settime (__NR_timer_create+5) | 199 | #define __NR_clock_settime 259 |
| 200 | #define __NR_clock_gettime (__NR_timer_create+6) | 200 | #define __NR_clock_gettime 260 |
| 201 | #define __NR_clock_getres (__NR_timer_create+7) | 201 | #define __NR_clock_getres 261 |
| 202 | #define __NR_clock_nanosleep (__NR_timer_create+8) | 202 | #define __NR_clock_nanosleep 262 |
| 203 | /* Number 263 is reserved for vserver */ | 203 | /* Number 263 is reserved for vserver */ |
| 204 | #define __NR_statfs64 265 | 204 | #define __NR_statfs64 265 |
| 205 | #define __NR_fstatfs64 266 | 205 | #define __NR_fstatfs64 266 |
| @@ -309,7 +309,8 @@ | |||
| 309 | #define __NR_recvfrom 371 | 309 | #define __NR_recvfrom 371 |
| 310 | #define __NR_recvmsg 372 | 310 | #define __NR_recvmsg 372 |
| 311 | #define __NR_shutdown 373 | 311 | #define __NR_shutdown 373 |
| 312 | #define NR_syscalls 374 | 312 | #define __NR_mlock2 374 |
| 313 | #define NR_syscalls 375 | ||
| 313 | 314 | ||
| 314 | /* | 315 | /* |
| 315 | * There are some system calls that are not present on 64 bit, some | 316 | * There are some system calls that are not present on 64 bit, some |
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 09f194052df3..fac4eeddef91 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
| @@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, | |||
| 176 | COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); | 176 | COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); |
| 177 | COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); | 177 | COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); |
| 178 | COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); | 178 | COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); |
| 179 | COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); | ||
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index f98766ede4e1..48b37b8357e6 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c | |||
| @@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init); | |||
| 121 | void diag_stat_inc(enum diag_stat_enum nr) | 121 | void diag_stat_inc(enum diag_stat_enum nr) |
| 122 | { | 122 | { |
| 123 | this_cpu_inc(diag_stat.counter[nr]); | 123 | this_cpu_inc(diag_stat.counter[nr]); |
| 124 | trace_diagnose(diag_map[nr].code); | 124 | trace_s390_diagnose(diag_map[nr].code); |
| 125 | } | 125 | } |
| 126 | EXPORT_SYMBOL(diag_stat_inc); | 126 | EXPORT_SYMBOL(diag_stat_inc); |
| 127 | 127 | ||
| 128 | void diag_stat_inc_norecursion(enum diag_stat_enum nr) | 128 | void diag_stat_inc_norecursion(enum diag_stat_enum nr) |
| 129 | { | 129 | { |
| 130 | this_cpu_inc(diag_stat.counter[nr]); | 130 | this_cpu_inc(diag_stat.counter[nr]); |
| 131 | trace_diagnose_norecursion(diag_map[nr].code); | 131 | trace_s390_diagnose_norecursion(diag_map[nr].code); |
| 132 | } | 132 | } |
| 133 | EXPORT_SYMBOL(diag_stat_inc_norecursion); | 133 | EXPORT_SYMBOL(diag_stat_inc_norecursion); |
| 134 | 134 | ||
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 1255c6c5353e..301ee9c70688 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <asm/asm-offsets.h> | 26 | #include <asm/asm-offsets.h> |
| 27 | #include <asm/thread_info.h> | 27 | #include <asm/thread_info.h> |
| 28 | #include <asm/page.h> | 28 | #include <asm/page.h> |
| 29 | #include <asm/ptrace.h> | ||
| 29 | 30 | ||
| 30 | #define ARCH_OFFSET 4 | 31 | #define ARCH_OFFSET 4 |
| 31 | 32 | ||
| @@ -59,19 +60,6 @@ __HEAD | |||
| 59 | .long 0x020006e0,0x20000050 | 60 | .long 0x020006e0,0x20000050 |
| 60 | 61 | ||
| 61 | .org 0x200 | 62 | .org 0x200 |
| 62 | # | ||
| 63 | # subroutine to set architecture mode | ||
| 64 | # | ||
| 65 | .Lsetmode: | ||
| 66 | mvi __LC_AR_MODE_ID,1 # set esame flag | ||
| 67 | slr %r0,%r0 # set cpuid to zero | ||
| 68 | lhi %r1,2 # mode 2 = esame (dump) | ||
| 69 | sigp %r1,%r0,0x12 # switch to esame mode | ||
| 70 | bras %r13,0f | ||
| 71 | .fill 16,4,0x0 | ||
| 72 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | ||
| 73 | sam31 # switch to 31 bit addressing mode | ||
| 74 | br %r14 | ||
| 75 | 63 | ||
| 76 | # | 64 | # |
| 77 | # subroutine to wait for end I/O | 65 | # subroutine to wait for end I/O |
| @@ -159,7 +147,14 @@ __HEAD | |||
| 159 | .long 0x02200050,0x00000000 | 147 | .long 0x02200050,0x00000000 |
| 160 | 148 | ||
| 161 | iplstart: | 149 | iplstart: |
| 162 | bas %r14,.Lsetmode # Immediately switch to 64 bit mode | 150 | mvi __LC_AR_MODE_ID,1 # set esame flag |
| 151 | slr %r0,%r0 # set cpuid to zero | ||
| 152 | lhi %r1,2 # mode 2 = esame (dump) | ||
| 153 | sigp %r1,%r0,0x12 # switch to esame mode | ||
| 154 | bras %r13,0f | ||
| 155 | .fill 16,4,0x0 | ||
| 156 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | ||
| 157 | sam31 # switch to 31 bit addressing mode | ||
| 163 | lh %r1,0xb8 # test if subchannel number | 158 | lh %r1,0xb8 # test if subchannel number |
| 164 | bct %r1,.Lnoload # is valid | 159 | bct %r1,.Lnoload # is valid |
| 165 | l %r1,0xb8 # load ipl subchannel number | 160 | l %r1,0xb8 # load ipl subchannel number |
| @@ -269,71 +264,6 @@ iplstart: | |||
| 269 | .Lcpuid:.fill 8,1,0 | 264 | .Lcpuid:.fill 8,1,0 |
| 270 | 265 | ||
| 271 | # | 266 | # |
| 272 | # SALIPL loader support. Based on a patch by Rob van der Heij. | ||
| 273 | # This entry point is called directly from the SALIPL loader and | ||
| 274 | # doesn't need a builtin ipl record. | ||
| 275 | # | ||
| 276 | .org 0x800 | ||
| 277 | ENTRY(start) | ||
| 278 | stm %r0,%r15,0x07b0 # store registers | ||
| 279 | bas %r14,.Lsetmode # Immediately switch to 64 bit mode | ||
| 280 | basr %r12,%r0 | ||
| 281 | .base: | ||
| 282 | l %r11,.parm | ||
| 283 | l %r8,.cmd # pointer to command buffer | ||
| 284 | |||
| 285 | ltr %r9,%r9 # do we have SALIPL parameters? | ||
| 286 | bp .sk8x8 | ||
| 287 | |||
| 288 | mvc 0(64,%r8),0x00b0 # copy saved registers | ||
| 289 | xc 64(240-64,%r8),0(%r8) # remainder of buffer | ||
| 290 | tr 0(64,%r8),.lowcase | ||
| 291 | b .gotr | ||
| 292 | .sk8x8: | ||
| 293 | mvc 0(240,%r8),0(%r9) # copy iplparms into buffer | ||
| 294 | .gotr: | ||
| 295 | slr %r0,%r0 | ||
| 296 | st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) | ||
| 297 | st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) | ||
| 298 | j startup # continue with startup | ||
| 299 | .cmd: .long COMMAND_LINE # address of command line buffer | ||
| 300 | .parm: .long PARMAREA | ||
| 301 | .lowcase: | ||
| 302 | .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 | ||
| 303 | .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f | ||
| 304 | .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 | ||
| 305 | .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f | ||
| 306 | .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 | ||
| 307 | .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f | ||
| 308 | .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 | ||
| 309 | .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f | ||
| 310 | .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 | ||
| 311 | .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f | ||
| 312 | .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 | ||
| 313 | .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f | ||
| 314 | .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 | ||
| 315 | .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f | ||
| 316 | .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 | ||
| 317 | .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f | ||
| 318 | |||
| 319 | .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 | ||
| 320 | .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f | ||
| 321 | .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 | ||
| 322 | .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f | ||
| 323 | .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 | ||
| 324 | .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf | ||
| 325 | .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 | ||
| 326 | .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf | ||
| 327 | .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg | ||
| 328 | .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi | ||
| 329 | .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop | ||
| 330 | .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr | ||
| 331 | .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx | ||
| 332 | .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz | ||
| 333 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 | ||
| 334 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff | ||
| 335 | |||
| 336 | # | ||
| 337 | # startup-code at 0x10000, running in absolute addressing mode | 267 | # startup-code at 0x10000, running in absolute addressing mode |
| 338 | # this is called either by the ipl loader or directly by PSW restart | 268 | # this is called either by the ipl loader or directly by PSW restart |
| 339 | # or linload or SALIPL | 269 | # or linload or SALIPL |
| @@ -364,7 +294,7 @@ ENTRY(startup_kdump) | |||
| 364 | bras %r13,0f | 294 | bras %r13,0f |
| 365 | .fill 16,4,0x0 | 295 | .fill 16,4,0x0 |
| 366 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | 296 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs |
| 367 | sam31 # switch to 31 bit addressing mode | 297 | sam64 # switch to 64 bit addressing mode |
| 368 | basr %r13,0 # get base | 298 | basr %r13,0 # get base |
| 369 | .LPG0: | 299 | .LPG0: |
| 370 | xc 0x200(256),0x200 # partially clear lowcore | 300 | xc 0x200(256),0x200 # partially clear lowcore |
| @@ -395,7 +325,7 @@ ENTRY(startup_kdump) | |||
| 395 | jnz 1b | 325 | jnz 1b |
| 396 | j 4f | 326 | j 4f |
| 397 | 2: l %r15,.Lstack-.LPG0(%r13) | 327 | 2: l %r15,.Lstack-.LPG0(%r13) |
| 398 | ahi %r15,-96 | 328 | ahi %r15,-STACK_FRAME_OVERHEAD |
| 399 | la %r2,.Lals_string-.LPG0(%r13) | 329 | la %r2,.Lals_string-.LPG0(%r13) |
| 400 | l %r3,.Lsclp_print-.LPG0(%r13) | 330 | l %r3,.Lsclp_print-.LPG0(%r13) |
| 401 | basr %r14,%r3 | 331 | basr %r14,%r3 |
| @@ -429,8 +359,7 @@ ENTRY(startup_kdump) | |||
| 429 | .long 1, 0xc0000000 | 359 | .long 1, 0xc0000000 |
| 430 | #endif | 360 | #endif |
| 431 | 4: | 361 | 4: |
| 432 | /* Continue with 64bit startup code in head64.S */ | 362 | /* Continue with startup code in head64.S */ |
| 433 | sam64 # switch to 64 bit mode | ||
| 434 | jg startup_continue | 363 | jg startup_continue |
| 435 | 364 | ||
| 436 | .align 8 | 365 | .align 8 |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index f6d8acd7e136..b1f0a90f933b 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
| @@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type) | |||
| 121 | * Must be in data section since the bss section | 121 | * Must be in data section since the bss section |
| 122 | * is not cleared when these are accessed. | 122 | * is not cleared when these are accessed. |
| 123 | */ | 123 | */ |
| 124 | static u8 ipl_ssid __attribute__((__section__(".data"))) = 0; | ||
| 124 | static u16 ipl_devno __attribute__((__section__(".data"))) = 0; | 125 | static u16 ipl_devno __attribute__((__section__(".data"))) = 0; |
| 125 | u32 ipl_flags __attribute__((__section__(".data"))) = 0; | 126 | u32 ipl_flags __attribute__((__section__(".data"))) = 0; |
| 126 | 127 | ||
| @@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | |||
| 197 | return snprintf(page, PAGE_SIZE, _format, ##args); \ | 198 | return snprintf(page, PAGE_SIZE, _format, ##args); \ |
| 198 | } | 199 | } |
| 199 | 200 | ||
| 201 | #define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \ | ||
| 202 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | ||
| 203 | struct kobj_attribute *attr, \ | ||
| 204 | const char *buf, size_t len) \ | ||
| 205 | { \ | ||
| 206 | unsigned long long ssid, devno; \ | ||
| 207 | \ | ||
| 208 | if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \ | ||
| 209 | return -EINVAL; \ | ||
| 210 | \ | ||
| 211 | if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \ | ||
| 212 | return -EINVAL; \ | ||
| 213 | \ | ||
| 214 | _ipl_blk.ssid = ssid; \ | ||
| 215 | _ipl_blk.devno = devno; \ | ||
| 216 | return len; \ | ||
| 217 | } | ||
| 218 | |||
| 219 | #define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \ | ||
| 220 | IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \ | ||
| 221 | _ipl_blk.ssid, _ipl_blk.devno); \ | ||
| 222 | IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \ | ||
| 223 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | ||
| 224 | __ATTR(_name, (S_IRUGO | S_IWUSR), \ | ||
| 225 | sys_##_prefix##_##_name##_show, \ | ||
| 226 | sys_##_prefix##_##_name##_store) \ | ||
| 227 | |||
| 200 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ | 228 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ |
| 201 | IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ | 229 | IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ |
| 202 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 230 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
| @@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, | |||
| 395 | 423 | ||
| 396 | switch (ipl_info.type) { | 424 | switch (ipl_info.type) { |
| 397 | case IPL_TYPE_CCW: | 425 | case IPL_TYPE_CCW: |
| 398 | return sprintf(page, "0.0.%04x\n", ipl_devno); | 426 | return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno); |
| 399 | case IPL_TYPE_FCP: | 427 | case IPL_TYPE_FCP: |
| 400 | case IPL_TYPE_FCP_DUMP: | 428 | case IPL_TYPE_FCP_DUMP: |
| 401 | return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); | 429 | return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); |
| @@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, | |||
| 687 | struct bin_attribute *attr, | 715 | struct bin_attribute *attr, |
| 688 | char *buf, loff_t off, size_t count) | 716 | char *buf, loff_t off, size_t count) |
| 689 | { | 717 | { |
| 718 | size_t scpdata_len = count; | ||
| 690 | size_t padding; | 719 | size_t padding; |
| 691 | size_t scpdata_len; | ||
| 692 | |||
| 693 | if (off < 0) | ||
| 694 | return -EINVAL; | ||
| 695 | 720 | ||
| 696 | if (off >= DIAG308_SCPDATA_SIZE) | ||
| 697 | return -ENOSPC; | ||
| 698 | 721 | ||
| 699 | if (count > DIAG308_SCPDATA_SIZE - off) | 722 | if (off) |
| 700 | count = DIAG308_SCPDATA_SIZE - off; | 723 | return -EINVAL; |
| 701 | |||
| 702 | memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); | ||
| 703 | scpdata_len = off + count; | ||
| 704 | 724 | ||
| 725 | memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count); | ||
| 705 | if (scpdata_len % 8) { | 726 | if (scpdata_len % 8) { |
| 706 | padding = 8 - (scpdata_len % 8); | 727 | padding = 8 - (scpdata_len % 8); |
| 707 | memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, | 728 | memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, |
| @@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, | |||
| 717 | } | 738 | } |
| 718 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = | 739 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = |
| 719 | __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, | 740 | __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, |
| 720 | reipl_fcp_scpdata_write, PAGE_SIZE); | 741 | reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE); |
| 721 | 742 | ||
| 722 | static struct bin_attribute *reipl_fcp_bin_attrs[] = { | 743 | static struct bin_attribute *reipl_fcp_bin_attrs[] = { |
| 723 | &sys_reipl_fcp_scp_data_attr, | 744 | &sys_reipl_fcp_scp_data_attr, |
| @@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = { | |||
| 814 | }; | 835 | }; |
| 815 | 836 | ||
| 816 | /* CCW reipl device attributes */ | 837 | /* CCW reipl device attributes */ |
| 817 | 838 | DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw); | |
| 818 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
| 819 | reipl_block_ccw->ipl_info.ccw.devno); | ||
| 820 | 839 | ||
| 821 | /* NSS wrapper */ | 840 | /* NSS wrapper */ |
| 822 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, | 841 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, |
| @@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused) | |||
| 1056 | 1075 | ||
| 1057 | switch (reipl_method) { | 1076 | switch (reipl_method) { |
| 1058 | case REIPL_METHOD_CCW_CIO: | 1077 | case REIPL_METHOD_CCW_CIO: |
| 1078 | devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid; | ||
| 1059 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; | 1079 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; |
| 1060 | devid.ssid = 0; | ||
| 1061 | reipl_ccw_dev(&devid); | 1080 | reipl_ccw_dev(&devid); |
| 1062 | break; | 1081 | break; |
| 1063 | case REIPL_METHOD_CCW_VM: | 1082 | case REIPL_METHOD_CCW_VM: |
| @@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void) | |||
| 1192 | 1211 | ||
| 1193 | reipl_block_ccw_init(reipl_block_ccw); | 1212 | reipl_block_ccw_init(reipl_block_ccw); |
| 1194 | if (ipl_info.type == IPL_TYPE_CCW) { | 1213 | if (ipl_info.type == IPL_TYPE_CCW) { |
| 1214 | reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid; | ||
| 1195 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; | 1215 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; |
| 1196 | reipl_block_ccw_fill_parms(reipl_block_ccw); | 1216 | reipl_block_ccw_fill_parms(reipl_block_ccw); |
| 1197 | } | 1217 | } |
| @@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = { | |||
| 1336 | }; | 1356 | }; |
| 1337 | 1357 | ||
| 1338 | /* CCW dump device attributes */ | 1358 | /* CCW dump device attributes */ |
| 1339 | 1359 | DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw); | |
| 1340 | DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
| 1341 | dump_block_ccw->ipl_info.ccw.devno); | ||
| 1342 | 1360 | ||
| 1343 | static struct attribute *dump_ccw_attrs[] = { | 1361 | static struct attribute *dump_ccw_attrs[] = { |
| 1344 | &sys_dump_ccw_device_attr.attr, | 1362 | &sys_dump_ccw_device_attr.attr, |
| @@ -1418,8 +1436,8 @@ static void __dump_run(void *unused) | |||
| 1418 | 1436 | ||
| 1419 | switch (dump_method) { | 1437 | switch (dump_method) { |
| 1420 | case DUMP_METHOD_CCW_CIO: | 1438 | case DUMP_METHOD_CCW_CIO: |
| 1439 | devid.ssid = dump_block_ccw->ipl_info.ccw.ssid; | ||
| 1421 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; | 1440 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; |
| 1422 | devid.ssid = 0; | ||
| 1423 | reipl_ccw_dev(&devid); | 1441 | reipl_ccw_dev(&devid); |
| 1424 | break; | 1442 | break; |
| 1425 | case DUMP_METHOD_CCW_VM: | 1443 | case DUMP_METHOD_CCW_VM: |
| @@ -1939,14 +1957,14 @@ void __init setup_ipl(void) | |||
| 1939 | ipl_info.type = get_ipl_type(); | 1957 | ipl_info.type = get_ipl_type(); |
| 1940 | switch (ipl_info.type) { | 1958 | switch (ipl_info.type) { |
| 1941 | case IPL_TYPE_CCW: | 1959 | case IPL_TYPE_CCW: |
| 1960 | ipl_info.data.ccw.dev_id.ssid = ipl_ssid; | ||
| 1942 | ipl_info.data.ccw.dev_id.devno = ipl_devno; | 1961 | ipl_info.data.ccw.dev_id.devno = ipl_devno; |
| 1943 | ipl_info.data.ccw.dev_id.ssid = 0; | ||
| 1944 | break; | 1962 | break; |
| 1945 | case IPL_TYPE_FCP: | 1963 | case IPL_TYPE_FCP: |
| 1946 | case IPL_TYPE_FCP_DUMP: | 1964 | case IPL_TYPE_FCP_DUMP: |
| 1965 | ipl_info.data.fcp.dev_id.ssid = 0; | ||
| 1947 | ipl_info.data.fcp.dev_id.devno = | 1966 | ipl_info.data.fcp.dev_id.devno = |
| 1948 | IPL_PARMBLOCK_START->ipl_info.fcp.devno; | 1967 | IPL_PARMBLOCK_START->ipl_info.fcp.devno; |
| 1949 | ipl_info.data.fcp.dev_id.ssid = 0; | ||
| 1950 | ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; | 1968 | ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; |
| 1951 | ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; | 1969 | ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; |
| 1952 | break; | 1970 | break; |
| @@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void) | |||
| 1978 | if (cio_get_iplinfo(&iplinfo)) | 1996 | if (cio_get_iplinfo(&iplinfo)) |
| 1979 | return; | 1997 | return; |
| 1980 | 1998 | ||
| 1999 | ipl_ssid = iplinfo.ssid; | ||
| 1981 | ipl_devno = iplinfo.devno; | 2000 | ipl_devno = iplinfo.devno; |
| 1982 | ipl_flags |= IPL_DEVNO_VALID; | 2001 | ipl_flags |= IPL_DEVNO_VALID; |
| 1983 | if (!iplinfo.is_qdio) | 2002 | if (!iplinfo.is_qdio) |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 688a3aad9c79..114ee8b96f17 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
| @@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp) | |||
| 243 | 243 | ||
| 244 | static inline unsigned long brk_rnd(void) | 244 | static inline unsigned long brk_rnd(void) |
| 245 | { | 245 | { |
| 246 | /* 8MB for 32bit, 1GB for 64bit */ | 246 | return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; |
| 247 | if (is_32bit_task()) | ||
| 248 | return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; | ||
| 249 | else | ||
| 250 | return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; | ||
| 251 | } | 247 | } |
| 252 | 248 | ||
| 253 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 249 | unsigned long arch_randomize_brk(struct mm_struct *mm) |
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c index fa0bdff1d413..9fe7781a45cd 100644 --- a/arch/s390/kernel/sclp.c +++ b/arch/s390/kernel/sclp.c | |||
| @@ -21,7 +21,7 @@ static void _sclp_wait_int(void) | |||
| 21 | __ctl_load(cr0_new, 0, 0); | 21 | __ctl_load(cr0_new, 0, 0); |
| 22 | 22 | ||
| 23 | psw_ext_save = S390_lowcore.external_new_psw; | 23 | psw_ext_save = S390_lowcore.external_new_psw; |
| 24 | psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); | 24 | psw_mask = __extract_psw(); |
| 25 | S390_lowcore.external_new_psw.mask = psw_mask; | 25 | S390_lowcore.external_new_psw.mask = psw_mask; |
| 26 | psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; | 26 | psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; |
| 27 | S390_lowcore.ext_int_code = 0; | 27 | S390_lowcore.ext_int_code = 0; |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ce0cbd6ba7ca..c837bcacf218 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -764,9 +764,6 @@ static int __init setup_hwcaps(void) | |||
| 764 | get_cpu_id(&cpu_id); | 764 | get_cpu_id(&cpu_id); |
| 765 | add_device_randomness(&cpu_id, sizeof(cpu_id)); | 765 | add_device_randomness(&cpu_id, sizeof(cpu_id)); |
| 766 | switch (cpu_id.machine) { | 766 | switch (cpu_id.machine) { |
| 767 | case 0x9672: | ||
| 768 | strcpy(elf_platform, "g5"); | ||
| 769 | break; | ||
| 770 | case 0x2064: | 767 | case 0x2064: |
| 771 | case 0x2066: | 768 | case 0x2066: |
| 772 | default: /* Use "z900" as default for 64 bit kernels. */ | 769 | default: /* Use "z900" as default for 64 bit kernels. */ |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 8c56929c8d82..5378c3ea1b98 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
| @@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */ | |||
| 382 | SYSCALL(sys_recvfrom,compat_sys_recvfrom) | 382 | SYSCALL(sys_recvfrom,compat_sys_recvfrom) |
| 383 | SYSCALL(sys_recvmsg,compat_sys_recvmsg) | 383 | SYSCALL(sys_recvmsg,compat_sys_recvmsg) |
| 384 | SYSCALL(sys_shutdown,sys_shutdown) | 384 | SYSCALL(sys_shutdown,sys_shutdown) |
| 385 | SYSCALL(sys_mlock2,compat_sys_mlock2) | ||
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c index 73239bb576c4..21a5df99552b 100644 --- a/arch/s390/kernel/trace.c +++ b/arch/s390/kernel/trace.c | |||
| @@ -9,11 +9,11 @@ | |||
| 9 | #define CREATE_TRACE_POINTS | 9 | #define CREATE_TRACE_POINTS |
| 10 | #include <asm/trace/diag.h> | 10 | #include <asm/trace/diag.h> |
| 11 | 11 | ||
| 12 | EXPORT_TRACEPOINT_SYMBOL(diagnose); | 12 | EXPORT_TRACEPOINT_SYMBOL(s390_diagnose); |
| 13 | 13 | ||
| 14 | static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); | 14 | static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); |
| 15 | 15 | ||
| 16 | void trace_diagnose_norecursion(int diag_nr) | 16 | void trace_s390_diagnose_norecursion(int diag_nr) |
| 17 | { | 17 | { |
| 18 | unsigned long flags; | 18 | unsigned long flags; |
| 19 | unsigned int *depth; | 19 | unsigned int *depth; |
| @@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr) | |||
| 22 | depth = this_cpu_ptr(&diagnose_trace_depth); | 22 | depth = this_cpu_ptr(&diagnose_trace_depth); |
| 23 | if (*depth == 0) { | 23 | if (*depth == 0) { |
| 24 | (*depth)++; | 24 | (*depth)++; |
| 25 | trace_diagnose(diag_nr); | 25 | trace_s390_diagnose(diag_nr); |
| 26 | (*depth)--; | 26 | (*depth)--; |
| 27 | } | 27 | } |
| 28 | local_irq_restore(flags); | 28 | local_irq_restore(flags); |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 373e32346d68..6a75352f453c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
| @@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
| 1030 | src_id, 0); | 1030 | src_id, 0); |
| 1031 | 1031 | ||
| 1032 | /* sending vcpu invalid */ | 1032 | /* sending vcpu invalid */ |
| 1033 | if (src_id >= KVM_MAX_VCPUS || | 1033 | if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) |
| 1034 | kvm_get_vcpu(vcpu->kvm, src_id) == NULL) | ||
| 1035 | return -EINVAL; | 1034 | return -EINVAL; |
| 1036 | 1035 | ||
| 1037 | if (sclp.has_sigpif) | 1036 | if (sclp.has_sigpif) |
| @@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | |||
| 1110 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, | 1109 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
| 1111 | irq->u.emerg.code, 0); | 1110 | irq->u.emerg.code, 0); |
| 1112 | 1111 | ||
| 1112 | /* sending vcpu invalid */ | ||
| 1113 | if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) | ||
| 1114 | return -EINVAL; | ||
| 1115 | |||
| 1113 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); | 1116 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); |
| 1114 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); | 1117 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); |
| 1115 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); | 1118 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fe2f1c722dc..846589281b04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
| 342 | r = 0; | 342 | r = 0; |
| 343 | break; | 343 | break; |
| 344 | case KVM_CAP_S390_VECTOR_REGISTERS: | 344 | case KVM_CAP_S390_VECTOR_REGISTERS: |
| 345 | if (MACHINE_HAS_VX) { | 345 | mutex_lock(&kvm->lock); |
| 346 | if (atomic_read(&kvm->online_vcpus)) { | ||
| 347 | r = -EBUSY; | ||
| 348 | } else if (MACHINE_HAS_VX) { | ||
| 346 | set_kvm_facility(kvm->arch.model.fac->mask, 129); | 349 | set_kvm_facility(kvm->arch.model.fac->mask, 129); |
| 347 | set_kvm_facility(kvm->arch.model.fac->list, 129); | 350 | set_kvm_facility(kvm->arch.model.fac->list, 129); |
| 348 | r = 0; | 351 | r = 0; |
| 349 | } else | 352 | } else |
| 350 | r = -EINVAL; | 353 | r = -EINVAL; |
| 354 | mutex_unlock(&kvm->lock); | ||
| 351 | VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", | 355 | VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", |
| 352 | r ? "(not available)" : "(success)"); | 356 | r ? "(not available)" : "(success)"); |
| 353 | break; | 357 | break; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 77191b85ea7a..d76b51cb4b62 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
| @@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
| 660 | 660 | ||
| 661 | kvm_s390_get_regs_rre(vcpu, ®1, ®2); | 661 | kvm_s390_get_regs_rre(vcpu, ®1, ®2); |
| 662 | 662 | ||
| 663 | if (!MACHINE_HAS_PFMF) | 663 | if (!test_kvm_facility(vcpu->kvm, 8)) |
| 664 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); | 664 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); |
| 665 | 665 | ||
| 666 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 666 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index da690b69f9fe..77c22d685c7a 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
| @@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, | |||
| 291 | u16 cpu_addr, u32 parameter, u64 *status_reg) | 291 | u16 cpu_addr, u32 parameter, u64 *status_reg) |
| 292 | { | 292 | { |
| 293 | int rc; | 293 | int rc; |
| 294 | struct kvm_vcpu *dst_vcpu; | 294 | struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); |
| 295 | 295 | ||
| 296 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
| 297 | return SIGP_CC_NOT_OPERATIONAL; | ||
| 298 | |||
| 299 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | ||
| 300 | if (!dst_vcpu) | 296 | if (!dst_vcpu) |
| 301 | return SIGP_CC_NOT_OPERATIONAL; | 297 | return SIGP_CC_NOT_OPERATIONAL; |
| 302 | 298 | ||
| @@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
| 478 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | 474 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); |
| 479 | 475 | ||
| 480 | if (order_code == SIGP_EXTERNAL_CALL) { | 476 | if (order_code == SIGP_EXTERNAL_CALL) { |
| 481 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 477 | dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); |
| 482 | BUG_ON(dest_vcpu == NULL); | 478 | BUG_ON(dest_vcpu == NULL); |
| 483 | 479 | ||
| 484 | kvm_s390_vcpu_wakeup(dest_vcpu); | 480 | kvm_s390_vcpu_wakeup(dest_vcpu); |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c3c07d3505ba..c722400c7697 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
| @@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask); | |||
| 48 | 48 | ||
| 49 | static void __init setup_zero_pages(void) | 49 | static void __init setup_zero_pages(void) |
| 50 | { | 50 | { |
| 51 | struct cpuid cpu_id; | ||
| 52 | unsigned int order; | 51 | unsigned int order; |
| 53 | struct page *page; | 52 | struct page *page; |
| 54 | int i; | 53 | int i; |
| 55 | 54 | ||
| 56 | get_cpu_id(&cpu_id); | 55 | /* Latest machines require a mapping granularity of 512KB */ |
| 57 | switch (cpu_id.machine) { | 56 | order = 7; |
| 58 | case 0x9672: /* g5 */ | 57 | |
| 59 | case 0x2064: /* z900 */ | ||
| 60 | case 0x2066: /* z900 */ | ||
| 61 | case 0x2084: /* z990 */ | ||
| 62 | case 0x2086: /* z990 */ | ||
| 63 | case 0x2094: /* z9-109 */ | ||
| 64 | case 0x2096: /* z9-109 */ | ||
| 65 | order = 0; | ||
| 66 | break; | ||
| 67 | case 0x2097: /* z10 */ | ||
| 68 | case 0x2098: /* z10 */ | ||
| 69 | case 0x2817: /* z196 */ | ||
| 70 | case 0x2818: /* z196 */ | ||
| 71 | order = 2; | ||
| 72 | break; | ||
| 73 | case 0x2827: /* zEC12 */ | ||
| 74 | case 0x2828: /* zEC12 */ | ||
| 75 | order = 5; | ||
| 76 | break; | ||
| 77 | case 0x2964: /* z13 */ | ||
| 78 | default: | ||
| 79 | order = 7; | ||
| 80 | break; | ||
| 81 | } | ||
| 82 | /* Limit number of empty zero pages for small memory sizes */ | 58 | /* Limit number of empty zero pages for small memory sizes */ |
| 83 | while (order > 2 && (totalram_pages >> 10) < (1UL << order)) | 59 | while (order > 2 && (totalram_pages >> 10) < (1UL << order)) |
| 84 | order--; | 60 | order--; |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 6e552af08c76..ea01477b4aa6 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
| @@ -31,9 +31,6 @@ | |||
| 31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
| 32 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
| 33 | 33 | ||
| 34 | unsigned long mmap_rnd_mask; | ||
| 35 | static unsigned long mmap_align_mask; | ||
| 36 | |||
| 37 | static unsigned long stack_maxrandom_size(void) | 34 | static unsigned long stack_maxrandom_size(void) |
| 38 | { | 35 | { |
| 39 | if (!(current->flags & PF_RANDOMIZE)) | 36 | if (!(current->flags & PF_RANDOMIZE)) |
| @@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void) | |||
| 62 | 59 | ||
| 63 | unsigned long arch_mmap_rnd(void) | 60 | unsigned long arch_mmap_rnd(void) |
| 64 | { | 61 | { |
| 65 | if (is_32bit_task()) | 62 | return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT; |
| 66 | return (get_random_int() & 0x7ff) << PAGE_SHIFT; | ||
| 67 | else | ||
| 68 | return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; | ||
| 69 | } | 63 | } |
| 70 | 64 | ||
| 71 | static unsigned long mmap_base_legacy(unsigned long rnd) | 65 | static unsigned long mmap_base_legacy(unsigned long rnd) |
| @@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
| 92 | struct mm_struct *mm = current->mm; | 86 | struct mm_struct *mm = current->mm; |
| 93 | struct vm_area_struct *vma; | 87 | struct vm_area_struct *vma; |
| 94 | struct vm_unmapped_area_info info; | 88 | struct vm_unmapped_area_info info; |
| 95 | int do_color_align; | ||
| 96 | 89 | ||
| 97 | if (len > TASK_SIZE - mmap_min_addr) | 90 | if (len > TASK_SIZE - mmap_min_addr) |
| 98 | return -ENOMEM; | 91 | return -ENOMEM; |
| @@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
| 108 | return addr; | 101 | return addr; |
| 109 | } | 102 | } |
| 110 | 103 | ||
| 111 | do_color_align = 0; | ||
| 112 | if (filp || (flags & MAP_SHARED)) | ||
| 113 | do_color_align = !is_32bit_task(); | ||
| 114 | |||
| 115 | info.flags = 0; | 104 | info.flags = 0; |
| 116 | info.length = len; | 105 | info.length = len; |
| 117 | info.low_limit = mm->mmap_base; | 106 | info.low_limit = mm->mmap_base; |
| 118 | info.high_limit = TASK_SIZE; | 107 | info.high_limit = TASK_SIZE; |
| 119 | info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; | 108 | if (filp || (flags & MAP_SHARED)) |
| 109 | info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; | ||
| 110 | else | ||
| 111 | info.align_mask = 0; | ||
| 120 | info.align_offset = pgoff << PAGE_SHIFT; | 112 | info.align_offset = pgoff << PAGE_SHIFT; |
| 121 | return vm_unmapped_area(&info); | 113 | return vm_unmapped_area(&info); |
| 122 | } | 114 | } |
| @@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
| 130 | struct mm_struct *mm = current->mm; | 122 | struct mm_struct *mm = current->mm; |
| 131 | unsigned long addr = addr0; | 123 | unsigned long addr = addr0; |
| 132 | struct vm_unmapped_area_info info; | 124 | struct vm_unmapped_area_info info; |
| 133 | int do_color_align; | ||
| 134 | 125 | ||
| 135 | /* requested length too big for entire address space */ | 126 | /* requested length too big for entire address space */ |
| 136 | if (len > TASK_SIZE - mmap_min_addr) | 127 | if (len > TASK_SIZE - mmap_min_addr) |
| @@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
| 148 | return addr; | 139 | return addr; |
| 149 | } | 140 | } |
| 150 | 141 | ||
| 151 | do_color_align = 0; | ||
| 152 | if (filp || (flags & MAP_SHARED)) | ||
| 153 | do_color_align = !is_32bit_task(); | ||
| 154 | |||
| 155 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; | 142 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; |
| 156 | info.length = len; | 143 | info.length = len; |
| 157 | info.low_limit = max(PAGE_SIZE, mmap_min_addr); | 144 | info.low_limit = max(PAGE_SIZE, mmap_min_addr); |
| 158 | info.high_limit = mm->mmap_base; | 145 | info.high_limit = mm->mmap_base; |
| 159 | info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; | 146 | if (filp || (flags & MAP_SHARED)) |
| 147 | info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; | ||
| 148 | else | ||
| 149 | info.align_mask = 0; | ||
| 160 | info.align_offset = pgoff << PAGE_SHIFT; | 150 | info.align_offset = pgoff << PAGE_SHIFT; |
| 161 | addr = vm_unmapped_area(&info); | 151 | addr = vm_unmapped_area(&info); |
| 162 | 152 | ||
| @@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
| 254 | mm->get_unmapped_area = s390_get_unmapped_area_topdown; | 244 | mm->get_unmapped_area = s390_get_unmapped_area_topdown; |
| 255 | } | 245 | } |
| 256 | } | 246 | } |
| 257 | |||
| 258 | static int __init setup_mmap_rnd(void) | ||
| 259 | { | ||
| 260 | struct cpuid cpu_id; | ||
| 261 | |||
| 262 | get_cpu_id(&cpu_id); | ||
| 263 | switch (cpu_id.machine) { | ||
| 264 | case 0x9672: | ||
| 265 | case 0x2064: | ||
| 266 | case 0x2066: | ||
| 267 | case 0x2084: | ||
| 268 | case 0x2086: | ||
| 269 | case 0x2094: | ||
| 270 | case 0x2096: | ||
| 271 | case 0x2097: | ||
| 272 | case 0x2098: | ||
| 273 | case 0x2817: | ||
| 274 | case 0x2818: | ||
| 275 | case 0x2827: | ||
| 276 | case 0x2828: | ||
| 277 | mmap_rnd_mask = 0x7ffUL; | ||
| 278 | mmap_align_mask = 0UL; | ||
| 279 | break; | ||
| 280 | case 0x2964: /* z13 */ | ||
| 281 | default: | ||
| 282 | mmap_rnd_mask = 0x3ff80UL; | ||
| 283 | mmap_align_mask = 0x7fUL; | ||
| 284 | break; | ||
| 285 | } | ||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | early_initcall(setup_mmap_rnd); | ||
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 37d10f74425a..d348f2c09a1e 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
| @@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void) | |||
| 33 | return NULL; | 33 | return NULL; |
| 34 | 34 | ||
| 35 | for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) | 35 | for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) |
| 36 | *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; | 36 | *entry = ZPCI_TABLE_INVALID; |
| 37 | return table; | 37 | return table; |
| 38 | } | 38 | } |
| 39 | 39 | ||
| @@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void) | |||
| 51 | return NULL; | 51 | return NULL; |
| 52 | 52 | ||
| 53 | for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) | 53 | for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) |
| 54 | *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; | 54 | *entry = ZPCI_PTE_INVALID; |
| 55 | return table; | 55 | return table; |
| 56 | } | 56 | } |
| 57 | 57 | ||
| @@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry) | |||
| 95 | return pto; | 95 | return pto; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) | 98 | unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) |
| 99 | { | 99 | { |
| 100 | unsigned long *sto, *pto; | 100 | unsigned long *sto, *pto; |
| 101 | unsigned int rtx, sx, px; | 101 | unsigned int rtx, sx, px; |
| @@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr | |||
| 114 | return &pto[px]; | 114 | return &pto[px]; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr, | 117 | void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags) |
| 118 | dma_addr_t dma_addr, int flags) | ||
| 119 | { | 118 | { |
| 120 | unsigned long *entry; | ||
| 121 | |||
| 122 | entry = dma_walk_cpu_trans(dma_table, dma_addr); | ||
| 123 | if (!entry) { | ||
| 124 | WARN_ON_ONCE(1); | ||
| 125 | return; | ||
| 126 | } | ||
| 127 | |||
| 128 | if (flags & ZPCI_PTE_INVALID) { | 119 | if (flags & ZPCI_PTE_INVALID) { |
| 129 | invalidate_pt_entry(entry); | 120 | invalidate_pt_entry(entry); |
| 130 | return; | ||
| 131 | } else { | 121 | } else { |
| 132 | set_pt_pfaa(entry, page_addr); | 122 | set_pt_pfaa(entry, page_addr); |
| 133 | validate_pt_entry(entry); | 123 | validate_pt_entry(entry); |
| @@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, | |||
| 146 | u8 *page_addr = (u8 *) (pa & PAGE_MASK); | 136 | u8 *page_addr = (u8 *) (pa & PAGE_MASK); |
| 147 | dma_addr_t start_dma_addr = dma_addr; | 137 | dma_addr_t start_dma_addr = dma_addr; |
| 148 | unsigned long irq_flags; | 138 | unsigned long irq_flags; |
| 139 | unsigned long *entry; | ||
| 149 | int i, rc = 0; | 140 | int i, rc = 0; |
| 150 | 141 | ||
| 151 | if (!nr_pages) | 142 | if (!nr_pages) |
| 152 | return -EINVAL; | 143 | return -EINVAL; |
| 153 | 144 | ||
| 154 | spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); | 145 | spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); |
| 155 | if (!zdev->dma_table) | 146 | if (!zdev->dma_table) { |
| 147 | rc = -EINVAL; | ||
| 156 | goto no_refresh; | 148 | goto no_refresh; |
| 149 | } | ||
| 157 | 150 | ||
| 158 | for (i = 0; i < nr_pages; i++) { | 151 | for (i = 0; i < nr_pages; i++) { |
| 159 | dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr, | 152 | entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); |
| 160 | flags); | 153 | if (!entry) { |
| 154 | rc = -ENOMEM; | ||
| 155 | goto undo_cpu_trans; | ||
| 156 | } | ||
| 157 | dma_update_cpu_trans(entry, page_addr, flags); | ||
| 161 | page_addr += PAGE_SIZE; | 158 | page_addr += PAGE_SIZE; |
| 162 | dma_addr += PAGE_SIZE; | 159 | dma_addr += PAGE_SIZE; |
| 163 | } | 160 | } |
| @@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, | |||
| 176 | 173 | ||
| 177 | rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, | 174 | rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, |
| 178 | nr_pages * PAGE_SIZE); | 175 | nr_pages * PAGE_SIZE); |
| 176 | undo_cpu_trans: | ||
| 177 | if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { | ||
| 178 | flags = ZPCI_PTE_INVALID; | ||
| 179 | while (i-- > 0) { | ||
| 180 | page_addr -= PAGE_SIZE; | ||
| 181 | dma_addr -= PAGE_SIZE; | ||
| 182 | entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); | ||
| 183 | if (!entry) | ||
| 184 | break; | ||
| 185 | dma_update_cpu_trans(entry, page_addr, flags); | ||
| 186 | } | ||
| 187 | } | ||
| 179 | 188 | ||
| 180 | no_refresh: | 189 | no_refresh: |
| 181 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); | 190 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); |
| @@ -260,6 +269,16 @@ out: | |||
| 260 | spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); | 269 | spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); |
| 261 | } | 270 | } |
| 262 | 271 | ||
| 272 | static inline void zpci_err_dma(unsigned long rc, unsigned long addr) | ||
| 273 | { | ||
| 274 | struct { | ||
| 275 | unsigned long rc; | ||
| 276 | unsigned long addr; | ||
| 277 | } __packed data = {rc, addr}; | ||
| 278 | |||
| 279 | zpci_err_hex(&data, sizeof(data)); | ||
| 280 | } | ||
| 281 | |||
| 263 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | 282 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, |
| 264 | unsigned long offset, size_t size, | 283 | unsigned long offset, size_t size, |
| 265 | enum dma_data_direction direction, | 284 | enum dma_data_direction direction, |
| @@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | |||
| 270 | unsigned long pa = page_to_phys(page) + offset; | 289 | unsigned long pa = page_to_phys(page) + offset; |
| 271 | int flags = ZPCI_PTE_VALID; | 290 | int flags = ZPCI_PTE_VALID; |
| 272 | dma_addr_t dma_addr; | 291 | dma_addr_t dma_addr; |
| 292 | int ret; | ||
| 273 | 293 | ||
| 274 | /* This rounds up number of pages based on size and offset */ | 294 | /* This rounds up number of pages based on size and offset */ |
| 275 | nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); | 295 | nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); |
| 276 | iommu_page_index = dma_alloc_iommu(zdev, nr_pages); | 296 | iommu_page_index = dma_alloc_iommu(zdev, nr_pages); |
| 277 | if (iommu_page_index == -1) | 297 | if (iommu_page_index == -1) { |
| 298 | ret = -ENOSPC; | ||
| 278 | goto out_err; | 299 | goto out_err; |
| 300 | } | ||
| 279 | 301 | ||
| 280 | /* Use rounded up size */ | 302 | /* Use rounded up size */ |
| 281 | size = nr_pages * PAGE_SIZE; | 303 | size = nr_pages * PAGE_SIZE; |
| 282 | 304 | ||
| 283 | dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; | 305 | dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; |
| 284 | if (dma_addr + size > zdev->end_dma) | 306 | if (dma_addr + size > zdev->end_dma) { |
| 307 | ret = -ERANGE; | ||
| 285 | goto out_free; | 308 | goto out_free; |
| 309 | } | ||
| 286 | 310 | ||
| 287 | if (direction == DMA_NONE || direction == DMA_TO_DEVICE) | 311 | if (direction == DMA_NONE || direction == DMA_TO_DEVICE) |
| 288 | flags |= ZPCI_TABLE_PROTECTED; | 312 | flags |= ZPCI_TABLE_PROTECTED; |
| 289 | 313 | ||
| 290 | if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { | 314 | ret = dma_update_trans(zdev, pa, dma_addr, size, flags); |
| 291 | atomic64_add(nr_pages, &zdev->mapped_pages); | 315 | if (ret) |
| 292 | return dma_addr + (offset & ~PAGE_MASK); | 316 | goto out_free; |
| 293 | } | 317 | |
| 318 | atomic64_add(nr_pages, &zdev->mapped_pages); | ||
| 319 | return dma_addr + (offset & ~PAGE_MASK); | ||
| 294 | 320 | ||
| 295 | out_free: | 321 | out_free: |
| 296 | dma_free_iommu(zdev, iommu_page_index, nr_pages); | 322 | dma_free_iommu(zdev, iommu_page_index, nr_pages); |
| 297 | out_err: | 323 | out_err: |
| 298 | zpci_err("map error:\n"); | 324 | zpci_err("map error:\n"); |
| 299 | zpci_err_hex(&pa, sizeof(pa)); | 325 | zpci_err_dma(ret, pa); |
| 300 | return DMA_ERROR_CODE; | 326 | return DMA_ERROR_CODE; |
| 301 | } | 327 | } |
| 302 | 328 | ||
| @@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | |||
| 306 | { | 332 | { |
| 307 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 333 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
| 308 | unsigned long iommu_page_index; | 334 | unsigned long iommu_page_index; |
| 309 | int npages; | 335 | int npages, ret; |
| 310 | 336 | ||
| 311 | npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); | 337 | npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); |
| 312 | dma_addr = dma_addr & PAGE_MASK; | 338 | dma_addr = dma_addr & PAGE_MASK; |
| 313 | if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, | 339 | ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, |
| 314 | ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) { | 340 | ZPCI_PTE_INVALID); |
| 341 | if (ret) { | ||
| 315 | zpci_err("unmap error:\n"); | 342 | zpci_err("unmap error:\n"); |
| 316 | zpci_err_hex(&dma_addr, sizeof(dma_addr)); | 343 | zpci_err_dma(ret, dma_addr); |
| 344 | return; | ||
| 317 | } | 345 | } |
| 318 | 346 | ||
| 319 | atomic64_add(npages, &zdev->unmapped_pages); | 347 | atomic64_add(npages, &zdev->unmapped_pages); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 9f3905697f12..690b4027e17c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #define MSR_IA32_PERFCTR0 0x000000c1 | 35 | #define MSR_IA32_PERFCTR0 0x000000c1 |
| 36 | #define MSR_IA32_PERFCTR1 0x000000c2 | 36 | #define MSR_IA32_PERFCTR1 0x000000c2 |
| 37 | #define MSR_FSB_FREQ 0x000000cd | 37 | #define MSR_FSB_FREQ 0x000000cd |
| 38 | #define MSR_NHM_PLATFORM_INFO 0x000000ce | 38 | #define MSR_PLATFORM_INFO 0x000000ce |
| 39 | 39 | ||
| 40 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | 40 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
| 41 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) | 41 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) |
| @@ -44,7 +44,6 @@ | |||
| 44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) | 44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
| 45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) | 45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
| 46 | 46 | ||
| 47 | #define MSR_PLATFORM_INFO 0x000000ce | ||
| 48 | #define MSR_MTRRcap 0x000000fe | 47 | #define MSR_MTRRcap 0x000000fe |
| 49 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 48 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
| 50 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e | 49 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4ddd780aeac9..c2b7522cbf35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap); | |||
| 273 | 273 | ||
| 274 | static __always_inline void setup_smap(struct cpuinfo_x86 *c) | 274 | static __always_inline void setup_smap(struct cpuinfo_x86 *c) |
| 275 | { | 275 | { |
| 276 | unsigned long eflags; | 276 | unsigned long eflags = native_save_fl(); |
| 277 | 277 | ||
| 278 | /* This should have been cleared long ago */ | 278 | /* This should have been cleared long ago */ |
| 279 | raw_local_save_flags(eflags); | ||
| 280 | BUG_ON(eflags & X86_EFLAGS_AC); | 279 | BUG_ON(eflags & X86_EFLAGS_AC); |
| 281 | 280 | ||
| 282 | if (cpu_has(c, X86_FEATURE_SMAP)) { | 281 | if (cpu_has(c, X86_FEATURE_SMAP)) { |
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index ef29b742cea7..31c6a60505e6 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c | |||
| @@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame, | |||
| 385 | */ | 385 | */ |
| 386 | void fpu__init_prepare_fx_sw_frame(void) | 386 | void fpu__init_prepare_fx_sw_frame(void) |
| 387 | { | 387 | { |
| 388 | int fsave_header_size = sizeof(struct fregs_state); | ||
| 389 | int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; | 388 | int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; |
| 390 | 389 | ||
| 391 | if (config_enabled(CONFIG_X86_32)) | ||
| 392 | size += fsave_header_size; | ||
| 393 | |||
| 394 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | 390 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; |
| 395 | fx_sw_reserved.extended_size = size; | 391 | fx_sw_reserved.extended_size = size; |
| 396 | fx_sw_reserved.xfeatures = xfeatures_mask; | 392 | fx_sw_reserved.xfeatures = xfeatures_mask; |
| 397 | fx_sw_reserved.xstate_size = xstate_size; | 393 | fx_sw_reserved.xstate_size = xstate_size; |
| 398 | 394 | ||
| 399 | if (config_enabled(CONFIG_IA32_EMULATION)) { | 395 | if (config_enabled(CONFIG_IA32_EMULATION) || |
| 396 | config_enabled(CONFIG_X86_32)) { | ||
| 397 | int fsave_header_size = sizeof(struct fregs_state); | ||
| 398 | |||
| 400 | fx_sw_reserved_ia32 = fx_sw_reserved; | 399 | fx_sw_reserved_ia32 = fx_sw_reserved; |
| 401 | fx_sw_reserved_ia32.extended_size += fsave_header_size; | 400 | fx_sw_reserved_ia32.extended_size = size + fsave_header_size; |
| 402 | } | 401 | } |
| 403 | } | 402 | } |
| 404 | 403 | ||
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 6454f2731b56..70fc312221fc 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c | |||
| @@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature) | |||
| 694 | if (!boot_cpu_has(X86_FEATURE_XSAVE)) | 694 | if (!boot_cpu_has(X86_FEATURE_XSAVE)) |
| 695 | return NULL; | 695 | return NULL; |
| 696 | 696 | ||
| 697 | xsave = ¤t->thread.fpu.state.xsave; | ||
| 698 | /* | 697 | /* |
| 699 | * We should not ever be requesting features that we | 698 | * We should not ever be requesting features that we |
| 700 | * have not enabled. Remember that pcntxt_mask is | 699 | * have not enabled. Remember that pcntxt_mask is |
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index 94ea120fa21f..87e1762e2bca 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
| @@ -278,6 +278,12 @@ trace: | |||
| 278 | /* save_mcount_regs fills in first two parameters */ | 278 | /* save_mcount_regs fills in first two parameters */ |
| 279 | save_mcount_regs | 279 | save_mcount_regs |
| 280 | 280 | ||
| 281 | /* | ||
| 282 | * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not | ||
| 283 | * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the | ||
| 284 | * ip and parent ip are used and the list function is called when | ||
| 285 | * function tracing is enabled. | ||
| 286 | */ | ||
| 281 | call *ftrace_trace_function | 287 | call *ftrace_trace_function |
| 282 | 288 | ||
| 283 | restore_mcount_regs | 289 | restore_mcount_regs |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 87acc5221740..af823a388c19 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) | |||
| 7394 | 7394 | ||
| 7395 | switch (type) { | 7395 | switch (type) { |
| 7396 | case VMX_VPID_EXTENT_ALL_CONTEXT: | 7396 | case VMX_VPID_EXTENT_ALL_CONTEXT: |
| 7397 | if (get_vmcs12(vcpu)->virtual_processor_id == 0) { | ||
| 7398 | nested_vmx_failValid(vcpu, | ||
| 7399 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | ||
| 7400 | return 1; | ||
| 7401 | } | ||
| 7402 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); | 7397 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); |
| 7403 | nested_vmx_succeed(vcpu); | 7398 | nested_vmx_succeed(vcpu); |
| 7404 | break; | 7399 | break; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 00462bd63129..eed32283d22c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, | |||
| 2763 | return 0; | 2763 | return 0; |
| 2764 | } | 2764 | } |
| 2765 | 2765 | ||
| 2766 | static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) | ||
| 2767 | { | ||
| 2768 | return (!lapic_in_kernel(vcpu) || | ||
| 2769 | kvm_apic_accept_pic_intr(vcpu)); | ||
| 2770 | } | ||
| 2771 | |||
| 2772 | /* | ||
| 2773 | * if userspace requested an interrupt window, check that the | ||
| 2774 | * interrupt window is open. | ||
| 2775 | * | ||
| 2776 | * No need to exit to userspace if we already have an interrupt queued. | ||
| 2777 | */ | ||
| 2778 | static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) | ||
| 2779 | { | ||
| 2780 | return kvm_arch_interrupt_allowed(vcpu) && | ||
| 2781 | !kvm_cpu_has_interrupt(vcpu) && | ||
| 2782 | !kvm_event_needs_reinjection(vcpu) && | ||
| 2783 | kvm_cpu_accept_dm_intr(vcpu); | ||
| 2784 | } | ||
| 2785 | |||
| 2766 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | 2786 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
| 2767 | struct kvm_interrupt *irq) | 2787 | struct kvm_interrupt *irq) |
| 2768 | { | 2788 | { |
| @@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | |||
| 2786 | return -EEXIST; | 2806 | return -EEXIST; |
| 2787 | 2807 | ||
| 2788 | vcpu->arch.pending_external_vector = irq->irq; | 2808 | vcpu->arch.pending_external_vector = irq->irq; |
| 2809 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
| 2789 | return 0; | 2810 | return 0; |
| 2790 | } | 2811 | } |
| 2791 | 2812 | ||
| @@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) | |||
| 5910 | return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); | 5931 | return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); |
| 5911 | } | 5932 | } |
| 5912 | 5933 | ||
| 5913 | /* | ||
| 5914 | * Check if userspace requested an interrupt window, and that the | ||
| 5915 | * interrupt window is open. | ||
| 5916 | * | ||
| 5917 | * No need to exit to userspace if we already have an interrupt queued. | ||
| 5918 | */ | ||
| 5919 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) | 5934 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) |
| 5920 | { | 5935 | { |
| 5921 | if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) | 5936 | return vcpu->run->request_interrupt_window && |
| 5922 | return false; | 5937 | likely(!pic_in_kernel(vcpu->kvm)); |
| 5923 | |||
| 5924 | if (kvm_cpu_has_interrupt(vcpu)) | ||
| 5925 | return false; | ||
| 5926 | |||
| 5927 | return (irqchip_split(vcpu->kvm) | ||
| 5928 | ? kvm_apic_accept_pic_intr(vcpu) | ||
| 5929 | : kvm_arch_interrupt_allowed(vcpu)); | ||
| 5930 | } | 5938 | } |
| 5931 | 5939 | ||
| 5932 | static void post_kvm_run_save(struct kvm_vcpu *vcpu) | 5940 | static void post_kvm_run_save(struct kvm_vcpu *vcpu) |
| @@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) | |||
| 5937 | kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; | 5945 | kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; |
| 5938 | kvm_run->cr8 = kvm_get_cr8(vcpu); | 5946 | kvm_run->cr8 = kvm_get_cr8(vcpu); |
| 5939 | kvm_run->apic_base = kvm_get_apic_base(vcpu); | 5947 | kvm_run->apic_base = kvm_get_apic_base(vcpu); |
| 5940 | if (!irqchip_in_kernel(vcpu->kvm)) | 5948 | kvm_run->ready_for_interrupt_injection = |
| 5941 | kvm_run->ready_for_interrupt_injection = | 5949 | pic_in_kernel(vcpu->kvm) || |
| 5942 | kvm_arch_interrupt_allowed(vcpu) && | 5950 | kvm_vcpu_ready_for_interrupt_injection(vcpu); |
| 5943 | !kvm_cpu_has_interrupt(vcpu) && | ||
| 5944 | !kvm_event_needs_reinjection(vcpu); | ||
| 5945 | else if (!pic_in_kernel(vcpu->kvm)) | ||
| 5946 | kvm_run->ready_for_interrupt_injection = | ||
| 5947 | kvm_apic_accept_pic_intr(vcpu) && | ||
| 5948 | !kvm_cpu_has_interrupt(vcpu); | ||
| 5949 | else | ||
| 5950 | kvm_run->ready_for_interrupt_injection = 1; | ||
| 5951 | } | 5951 | } |
| 5952 | 5952 | ||
| 5953 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) | 5953 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) |
| @@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | |||
| 6360 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | 6360 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu) |
| 6361 | { | 6361 | { |
| 6362 | int r; | 6362 | int r; |
| 6363 | bool req_int_win = !lapic_in_kernel(vcpu) && | 6363 | bool req_int_win = |
| 6364 | vcpu->run->request_interrupt_window; | 6364 | dm_request_for_irq_injection(vcpu) && |
| 6365 | kvm_cpu_accept_dm_intr(vcpu); | ||
| 6366 | |||
| 6365 | bool req_immediate_exit = false; | 6367 | bool req_immediate_exit = false; |
| 6366 | 6368 | ||
| 6367 | if (vcpu->requests) { | 6369 | if (vcpu->requests) { |
| @@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6663 | if (kvm_cpu_has_pending_timer(vcpu)) | 6665 | if (kvm_cpu_has_pending_timer(vcpu)) |
| 6664 | kvm_inject_pending_timer_irqs(vcpu); | 6666 | kvm_inject_pending_timer_irqs(vcpu); |
| 6665 | 6667 | ||
| 6666 | if (dm_request_for_irq_injection(vcpu)) { | 6668 | if (dm_request_for_irq_injection(vcpu) && |
| 6669 | kvm_vcpu_ready_for_interrupt_injection(vcpu)) { | ||
| 6667 | r = 0; | 6670 | r = 0; |
| 6668 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 6671 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
| 6669 | ++vcpu->stat.request_irq_exits; | 6672 | ++vcpu->stat.request_irq_exits; |
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b0ae85f90f10..1202d5ca2fb5 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
| @@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm, | |||
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | /* | 588 | /* |
| 589 | * We only want to do a 4-byte get_user() on 32-bit. Otherwise, | ||
| 590 | * we might run off the end of the bounds table if we are on | ||
| 591 | * a 64-bit kernel and try to get 8 bytes. | ||
| 592 | */ | ||
| 593 | int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, | ||
| 594 | long __user *bd_entry_ptr) | ||
| 595 | { | ||
| 596 | u32 bd_entry_32; | ||
| 597 | int ret; | ||
| 598 | |||
| 599 | if (is_64bit_mm(mm)) | ||
| 600 | return get_user(*bd_entry_ret, bd_entry_ptr); | ||
| 601 | |||
| 602 | /* | ||
| 603 | * Note that get_user() uses the type of the *pointer* to | ||
| 604 | * establish the size of the get, not the destination. | ||
| 605 | */ | ||
| 606 | ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr); | ||
| 607 | *bd_entry_ret = bd_entry_32; | ||
| 608 | return ret; | ||
| 609 | } | ||
| 610 | |||
| 611 | /* | ||
| 589 | * Get the base of bounds tables pointed by specific bounds | 612 | * Get the base of bounds tables pointed by specific bounds |
| 590 | * directory entry. | 613 | * directory entry. |
| 591 | */ | 614 | */ |
| @@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm, | |||
| 605 | int need_write = 0; | 628 | int need_write = 0; |
| 606 | 629 | ||
| 607 | pagefault_disable(); | 630 | pagefault_disable(); |
| 608 | ret = get_user(bd_entry, bd_entry_ptr); | 631 | ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr); |
| 609 | pagefault_enable(); | 632 | pagefault_enable(); |
| 610 | if (!ret) | 633 | if (!ret) |
| 611 | break; | 634 | break; |
| @@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm, | |||
| 700 | */ | 723 | */ |
| 701 | static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) | 724 | static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) |
| 702 | { | 725 | { |
| 703 | unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits); | 726 | unsigned long long virt_space; |
| 704 | if (is_64bit_mm(mm)) | 727 | unsigned long long GB = (1ULL << 30); |
| 705 | return virt_space / MPX_BD_NR_ENTRIES_64; | 728 | |
| 706 | else | 729 | /* |
| 707 | return virt_space / MPX_BD_NR_ENTRIES_32; | 730 | * This covers 32-bit emulation as well as 32-bit kernels |
| 731 | running on 64-bit hardware. | ||
| 732 | */ | ||
| 733 | if (!is_64bit_mm(mm)) | ||
| 734 | return (4ULL * GB) / MPX_BD_NR_ENTRIES_32; | ||
| 735 | |||
| 736 | /* | ||
| 737 | * 'x86_virt_bits' returns what the hardware is capable | ||
| 738 | of, and returns the full >32-bit address space when | ||
| 739 | * running 32-bit kernels on 64-bit hardware. | ||
| 740 | */ | ||
| 741 | virt_space = (1ULL << boot_cpu_data.x86_virt_bits); | ||
| 742 | return virt_space / MPX_BD_NR_ENTRIES_64; | ||
| 708 | } | 743 | } |
| 709 | 744 | ||
| 710 | /* | 745 | /* |
diff --git a/block/blk-merge.c b/block/blk-merge.c index de5716d8e525..41a55ba0d78e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 76 | struct bio_vec bv, bvprv, *bvprvp = NULL; | 76 | struct bio_vec bv, bvprv, *bvprvp = NULL; |
| 77 | struct bvec_iter iter; | 77 | struct bvec_iter iter; |
| 78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; | 78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; |
| 79 | unsigned front_seg_size = bio->bi_seg_front_size; | ||
| 80 | bool do_split = true; | ||
| 81 | struct bio *new = NULL; | ||
| 79 | 82 | ||
| 80 | bio_for_each_segment(bv, bio, iter) { | 83 | bio_for_each_segment(bv, bio, iter) { |
| 81 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) | 84 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) |
| @@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 98 | 101 | ||
| 99 | seg_size += bv.bv_len; | 102 | seg_size += bv.bv_len; |
| 100 | bvprv = bv; | 103 | bvprv = bv; |
| 101 | bvprvp = &bv; | 104 | bvprvp = &bvprv; |
| 102 | sectors += bv.bv_len >> 9; | 105 | sectors += bv.bv_len >> 9; |
| 103 | continue; | 106 | continue; |
| 104 | } | 107 | } |
| @@ -108,16 +111,29 @@ new_segment: | |||
| 108 | 111 | ||
| 109 | nsegs++; | 112 | nsegs++; |
| 110 | bvprv = bv; | 113 | bvprv = bv; |
| 111 | bvprvp = &bv; | 114 | bvprvp = &bvprv; |
| 112 | seg_size = bv.bv_len; | 115 | seg_size = bv.bv_len; |
| 113 | sectors += bv.bv_len >> 9; | 116 | sectors += bv.bv_len >> 9; |
| 117 | |||
| 118 | if (nsegs == 1 && seg_size > front_seg_size) | ||
| 119 | front_seg_size = seg_size; | ||
| 114 | } | 120 | } |
| 115 | 121 | ||
| 116 | *segs = nsegs; | 122 | do_split = false; |
| 117 | return NULL; | ||
| 118 | split: | 123 | split: |
| 119 | *segs = nsegs; | 124 | *segs = nsegs; |
| 120 | return bio_split(bio, sectors, GFP_NOIO, bs); | 125 | |
| 126 | if (do_split) { | ||
| 127 | new = bio_split(bio, sectors, GFP_NOIO, bs); | ||
| 128 | if (new) | ||
| 129 | bio = new; | ||
| 130 | } | ||
| 131 | |||
| 132 | bio->bi_seg_front_size = front_seg_size; | ||
| 133 | if (seg_size > bio->bi_seg_back_size) | ||
| 134 | bio->bi_seg_back_size = seg_size; | ||
| 135 | |||
| 136 | return do_split ? new : NULL; | ||
| 121 | } | 137 | } |
| 122 | 138 | ||
| 123 | void blk_queue_split(struct request_queue *q, struct bio **bio, | 139 | void blk_queue_split(struct request_queue *q, struct bio **bio, |
| @@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
| 412 | if (sg) | 428 | if (sg) |
| 413 | sg_mark_end(sg); | 429 | sg_mark_end(sg); |
| 414 | 430 | ||
| 431 | /* | ||
| 432 | * Something must have gone wrong if the computed number of | ||
| 433 | * segments is bigger than the number of req's physical segments | ||
| 434 | */ | ||
| 435 | WARN_ON(nsegs > rq->nr_phys_segments); | ||
| 436 | |||
| 415 | return nsegs; | 437 | return nsegs; |
| 416 | } | 438 | } |
| 417 | EXPORT_SYMBOL(blk_rq_map_sg); | 439 | EXPORT_SYMBOL(blk_rq_map_sg); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ae09de62f19..6d6f8feb48c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1291 | blk_mq_bio_to_request(rq, bio); | 1291 | blk_mq_bio_to_request(rq, bio); |
| 1292 | 1292 | ||
| 1293 | /* | 1293 | /* |
| 1294 | * we do limited pluging. If bio can be merged, do merge. | 1294 | * We do limited plugging. If the bio can be merged, do that. |
| 1295 | * Otherwise the existing request in the plug list will be | 1295 | * Otherwise the existing request in the plug list will be |
| 1296 | * issued. So the plug list will have one request at most | 1296 | * issued. So the plug list will have one request at most |
| 1297 | */ | 1297 | */ |
| 1298 | if (plug) { | 1298 | if (plug) { |
| 1299 | /* | 1299 | /* |
| 1300 | * The plug list might get flushed before this. If that | 1300 | * The plug list might get flushed before this. If that |
| 1301 | * happens, same_queue_rq is invalid and plug list is empty | 1301 | * happens, same_queue_rq is invalid and plug list is |
| 1302 | **/ | 1302 | * empty |
| 1303 | */ | ||
| 1303 | if (same_queue_rq && !list_empty(&plug->mq_list)) { | 1304 | if (same_queue_rq && !list_empty(&plug->mq_list)) { |
| 1304 | old_rq = same_queue_rq; | 1305 | old_rq = same_queue_rq; |
| 1305 | list_del_init(&old_rq->queuelist); | 1306 | list_del_init(&old_rq->queuelist); |
| @@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1380 | blk_mq_bio_to_request(rq, bio); | 1381 | blk_mq_bio_to_request(rq, bio); |
| 1381 | if (!request_count) | 1382 | if (!request_count) |
| 1382 | trace_block_plug(q); | 1383 | trace_block_plug(q); |
| 1383 | else if (request_count >= BLK_MAX_REQUEST_COUNT) { | 1384 | |
| 1385 | blk_mq_put_ctx(data.ctx); | ||
| 1386 | |||
| 1387 | if (request_count >= BLK_MAX_REQUEST_COUNT) { | ||
| 1384 | blk_flush_plug_list(plug, false); | 1388 | blk_flush_plug_list(plug, false); |
| 1385 | trace_block_plug(q); | 1389 | trace_block_plug(q); |
| 1386 | } | 1390 | } |
| 1391 | |||
| 1387 | list_add_tail(&rq->queuelist, &plug->mq_list); | 1392 | list_add_tail(&rq->queuelist, &plug->mq_list); |
| 1388 | blk_mq_put_ctx(data.ctx); | ||
| 1389 | return cookie; | 1393 | return cookie; |
| 1390 | } | 1394 | } |
| 1391 | 1395 | ||
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 246dfb16c3d9..aa40aa93381b 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
| @@ -158,11 +158,13 @@ void blk_abort_request(struct request *req) | |||
| 158 | { | 158 | { |
| 159 | if (blk_mark_rq_complete(req)) | 159 | if (blk_mark_rq_complete(req)) |
| 160 | return; | 160 | return; |
| 161 | blk_delete_timer(req); | 161 | |
| 162 | if (req->q->mq_ops) | 162 | if (req->q->mq_ops) { |
| 163 | blk_mq_rq_timed_out(req, false); | 163 | blk_mq_rq_timed_out(req, false); |
| 164 | else | 164 | } else { |
| 165 | blk_delete_timer(req); | ||
| 165 | blk_rq_timed_out(req); | 166 | blk_rq_timed_out(req); |
| 167 | } | ||
| 166 | } | 168 | } |
| 167 | EXPORT_SYMBOL_GPL(blk_abort_request); | 169 | EXPORT_SYMBOL_GPL(blk_abort_request); |
| 168 | 170 | ||
diff --git a/block/blk.h b/block/blk.h index da722eb786df..c43926d3d74d 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq); | |||
| 72 | void __blk_queue_free_tags(struct request_queue *q); | 72 | void __blk_queue_free_tags(struct request_queue *q); |
| 73 | bool __blk_end_bidi_request(struct request *rq, int error, | 73 | bool __blk_end_bidi_request(struct request *rq, int error, |
| 74 | unsigned int nr_bytes, unsigned int bidi_bytes); | 74 | unsigned int nr_bytes, unsigned int bidi_bytes); |
| 75 | int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
| 76 | void blk_queue_exit(struct request_queue *q); | ||
| 77 | void blk_freeze_queue(struct request_queue *q); | 75 | void blk_freeze_queue(struct request_queue *q); |
| 78 | 76 | ||
| 79 | static inline void blk_queue_enter_live(struct request_queue *q) | 77 | static inline void blk_queue_enter_live(struct request_queue *q) |
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 3de89d4690f3..a163c487cf38 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
| @@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq, | |||
| 21 | static int noop_dispatch(struct request_queue *q, int force) | 21 | static int noop_dispatch(struct request_queue *q, int force) |
| 22 | { | 22 | { |
| 23 | struct noop_data *nd = q->elevator->elevator_data; | 23 | struct noop_data *nd = q->elevator->elevator_data; |
| 24 | struct request *rq; | ||
| 24 | 25 | ||
| 25 | if (!list_empty(&nd->queue)) { | 26 | rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); |
| 26 | struct request *rq; | 27 | if (rq) { |
| 27 | rq = list_entry(nd->queue.next, struct request, queuelist); | ||
| 28 | list_del_init(&rq->queuelist); | 28 | list_del_init(&rq->queuelist); |
| 29 | elv_dispatch_sort(q, rq); | 29 | elv_dispatch_sort(q, rq); |
| 30 | return 1; | 30 | return 1; |
| @@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq) | |||
| 46 | 46 | ||
| 47 | if (rq->queuelist.prev == &nd->queue) | 47 | if (rq->queuelist.prev == &nd->queue) |
| 48 | return NULL; | 48 | return NULL; |
| 49 | return list_entry(rq->queuelist.prev, struct request, queuelist); | 49 | return list_prev_entry(rq, queuelist); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static struct request * | 52 | static struct request * |
| @@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq) | |||
| 56 | 56 | ||
| 57 | if (rq->queuelist.next == &nd->queue) | 57 | if (rq->queuelist.next == &nd->queue) |
| 58 | return NULL; | 58 | return NULL; |
| 59 | return list_entry(rq->queuelist.next, struct request, queuelist); | 59 | return list_next_entry(rq, queuelist); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) | 62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) |
diff --git a/block/partitions/mac.c b/block/partitions/mac.c index c2c48ec64b27..621317ac4d59 100644 --- a/block/partitions/mac.c +++ b/block/partitions/mac.c | |||
| @@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) | |||
| 32 | Sector sect; | 32 | Sector sect; |
| 33 | unsigned char *data; | 33 | unsigned char *data; |
| 34 | int slot, blocks_in_map; | 34 | int slot, blocks_in_map; |
| 35 | unsigned secsize; | 35 | unsigned secsize, datasize, partoffset; |
| 36 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
| 37 | int found_root = 0; | 37 | int found_root = 0; |
| 38 | int found_root_goodness = 0; | 38 | int found_root_goodness = 0; |
| @@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) | |||
| 50 | } | 50 | } |
| 51 | secsize = be16_to_cpu(md->block_size); | 51 | secsize = be16_to_cpu(md->block_size); |
| 52 | put_dev_sector(sect); | 52 | put_dev_sector(sect); |
| 53 | data = read_part_sector(state, secsize/512, §); | 53 | datasize = round_down(secsize, 512); |
| 54 | data = read_part_sector(state, datasize / 512, §); | ||
| 54 | if (!data) | 55 | if (!data) |
| 55 | return -1; | 56 | return -1; |
| 56 | part = (struct mac_partition *) (data + secsize%512); | 57 | partoffset = secsize % 512; |
| 58 | if (partoffset + sizeof(*part) > datasize) | ||
| 59 | return -1; | ||
| 60 | part = (struct mac_partition *) (data + partoffset); | ||
| 57 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { | 61 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { |
| 58 | put_dev_sector(sect); | 62 | put_dev_sector(sect); |
| 59 | return 0; /* not a MacOS disk */ | 63 | return 0; /* not a MacOS disk */ |
diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
| @@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ | |||
| 63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ | 63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ |
| 64 | 64 | ||
| 65 | obj-$(CONFIG_PARPORT) += parport/ | 65 | obj-$(CONFIG_PARPORT) += parport/ |
| 66 | obj-$(CONFIG_NVM) += lightnvm/ | ||
| 66 | obj-y += base/ block/ misc/ mfd/ nfc/ | 67 | obj-y += base/ block/ misc/ mfd/ nfc/ |
| 67 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ | 68 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ |
| 68 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ | 69 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ |
| @@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ | |||
| 70 | obj-y += macintosh/ | 71 | obj-y += macintosh/ |
| 71 | obj-$(CONFIG_IDE) += ide/ | 72 | obj-$(CONFIG_IDE) += ide/ |
| 72 | obj-$(CONFIG_SCSI) += scsi/ | 73 | obj-$(CONFIG_SCSI) += scsi/ |
| 73 | obj-$(CONFIG_NVM) += lightnvm/ | ||
| 74 | obj-y += nvme/ | 74 | obj-y += nvme/ |
| 75 | obj-$(CONFIG_ATA) += ata/ | 75 | obj-$(CONFIG_ATA) += ata/ |
| 76 | obj-$(CONFIG_TARGET_CORE) += target/ | 76 | obj-$(CONFIG_TARGET_CORE) += target/ |
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3c083d2cc434..6730f965b379 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c | |||
| @@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); | |||
| 304 | 304 | ||
| 305 | static int register_pcc_channel(int pcc_subspace_idx) | 305 | static int register_pcc_channel(int pcc_subspace_idx) |
| 306 | { | 306 | { |
| 307 | struct acpi_pcct_subspace *cppc_ss; | 307 | struct acpi_pcct_hw_reduced *cppc_ss; |
| 308 | unsigned int len; | 308 | unsigned int len; |
| 309 | 309 | ||
| 310 | if (pcc_subspace_idx >= 0) { | 310 | if (pcc_subspace_idx >= 0) { |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f61a7c834540..b420fb46669d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) | |||
| 1103 | } | 1103 | } |
| 1104 | 1104 | ||
| 1105 | err_exit: | 1105 | err_exit: |
| 1106 | if (result && q) | 1106 | if (result) |
| 1107 | acpi_ec_delete_query(q); | 1107 | acpi_ec_delete_query(q); |
| 1108 | if (data) | 1108 | if (data) |
| 1109 | *data = value; | 1109 | *data = value; |
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index bf034f8b7c1a..2fa8304171e0 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/dmi.h> | ||
| 18 | #include "sbshc.h" | 17 | #include "sbshc.h" |
| 19 | 18 | ||
| 20 | #define PREFIX "ACPI: " | 19 | #define PREFIX "ACPI: " |
| @@ -30,6 +29,7 @@ struct acpi_smb_hc { | |||
| 30 | u8 query_bit; | 29 | u8 query_bit; |
| 31 | smbus_alarm_callback callback; | 30 | smbus_alarm_callback callback; |
| 32 | void *context; | 31 | void *context; |
| 32 | bool done; | ||
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | static int acpi_smbus_hc_add(struct acpi_device *device); | 35 | static int acpi_smbus_hc_add(struct acpi_device *device); |
| @@ -88,8 +88,6 @@ enum acpi_smb_offset { | |||
| 88 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ | 88 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ |
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | static bool macbook; | ||
| 92 | |||
| 93 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) | 91 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) |
| 94 | { | 92 | { |
| 95 | return ec_read(hc->offset + address, data); | 93 | return ec_read(hc->offset + address, data); |
| @@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data) | |||
| 100 | return ec_write(hc->offset + address, data); | 98 | return ec_write(hc->offset + address, data); |
| 101 | } | 99 | } |
| 102 | 100 | ||
| 103 | static inline int smb_check_done(struct acpi_smb_hc *hc) | ||
| 104 | { | ||
| 105 | union acpi_smb_status status = {.raw = 0}; | ||
| 106 | smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw); | ||
| 107 | return status.fields.done && (status.fields.status == SMBUS_OK); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) | 101 | static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) |
| 111 | { | 102 | { |
| 112 | if (wait_event_timeout(hc->wait, smb_check_done(hc), | 103 | if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout))) |
| 113 | msecs_to_jiffies(timeout))) | ||
| 114 | return 0; | 104 | return 0; |
| 115 | /* | 105 | return -ETIME; |
| 116 | * After the timeout happens, OS will try to check the status of SMbus. | ||
| 117 | * If the status is what OS expected, it will be regarded as the bogus | ||
| 118 | * timeout. | ||
| 119 | */ | ||
| 120 | if (smb_check_done(hc)) | ||
| 121 | return 0; | ||
| 122 | else | ||
| 123 | return -ETIME; | ||
| 124 | } | 106 | } |
| 125 | 107 | ||
| 126 | static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, | 108 | static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, |
| @@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, | |||
| 135 | } | 117 | } |
| 136 | 118 | ||
| 137 | mutex_lock(&hc->lock); | 119 | mutex_lock(&hc->lock); |
| 138 | if (macbook) | 120 | hc->done = false; |
| 139 | udelay(5); | ||
| 140 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) | 121 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) |
| 141 | goto end; | 122 | goto end; |
| 142 | if (temp) { | 123 | if (temp) { |
| @@ -235,8 +216,10 @@ static int smbus_alarm(void *context) | |||
| 235 | if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) | 216 | if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) |
| 236 | return 0; | 217 | return 0; |
| 237 | /* Check if it is only a completion notify */ | 218 | /* Check if it is only a completion notify */ |
| 238 | if (status.fields.done) | 219 | if (status.fields.done && status.fields.status == SMBUS_OK) { |
| 220 | hc->done = true; | ||
| 239 | wake_up(&hc->wait); | 221 | wake_up(&hc->wait); |
| 222 | } | ||
| 240 | if (!status.fields.alarm) | 223 | if (!status.fields.alarm) |
| 241 | return 0; | 224 | return 0; |
| 242 | mutex_lock(&hc->lock); | 225 | mutex_lock(&hc->lock); |
| @@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | |||
| 262 | acpi_handle handle, acpi_ec_query_func func, | 245 | acpi_handle handle, acpi_ec_query_func func, |
| 263 | void *data); | 246 | void *data); |
| 264 | 247 | ||
| 265 | static int macbook_dmi_match(const struct dmi_system_id *d) | ||
| 266 | { | ||
| 267 | pr_debug("Detected MacBook, enabling workaround\n"); | ||
| 268 | macbook = true; | ||
| 269 | return 0; | ||
| 270 | } | ||
| 271 | |||
| 272 | static struct dmi_system_id acpi_smbus_dmi_table[] = { | ||
| 273 | { macbook_dmi_match, "Apple MacBook", { | ||
| 274 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | ||
| 275 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, | ||
| 276 | }, | ||
| 277 | { }, | ||
| 278 | }; | ||
| 279 | |||
| 280 | static int acpi_smbus_hc_add(struct acpi_device *device) | 248 | static int acpi_smbus_hc_add(struct acpi_device *device) |
| 281 | { | 249 | { |
| 282 | int status; | 250 | int status; |
| 283 | unsigned long long val; | 251 | unsigned long long val; |
| 284 | struct acpi_smb_hc *hc; | 252 | struct acpi_smb_hc *hc; |
| 285 | 253 | ||
| 286 | dmi_check_system(acpi_smbus_dmi_table); | ||
| 287 | |||
| 288 | if (!device) | 254 | if (!device) |
| 289 | return -EINVAL; | 255 | return -EINVAL; |
| 290 | 256 | ||
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index eb6e67451dec..0d77cd6fd8d1 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c | |||
| @@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq) | |||
| 68 | struct wake_irq *wirq; | 68 | struct wake_irq *wirq; |
| 69 | int err; | 69 | int err; |
| 70 | 70 | ||
| 71 | if (irq < 0) | ||
| 72 | return -EINVAL; | ||
| 73 | |||
| 71 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); | 74 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); |
| 72 | if (!wirq) | 75 | if (!wirq) |
| 73 | return -ENOMEM; | 76 | return -ENOMEM; |
| @@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) | |||
| 167 | struct wake_irq *wirq; | 170 | struct wake_irq *wirq; |
| 168 | int err; | 171 | int err; |
| 169 | 172 | ||
| 173 | if (irq < 0) | ||
| 174 | return -EINVAL; | ||
| 175 | |||
| 170 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); | 176 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); |
| 171 | if (!wirq) | 177 | if (!wirq) |
| 172 | return -ENOMEM; | 178 | return -ENOMEM; |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
| @@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) | |||
| 3810 | sector_t capacity; | 3810 | sector_t capacity; |
| 3811 | unsigned int index = 0; | 3811 | unsigned int index = 0; |
| 3812 | struct kobject *kobj; | 3812 | struct kobject *kobj; |
| 3813 | unsigned char thd_name[16]; | ||
| 3814 | 3813 | ||
| 3815 | if (dd->disk) | 3814 | if (dd->disk) |
| 3816 | goto skip_create_disk; /* hw init done, before rebuild */ | 3815 | goto skip_create_disk; /* hw init done, before rebuild */ |
| @@ -3958,10 +3957,9 @@ skip_create_disk: | |||
| 3958 | } | 3957 | } |
| 3959 | 3958 | ||
| 3960 | start_service_thread: | 3959 | start_service_thread: |
| 3961 | sprintf(thd_name, "mtip_svc_thd_%02d", index); | ||
| 3962 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, | 3960 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, |
| 3963 | dd, dd->numa_node, "%s", | 3961 | dd, dd->numa_node, |
| 3964 | thd_name); | 3962 | "mtip_svc_thd_%02d", index); |
| 3965 | 3963 | ||
| 3966 | if (IS_ERR(dd->mtip_svc_handler)) { | 3964 | if (IS_ERR(dd->mtip_svc_handler)) { |
| 3967 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); | 3965 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..5c8ba5484d86 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 9 | #include <linux/blk-mq.h> | 9 | #include <linux/blk-mq.h> |
| 10 | #include <linux/hrtimer.h> | 10 | #include <linux/hrtimer.h> |
| 11 | #include <linux/lightnvm.h> | ||
| 11 | 12 | ||
| 12 | struct nullb_cmd { | 13 | struct nullb_cmd { |
| 13 | struct list_head list; | 14 | struct list_head list; |
| @@ -39,12 +40,14 @@ struct nullb { | |||
| 39 | 40 | ||
| 40 | struct nullb_queue *queues; | 41 | struct nullb_queue *queues; |
| 41 | unsigned int nr_queues; | 42 | unsigned int nr_queues; |
| 43 | char disk_name[DISK_NAME_LEN]; | ||
| 42 | }; | 44 | }; |
| 43 | 45 | ||
| 44 | static LIST_HEAD(nullb_list); | 46 | static LIST_HEAD(nullb_list); |
| 45 | static struct mutex lock; | 47 | static struct mutex lock; |
| 46 | static int null_major; | 48 | static int null_major; |
| 47 | static int nullb_indexes; | 49 | static int nullb_indexes; |
| 50 | static struct kmem_cache *ppa_cache; | ||
| 48 | 51 | ||
| 49 | struct completion_queue { | 52 | struct completion_queue { |
| 50 | struct llist_head list; | 53 | struct llist_head list; |
| @@ -119,6 +122,10 @@ static int nr_devices = 2; | |||
| 119 | module_param(nr_devices, int, S_IRUGO); | 122 | module_param(nr_devices, int, S_IRUGO); |
| 120 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); | 123 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); |
| 121 | 124 | ||
| 125 | static bool use_lightnvm; | ||
| 126 | module_param(use_lightnvm, bool, S_IRUGO); | ||
| 127 | MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); | ||
| 128 | |||
| 122 | static int irqmode = NULL_IRQ_SOFTIRQ; | 129 | static int irqmode = NULL_IRQ_SOFTIRQ; |
| 123 | 130 | ||
| 124 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) | 131 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) |
| @@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb) | |||
| 427 | { | 434 | { |
| 428 | list_del_init(&nullb->list); | 435 | list_del_init(&nullb->list); |
| 429 | 436 | ||
| 430 | del_gendisk(nullb->disk); | 437 | if (use_lightnvm) |
| 438 | nvm_unregister(nullb->disk_name); | ||
| 439 | else | ||
| 440 | del_gendisk(nullb->disk); | ||
| 431 | blk_cleanup_queue(nullb->q); | 441 | blk_cleanup_queue(nullb->q); |
| 432 | if (queue_mode == NULL_Q_MQ) | 442 | if (queue_mode == NULL_Q_MQ) |
| 433 | blk_mq_free_tag_set(&nullb->tag_set); | 443 | blk_mq_free_tag_set(&nullb->tag_set); |
| 434 | put_disk(nullb->disk); | 444 | if (!use_lightnvm) |
| 445 | put_disk(nullb->disk); | ||
| 435 | cleanup_queues(nullb); | 446 | cleanup_queues(nullb); |
| 436 | kfree(nullb); | 447 | kfree(nullb); |
| 437 | } | 448 | } |
| 438 | 449 | ||
| 450 | #ifdef CONFIG_NVM | ||
| 451 | |||
| 452 | static void null_lnvm_end_io(struct request *rq, int error) | ||
| 453 | { | ||
| 454 | struct nvm_rq *rqd = rq->end_io_data; | ||
| 455 | struct nvm_dev *dev = rqd->dev; | ||
| 456 | |||
| 457 | dev->mt->end_io(rqd, error); | ||
| 458 | |||
| 459 | blk_put_request(rq); | ||
| 460 | } | ||
| 461 | |||
| 462 | static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) | ||
| 463 | { | ||
| 464 | struct request *rq; | ||
| 465 | struct bio *bio = rqd->bio; | ||
| 466 | |||
| 467 | rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); | ||
| 468 | if (IS_ERR(rq)) | ||
| 469 | return -ENOMEM; | ||
| 470 | |||
| 471 | rq->cmd_type = REQ_TYPE_DRV_PRIV; | ||
| 472 | rq->__sector = bio->bi_iter.bi_sector; | ||
| 473 | rq->ioprio = bio_prio(bio); | ||
| 474 | |||
| 475 | if (bio_has_data(bio)) | ||
| 476 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
| 477 | |||
| 478 | rq->__data_len = bio->bi_iter.bi_size; | ||
| 479 | rq->bio = rq->biotail = bio; | ||
| 480 | |||
| 481 | rq->end_io_data = rqd; | ||
| 482 | |||
| 483 | blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) | ||
| 489 | { | ||
| 490 | sector_t size = gb * 1024 * 1024 * 1024ULL; | ||
| 491 | sector_t blksize; | ||
| 492 | struct nvm_id_group *grp; | ||
| 493 | |||
| 494 | id->ver_id = 0x1; | ||
| 495 | id->vmnt = 0; | ||
| 496 | id->cgrps = 1; | ||
| 497 | id->cap = 0x3; | ||
| 498 | id->dom = 0x1; | ||
| 499 | |||
| 500 | id->ppaf.blk_offset = 0; | ||
| 501 | id->ppaf.blk_len = 16; | ||
| 502 | id->ppaf.pg_offset = 16; | ||
| 503 | id->ppaf.pg_len = 16; | ||
| 504 | id->ppaf.sect_offset = 32; | ||
| 505 | id->ppaf.sect_len = 8; | ||
| 506 | id->ppaf.pln_offset = 40; | ||
| 507 | id->ppaf.pln_len = 8; | ||
| 508 | id->ppaf.lun_offset = 48; | ||
| 509 | id->ppaf.lun_len = 8; | ||
| 510 | id->ppaf.ch_offset = 56; | ||
| 511 | id->ppaf.ch_len = 8; | ||
| 512 | |||
| 513 | do_div(size, bs); /* convert size to pages */ | ||
| 514 | do_div(size, 256); /* concert size to pgs pr blk */ | ||
| 515 | grp = &id->groups[0]; | ||
| 516 | grp->mtype = 0; | ||
| 517 | grp->fmtype = 0; | ||
| 518 | grp->num_ch = 1; | ||
| 519 | grp->num_pg = 256; | ||
| 520 | blksize = size; | ||
| 521 | do_div(size, (1 << 16)); | ||
| 522 | grp->num_lun = size + 1; | ||
| 523 | do_div(blksize, grp->num_lun); | ||
| 524 | grp->num_blk = blksize; | ||
| 525 | grp->num_pln = 1; | ||
| 526 | |||
| 527 | grp->fpg_sz = bs; | ||
| 528 | grp->csecs = bs; | ||
| 529 | grp->trdt = 25000; | ||
| 530 | grp->trdm = 25000; | ||
| 531 | grp->tprt = 500000; | ||
| 532 | grp->tprm = 500000; | ||
| 533 | grp->tbet = 1500000; | ||
| 534 | grp->tbem = 1500000; | ||
| 535 | grp->mpos = 0x010101; /* single plane rwe */ | ||
| 536 | grp->cpar = hw_queue_depth; | ||
| 537 | |||
| 538 | return 0; | ||
| 539 | } | ||
| 540 | |||
| 541 | static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) | ||
| 542 | { | ||
| 543 | mempool_t *virtmem_pool; | ||
| 544 | |||
| 545 | virtmem_pool = mempool_create_slab_pool(64, ppa_cache); | ||
| 546 | if (!virtmem_pool) { | ||
| 547 | pr_err("null_blk: Unable to create virtual memory pool\n"); | ||
| 548 | return NULL; | ||
| 549 | } | ||
| 550 | |||
| 551 | return virtmem_pool; | ||
| 552 | } | ||
| 553 | |||
| 554 | static void null_lnvm_destroy_dma_pool(void *pool) | ||
| 555 | { | ||
| 556 | mempool_destroy(pool); | ||
| 557 | } | ||
| 558 | |||
| 559 | static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, | ||
| 560 | gfp_t mem_flags, dma_addr_t *dma_handler) | ||
| 561 | { | ||
| 562 | return mempool_alloc(pool, mem_flags); | ||
| 563 | } | ||
| 564 | |||
| 565 | static void null_lnvm_dev_dma_free(void *pool, void *entry, | ||
| 566 | dma_addr_t dma_handler) | ||
| 567 | { | ||
| 568 | mempool_free(entry, pool); | ||
| 569 | } | ||
| 570 | |||
| 571 | static struct nvm_dev_ops null_lnvm_dev_ops = { | ||
| 572 | .identity = null_lnvm_id, | ||
| 573 | .submit_io = null_lnvm_submit_io, | ||
| 574 | |||
| 575 | .create_dma_pool = null_lnvm_create_dma_pool, | ||
| 576 | .destroy_dma_pool = null_lnvm_destroy_dma_pool, | ||
| 577 | .dev_dma_alloc = null_lnvm_dev_dma_alloc, | ||
| 578 | .dev_dma_free = null_lnvm_dev_dma_free, | ||
| 579 | |||
| 580 | /* Simulate nvme protocol restriction */ | ||
| 581 | .max_phys_sect = 64, | ||
| 582 | }; | ||
| 583 | #else | ||
| 584 | static struct nvm_dev_ops null_lnvm_dev_ops; | ||
| 585 | #endif /* CONFIG_NVM */ | ||
| 586 | |||
| 439 | static int null_open(struct block_device *bdev, fmode_t mode) | 587 | static int null_open(struct block_device *bdev, fmode_t mode) |
| 440 | { | 588 | { |
| 441 | return 0; | 589 | return 0; |
| @@ -575,11 +723,6 @@ static int null_add_dev(void) | |||
| 575 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 723 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
| 576 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); | 724 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); |
| 577 | 725 | ||
| 578 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
| 579 | if (!disk) { | ||
| 580 | rv = -ENOMEM; | ||
| 581 | goto out_cleanup_blk_queue; | ||
| 582 | } | ||
| 583 | 726 | ||
| 584 | mutex_lock(&lock); | 727 | mutex_lock(&lock); |
| 585 | list_add_tail(&nullb->list, &nullb_list); | 728 | list_add_tail(&nullb->list, &nullb_list); |
| @@ -589,6 +732,21 @@ static int null_add_dev(void) | |||
| 589 | blk_queue_logical_block_size(nullb->q, bs); | 732 | blk_queue_logical_block_size(nullb->q, bs); |
| 590 | blk_queue_physical_block_size(nullb->q, bs); | 733 | blk_queue_physical_block_size(nullb->q, bs); |
| 591 | 734 | ||
| 735 | sprintf(nullb->disk_name, "nullb%d", nullb->index); | ||
| 736 | |||
| 737 | if (use_lightnvm) { | ||
| 738 | rv = nvm_register(nullb->q, nullb->disk_name, | ||
| 739 | &null_lnvm_dev_ops); | ||
| 740 | if (rv) | ||
| 741 | goto out_cleanup_blk_queue; | ||
| 742 | goto done; | ||
| 743 | } | ||
| 744 | |||
| 745 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
| 746 | if (!disk) { | ||
| 747 | rv = -ENOMEM; | ||
| 748 | goto out_cleanup_lightnvm; | ||
| 749 | } | ||
| 592 | size = gb * 1024 * 1024 * 1024ULL; | 750 | size = gb * 1024 * 1024 * 1024ULL; |
| 593 | set_capacity(disk, size >> 9); | 751 | set_capacity(disk, size >> 9); |
| 594 | 752 | ||
| @@ -598,10 +756,15 @@ static int null_add_dev(void) | |||
| 598 | disk->fops = &null_fops; | 756 | disk->fops = &null_fops; |
| 599 | disk->private_data = nullb; | 757 | disk->private_data = nullb; |
| 600 | disk->queue = nullb->q; | 758 | disk->queue = nullb->q; |
| 601 | sprintf(disk->disk_name, "nullb%d", nullb->index); | 759 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); |
| 760 | |||
| 602 | add_disk(disk); | 761 | add_disk(disk); |
| 762 | done: | ||
| 603 | return 0; | 763 | return 0; |
| 604 | 764 | ||
| 765 | out_cleanup_lightnvm: | ||
| 766 | if (use_lightnvm) | ||
| 767 | nvm_unregister(nullb->disk_name); | ||
| 605 | out_cleanup_blk_queue: | 768 | out_cleanup_blk_queue: |
| 606 | blk_cleanup_queue(nullb->q); | 769 | blk_cleanup_queue(nullb->q); |
| 607 | out_cleanup_tags: | 770 | out_cleanup_tags: |
| @@ -625,6 +788,18 @@ static int __init null_init(void) | |||
| 625 | bs = PAGE_SIZE; | 788 | bs = PAGE_SIZE; |
| 626 | } | 789 | } |
| 627 | 790 | ||
| 791 | if (use_lightnvm && bs != 4096) { | ||
| 792 | pr_warn("null_blk: LightNVM only supports 4k block size\n"); | ||
| 793 | pr_warn("null_blk: defaults block size to 4k\n"); | ||
| 794 | bs = 4096; | ||
| 795 | } | ||
| 796 | |||
| 797 | if (use_lightnvm && queue_mode != NULL_Q_MQ) { | ||
| 798 | pr_warn("null_blk: LightNVM only supported for blk-mq\n"); | ||
| 799 | pr_warn("null_blk: defaults queue mode to blk-mq\n"); | ||
| 800 | queue_mode = NULL_Q_MQ; | ||
| 801 | } | ||
| 802 | |||
| 628 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { | 803 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
| 629 | if (submit_queues < nr_online_nodes) { | 804 | if (submit_queues < nr_online_nodes) { |
| 630 | pr_warn("null_blk: submit_queues param is set to %u.", | 805 | pr_warn("null_blk: submit_queues param is set to %u.", |
| @@ -655,15 +830,27 @@ static int __init null_init(void) | |||
| 655 | if (null_major < 0) | 830 | if (null_major < 0) |
| 656 | return null_major; | 831 | return null_major; |
| 657 | 832 | ||
| 833 | if (use_lightnvm) { | ||
| 834 | ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), | ||
| 835 | 0, 0, NULL); | ||
| 836 | if (!ppa_cache) { | ||
| 837 | pr_err("null_blk: unable to create ppa cache\n"); | ||
| 838 | return -ENOMEM; | ||
| 839 | } | ||
| 840 | } | ||
| 841 | |||
| 658 | for (i = 0; i < nr_devices; i++) { | 842 | for (i = 0; i < nr_devices; i++) { |
| 659 | if (null_add_dev()) { | 843 | if (null_add_dev()) { |
| 660 | unregister_blkdev(null_major, "nullb"); | 844 | unregister_blkdev(null_major, "nullb"); |
| 661 | return -EINVAL; | 845 | goto err_ppa; |
| 662 | } | 846 | } |
| 663 | } | 847 | } |
| 664 | 848 | ||
| 665 | pr_info("null: module loaded\n"); | 849 | pr_info("null: module loaded\n"); |
| 666 | return 0; | 850 | return 0; |
| 851 | err_ppa: | ||
| 852 | kmem_cache_destroy(ppa_cache); | ||
| 853 | return -EINVAL; | ||
| 667 | } | 854 | } |
| 668 | 855 | ||
| 669 | static void __exit null_exit(void) | 856 | static void __exit null_exit(void) |
| @@ -678,6 +865,8 @@ static void __exit null_exit(void) | |||
| 678 | null_del_dev(nullb); | 865 | null_del_dev(nullb); |
| 679 | } | 866 | } |
| 680 | mutex_unlock(&lock); | 867 | mutex_unlock(&lock); |
| 868 | |||
| 869 | kmem_cache_destroy(ppa_cache); | ||
| 681 | } | 870 | } |
| 682 | 871 | ||
| 683 | module_init(null_init); | 872 | module_init(null_init); |
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index 9f1856948758..bf500e0e7362 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c | |||
| @@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = { | |||
| 117 | 117 | ||
| 118 | module_platform_driver(omap_ocp2scp_driver); | 118 | module_platform_driver(omap_ocp2scp_driver); |
| 119 | 119 | ||
| 120 | MODULE_ALIAS("platform: omap-ocp2scp"); | 120 | MODULE_ALIAS("platform:omap-ocp2scp"); |
| 121 | MODULE_AUTHOR("Texas Instruments Inc."); | 121 | MODULE_AUTHOR("Texas Instruments Inc."); |
| 122 | MODULE_DESCRIPTION("OMAP OCP2SCP driver"); | 122 | MODULE_DESCRIPTION("OMAP OCP2SCP driver"); |
| 123 | MODULE_LICENSE("GPL v2"); | 123 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 654f6f36a071..55fe9020459f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
| 412 | return rv; | 412 | return rv; |
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | static void start_check_enables(struct smi_info *smi_info) | 415 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) |
| 416 | { | ||
| 417 | smi_info->last_timeout_jiffies = jiffies; | ||
| 418 | mod_timer(&smi_info->si_timer, new_val); | ||
| 419 | smi_info->timer_running = true; | ||
| 420 | } | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Start a new message and (re)start the timer and thread. | ||
| 424 | */ | ||
| 425 | static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, | ||
| 426 | unsigned int size) | ||
| 427 | { | ||
| 428 | smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); | ||
| 429 | |||
| 430 | if (smi_info->thread) | ||
| 431 | wake_up_process(smi_info->thread); | ||
| 432 | |||
| 433 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); | ||
| 434 | } | ||
| 435 | |||
| 436 | static void start_check_enables(struct smi_info *smi_info, bool start_timer) | ||
| 416 | { | 437 | { |
| 417 | unsigned char msg[2]; | 438 | unsigned char msg[2]; |
| 418 | 439 | ||
| 419 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 440 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| 420 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 441 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
| 421 | 442 | ||
| 422 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 443 | if (start_timer) |
| 444 | start_new_msg(smi_info, msg, 2); | ||
| 445 | else | ||
| 446 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | ||
| 423 | smi_info->si_state = SI_CHECKING_ENABLES; | 447 | smi_info->si_state = SI_CHECKING_ENABLES; |
| 424 | } | 448 | } |
| 425 | 449 | ||
| 426 | static void start_clear_flags(struct smi_info *smi_info) | 450 | static void start_clear_flags(struct smi_info *smi_info, bool start_timer) |
| 427 | { | 451 | { |
| 428 | unsigned char msg[3]; | 452 | unsigned char msg[3]; |
| 429 | 453 | ||
| @@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info) | |||
| 432 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; | 456 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; |
| 433 | msg[2] = WDT_PRE_TIMEOUT_INT; | 457 | msg[2] = WDT_PRE_TIMEOUT_INT; |
| 434 | 458 | ||
| 435 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | 459 | if (start_timer) |
| 460 | start_new_msg(smi_info, msg, 3); | ||
| 461 | else | ||
| 462 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | ||
| 436 | smi_info->si_state = SI_CLEARING_FLAGS; | 463 | smi_info->si_state = SI_CLEARING_FLAGS; |
| 437 | } | 464 | } |
| 438 | 465 | ||
| @@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info) | |||
| 442 | smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; | 469 | smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; |
| 443 | smi_info->curr_msg->data_size = 2; | 470 | smi_info->curr_msg->data_size = 2; |
| 444 | 471 | ||
| 445 | smi_info->handlers->start_transaction( | 472 | start_new_msg(smi_info, smi_info->curr_msg->data, |
| 446 | smi_info->si_sm, | 473 | smi_info->curr_msg->data_size); |
| 447 | smi_info->curr_msg->data, | ||
| 448 | smi_info->curr_msg->data_size); | ||
| 449 | smi_info->si_state = SI_GETTING_MESSAGES; | 474 | smi_info->si_state = SI_GETTING_MESSAGES; |
| 450 | } | 475 | } |
| 451 | 476 | ||
| @@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info) | |||
| 455 | smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; | 480 | smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; |
| 456 | smi_info->curr_msg->data_size = 2; | 481 | smi_info->curr_msg->data_size = 2; |
| 457 | 482 | ||
| 458 | smi_info->handlers->start_transaction( | 483 | start_new_msg(smi_info, smi_info->curr_msg->data, |
| 459 | smi_info->si_sm, | 484 | smi_info->curr_msg->data_size); |
| 460 | smi_info->curr_msg->data, | ||
| 461 | smi_info->curr_msg->data_size); | ||
| 462 | smi_info->si_state = SI_GETTING_EVENTS; | 485 | smi_info->si_state = SI_GETTING_EVENTS; |
| 463 | } | 486 | } |
| 464 | 487 | ||
| 465 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | ||
| 466 | { | ||
| 467 | smi_info->last_timeout_jiffies = jiffies; | ||
| 468 | mod_timer(&smi_info->si_timer, new_val); | ||
| 469 | smi_info->timer_running = true; | ||
| 470 | } | ||
| 471 | |||
| 472 | /* | 488 | /* |
| 473 | * When we have a situtaion where we run out of memory and cannot | 489 | * When we have a situtaion where we run out of memory and cannot |
| 474 | * allocate messages, we just leave them in the BMC and run the system | 490 | * allocate messages, we just leave them in the BMC and run the system |
| @@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | |||
| 478 | * Note that we cannot just use disable_irq(), since the interrupt may | 494 | * Note that we cannot just use disable_irq(), since the interrupt may |
| 479 | * be shared. | 495 | * be shared. |
| 480 | */ | 496 | */ |
| 481 | static inline bool disable_si_irq(struct smi_info *smi_info) | 497 | static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) |
| 482 | { | 498 | { |
| 483 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 499 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
| 484 | smi_info->interrupt_disabled = true; | 500 | smi_info->interrupt_disabled = true; |
| 485 | start_check_enables(smi_info); | 501 | start_check_enables(smi_info, start_timer); |
| 486 | return true; | 502 | return true; |
| 487 | } | 503 | } |
| 488 | return false; | 504 | return false; |
| @@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) | |||
| 492 | { | 508 | { |
| 493 | if ((smi_info->irq) && (smi_info->interrupt_disabled)) { | 509 | if ((smi_info->irq) && (smi_info->interrupt_disabled)) { |
| 494 | smi_info->interrupt_disabled = false; | 510 | smi_info->interrupt_disabled = false; |
| 495 | start_check_enables(smi_info); | 511 | start_check_enables(smi_info, true); |
| 496 | return true; | 512 | return true; |
| 497 | } | 513 | } |
| 498 | return false; | 514 | return false; |
| @@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) | |||
| 510 | 526 | ||
| 511 | msg = ipmi_alloc_smi_msg(); | 527 | msg = ipmi_alloc_smi_msg(); |
| 512 | if (!msg) { | 528 | if (!msg) { |
| 513 | if (!disable_si_irq(smi_info)) | 529 | if (!disable_si_irq(smi_info, true)) |
| 514 | smi_info->si_state = SI_NORMAL; | 530 | smi_info->si_state = SI_NORMAL; |
| 515 | } else if (enable_si_irq(smi_info)) { | 531 | } else if (enable_si_irq(smi_info)) { |
| 516 | ipmi_free_smi_msg(msg); | 532 | ipmi_free_smi_msg(msg); |
| @@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
| 526 | /* Watchdog pre-timeout */ | 542 | /* Watchdog pre-timeout */ |
| 527 | smi_inc_stat(smi_info, watchdog_pretimeouts); | 543 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
| 528 | 544 | ||
| 529 | start_clear_flags(smi_info); | 545 | start_clear_flags(smi_info, true); |
| 530 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 546 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
| 531 | if (smi_info->intf) | 547 | if (smi_info->intf) |
| 532 | ipmi_smi_watchdog_pretimeout(smi_info->intf); | 548 | ipmi_smi_watchdog_pretimeout(smi_info->intf); |
| @@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 879 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 895 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| 880 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; | 896 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; |
| 881 | 897 | ||
| 882 | smi_info->handlers->start_transaction( | 898 | start_new_msg(smi_info, msg, 2); |
| 883 | smi_info->si_sm, msg, 2); | ||
| 884 | smi_info->si_state = SI_GETTING_FLAGS; | 899 | smi_info->si_state = SI_GETTING_FLAGS; |
| 885 | goto restart; | 900 | goto restart; |
| 886 | } | 901 | } |
| @@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 910 | * disable and messages disabled. | 925 | * disable and messages disabled. |
| 911 | */ | 926 | */ |
| 912 | if (smi_info->supports_event_msg_buff || smi_info->irq) { | 927 | if (smi_info->supports_event_msg_buff || smi_info->irq) { |
| 913 | start_check_enables(smi_info); | 928 | start_check_enables(smi_info, true); |
| 914 | } else { | 929 | } else { |
| 915 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); | 930 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); |
| 916 | if (!smi_info->curr_msg) | 931 | if (!smi_info->curr_msg) |
| @@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 920 | } | 935 | } |
| 921 | goto restart; | 936 | goto restart; |
| 922 | } | 937 | } |
| 938 | |||
| 939 | if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { | ||
| 940 | /* Ok it if fails, the timer will just go off. */ | ||
| 941 | if (del_timer(&smi_info->si_timer)) | ||
| 942 | smi_info->timer_running = false; | ||
| 943 | } | ||
| 944 | |||
| 923 | out: | 945 | out: |
| 924 | return si_sm_result; | 946 | return si_sm_result; |
| 925 | } | 947 | } |
| @@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = { | |||
| 2560 | .data = (void *)(unsigned long) SI_BT }, | 2582 | .data = (void *)(unsigned long) SI_BT }, |
| 2561 | {}, | 2583 | {}, |
| 2562 | }; | 2584 | }; |
| 2585 | MODULE_DEVICE_TABLE(of, of_ipmi_match); | ||
| 2563 | 2586 | ||
| 2564 | static int of_ipmi_probe(struct platform_device *dev) | 2587 | static int of_ipmi_probe(struct platform_device *dev) |
| 2565 | { | 2588 | { |
| @@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev) | |||
| 2646 | } | 2669 | } |
| 2647 | return 0; | 2670 | return 0; |
| 2648 | } | 2671 | } |
| 2649 | MODULE_DEVICE_TABLE(of, of_ipmi_match); | ||
| 2650 | #else | 2672 | #else |
| 2651 | #define of_ipmi_match NULL | 2673 | #define of_ipmi_match NULL |
| 2652 | static int of_ipmi_probe(struct platform_device *dev) | 2674 | static int of_ipmi_probe(struct platform_device *dev) |
| @@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
| 3613 | * Start clearing the flags before we enable interrupts or the | 3635 | * Start clearing the flags before we enable interrupts or the |
| 3614 | * timer to avoid racing with the timer. | 3636 | * timer to avoid racing with the timer. |
| 3615 | */ | 3637 | */ |
| 3616 | start_clear_flags(new_smi); | 3638 | start_clear_flags(new_smi, false); |
| 3617 | 3639 | ||
| 3618 | /* | 3640 | /* |
| 3619 | * IRQ is defined to be set when non-zero. req_events will | 3641 | * IRQ is defined to be set when non-zero. req_events will |
| @@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
| 3908 | poll(to_clean); | 3930 | poll(to_clean); |
| 3909 | schedule_timeout_uninterruptible(1); | 3931 | schedule_timeout_uninterruptible(1); |
| 3910 | } | 3932 | } |
| 3911 | disable_si_irq(to_clean); | 3933 | disable_si_irq(to_clean, false); |
| 3912 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3934 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
| 3913 | poll(to_clean); | 3935 | poll(to_clean); |
| 3914 | schedule_timeout_uninterruptible(1); | 3936 | schedule_timeout_uninterruptible(1); |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 0ac3bd1a5497..096f0cef4da1 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
| @@ -153,6 +153,9 @@ static int timeout = 10; | |||
| 153 | /* The pre-timeout is disabled by default. */ | 153 | /* The pre-timeout is disabled by default. */ |
| 154 | static int pretimeout; | 154 | static int pretimeout; |
| 155 | 155 | ||
| 156 | /* Default timeout to set on panic */ | ||
| 157 | static int panic_wdt_timeout = 255; | ||
| 158 | |||
| 156 | /* Default action is to reset the board on a timeout. */ | 159 | /* Default action is to reset the board on a timeout. */ |
| 157 | static unsigned char action_val = WDOG_TIMEOUT_RESET; | 160 | static unsigned char action_val = WDOG_TIMEOUT_RESET; |
| 158 | 161 | ||
| @@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds."); | |||
| 293 | module_param(pretimeout, timeout, 0644); | 296 | module_param(pretimeout, timeout, 0644); |
| 294 | MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); | 297 | MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); |
| 295 | 298 | ||
| 299 | module_param(panic_wdt_timeout, timeout, 0644); | ||
| 300 | MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds."); | ||
| 301 | |||
| 296 | module_param_cb(action, ¶m_ops_str, action_op, 0644); | 302 | module_param_cb(action, ¶m_ops_str, action_op, 0644); |
| 297 | MODULE_PARM_DESC(action, "Timeout action. One of: " | 303 | MODULE_PARM_DESC(action, "Timeout action. One of: " |
| 298 | "reset, none, power_cycle, power_off."); | 304 | "reset, none, power_cycle, power_off."); |
| @@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
| 1189 | /* Make sure we do this only once. */ | 1195 | /* Make sure we do this only once. */ |
| 1190 | panic_event_handled = 1; | 1196 | panic_event_handled = 1; |
| 1191 | 1197 | ||
| 1192 | timeout = 255; | 1198 | timeout = panic_wdt_timeout; |
| 1193 | pretimeout = 0; | 1199 | pretimeout = 0; |
| 1194 | panic_halt_ipmi_set_timeout(); | 1200 | panic_halt_ipmi_set_timeout(); |
| 1195 | } | 1201 | } |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 71cfdf7c9708..2eb5f0efae90 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | menu "Clock Source drivers" | 1 | menu "Clock Source drivers" |
| 2 | depends on !ARCH_USES_GETTIMEOFFSET | ||
| 2 | 3 | ||
| 3 | config CLKSRC_OF | 4 | config CLKSRC_OF |
| 4 | bool | 5 | bool |
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 10202f1fdfd7..517e1c7624d4 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c | |||
| @@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq) | |||
| 203 | int err; | 203 | int err; |
| 204 | 204 | ||
| 205 | ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); | 205 | ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); |
| 206 | ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); | 206 | ftm_writel(~0u, priv->clkevt_base + FTM_MOD); |
| 207 | 207 | ||
| 208 | ftm_reset_counter(priv->clkevt_base); | 208 | ftm_reset_counter(priv->clkevt_base); |
| 209 | 209 | ||
| @@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq) | |||
| 230 | int err; | 230 | int err; |
| 231 | 231 | ||
| 232 | ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); | 232 | ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); |
| 233 | ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); | 233 | ftm_writel(~0u, priv->clksrc_base + FTM_MOD); |
| 234 | 234 | ||
| 235 | ftm_reset_counter(priv->clksrc_base); | 235 | ftm_reset_counter(priv->clksrc_base); |
| 236 | 236 | ||
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 1582c1c016b0..235a1ba73d92 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ | |||
| 84 | config ARM_MT8173_CPUFREQ | 84 | config ARM_MT8173_CPUFREQ |
| 85 | bool "Mediatek MT8173 CPUFreq support" | 85 | bool "Mediatek MT8173 CPUFreq support" |
| 86 | depends on ARCH_MEDIATEK && REGULATOR | 86 | depends on ARCH_MEDIATEK && REGULATOR |
| 87 | depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) | ||
| 87 | depends on !CPU_THERMAL || THERMAL=y | 88 | depends on !CPU_THERMAL || THERMAL=y |
| 88 | select PM_OPP | 89 | select PM_OPP |
| 89 | help | 90 | help |
| @@ -201,7 +202,7 @@ config ARM_SA1110_CPUFREQ | |||
| 201 | 202 | ||
| 202 | config ARM_SCPI_CPUFREQ | 203 | config ARM_SCPI_CPUFREQ |
| 203 | tristate "SCPI based CPUfreq driver" | 204 | tristate "SCPI based CPUfreq driver" |
| 204 | depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL | 205 | depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI |
| 205 | help | 206 | help |
| 206 | This adds the CPUfreq driver support for ARM big.LITTLE platforms | 207 | This adds the CPUfreq driver support for ARM big.LITTLE platforms |
| 207 | using SCPI protocol for CPU power management. | 208 | using SCPI protocol for CPU power management. |
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index adbd1de1cea5..c59bdcb83217 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | config X86_INTEL_PSTATE | 5 | config X86_INTEL_PSTATE |
| 6 | bool "Intel P state control" | 6 | bool "Intel P state control" |
| 7 | depends on X86 | 7 | depends on X86 |
| 8 | select ACPI_PROCESSOR if ACPI | ||
| 9 | help | 8 | help |
| 10 | This driver provides a P state for Intel core processors. | 9 | This driver provides a P state for Intel core processors. |
| 11 | The driver implements an internal governor and will become | 10 | The driver implements an internal governor and will become |
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index e8cb334094b0..7c0bdfb1a2ca 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c | |||
| @@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 98 | policy->max = cpu->perf_caps.highest_perf; | 98 | policy->max = cpu->perf_caps.highest_perf; |
| 99 | policy->cpuinfo.min_freq = policy->min; | 99 | policy->cpuinfo.min_freq = policy->min; |
| 100 | policy->cpuinfo.max_freq = policy->max; | 100 | policy->cpuinfo.max_freq = policy->max; |
| 101 | policy->shared_type = cpu->shared_type; | ||
| 101 | 102 | ||
| 102 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | 103 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) |
| 103 | cpumask_copy(policy->cpus, cpu->shared_cpu_map); | 104 | cpumask_copy(policy->cpus, cpu->shared_cpu_map); |
| 104 | else { | 105 | else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { |
| 105 | /* Support only SW_ANY for now. */ | 106 | /* Support only SW_ANY for now. */ |
| 106 | pr_debug("Unsupported CPU co-ord type\n"); | 107 | pr_debug("Unsupported CPU co-ord type\n"); |
| 107 | return -EFAULT; | 108 | return -EFAULT; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7c48e7316d91..a83c995a62df 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1401,13 +1401,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | |||
| 1401 | } | 1401 | } |
| 1402 | 1402 | ||
| 1403 | cpumask_clear_cpu(cpu, policy->real_cpus); | 1403 | cpumask_clear_cpu(cpu, policy->real_cpus); |
| 1404 | remove_cpu_dev_symlink(policy, cpu); | ||
| 1404 | 1405 | ||
| 1405 | if (cpumask_empty(policy->real_cpus)) { | 1406 | if (cpumask_empty(policy->real_cpus)) |
| 1406 | cpufreq_policy_free(policy, true); | 1407 | cpufreq_policy_free(policy, true); |
| 1407 | return; | ||
| 1408 | } | ||
| 1409 | |||
| 1410 | remove_cpu_dev_symlink(policy, cpu); | ||
| 1411 | } | 1408 | } |
| 1412 | 1409 | ||
| 1413 | static void handle_update(struct work_struct *work) | 1410 | static void handle_update(struct work_struct *work) |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2e31d097def6..4d07cbd2b23c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -34,14 +34,10 @@ | |||
| 34 | #include <asm/cpu_device_id.h> | 34 | #include <asm/cpu_device_id.h> |
| 35 | #include <asm/cpufeature.h> | 35 | #include <asm/cpufeature.h> |
| 36 | 36 | ||
| 37 | #if IS_ENABLED(CONFIG_ACPI) | 37 | #define ATOM_RATIOS 0x66a |
| 38 | #include <acpi/processor.h> | 38 | #define ATOM_VIDS 0x66b |
| 39 | #endif | 39 | #define ATOM_TURBO_RATIOS 0x66c |
| 40 | 40 | #define ATOM_TURBO_VIDS 0x66d | |
| 41 | #define BYT_RATIOS 0x66a | ||
| 42 | #define BYT_VIDS 0x66b | ||
| 43 | #define BYT_TURBO_RATIOS 0x66c | ||
| 44 | #define BYT_TURBO_VIDS 0x66d | ||
| 45 | 41 | ||
| 46 | #define FRAC_BITS 8 | 42 | #define FRAC_BITS 8 |
| 47 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 43 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
| @@ -117,9 +113,6 @@ struct cpudata { | |||
| 117 | u64 prev_mperf; | 113 | u64 prev_mperf; |
| 118 | u64 prev_tsc; | 114 | u64 prev_tsc; |
| 119 | struct sample sample; | 115 | struct sample sample; |
| 120 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 121 | struct acpi_processor_performance acpi_perf_data; | ||
| 122 | #endif | ||
| 123 | }; | 116 | }; |
| 124 | 117 | ||
| 125 | static struct cpudata **all_cpu_data; | 118 | static struct cpudata **all_cpu_data; |
| @@ -150,7 +143,6 @@ struct cpu_defaults { | |||
| 150 | static struct pstate_adjust_policy pid_params; | 143 | static struct pstate_adjust_policy pid_params; |
| 151 | static struct pstate_funcs pstate_funcs; | 144 | static struct pstate_funcs pstate_funcs; |
| 152 | static int hwp_active; | 145 | static int hwp_active; |
| 153 | static int no_acpi_perf; | ||
| 154 | 146 | ||
| 155 | struct perf_limits { | 147 | struct perf_limits { |
| 156 | int no_turbo; | 148 | int no_turbo; |
| @@ -163,8 +155,6 @@ struct perf_limits { | |||
| 163 | int max_sysfs_pct; | 155 | int max_sysfs_pct; |
| 164 | int min_policy_pct; | 156 | int min_policy_pct; |
| 165 | int min_sysfs_pct; | 157 | int min_sysfs_pct; |
| 166 | int max_perf_ctl; | ||
| 167 | int min_perf_ctl; | ||
| 168 | }; | 158 | }; |
| 169 | 159 | ||
| 170 | static struct perf_limits performance_limits = { | 160 | static struct perf_limits performance_limits = { |
| @@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = { | |||
| 191 | .max_sysfs_pct = 100, | 181 | .max_sysfs_pct = 100, |
| 192 | .min_policy_pct = 0, | 182 | .min_policy_pct = 0, |
| 193 | .min_sysfs_pct = 0, | 183 | .min_sysfs_pct = 0, |
| 194 | .max_perf_ctl = 0, | ||
| 195 | .min_perf_ctl = 0, | ||
| 196 | }; | 184 | }; |
| 197 | 185 | ||
| 198 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 186 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE |
| @@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits; | |||
| 201 | static struct perf_limits *limits = &powersave_limits; | 189 | static struct perf_limits *limits = &powersave_limits; |
| 202 | #endif | 190 | #endif |
| 203 | 191 | ||
| 204 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 205 | /* | ||
| 206 | * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and | ||
| 207 | * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and | ||
| 208 | * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state | ||
| 209 | * ratio, out of it only high 8 bits are used. For example 0x1700 is setting | ||
| 210 | * target ratio 0x17. The _PSS control value stores in a format which can be | ||
| 211 | * directly written to PERF_CTL MSR. But in intel_pstate driver this shift | ||
| 212 | * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). | ||
| 213 | * This function converts the _PSS control value to intel pstate driver format | ||
| 214 | * for comparison and assignment. | ||
| 215 | */ | ||
| 216 | static int convert_to_native_pstate_format(struct cpudata *cpu, int index) | ||
| 217 | { | ||
| 218 | return cpu->acpi_perf_data.states[index].control >> 8; | ||
| 219 | } | ||
| 220 | |||
| 221 | static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) | ||
| 222 | { | ||
| 223 | struct cpudata *cpu; | ||
| 224 | int ret; | ||
| 225 | bool turbo_absent = false; | ||
| 226 | int max_pstate_index; | ||
| 227 | int min_pss_ctl, max_pss_ctl, turbo_pss_ctl; | ||
| 228 | int i; | ||
| 229 | |||
| 230 | cpu = all_cpu_data[policy->cpu]; | ||
| 231 | |||
| 232 | pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n", | ||
| 233 | cpu->pstate.min_pstate, cpu->pstate.max_pstate, | ||
| 234 | cpu->pstate.turbo_pstate); | ||
| 235 | |||
| 236 | if (!cpu->acpi_perf_data.shared_cpu_map && | ||
| 237 | zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map, | ||
| 238 | GFP_KERNEL, cpu_to_node(policy->cpu))) { | ||
| 239 | return -ENOMEM; | ||
| 240 | } | ||
| 241 | |||
| 242 | ret = acpi_processor_register_performance(&cpu->acpi_perf_data, | ||
| 243 | policy->cpu); | ||
| 244 | if (ret) | ||
| 245 | return ret; | ||
| 246 | |||
| 247 | /* | ||
| 248 | * Check if the control value in _PSS is for PERF_CTL MSR, which should | ||
| 249 | * guarantee that the states returned by it map to the states in our | ||
| 250 | * list directly. | ||
| 251 | */ | ||
| 252 | if (cpu->acpi_perf_data.control_register.space_id != | ||
| 253 | ACPI_ADR_SPACE_FIXED_HARDWARE) | ||
| 254 | return -EIO; | ||
| 255 | |||
| 256 | pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu); | ||
| 257 | for (i = 0; i < cpu->acpi_perf_data.state_count; i++) | ||
| 258 | pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", | ||
| 259 | (i == cpu->acpi_perf_data.state ? '*' : ' '), i, | ||
| 260 | (u32) cpu->acpi_perf_data.states[i].core_frequency, | ||
| 261 | (u32) cpu->acpi_perf_data.states[i].power, | ||
| 262 | (u32) cpu->acpi_perf_data.states[i].control); | ||
| 263 | |||
| 264 | /* | ||
| 265 | * If there is only one entry _PSS, simply ignore _PSS and continue as | ||
| 266 | * usual without taking _PSS into account | ||
| 267 | */ | ||
| 268 | if (cpu->acpi_perf_data.state_count < 2) | ||
| 269 | return 0; | ||
| 270 | |||
| 271 | turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); | ||
| 272 | min_pss_ctl = convert_to_native_pstate_format(cpu, | ||
| 273 | cpu->acpi_perf_data.state_count - 1); | ||
| 274 | /* Check if there is a turbo freq in _PSS */ | ||
| 275 | if (turbo_pss_ctl <= cpu->pstate.max_pstate && | ||
| 276 | turbo_pss_ctl > cpu->pstate.min_pstate) { | ||
| 277 | pr_debug("intel_pstate: no turbo range exists in _PSS\n"); | ||
| 278 | limits->no_turbo = limits->turbo_disabled = 1; | ||
| 279 | cpu->pstate.turbo_pstate = cpu->pstate.max_pstate; | ||
| 280 | turbo_absent = true; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* Check if the max non turbo p state < Intel P state max */ | ||
| 284 | max_pstate_index = turbo_absent ? 0 : 1; | ||
| 285 | max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index); | ||
| 286 | if (max_pss_ctl < cpu->pstate.max_pstate && | ||
| 287 | max_pss_ctl > cpu->pstate.min_pstate) | ||
| 288 | cpu->pstate.max_pstate = max_pss_ctl; | ||
| 289 | |||
| 290 | /* check If min perf > Intel P State min */ | ||
| 291 | if (min_pss_ctl > cpu->pstate.min_pstate && | ||
| 292 | min_pss_ctl < cpu->pstate.max_pstate) { | ||
| 293 | cpu->pstate.min_pstate = min_pss_ctl; | ||
| 294 | policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling; | ||
| 295 | } | ||
| 296 | |||
| 297 | if (turbo_absent) | ||
| 298 | policy->cpuinfo.max_freq = cpu->pstate.max_pstate * | ||
| 299 | cpu->pstate.scaling; | ||
| 300 | else { | ||
| 301 | policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * | ||
| 302 | cpu->pstate.scaling; | ||
| 303 | /* | ||
| 304 | * The _PSS table doesn't contain whole turbo frequency range. | ||
| 305 | * This just contains +1 MHZ above the max non turbo frequency, | ||
| 306 | * with control value corresponding to max turbo ratio. But | ||
| 307 | * when cpufreq set policy is called, it will call with this | ||
| 308 | * max frequency, which will cause a reduced performance as | ||
| 309 | * this driver uses real max turbo frequency as the max | ||
| 310 | * frequeny. So correct this frequency in _PSS table to | ||
| 311 | * correct max turbo frequency based on the turbo ratio. | ||
| 312 | * Also need to convert to MHz as _PSS freq is in MHz. | ||
| 313 | */ | ||
| 314 | cpu->acpi_perf_data.states[0].core_frequency = | ||
| 315 | turbo_pss_ctl * 100; | ||
| 316 | } | ||
| 317 | |||
| 318 | pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n", | ||
| 319 | cpu->pstate.min_pstate, cpu->pstate.max_pstate, | ||
| 320 | cpu->pstate.turbo_pstate); | ||
| 321 | pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n", | ||
| 322 | policy->cpuinfo.max_freq, policy->cpuinfo.min_freq); | ||
| 323 | |||
| 324 | return 0; | ||
| 325 | } | ||
| 326 | |||
| 327 | static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) | ||
| 328 | { | ||
| 329 | struct cpudata *cpu; | ||
| 330 | |||
| 331 | if (!no_acpi_perf) | ||
| 332 | return 0; | ||
| 333 | |||
| 334 | cpu = all_cpu_data[policy->cpu]; | ||
| 335 | acpi_processor_unregister_performance(policy->cpu); | ||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | |||
| 339 | #else | ||
| 340 | static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) | ||
| 341 | { | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 345 | static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) | ||
| 346 | { | ||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | #endif | ||
| 350 | |||
| 351 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | 192 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, |
| 352 | int deadband, int integral) { | 193 | int deadband, int integral) { |
| 353 | pid->setpoint = setpoint; | 194 | pid->setpoint = setpoint; |
| @@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) | |||
| 687 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); | 528 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); |
| 688 | } | 529 | } |
| 689 | 530 | ||
| 690 | static int byt_get_min_pstate(void) | 531 | static int atom_get_min_pstate(void) |
| 691 | { | 532 | { |
| 692 | u64 value; | 533 | u64 value; |
| 693 | 534 | ||
| 694 | rdmsrl(BYT_RATIOS, value); | 535 | rdmsrl(ATOM_RATIOS, value); |
| 695 | return (value >> 8) & 0x7F; | 536 | return (value >> 8) & 0x7F; |
| 696 | } | 537 | } |
| 697 | 538 | ||
| 698 | static int byt_get_max_pstate(void) | 539 | static int atom_get_max_pstate(void) |
| 699 | { | 540 | { |
| 700 | u64 value; | 541 | u64 value; |
| 701 | 542 | ||
| 702 | rdmsrl(BYT_RATIOS, value); | 543 | rdmsrl(ATOM_RATIOS, value); |
| 703 | return (value >> 16) & 0x7F; | 544 | return (value >> 16) & 0x7F; |
| 704 | } | 545 | } |
| 705 | 546 | ||
| 706 | static int byt_get_turbo_pstate(void) | 547 | static int atom_get_turbo_pstate(void) |
| 707 | { | 548 | { |
| 708 | u64 value; | 549 | u64 value; |
| 709 | 550 | ||
| 710 | rdmsrl(BYT_TURBO_RATIOS, value); | 551 | rdmsrl(ATOM_TURBO_RATIOS, value); |
| 711 | return value & 0x7F; | 552 | return value & 0x7F; |
| 712 | } | 553 | } |
| 713 | 554 | ||
| 714 | static void byt_set_pstate(struct cpudata *cpudata, int pstate) | 555 | static void atom_set_pstate(struct cpudata *cpudata, int pstate) |
| 715 | { | 556 | { |
| 716 | u64 val; | 557 | u64 val; |
| 717 | int32_t vid_fp; | 558 | int32_t vid_fp; |
| @@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) | |||
| 736 | wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); | 577 | wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); |
| 737 | } | 578 | } |
| 738 | 579 | ||
| 739 | #define BYT_BCLK_FREQS 5 | 580 | static int silvermont_get_scaling(void) |
| 740 | static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; | ||
| 741 | |||
| 742 | static int byt_get_scaling(void) | ||
| 743 | { | 581 | { |
| 744 | u64 value; | 582 | u64 value; |
| 745 | int i; | 583 | int i; |
| 584 | /* Defined in Table 35-6 from SDM (Sept 2015) */ | ||
| 585 | static int silvermont_freq_table[] = { | ||
| 586 | 83300, 100000, 133300, 116700, 80000}; | ||
| 746 | 587 | ||
| 747 | rdmsrl(MSR_FSB_FREQ, value); | 588 | rdmsrl(MSR_FSB_FREQ, value); |
| 748 | i = value & 0x3; | 589 | i = value & 0x7; |
| 590 | WARN_ON(i > 4); | ||
| 749 | 591 | ||
| 750 | BUG_ON(i > BYT_BCLK_FREQS); | 592 | return silvermont_freq_table[i]; |
| 593 | } | ||
| 751 | 594 | ||
| 752 | return byt_freq_table[i] * 100; | 595 | static int airmont_get_scaling(void) |
| 596 | { | ||
| 597 | u64 value; | ||
| 598 | int i; | ||
| 599 | /* Defined in Table 35-10 from SDM (Sept 2015) */ | ||
| 600 | static int airmont_freq_table[] = { | ||
| 601 | 83300, 100000, 133300, 116700, 80000, | ||
| 602 | 93300, 90000, 88900, 87500}; | ||
| 603 | |||
| 604 | rdmsrl(MSR_FSB_FREQ, value); | ||
| 605 | i = value & 0xF; | ||
| 606 | WARN_ON(i > 8); | ||
| 607 | |||
| 608 | return airmont_freq_table[i]; | ||
| 753 | } | 609 | } |
| 754 | 610 | ||
| 755 | static void byt_get_vid(struct cpudata *cpudata) | 611 | static void atom_get_vid(struct cpudata *cpudata) |
| 756 | { | 612 | { |
| 757 | u64 value; | 613 | u64 value; |
| 758 | 614 | ||
| 759 | rdmsrl(BYT_VIDS, value); | 615 | rdmsrl(ATOM_VIDS, value); |
| 760 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); | 616 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
| 761 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); | 617 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); |
| 762 | cpudata->vid.ratio = div_fp( | 618 | cpudata->vid.ratio = div_fp( |
| @@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata) | |||
| 764 | int_tofp(cpudata->pstate.max_pstate - | 620 | int_tofp(cpudata->pstate.max_pstate - |
| 765 | cpudata->pstate.min_pstate)); | 621 | cpudata->pstate.min_pstate)); |
| 766 | 622 | ||
| 767 | rdmsrl(BYT_TURBO_VIDS, value); | 623 | rdmsrl(ATOM_TURBO_VIDS, value); |
| 768 | cpudata->vid.turbo = value & 0x7f; | 624 | cpudata->vid.turbo = value & 0x7f; |
| 769 | } | 625 | } |
| 770 | 626 | ||
| @@ -885,7 +741,7 @@ static struct cpu_defaults core_params = { | |||
| 885 | }, | 741 | }, |
| 886 | }; | 742 | }; |
| 887 | 743 | ||
| 888 | static struct cpu_defaults byt_params = { | 744 | static struct cpu_defaults silvermont_params = { |
| 889 | .pid_policy = { | 745 | .pid_policy = { |
| 890 | .sample_rate_ms = 10, | 746 | .sample_rate_ms = 10, |
| 891 | .deadband = 0, | 747 | .deadband = 0, |
| @@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = { | |||
| 895 | .i_gain_pct = 4, | 751 | .i_gain_pct = 4, |
| 896 | }, | 752 | }, |
| 897 | .funcs = { | 753 | .funcs = { |
| 898 | .get_max = byt_get_max_pstate, | 754 | .get_max = atom_get_max_pstate, |
| 899 | .get_max_physical = byt_get_max_pstate, | 755 | .get_max_physical = atom_get_max_pstate, |
| 900 | .get_min = byt_get_min_pstate, | 756 | .get_min = atom_get_min_pstate, |
| 901 | .get_turbo = byt_get_turbo_pstate, | 757 | .get_turbo = atom_get_turbo_pstate, |
| 902 | .set = byt_set_pstate, | 758 | .set = atom_set_pstate, |
| 903 | .get_scaling = byt_get_scaling, | 759 | .get_scaling = silvermont_get_scaling, |
| 904 | .get_vid = byt_get_vid, | 760 | .get_vid = atom_get_vid, |
| 761 | }, | ||
| 762 | }; | ||
| 763 | |||
| 764 | static struct cpu_defaults airmont_params = { | ||
| 765 | .pid_policy = { | ||
| 766 | .sample_rate_ms = 10, | ||
| 767 | .deadband = 0, | ||
| 768 | .setpoint = 60, | ||
| 769 | .p_gain_pct = 14, | ||
| 770 | .d_gain_pct = 0, | ||
| 771 | .i_gain_pct = 4, | ||
| 772 | }, | ||
| 773 | .funcs = { | ||
| 774 | .get_max = atom_get_max_pstate, | ||
| 775 | .get_max_physical = atom_get_max_pstate, | ||
| 776 | .get_min = atom_get_min_pstate, | ||
| 777 | .get_turbo = atom_get_turbo_pstate, | ||
| 778 | .set = atom_set_pstate, | ||
| 779 | .get_scaling = airmont_get_scaling, | ||
| 780 | .get_vid = atom_get_vid, | ||
| 905 | }, | 781 | }, |
| 906 | }; | 782 | }; |
| 907 | 783 | ||
| @@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | |||
| 938 | * policy, or by cpu specific default values determined through | 814 | * policy, or by cpu specific default values determined through |
| 939 | * experimentation. | 815 | * experimentation. |
| 940 | */ | 816 | */ |
| 941 | if (limits->max_perf_ctl && limits->max_sysfs_pct >= | 817 | max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf)); |
| 942 | limits->max_policy_pct) { | 818 | *max = clamp_t(int, max_perf_adj, |
| 943 | *max = limits->max_perf_ctl; | 819 | cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); |
| 944 | } else { | ||
| 945 | max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), | ||
| 946 | limits->max_perf)); | ||
| 947 | *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, | ||
| 948 | cpu->pstate.turbo_pstate); | ||
| 949 | } | ||
| 950 | 820 | ||
| 951 | if (limits->min_perf_ctl) { | 821 | min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf)); |
| 952 | *min = limits->min_perf_ctl; | 822 | *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); |
| 953 | } else { | ||
| 954 | min_perf = fp_toint(mul_fp(int_tofp(max_perf), | ||
| 955 | limits->min_perf)); | ||
| 956 | *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); | ||
| 957 | } | ||
| 958 | } | 823 | } |
| 959 | 824 | ||
| 960 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) | 825 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) |
| @@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
| 1153 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 1018 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
| 1154 | ICPU(0x2a, core_params), | 1019 | ICPU(0x2a, core_params), |
| 1155 | ICPU(0x2d, core_params), | 1020 | ICPU(0x2d, core_params), |
| 1156 | ICPU(0x37, byt_params), | 1021 | ICPU(0x37, silvermont_params), |
| 1157 | ICPU(0x3a, core_params), | 1022 | ICPU(0x3a, core_params), |
| 1158 | ICPU(0x3c, core_params), | 1023 | ICPU(0x3c, core_params), |
| 1159 | ICPU(0x3d, core_params), | 1024 | ICPU(0x3d, core_params), |
| @@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | |||
| 1162 | ICPU(0x45, core_params), | 1027 | ICPU(0x45, core_params), |
| 1163 | ICPU(0x46, core_params), | 1028 | ICPU(0x46, core_params), |
| 1164 | ICPU(0x47, core_params), | 1029 | ICPU(0x47, core_params), |
| 1165 | ICPU(0x4c, byt_params), | 1030 | ICPU(0x4c, airmont_params), |
| 1166 | ICPU(0x4e, core_params), | 1031 | ICPU(0x4e, core_params), |
| 1167 | ICPU(0x4f, core_params), | 1032 | ICPU(0x4f, core_params), |
| 1168 | ICPU(0x5e, core_params), | 1033 | ICPU(0x5e, core_params), |
| @@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) | |||
| 1229 | 1094 | ||
| 1230 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) | 1095 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
| 1231 | { | 1096 | { |
| 1232 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 1233 | struct cpudata *cpu; | ||
| 1234 | int i; | ||
| 1235 | #endif | ||
| 1236 | pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__, | ||
| 1237 | policy->cpuinfo.max_freq, policy->max); | ||
| 1238 | if (!policy->cpuinfo.max_freq) | 1097 | if (!policy->cpuinfo.max_freq) |
| 1239 | return -ENODEV; | 1098 | return -ENODEV; |
| 1240 | 1099 | ||
| @@ -1242,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1242 | policy->max >= policy->cpuinfo.max_freq) { | 1101 | policy->max >= policy->cpuinfo.max_freq) { |
| 1243 | pr_debug("intel_pstate: set performance\n"); | 1102 | pr_debug("intel_pstate: set performance\n"); |
| 1244 | limits = &performance_limits; | 1103 | limits = &performance_limits; |
| 1104 | if (hwp_active) | ||
| 1105 | intel_pstate_hwp_set(); | ||
| 1245 | return 0; | 1106 | return 0; |
| 1246 | } | 1107 | } |
| 1247 | 1108 | ||
| @@ -1249,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1249 | limits = &powersave_limits; | 1110 | limits = &powersave_limits; |
| 1250 | limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; | 1111 | limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; |
| 1251 | limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); | 1112 | limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); |
| 1252 | limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; | 1113 | limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, |
| 1114 | policy->cpuinfo.max_freq); | ||
| 1253 | limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); | 1115 | limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); |
| 1254 | 1116 | ||
| 1255 | /* Normalize user input to [min_policy_pct, max_policy_pct] */ | 1117 | /* Normalize user input to [min_policy_pct, max_policy_pct] */ |
| @@ -1261,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1261 | limits->max_sysfs_pct); | 1123 | limits->max_sysfs_pct); |
| 1262 | limits->max_perf_pct = max(limits->min_policy_pct, | 1124 | limits->max_perf_pct = max(limits->min_policy_pct, |
| 1263 | limits->max_perf_pct); | 1125 | limits->max_perf_pct); |
| 1126 | limits->max_perf = round_up(limits->max_perf, 8); | ||
| 1264 | 1127 | ||
| 1265 | /* Make sure min_perf_pct <= max_perf_pct */ | 1128 | /* Make sure min_perf_pct <= max_perf_pct */ |
| 1266 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); | 1129 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); |
| @@ -1270,23 +1133,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1270 | limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), | 1133 | limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), |
| 1271 | int_tofp(100)); | 1134 | int_tofp(100)); |
| 1272 | 1135 | ||
| 1273 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 1274 | cpu = all_cpu_data[policy->cpu]; | ||
| 1275 | for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { | ||
| 1276 | int control; | ||
| 1277 | |||
| 1278 | control = convert_to_native_pstate_format(cpu, i); | ||
| 1279 | if (control * cpu->pstate.scaling == policy->max) | ||
| 1280 | limits->max_perf_ctl = control; | ||
| 1281 | if (control * cpu->pstate.scaling == policy->min) | ||
| 1282 | limits->min_perf_ctl = control; | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n", | ||
| 1286 | policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl, | ||
| 1287 | limits->max_perf_ctl); | ||
| 1288 | #endif | ||
| 1289 | |||
| 1290 | if (hwp_active) | 1136 | if (hwp_active) |
| 1291 | intel_pstate_hwp_set(); | 1137 | intel_pstate_hwp_set(); |
| 1292 | 1138 | ||
| @@ -1341,30 +1187,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 1341 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; | 1187 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 1342 | policy->cpuinfo.max_freq = | 1188 | policy->cpuinfo.max_freq = |
| 1343 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 1189 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; |
| 1344 | if (!no_acpi_perf) | ||
| 1345 | intel_pstate_init_perf_limits(policy); | ||
| 1346 | /* | ||
| 1347 | * If there is no acpi perf data or error, we ignore and use Intel P | ||
| 1348 | * state calculated limits, So this is not fatal error. | ||
| 1349 | */ | ||
| 1350 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 1190 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
| 1351 | cpumask_set_cpu(policy->cpu, policy->cpus); | 1191 | cpumask_set_cpu(policy->cpu, policy->cpus); |
| 1352 | 1192 | ||
| 1353 | return 0; | 1193 | return 0; |
| 1354 | } | 1194 | } |
| 1355 | 1195 | ||
| 1356 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) | ||
| 1357 | { | ||
| 1358 | return intel_pstate_exit_perf_limits(policy); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | static struct cpufreq_driver intel_pstate_driver = { | 1196 | static struct cpufreq_driver intel_pstate_driver = { |
| 1362 | .flags = CPUFREQ_CONST_LOOPS, | 1197 | .flags = CPUFREQ_CONST_LOOPS, |
| 1363 | .verify = intel_pstate_verify_policy, | 1198 | .verify = intel_pstate_verify_policy, |
| 1364 | .setpolicy = intel_pstate_set_policy, | 1199 | .setpolicy = intel_pstate_set_policy, |
| 1365 | .get = intel_pstate_get, | 1200 | .get = intel_pstate_get, |
| 1366 | .init = intel_pstate_cpu_init, | 1201 | .init = intel_pstate_cpu_init, |
| 1367 | .exit = intel_pstate_cpu_exit, | ||
| 1368 | .stop_cpu = intel_pstate_stop_cpu, | 1202 | .stop_cpu = intel_pstate_stop_cpu, |
| 1369 | .name = "intel_pstate", | 1203 | .name = "intel_pstate", |
| 1370 | }; | 1204 | }; |
| @@ -1406,6 +1240,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) | |||
| 1406 | } | 1240 | } |
| 1407 | 1241 | ||
| 1408 | #if IS_ENABLED(CONFIG_ACPI) | 1242 | #if IS_ENABLED(CONFIG_ACPI) |
| 1243 | #include <acpi/processor.h> | ||
| 1409 | 1244 | ||
| 1410 | static bool intel_pstate_no_acpi_pss(void) | 1245 | static bool intel_pstate_no_acpi_pss(void) |
| 1411 | { | 1246 | { |
| @@ -1601,9 +1436,6 @@ static int __init intel_pstate_setup(char *str) | |||
| 1601 | force_load = 1; | 1436 | force_load = 1; |
| 1602 | if (!strcmp(str, "hwp_only")) | 1437 | if (!strcmp(str, "hwp_only")) |
| 1603 | hwp_only = 1; | 1438 | hwp_only = 1; |
| 1604 | if (!strcmp(str, "no_acpi")) | ||
| 1605 | no_acpi_perf = 1; | ||
| 1606 | |||
| 1607 | return 0; | 1439 | return 0; |
| 1608 | } | 1440 | } |
| 1609 | early_param("intel_pstate", intel_pstate_setup); | 1441 | early_param("intel_pstate", intel_pstate_setup); |
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 03856ad280b9..473d36d91644 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
| @@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, | |||
| 198 | goto out_err; | 198 | goto out_err; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | params_head = section_head->params; | 201 | params_head = section.params; |
| 202 | 202 | ||
| 203 | while (params_head) { | 203 | while (params_head) { |
| 204 | if (copy_from_user(&key_val, (void __user *)params_head, | 204 | if (copy_from_user(&key_val, (void __user *)params_head, |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 4e55239c7a30..53d22eb73b56 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, | |||
| 729 | return NULL; | 729 | return NULL; |
| 730 | 730 | ||
| 731 | dev_info(chan2dev(chan), | 731 | dev_info(chan2dev(chan), |
| 732 | "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", | 732 | "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
| 733 | __func__, xt->src_start, xt->dst_start, xt->numf, | 733 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
| 734 | xt->frame_size, flags); | 734 | xt->frame_size, flags); |
| 735 | 735 | ||
| 736 | /* | 736 | /* |
| @@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 824 | u32 ctrla; | 824 | u32 ctrla; |
| 825 | u32 ctrlb; | 825 | u32 ctrlb; |
| 826 | 826 | ||
| 827 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", | 827 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", |
| 828 | dest, src, len, flags); | 828 | &dest, &src, len, flags); |
| 829 | 829 | ||
| 830 | if (unlikely(!len)) { | 830 | if (unlikely(!len)) { |
| 831 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | 831 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
| @@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
| 938 | void __iomem *vaddr; | 938 | void __iomem *vaddr; |
| 939 | dma_addr_t paddr; | 939 | dma_addr_t paddr; |
| 940 | 940 | ||
| 941 | dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, | 941 | dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, |
| 942 | dest, value, len, flags); | 942 | &dest, value, len, flags); |
| 943 | 943 | ||
| 944 | if (unlikely(!len)) { | 944 | if (unlikely(!len)) { |
| 945 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); | 945 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
| @@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, | |||
| 1022 | dma_addr_t dest = sg_dma_address(sg); | 1022 | dma_addr_t dest = sg_dma_address(sg); |
| 1023 | size_t len = sg_dma_len(sg); | 1023 | size_t len = sg_dma_len(sg); |
| 1024 | 1024 | ||
| 1025 | dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", | 1025 | dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n", |
| 1026 | __func__, dest, len); | 1026 | __func__, &dest, len); |
| 1027 | 1027 | ||
| 1028 | if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { | 1028 | if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { |
| 1029 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n", | 1029 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n", |
| @@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
| 1439 | unsigned int periods = buf_len / period_len; | 1439 | unsigned int periods = buf_len / period_len; |
| 1440 | unsigned int i; | 1440 | unsigned int i; |
| 1441 | 1441 | ||
| 1442 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | 1442 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n", |
| 1443 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", | 1443 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", |
| 1444 | buf_addr, | 1444 | &buf_addr, |
| 1445 | periods, buf_len, period_len); | 1445 | periods, buf_len, period_len); |
| 1446 | 1446 | ||
| 1447 | if (unlikely(!atslave || !buf_len || !period_len)) { | 1447 | if (unlikely(!atslave || !buf_len || !period_len)) { |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d1cfc8c876f9..7f58f06157f6 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
| @@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} | |||
| 385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) | 385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) |
| 386 | { | 386 | { |
| 387 | dev_crit(chan2dev(&atchan->chan_common), | 387 | dev_crit(chan2dev(&atchan->chan_common), |
| 388 | " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", | 388 | " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", |
| 389 | lli->saddr, lli->daddr, | 389 | &lli->saddr, &lli->daddr, |
| 390 | lli->ctrla, lli->ctrlb, lli->dscr); | 390 | lli->ctrla, lli->ctrlb, &lli->dscr); |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | 393 | ||
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b5e132d4bae5..7f039de143f0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
| @@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
| 920 | desc->lld.mbr_cfg = chan_cc; | 920 | desc->lld.mbr_cfg = chan_cc; |
| 921 | 921 | ||
| 922 | dev_dbg(chan2dev(chan), | 922 | dev_dbg(chan2dev(chan), |
| 923 | "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", | 923 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
| 924 | __func__, desc->lld.mbr_sa, desc->lld.mbr_da, | 924 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, |
| 925 | desc->lld.mbr_ubc, desc->lld.mbr_cfg); | 925 | desc->lld.mbr_ubc, desc->lld.mbr_cfg); |
| 926 | 926 | ||
| 927 | /* Chain lld. */ | 927 | /* Chain lld. */ |
| @@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
| 953 | if ((xt->numf > 1) && (xt->frame_size > 1)) | 953 | if ((xt->numf > 1) && (xt->frame_size > 1)) |
| 954 | return NULL; | 954 | return NULL; |
| 955 | 955 | ||
| 956 | dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", | 956 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
| 957 | __func__, xt->src_start, xt->dst_start, xt->numf, | 957 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
| 958 | xt->frame_size, flags); | 958 | xt->frame_size, flags); |
| 959 | 959 | ||
| 960 | src_addr = xt->src_start; | 960 | src_addr = xt->src_start; |
| @@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, | |||
| 1179 | desc->lld.mbr_cfg = chan_cc; | 1179 | desc->lld.mbr_cfg = chan_cc; |
| 1180 | 1180 | ||
| 1181 | dev_dbg(chan2dev(chan), | 1181 | dev_dbg(chan2dev(chan), |
| 1182 | "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", | 1182 | "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
| 1183 | __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, | 1183 | __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, |
| 1184 | desc->lld.mbr_cfg); | 1184 | desc->lld.mbr_cfg); |
| 1185 | 1185 | ||
| 1186 | return desc; | 1186 | return desc; |
| @@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
| 1193 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | 1193 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
| 1194 | struct at_xdmac_desc *desc; | 1194 | struct at_xdmac_desc *desc; |
| 1195 | 1195 | ||
| 1196 | dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", | 1196 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
| 1197 | __func__, dest, len, value, flags); | 1197 | __func__, &dest, len, value, flags); |
| 1198 | 1198 | ||
| 1199 | if (unlikely(!len)) | 1199 | if (unlikely(!len)) |
| 1200 | return NULL; | 1200 | return NULL; |
| @@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 1229 | 1229 | ||
| 1230 | /* Prepare descriptors. */ | 1230 | /* Prepare descriptors. */ |
| 1231 | for_each_sg(sgl, sg, sg_len, i) { | 1231 | for_each_sg(sgl, sg, sg_len, i) { |
| 1232 | dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", | 1232 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
| 1233 | __func__, sg_dma_address(sg), sg_dma_len(sg), | 1233 | __func__, &sg_dma_address(sg), sg_dma_len(sg), |
| 1234 | value, flags); | 1234 | value, flags); |
| 1235 | desc = at_xdmac_memset_create_desc(chan, atchan, | 1235 | desc = at_xdmac_memset_create_desc(chan, atchan, |
| 1236 | sg_dma_address(sg), | 1236 | sg_dma_address(sg), |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 6b03e4e84e6b..0675e268d577 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -107,7 +107,7 @@ | |||
| 107 | 107 | ||
| 108 | /* CCCFG register */ | 108 | /* CCCFG register */ |
| 109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | 109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ |
| 110 | #define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ | 110 | #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ |
| 111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | 111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ |
| 112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | 112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ |
| 113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | 113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ |
| @@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) | |||
| 1565 | struct platform_device *tc_pdev; | 1565 | struct platform_device *tc_pdev; |
| 1566 | int ret; | 1566 | int ret; |
| 1567 | 1567 | ||
| 1568 | if (!tc) | 1568 | if (!IS_ENABLED(CONFIG_OF) || !tc) |
| 1569 | return; | 1569 | return; |
| 1570 | 1570 | ||
| 1571 | tc_pdev = of_find_device_by_node(tc->node); | 1571 | tc_pdev = of_find_device_by_node(tc->node); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7058d58ba588..0f6fd42f55ca 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -1462,7 +1462,7 @@ err_firmware: | |||
| 1462 | 1462 | ||
| 1463 | #define EVENT_REMAP_CELLS 3 | 1463 | #define EVENT_REMAP_CELLS 3 |
| 1464 | 1464 | ||
| 1465 | static int __init sdma_event_remap(struct sdma_engine *sdma) | 1465 | static int sdma_event_remap(struct sdma_engine *sdma) |
| 1466 | { | 1466 | { |
| 1467 | struct device_node *np = sdma->dev->of_node; | 1467 | struct device_node *np = sdma->dev->of_node; |
| 1468 | struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); | 1468 | struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index ebd8a5f398b0..f1bcc2a163b3 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
| @@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev) | |||
| 679 | struct usb_dmac *dmac = dev_get_drvdata(dev); | 679 | struct usb_dmac *dmac = dev_get_drvdata(dev); |
| 680 | int i; | 680 | int i; |
| 681 | 681 | ||
| 682 | for (i = 0; i < dmac->n_channels; ++i) | 682 | for (i = 0; i < dmac->n_channels; ++i) { |
| 683 | if (!dmac->channels[i].iomem) | ||
| 684 | break; | ||
| 683 | usb_dmac_chan_halt(&dmac->channels[i]); | 685 | usb_dmac_chan_halt(&dmac->channels[i]); |
| 686 | } | ||
| 684 | 687 | ||
| 685 | return 0; | 688 | return 0; |
| 686 | } | 689 | } |
| @@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev) | |||
| 799 | ret = pm_runtime_get_sync(&pdev->dev); | 802 | ret = pm_runtime_get_sync(&pdev->dev); |
| 800 | if (ret < 0) { | 803 | if (ret < 0) { |
| 801 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); | 804 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); |
| 802 | return ret; | 805 | goto error_pm; |
| 803 | } | 806 | } |
| 804 | 807 | ||
| 805 | ret = usb_dmac_init(dmac); | 808 | ret = usb_dmac_init(dmac); |
| 806 | pm_runtime_put(&pdev->dev); | ||
| 807 | 809 | ||
| 808 | if (ret) { | 810 | if (ret) { |
| 809 | dev_err(&pdev->dev, "failed to reset device\n"); | 811 | dev_err(&pdev->dev, "failed to reset device\n"); |
| @@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev) | |||
| 851 | if (ret < 0) | 853 | if (ret < 0) |
| 852 | goto error; | 854 | goto error; |
| 853 | 855 | ||
| 856 | pm_runtime_put(&pdev->dev); | ||
| 854 | return 0; | 857 | return 0; |
| 855 | 858 | ||
| 856 | error: | 859 | error: |
| 857 | of_dma_controller_free(pdev->dev.of_node); | 860 | of_dma_controller_free(pdev->dev.of_node); |
| 861 | pm_runtime_put(&pdev->dev); | ||
| 862 | error_pm: | ||
| 858 | pm_runtime_disable(&pdev->dev); | 863 | pm_runtime_disable(&pdev->dev); |
| 859 | return ret; | 864 | return ret; |
| 860 | } | 865 | } |
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index 6ed7c0fb3378..6b186829087c 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c | |||
| @@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
| 113 | 113 | ||
| 114 | static int mmio_74xx_gpio_probe(struct platform_device *pdev) | 114 | static int mmio_74xx_gpio_probe(struct platform_device *pdev) |
| 115 | { | 115 | { |
| 116 | const struct of_device_id *of_id = | 116 | const struct of_device_id *of_id; |
| 117 | of_match_device(mmio_74xx_gpio_ids, &pdev->dev); | ||
| 118 | struct mmio_74xx_gpio_priv *priv; | 117 | struct mmio_74xx_gpio_priv *priv; |
| 119 | struct resource *res; | 118 | struct resource *res; |
| 120 | void __iomem *dat; | 119 | void __iomem *dat; |
| 121 | int err; | 120 | int err; |
| 122 | 121 | ||
| 122 | of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev); | ||
| 123 | if (!of_id) | ||
| 124 | return -ENODEV; | ||
| 125 | |||
| 123 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 126 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
| 124 | if (!priv) | 127 | if (!priv) |
| 125 | return -ENOMEM; | 128 | return -ENOMEM; |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 56d2d026e62e..f7fbb46d5d79 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
| @@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
| 1122 | /* MPUIO is a bit different, reading IRQ status clears it */ | 1122 | /* MPUIO is a bit different, reading IRQ status clears it */ |
| 1123 | if (bank->is_mpuio) { | 1123 | if (bank->is_mpuio) { |
| 1124 | irqc->irq_ack = dummy_irq_chip.irq_ack; | 1124 | irqc->irq_ack = dummy_irq_chip.irq_ack; |
| 1125 | irqc->irq_mask = irq_gc_mask_set_bit; | ||
| 1126 | irqc->irq_unmask = irq_gc_mask_clr_bit; | ||
| 1127 | if (!bank->regs->wkup_en) | 1125 | if (!bank->regs->wkup_en) |
| 1128 | irqc->irq_set_wake = NULL; | 1126 | irqc->irq_set_wake = NULL; |
| 1129 | } | 1127 | } |
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 171a6389f9ce..52b447c071cb 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c | |||
| @@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) | |||
| 167 | const struct palmas_device_data *dev_data; | 167 | const struct palmas_device_data *dev_data; |
| 168 | 168 | ||
| 169 | match = of_match_device(of_palmas_gpio_match, &pdev->dev); | 169 | match = of_match_device(of_palmas_gpio_match, &pdev->dev); |
| 170 | if (!match) | ||
| 171 | return -ENODEV; | ||
| 170 | dev_data = match->data; | 172 | dev_data = match->data; |
| 171 | if (!dev_data) | 173 | if (!dev_data) |
| 172 | dev_data = &palmas_dev_data; | 174 | dev_data = &palmas_dev_data; |
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 045a952576c7..7b25fdf64802 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c | |||
| @@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids); | |||
| 187 | static int syscon_gpio_probe(struct platform_device *pdev) | 187 | static int syscon_gpio_probe(struct platform_device *pdev) |
| 188 | { | 188 | { |
| 189 | struct device *dev = &pdev->dev; | 189 | struct device *dev = &pdev->dev; |
| 190 | const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); | 190 | const struct of_device_id *of_id; |
| 191 | struct syscon_gpio_priv *priv; | 191 | struct syscon_gpio_priv *priv; |
| 192 | struct device_node *np = dev->of_node; | 192 | struct device_node *np = dev->of_node; |
| 193 | int ret; | 193 | int ret; |
| 194 | 194 | ||
| 195 | of_id = of_match_device(syscon_gpio_ids, dev); | ||
| 196 | if (!of_id) | ||
| 197 | return -ENODEV; | ||
| 198 | |||
| 195 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 199 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
| 196 | if (!priv) | 200 | if (!priv) |
| 197 | return -ENOMEM; | 201 | return -ENOMEM; |
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 027e5f47dd28..896bf29776b0 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c | |||
| @@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) | |||
| 375 | } | 375 | } |
| 376 | #endif | 376 | #endif |
| 377 | 377 | ||
| 378 | #ifdef CONFIG_DEBUG_FS | ||
| 379 | |||
| 380 | #include <linux/debugfs.h> | ||
| 381 | #include <linux/seq_file.h> | ||
| 382 | |||
| 383 | static int dbg_gpio_show(struct seq_file *s, void *unused) | ||
| 384 | { | ||
| 385 | int i; | ||
| 386 | int j; | ||
| 387 | |||
| 388 | for (i = 0; i < tegra_gpio_bank_count; i++) { | ||
| 389 | for (j = 0; j < 4; j++) { | ||
| 390 | int gpio = tegra_gpio_compose(i, j, 0); | ||
| 391 | seq_printf(s, | ||
| 392 | "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", | ||
| 393 | i, j, | ||
| 394 | tegra_gpio_readl(GPIO_CNF(gpio)), | ||
| 395 | tegra_gpio_readl(GPIO_OE(gpio)), | ||
| 396 | tegra_gpio_readl(GPIO_OUT(gpio)), | ||
| 397 | tegra_gpio_readl(GPIO_IN(gpio)), | ||
| 398 | tegra_gpio_readl(GPIO_INT_STA(gpio)), | ||
| 399 | tegra_gpio_readl(GPIO_INT_ENB(gpio)), | ||
| 400 | tegra_gpio_readl(GPIO_INT_LVL(gpio))); | ||
| 401 | } | ||
| 402 | } | ||
| 403 | return 0; | ||
| 404 | } | ||
| 405 | |||
| 406 | static int dbg_gpio_open(struct inode *inode, struct file *file) | ||
| 407 | { | ||
| 408 | return single_open(file, dbg_gpio_show, &inode->i_private); | ||
| 409 | } | ||
| 410 | |||
| 411 | static const struct file_operations debug_fops = { | ||
| 412 | .open = dbg_gpio_open, | ||
| 413 | .read = seq_read, | ||
| 414 | .llseek = seq_lseek, | ||
| 415 | .release = single_release, | ||
| 416 | }; | ||
| 417 | |||
| 418 | static void tegra_gpio_debuginit(void) | ||
| 419 | { | ||
| 420 | (void) debugfs_create_file("tegra_gpio", S_IRUGO, | ||
| 421 | NULL, NULL, &debug_fops); | ||
| 422 | } | ||
| 423 | |||
| 424 | #else | ||
| 425 | |||
| 426 | static inline void tegra_gpio_debuginit(void) | ||
| 427 | { | ||
| 428 | } | ||
| 429 | |||
| 430 | #endif | ||
| 431 | |||
| 378 | static struct irq_chip tegra_gpio_irq_chip = { | 432 | static struct irq_chip tegra_gpio_irq_chip = { |
| 379 | .name = "GPIO", | 433 | .name = "GPIO", |
| 380 | .irq_ack = tegra_gpio_irq_ack, | 434 | .irq_ack = tegra_gpio_irq_ack, |
| @@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) | |||
| 519 | spin_lock_init(&bank->lvl_lock[j]); | 573 | spin_lock_init(&bank->lvl_lock[j]); |
| 520 | } | 574 | } |
| 521 | 575 | ||
| 576 | tegra_gpio_debuginit(); | ||
| 577 | |||
| 522 | return 0; | 578 | return 0; |
| 523 | } | 579 | } |
| 524 | 580 | ||
| @@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void) | |||
| 536 | return platform_driver_register(&tegra_gpio_driver); | 592 | return platform_driver_register(&tegra_gpio_driver); |
| 537 | } | 593 | } |
| 538 | postcore_initcall(tegra_gpio_init); | 594 | postcore_initcall(tegra_gpio_init); |
| 539 | |||
| 540 | #ifdef CONFIG_DEBUG_FS | ||
| 541 | |||
| 542 | #include <linux/debugfs.h> | ||
| 543 | #include <linux/seq_file.h> | ||
| 544 | |||
| 545 | static int dbg_gpio_show(struct seq_file *s, void *unused) | ||
| 546 | { | ||
| 547 | int i; | ||
| 548 | int j; | ||
| 549 | |||
| 550 | for (i = 0; i < tegra_gpio_bank_count; i++) { | ||
| 551 | for (j = 0; j < 4; j++) { | ||
| 552 | int gpio = tegra_gpio_compose(i, j, 0); | ||
| 553 | seq_printf(s, | ||
| 554 | "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", | ||
| 555 | i, j, | ||
| 556 | tegra_gpio_readl(GPIO_CNF(gpio)), | ||
| 557 | tegra_gpio_readl(GPIO_OE(gpio)), | ||
| 558 | tegra_gpio_readl(GPIO_OUT(gpio)), | ||
| 559 | tegra_gpio_readl(GPIO_IN(gpio)), | ||
| 560 | tegra_gpio_readl(GPIO_INT_STA(gpio)), | ||
| 561 | tegra_gpio_readl(GPIO_INT_ENB(gpio)), | ||
| 562 | tegra_gpio_readl(GPIO_INT_LVL(gpio))); | ||
| 563 | } | ||
| 564 | } | ||
| 565 | return 0; | ||
| 566 | } | ||
| 567 | |||
| 568 | static int dbg_gpio_open(struct inode *inode, struct file *file) | ||
| 569 | { | ||
| 570 | return single_open(file, dbg_gpio_show, &inode->i_private); | ||
| 571 | } | ||
| 572 | |||
| 573 | static const struct file_operations debug_fops = { | ||
| 574 | .open = dbg_gpio_open, | ||
| 575 | .read = seq_read, | ||
| 576 | .llseek = seq_lseek, | ||
| 577 | .release = single_release, | ||
| 578 | }; | ||
| 579 | |||
| 580 | static int __init tegra_gpio_debuginit(void) | ||
| 581 | { | ||
| 582 | (void) debugfs_create_file("tegra_gpio", S_IRUGO, | ||
| 583 | NULL, NULL, &debug_fops); | ||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | late_initcall(tegra_gpio_debuginit); | ||
| 587 | #endif | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a18f00fc1bb8..2a91f3287e3b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) | |||
| 233 | for (i = 0; i != chip->ngpio; ++i) { | 233 | for (i = 0; i != chip->ngpio; ++i) { |
| 234 | struct gpio_desc *gpio = &chip->desc[i]; | 234 | struct gpio_desc *gpio = &chip->desc[i]; |
| 235 | 235 | ||
| 236 | if (!gpio->name) | 236 | if (!gpio->name || !name) |
| 237 | continue; | 237 | continue; |
| 238 | 238 | ||
| 239 | if (!strcmp(gpio->name, name)) { | 239 | if (!strcmp(gpio->name, name)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 615ce6d464fb..251b14736de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -389,7 +389,6 @@ struct amdgpu_clock { | |||
| 389 | * Fences. | 389 | * Fences. |
| 390 | */ | 390 | */ |
| 391 | struct amdgpu_fence_driver { | 391 | struct amdgpu_fence_driver { |
| 392 | struct amdgpu_ring *ring; | ||
| 393 | uint64_t gpu_addr; | 392 | uint64_t gpu_addr; |
| 394 | volatile uint32_t *cpu_addr; | 393 | volatile uint32_t *cpu_addr; |
| 395 | /* sync_seq is protected by ring emission lock */ | 394 | /* sync_seq is protected by ring emission lock */ |
| @@ -398,7 +397,7 @@ struct amdgpu_fence_driver { | |||
| 398 | bool initialized; | 397 | bool initialized; |
| 399 | struct amdgpu_irq_src *irq_src; | 398 | struct amdgpu_irq_src *irq_src; |
| 400 | unsigned irq_type; | 399 | unsigned irq_type; |
| 401 | struct delayed_work lockup_work; | 400 | struct timer_list fallback_timer; |
| 402 | wait_queue_head_t fence_queue; | 401 | wait_queue_head_t fence_queue; |
| 403 | }; | 402 | }; |
| 404 | 403 | ||
| @@ -497,6 +496,7 @@ struct amdgpu_bo_va_mapping { | |||
| 497 | 496 | ||
| 498 | /* bo virtual addresses in a specific vm */ | 497 | /* bo virtual addresses in a specific vm */ |
| 499 | struct amdgpu_bo_va { | 498 | struct amdgpu_bo_va { |
| 499 | struct mutex mutex; | ||
| 500 | /* protected by bo being reserved */ | 500 | /* protected by bo being reserved */ |
| 501 | struct list_head bo_list; | 501 | struct list_head bo_list; |
| 502 | struct fence *last_pt_update; | 502 | struct fence *last_pt_update; |
| @@ -917,8 +917,8 @@ struct amdgpu_ring { | |||
| 917 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | 917 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 |
| 918 | 918 | ||
| 919 | struct amdgpu_vm_pt { | 919 | struct amdgpu_vm_pt { |
| 920 | struct amdgpu_bo *bo; | 920 | struct amdgpu_bo *bo; |
| 921 | uint64_t addr; | 921 | uint64_t addr; |
| 922 | }; | 922 | }; |
| 923 | 923 | ||
| 924 | struct amdgpu_vm_id { | 924 | struct amdgpu_vm_id { |
| @@ -926,13 +926,9 @@ struct amdgpu_vm_id { | |||
| 926 | uint64_t pd_gpu_addr; | 926 | uint64_t pd_gpu_addr; |
| 927 | /* last flushed PD/PT update */ | 927 | /* last flushed PD/PT update */ |
| 928 | struct fence *flushed_updates; | 928 | struct fence *flushed_updates; |
| 929 | /* last use of vmid */ | ||
| 930 | struct fence *last_id_use; | ||
| 931 | }; | 929 | }; |
| 932 | 930 | ||
| 933 | struct amdgpu_vm { | 931 | struct amdgpu_vm { |
| 934 | struct mutex mutex; | ||
| 935 | |||
| 936 | struct rb_root va; | 932 | struct rb_root va; |
| 937 | 933 | ||
| 938 | /* protecting invalidated */ | 934 | /* protecting invalidated */ |
| @@ -957,24 +953,70 @@ struct amdgpu_vm { | |||
| 957 | 953 | ||
| 958 | /* for id and flush management per ring */ | 954 | /* for id and flush management per ring */ |
| 959 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; | 955 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; |
| 956 | /* for interval tree */ | ||
| 957 | spinlock_t it_lock; | ||
| 960 | }; | 958 | }; |
| 961 | 959 | ||
| 962 | struct amdgpu_vm_manager { | 960 | struct amdgpu_vm_manager { |
| 963 | struct fence *active[AMDGPU_NUM_VM]; | 961 | struct { |
| 964 | uint32_t max_pfn; | 962 | struct fence *active; |
| 963 | atomic_long_t owner; | ||
| 964 | } ids[AMDGPU_NUM_VM]; | ||
| 965 | |||
| 966 | uint32_t max_pfn; | ||
| 965 | /* number of VMIDs */ | 967 | /* number of VMIDs */ |
| 966 | unsigned nvm; | 968 | unsigned nvm; |
| 967 | /* vram base address for page table entry */ | 969 | /* vram base address for page table entry */ |
| 968 | u64 vram_base_offset; | 970 | u64 vram_base_offset; |
| 969 | /* is vm enabled? */ | 971 | /* is vm enabled? */ |
| 970 | bool enabled; | 972 | bool enabled; |
| 971 | /* for hw to save the PD addr on suspend/resume */ | ||
| 972 | uint32_t saved_table_addr[AMDGPU_NUM_VM]; | ||
| 973 | /* vm pte handling */ | 973 | /* vm pte handling */ |
| 974 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | 974 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; |
| 975 | struct amdgpu_ring *vm_pte_funcs_ring; | 975 | struct amdgpu_ring *vm_pte_funcs_ring; |
| 976 | }; | 976 | }; |
| 977 | 977 | ||
| 978 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
| 979 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 980 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 981 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | ||
| 982 | struct amdgpu_vm *vm, | ||
| 983 | struct list_head *head); | ||
| 984 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
| 985 | struct amdgpu_sync *sync); | ||
| 986 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | ||
| 987 | struct amdgpu_vm *vm, | ||
| 988 | struct fence *updates); | ||
| 989 | void amdgpu_vm_fence(struct amdgpu_device *adev, | ||
| 990 | struct amdgpu_vm *vm, | ||
| 991 | struct fence *fence); | ||
| 992 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); | ||
| 993 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
| 994 | struct amdgpu_vm *vm); | ||
| 995 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
| 996 | struct amdgpu_vm *vm); | ||
| 997 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
| 998 | struct amdgpu_sync *sync); | ||
| 999 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
| 1000 | struct amdgpu_bo_va *bo_va, | ||
| 1001 | struct ttm_mem_reg *mem); | ||
| 1002 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
| 1003 | struct amdgpu_bo *bo); | ||
| 1004 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
| 1005 | struct amdgpu_bo *bo); | ||
| 1006 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
| 1007 | struct amdgpu_vm *vm, | ||
| 1008 | struct amdgpu_bo *bo); | ||
| 1009 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
| 1010 | struct amdgpu_bo_va *bo_va, | ||
| 1011 | uint64_t addr, uint64_t offset, | ||
| 1012 | uint64_t size, uint32_t flags); | ||
| 1013 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
| 1014 | struct amdgpu_bo_va *bo_va, | ||
| 1015 | uint64_t addr); | ||
| 1016 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
| 1017 | struct amdgpu_bo_va *bo_va); | ||
| 1018 | int amdgpu_vm_free_job(struct amdgpu_job *job); | ||
| 1019 | |||
| 978 | /* | 1020 | /* |
| 979 | * context related structures | 1021 | * context related structures |
| 980 | */ | 1022 | */ |
| @@ -1211,6 +1253,7 @@ struct amdgpu_cs_parser { | |||
| 1211 | /* relocations */ | 1253 | /* relocations */ |
| 1212 | struct amdgpu_bo_list_entry *vm_bos; | 1254 | struct amdgpu_bo_list_entry *vm_bos; |
| 1213 | struct list_head validated; | 1255 | struct list_head validated; |
| 1256 | struct fence *fence; | ||
| 1214 | 1257 | ||
| 1215 | struct amdgpu_ib *ibs; | 1258 | struct amdgpu_ib *ibs; |
| 1216 | uint32_t num_ibs; | 1259 | uint32_t num_ibs; |
| @@ -1226,7 +1269,7 @@ struct amdgpu_job { | |||
| 1226 | struct amdgpu_device *adev; | 1269 | struct amdgpu_device *adev; |
| 1227 | struct amdgpu_ib *ibs; | 1270 | struct amdgpu_ib *ibs; |
| 1228 | uint32_t num_ibs; | 1271 | uint32_t num_ibs; |
| 1229 | struct mutex job_lock; | 1272 | void *owner; |
| 1230 | struct amdgpu_user_fence uf; | 1273 | struct amdgpu_user_fence uf; |
| 1231 | int (*free_job)(struct amdgpu_job *job); | 1274 | int (*free_job)(struct amdgpu_job *job); |
| 1232 | }; | 1275 | }; |
| @@ -2257,11 +2300,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); | |||
| 2257 | bool amdgpu_card_posted(struct amdgpu_device *adev); | 2300 | bool amdgpu_card_posted(struct amdgpu_device *adev); |
| 2258 | void amdgpu_update_display_priority(struct amdgpu_device *adev); | 2301 | void amdgpu_update_display_priority(struct amdgpu_device *adev); |
| 2259 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); | 2302 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); |
| 2260 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | ||
| 2261 | struct drm_file *filp, | ||
| 2262 | struct amdgpu_ctx *ctx, | ||
| 2263 | struct amdgpu_ib *ibs, | ||
| 2264 | uint32_t num_ibs); | ||
| 2265 | 2303 | ||
| 2266 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); | 2304 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); |
| 2267 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | 2305 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, |
| @@ -2319,49 +2357,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 2319 | unsigned long arg); | 2357 | unsigned long arg); |
| 2320 | 2358 | ||
| 2321 | /* | 2359 | /* |
| 2322 | * vm | ||
| 2323 | */ | ||
| 2324 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 2325 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 2326 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | ||
| 2327 | struct amdgpu_vm *vm, | ||
| 2328 | struct list_head *head); | ||
| 2329 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
| 2330 | struct amdgpu_sync *sync); | ||
| 2331 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | ||
| 2332 | struct amdgpu_vm *vm, | ||
| 2333 | struct fence *updates); | ||
| 2334 | void amdgpu_vm_fence(struct amdgpu_device *adev, | ||
| 2335 | struct amdgpu_vm *vm, | ||
| 2336 | struct amdgpu_fence *fence); | ||
| 2337 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); | ||
| 2338 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
| 2339 | struct amdgpu_vm *vm); | ||
| 2340 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
| 2341 | struct amdgpu_vm *vm); | ||
| 2342 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | ||
| 2343 | struct amdgpu_vm *vm, struct amdgpu_sync *sync); | ||
| 2344 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
| 2345 | struct amdgpu_bo_va *bo_va, | ||
| 2346 | struct ttm_mem_reg *mem); | ||
| 2347 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
| 2348 | struct amdgpu_bo *bo); | ||
| 2349 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
| 2350 | struct amdgpu_bo *bo); | ||
| 2351 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
| 2352 | struct amdgpu_vm *vm, | ||
| 2353 | struct amdgpu_bo *bo); | ||
| 2354 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
| 2355 | struct amdgpu_bo_va *bo_va, | ||
| 2356 | uint64_t addr, uint64_t offset, | ||
| 2357 | uint64_t size, uint32_t flags); | ||
| 2358 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
| 2359 | struct amdgpu_bo_va *bo_va, | ||
| 2360 | uint64_t addr); | ||
| 2361 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
| 2362 | struct amdgpu_bo_va *bo_va); | ||
| 2363 | int amdgpu_vm_free_job(struct amdgpu_job *job); | ||
| 2364 | /* | ||
| 2365 | * functions used by amdgpu_encoder.c | 2360 | * functions used by amdgpu_encoder.c |
| 2366 | */ | 2361 | */ |
| 2367 | struct amdgpu_afmt_acr { | 2362 | struct amdgpu_afmt_acr { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dfc4d02c7a38..1d44d508d4d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
| 127 | return 0; | 127 | return 0; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | ||
| 131 | struct drm_file *filp, | ||
| 132 | struct amdgpu_ctx *ctx, | ||
| 133 | struct amdgpu_ib *ibs, | ||
| 134 | uint32_t num_ibs) | ||
| 135 | { | ||
| 136 | struct amdgpu_cs_parser *parser; | ||
| 137 | int i; | ||
| 138 | |||
| 139 | parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); | ||
| 140 | if (!parser) | ||
| 141 | return NULL; | ||
| 142 | |||
| 143 | parser->adev = adev; | ||
| 144 | parser->filp = filp; | ||
| 145 | parser->ctx = ctx; | ||
| 146 | parser->ibs = ibs; | ||
| 147 | parser->num_ibs = num_ibs; | ||
| 148 | for (i = 0; i < num_ibs; i++) | ||
| 149 | ibs[i].ctx = ctx; | ||
| 150 | |||
| 151 | return parser; | ||
| 152 | } | ||
| 153 | |||
| 154 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | 130 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) |
| 155 | { | 131 | { |
| 156 | union drm_amdgpu_cs *cs = data; | 132 | union drm_amdgpu_cs *cs = data; |
| @@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, | |||
| 463 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; | 439 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; |
| 464 | } | 440 | } |
| 465 | 441 | ||
| 466 | static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) | 442 | /** |
| 443 | * cs_parser_fini() - clean parser states | ||
| 444 | * @parser: parser structure holding parsing context. | ||
| 445 | * @error: error number | ||
| 446 | * | ||
| 447 | * If error is set than unvalidate buffer, otherwise just free memory | ||
| 448 | * used by parsing context. | ||
| 449 | **/ | ||
| 450 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | ||
| 467 | { | 451 | { |
| 452 | unsigned i; | ||
| 453 | |||
| 468 | if (!error) { | 454 | if (!error) { |
| 469 | /* Sort the buffer list from the smallest to largest buffer, | 455 | /* Sort the buffer list from the smallest to largest buffer, |
| 470 | * which affects the order of buffers in the LRU list. | 456 | * which affects the order of buffers in the LRU list. |
| @@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err | |||
| 479 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | 465 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); |
| 480 | 466 | ||
| 481 | ttm_eu_fence_buffer_objects(&parser->ticket, | 467 | ttm_eu_fence_buffer_objects(&parser->ticket, |
| 482 | &parser->validated, | 468 | &parser->validated, |
| 483 | &parser->ibs[parser->num_ibs-1].fence->base); | 469 | parser->fence); |
| 484 | } else if (backoff) { | 470 | } else if (backoff) { |
| 485 | ttm_eu_backoff_reservation(&parser->ticket, | 471 | ttm_eu_backoff_reservation(&parser->ticket, |
| 486 | &parser->validated); | 472 | &parser->validated); |
| 487 | } | 473 | } |
| 488 | } | 474 | fence_put(parser->fence); |
| 489 | 475 | ||
| 490 | static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | ||
| 491 | { | ||
| 492 | unsigned i; | ||
| 493 | if (parser->ctx) | 476 | if (parser->ctx) |
| 494 | amdgpu_ctx_put(parser->ctx); | 477 | amdgpu_ctx_put(parser->ctx); |
| 495 | if (parser->bo_list) | 478 | if (parser->bo_list) |
| @@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | |||
| 499 | for (i = 0; i < parser->nchunks; i++) | 482 | for (i = 0; i < parser->nchunks; i++) |
| 500 | drm_free_large(parser->chunks[i].kdata); | 483 | drm_free_large(parser->chunks[i].kdata); |
| 501 | kfree(parser->chunks); | 484 | kfree(parser->chunks); |
| 502 | if (!amdgpu_enable_scheduler) | 485 | if (parser->ibs) |
| 503 | { | 486 | for (i = 0; i < parser->num_ibs; i++) |
| 504 | if (parser->ibs) | 487 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); |
| 505 | for (i = 0; i < parser->num_ibs; i++) | 488 | kfree(parser->ibs); |
| 506 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | 489 | if (parser->uf.bo) |
| 507 | kfree(parser->ibs); | 490 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); |
| 508 | if (parser->uf.bo) | ||
| 509 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | ||
| 510 | } | ||
| 511 | |||
| 512 | kfree(parser); | ||
| 513 | } | ||
| 514 | |||
| 515 | /** | ||
| 516 | * cs_parser_fini() - clean parser states | ||
| 517 | * @parser: parser structure holding parsing context. | ||
| 518 | * @error: error number | ||
| 519 | * | ||
| 520 | * If error is set than unvalidate buffer, otherwise just free memory | ||
| 521 | * used by parsing context. | ||
| 522 | **/ | ||
| 523 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | ||
| 524 | { | ||
| 525 | amdgpu_cs_parser_fini_early(parser, error, backoff); | ||
| 526 | amdgpu_cs_parser_fini_late(parser); | ||
| 527 | } | 491 | } |
| 528 | 492 | ||
| 529 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | 493 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, |
| @@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
| 610 | } | 574 | } |
| 611 | 575 | ||
| 612 | r = amdgpu_bo_vm_update_pte(parser, vm); | 576 | r = amdgpu_bo_vm_update_pte(parser, vm); |
| 613 | if (r) { | 577 | if (!r) |
| 614 | goto out; | 578 | amdgpu_cs_sync_rings(parser); |
| 615 | } | ||
| 616 | amdgpu_cs_sync_rings(parser); | ||
| 617 | if (!amdgpu_enable_scheduler) | ||
| 618 | r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, | ||
| 619 | parser->filp); | ||
| 620 | 579 | ||
| 621 | out: | ||
| 622 | return r; | 580 | return r; |
| 623 | } | 581 | } |
| 624 | 582 | ||
| @@ -826,38 +784,35 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 826 | { | 784 | { |
| 827 | struct amdgpu_device *adev = dev->dev_private; | 785 | struct amdgpu_device *adev = dev->dev_private; |
| 828 | union drm_amdgpu_cs *cs = data; | 786 | union drm_amdgpu_cs *cs = data; |
| 829 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 787 | struct amdgpu_cs_parser parser = {}; |
| 830 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 831 | struct amdgpu_cs_parser *parser; | ||
| 832 | bool reserved_buffers = false; | 788 | bool reserved_buffers = false; |
| 833 | int i, r; | 789 | int i, r; |
| 834 | 790 | ||
| 835 | if (!adev->accel_working) | 791 | if (!adev->accel_working) |
| 836 | return -EBUSY; | 792 | return -EBUSY; |
| 837 | 793 | ||
| 838 | parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); | 794 | parser.adev = adev; |
| 839 | if (!parser) | 795 | parser.filp = filp; |
| 840 | return -ENOMEM; | 796 | |
| 841 | r = amdgpu_cs_parser_init(parser, data); | 797 | r = amdgpu_cs_parser_init(&parser, data); |
| 842 | if (r) { | 798 | if (r) { |
| 843 | DRM_ERROR("Failed to initialize parser !\n"); | 799 | DRM_ERROR("Failed to initialize parser !\n"); |
| 844 | amdgpu_cs_parser_fini(parser, r, false); | 800 | amdgpu_cs_parser_fini(&parser, r, false); |
| 845 | r = amdgpu_cs_handle_lockup(adev, r); | 801 | r = amdgpu_cs_handle_lockup(adev, r); |
| 846 | return r; | 802 | return r; |
| 847 | } | 803 | } |
| 848 | mutex_lock(&vm->mutex); | 804 | r = amdgpu_cs_parser_relocs(&parser); |
| 849 | r = amdgpu_cs_parser_relocs(parser); | ||
| 850 | if (r == -ENOMEM) | 805 | if (r == -ENOMEM) |
| 851 | DRM_ERROR("Not enough memory for command submission!\n"); | 806 | DRM_ERROR("Not enough memory for command submission!\n"); |
| 852 | else if (r && r != -ERESTARTSYS) | 807 | else if (r && r != -ERESTARTSYS) |
| 853 | DRM_ERROR("Failed to process the buffer list %d!\n", r); | 808 | DRM_ERROR("Failed to process the buffer list %d!\n", r); |
| 854 | else if (!r) { | 809 | else if (!r) { |
| 855 | reserved_buffers = true; | 810 | reserved_buffers = true; |
| 856 | r = amdgpu_cs_ib_fill(adev, parser); | 811 | r = amdgpu_cs_ib_fill(adev, &parser); |
| 857 | } | 812 | } |
| 858 | 813 | ||
| 859 | if (!r) { | 814 | if (!r) { |
| 860 | r = amdgpu_cs_dependencies(adev, parser); | 815 | r = amdgpu_cs_dependencies(adev, &parser); |
| 861 | if (r) | 816 | if (r) |
| 862 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); | 817 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); |
| 863 | } | 818 | } |
| @@ -865,63 +820,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 865 | if (r) | 820 | if (r) |
| 866 | goto out; | 821 | goto out; |
| 867 | 822 | ||
| 868 | for (i = 0; i < parser->num_ibs; i++) | 823 | for (i = 0; i < parser.num_ibs; i++) |
| 869 | trace_amdgpu_cs(parser, i); | 824 | trace_amdgpu_cs(&parser, i); |
| 870 | 825 | ||
| 871 | r = amdgpu_cs_ib_vm_chunk(adev, parser); | 826 | r = amdgpu_cs_ib_vm_chunk(adev, &parser); |
| 872 | if (r) | 827 | if (r) |
| 873 | goto out; | 828 | goto out; |
| 874 | 829 | ||
| 875 | if (amdgpu_enable_scheduler && parser->num_ibs) { | 830 | if (amdgpu_enable_scheduler && parser.num_ibs) { |
| 831 | struct amdgpu_ring * ring = parser.ibs->ring; | ||
| 832 | struct amd_sched_fence *fence; | ||
| 876 | struct amdgpu_job *job; | 833 | struct amdgpu_job *job; |
| 877 | struct amdgpu_ring * ring = parser->ibs->ring; | 834 | |
| 878 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 835 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
| 879 | if (!job) { | 836 | if (!job) { |
| 880 | r = -ENOMEM; | 837 | r = -ENOMEM; |
| 881 | goto out; | 838 | goto out; |
| 882 | } | 839 | } |
| 840 | |||
| 883 | job->base.sched = &ring->sched; | 841 | job->base.sched = &ring->sched; |
| 884 | job->base.s_entity = &parser->ctx->rings[ring->idx].entity; | 842 | job->base.s_entity = &parser.ctx->rings[ring->idx].entity; |
| 885 | job->adev = parser->adev; | 843 | job->adev = parser.adev; |
| 886 | job->ibs = parser->ibs; | 844 | job->owner = parser.filp; |
| 887 | job->num_ibs = parser->num_ibs; | 845 | job->free_job = amdgpu_cs_free_job; |
| 888 | job->base.owner = parser->filp; | 846 | |
| 889 | mutex_init(&job->job_lock); | 847 | job->ibs = parser.ibs; |
| 848 | job->num_ibs = parser.num_ibs; | ||
| 849 | parser.ibs = NULL; | ||
| 850 | parser.num_ibs = 0; | ||
| 851 | |||
| 890 | if (job->ibs[job->num_ibs - 1].user) { | 852 | if (job->ibs[job->num_ibs - 1].user) { |
| 891 | memcpy(&job->uf, &parser->uf, | 853 | job->uf = parser.uf; |
| 892 | sizeof(struct amdgpu_user_fence)); | ||
| 893 | job->ibs[job->num_ibs - 1].user = &job->uf; | 854 | job->ibs[job->num_ibs - 1].user = &job->uf; |
| 855 | parser.uf.bo = NULL; | ||
| 894 | } | 856 | } |
| 895 | 857 | ||
| 896 | job->free_job = amdgpu_cs_free_job; | 858 | fence = amd_sched_fence_create(job->base.s_entity, |
| 897 | mutex_lock(&job->job_lock); | 859 | parser.filp); |
| 898 | r = amd_sched_entity_push_job(&job->base); | 860 | if (!fence) { |
| 899 | if (r) { | 861 | r = -ENOMEM; |
| 900 | mutex_unlock(&job->job_lock); | ||
| 901 | amdgpu_cs_free_job(job); | 862 | amdgpu_cs_free_job(job); |
| 902 | kfree(job); | 863 | kfree(job); |
| 903 | goto out; | 864 | goto out; |
| 904 | } | 865 | } |
| 905 | cs->out.handle = | 866 | job->base.s_fence = fence; |
| 906 | amdgpu_ctx_add_fence(parser->ctx, ring, | 867 | parser.fence = fence_get(&fence->base); |
| 907 | &job->base.s_fence->base); | ||
| 908 | parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; | ||
| 909 | 868 | ||
| 910 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | 869 | cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, |
| 911 | ttm_eu_fence_buffer_objects(&parser->ticket, | 870 | &fence->base); |
| 912 | &parser->validated, | 871 | job->ibs[job->num_ibs - 1].sequence = cs->out.handle; |
| 913 | &job->base.s_fence->base); | ||
| 914 | 872 | ||
| 915 | mutex_unlock(&job->job_lock); | 873 | trace_amdgpu_cs_ioctl(job); |
| 916 | amdgpu_cs_parser_fini_late(parser); | 874 | amd_sched_entity_push_job(&job->base); |
| 917 | mutex_unlock(&vm->mutex); | 875 | |
| 918 | return 0; | 876 | } else { |
| 877 | struct amdgpu_fence *fence; | ||
| 878 | |||
| 879 | r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, | ||
| 880 | parser.filp); | ||
| 881 | fence = parser.ibs[parser.num_ibs - 1].fence; | ||
| 882 | parser.fence = fence_get(&fence->base); | ||
| 883 | cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; | ||
| 919 | } | 884 | } |
| 920 | 885 | ||
| 921 | cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; | ||
| 922 | out: | 886 | out: |
| 923 | amdgpu_cs_parser_fini(parser, r, reserved_buffers); | 887 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); |
| 924 | mutex_unlock(&vm->mutex); | ||
| 925 | r = amdgpu_cs_handle_lockup(adev, r); | 888 | r = amdgpu_cs_handle_lockup(adev, r); |
| 926 | return r; | 889 | return r; |
| 927 | } | 890 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 257d72205bb5..3671f9f220bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
| @@ -47,6 +47,9 @@ | |||
| 47 | * that the the relevant GPU caches have been flushed. | 47 | * that the the relevant GPU caches have been flushed. |
| 48 | */ | 48 | */ |
| 49 | 49 | ||
| 50 | static struct kmem_cache *amdgpu_fence_slab; | ||
| 51 | static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); | ||
| 52 | |||
| 50 | /** | 53 | /** |
| 51 | * amdgpu_fence_write - write a fence value | 54 | * amdgpu_fence_write - write a fence value |
| 52 | * | 55 | * |
| @@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) | |||
| 85 | } | 88 | } |
| 86 | 89 | ||
| 87 | /** | 90 | /** |
| 88 | * amdgpu_fence_schedule_check - schedule lockup check | ||
| 89 | * | ||
| 90 | * @ring: pointer to struct amdgpu_ring | ||
| 91 | * | ||
| 92 | * Queues a delayed work item to check for lockups. | ||
| 93 | */ | ||
| 94 | static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) | ||
| 95 | { | ||
| 96 | /* | ||
| 97 | * Do not reset the timer here with mod_delayed_work, | ||
| 98 | * this can livelock in an interaction with TTM delayed destroy. | ||
| 99 | */ | ||
| 100 | queue_delayed_work(system_power_efficient_wq, | ||
| 101 | &ring->fence_drv.lockup_work, | ||
| 102 | AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 103 | } | ||
| 104 | |||
| 105 | /** | ||
| 106 | * amdgpu_fence_emit - emit a fence on the requested ring | 91 | * amdgpu_fence_emit - emit a fence on the requested ring |
| 107 | * | 92 | * |
| 108 | * @ring: ring the fence is associated with | 93 | * @ring: ring the fence is associated with |
| @@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | |||
| 118 | struct amdgpu_device *adev = ring->adev; | 103 | struct amdgpu_device *adev = ring->adev; |
| 119 | 104 | ||
| 120 | /* we are protected by the ring emission mutex */ | 105 | /* we are protected by the ring emission mutex */ |
| 121 | *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); | 106 | *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
| 122 | if ((*fence) == NULL) { | 107 | if ((*fence) == NULL) { |
| 123 | return -ENOMEM; | 108 | return -ENOMEM; |
| 124 | } | 109 | } |
| @@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | |||
| 132 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, | 117 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, |
| 133 | (*fence)->seq, | 118 | (*fence)->seq, |
| 134 | AMDGPU_FENCE_FLAG_INT); | 119 | AMDGPU_FENCE_FLAG_INT); |
| 135 | trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); | ||
| 136 | return 0; | 120 | return 0; |
| 137 | } | 121 | } |
| 138 | 122 | ||
| 139 | /** | 123 | /** |
| 124 | * amdgpu_fence_schedule_fallback - schedule fallback check | ||
| 125 | * | ||
| 126 | * @ring: pointer to struct amdgpu_ring | ||
| 127 | * | ||
| 128 | * Start a timer as fallback to our interrupts. | ||
| 129 | */ | ||
| 130 | static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) | ||
| 131 | { | ||
| 132 | mod_timer(&ring->fence_drv.fallback_timer, | ||
| 133 | jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 140 | * amdgpu_fence_activity - check for fence activity | 137 | * amdgpu_fence_activity - check for fence activity |
| 141 | * | 138 | * |
| 142 | * @ring: pointer to struct amdgpu_ring | 139 | * @ring: pointer to struct amdgpu_ring |
| @@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) | |||
| 202 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); | 199 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); |
| 203 | 200 | ||
| 204 | if (seq < last_emitted) | 201 | if (seq < last_emitted) |
| 205 | amdgpu_fence_schedule_check(ring); | 202 | amdgpu_fence_schedule_fallback(ring); |
| 206 | 203 | ||
| 207 | return wake; | 204 | return wake; |
| 208 | } | 205 | } |
| 209 | 206 | ||
| 210 | /** | 207 | /** |
| 211 | * amdgpu_fence_check_lockup - check for hardware lockup | 208 | * amdgpu_fence_process - process a fence |
| 212 | * | 209 | * |
| 213 | * @work: delayed work item | 210 | * @adev: amdgpu_device pointer |
| 211 | * @ring: ring index the fence is associated with | ||
| 214 | * | 212 | * |
| 215 | * Checks for fence activity and if there is none probe | 213 | * Checks the current fence value and wakes the fence queue |
| 216 | * the hardware if a lockup occured. | 214 | * if the sequence number has increased (all asics). |
| 217 | */ | 215 | */ |
| 218 | static void amdgpu_fence_check_lockup(struct work_struct *work) | 216 | void amdgpu_fence_process(struct amdgpu_ring *ring) |
| 219 | { | 217 | { |
| 220 | struct amdgpu_fence_driver *fence_drv; | ||
| 221 | struct amdgpu_ring *ring; | ||
| 222 | |||
| 223 | fence_drv = container_of(work, struct amdgpu_fence_driver, | ||
| 224 | lockup_work.work); | ||
| 225 | ring = fence_drv->ring; | ||
| 226 | |||
| 227 | if (amdgpu_fence_activity(ring)) | 218 | if (amdgpu_fence_activity(ring)) |
| 228 | wake_up_all(&ring->fence_drv.fence_queue); | 219 | wake_up_all(&ring->fence_drv.fence_queue); |
| 229 | } | 220 | } |
| 230 | 221 | ||
| 231 | /** | 222 | /** |
| 232 | * amdgpu_fence_process - process a fence | 223 | * amdgpu_fence_fallback - fallback for hardware interrupts |
| 233 | * | 224 | * |
| 234 | * @adev: amdgpu_device pointer | 225 | * @work: delayed work item |
| 235 | * @ring: ring index the fence is associated with | ||
| 236 | * | 226 | * |
| 237 | * Checks the current fence value and wakes the fence queue | 227 | * Checks for fence activity. |
| 238 | * if the sequence number has increased (all asics). | ||
| 239 | */ | 228 | */ |
| 240 | void amdgpu_fence_process(struct amdgpu_ring *ring) | 229 | static void amdgpu_fence_fallback(unsigned long arg) |
| 241 | { | 230 | { |
| 242 | if (amdgpu_fence_activity(ring)) | 231 | struct amdgpu_ring *ring = (void *)arg; |
| 243 | wake_up_all(&ring->fence_drv.fence_queue); | 232 | |
| 233 | amdgpu_fence_process(ring); | ||
| 244 | } | 234 | } |
| 245 | 235 | ||
| 246 | /** | 236 | /** |
| @@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) | |||
| 290 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) | 280 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) |
| 291 | return 0; | 281 | return 0; |
| 292 | 282 | ||
| 293 | amdgpu_fence_schedule_check(ring); | 283 | amdgpu_fence_schedule_fallback(ring); |
| 294 | wait_event(ring->fence_drv.fence_queue, ( | 284 | wait_event(ring->fence_drv.fence_queue, ( |
| 295 | (signaled = amdgpu_fence_seq_signaled(ring, seq)))); | 285 | (signaled = amdgpu_fence_seq_signaled(ring, seq)))); |
| 296 | 286 | ||
| @@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
| 491 | atomic64_set(&ring->fence_drv.last_seq, 0); | 481 | atomic64_set(&ring->fence_drv.last_seq, 0); |
| 492 | ring->fence_drv.initialized = false; | 482 | ring->fence_drv.initialized = false; |
| 493 | 483 | ||
| 494 | INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, | 484 | setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, |
| 495 | amdgpu_fence_check_lockup); | 485 | (unsigned long)ring); |
| 496 | ring->fence_drv.ring = ring; | ||
| 497 | 486 | ||
| 498 | init_waitqueue_head(&ring->fence_drv.fence_queue); | 487 | init_waitqueue_head(&ring->fence_drv.fence_queue); |
| 499 | 488 | ||
| @@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
| 536 | */ | 525 | */ |
| 537 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) | 526 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) |
| 538 | { | 527 | { |
| 528 | if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { | ||
| 529 | amdgpu_fence_slab = kmem_cache_create( | ||
| 530 | "amdgpu_fence", sizeof(struct amdgpu_fence), 0, | ||
| 531 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 532 | if (!amdgpu_fence_slab) | ||
| 533 | return -ENOMEM; | ||
| 534 | } | ||
| 539 | if (amdgpu_debugfs_fence_init(adev)) | 535 | if (amdgpu_debugfs_fence_init(adev)) |
| 540 | dev_err(adev->dev, "fence debugfs file creation failed\n"); | 536 | dev_err(adev->dev, "fence debugfs file creation failed\n"); |
| 541 | 537 | ||
| @@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
| 554 | { | 550 | { |
| 555 | int i, r; | 551 | int i, r; |
| 556 | 552 | ||
| 553 | if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) | ||
| 554 | kmem_cache_destroy(amdgpu_fence_slab); | ||
| 557 | mutex_lock(&adev->ring_lock); | 555 | mutex_lock(&adev->ring_lock); |
| 558 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 556 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
| 559 | struct amdgpu_ring *ring = adev->rings[i]; | 557 | struct amdgpu_ring *ring = adev->rings[i]; |
| 558 | |||
| 560 | if (!ring || !ring->fence_drv.initialized) | 559 | if (!ring || !ring->fence_drv.initialized) |
| 561 | continue; | 560 | continue; |
| 562 | r = amdgpu_fence_wait_empty(ring); | 561 | r = amdgpu_fence_wait_empty(ring); |
| @@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
| 568 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | 567 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, |
| 569 | ring->fence_drv.irq_type); | 568 | ring->fence_drv.irq_type); |
| 570 | amd_sched_fini(&ring->sched); | 569 | amd_sched_fini(&ring->sched); |
| 570 | del_timer_sync(&ring->fence_drv.fallback_timer); | ||
| 571 | ring->fence_drv.initialized = false; | 571 | ring->fence_drv.initialized = false; |
| 572 | } | 572 | } |
| 573 | mutex_unlock(&adev->ring_lock); | 573 | mutex_unlock(&adev->ring_lock); |
| @@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
| 751 | fence->fence_wake.func = amdgpu_fence_check_signaled; | 751 | fence->fence_wake.func = amdgpu_fence_check_signaled; |
| 752 | __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); | 752 | __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); |
| 753 | fence_get(f); | 753 | fence_get(f); |
| 754 | amdgpu_fence_schedule_check(ring); | 754 | if (!timer_pending(&ring->fence_drv.fallback_timer)) |
| 755 | amdgpu_fence_schedule_fallback(ring); | ||
| 755 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); | 756 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); |
| 756 | return true; | 757 | return true; |
| 757 | } | 758 | } |
| 758 | 759 | ||
| 760 | static void amdgpu_fence_release(struct fence *f) | ||
| 761 | { | ||
| 762 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 763 | kmem_cache_free(amdgpu_fence_slab, fence); | ||
| 764 | } | ||
| 765 | |||
| 759 | const struct fence_ops amdgpu_fence_ops = { | 766 | const struct fence_ops amdgpu_fence_ops = { |
| 760 | .get_driver_name = amdgpu_fence_get_driver_name, | 767 | .get_driver_name = amdgpu_fence_get_driver_name, |
| 761 | .get_timeline_name = amdgpu_fence_get_timeline_name, | 768 | .get_timeline_name = amdgpu_fence_get_timeline_name, |
| 762 | .enable_signaling = amdgpu_fence_enable_signaling, | 769 | .enable_signaling = amdgpu_fence_enable_signaling, |
| 763 | .signaled = amdgpu_fence_is_signaled, | 770 | .signaled = amdgpu_fence_is_signaled, |
| 764 | .wait = fence_default_wait, | 771 | .wait = fence_default_wait, |
| 765 | .release = NULL, | 772 | .release = amdgpu_fence_release, |
| 766 | }; | 773 | }; |
| 767 | 774 | ||
| 768 | /* | 775 | /* |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 087332858853..fc32fc01a64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
| @@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
| 115 | struct amdgpu_vm *vm = &fpriv->vm; | 115 | struct amdgpu_vm *vm = &fpriv->vm; |
| 116 | struct amdgpu_bo_va *bo_va; | 116 | struct amdgpu_bo_va *bo_va; |
| 117 | int r; | 117 | int r; |
| 118 | mutex_lock(&vm->mutex); | ||
| 119 | r = amdgpu_bo_reserve(rbo, false); | 118 | r = amdgpu_bo_reserve(rbo, false); |
| 120 | if (r) { | 119 | if (r) |
| 121 | mutex_unlock(&vm->mutex); | ||
| 122 | return r; | 120 | return r; |
| 123 | } | ||
| 124 | 121 | ||
| 125 | bo_va = amdgpu_vm_bo_find(vm, rbo); | 122 | bo_va = amdgpu_vm_bo_find(vm, rbo); |
| 126 | if (!bo_va) { | 123 | if (!bo_va) { |
| @@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
| 129 | ++bo_va->ref_count; | 126 | ++bo_va->ref_count; |
| 130 | } | 127 | } |
| 131 | amdgpu_bo_unreserve(rbo); | 128 | amdgpu_bo_unreserve(rbo); |
| 132 | mutex_unlock(&vm->mutex); | ||
| 133 | return 0; | 129 | return 0; |
| 134 | } | 130 | } |
| 135 | 131 | ||
| @@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
| 142 | struct amdgpu_vm *vm = &fpriv->vm; | 138 | struct amdgpu_vm *vm = &fpriv->vm; |
| 143 | struct amdgpu_bo_va *bo_va; | 139 | struct amdgpu_bo_va *bo_va; |
| 144 | int r; | 140 | int r; |
| 145 | mutex_lock(&vm->mutex); | ||
| 146 | r = amdgpu_bo_reserve(rbo, true); | 141 | r = amdgpu_bo_reserve(rbo, true); |
| 147 | if (r) { | 142 | if (r) { |
| 148 | mutex_unlock(&vm->mutex); | ||
| 149 | dev_err(adev->dev, "leaking bo va because " | 143 | dev_err(adev->dev, "leaking bo va because " |
| 150 | "we fail to reserve bo (%d)\n", r); | 144 | "we fail to reserve bo (%d)\n", r); |
| 151 | return; | 145 | return; |
| @@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
| 157 | } | 151 | } |
| 158 | } | 152 | } |
| 159 | amdgpu_bo_unreserve(rbo); | 153 | amdgpu_bo_unreserve(rbo); |
| 160 | mutex_unlock(&vm->mutex); | ||
| 161 | } | 154 | } |
| 162 | 155 | ||
| 163 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) | 156 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) |
| @@ -483,6 +476,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
| 483 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 476 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
| 484 | goto error_unreserve; | 477 | goto error_unreserve; |
| 485 | } | 478 | } |
| 479 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | ||
| 480 | if (r) | ||
| 481 | goto error_unreserve; | ||
| 486 | 482 | ||
| 487 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); | 483 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); |
| 488 | if (r) | 484 | if (r) |
| @@ -512,6 +508,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 512 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 508 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
| 513 | struct amdgpu_bo *rbo; | 509 | struct amdgpu_bo *rbo; |
| 514 | struct amdgpu_bo_va *bo_va; | 510 | struct amdgpu_bo_va *bo_va; |
| 511 | struct ttm_validate_buffer tv, tv_pd; | ||
| 512 | struct ww_acquire_ctx ticket; | ||
| 513 | struct list_head list, duplicates; | ||
| 515 | uint32_t invalid_flags, va_flags = 0; | 514 | uint32_t invalid_flags, va_flags = 0; |
| 516 | int r = 0; | 515 | int r = 0; |
| 517 | 516 | ||
| @@ -547,19 +546,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 547 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 546 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
| 548 | if (gobj == NULL) | 547 | if (gobj == NULL) |
| 549 | return -ENOENT; | 548 | return -ENOENT; |
| 550 | mutex_lock(&fpriv->vm.mutex); | ||
| 551 | rbo = gem_to_amdgpu_bo(gobj); | 549 | rbo = gem_to_amdgpu_bo(gobj); |
| 552 | r = amdgpu_bo_reserve(rbo, false); | 550 | INIT_LIST_HEAD(&list); |
| 551 | INIT_LIST_HEAD(&duplicates); | ||
| 552 | tv.bo = &rbo->tbo; | ||
| 553 | tv.shared = true; | ||
| 554 | list_add(&tv.head, &list); | ||
| 555 | |||
| 556 | if (args->operation == AMDGPU_VA_OP_MAP) { | ||
| 557 | tv_pd.bo = &fpriv->vm.page_directory->tbo; | ||
| 558 | tv_pd.shared = true; | ||
| 559 | list_add(&tv_pd.head, &list); | ||
| 560 | } | ||
| 561 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | ||
| 553 | if (r) { | 562 | if (r) { |
| 554 | mutex_unlock(&fpriv->vm.mutex); | ||
| 555 | drm_gem_object_unreference_unlocked(gobj); | 563 | drm_gem_object_unreference_unlocked(gobj); |
| 556 | return r; | 564 | return r; |
| 557 | } | 565 | } |
| 558 | 566 | ||
| 559 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); | 567 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); |
| 560 | if (!bo_va) { | 568 | if (!bo_va) { |
| 561 | amdgpu_bo_unreserve(rbo); | 569 | ttm_eu_backoff_reservation(&ticket, &list); |
| 562 | mutex_unlock(&fpriv->vm.mutex); | 570 | drm_gem_object_unreference_unlocked(gobj); |
| 563 | return -ENOENT; | 571 | return -ENOENT; |
| 564 | } | 572 | } |
| 565 | 573 | ||
| @@ -581,10 +589,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 581 | default: | 589 | default: |
| 582 | break; | 590 | break; |
| 583 | } | 591 | } |
| 584 | 592 | ttm_eu_backoff_reservation(&ticket, &list); | |
| 585 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) | 593 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) |
| 586 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); | 594 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); |
| 587 | mutex_unlock(&fpriv->vm.mutex); | 595 | |
| 588 | drm_gem_object_unreference_unlocked(gobj); | 596 | drm_gem_object_unreference_unlocked(gobj); |
| 589 | return r; | 597 | return r; |
| 590 | } | 598 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index e65987743871..9e25edafa721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, | |||
| 62 | int r; | 62 | int r; |
| 63 | 63 | ||
| 64 | if (size) { | 64 | if (size) { |
| 65 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | 65 | r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, |
| 66 | &ib->sa_bo, size, 256); | 66 | &ib->sa_bo, size, 256); |
| 67 | if (r) { | 67 | if (r) { |
| 68 | dev_err(adev->dev, "failed to get a new IB (%d)\n", r); | 68 | dev_err(adev->dev, "failed to get a new IB (%d)\n", r); |
| @@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | if (ib->vm) | 218 | if (ib->vm) |
| 219 | amdgpu_vm_fence(adev, ib->vm, ib->fence); | 219 | amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); |
| 220 | 220 | ||
| 221 | amdgpu_ring_unlock_commit(ring); | 221 | amdgpu_ring_unlock_commit(ring); |
| 222 | return 0; | 222 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 3c2ff4567798..ea756e77b023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
| @@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 189 | struct amdgpu_sa_manager *sa_manager); | 189 | struct amdgpu_sa_manager *sa_manager); |
| 190 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, | 190 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, |
| 191 | struct amdgpu_sa_manager *sa_manager); | 191 | struct amdgpu_sa_manager *sa_manager); |
| 192 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | 192 | int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, |
| 193 | struct amdgpu_sa_manager *sa_manager, | 193 | struct amdgpu_sa_bo **sa_bo, |
| 194 | struct amdgpu_sa_bo **sa_bo, | 194 | unsigned size, unsigned align); |
| 195 | unsigned size, unsigned align); | ||
| 196 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, | 195 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, |
| 197 | struct amdgpu_sa_bo **sa_bo, | 196 | struct amdgpu_sa_bo **sa_bo, |
| 198 | struct fence *fence); | 197 | struct fence *fence); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 0212b31dc194..8b88edb0434b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | |||
| 311 | return false; | 311 | return false; |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | 314 | int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, |
| 315 | struct amdgpu_sa_manager *sa_manager, | ||
| 316 | struct amdgpu_sa_bo **sa_bo, | 315 | struct amdgpu_sa_bo **sa_bo, |
| 317 | unsigned size, unsigned align) | 316 | unsigned size, unsigned align) |
| 318 | { | 317 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
| 27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
| 28 | #include "amdgpu.h" | 28 | #include "amdgpu.h" |
| 29 | #include "amdgpu_trace.h" | ||
| 29 | 30 | ||
| 30 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) | 31 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) |
| 31 | { | 32 | { |
| @@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) | |||
| 44 | return NULL; | 45 | return NULL; |
| 45 | } | 46 | } |
| 46 | job = to_amdgpu_job(sched_job); | 47 | job = to_amdgpu_job(sched_job); |
| 47 | mutex_lock(&job->job_lock); | 48 | trace_amdgpu_sched_run_job(job); |
| 48 | r = amdgpu_ib_schedule(job->adev, | 49 | r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); |
| 49 | job->num_ibs, | ||
| 50 | job->ibs, | ||
| 51 | job->base.owner); | ||
| 52 | if (r) { | 50 | if (r) { |
| 53 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 51 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
| 54 | goto err; | 52 | goto err; |
| @@ -61,8 +59,6 @@ err: | |||
| 61 | if (job->free_job) | 59 | if (job->free_job) |
| 62 | job->free_job(job); | 60 | job->free_job(job); |
| 63 | 61 | ||
| 64 | mutex_unlock(&job->job_lock); | ||
| 65 | fence_put(&job->base.s_fence->base); | ||
| 66 | kfree(job); | 62 | kfree(job); |
| 67 | return fence ? &fence->base : NULL; | 63 | return fence ? &fence->base : NULL; |
| 68 | } | 64 | } |
| @@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | |||
| 88 | return -ENOMEM; | 84 | return -ENOMEM; |
| 89 | job->base.sched = &ring->sched; | 85 | job->base.sched = &ring->sched; |
| 90 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; | 86 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; |
| 87 | job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); | ||
| 88 | if (!job->base.s_fence) { | ||
| 89 | kfree(job); | ||
| 90 | return -ENOMEM; | ||
| 91 | } | ||
| 92 | *f = fence_get(&job->base.s_fence->base); | ||
| 93 | |||
| 91 | job->adev = adev; | 94 | job->adev = adev; |
| 92 | job->ibs = ibs; | 95 | job->ibs = ibs; |
| 93 | job->num_ibs = num_ibs; | 96 | job->num_ibs = num_ibs; |
| 94 | job->base.owner = owner; | 97 | job->owner = owner; |
| 95 | mutex_init(&job->job_lock); | ||
| 96 | job->free_job = free_job; | 98 | job->free_job = free_job; |
| 97 | mutex_lock(&job->job_lock); | 99 | amd_sched_entity_push_job(&job->base); |
| 98 | r = amd_sched_entity_push_job(&job->base); | ||
| 99 | if (r) { | ||
| 100 | mutex_unlock(&job->job_lock); | ||
| 101 | kfree(job); | ||
| 102 | return r; | ||
| 103 | } | ||
| 104 | *f = fence_get(&job->base.s_fence->base); | ||
| 105 | mutex_unlock(&job->job_lock); | ||
| 106 | } else { | 100 | } else { |
| 107 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); | 101 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); |
| 108 | if (r) | 102 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c index ff3ca52ec6fe..1caaf201b708 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | |||
| @@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev, | |||
| 40 | if (*semaphore == NULL) { | 40 | if (*semaphore == NULL) { |
| 41 | return -ENOMEM; | 41 | return -ENOMEM; |
| 42 | } | 42 | } |
| 43 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | 43 | r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, |
| 44 | &(*semaphore)->sa_bo, 8, 8); | 44 | &(*semaphore)->sa_bo, 8, 8); |
| 45 | if (r) { | 45 | if (r) { |
| 46 | kfree(*semaphore); | 46 | kfree(*semaphore); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6697fd05217..dd005c336c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
| @@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, | |||
| 302 | return -EINVAL; | 302 | return -EINVAL; |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || | 305 | if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { |
| 306 | (count >= AMDGPU_NUM_SYNCS)) { | 306 | r = fence_wait(&fence->base, true); |
| 307 | if (r) | ||
| 308 | return r; | ||
| 309 | continue; | ||
| 310 | } | ||
| 311 | |||
| 312 | if (count >= AMDGPU_NUM_SYNCS) { | ||
| 307 | /* not enough room, wait manually */ | 313 | /* not enough room, wait manually */ |
| 308 | r = fence_wait(&fence->base, false); | 314 | r = fence_wait(&fence->base, false); |
| 309 | if (r) | 315 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 76ecbaf72a2e..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
| @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, | |||
| 48 | __entry->fences) | 48 | __entry->fences) |
| 49 | ); | 49 | ); |
| 50 | 50 | ||
| 51 | TRACE_EVENT(amdgpu_cs_ioctl, | ||
| 52 | TP_PROTO(struct amdgpu_job *job), | ||
| 53 | TP_ARGS(job), | ||
| 54 | TP_STRUCT__entry( | ||
| 55 | __field(struct amdgpu_device *, adev) | ||
| 56 | __field(struct amd_sched_job *, sched_job) | ||
| 57 | __field(struct amdgpu_ib *, ib) | ||
| 58 | __field(struct fence *, fence) | ||
| 59 | __field(char *, ring_name) | ||
| 60 | __field(u32, num_ibs) | ||
| 61 | ), | ||
| 62 | |||
| 63 | TP_fast_assign( | ||
| 64 | __entry->adev = job->adev; | ||
| 65 | __entry->sched_job = &job->base; | ||
| 66 | __entry->ib = job->ibs; | ||
| 67 | __entry->fence = &job->base.s_fence->base; | ||
| 68 | __entry->ring_name = job->ibs[0].ring->name; | ||
| 69 | __entry->num_ibs = job->num_ibs; | ||
| 70 | ), | ||
| 71 | TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", | ||
| 72 | __entry->adev, __entry->sched_job, __entry->ib, | ||
| 73 | __entry->fence, __entry->ring_name, __entry->num_ibs) | ||
| 74 | ); | ||
| 75 | |||
| 76 | TRACE_EVENT(amdgpu_sched_run_job, | ||
| 77 | TP_PROTO(struct amdgpu_job *job), | ||
| 78 | TP_ARGS(job), | ||
| 79 | TP_STRUCT__entry( | ||
| 80 | __field(struct amdgpu_device *, adev) | ||
| 81 | __field(struct amd_sched_job *, sched_job) | ||
| 82 | __field(struct amdgpu_ib *, ib) | ||
| 83 | __field(struct fence *, fence) | ||
| 84 | __field(char *, ring_name) | ||
| 85 | __field(u32, num_ibs) | ||
| 86 | ), | ||
| 87 | |||
| 88 | TP_fast_assign( | ||
| 89 | __entry->adev = job->adev; | ||
| 90 | __entry->sched_job = &job->base; | ||
| 91 | __entry->ib = job->ibs; | ||
| 92 | __entry->fence = &job->base.s_fence->base; | ||
| 93 | __entry->ring_name = job->ibs[0].ring->name; | ||
| 94 | __entry->num_ibs = job->num_ibs; | ||
| 95 | ), | ||
| 96 | TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", | ||
| 97 | __entry->adev, __entry->sched_job, __entry->ib, | ||
| 98 | __entry->fence, __entry->ring_name, __entry->num_ibs) | ||
| 99 | ); | ||
| 100 | |||
| 101 | |||
| 51 | TRACE_EVENT(amdgpu_vm_grab_id, | 102 | TRACE_EVENT(amdgpu_vm_grab_id, |
| 52 | TP_PROTO(unsigned vmid, int ring), | 103 | TP_PROTO(unsigned vmid, int ring), |
| 53 | TP_ARGS(vmid, ring), | 104 | TP_ARGS(vmid, ring), |
| @@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set, | |||
| 196 | TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) | 247 | TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) |
| 197 | ); | 248 | ); |
| 198 | 249 | ||
| 199 | DECLARE_EVENT_CLASS(amdgpu_fence_request, | ||
| 200 | |||
| 201 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 202 | |||
| 203 | TP_ARGS(dev, ring, seqno), | ||
| 204 | |||
| 205 | TP_STRUCT__entry( | ||
| 206 | __field(u32, dev) | ||
| 207 | __field(int, ring) | ||
| 208 | __field(u32, seqno) | ||
| 209 | ), | ||
| 210 | |||
| 211 | TP_fast_assign( | ||
| 212 | __entry->dev = dev->primary->index; | ||
| 213 | __entry->ring = ring; | ||
| 214 | __entry->seqno = seqno; | ||
| 215 | ), | ||
| 216 | |||
| 217 | TP_printk("dev=%u, ring=%d, seqno=%u", | ||
| 218 | __entry->dev, __entry->ring, __entry->seqno) | ||
| 219 | ); | ||
| 220 | |||
| 221 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, | ||
| 222 | |||
| 223 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 224 | |||
| 225 | TP_ARGS(dev, ring, seqno) | ||
| 226 | ); | ||
| 227 | |||
| 228 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, | ||
| 229 | |||
| 230 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 231 | |||
| 232 | TP_ARGS(dev, ring, seqno) | ||
| 233 | ); | ||
| 234 | |||
| 235 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, | ||
| 236 | |||
| 237 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 238 | |||
| 239 | TP_ARGS(dev, ring, seqno) | ||
| 240 | ); | ||
| 241 | |||
| 242 | DECLARE_EVENT_CLASS(amdgpu_semaphore_request, | 250 | DECLARE_EVENT_CLASS(amdgpu_semaphore_request, |
| 243 | 251 | ||
| 244 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), | 252 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 81bb8e9fc26d..d4bac5f49939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) | |||
| 1073 | ret = drm_mm_dump_table(m, mm); | 1073 | ret = drm_mm_dump_table(m, mm); |
| 1074 | spin_unlock(&glob->lru_lock); | 1074 | spin_unlock(&glob->lru_lock); |
| 1075 | if (ttm_pl == TTM_PL_VRAM) | 1075 | if (ttm_pl == TTM_PL_VRAM) |
| 1076 | seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", | 1076 | seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", |
| 1077 | adev->mman.bdev.man[ttm_pl].size, | 1077 | adev->mman.bdev.man[ttm_pl].size, |
| 1078 | atomic64_read(&adev->vram_usage) >> 20, | 1078 | (u64)atomic64_read(&adev->vram_usage) >> 20, |
| 1079 | atomic64_read(&adev->vram_vis_usage) >> 20); | 1079 | (u64)atomic64_read(&adev->vram_vis_usage) >> 20); |
| 1080 | return ret; | 1080 | return ret; |
| 1081 | } | 1081 | } |
| 1082 | 1082 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 03f0c3bae516..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
| @@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
| 392 | ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ | 392 | ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ |
| 393 | ib->ptr[ib->length_dw++] = handle; | 393 | ib->ptr[ib->length_dw++] = handle; |
| 394 | 394 | ||
| 395 | ib->ptr[ib->length_dw++] = 0x00000030; /* len */ | 395 | if ((ring->adev->vce.fw_version >> 24) >= 52) |
| 396 | ib->ptr[ib->length_dw++] = 0x00000040; /* len */ | ||
| 397 | else | ||
| 398 | ib->ptr[ib->length_dw++] = 0x00000030; /* len */ | ||
| 396 | ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ | 399 | ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ |
| 397 | ib->ptr[ib->length_dw++] = 0x00000000; | 400 | ib->ptr[ib->length_dw++] = 0x00000000; |
| 398 | ib->ptr[ib->length_dw++] = 0x00000042; | 401 | ib->ptr[ib->length_dw++] = 0x00000042; |
| @@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
| 404 | ib->ptr[ib->length_dw++] = 0x00000100; | 407 | ib->ptr[ib->length_dw++] = 0x00000100; |
| 405 | ib->ptr[ib->length_dw++] = 0x0000000c; | 408 | ib->ptr[ib->length_dw++] = 0x0000000c; |
| 406 | ib->ptr[ib->length_dw++] = 0x00000000; | 409 | ib->ptr[ib->length_dw++] = 0x00000000; |
| 410 | if ((ring->adev->vce.fw_version >> 24) >= 52) { | ||
| 411 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
| 412 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
| 413 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
| 414 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
| 415 | } | ||
| 407 | 416 | ||
| 408 | ib->ptr[ib->length_dw++] = 0x00000014; /* len */ | 417 | ib->ptr[ib->length_dw++] = 0x00000014; /* len */ |
| 409 | ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ | 418 | ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 633a32a48560..ae037e5b6ad0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 143 | unsigned i; | 143 | unsigned i; |
| 144 | 144 | ||
| 145 | /* check if the id is still valid */ | 145 | /* check if the id is still valid */ |
| 146 | if (vm_id->id && vm_id->last_id_use && | 146 | if (vm_id->id) { |
| 147 | vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { | 147 | unsigned id = vm_id->id; |
| 148 | trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); | 148 | long owner; |
| 149 | return 0; | 149 | |
| 150 | owner = atomic_long_read(&adev->vm_manager.ids[id].owner); | ||
| 151 | if (owner == (long)vm) { | ||
| 152 | trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); | ||
| 153 | return 0; | ||
| 154 | } | ||
| 150 | } | 155 | } |
| 151 | 156 | ||
| 152 | /* we definately need to flush */ | 157 | /* we definately need to flush */ |
| @@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 154 | 159 | ||
| 155 | /* skip over VMID 0, since it is the system VM */ | 160 | /* skip over VMID 0, since it is the system VM */ |
| 156 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | 161 | for (i = 1; i < adev->vm_manager.nvm; ++i) { |
| 157 | struct fence *fence = adev->vm_manager.active[i]; | 162 | struct fence *fence = adev->vm_manager.ids[i].active; |
| 158 | struct amdgpu_ring *fring; | 163 | struct amdgpu_ring *fring; |
| 159 | 164 | ||
| 160 | if (fence == NULL) { | 165 | if (fence == NULL) { |
| @@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 176 | if (choices[i]) { | 181 | if (choices[i]) { |
| 177 | struct fence *fence; | 182 | struct fence *fence; |
| 178 | 183 | ||
| 179 | fence = adev->vm_manager.active[choices[i]]; | 184 | fence = adev->vm_manager.ids[choices[i]].active; |
| 180 | vm_id->id = choices[i]; | 185 | vm_id->id = choices[i]; |
| 181 | 186 | ||
| 182 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); | 187 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); |
| @@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 207 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | 212 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
| 208 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | 213 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; |
| 209 | struct fence *flushed_updates = vm_id->flushed_updates; | 214 | struct fence *flushed_updates = vm_id->flushed_updates; |
| 210 | bool is_earlier = false; | 215 | bool is_later; |
| 211 | |||
| 212 | if (flushed_updates && updates) { | ||
| 213 | BUG_ON(flushed_updates->context != updates->context); | ||
| 214 | is_earlier = (updates->seqno - flushed_updates->seqno <= | ||
| 215 | INT_MAX) ? true : false; | ||
| 216 | } | ||
| 217 | 216 | ||
| 218 | if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || | 217 | if (!flushed_updates) |
| 219 | is_earlier) { | 218 | is_later = true; |
| 219 | else if (!updates) | ||
| 220 | is_later = false; | ||
| 221 | else | ||
| 222 | is_later = fence_is_later(updates, flushed_updates); | ||
| 220 | 223 | ||
| 224 | if (pd_addr != vm_id->pd_gpu_addr || is_later) { | ||
| 221 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); | 225 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); |
| 222 | if (is_earlier) { | 226 | if (is_later) { |
| 223 | vm_id->flushed_updates = fence_get(updates); | 227 | vm_id->flushed_updates = fence_get(updates); |
| 224 | fence_put(flushed_updates); | 228 | fence_put(flushed_updates); |
| 225 | } | 229 | } |
| 226 | if (!flushed_updates) | ||
| 227 | vm_id->flushed_updates = fence_get(updates); | ||
| 228 | vm_id->pd_gpu_addr = pd_addr; | 230 | vm_id->pd_gpu_addr = pd_addr; |
| 229 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); | 231 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); |
| 230 | } | 232 | } |
| @@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 244 | */ | 246 | */ |
| 245 | void amdgpu_vm_fence(struct amdgpu_device *adev, | 247 | void amdgpu_vm_fence(struct amdgpu_device *adev, |
| 246 | struct amdgpu_vm *vm, | 248 | struct amdgpu_vm *vm, |
| 247 | struct amdgpu_fence *fence) | 249 | struct fence *fence) |
| 248 | { | 250 | { |
| 249 | unsigned ridx = fence->ring->idx; | 251 | struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); |
| 250 | unsigned vm_id = vm->ids[ridx].id; | 252 | unsigned vm_id = vm->ids[ring->idx].id; |
| 251 | |||
| 252 | fence_put(adev->vm_manager.active[vm_id]); | ||
| 253 | adev->vm_manager.active[vm_id] = fence_get(&fence->base); | ||
| 254 | 253 | ||
| 255 | fence_put(vm->ids[ridx].last_id_use); | 254 | fence_put(adev->vm_manager.ids[vm_id].active); |
| 256 | vm->ids[ridx].last_id_use = fence_get(&fence->base); | 255 | adev->vm_manager.ids[vm_id].active = fence_get(fence); |
| 256 | atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); | ||
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | /** | 259 | /** |
| @@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) | |||
| 332 | * | 332 | * |
| 333 | * @adev: amdgpu_device pointer | 333 | * @adev: amdgpu_device pointer |
| 334 | * @bo: bo to clear | 334 | * @bo: bo to clear |
| 335 | * | ||
| 336 | * need to reserve bo first before calling it. | ||
| 335 | */ | 337 | */ |
| 336 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | 338 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, |
| 337 | struct amdgpu_bo *bo) | 339 | struct amdgpu_bo *bo) |
| @@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 343 | uint64_t addr; | 345 | uint64_t addr; |
| 344 | int r; | 346 | int r; |
| 345 | 347 | ||
| 346 | r = amdgpu_bo_reserve(bo, false); | ||
| 347 | if (r) | ||
| 348 | return r; | ||
| 349 | |||
| 350 | r = reservation_object_reserve_shared(bo->tbo.resv); | 348 | r = reservation_object_reserve_shared(bo->tbo.resv); |
| 351 | if (r) | 349 | if (r) |
| 352 | return r; | 350 | return r; |
| 353 | 351 | ||
| 354 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 352 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
| 355 | if (r) | 353 | if (r) |
| 356 | goto error_unreserve; | 354 | goto error; |
| 357 | 355 | ||
| 358 | addr = amdgpu_bo_gpu_offset(bo); | 356 | addr = amdgpu_bo_gpu_offset(bo); |
| 359 | entries = amdgpu_bo_size(bo) / 8; | 357 | entries = amdgpu_bo_size(bo) / 8; |
| 360 | 358 | ||
| 361 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); | 359 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); |
| 362 | if (!ib) | 360 | if (!ib) |
| 363 | goto error_unreserve; | 361 | goto error; |
| 364 | 362 | ||
| 365 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); | 363 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); |
| 366 | if (r) | 364 | if (r) |
| @@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 378 | if (!r) | 376 | if (!r) |
| 379 | amdgpu_bo_fence(bo, fence, true); | 377 | amdgpu_bo_fence(bo, fence, true); |
| 380 | fence_put(fence); | 378 | fence_put(fence); |
| 381 | if (amdgpu_enable_scheduler) { | 379 | if (amdgpu_enable_scheduler) |
| 382 | amdgpu_bo_unreserve(bo); | ||
| 383 | return 0; | 380 | return 0; |
| 384 | } | 381 | |
| 385 | error_free: | 382 | error_free: |
| 386 | amdgpu_ib_free(adev, ib); | 383 | amdgpu_ib_free(adev, ib); |
| 387 | kfree(ib); | 384 | kfree(ib); |
| 388 | 385 | ||
| 389 | error_unreserve: | 386 | error: |
| 390 | amdgpu_bo_unreserve(bo); | ||
| 391 | return r; | 387 | return r; |
| 392 | } | 388 | } |
| 393 | 389 | ||
| @@ -926,8 +922,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |||
| 926 | bo_va = list_first_entry(&vm->invalidated, | 922 | bo_va = list_first_entry(&vm->invalidated, |
| 927 | struct amdgpu_bo_va, vm_status); | 923 | struct amdgpu_bo_va, vm_status); |
| 928 | spin_unlock(&vm->status_lock); | 924 | spin_unlock(&vm->status_lock); |
| 929 | 925 | mutex_lock(&bo_va->mutex); | |
| 930 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); | 926 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); |
| 927 | mutex_unlock(&bo_va->mutex); | ||
| 931 | if (r) | 928 | if (r) |
| 932 | return r; | 929 | return r; |
| 933 | 930 | ||
| @@ -971,7 +968,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |||
| 971 | INIT_LIST_HEAD(&bo_va->valids); | 968 | INIT_LIST_HEAD(&bo_va->valids); |
| 972 | INIT_LIST_HEAD(&bo_va->invalids); | 969 | INIT_LIST_HEAD(&bo_va->invalids); |
| 973 | INIT_LIST_HEAD(&bo_va->vm_status); | 970 | INIT_LIST_HEAD(&bo_va->vm_status); |
| 974 | 971 | mutex_init(&bo_va->mutex); | |
| 975 | list_add_tail(&bo_va->bo_list, &bo->va); | 972 | list_add_tail(&bo_va->bo_list, &bo->va); |
| 976 | 973 | ||
| 977 | return bo_va; | 974 | return bo_va; |
| @@ -989,7 +986,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |||
| 989 | * Add a mapping of the BO at the specefied addr into the VM. | 986 | * Add a mapping of the BO at the specefied addr into the VM. |
| 990 | * Returns 0 for success, error for failure. | 987 | * Returns 0 for success, error for failure. |
| 991 | * | 988 | * |
| 992 | * Object has to be reserved and gets unreserved by this function! | 989 | * Object has to be reserved and unreserved outside! |
| 993 | */ | 990 | */ |
| 994 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | 991 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, |
| 995 | struct amdgpu_bo_va *bo_va, | 992 | struct amdgpu_bo_va *bo_va, |
| @@ -1005,30 +1002,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1005 | 1002 | ||
| 1006 | /* validate the parameters */ | 1003 | /* validate the parameters */ |
| 1007 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | 1004 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || |
| 1008 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) { | 1005 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
| 1009 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1010 | return -EINVAL; | 1006 | return -EINVAL; |
| 1011 | } | ||
| 1012 | 1007 | ||
| 1013 | /* make sure object fit at this offset */ | 1008 | /* make sure object fit at this offset */ |
| 1014 | eaddr = saddr + size; | 1009 | eaddr = saddr + size; |
| 1015 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { | 1010 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) |
| 1016 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1017 | return -EINVAL; | 1011 | return -EINVAL; |
| 1018 | } | ||
| 1019 | 1012 | ||
| 1020 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | 1013 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; |
| 1021 | if (last_pfn > adev->vm_manager.max_pfn) { | 1014 | if (last_pfn > adev->vm_manager.max_pfn) { |
| 1022 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", | 1015 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", |
| 1023 | last_pfn, adev->vm_manager.max_pfn); | 1016 | last_pfn, adev->vm_manager.max_pfn); |
| 1024 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1025 | return -EINVAL; | 1017 | return -EINVAL; |
| 1026 | } | 1018 | } |
| 1027 | 1019 | ||
| 1028 | saddr /= AMDGPU_GPU_PAGE_SIZE; | 1020 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
| 1029 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | 1021 | eaddr /= AMDGPU_GPU_PAGE_SIZE; |
| 1030 | 1022 | ||
| 1023 | spin_lock(&vm->it_lock); | ||
| 1031 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); | 1024 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); |
| 1025 | spin_unlock(&vm->it_lock); | ||
| 1032 | if (it) { | 1026 | if (it) { |
| 1033 | struct amdgpu_bo_va_mapping *tmp; | 1027 | struct amdgpu_bo_va_mapping *tmp; |
| 1034 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | 1028 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); |
| @@ -1036,14 +1030,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1036 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | 1030 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " |
| 1037 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | 1031 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, |
| 1038 | tmp->it.start, tmp->it.last + 1); | 1032 | tmp->it.start, tmp->it.last + 1); |
| 1039 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1040 | r = -EINVAL; | 1033 | r = -EINVAL; |
| 1041 | goto error; | 1034 | goto error; |
| 1042 | } | 1035 | } |
| 1043 | 1036 | ||
| 1044 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | 1037 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); |
| 1045 | if (!mapping) { | 1038 | if (!mapping) { |
| 1046 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1047 | r = -ENOMEM; | 1039 | r = -ENOMEM; |
| 1048 | goto error; | 1040 | goto error; |
| 1049 | } | 1041 | } |
| @@ -1054,8 +1046,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1054 | mapping->offset = offset; | 1046 | mapping->offset = offset; |
| 1055 | mapping->flags = flags; | 1047 | mapping->flags = flags; |
| 1056 | 1048 | ||
| 1049 | mutex_lock(&bo_va->mutex); | ||
| 1057 | list_add(&mapping->list, &bo_va->invalids); | 1050 | list_add(&mapping->list, &bo_va->invalids); |
| 1051 | mutex_unlock(&bo_va->mutex); | ||
| 1052 | spin_lock(&vm->it_lock); | ||
| 1058 | interval_tree_insert(&mapping->it, &vm->va); | 1053 | interval_tree_insert(&mapping->it, &vm->va); |
| 1054 | spin_unlock(&vm->it_lock); | ||
| 1059 | trace_amdgpu_vm_bo_map(bo_va, mapping); | 1055 | trace_amdgpu_vm_bo_map(bo_va, mapping); |
| 1060 | 1056 | ||
| 1061 | /* Make sure the page tables are allocated */ | 1057 | /* Make sure the page tables are allocated */ |
| @@ -1067,8 +1063,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1067 | if (eaddr > vm->max_pde_used) | 1063 | if (eaddr > vm->max_pde_used) |
| 1068 | vm->max_pde_used = eaddr; | 1064 | vm->max_pde_used = eaddr; |
| 1069 | 1065 | ||
| 1070 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1071 | |||
| 1072 | /* walk over the address space and allocate the page tables */ | 1066 | /* walk over the address space and allocate the page tables */ |
| 1073 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1067 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
| 1074 | struct reservation_object *resv = vm->page_directory->tbo.resv; | 1068 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
| @@ -1077,13 +1071,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1077 | if (vm->page_tables[pt_idx].bo) | 1071 | if (vm->page_tables[pt_idx].bo) |
| 1078 | continue; | 1072 | continue; |
| 1079 | 1073 | ||
| 1080 | ww_mutex_lock(&resv->lock, NULL); | ||
| 1081 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1074 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
| 1082 | AMDGPU_GPU_PAGE_SIZE, true, | 1075 | AMDGPU_GPU_PAGE_SIZE, true, |
| 1083 | AMDGPU_GEM_DOMAIN_VRAM, | 1076 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1084 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | 1077 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, |
| 1085 | NULL, resv, &pt); | 1078 | NULL, resv, &pt); |
| 1086 | ww_mutex_unlock(&resv->lock); | ||
| 1087 | if (r) | 1079 | if (r) |
| 1088 | goto error_free; | 1080 | goto error_free; |
| 1089 | 1081 | ||
| @@ -1101,7 +1093,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1101 | 1093 | ||
| 1102 | error_free: | 1094 | error_free: |
| 1103 | list_del(&mapping->list); | 1095 | list_del(&mapping->list); |
| 1096 | spin_lock(&vm->it_lock); | ||
| 1104 | interval_tree_remove(&mapping->it, &vm->va); | 1097 | interval_tree_remove(&mapping->it, &vm->va); |
| 1098 | spin_unlock(&vm->it_lock); | ||
| 1105 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1099 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1106 | kfree(mapping); | 1100 | kfree(mapping); |
| 1107 | 1101 | ||
| @@ -1119,7 +1113,7 @@ error: | |||
| 1119 | * Remove a mapping of the BO at the specefied addr from the VM. | 1113 | * Remove a mapping of the BO at the specefied addr from the VM. |
| 1120 | * Returns 0 for success, error for failure. | 1114 | * Returns 0 for success, error for failure. |
| 1121 | * | 1115 | * |
| 1122 | * Object has to be reserved and gets unreserved by this function! | 1116 | * Object has to be reserved and unreserved outside! |
| 1123 | */ | 1117 | */ |
| 1124 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | 1118 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, |
| 1125 | struct amdgpu_bo_va *bo_va, | 1119 | struct amdgpu_bo_va *bo_va, |
| @@ -1130,7 +1124,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
| 1130 | bool valid = true; | 1124 | bool valid = true; |
| 1131 | 1125 | ||
| 1132 | saddr /= AMDGPU_GPU_PAGE_SIZE; | 1126 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
| 1133 | 1127 | mutex_lock(&bo_va->mutex); | |
| 1134 | list_for_each_entry(mapping, &bo_va->valids, list) { | 1128 | list_for_each_entry(mapping, &bo_va->valids, list) { |
| 1135 | if (mapping->it.start == saddr) | 1129 | if (mapping->it.start == saddr) |
| 1136 | break; | 1130 | break; |
| @@ -1145,20 +1139,21 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
| 1145 | } | 1139 | } |
| 1146 | 1140 | ||
| 1147 | if (&mapping->list == &bo_va->invalids) { | 1141 | if (&mapping->list == &bo_va->invalids) { |
| 1148 | amdgpu_bo_unreserve(bo_va->bo); | 1142 | mutex_unlock(&bo_va->mutex); |
| 1149 | return -ENOENT; | 1143 | return -ENOENT; |
| 1150 | } | 1144 | } |
| 1151 | } | 1145 | } |
| 1152 | 1146 | mutex_unlock(&bo_va->mutex); | |
| 1153 | list_del(&mapping->list); | 1147 | list_del(&mapping->list); |
| 1148 | spin_lock(&vm->it_lock); | ||
| 1154 | interval_tree_remove(&mapping->it, &vm->va); | 1149 | interval_tree_remove(&mapping->it, &vm->va); |
| 1150 | spin_unlock(&vm->it_lock); | ||
| 1155 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1151 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1156 | 1152 | ||
| 1157 | if (valid) | 1153 | if (valid) |
| 1158 | list_add(&mapping->list, &vm->freed); | 1154 | list_add(&mapping->list, &vm->freed); |
| 1159 | else | 1155 | else |
| 1160 | kfree(mapping); | 1156 | kfree(mapping); |
| 1161 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1162 | 1157 | ||
| 1163 | return 0; | 1158 | return 0; |
| 1164 | } | 1159 | } |
| @@ -1187,17 +1182,21 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
| 1187 | 1182 | ||
| 1188 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { | 1183 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
| 1189 | list_del(&mapping->list); | 1184 | list_del(&mapping->list); |
| 1185 | spin_lock(&vm->it_lock); | ||
| 1190 | interval_tree_remove(&mapping->it, &vm->va); | 1186 | interval_tree_remove(&mapping->it, &vm->va); |
| 1187 | spin_unlock(&vm->it_lock); | ||
| 1191 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1188 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1192 | list_add(&mapping->list, &vm->freed); | 1189 | list_add(&mapping->list, &vm->freed); |
| 1193 | } | 1190 | } |
| 1194 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | 1191 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { |
| 1195 | list_del(&mapping->list); | 1192 | list_del(&mapping->list); |
| 1193 | spin_lock(&vm->it_lock); | ||
| 1196 | interval_tree_remove(&mapping->it, &vm->va); | 1194 | interval_tree_remove(&mapping->it, &vm->va); |
| 1195 | spin_unlock(&vm->it_lock); | ||
| 1197 | kfree(mapping); | 1196 | kfree(mapping); |
| 1198 | } | 1197 | } |
| 1199 | |||
| 1200 | fence_put(bo_va->last_pt_update); | 1198 | fence_put(bo_va->last_pt_update); |
| 1199 | mutex_destroy(&bo_va->mutex); | ||
| 1201 | kfree(bo_va); | 1200 | kfree(bo_va); |
| 1202 | } | 1201 | } |
| 1203 | 1202 | ||
| @@ -1241,15 +1240,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1241 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 1240 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| 1242 | vm->ids[i].id = 0; | 1241 | vm->ids[i].id = 0; |
| 1243 | vm->ids[i].flushed_updates = NULL; | 1242 | vm->ids[i].flushed_updates = NULL; |
| 1244 | vm->ids[i].last_id_use = NULL; | ||
| 1245 | } | 1243 | } |
| 1246 | mutex_init(&vm->mutex); | ||
| 1247 | vm->va = RB_ROOT; | 1244 | vm->va = RB_ROOT; |
| 1248 | spin_lock_init(&vm->status_lock); | 1245 | spin_lock_init(&vm->status_lock); |
| 1249 | INIT_LIST_HEAD(&vm->invalidated); | 1246 | INIT_LIST_HEAD(&vm->invalidated); |
| 1250 | INIT_LIST_HEAD(&vm->cleared); | 1247 | INIT_LIST_HEAD(&vm->cleared); |
| 1251 | INIT_LIST_HEAD(&vm->freed); | 1248 | INIT_LIST_HEAD(&vm->freed); |
| 1252 | 1249 | spin_lock_init(&vm->it_lock); | |
| 1253 | pd_size = amdgpu_vm_directory_size(adev); | 1250 | pd_size = amdgpu_vm_directory_size(adev); |
| 1254 | pd_entries = amdgpu_vm_num_pdes(adev); | 1251 | pd_entries = amdgpu_vm_num_pdes(adev); |
| 1255 | 1252 | ||
| @@ -1269,8 +1266,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1269 | NULL, NULL, &vm->page_directory); | 1266 | NULL, NULL, &vm->page_directory); |
| 1270 | if (r) | 1267 | if (r) |
| 1271 | return r; | 1268 | return r; |
| 1272 | 1269 | r = amdgpu_bo_reserve(vm->page_directory, false); | |
| 1270 | if (r) { | ||
| 1271 | amdgpu_bo_unref(&vm->page_directory); | ||
| 1272 | vm->page_directory = NULL; | ||
| 1273 | return r; | ||
| 1274 | } | ||
| 1273 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); | 1275 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); |
| 1276 | amdgpu_bo_unreserve(vm->page_directory); | ||
| 1274 | if (r) { | 1277 | if (r) { |
| 1275 | amdgpu_bo_unref(&vm->page_directory); | 1278 | amdgpu_bo_unref(&vm->page_directory); |
| 1276 | vm->page_directory = NULL; | 1279 | vm->page_directory = NULL; |
| @@ -1313,11 +1316,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1313 | 1316 | ||
| 1314 | amdgpu_bo_unref(&vm->page_directory); | 1317 | amdgpu_bo_unref(&vm->page_directory); |
| 1315 | fence_put(vm->page_directory_fence); | 1318 | fence_put(vm->page_directory_fence); |
| 1316 | |||
| 1317 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 1319 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| 1320 | unsigned id = vm->ids[i].id; | ||
| 1321 | |||
| 1322 | atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, | ||
| 1323 | (long)vm, 0); | ||
| 1318 | fence_put(vm->ids[i].flushed_updates); | 1324 | fence_put(vm->ids[i].flushed_updates); |
| 1319 | fence_put(vm->ids[i].last_id_use); | ||
| 1320 | } | 1325 | } |
| 1321 | 1326 | ||
| 1322 | mutex_destroy(&vm->mutex); | 1327 | } |
| 1328 | |||
| 1329 | /** | ||
| 1330 | * amdgpu_vm_manager_fini - cleanup VM manager | ||
| 1331 | * | ||
| 1332 | * @adev: amdgpu_device pointer | ||
| 1333 | * | ||
| 1334 | * Cleanup the VM manager and free resources. | ||
| 1335 | */ | ||
| 1336 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | ||
| 1337 | { | ||
| 1338 | unsigned i; | ||
| 1339 | |||
| 1340 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
| 1341 | fence_put(adev->vm_manager.ids[i].active); | ||
| 1323 | } | 1342 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a1a35a5df8e7..57a2e347f04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, | |||
| 6569 | switch (state) { | 6569 | switch (state) { |
| 6570 | case AMDGPU_IRQ_STATE_DISABLE: | 6570 | case AMDGPU_IRQ_STATE_DISABLE: |
| 6571 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6571 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6572 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; | 6572 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
| 6573 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6573 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6574 | break; | 6574 | break; |
| 6575 | case AMDGPU_IRQ_STATE_ENABLE: | 6575 | case AMDGPU_IRQ_STATE_ENABLE: |
| 6576 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6576 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6577 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; | 6577 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
| 6578 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6578 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6579 | break; | 6579 | break; |
| 6580 | default: | 6580 | default: |
| @@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, | |||
| 6586 | switch (state) { | 6586 | switch (state) { |
| 6587 | case AMDGPU_IRQ_STATE_DISABLE: | 6587 | case AMDGPU_IRQ_STATE_DISABLE: |
| 6588 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6588 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6589 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; | 6589 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
| 6590 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6590 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6591 | break; | 6591 | break; |
| 6592 | case AMDGPU_IRQ_STATE_ENABLE: | 6592 | case AMDGPU_IRQ_STATE_ENABLE: |
| 6593 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6593 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6594 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; | 6594 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
| 6595 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6595 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6596 | break; | 6596 | break; |
| 6597 | default: | 6597 | default: |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6776cf756d40..e1dcab98e249 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] = | |||
| 268 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, | 268 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, |
| 269 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, | 269 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, |
| 270 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, | 270 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, |
| 271 | mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, | ||
| 272 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, | 271 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, |
| 273 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, | 272 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, |
| 274 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, | 273 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, |
| @@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] = | |||
| 296 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, | 295 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, |
| 297 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, | 296 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, |
| 298 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, | 297 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, |
| 299 | mmPCIE_INDEX, 0xffffffff, 0x0140001c, | ||
| 300 | mmPCIE_DATA, 0x000f0000, 0x00000000, | ||
| 301 | mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, | ||
| 302 | mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, | ||
| 303 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, | 298 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, |
| 304 | }; | 299 | }; |
| 305 | 300 | ||
| @@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) | |||
| 1000 | adev->gfx.config.max_cu_per_sh = 16; | 995 | adev->gfx.config.max_cu_per_sh = 16; |
| 1001 | adev->gfx.config.max_sh_per_se = 1; | 996 | adev->gfx.config.max_sh_per_se = 1; |
| 1002 | adev->gfx.config.max_backends_per_se = 4; | 997 | adev->gfx.config.max_backends_per_se = 4; |
| 1003 | adev->gfx.config.max_texture_channel_caches = 8; | 998 | adev->gfx.config.max_texture_channel_caches = 16; |
| 1004 | adev->gfx.config.max_gprs = 256; | 999 | adev->gfx.config.max_gprs = 256; |
| 1005 | adev->gfx.config.max_gs_threads = 32; | 1000 | adev->gfx.config.max_gs_threads = 32; |
| 1006 | adev->gfx.config.max_hw_contexts = 8; | 1001 | adev->gfx.config.max_hw_contexts = 8; |
| @@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) | |||
| 1613 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | 1608 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); |
| 1614 | } | 1609 | } |
| 1615 | case CHIP_FIJI: | 1610 | case CHIP_FIJI: |
| 1611 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | ||
| 1612 | switch (reg_offset) { | ||
| 1613 | case 0: | ||
| 1614 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1615 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1616 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
| 1617 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1618 | break; | ||
| 1619 | case 1: | ||
| 1620 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1621 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1622 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
| 1623 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1624 | break; | ||
| 1625 | case 2: | ||
| 1626 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1627 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1628 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
| 1629 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1630 | break; | ||
| 1631 | case 3: | ||
| 1632 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1633 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1634 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
| 1635 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1636 | break; | ||
| 1637 | case 4: | ||
| 1638 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1639 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1640 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1641 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1642 | break; | ||
| 1643 | case 5: | ||
| 1644 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1645 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1646 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1647 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1648 | break; | ||
| 1649 | case 6: | ||
| 1650 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1651 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1652 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1653 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1654 | break; | ||
| 1655 | case 7: | ||
| 1656 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1657 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1658 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1659 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1660 | break; | ||
| 1661 | case 8: | ||
| 1662 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
| 1663 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); | ||
| 1664 | break; | ||
| 1665 | case 9: | ||
| 1666 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1667 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1668 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1669 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1670 | break; | ||
| 1671 | case 10: | ||
| 1672 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1673 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1674 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1675 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1676 | break; | ||
| 1677 | case 11: | ||
| 1678 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1679 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1680 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1681 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1682 | break; | ||
| 1683 | case 12: | ||
| 1684 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1685 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1686 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1687 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1688 | break; | ||
| 1689 | case 13: | ||
| 1690 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1691 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1692 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1693 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1694 | break; | ||
| 1695 | case 14: | ||
| 1696 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1697 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1698 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1699 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1700 | break; | ||
| 1701 | case 15: | ||
| 1702 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | | ||
| 1703 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1704 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1705 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1706 | break; | ||
| 1707 | case 16: | ||
| 1708 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1709 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1710 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1711 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1712 | break; | ||
| 1713 | case 17: | ||
| 1714 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1715 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1716 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1717 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1718 | break; | ||
| 1719 | case 18: | ||
| 1720 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
| 1721 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1722 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1723 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1724 | break; | ||
| 1725 | case 19: | ||
| 1726 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
| 1727 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1728 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1729 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1730 | break; | ||
| 1731 | case 20: | ||
| 1732 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
| 1733 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1734 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1735 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1736 | break; | ||
| 1737 | case 21: | ||
| 1738 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | | ||
| 1739 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1740 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1741 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1742 | break; | ||
| 1743 | case 22: | ||
| 1744 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
| 1745 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1746 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1747 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1748 | break; | ||
| 1749 | case 23: | ||
| 1750 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
| 1751 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1752 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1753 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1754 | break; | ||
| 1755 | case 24: | ||
| 1756 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
| 1757 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1758 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1759 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1760 | break; | ||
| 1761 | case 25: | ||
| 1762 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | | ||
| 1763 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1764 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1765 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1766 | break; | ||
| 1767 | case 26: | ||
| 1768 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | | ||
| 1769 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1770 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1771 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1772 | break; | ||
| 1773 | case 27: | ||
| 1774 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1775 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1776 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1777 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1778 | break; | ||
| 1779 | case 28: | ||
| 1780 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1781 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1782 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1783 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1784 | break; | ||
| 1785 | case 29: | ||
| 1786 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1787 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1788 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1789 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1790 | break; | ||
| 1791 | case 30: | ||
| 1792 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1793 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1794 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1795 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1796 | break; | ||
| 1797 | default: | ||
| 1798 | gb_tile_moden = 0; | ||
| 1799 | break; | ||
| 1800 | } | ||
| 1801 | adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; | ||
| 1802 | WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); | ||
| 1803 | } | ||
| 1804 | for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { | ||
| 1805 | switch (reg_offset) { | ||
| 1806 | case 0: | ||
| 1807 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1808 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1809 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1810 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1811 | break; | ||
| 1812 | case 1: | ||
| 1813 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1814 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1815 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1816 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1817 | break; | ||
| 1818 | case 2: | ||
| 1819 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1820 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1821 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1822 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1823 | break; | ||
| 1824 | case 3: | ||
| 1825 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1826 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1827 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1828 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1829 | break; | ||
| 1830 | case 4: | ||
| 1831 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1832 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1833 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1834 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1835 | break; | ||
| 1836 | case 5: | ||
| 1837 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1838 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1839 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1840 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1841 | break; | ||
| 1842 | case 6: | ||
| 1843 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1844 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1845 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1846 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1847 | break; | ||
| 1848 | case 8: | ||
| 1849 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1850 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | | ||
| 1851 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1852 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1853 | break; | ||
| 1854 | case 9: | ||
| 1855 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1856 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1857 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1858 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1859 | break; | ||
| 1860 | case 10: | ||
| 1861 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1862 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1863 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1864 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1865 | break; | ||
| 1866 | case 11: | ||
| 1867 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1868 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1869 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1870 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1871 | break; | ||
| 1872 | case 12: | ||
| 1873 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1874 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1875 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1876 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1877 | break; | ||
| 1878 | case 13: | ||
| 1879 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1880 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1881 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1882 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1883 | break; | ||
| 1884 | case 14: | ||
| 1885 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1886 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1887 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1888 | NUM_BANKS(ADDR_SURF_4_BANK)); | ||
| 1889 | break; | ||
| 1890 | case 7: | ||
| 1891 | /* unused idx */ | ||
| 1892 | continue; | ||
| 1893 | default: | ||
| 1894 | gb_tile_moden = 0; | ||
| 1895 | break; | ||
| 1896 | } | ||
| 1897 | adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; | ||
| 1898 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | ||
| 1899 | } | ||
| 1900 | break; | ||
| 1616 | case CHIP_TONGA: | 1901 | case CHIP_TONGA: |
| 1617 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | 1902 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
| 1618 | switch (reg_offset) { | 1903 | switch (reg_offset) { |
| @@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 2971 | amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); | 3256 | amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); |
| 2972 | switch (adev->asic_type) { | 3257 | switch (adev->asic_type) { |
| 2973 | case CHIP_TONGA: | 3258 | case CHIP_TONGA: |
| 2974 | case CHIP_FIJI: | ||
| 2975 | amdgpu_ring_write(ring, 0x16000012); | 3259 | amdgpu_ring_write(ring, 0x16000012); |
| 2976 | amdgpu_ring_write(ring, 0x0000002A); | 3260 | amdgpu_ring_write(ring, 0x0000002A); |
| 2977 | break; | 3261 | break; |
| 3262 | case CHIP_FIJI: | ||
| 3263 | amdgpu_ring_write(ring, 0x3a00161a); | ||
| 3264 | amdgpu_ring_write(ring, 0x0000002e); | ||
| 3265 | break; | ||
| 2978 | case CHIP_TOPAZ: | 3266 | case CHIP_TOPAZ: |
| 2979 | case CHIP_CARRIZO: | 3267 | case CHIP_CARRIZO: |
| 2980 | amdgpu_ring_write(ring, 0x00000002); | 3268 | amdgpu_ring_write(ring, 0x00000002); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 85bbcdc73fff..7427d8cd4c43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); | 40 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); |
| 41 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); | 41 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); |
| 42 | 42 | ||
| 43 | MODULE_FIRMWARE("radeon/boniare_mc.bin"); | 43 | MODULE_FIRMWARE("radeon/bonaire_mc.bin"); |
| 44 | MODULE_FIRMWARE("radeon/hawaii_mc.bin"); | 44 | MODULE_FIRMWARE("radeon/hawaii_mc.bin"); |
| 45 | 45 | ||
| 46 | /** | 46 | /** |
| @@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | |||
| 501 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | 501 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); |
| 502 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | 502 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); |
| 503 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | 503 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); |
| 504 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); | ||
| 504 | WREG32(mmVM_L2_CNTL, tmp); | 505 | WREG32(mmVM_L2_CNTL, tmp); |
| 505 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | 506 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); |
| 506 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); | 507 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); |
| @@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle) | |||
| 960 | 961 | ||
| 961 | static int gmc_v7_0_sw_fini(void *handle) | 962 | static int gmc_v7_0_sw_fini(void *handle) |
| 962 | { | 963 | { |
| 963 | int i; | ||
| 964 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 964 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 965 | 965 | ||
| 966 | if (adev->vm_manager.enabled) { | 966 | if (adev->vm_manager.enabled) { |
| 967 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 967 | amdgpu_vm_manager_fini(adev); |
| 968 | fence_put(adev->vm_manager.active[i]); | ||
| 969 | gmc_v7_0_vm_fini(adev); | 968 | gmc_v7_0_vm_fini(adev); |
| 970 | adev->vm_manager.enabled = false; | 969 | adev->vm_manager.enabled = false; |
| 971 | } | 970 | } |
| @@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle) | |||
| 1010 | 1009 | ||
| 1011 | static int gmc_v7_0_suspend(void *handle) | 1010 | static int gmc_v7_0_suspend(void *handle) |
| 1012 | { | 1011 | { |
| 1013 | int i; | ||
| 1014 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1012 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1015 | 1013 | ||
| 1016 | if (adev->vm_manager.enabled) { | 1014 | if (adev->vm_manager.enabled) { |
| 1017 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 1015 | amdgpu_vm_manager_fini(adev); |
| 1018 | fence_put(adev->vm_manager.active[i]); | ||
| 1019 | gmc_v7_0_vm_fini(adev); | 1016 | gmc_v7_0_vm_fini(adev); |
| 1020 | adev->vm_manager.enabled = false; | 1017 | adev->vm_manager.enabled = false; |
| 1021 | } | 1018 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1bcc4e74e3b4..cb0e50ebb528 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | |||
| 629 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | 629 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); |
| 630 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | 630 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); |
| 631 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | 631 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); |
| 632 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); | ||
| 632 | WREG32(mmVM_L2_CNTL, tmp); | 633 | WREG32(mmVM_L2_CNTL, tmp); |
| 633 | tmp = RREG32(mmVM_L2_CNTL2); | 634 | tmp = RREG32(mmVM_L2_CNTL2); |
| 634 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | 635 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); |
| @@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle) | |||
| 979 | 980 | ||
| 980 | static int gmc_v8_0_sw_fini(void *handle) | 981 | static int gmc_v8_0_sw_fini(void *handle) |
| 981 | { | 982 | { |
| 982 | int i; | ||
| 983 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 983 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 984 | 984 | ||
| 985 | if (adev->vm_manager.enabled) { | 985 | if (adev->vm_manager.enabled) { |
| 986 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 986 | amdgpu_vm_manager_fini(adev); |
| 987 | fence_put(adev->vm_manager.active[i]); | ||
| 988 | gmc_v8_0_vm_fini(adev); | 987 | gmc_v8_0_vm_fini(adev); |
| 989 | adev->vm_manager.enabled = false; | 988 | adev->vm_manager.enabled = false; |
| 990 | } | 989 | } |
| @@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle) | |||
| 1031 | 1030 | ||
| 1032 | static int gmc_v8_0_suspend(void *handle) | 1031 | static int gmc_v8_0_suspend(void *handle) |
| 1033 | { | 1032 | { |
| 1034 | int i; | ||
| 1035 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1033 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1036 | 1034 | ||
| 1037 | if (adev->vm_manager.enabled) { | 1035 | if (adev->vm_manager.enabled) { |
| 1038 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 1036 | amdgpu_vm_manager_fini(adev); |
| 1039 | fence_put(adev->vm_manager.active[i]); | ||
| 1040 | gmc_v8_0_vm_fini(adev); | 1037 | gmc_v8_0_vm_fini(adev); |
| 1041 | adev->vm_manager.enabled = false; | 1038 | adev->vm_manager.enabled = false; |
| 1042 | } | 1039 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6a52db6ad8d7..370c6c9d81c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -40,6 +40,9 @@ | |||
| 40 | 40 | ||
| 41 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 | 41 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 |
| 42 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 | 42 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 |
| 43 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 | ||
| 44 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 | ||
| 45 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 | ||
| 43 | 46 | ||
| 44 | #define VCE_V3_0_FW_SIZE (384 * 1024) | 47 | #define VCE_V3_0_FW_SIZE (384 * 1024) |
| 45 | #define VCE_V3_0_STACK_SIZE (64 * 1024) | 48 | #define VCE_V3_0_STACK_SIZE (64 * 1024) |
| @@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
| 130 | 133 | ||
| 131 | /* set BUSY flag */ | 134 | /* set BUSY flag */ |
| 132 | WREG32_P(mmVCE_STATUS, 1, ~1); | 135 | WREG32_P(mmVCE_STATUS, 1, ~1); |
| 133 | 136 | if (adev->asic_type >= CHIP_STONEY) | |
| 134 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, | 137 | WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); |
| 135 | ~VCE_VCPU_CNTL__CLK_EN_MASK); | 138 | else |
| 139 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, | ||
| 140 | ~VCE_VCPU_CNTL__CLK_EN_MASK); | ||
| 136 | 141 | ||
| 137 | WREG32_P(mmVCE_SOFT_RESET, | 142 | WREG32_P(mmVCE_SOFT_RESET, |
| 138 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, | 143 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, |
| @@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) | |||
| 391 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | 396 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); |
| 392 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | 397 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); |
| 393 | WREG32(mmVCE_LMI_VM_CTRL, 0); | 398 | WREG32(mmVCE_LMI_VM_CTRL, 0); |
| 394 | 399 | if (adev->asic_type >= CHIP_STONEY) { | |
| 395 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); | 400 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); |
| 401 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); | ||
| 402 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8)); | ||
| 403 | } else | ||
| 404 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); | ||
| 396 | offset = AMDGPU_VCE_FIRMWARE_OFFSET; | 405 | offset = AMDGPU_VCE_FIRMWARE_OFFSET; |
| 397 | size = VCE_V3_0_FW_SIZE; | 406 | size = VCE_V3_0_FW_SIZE; |
| 398 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); | 407 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); |
| @@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, | |||
| 576 | struct amdgpu_iv_entry *entry) | 585 | struct amdgpu_iv_entry *entry) |
| 577 | { | 586 | { |
| 578 | DRM_DEBUG("IH: VCE\n"); | 587 | DRM_DEBUG("IH: VCE\n"); |
| 588 | |||
| 589 | WREG32_P(mmVCE_SYS_INT_STATUS, | ||
| 590 | VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, | ||
| 591 | ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); | ||
| 592 | |||
| 579 | switch (entry->src_data) { | 593 | switch (entry->src_data) { |
| 580 | case 0: | 594 | case 0: |
| 581 | amdgpu_fence_process(&adev->vce.ring[0]); | 595 | amdgpu_fence_process(&adev->vce.ring[0]); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 144f50acc971..c89dc777768f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | |||
| @@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job, | |||
| 16 | TP_ARGS(sched_job), | 16 | TP_ARGS(sched_job), |
| 17 | TP_STRUCT__entry( | 17 | TP_STRUCT__entry( |
| 18 | __field(struct amd_sched_entity *, entity) | 18 | __field(struct amd_sched_entity *, entity) |
| 19 | __field(struct amd_sched_job *, sched_job) | ||
| 20 | __field(struct fence *, fence) | ||
| 19 | __field(const char *, name) | 21 | __field(const char *, name) |
| 20 | __field(u32, job_count) | 22 | __field(u32, job_count) |
| 21 | __field(int, hw_job_count) | 23 | __field(int, hw_job_count) |
| @@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job, | |||
| 23 | 25 | ||
| 24 | TP_fast_assign( | 26 | TP_fast_assign( |
| 25 | __entry->entity = sched_job->s_entity; | 27 | __entry->entity = sched_job->s_entity; |
| 28 | __entry->sched_job = sched_job; | ||
| 29 | __entry->fence = &sched_job->s_fence->base; | ||
| 26 | __entry->name = sched_job->sched->name; | 30 | __entry->name = sched_job->sched->name; |
| 27 | __entry->job_count = kfifo_len( | 31 | __entry->job_count = kfifo_len( |
| 28 | &sched_job->s_entity->job_queue) / sizeof(sched_job); | 32 | &sched_job->s_entity->job_queue) / sizeof(sched_job); |
| 29 | __entry->hw_job_count = atomic_read( | 33 | __entry->hw_job_count = atomic_read( |
| 30 | &sched_job->sched->hw_rq_count); | 34 | &sched_job->sched->hw_rq_count); |
| 31 | ), | 35 | ), |
| 32 | TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", | 36 | TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", |
| 33 | __entry->entity, __entry->name, __entry->job_count, | 37 | __entry->entity, __entry->sched_job, __entry->fence, __entry->name, |
| 34 | __entry->hw_job_count) | 38 | __entry->job_count, __entry->hw_job_count) |
| 35 | ); | 39 | ); |
| 40 | |||
| 41 | TRACE_EVENT(amd_sched_process_job, | ||
| 42 | TP_PROTO(struct amd_sched_fence *fence), | ||
| 43 | TP_ARGS(fence), | ||
| 44 | TP_STRUCT__entry( | ||
| 45 | __field(struct fence *, fence) | ||
| 46 | ), | ||
| 47 | |||
| 48 | TP_fast_assign( | ||
| 49 | __entry->fence = &fence->base; | ||
| 50 | ), | ||
| 51 | TP_printk("fence=%p signaled", __entry->fence) | ||
| 52 | ); | ||
| 53 | |||
| 36 | #endif | 54 | #endif |
| 37 | 55 | ||
| 38 | /* This part must be outside protection */ | 56 | /* This part must be outside protection */ |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 89619a5a4289..651129f2ec1d 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
| @@ -30,10 +30,12 @@ | |||
| 30 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
| 31 | #include "gpu_sched_trace.h" | 31 | #include "gpu_sched_trace.h" |
| 32 | 32 | ||
| 33 | static struct amd_sched_job * | 33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); |
| 34 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); | ||
| 35 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
| 36 | 35 | ||
| 36 | struct kmem_cache *sched_fence_slab; | ||
| 37 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); | ||
| 38 | |||
| 37 | /* Initialize a given run queue struct */ | 39 | /* Initialize a given run queue struct */ |
| 38 | static void amd_sched_rq_init(struct amd_sched_rq *rq) | 40 | static void amd_sched_rq_init(struct amd_sched_rq *rq) |
| 39 | { | 41 | { |
| @@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, | |||
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | /** | 65 | /** |
| 64 | * Select next job from a specified run queue with round robin policy. | 66 | * Select an entity which could provide a job to run |
| 65 | * Return NULL if nothing available. | 67 | * |
| 68 | * @rq The run queue to check. | ||
| 69 | * | ||
| 70 | * Try to find a ready entity, returns NULL if none found. | ||
| 66 | */ | 71 | */ |
| 67 | static struct amd_sched_job * | 72 | static struct amd_sched_entity * |
| 68 | amd_sched_rq_select_job(struct amd_sched_rq *rq) | 73 | amd_sched_rq_select_entity(struct amd_sched_rq *rq) |
| 69 | { | 74 | { |
| 70 | struct amd_sched_entity *entity; | 75 | struct amd_sched_entity *entity; |
| 71 | struct amd_sched_job *sched_job; | ||
| 72 | 76 | ||
| 73 | spin_lock(&rq->lock); | 77 | spin_lock(&rq->lock); |
| 74 | 78 | ||
| 75 | entity = rq->current_entity; | 79 | entity = rq->current_entity; |
| 76 | if (entity) { | 80 | if (entity) { |
| 77 | list_for_each_entry_continue(entity, &rq->entities, list) { | 81 | list_for_each_entry_continue(entity, &rq->entities, list) { |
| 78 | sched_job = amd_sched_entity_pop_job(entity); | 82 | if (amd_sched_entity_is_ready(entity)) { |
| 79 | if (sched_job) { | ||
| 80 | rq->current_entity = entity; | 83 | rq->current_entity = entity; |
| 81 | spin_unlock(&rq->lock); | 84 | spin_unlock(&rq->lock); |
| 82 | return sched_job; | 85 | return entity; |
| 83 | } | 86 | } |
| 84 | } | 87 | } |
| 85 | } | 88 | } |
| 86 | 89 | ||
| 87 | list_for_each_entry(entity, &rq->entities, list) { | 90 | list_for_each_entry(entity, &rq->entities, list) { |
| 88 | 91 | ||
| 89 | sched_job = amd_sched_entity_pop_job(entity); | 92 | if (amd_sched_entity_is_ready(entity)) { |
| 90 | if (sched_job) { | ||
| 91 | rq->current_entity = entity; | 93 | rq->current_entity = entity; |
| 92 | spin_unlock(&rq->lock); | 94 | spin_unlock(&rq->lock); |
| 93 | return sched_job; | 95 | return entity; |
| 94 | } | 96 | } |
| 95 | 97 | ||
| 96 | if (entity == rq->current_entity) | 98 | if (entity == rq->current_entity) |
| @@ -174,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) | |||
| 174 | } | 176 | } |
| 175 | 177 | ||
| 176 | /** | 178 | /** |
| 179 | * Check if entity is ready | ||
| 180 | * | ||
| 181 | * @entity The pointer to a valid scheduler entity | ||
| 182 | * | ||
| 183 | * Return true if entity could provide a job. | ||
| 184 | */ | ||
| 185 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) | ||
| 186 | { | ||
| 187 | if (kfifo_is_empty(&entity->job_queue)) | ||
| 188 | return false; | ||
| 189 | |||
| 190 | if (ACCESS_ONCE(entity->dependency)) | ||
| 191 | return false; | ||
| 192 | |||
| 193 | return true; | ||
| 194 | } | ||
| 195 | |||
| 196 | /** | ||
| 177 | * Destroy a context entity | 197 | * Destroy a context entity |
| 178 | * | 198 | * |
| 179 | * @sched Pointer to scheduler instance | 199 | * @sched Pointer to scheduler instance |
| @@ -208,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) | |||
| 208 | amd_sched_wakeup(entity->sched); | 228 | amd_sched_wakeup(entity->sched); |
| 209 | } | 229 | } |
| 210 | 230 | ||
| 231 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | ||
| 232 | { | ||
| 233 | struct amd_gpu_scheduler *sched = entity->sched; | ||
| 234 | struct fence * fence = entity->dependency; | ||
| 235 | struct amd_sched_fence *s_fence; | ||
| 236 | |||
| 237 | if (fence->context == entity->fence_context) { | ||
| 238 | /* We can ignore fences from ourself */ | ||
| 239 | fence_put(entity->dependency); | ||
| 240 | return false; | ||
| 241 | } | ||
| 242 | |||
| 243 | s_fence = to_amd_sched_fence(fence); | ||
| 244 | if (s_fence && s_fence->sched == sched) { | ||
| 245 | /* Fence is from the same scheduler */ | ||
| 246 | if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { | ||
| 247 | /* Ignore it when it is already scheduled */ | ||
| 248 | fence_put(entity->dependency); | ||
| 249 | return false; | ||
| 250 | } | ||
| 251 | |||
| 252 | /* Wait for fence to be scheduled */ | ||
| 253 | entity->cb.func = amd_sched_entity_wakeup; | ||
| 254 | list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); | ||
| 255 | return true; | ||
| 256 | } | ||
| 257 | |||
| 258 | if (!fence_add_callback(entity->dependency, &entity->cb, | ||
| 259 | amd_sched_entity_wakeup)) | ||
| 260 | return true; | ||
| 261 | |||
| 262 | fence_put(entity->dependency); | ||
| 263 | return false; | ||
| 264 | } | ||
| 265 | |||
| 211 | static struct amd_sched_job * | 266 | static struct amd_sched_job * |
| 212 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) | 267 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) |
| 213 | { | 268 | { |
| 214 | struct amd_gpu_scheduler *sched = entity->sched; | 269 | struct amd_gpu_scheduler *sched = entity->sched; |
| 215 | struct amd_sched_job *sched_job; | 270 | struct amd_sched_job *sched_job; |
| 216 | 271 | ||
| 217 | if (ACCESS_ONCE(entity->dependency)) | ||
| 218 | return NULL; | ||
| 219 | |||
| 220 | if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) | 272 | if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) |
| 221 | return NULL; | 273 | return NULL; |
| 222 | 274 | ||
| 223 | while ((entity->dependency = sched->ops->dependency(sched_job))) { | 275 | while ((entity->dependency = sched->ops->dependency(sched_job))) |
| 224 | 276 | if (amd_sched_entity_add_dependency_cb(entity)) | |
| 225 | if (entity->dependency->context == entity->fence_context) { | ||
| 226 | /* We can ignore fences from ourself */ | ||
| 227 | fence_put(entity->dependency); | ||
| 228 | continue; | ||
| 229 | } | ||
| 230 | |||
| 231 | if (fence_add_callback(entity->dependency, &entity->cb, | ||
| 232 | amd_sched_entity_wakeup)) | ||
| 233 | fence_put(entity->dependency); | ||
| 234 | else | ||
| 235 | return NULL; | 277 | return NULL; |
| 236 | } | ||
| 237 | 278 | ||
| 238 | return sched_job; | 279 | return sched_job; |
| 239 | } | 280 | } |
| @@ -273,22 +314,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | |||
| 273 | * | 314 | * |
| 274 | * Returns 0 for success, negative error code otherwise. | 315 | * Returns 0 for success, negative error code otherwise. |
| 275 | */ | 316 | */ |
| 276 | int amd_sched_entity_push_job(struct amd_sched_job *sched_job) | 317 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job) |
| 277 | { | 318 | { |
| 278 | struct amd_sched_entity *entity = sched_job->s_entity; | 319 | struct amd_sched_entity *entity = sched_job->s_entity; |
| 279 | struct amd_sched_fence *fence = amd_sched_fence_create( | ||
| 280 | entity, sched_job->owner); | ||
| 281 | |||
| 282 | if (!fence) | ||
| 283 | return -ENOMEM; | ||
| 284 | |||
| 285 | fence_get(&fence->base); | ||
| 286 | sched_job->s_fence = fence; | ||
| 287 | 320 | ||
| 288 | wait_event(entity->sched->job_scheduled, | 321 | wait_event(entity->sched->job_scheduled, |
| 289 | amd_sched_entity_in(sched_job)); | 322 | amd_sched_entity_in(sched_job)); |
| 290 | trace_amd_sched_job(sched_job); | 323 | trace_amd_sched_job(sched_job); |
| 291 | return 0; | ||
| 292 | } | 324 | } |
| 293 | 325 | ||
| 294 | /** | 326 | /** |
| @@ -310,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) | |||
| 310 | } | 342 | } |
| 311 | 343 | ||
| 312 | /** | 344 | /** |
| 313 | * Select next to run | 345 | * Select next entity to process |
| 314 | */ | 346 | */ |
| 315 | static struct amd_sched_job * | 347 | static struct amd_sched_entity * |
| 316 | amd_sched_select_job(struct amd_gpu_scheduler *sched) | 348 | amd_sched_select_entity(struct amd_gpu_scheduler *sched) |
| 317 | { | 349 | { |
| 318 | struct amd_sched_job *sched_job; | 350 | struct amd_sched_entity *entity; |
| 319 | 351 | ||
| 320 | if (!amd_sched_ready(sched)) | 352 | if (!amd_sched_ready(sched)) |
| 321 | return NULL; | 353 | return NULL; |
| 322 | 354 | ||
| 323 | /* Kernel run queue has higher priority than normal run queue*/ | 355 | /* Kernel run queue has higher priority than normal run queue*/ |
| 324 | sched_job = amd_sched_rq_select_job(&sched->kernel_rq); | 356 | entity = amd_sched_rq_select_entity(&sched->kernel_rq); |
| 325 | if (sched_job == NULL) | 357 | if (entity == NULL) |
| 326 | sched_job = amd_sched_rq_select_job(&sched->sched_rq); | 358 | entity = amd_sched_rq_select_entity(&sched->sched_rq); |
| 327 | 359 | ||
| 328 | return sched_job; | 360 | return entity; |
| 329 | } | 361 | } |
| 330 | 362 | ||
| 331 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | 363 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) |
| @@ -343,6 +375,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | |||
| 343 | list_del_init(&s_fence->list); | 375 | list_del_init(&s_fence->list); |
| 344 | spin_unlock_irqrestore(&sched->fence_list_lock, flags); | 376 | spin_unlock_irqrestore(&sched->fence_list_lock, flags); |
| 345 | } | 377 | } |
| 378 | trace_amd_sched_process_job(s_fence); | ||
| 346 | fence_put(&s_fence->base); | 379 | fence_put(&s_fence->base); |
| 347 | wake_up_interruptible(&sched->wake_up_worker); | 380 | wake_up_interruptible(&sched->wake_up_worker); |
| 348 | } | 381 | } |
| @@ -386,13 +419,16 @@ static int amd_sched_main(void *param) | |||
| 386 | unsigned long flags; | 419 | unsigned long flags; |
| 387 | 420 | ||
| 388 | wait_event_interruptible(sched->wake_up_worker, | 421 | wait_event_interruptible(sched->wake_up_worker, |
| 389 | kthread_should_stop() || | 422 | (entity = amd_sched_select_entity(sched)) || |
| 390 | (sched_job = amd_sched_select_job(sched))); | 423 | kthread_should_stop()); |
| 391 | 424 | ||
| 425 | if (!entity) | ||
| 426 | continue; | ||
| 427 | |||
| 428 | sched_job = amd_sched_entity_pop_job(entity); | ||
| 392 | if (!sched_job) | 429 | if (!sched_job) |
| 393 | continue; | 430 | continue; |
| 394 | 431 | ||
| 395 | entity = sched_job->s_entity; | ||
| 396 | s_fence = sched_job->s_fence; | 432 | s_fence = sched_job->s_fence; |
| 397 | 433 | ||
| 398 | if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { | 434 | if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { |
| @@ -405,6 +441,7 @@ static int amd_sched_main(void *param) | |||
| 405 | 441 | ||
| 406 | atomic_inc(&sched->hw_rq_count); | 442 | atomic_inc(&sched->hw_rq_count); |
| 407 | fence = sched->ops->run_job(sched_job); | 443 | fence = sched->ops->run_job(sched_job); |
| 444 | amd_sched_fence_scheduled(s_fence); | ||
| 408 | if (fence) { | 445 | if (fence) { |
| 409 | r = fence_add_callback(fence, &s_fence->cb, | 446 | r = fence_add_callback(fence, &s_fence->cb, |
| 410 | amd_sched_process_job); | 447 | amd_sched_process_job); |
| @@ -450,6 +487,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, | |||
| 450 | init_waitqueue_head(&sched->wake_up_worker); | 487 | init_waitqueue_head(&sched->wake_up_worker); |
| 451 | init_waitqueue_head(&sched->job_scheduled); | 488 | init_waitqueue_head(&sched->job_scheduled); |
| 452 | atomic_set(&sched->hw_rq_count, 0); | 489 | atomic_set(&sched->hw_rq_count, 0); |
| 490 | if (atomic_inc_return(&sched_fence_slab_ref) == 1) { | ||
| 491 | sched_fence_slab = kmem_cache_create( | ||
| 492 | "amd_sched_fence", sizeof(struct amd_sched_fence), 0, | ||
| 493 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 494 | if (!sched_fence_slab) | ||
| 495 | return -ENOMEM; | ||
| 496 | } | ||
| 453 | 497 | ||
| 454 | /* Each scheduler will run on a seperate kernel thread */ | 498 | /* Each scheduler will run on a seperate kernel thread */ |
| 455 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); | 499 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); |
| @@ -470,4 +514,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) | |||
| 470 | { | 514 | { |
| 471 | if (sched->thread) | 515 | if (sched->thread) |
| 472 | kthread_stop(sched->thread); | 516 | kthread_stop(sched->thread); |
| 517 | if (atomic_dec_and_test(&sched_fence_slab_ref)) | ||
| 518 | kmem_cache_destroy(sched_fence_slab); | ||
| 473 | } | 519 | } |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 929e9aced041..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
| @@ -27,9 +27,14 @@ | |||
| 27 | #include <linux/kfifo.h> | 27 | #include <linux/kfifo.h> |
| 28 | #include <linux/fence.h> | 28 | #include <linux/fence.h> |
| 29 | 29 | ||
| 30 | #define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS | ||
| 31 | |||
| 30 | struct amd_gpu_scheduler; | 32 | struct amd_gpu_scheduler; |
| 31 | struct amd_sched_rq; | 33 | struct amd_sched_rq; |
| 32 | 34 | ||
| 35 | extern struct kmem_cache *sched_fence_slab; | ||
| 36 | extern atomic_t sched_fence_slab_ref; | ||
| 37 | |||
| 33 | /** | 38 | /** |
| 34 | * A scheduler entity is a wrapper around a job queue or a group | 39 | * A scheduler entity is a wrapper around a job queue or a group |
| 35 | * of other entities. Entities take turns emitting jobs from their | 40 | * of other entities. Entities take turns emitting jobs from their |
| @@ -65,6 +70,7 @@ struct amd_sched_rq { | |||
| 65 | struct amd_sched_fence { | 70 | struct amd_sched_fence { |
| 66 | struct fence base; | 71 | struct fence base; |
| 67 | struct fence_cb cb; | 72 | struct fence_cb cb; |
| 73 | struct list_head scheduled_cb; | ||
| 68 | struct amd_gpu_scheduler *sched; | 74 | struct amd_gpu_scheduler *sched; |
| 69 | spinlock_t lock; | 75 | spinlock_t lock; |
| 70 | void *owner; | 76 | void *owner; |
| @@ -76,7 +82,6 @@ struct amd_sched_job { | |||
| 76 | struct amd_gpu_scheduler *sched; | 82 | struct amd_gpu_scheduler *sched; |
| 77 | struct amd_sched_entity *s_entity; | 83 | struct amd_sched_entity *s_entity; |
| 78 | struct amd_sched_fence *s_fence; | 84 | struct amd_sched_fence *s_fence; |
| 79 | void *owner; | ||
| 80 | }; | 85 | }; |
| 81 | 86 | ||
| 82 | extern const struct fence_ops amd_sched_fence_ops; | 87 | extern const struct fence_ops amd_sched_fence_ops; |
| @@ -128,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
| 128 | uint32_t jobs); | 133 | uint32_t jobs); |
| 129 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | 134 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, |
| 130 | struct amd_sched_entity *entity); | 135 | struct amd_sched_entity *entity); |
| 131 | int amd_sched_entity_push_job(struct amd_sched_job *sched_job); | 136 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job); |
| 132 | 137 | ||
| 133 | struct amd_sched_fence *amd_sched_fence_create( | 138 | struct amd_sched_fence *amd_sched_fence_create( |
| 134 | struct amd_sched_entity *s_entity, void *owner); | 139 | struct amd_sched_entity *s_entity, void *owner); |
| 140 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence); | ||
| 135 | void amd_sched_fence_signal(struct amd_sched_fence *fence); | 141 | void amd_sched_fence_signal(struct amd_sched_fence *fence); |
| 136 | 142 | ||
| 137 | |||
| 138 | #endif | 143 | #endif |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index d802638094f4..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
| @@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity | |||
| 32 | struct amd_sched_fence *fence = NULL; | 32 | struct amd_sched_fence *fence = NULL; |
| 33 | unsigned seq; | 33 | unsigned seq; |
| 34 | 34 | ||
| 35 | fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); | 35 | fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); |
| 36 | if (fence == NULL) | 36 | if (fence == NULL) |
| 37 | return NULL; | 37 | return NULL; |
| 38 | |||
| 39 | INIT_LIST_HEAD(&fence->scheduled_cb); | ||
| 38 | fence->owner = owner; | 40 | fence->owner = owner; |
| 39 | fence->sched = s_entity->sched; | 41 | fence->sched = s_entity->sched; |
| 40 | spin_lock_init(&fence->lock); | 42 | spin_lock_init(&fence->lock); |
| @@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) | |||
| 55 | FENCE_TRACE(&fence->base, "was already signaled\n"); | 57 | FENCE_TRACE(&fence->base, "was already signaled\n"); |
| 56 | } | 58 | } |
| 57 | 59 | ||
| 60 | void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) | ||
| 61 | { | ||
| 62 | struct fence_cb *cur, *tmp; | ||
| 63 | |||
| 64 | set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); | ||
| 65 | list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { | ||
| 66 | list_del_init(&cur->node); | ||
| 67 | cur->func(&s_fence->base, cur); | ||
| 68 | } | ||
| 69 | } | ||
| 70 | |||
| 58 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) | 71 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) |
| 59 | { | 72 | { |
| 60 | return "amd_sched"; | 73 | return "amd_sched"; |
| @@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) | |||
| 71 | return true; | 84 | return true; |
| 72 | } | 85 | } |
| 73 | 86 | ||
| 87 | static void amd_sched_fence_release(struct fence *f) | ||
| 88 | { | ||
| 89 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | ||
| 90 | kmem_cache_free(sched_fence_slab, fence); | ||
| 91 | } | ||
| 92 | |||
| 74 | const struct fence_ops amd_sched_fence_ops = { | 93 | const struct fence_ops amd_sched_fence_ops = { |
| 75 | .get_driver_name = amd_sched_fence_get_driver_name, | 94 | .get_driver_name = amd_sched_fence_get_driver_name, |
| 76 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 95 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
| 77 | .enable_signaling = amd_sched_fence_enable_signaling, | 96 | .enable_signaling = amd_sched_fence_enable_signaling, |
| 78 | .signaled = NULL, | 97 | .signaled = NULL, |
| 79 | .wait = fence_default_wait, | 98 | .wait = fence_default_wait, |
| 80 | .release = NULL, | 99 | .release = amd_sched_fence_release, |
| 81 | }; | 100 | }; |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7bb3845d9974..aeee083c7f95 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state, | |||
| 1432 | return ret; | 1432 | return ret; |
| 1433 | } | 1433 | } |
| 1434 | 1434 | ||
| 1435 | /** | ||
| 1436 | * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers. | ||
| 1437 | * | ||
| 1438 | * @dev: drm device to check. | ||
| 1439 | * @plane_mask: plane mask for planes that were updated. | ||
| 1440 | * @ret: return value, can be -EDEADLK for a retry. | ||
| 1441 | * | ||
| 1442 | * Before doing an update plane->old_fb is set to plane->fb, | ||
| 1443 | * but before dropping the locks old_fb needs to be set to NULL | ||
| 1444 | * and plane->fb updated. This is a common operation for each | ||
| 1445 | * atomic update, so this call is split off as a helper. | ||
| 1446 | */ | ||
| 1447 | void drm_atomic_clean_old_fb(struct drm_device *dev, | ||
| 1448 | unsigned plane_mask, | ||
| 1449 | int ret) | ||
| 1450 | { | ||
| 1451 | struct drm_plane *plane; | ||
| 1452 | |||
| 1453 | /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping | ||
| 1454 | * locks (ie. while it is still safe to deref plane->state). We | ||
| 1455 | * need to do this here because the driver entry points cannot | ||
| 1456 | * distinguish between legacy and atomic ioctls. | ||
| 1457 | */ | ||
| 1458 | drm_for_each_plane_mask(plane, dev, plane_mask) { | ||
| 1459 | if (ret == 0) { | ||
| 1460 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1461 | if (new_fb) | ||
| 1462 | drm_framebuffer_reference(new_fb); | ||
| 1463 | plane->fb = new_fb; | ||
| 1464 | plane->crtc = plane->state->crtc; | ||
| 1465 | |||
| 1466 | if (plane->old_fb) | ||
| 1467 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1468 | } | ||
| 1469 | plane->old_fb = NULL; | ||
| 1470 | } | ||
| 1471 | } | ||
| 1472 | EXPORT_SYMBOL(drm_atomic_clean_old_fb); | ||
| 1473 | |||
| 1435 | int drm_mode_atomic_ioctl(struct drm_device *dev, | 1474 | int drm_mode_atomic_ioctl(struct drm_device *dev, |
| 1436 | void *data, struct drm_file *file_priv) | 1475 | void *data, struct drm_file *file_priv) |
| 1437 | { | 1476 | { |
| @@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 1446 | struct drm_plane *plane; | 1485 | struct drm_plane *plane; |
| 1447 | struct drm_crtc *crtc; | 1486 | struct drm_crtc *crtc; |
| 1448 | struct drm_crtc_state *crtc_state; | 1487 | struct drm_crtc_state *crtc_state; |
| 1449 | unsigned plane_mask = 0; | 1488 | unsigned plane_mask; |
| 1450 | int ret = 0; | 1489 | int ret = 0; |
| 1451 | unsigned int i, j; | 1490 | unsigned int i, j; |
| 1452 | 1491 | ||
| @@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 1486 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); | 1525 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); |
| 1487 | 1526 | ||
| 1488 | retry: | 1527 | retry: |
| 1528 | plane_mask = 0; | ||
| 1489 | copied_objs = 0; | 1529 | copied_objs = 0; |
| 1490 | copied_props = 0; | 1530 | copied_props = 0; |
| 1491 | 1531 | ||
| @@ -1576,24 +1616,7 @@ retry: | |||
| 1576 | } | 1616 | } |
| 1577 | 1617 | ||
| 1578 | out: | 1618 | out: |
| 1579 | /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping | 1619 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 1580 | * locks (ie. while it is still safe to deref plane->state). We | ||
| 1581 | * need to do this here because the driver entry points cannot | ||
| 1582 | * distinguish between legacy and atomic ioctls. | ||
| 1583 | */ | ||
| 1584 | drm_for_each_plane_mask(plane, dev, plane_mask) { | ||
| 1585 | if (ret == 0) { | ||
| 1586 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1587 | if (new_fb) | ||
| 1588 | drm_framebuffer_reference(new_fb); | ||
| 1589 | plane->fb = new_fb; | ||
| 1590 | plane->crtc = plane->state->crtc; | ||
| 1591 | |||
| 1592 | if (plane->old_fb) | ||
| 1593 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1594 | } | ||
| 1595 | plane->old_fb = NULL; | ||
| 1596 | } | ||
| 1597 | 1620 | ||
| 1598 | if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { | 1621 | if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { |
| 1599 | /* | 1622 | /* |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0c6f62168776..e5aec45bf985 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) | |||
| 210 | return -EINVAL; | 210 | return -EINVAL; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) { | ||
| 214 | DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n", | ||
| 215 | new_encoder->base.id, | ||
| 216 | new_encoder->name, | ||
| 217 | connector_state->crtc->base.id); | ||
| 218 | return -EINVAL; | ||
| 219 | } | ||
| 220 | |||
| 213 | if (new_encoder == connector_state->best_encoder) { | 221 | if (new_encoder == connector_state->best_encoder) { |
| 214 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", | 222 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", |
| 215 | connector->base.id, | 223 | connector->base.id, |
| @@ -1553,6 +1561,9 @@ retry: | |||
| 1553 | goto fail; | 1561 | goto fail; |
| 1554 | } | 1562 | } |
| 1555 | 1563 | ||
| 1564 | if (plane_state->crtc && (plane == plane->crtc->cursor)) | ||
| 1565 | plane_state->state->legacy_cursor_update = true; | ||
| 1566 | |||
| 1556 | ret = __drm_atomic_helper_disable_plane(plane, plane_state); | 1567 | ret = __drm_atomic_helper_disable_plane(plane, plane_state); |
| 1557 | if (ret != 0) | 1568 | if (ret != 0) |
| 1558 | goto fail; | 1569 | goto fail; |
| @@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, | |||
| 1605 | plane_state->src_h = 0; | 1616 | plane_state->src_h = 0; |
| 1606 | plane_state->src_w = 0; | 1617 | plane_state->src_w = 0; |
| 1607 | 1618 | ||
| 1608 | if (plane->crtc && (plane == plane->crtc->cursor)) | ||
| 1609 | plane_state->state->legacy_cursor_update = true; | ||
| 1610 | |||
| 1611 | return 0; | 1619 | return 0; |
| 1612 | } | 1620 | } |
| 1613 | 1621 | ||
| @@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, | |||
| 1741 | struct drm_crtc_state *crtc_state; | 1749 | struct drm_crtc_state *crtc_state; |
| 1742 | struct drm_plane_state *primary_state; | 1750 | struct drm_plane_state *primary_state; |
| 1743 | struct drm_crtc *crtc = set->crtc; | 1751 | struct drm_crtc *crtc = set->crtc; |
| 1752 | int hdisplay, vdisplay; | ||
| 1744 | int ret; | 1753 | int ret; |
| 1745 | 1754 | ||
| 1746 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | 1755 | crtc_state = drm_atomic_get_crtc_state(state, crtc); |
| @@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, | |||
| 1783 | if (ret != 0) | 1792 | if (ret != 0) |
| 1784 | return ret; | 1793 | return ret; |
| 1785 | 1794 | ||
| 1795 | drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); | ||
| 1796 | |||
| 1786 | drm_atomic_set_fb_for_plane(primary_state, set->fb); | 1797 | drm_atomic_set_fb_for_plane(primary_state, set->fb); |
| 1787 | primary_state->crtc_x = 0; | 1798 | primary_state->crtc_x = 0; |
| 1788 | primary_state->crtc_y = 0; | 1799 | primary_state->crtc_y = 0; |
| 1789 | primary_state->crtc_h = set->mode->vdisplay; | 1800 | primary_state->crtc_h = vdisplay; |
| 1790 | primary_state->crtc_w = set->mode->hdisplay; | 1801 | primary_state->crtc_w = hdisplay; |
| 1791 | primary_state->src_x = set->x << 16; | 1802 | primary_state->src_x = set->x << 16; |
| 1792 | primary_state->src_y = set->y << 16; | 1803 | primary_state->src_y = set->y << 16; |
| 1793 | if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { | 1804 | if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { |
| 1794 | primary_state->src_h = set->mode->hdisplay << 16; | 1805 | primary_state->src_h = hdisplay << 16; |
| 1795 | primary_state->src_w = set->mode->vdisplay << 16; | 1806 | primary_state->src_w = vdisplay << 16; |
| 1796 | } else { | 1807 | } else { |
| 1797 | primary_state->src_h = set->mode->vdisplay << 16; | 1808 | primary_state->src_h = vdisplay << 16; |
| 1798 | primary_state->src_w = set->mode->hdisplay << 16; | 1809 | primary_state->src_w = hdisplay << 16; |
| 1799 | } | 1810 | } |
| 1800 | 1811 | ||
| 1801 | commit: | 1812 | commit: |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e673c13c7391..69cbab5e5c81 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) | |||
| 342 | struct drm_plane *plane; | 342 | struct drm_plane *plane; |
| 343 | struct drm_atomic_state *state; | 343 | struct drm_atomic_state *state; |
| 344 | int i, ret; | 344 | int i, ret; |
| 345 | unsigned plane_mask; | ||
| 345 | 346 | ||
| 346 | state = drm_atomic_state_alloc(dev); | 347 | state = drm_atomic_state_alloc(dev); |
| 347 | if (!state) | 348 | if (!state) |
| @@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) | |||
| 349 | 350 | ||
| 350 | state->acquire_ctx = dev->mode_config.acquire_ctx; | 351 | state->acquire_ctx = dev->mode_config.acquire_ctx; |
| 351 | retry: | 352 | retry: |
| 353 | plane_mask = 0; | ||
| 352 | drm_for_each_plane(plane, dev) { | 354 | drm_for_each_plane(plane, dev) { |
| 353 | struct drm_plane_state *plane_state; | 355 | struct drm_plane_state *plane_state; |
| 354 | 356 | ||
| 355 | plane->old_fb = plane->fb; | ||
| 356 | |||
| 357 | plane_state = drm_atomic_get_plane_state(state, plane); | 357 | plane_state = drm_atomic_get_plane_state(state, plane); |
| 358 | if (IS_ERR(plane_state)) { | 358 | if (IS_ERR(plane_state)) { |
| 359 | ret = PTR_ERR(plane_state); | 359 | ret = PTR_ERR(plane_state); |
| @@ -362,6 +362,9 @@ retry: | |||
| 362 | 362 | ||
| 363 | plane_state->rotation = BIT(DRM_ROTATE_0); | 363 | plane_state->rotation = BIT(DRM_ROTATE_0); |
| 364 | 364 | ||
| 365 | plane->old_fb = plane->fb; | ||
| 366 | plane_mask |= 1 << drm_plane_index(plane); | ||
| 367 | |||
| 365 | /* disable non-primary: */ | 368 | /* disable non-primary: */ |
| 366 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) | 369 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) |
| 367 | continue; | 370 | continue; |
| @@ -382,19 +385,7 @@ retry: | |||
| 382 | ret = drm_atomic_commit(state); | 385 | ret = drm_atomic_commit(state); |
| 383 | 386 | ||
| 384 | fail: | 387 | fail: |
| 385 | drm_for_each_plane(plane, dev) { | 388 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 386 | if (ret == 0) { | ||
| 387 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 388 | if (new_fb) | ||
| 389 | drm_framebuffer_reference(new_fb); | ||
| 390 | plane->fb = new_fb; | ||
| 391 | plane->crtc = plane->state->crtc; | ||
| 392 | |||
| 393 | if (plane->old_fb) | ||
| 394 | drm_framebuffer_unreference(plane->old_fb); | ||
| 395 | } | ||
| 396 | plane->old_fb = NULL; | ||
| 397 | } | ||
| 398 | 389 | ||
| 399 | if (ret == -EDEADLK) | 390 | if (ret == -EDEADLK) |
| 400 | goto backoff; | 391 | goto backoff; |
| @@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, | |||
| 1236 | struct drm_fb_helper *fb_helper = info->par; | 1227 | struct drm_fb_helper *fb_helper = info->par; |
| 1237 | struct drm_device *dev = fb_helper->dev; | 1228 | struct drm_device *dev = fb_helper->dev; |
| 1238 | struct drm_atomic_state *state; | 1229 | struct drm_atomic_state *state; |
| 1230 | struct drm_plane *plane; | ||
| 1239 | int i, ret; | 1231 | int i, ret; |
| 1232 | unsigned plane_mask; | ||
| 1240 | 1233 | ||
| 1241 | state = drm_atomic_state_alloc(dev); | 1234 | state = drm_atomic_state_alloc(dev); |
| 1242 | if (!state) | 1235 | if (!state) |
| @@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, | |||
| 1244 | 1237 | ||
| 1245 | state->acquire_ctx = dev->mode_config.acquire_ctx; | 1238 | state->acquire_ctx = dev->mode_config.acquire_ctx; |
| 1246 | retry: | 1239 | retry: |
| 1240 | plane_mask = 0; | ||
| 1247 | for(i = 0; i < fb_helper->crtc_count; i++) { | 1241 | for(i = 0; i < fb_helper->crtc_count; i++) { |
| 1248 | struct drm_mode_set *mode_set; | 1242 | struct drm_mode_set *mode_set; |
| 1249 | 1243 | ||
| 1250 | mode_set = &fb_helper->crtc_info[i].mode_set; | 1244 | mode_set = &fb_helper->crtc_info[i].mode_set; |
| 1251 | 1245 | ||
| 1252 | mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb; | ||
| 1253 | |||
| 1254 | mode_set->x = var->xoffset; | 1246 | mode_set->x = var->xoffset; |
| 1255 | mode_set->y = var->yoffset; | 1247 | mode_set->y = var->yoffset; |
| 1256 | 1248 | ||
| 1257 | ret = __drm_atomic_helper_set_config(mode_set, state); | 1249 | ret = __drm_atomic_helper_set_config(mode_set, state); |
| 1258 | if (ret != 0) | 1250 | if (ret != 0) |
| 1259 | goto fail; | 1251 | goto fail; |
| 1252 | |||
| 1253 | plane = mode_set->crtc->primary; | ||
| 1254 | plane_mask |= drm_plane_index(plane); | ||
| 1255 | plane->old_fb = plane->fb; | ||
| 1260 | } | 1256 | } |
| 1261 | 1257 | ||
| 1262 | ret = drm_atomic_commit(state); | 1258 | ret = drm_atomic_commit(state); |
| @@ -1268,26 +1264,7 @@ retry: | |||
| 1268 | 1264 | ||
| 1269 | 1265 | ||
| 1270 | fail: | 1266 | fail: |
| 1271 | for(i = 0; i < fb_helper->crtc_count; i++) { | 1267 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 1272 | struct drm_mode_set *mode_set; | ||
| 1273 | struct drm_plane *plane; | ||
| 1274 | |||
| 1275 | mode_set = &fb_helper->crtc_info[i].mode_set; | ||
| 1276 | plane = mode_set->crtc->primary; | ||
| 1277 | |||
| 1278 | if (ret == 0) { | ||
| 1279 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1280 | |||
| 1281 | if (new_fb) | ||
| 1282 | drm_framebuffer_reference(new_fb); | ||
| 1283 | plane->fb = new_fb; | ||
| 1284 | plane->crtc = plane->state->crtc; | ||
| 1285 | |||
| 1286 | if (plane->old_fb) | ||
| 1287 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1288 | } | ||
| 1289 | plane->old_fb = NULL; | ||
| 1290 | } | ||
| 1291 | 1268 | ||
| 1292 | if (ret == -EDEADLK) | 1269 | if (ret == -EDEADLK) |
| 1293 | goto backoff; | 1270 | goto backoff; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8afda459a26e..95bb27de774f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -351,6 +351,8 @@ enum intel_dpll_id { | |||
| 351 | /* hsw/bdw */ | 351 | /* hsw/bdw */ |
| 352 | DPLL_ID_WRPLL1 = 0, | 352 | DPLL_ID_WRPLL1 = 0, |
| 353 | DPLL_ID_WRPLL2 = 1, | 353 | DPLL_ID_WRPLL2 = 1, |
| 354 | DPLL_ID_SPLL = 2, | ||
| 355 | |||
| 354 | /* skl */ | 356 | /* skl */ |
| 355 | DPLL_ID_SKL_DPLL1 = 0, | 357 | DPLL_ID_SKL_DPLL1 = 0, |
| 356 | DPLL_ID_SKL_DPLL2 = 1, | 358 | DPLL_ID_SKL_DPLL2 = 1, |
| @@ -367,6 +369,7 @@ struct intel_dpll_hw_state { | |||
| 367 | 369 | ||
| 368 | /* hsw, bdw */ | 370 | /* hsw, bdw */ |
| 369 | uint32_t wrpll; | 371 | uint32_t wrpll; |
| 372 | uint32_t spll; | ||
| 370 | 373 | ||
| 371 | /* skl */ | 374 | /* skl */ |
| 372 | /* | 375 | /* |
| @@ -2648,6 +2651,7 @@ struct i915_params { | |||
| 2648 | int enable_cmd_parser; | 2651 | int enable_cmd_parser; |
| 2649 | /* leave bools at the end to not create holes */ | 2652 | /* leave bools at the end to not create holes */ |
| 2650 | bool enable_hangcheck; | 2653 | bool enable_hangcheck; |
| 2654 | bool fastboot; | ||
| 2651 | bool prefault_disable; | 2655 | bool prefault_disable; |
| 2652 | bool load_detect_test; | 2656 | bool load_detect_test; |
| 2653 | bool reset; | 2657 | bool reset; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5cf4a1998273..91bb1fc27420 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3809,6 +3809,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3809 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | 3809 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
| 3810 | struct drm_file *file) | 3810 | struct drm_file *file) |
| 3811 | { | 3811 | { |
| 3812 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3812 | struct drm_i915_gem_caching *args = data; | 3813 | struct drm_i915_gem_caching *args = data; |
| 3813 | struct drm_i915_gem_object *obj; | 3814 | struct drm_i915_gem_object *obj; |
| 3814 | enum i915_cache_level level; | 3815 | enum i915_cache_level level; |
| @@ -3837,9 +3838,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3837 | return -EINVAL; | 3838 | return -EINVAL; |
| 3838 | } | 3839 | } |
| 3839 | 3840 | ||
| 3841 | intel_runtime_pm_get(dev_priv); | ||
| 3842 | |||
| 3840 | ret = i915_mutex_lock_interruptible(dev); | 3843 | ret = i915_mutex_lock_interruptible(dev); |
| 3841 | if (ret) | 3844 | if (ret) |
| 3842 | return ret; | 3845 | goto rpm_put; |
| 3843 | 3846 | ||
| 3844 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3847 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
| 3845 | if (&obj->base == NULL) { | 3848 | if (&obj->base == NULL) { |
| @@ -3852,6 +3855,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3852 | drm_gem_object_unreference(&obj->base); | 3855 | drm_gem_object_unreference(&obj->base); |
| 3853 | unlock: | 3856 | unlock: |
| 3854 | mutex_unlock(&dev->struct_mutex); | 3857 | mutex_unlock(&dev->struct_mutex); |
| 3858 | rpm_put: | ||
| 3859 | intel_runtime_pm_put(dev_priv); | ||
| 3860 | |||
| 3855 | return ret; | 3861 | return ret; |
| 3856 | } | 3862 | } |
| 3857 | 3863 | ||
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 96bb23865eac..4be13a5eb932 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
| @@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = { | |||
| 40 | .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), | 40 | .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), |
| 41 | .disable_power_well = -1, | 41 | .disable_power_well = -1, |
| 42 | .enable_ips = 1, | 42 | .enable_ips = 1, |
| 43 | .fastboot = 0, | ||
| 43 | .prefault_disable = 0, | 44 | .prefault_disable = 0, |
| 44 | .load_detect_test = 0, | 45 | .load_detect_test = 0, |
| 45 | .reset = true, | 46 | .reset = true, |
| @@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well, | |||
| 133 | module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); | 134 | module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); |
| 134 | MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); | 135 | MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); |
| 135 | 136 | ||
| 137 | module_param_named(fastboot, i915.fastboot, bool, 0600); | ||
| 138 | MODULE_PARM_DESC(fastboot, | ||
| 139 | "Try to skip unnecessary mode sets at boot time (default: false)"); | ||
| 140 | |||
| 136 | module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); | 141 | module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); |
| 137 | MODULE_PARM_DESC(prefault_disable, | 142 | MODULE_PARM_DESC(prefault_disable, |
| 138 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " | 143 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b84aaa0bb48a..6a2c76e367a5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, | |||
| 138 | pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); | 138 | pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static void hsw_crt_pre_enable(struct intel_encoder *encoder) | ||
| 142 | { | ||
| 143 | struct drm_device *dev = encoder->base.dev; | ||
| 144 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 145 | |||
| 146 | WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); | ||
| 147 | I915_WRITE(SPLL_CTL, | ||
| 148 | SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); | ||
| 149 | POSTING_READ(SPLL_CTL); | ||
| 150 | udelay(20); | ||
| 151 | } | ||
| 152 | |||
| 153 | /* Note: The caller is required to filter out dpms modes not supported by the | 141 | /* Note: The caller is required to filter out dpms modes not supported by the |
| 154 | * platform. */ | 142 | * platform. */ |
| 155 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | 143 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
| @@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder) | |||
| 216 | intel_disable_crt(encoder); | 204 | intel_disable_crt(encoder); |
| 217 | } | 205 | } |
| 218 | 206 | ||
| 219 | static void hsw_crt_post_disable(struct intel_encoder *encoder) | ||
| 220 | { | ||
| 221 | struct drm_device *dev = encoder->base.dev; | ||
| 222 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 223 | uint32_t val; | ||
| 224 | |||
| 225 | DRM_DEBUG_KMS("Disabling SPLL\n"); | ||
| 226 | val = I915_READ(SPLL_CTL); | ||
| 227 | WARN_ON(!(val & SPLL_PLL_ENABLE)); | ||
| 228 | I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); | ||
| 229 | POSTING_READ(SPLL_CTL); | ||
| 230 | } | ||
| 231 | |||
| 232 | static void intel_enable_crt(struct intel_encoder *encoder) | 207 | static void intel_enable_crt(struct intel_encoder *encoder) |
| 233 | { | 208 | { |
| 234 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | 209 | struct intel_crt *crt = intel_encoder_to_crt(encoder); |
| @@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, | |||
| 280 | if (HAS_DDI(dev)) { | 255 | if (HAS_DDI(dev)) { |
| 281 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; | 256 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; |
| 282 | pipe_config->port_clock = 135000 * 2; | 257 | pipe_config->port_clock = 135000 * 2; |
| 258 | |||
| 259 | pipe_config->dpll_hw_state.wrpll = 0; | ||
| 260 | pipe_config->dpll_hw_state.spll = | ||
| 261 | SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; | ||
| 283 | } | 262 | } |
| 284 | 263 | ||
| 285 | return true; | 264 | return true; |
| @@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev) | |||
| 860 | if (HAS_DDI(dev)) { | 839 | if (HAS_DDI(dev)) { |
| 861 | crt->base.get_config = hsw_crt_get_config; | 840 | crt->base.get_config = hsw_crt_get_config; |
| 862 | crt->base.get_hw_state = intel_ddi_get_hw_state; | 841 | crt->base.get_hw_state = intel_ddi_get_hw_state; |
| 863 | crt->base.pre_enable = hsw_crt_pre_enable; | ||
| 864 | crt->base.post_disable = hsw_crt_post_disable; | ||
| 865 | } else { | 842 | } else { |
| 866 | crt->base.get_config = intel_crt_get_config; | 843 | crt->base.get_config = intel_crt_get_config; |
| 867 | crt->base.get_hw_state = intel_crt_get_hw_state; | 844 | crt->base.get_hw_state = intel_crt_get_hw_state; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b25e99a432fb..a6752a61d99f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1286 | } | 1286 | } |
| 1287 | 1287 | ||
| 1288 | crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); | 1288 | crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); |
| 1289 | } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) { | ||
| 1290 | struct drm_atomic_state *state = crtc_state->base.state; | ||
| 1291 | struct intel_shared_dpll_config *spll = | ||
| 1292 | &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL]; | ||
| 1293 | |||
| 1294 | if (spll->crtc_mask && | ||
| 1295 | WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll)) | ||
| 1296 | return false; | ||
| 1297 | |||
| 1298 | crtc_state->shared_dpll = DPLL_ID_SPLL; | ||
| 1299 | spll->hw_state.spll = crtc_state->dpll_hw_state.spll; | ||
| 1300 | spll->crtc_mask |= 1 << intel_crtc->pipe; | ||
| 1289 | } | 1301 | } |
| 1290 | 1302 | ||
| 1291 | return true; | 1303 | return true; |
| @@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) | |||
| 2437 | } | 2449 | } |
| 2438 | } | 2450 | } |
| 2439 | 2451 | ||
| 2440 | static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, | 2452 | static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, |
| 2441 | struct intel_shared_dpll *pll) | 2453 | struct intel_shared_dpll *pll) |
| 2442 | { | 2454 | { |
| 2443 | I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); | 2455 | I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); |
| @@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
| 2445 | udelay(20); | 2457 | udelay(20); |
| 2446 | } | 2458 | } |
| 2447 | 2459 | ||
| 2448 | static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, | 2460 | static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, |
| 2449 | struct intel_shared_dpll *pll) | 2461 | struct intel_shared_dpll *pll) |
| 2450 | { | 2462 | { |
| 2463 | I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); | ||
| 2464 | POSTING_READ(SPLL_CTL); | ||
| 2465 | udelay(20); | ||
| 2466 | } | ||
| 2467 | |||
| 2468 | static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, | ||
| 2469 | struct intel_shared_dpll *pll) | ||
| 2470 | { | ||
| 2451 | uint32_t val; | 2471 | uint32_t val; |
| 2452 | 2472 | ||
| 2453 | val = I915_READ(WRPLL_CTL(pll->id)); | 2473 | val = I915_READ(WRPLL_CTL(pll->id)); |
| @@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, | |||
| 2455 | POSTING_READ(WRPLL_CTL(pll->id)); | 2475 | POSTING_READ(WRPLL_CTL(pll->id)); |
| 2456 | } | 2476 | } |
| 2457 | 2477 | ||
| 2458 | static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | 2478 | static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, |
| 2459 | struct intel_shared_dpll *pll, | 2479 | struct intel_shared_dpll *pll) |
| 2460 | struct intel_dpll_hw_state *hw_state) | 2480 | { |
| 2481 | uint32_t val; | ||
| 2482 | |||
| 2483 | val = I915_READ(SPLL_CTL); | ||
| 2484 | I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); | ||
| 2485 | POSTING_READ(SPLL_CTL); | ||
| 2486 | } | ||
| 2487 | |||
| 2488 | static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, | ||
| 2489 | struct intel_shared_dpll *pll, | ||
| 2490 | struct intel_dpll_hw_state *hw_state) | ||
| 2461 | { | 2491 | { |
| 2462 | uint32_t val; | 2492 | uint32_t val; |
| 2463 | 2493 | ||
| @@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
| 2470 | return val & WRPLL_PLL_ENABLE; | 2500 | return val & WRPLL_PLL_ENABLE; |
| 2471 | } | 2501 | } |
| 2472 | 2502 | ||
| 2503 | static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, | ||
| 2504 | struct intel_shared_dpll *pll, | ||
| 2505 | struct intel_dpll_hw_state *hw_state) | ||
| 2506 | { | ||
| 2507 | uint32_t val; | ||
| 2508 | |||
| 2509 | if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) | ||
| 2510 | return false; | ||
| 2511 | |||
| 2512 | val = I915_READ(SPLL_CTL); | ||
| 2513 | hw_state->spll = val; | ||
| 2514 | |||
| 2515 | return val & SPLL_PLL_ENABLE; | ||
| 2516 | } | ||
| 2517 | |||
| 2518 | |||
| 2473 | static const char * const hsw_ddi_pll_names[] = { | 2519 | static const char * const hsw_ddi_pll_names[] = { |
| 2474 | "WRPLL 1", | 2520 | "WRPLL 1", |
| 2475 | "WRPLL 2", | 2521 | "WRPLL 2", |
| 2522 | "SPLL" | ||
| 2476 | }; | 2523 | }; |
| 2477 | 2524 | ||
| 2478 | static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) | 2525 | static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) |
| 2479 | { | 2526 | { |
| 2480 | int i; | 2527 | int i; |
| 2481 | 2528 | ||
| 2482 | dev_priv->num_shared_dpll = 2; | 2529 | dev_priv->num_shared_dpll = 3; |
| 2483 | 2530 | ||
| 2484 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 2531 | for (i = 0; i < 2; i++) { |
| 2485 | dev_priv->shared_dplls[i].id = i; | 2532 | dev_priv->shared_dplls[i].id = i; |
| 2486 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; | 2533 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; |
| 2487 | dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; | 2534 | dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable; |
| 2488 | dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; | 2535 | dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable; |
| 2489 | dev_priv->shared_dplls[i].get_hw_state = | 2536 | dev_priv->shared_dplls[i].get_hw_state = |
| 2490 | hsw_ddi_pll_get_hw_state; | 2537 | hsw_ddi_wrpll_get_hw_state; |
| 2491 | } | 2538 | } |
| 2539 | |||
| 2540 | /* SPLL is special, but needs to be initialized anyway.. */ | ||
| 2541 | dev_priv->shared_dplls[i].id = i; | ||
| 2542 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; | ||
| 2543 | dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable; | ||
| 2544 | dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable; | ||
| 2545 | dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state; | ||
| 2546 | |||
| 2492 | } | 2547 | } |
| 2493 | 2548 | ||
| 2494 | static const char * const skl_ddi_pll_names[] = { | 2549 | static const char * const skl_ddi_pll_names[] = { |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f62ffc04c21d..71860f8680f9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2646 | return; | 2646 | return; |
| 2647 | 2647 | ||
| 2648 | valid_fb: | 2648 | valid_fb: |
| 2649 | plane_state->src_x = plane_state->src_y = 0; | 2649 | plane_state->src_x = 0; |
| 2650 | plane_state->src_y = 0; | ||
| 2650 | plane_state->src_w = fb->width << 16; | 2651 | plane_state->src_w = fb->width << 16; |
| 2651 | plane_state->src_h = fb->height << 16; | 2652 | plane_state->src_h = fb->height << 16; |
| 2652 | 2653 | ||
| 2653 | plane_state->crtc_x = plane_state->src_y = 0; | 2654 | plane_state->crtc_x = 0; |
| 2655 | plane_state->crtc_y = 0; | ||
| 2654 | plane_state->crtc_w = fb->width; | 2656 | plane_state->crtc_w = fb->width; |
| 2655 | plane_state->crtc_h = fb->height; | 2657 | plane_state->crtc_h = fb->height; |
| 2656 | 2658 | ||
| @@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4237 | struct intel_shared_dpll *pll; | 4239 | struct intel_shared_dpll *pll; |
| 4238 | struct intel_shared_dpll_config *shared_dpll; | 4240 | struct intel_shared_dpll_config *shared_dpll; |
| 4239 | enum intel_dpll_id i; | 4241 | enum intel_dpll_id i; |
| 4242 | int max = dev_priv->num_shared_dpll; | ||
| 4240 | 4243 | ||
| 4241 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); | 4244 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); |
| 4242 | 4245 | ||
| @@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4271 | WARN_ON(shared_dpll[i].crtc_mask); | 4274 | WARN_ON(shared_dpll[i].crtc_mask); |
| 4272 | 4275 | ||
| 4273 | goto found; | 4276 | goto found; |
| 4274 | } | 4277 | } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) |
| 4278 | /* Do not consider SPLL */ | ||
| 4279 | max = 2; | ||
| 4275 | 4280 | ||
| 4276 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 4281 | for (i = 0; i < max; i++) { |
| 4277 | pll = &dev_priv->shared_dplls[i]; | 4282 | pll = &dev_priv->shared_dplls[i]; |
| 4278 | 4283 | ||
| 4279 | /* Only want to check enabled timings first */ | 4284 | /* Only want to check enabled timings first */ |
| @@ -9723,6 +9728,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, | |||
| 9723 | case PORT_CLK_SEL_WRPLL2: | 9728 | case PORT_CLK_SEL_WRPLL2: |
| 9724 | pipe_config->shared_dpll = DPLL_ID_WRPLL2; | 9729 | pipe_config->shared_dpll = DPLL_ID_WRPLL2; |
| 9725 | break; | 9730 | break; |
| 9731 | case PORT_CLK_SEL_SPLL: | ||
| 9732 | pipe_config->shared_dpll = DPLL_ID_SPLL; | ||
| 9726 | } | 9733 | } |
| 9727 | } | 9734 | } |
| 9728 | 9735 | ||
| @@ -12003,9 +12010,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
| 12003 | pipe_config->dpll_hw_state.cfgcr1, | 12010 | pipe_config->dpll_hw_state.cfgcr1, |
| 12004 | pipe_config->dpll_hw_state.cfgcr2); | 12011 | pipe_config->dpll_hw_state.cfgcr2); |
| 12005 | } else if (HAS_DDI(dev)) { | 12012 | } else if (HAS_DDI(dev)) { |
| 12006 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", | 12013 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", |
| 12007 | pipe_config->ddi_pll_sel, | 12014 | pipe_config->ddi_pll_sel, |
| 12008 | pipe_config->dpll_hw_state.wrpll); | 12015 | pipe_config->dpll_hw_state.wrpll, |
| 12016 | pipe_config->dpll_hw_state.spll); | ||
| 12009 | } else { | 12017 | } else { |
| 12010 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " | 12018 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " |
| 12011 | "fp0: 0x%x, fp1: 0x%x\n", | 12019 | "fp0: 0x%x, fp1: 0x%x\n", |
| @@ -12528,6 +12536,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12528 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); | 12536 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); |
| 12529 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); | 12537 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); |
| 12530 | PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); | 12538 | PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); |
| 12539 | PIPE_CONF_CHECK_X(dpll_hw_state.spll); | ||
| 12531 | PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); | 12540 | PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); |
| 12532 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); | 12541 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); |
| 12533 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); | 12542 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); |
| @@ -13032,6 +13041,9 @@ static int intel_atomic_check(struct drm_device *dev, | |||
| 13032 | struct intel_crtc_state *pipe_config = | 13041 | struct intel_crtc_state *pipe_config = |
| 13033 | to_intel_crtc_state(crtc_state); | 13042 | to_intel_crtc_state(crtc_state); |
| 13034 | 13043 | ||
| 13044 | memset(&to_intel_crtc(crtc)->atomic, 0, | ||
| 13045 | sizeof(struct intel_crtc_atomic_commit)); | ||
| 13046 | |||
| 13035 | /* Catch I915_MODE_FLAG_INHERITED */ | 13047 | /* Catch I915_MODE_FLAG_INHERITED */ |
| 13036 | if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) | 13048 | if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) |
| 13037 | crtc_state->mode_changed = true; | 13049 | crtc_state->mode_changed = true; |
| @@ -13056,7 +13068,8 @@ static int intel_atomic_check(struct drm_device *dev, | |||
| 13056 | if (ret) | 13068 | if (ret) |
| 13057 | return ret; | 13069 | return ret; |
| 13058 | 13070 | ||
| 13059 | if (intel_pipe_config_compare(state->dev, | 13071 | if (i915.fastboot && |
| 13072 | intel_pipe_config_compare(state->dev, | ||
| 13060 | to_intel_crtc_state(crtc->state), | 13073 | to_intel_crtc_state(crtc->state), |
| 13061 | pipe_config, true)) { | 13074 | pipe_config, true)) { |
| 13062 | crtc_state->mode_changed = false; | 13075 | crtc_state->mode_changed = false; |
| @@ -14364,16 +14377,17 @@ static int intel_framebuffer_init(struct drm_device *dev, | |||
| 14364 | static struct drm_framebuffer * | 14377 | static struct drm_framebuffer * |
| 14365 | intel_user_framebuffer_create(struct drm_device *dev, | 14378 | intel_user_framebuffer_create(struct drm_device *dev, |
| 14366 | struct drm_file *filp, | 14379 | struct drm_file *filp, |
| 14367 | struct drm_mode_fb_cmd2 *mode_cmd) | 14380 | struct drm_mode_fb_cmd2 *user_mode_cmd) |
| 14368 | { | 14381 | { |
| 14369 | struct drm_i915_gem_object *obj; | 14382 | struct drm_i915_gem_object *obj; |
| 14383 | struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; | ||
| 14370 | 14384 | ||
| 14371 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, | 14385 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, |
| 14372 | mode_cmd->handles[0])); | 14386 | mode_cmd.handles[0])); |
| 14373 | if (&obj->base == NULL) | 14387 | if (&obj->base == NULL) |
| 14374 | return ERR_PTR(-ENOENT); | 14388 | return ERR_PTR(-ENOENT); |
| 14375 | 14389 | ||
| 14376 | return intel_framebuffer_create(dev, mode_cmd, obj); | 14390 | return intel_framebuffer_create(dev, &mode_cmd, obj); |
| 14377 | } | 14391 | } |
| 14378 | 14392 | ||
| 14379 | #ifndef CONFIG_DRM_FBDEV_EMULATION | 14393 | #ifndef CONFIG_DRM_FBDEV_EMULATION |
| @@ -14705,6 +14719,9 @@ static struct intel_quirk intel_quirks[] = { | |||
| 14705 | /* Apple Macbook 2,1 (Core 2 T7400) */ | 14719 | /* Apple Macbook 2,1 (Core 2 T7400) */ |
| 14706 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, | 14720 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, |
| 14707 | 14721 | ||
| 14722 | /* Apple Macbook 4,1 */ | ||
| 14723 | { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, | ||
| 14724 | |||
| 14708 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ | 14725 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ |
| 14709 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, | 14726 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, |
| 14710 | 14727 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d52a15df6917..071a76b9ac52 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) | |||
| 4449 | POSTING_READ(GEN6_RPNSWREQ); | 4449 | POSTING_READ(GEN6_RPNSWREQ); |
| 4450 | 4450 | ||
| 4451 | dev_priv->rps.cur_freq = val; | 4451 | dev_priv->rps.cur_freq = val; |
| 4452 | trace_intel_gpu_freq_change(val * 50); | 4452 | trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); |
| 4453 | } | 4453 | } |
| 4454 | 4454 | ||
| 4455 | static void valleyview_set_rps(struct drm_device *dev, u8 val) | 4455 | static void valleyview_set_rps(struct drm_device *dev, u8 val) |
| @@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
| 7255 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | 7255 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) |
| 7256 | { | 7256 | { |
| 7257 | if (IS_GEN9(dev_priv->dev)) | 7257 | if (IS_GEN9(dev_priv->dev)) |
| 7258 | return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; | 7258 | return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, |
| 7259 | GEN9_FREQ_SCALER); | ||
| 7259 | else if (IS_CHERRYVIEW(dev_priv->dev)) | 7260 | else if (IS_CHERRYVIEW(dev_priv->dev)) |
| 7260 | return chv_gpu_freq(dev_priv, val); | 7261 | return chv_gpu_freq(dev_priv, val); |
| 7261 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 7262 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
| @@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | |||
| 7267 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) | 7268 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) |
| 7268 | { | 7269 | { |
| 7269 | if (IS_GEN9(dev_priv->dev)) | 7270 | if (IS_GEN9(dev_priv->dev)) |
| 7270 | return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; | 7271 | return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, |
| 7272 | GT_FREQUENCY_MULTIPLIER); | ||
| 7271 | else if (IS_CHERRYVIEW(dev_priv->dev)) | 7273 | else if (IS_CHERRYVIEW(dev_priv->dev)) |
| 7272 | return chv_freq_opcode(dev_priv, val); | 7274 | return chv_freq_opcode(dev_priv, val); |
| 7273 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 7275 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
| 7274 | return byt_freq_opcode(dev_priv, val); | 7276 | return byt_freq_opcode(dev_priv, val); |
| 7275 | else | 7277 | else |
| 7276 | return val / GT_FREQUENCY_MULTIPLIER; | 7278 | return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); |
| 7277 | } | 7279 | } |
| 7278 | 7280 | ||
| 7279 | struct request_boost { | 7281 | struct request_boost { |
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 4f2068fe5d88..a7bf6a90eae5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c | |||
| @@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 70 | BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); | 70 | BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); |
| 71 | BUG_ON(pixels_current == pixels_prev); | 71 | BUG_ON(pixels_current == pixels_prev); |
| 72 | 72 | ||
| 73 | if (!handle || !file_priv) { | ||
| 74 | mga_hide_cursor(mdev); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 73 | obj = drm_gem_object_lookup(dev, file_priv, handle); | 78 | obj = drm_gem_object_lookup(dev, file_priv, handle); |
| 74 | if (!obj) | 79 | if (!obj) |
| 75 | return -ENOENT; | 80 | return -ENOENT; |
| @@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 88 | goto out_unreserve1; | 93 | goto out_unreserve1; |
| 89 | } | 94 | } |
| 90 | 95 | ||
| 91 | if (!handle) { | ||
| 92 | mga_hide_cursor(mdev); | ||
| 93 | ret = 0; | ||
| 94 | goto out1; | ||
| 95 | } | ||
| 96 | |||
| 97 | /* Move cursor buffers into VRAM if they aren't already */ | 96 | /* Move cursor buffers into VRAM if they aren't already */ |
| 98 | if (!pixels_1->pin_count) { | 97 | if (!pixels_1->pin_count) { |
| 99 | ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, | 98 | ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 28bc202f9753..40f845e31272 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | |||
| @@ -7,6 +7,7 @@ struct nvkm_instmem { | |||
| 7 | const struct nvkm_instmem_func *func; | 7 | const struct nvkm_instmem_func *func; |
| 8 | struct nvkm_subdev subdev; | 8 | struct nvkm_subdev subdev; |
| 9 | 9 | ||
| 10 | spinlock_t lock; | ||
| 10 | struct list_head list; | 11 | struct list_head list; |
| 11 | u32 reserved; | 12 | u32 reserved; |
| 12 | 13 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8b8332e46f24..d5e6938cc6bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | |||
| 367 | return -ENODEV; | 367 | return -ENODEV; |
| 368 | } | 368 | } |
| 369 | obj = (union acpi_object *)buffer.pointer; | 369 | obj = (union acpi_object *)buffer.pointer; |
| 370 | len = min(len, (int)obj->buffer.length); | ||
| 370 | memcpy(bios+offset, obj->buffer.pointer, len); | 371 | memcpy(bios+offset, obj->buffer.pointer, len); |
| 371 | kfree(buffer.pointer); | 372 | kfree(buffer.pointer); |
| 372 | return len; | 373 | return len; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 3050042e6c6d..a02813e994ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | 39 | ||
| 40 | #include <nvif/client.h> | 40 | #include <nvif/client.h> |
| 41 | #include <nvif/device.h> | 41 | #include <nvif/device.h> |
| 42 | #include <nvif/ioctl.h> | ||
| 42 | 43 | ||
| 43 | #include <drmP.h> | 44 | #include <drmP.h> |
| 44 | 45 | ||
| @@ -65,9 +66,10 @@ struct nouveau_drm_tile { | |||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | enum nouveau_drm_object_route { | 68 | enum nouveau_drm_object_route { |
| 68 | NVDRM_OBJECT_NVIF = 0, | 69 | NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF, |
| 69 | NVDRM_OBJECT_USIF, | 70 | NVDRM_OBJECT_USIF, |
| 70 | NVDRM_OBJECT_ABI16, | 71 | NVDRM_OBJECT_ABI16, |
| 72 | NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY, | ||
| 71 | }; | 73 | }; |
| 72 | 74 | ||
| 73 | enum nouveau_drm_notify_route { | 75 | enum nouveau_drm_notify_route { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 89dc4ce63490..6ae1b3494bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | |||
| @@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | |||
| 313 | if (nvif_unpack(argv->v0, 0, 0, true)) { | 313 | if (nvif_unpack(argv->v0, 0, 0, true)) { |
| 314 | /* block access to objects not created via this interface */ | 314 | /* block access to objects not created via this interface */ |
| 315 | owner = argv->v0.owner; | 315 | owner = argv->v0.owner; |
| 316 | argv->v0.owner = NVDRM_OBJECT_USIF; | 316 | if (argv->v0.object == 0ULL) |
| 317 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ | ||
| 318 | else | ||
| 319 | argv->v0.owner = NVDRM_OBJECT_USIF; | ||
| 317 | } else | 320 | } else |
| 318 | goto done; | 321 | goto done; |
| 319 | 322 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index e3c783d0e2ab..caf22b589edc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
| @@ -279,6 +279,12 @@ nvkm_device_pci_10de_0fe3[] = { | |||
| 279 | }; | 279 | }; |
| 280 | 280 | ||
| 281 | static const struct nvkm_device_pci_vendor | 281 | static const struct nvkm_device_pci_vendor |
| 282 | nvkm_device_pci_10de_0fe4[] = { | ||
| 283 | { 0x144d, 0xc740, NULL, { .War00C800_0 = true } }, | ||
| 284 | {} | ||
| 285 | }; | ||
| 286 | |||
| 287 | static const struct nvkm_device_pci_vendor | ||
| 282 | nvkm_device_pci_10de_104b[] = { | 288 | nvkm_device_pci_10de_104b[] = { |
| 283 | { 0x1043, 0x844c, "GeForce GT 625" }, | 289 | { 0x1043, 0x844c, "GeForce GT 625" }, |
| 284 | { 0x1043, 0x846b, "GeForce GT 625" }, | 290 | { 0x1043, 0x846b, "GeForce GT 625" }, |
| @@ -689,6 +695,12 @@ nvkm_device_pci_10de_1199[] = { | |||
| 689 | }; | 695 | }; |
| 690 | 696 | ||
| 691 | static const struct nvkm_device_pci_vendor | 697 | static const struct nvkm_device_pci_vendor |
| 698 | nvkm_device_pci_10de_11e0[] = { | ||
| 699 | { 0x1558, 0x5106, NULL, { .War00C800_0 = true } }, | ||
| 700 | {} | ||
| 701 | }; | ||
| 702 | |||
| 703 | static const struct nvkm_device_pci_vendor | ||
| 692 | nvkm_device_pci_10de_11e3[] = { | 704 | nvkm_device_pci_10de_11e3[] = { |
| 693 | { 0x17aa, 0x3683, "GeForce GTX 760A" }, | 705 | { 0x17aa, 0x3683, "GeForce GTX 760A" }, |
| 694 | {} | 706 | {} |
| @@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = { | |||
| 1370 | { 0x0fe1, "GeForce GT 730M" }, | 1382 | { 0x0fe1, "GeForce GT 730M" }, |
| 1371 | { 0x0fe2, "GeForce GT 745M" }, | 1383 | { 0x0fe2, "GeForce GT 745M" }, |
| 1372 | { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, | 1384 | { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, |
| 1373 | { 0x0fe4, "GeForce GT 750M" }, | 1385 | { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 }, |
| 1374 | { 0x0fe9, "GeForce GT 750M" }, | 1386 | { 0x0fe9, "GeForce GT 750M" }, |
| 1375 | { 0x0fea, "GeForce GT 755M" }, | 1387 | { 0x0fea, "GeForce GT 755M" }, |
| 1376 | { 0x0fec, "GeForce 710A" }, | 1388 | { 0x0fec, "GeForce 710A" }, |
| @@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = { | |||
| 1485 | { 0x11c6, "GeForce GTX 650 Ti" }, | 1497 | { 0x11c6, "GeForce GTX 650 Ti" }, |
| 1486 | { 0x11c8, "GeForce GTX 650" }, | 1498 | { 0x11c8, "GeForce GTX 650" }, |
| 1487 | { 0x11cb, "GeForce GT 740" }, | 1499 | { 0x11cb, "GeForce GT 740" }, |
| 1488 | { 0x11e0, "GeForce GTX 770M" }, | 1500 | { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, |
| 1489 | { 0x11e1, "GeForce GTX 765M" }, | 1501 | { 0x11e1, "GeForce GTX 765M" }, |
| 1490 | { 0x11e2, "GeForce GTX 765M" }, | 1502 | { 0x11e2, "GeForce GTX 765M" }, |
| 1491 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, | 1503 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index b5b875928aba..74de7a96c22a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | |||
| @@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info) | |||
| 207 | const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; | 207 | const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; |
| 208 | const u32 t = timeslice_mode; | 208 | const u32 t = timeslice_mode; |
| 209 | const u32 o = PPC_UNIT(gpc, ppc, 0); | 209 | const u32 o = PPC_UNIT(gpc, ppc, 0); |
| 210 | if (!(gr->ppc_mask[gpc] & (1 << ppc))) | ||
| 211 | continue; | ||
| 210 | mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); | 212 | mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); |
| 211 | mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); | 213 | mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); |
| 212 | bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; | 214 | bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc index 194afe910d21..7dacb3cc0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | |||
| @@ -52,10 +52,12 @@ mmio_list_base: | |||
| 52 | #endif | 52 | #endif |
| 53 | 53 | ||
| 54 | #ifdef INCLUDE_CODE | 54 | #ifdef INCLUDE_CODE |
| 55 | #define gpc_addr(reg,addr) /* | ||
| 56 | */ imm32(reg,addr) /* | ||
| 57 | */ or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE | ||
| 55 | #define gpc_wr32(addr,reg) /* | 58 | #define gpc_wr32(addr,reg) /* |
| 59 | */ gpc_addr($r14,addr) /* | ||
| 56 | */ mov b32 $r15 reg /* | 60 | */ mov b32 $r15 reg /* |
| 57 | */ imm32($r14, addr) /* | ||
| 58 | */ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /* | ||
| 59 | */ call(nv_wr32) | 61 | */ call(nv_wr32) |
| 60 | 62 | ||
| 61 | // reports an exception to the host | 63 | // reports an exception to the host |
| @@ -161,7 +163,7 @@ init: | |||
| 161 | 163 | ||
| 162 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 | 164 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 |
| 163 | // figure out which, and how many, UNKs are actually present | 165 | // figure out which, and how many, UNKs are actually present |
| 164 | imm32($r14, 0x500c30) | 166 | gpc_addr($r14, 0x500c30) |
| 165 | clear b32 $r2 | 167 | clear b32 $r2 |
| 166 | clear b32 $r3 | 168 | clear b32 $r3 |
| 167 | clear b32 $r4 | 169 | clear b32 $r4 |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h index 64d07df4b8b1..bb820ff28621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | |||
| @@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = { | |||
| 314 | 0x03f01200, | 314 | 0x03f01200, |
| 315 | 0x0002d000, | 315 | 0x0002d000, |
| 316 | 0x17f104bd, | 316 | 0x17f104bd, |
| 317 | 0x10fe0542, | 317 | 0x10fe0545, |
| 318 | 0x0007f100, | 318 | 0x0007f100, |
| 319 | 0x0003f007, | 319 | 0x0003f007, |
| 320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
| @@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = { | |||
| 338 | 0x02d00103, | 338 | 0x02d00103, |
| 339 | 0xf104bd00, | 339 | 0xf104bd00, |
| 340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
| 341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
| 342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
| 343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
| 344 | 0xb06821f4, | 344 | 0xf444bd34, |
| 345 | 0x0bf400f6, | 345 | 0xf6b06821, |
| 346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
| 347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
| 348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
| 349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
| 350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
| 351 | 0xb004e0b6, | 351 | 0xb60120b6, |
| 352 | 0x1bf40126, | 352 | 0x26b004e0, |
| 353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf401, |
| 354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
| 355 | 0xf1080480, | 355 | 0x80070380, |
| 356 | 0xf0010027, | 356 | 0x27f10804, |
| 357 | 0x22cf0223, | 357 | 0x23f00100, |
| 358 | 0x9534bd00, | 358 | 0x0022cf02, |
| 359 | 0x07f10825, | 359 | 0x259534bd, |
| 360 | 0x03f0c000, | 360 | 0x0007f108, |
| 361 | 0x0005d001, | 361 | 0x0103f0c0, |
| 362 | 0x07f104bd, | 362 | 0xbd0005d0, |
| 363 | 0x03f0c100, | 363 | 0x0007f104, |
| 364 | 0x0005d001, | 364 | 0x0103f0c1, |
| 365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
| 366 | 0x010f9800, | 366 | 0x000e9804, |
| 367 | 0x015021f5, | 367 | 0xf5010f98, |
| 368 | 0xbb002fbb, | 368 | 0xbb015021, |
| 369 | 0x0e98003f, | 369 | 0x3fbb002f, |
| 370 | 0x020f9801, | 370 | 0x010e9800, |
| 371 | 0x015021f5, | 371 | 0xf5020f98, |
| 372 | 0xfd050e98, | 372 | 0x98015021, |
| 373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
| 374 | 0x003ebb00, | 374 | 0x002ebb00, |
| 375 | 0x98020e98, | 375 | 0x98003ebb, |
| 376 | 0x21f5030f, | 376 | 0x0f98020e, |
| 377 | 0x0e980150, | 377 | 0x5021f503, |
| 378 | 0x00effd07, | 378 | 0x070e9801, |
| 379 | 0xbb002ebb, | 379 | 0xbb00effd, |
| 380 | 0x35b6003e, | 380 | 0x3ebb002e, |
| 381 | 0x0007f102, | 381 | 0x0235b600, |
| 382 | 0x0103f0d3, | 382 | 0xd30007f1, |
| 383 | 0xbd0003d0, | 383 | 0xd00103f0, |
| 384 | 0x0825b604, | ||
| 385 | 0xb60635b6, | ||
| 386 | 0x30b60120, | ||
| 387 | 0x0824b601, | ||
| 388 | 0xb90834b6, | ||
| 389 | 0x21f5022f, | ||
| 390 | 0x2fbb02d3, | ||
| 391 | 0x003fbb00, | ||
| 392 | 0x010007f1, | ||
| 393 | 0xd00203f0, | ||
| 394 | 0x04bd0003, | 384 | 0x04bd0003, |
| 395 | 0x29f024bd, | 385 | 0xb60825b6, |
| 396 | 0x0007f11f, | 386 | 0x20b60635, |
| 397 | 0x0203f008, | 387 | 0x0130b601, |
| 398 | 0xbd0002d0, | 388 | 0xb60824b6, |
| 399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
| 400 | 0x0031f404, | 390 | 0xd321f502, |
| 401 | 0xf00028f4, | 391 | 0x002fbb02, |
| 402 | 0x21f424d7, | 392 | 0xf1003fbb, |
| 403 | 0xf401f439, | 393 | 0xf0010007, |
| 404 | 0xf404e4b0, | 394 | 0x03d00203, |
| 405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
| 406 | 0x0627f001, | 396 | 0x1f29f024, |
| 407 | 0x12fd20bd, | 397 | 0x080007f1, |
| 408 | 0x01e4b604, | 398 | 0xd00203f0, |
| 409 | 0xfe051efd, | 399 | 0x04bd0002, |
| 410 | 0x21f50018, | 400 | /* 0x0508: main */ |
| 411 | 0x0ef405fa, | 401 | 0xf40031f4, |
| 412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
| 413 | 0x10ef94d3, | 403 | 0x3921f424, |
| 414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
| 415 | 0xf4037e21, | 405 | 0x18f404e4, |
| 416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
| 417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
| 418 | 0xf90188fe, | 408 | 0x0412fd20, |
| 419 | 0xf990f980, | 409 | 0xfd01e4b6, |
| 420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
| 421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
| 422 | 0xf104bdf0, | 412 | 0xd30ef405, |
| 423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
| 424 | 0xaacf00a3, | 414 | 0xf010ef94, |
| 425 | 0x04abc400, | 415 | 0x21f501f5, |
| 426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
| 427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
| 428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
| 429 | 0x00eecf00, | 419 | 0x80f90188, |
| 430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
| 431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
| 432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
| 433 | 0x01e7f004, | 423 | 0xa7f104bd, |
| 434 | 0x1d0007f1, | 424 | 0xa3f00200, |
| 435 | 0xd00003f0, | 425 | 0x00aacf00, |
| 436 | 0x04bd000e, | 426 | 0xf404abc4, |
| 437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
| 438 | 0x010007f1, | 428 | 0x00e7f124, |
| 439 | 0xd00003f0, | 429 | 0x00e3f01a, |
| 440 | 0x04bd000a, | 430 | 0xf100eecf, |
| 441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
| 442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
| 443 | 0x90fca0fc, | 433 | 0x0421f400, |
| 444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
| 445 | 0xf480fc00, | 435 | 0xf01d0007, |
| 446 | 0x01f80032, | 436 | 0x0ed00003, |
| 447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
| 448 | 0x9801f7f0, | 438 | 0xf104bd00, |
| 449 | 0xfebb040e, | 439 | 0xf0010007, |
| 450 | 0x02ffb904, | 440 | 0x0ad00003, |
| 451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
| 452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
| 453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
| 454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
| 455 | 0xf120f7f0, | 445 | 0x0088fe80, |
| 446 | 0x32f480fc, | ||
| 447 | /* 0x05b7: hub_barrier_done */ | ||
| 448 | 0xf001f800, | ||
| 449 | 0x0e9801f7, | ||
| 450 | 0x04febb04, | ||
| 451 | 0xf102ffb9, | ||
| 452 | 0xf09418e7, | ||
| 453 | 0x21f440e3, | ||
| 454 | /* 0x05cf: ctx_redswitch */ | ||
| 455 | 0xf000f89d, | ||
| 456 | 0x07f120f7, | ||
| 457 | 0x03f08500, | ||
| 458 | 0x000fd001, | ||
| 459 | 0xe7f004bd, | ||
| 460 | /* 0x05e1: ctx_redswitch_delay */ | ||
| 461 | 0x01e2b608, | ||
| 462 | 0xf1fd1bf4, | ||
| 463 | 0xf10800f5, | ||
| 464 | 0xf10200f5, | ||
| 456 | 0xf0850007, | 465 | 0xf0850007, |
| 457 | 0x0fd00103, | 466 | 0x0fd00103, |
| 458 | 0xf004bd00, | 467 | 0xf804bd00, |
| 459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
| 460 | 0xe2b608e7, | 469 | 0x0007f100, |
| 461 | 0xfd1bf401, | 470 | 0x0203f081, |
| 462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
| 463 | 0x0200f5f1, | 472 | 0x0711f404, |
| 464 | 0x850007f1, | 473 | 0x05cf21f5, |
| 465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
| 466 | 0x04bd000f, | 475 | 0x026a21f5, |
| 467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
| 468 | 0x07f100f8, | 477 | 0x03f047fc, |
| 469 | 0x03f08100, | 478 | 0x0002d002, |
| 470 | 0x000fd002, | 479 | 0x2cf004bd, |
| 471 | 0x11f404bd, | 480 | 0x0320b601, |
| 472 | 0xcc21f507, | 481 | 0x4afc07f1, |
| 473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
| 474 | 0x6a21f505, | 483 | 0x04bd0002, |
| 475 | 0xf124bd02, | ||
| 476 | 0xf047fc07, | ||
| 477 | 0x02d00203, | ||
| 478 | 0xf004bd00, | ||
| 479 | 0x20b6012c, | ||
| 480 | 0xfc07f103, | ||
| 481 | 0x0203f04a, | ||
| 482 | 0xbd0002d0, | ||
| 483 | 0x01acf004, | ||
| 484 | 0xf102a5f0, | ||
| 485 | 0xf00000b7, | ||
| 486 | 0x0c9850b3, | ||
| 487 | 0x0fc4b604, | ||
| 488 | 0x9800bcbb, | ||
| 489 | 0x0d98000c, | ||
| 490 | 0x00e7f001, | ||
| 491 | 0x016f21f5, | ||
| 492 | 0xf101acf0, | ||
| 493 | 0xf04000b7, | ||
| 494 | 0x0c9850b3, | ||
| 495 | 0x0fc4b604, | ||
| 496 | 0x9800bcbb, | ||
| 497 | 0x0d98010c, | ||
| 498 | 0x060f9802, | ||
| 499 | 0x0800e7f1, | ||
| 500 | 0x016f21f5, | ||
| 501 | 0xf001acf0, | 484 | 0xf001acf0, |
| 502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
| 503 | 0xb3f03000, | 486 | 0xb3f00000, |
| 504 | 0x040c9850, | 487 | 0x040c9850, |
| 505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
| 506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
| 507 | 0x030d9802, | 490 | 0x010d9800, |
| 508 | 0xf1080f98, | 491 | 0xf500e7f0, |
| 509 | 0xf50200e7, | 492 | 0xf0016f21, |
| 510 | 0xf5016f21, | 493 | 0xb7f101ac, |
| 511 | 0xf4025e21, | 494 | 0xb3f04000, |
| 512 | 0x12f40601, | 495 | 0x040c9850, |
| 513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
| 514 | 0x7f21f507, | 497 | 0x0c9800bc, |
| 515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
| 516 | 0xb421f502, | 499 | 0xf1060f98, |
| 517 | 0x0000f805, | 500 | 0xf50800e7, |
| 518 | 0x00000000, | 501 | 0xf0016f21, |
| 502 | 0xa5f001ac, | ||
| 503 | 0x00b7f104, | ||
| 504 | 0x50b3f030, | ||
| 505 | 0xb6040c98, | ||
| 506 | 0xbcbb0fc4, | ||
| 507 | 0x020c9800, | ||
| 508 | 0x98030d98, | ||
| 509 | 0xe7f1080f, | ||
| 510 | 0x21f50200, | ||
| 511 | 0x21f5016f, | ||
| 512 | 0x01f4025e, | ||
| 513 | 0x0712f406, | ||
| 514 | /* 0x06ac: ctx_xfer_post */ | ||
| 515 | 0x027f21f5, | ||
| 516 | /* 0x06b0: ctx_xfer_done */ | ||
| 517 | 0x05b721f5, | ||
| 518 | 0x000000f8, | ||
| 519 | 0x00000000, | 519 | 0x00000000, |
| 520 | 0x00000000, | 520 | 0x00000000, |
| 521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h index 2f596433c222..911976d20940 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | |||
| @@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = { | |||
| 314 | 0x03f01200, | 314 | 0x03f01200, |
| 315 | 0x0002d000, | 315 | 0x0002d000, |
| 316 | 0x17f104bd, | 316 | 0x17f104bd, |
| 317 | 0x10fe0542, | 317 | 0x10fe0545, |
| 318 | 0x0007f100, | 318 | 0x0007f100, |
| 319 | 0x0003f007, | 319 | 0x0003f007, |
| 320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
| @@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = { | |||
| 338 | 0x02d00103, | 338 | 0x02d00103, |
| 339 | 0xf104bd00, | 339 | 0xf104bd00, |
| 340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
| 341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
| 342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
| 343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
| 344 | 0xb06821f4, | 344 | 0xf444bd34, |
| 345 | 0x0bf400f6, | 345 | 0xf6b06821, |
| 346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
| 347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
| 348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
| 349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
| 350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
| 351 | 0xb004e0b6, | 351 | 0xb60120b6, |
| 352 | 0x1bf40126, | 352 | 0x26b004e0, |
| 353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf401, |
| 354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
| 355 | 0xf1080480, | 355 | 0x80070380, |
| 356 | 0xf0010027, | 356 | 0x27f10804, |
| 357 | 0x22cf0223, | 357 | 0x23f00100, |
| 358 | 0x9534bd00, | 358 | 0x0022cf02, |
| 359 | 0x07f10825, | 359 | 0x259534bd, |
| 360 | 0x03f0c000, | 360 | 0x0007f108, |
| 361 | 0x0005d001, | 361 | 0x0103f0c0, |
| 362 | 0x07f104bd, | 362 | 0xbd0005d0, |
| 363 | 0x03f0c100, | 363 | 0x0007f104, |
| 364 | 0x0005d001, | 364 | 0x0103f0c1, |
| 365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
| 366 | 0x010f9800, | 366 | 0x000e9804, |
| 367 | 0x015021f5, | 367 | 0xf5010f98, |
| 368 | 0xbb002fbb, | 368 | 0xbb015021, |
| 369 | 0x0e98003f, | 369 | 0x3fbb002f, |
| 370 | 0x020f9801, | 370 | 0x010e9800, |
| 371 | 0x015021f5, | 371 | 0xf5020f98, |
| 372 | 0xfd050e98, | 372 | 0x98015021, |
| 373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
| 374 | 0x003ebb00, | 374 | 0x002ebb00, |
| 375 | 0x98020e98, | 375 | 0x98003ebb, |
| 376 | 0x21f5030f, | 376 | 0x0f98020e, |
| 377 | 0x0e980150, | 377 | 0x5021f503, |
| 378 | 0x00effd07, | 378 | 0x070e9801, |
| 379 | 0xbb002ebb, | 379 | 0xbb00effd, |
| 380 | 0x35b6003e, | 380 | 0x3ebb002e, |
| 381 | 0x0007f102, | 381 | 0x0235b600, |
| 382 | 0x0103f0d3, | 382 | 0xd30007f1, |
| 383 | 0xbd0003d0, | 383 | 0xd00103f0, |
| 384 | 0x0825b604, | ||
| 385 | 0xb60635b6, | ||
| 386 | 0x30b60120, | ||
| 387 | 0x0824b601, | ||
| 388 | 0xb90834b6, | ||
| 389 | 0x21f5022f, | ||
| 390 | 0x2fbb02d3, | ||
| 391 | 0x003fbb00, | ||
| 392 | 0x010007f1, | ||
| 393 | 0xd00203f0, | ||
| 394 | 0x04bd0003, | 384 | 0x04bd0003, |
| 395 | 0x29f024bd, | 385 | 0xb60825b6, |
| 396 | 0x0007f11f, | 386 | 0x20b60635, |
| 397 | 0x0203f008, | 387 | 0x0130b601, |
| 398 | 0xbd0002d0, | 388 | 0xb60824b6, |
| 399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
| 400 | 0x0031f404, | 390 | 0xd321f502, |
| 401 | 0xf00028f4, | 391 | 0x002fbb02, |
| 402 | 0x21f424d7, | 392 | 0xf1003fbb, |
| 403 | 0xf401f439, | 393 | 0xf0010007, |
| 404 | 0xf404e4b0, | 394 | 0x03d00203, |
| 405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
| 406 | 0x0627f001, | 396 | 0x1f29f024, |
| 407 | 0x12fd20bd, | 397 | 0x080007f1, |
| 408 | 0x01e4b604, | 398 | 0xd00203f0, |
| 409 | 0xfe051efd, | 399 | 0x04bd0002, |
| 410 | 0x21f50018, | 400 | /* 0x0508: main */ |
| 411 | 0x0ef405fa, | 401 | 0xf40031f4, |
| 412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
| 413 | 0x10ef94d3, | 403 | 0x3921f424, |
| 414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
| 415 | 0xf4037e21, | 405 | 0x18f404e4, |
| 416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
| 417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
| 418 | 0xf90188fe, | 408 | 0x0412fd20, |
| 419 | 0xf990f980, | 409 | 0xfd01e4b6, |
| 420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
| 421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
| 422 | 0xf104bdf0, | 412 | 0xd30ef405, |
| 423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
| 424 | 0xaacf00a3, | 414 | 0xf010ef94, |
| 425 | 0x04abc400, | 415 | 0x21f501f5, |
| 426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
| 427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
| 428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
| 429 | 0x00eecf00, | 419 | 0x80f90188, |
| 430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
| 431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
| 432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
| 433 | 0x01e7f004, | 423 | 0xa7f104bd, |
| 434 | 0x1d0007f1, | 424 | 0xa3f00200, |
| 435 | 0xd00003f0, | 425 | 0x00aacf00, |
| 436 | 0x04bd000e, | 426 | 0xf404abc4, |
| 437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
| 438 | 0x010007f1, | 428 | 0x00e7f124, |
| 439 | 0xd00003f0, | 429 | 0x00e3f01a, |
| 440 | 0x04bd000a, | 430 | 0xf100eecf, |
| 441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
| 442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
| 443 | 0x90fca0fc, | 433 | 0x0421f400, |
| 444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
| 445 | 0xf480fc00, | 435 | 0xf01d0007, |
| 446 | 0x01f80032, | 436 | 0x0ed00003, |
| 447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
| 448 | 0x9801f7f0, | 438 | 0xf104bd00, |
| 449 | 0xfebb040e, | 439 | 0xf0010007, |
| 450 | 0x02ffb904, | 440 | 0x0ad00003, |
| 451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
| 452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
| 453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
| 454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
| 455 | 0xf120f7f0, | 445 | 0x0088fe80, |
| 446 | 0x32f480fc, | ||
| 447 | /* 0x05b7: hub_barrier_done */ | ||
| 448 | 0xf001f800, | ||
| 449 | 0x0e9801f7, | ||
| 450 | 0x04febb04, | ||
| 451 | 0xf102ffb9, | ||
| 452 | 0xf09418e7, | ||
| 453 | 0x21f440e3, | ||
| 454 | /* 0x05cf: ctx_redswitch */ | ||
| 455 | 0xf000f89d, | ||
| 456 | 0x07f120f7, | ||
| 457 | 0x03f08500, | ||
| 458 | 0x000fd001, | ||
| 459 | 0xe7f004bd, | ||
| 460 | /* 0x05e1: ctx_redswitch_delay */ | ||
| 461 | 0x01e2b608, | ||
| 462 | 0xf1fd1bf4, | ||
| 463 | 0xf10800f5, | ||
| 464 | 0xf10200f5, | ||
| 456 | 0xf0850007, | 465 | 0xf0850007, |
| 457 | 0x0fd00103, | 466 | 0x0fd00103, |
| 458 | 0xf004bd00, | 467 | 0xf804bd00, |
| 459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
| 460 | 0xe2b608e7, | 469 | 0x0007f100, |
| 461 | 0xfd1bf401, | 470 | 0x0203f081, |
| 462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
| 463 | 0x0200f5f1, | 472 | 0x0711f404, |
| 464 | 0x850007f1, | 473 | 0x05cf21f5, |
| 465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
| 466 | 0x04bd000f, | 475 | 0x026a21f5, |
| 467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
| 468 | 0x07f100f8, | 477 | 0x03f047fc, |
| 469 | 0x03f08100, | 478 | 0x0002d002, |
| 470 | 0x000fd002, | 479 | 0x2cf004bd, |
| 471 | 0x11f404bd, | 480 | 0x0320b601, |
| 472 | 0xcc21f507, | 481 | 0x4afc07f1, |
| 473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
| 474 | 0x6a21f505, | 483 | 0x04bd0002, |
| 475 | 0xf124bd02, | ||
| 476 | 0xf047fc07, | ||
| 477 | 0x02d00203, | ||
| 478 | 0xf004bd00, | ||
| 479 | 0x20b6012c, | ||
| 480 | 0xfc07f103, | ||
| 481 | 0x0203f04a, | ||
| 482 | 0xbd0002d0, | ||
| 483 | 0x01acf004, | ||
| 484 | 0xf102a5f0, | ||
| 485 | 0xf00000b7, | ||
| 486 | 0x0c9850b3, | ||
| 487 | 0x0fc4b604, | ||
| 488 | 0x9800bcbb, | ||
| 489 | 0x0d98000c, | ||
| 490 | 0x00e7f001, | ||
| 491 | 0x016f21f5, | ||
| 492 | 0xf101acf0, | ||
| 493 | 0xf04000b7, | ||
| 494 | 0x0c9850b3, | ||
| 495 | 0x0fc4b604, | ||
| 496 | 0x9800bcbb, | ||
| 497 | 0x0d98010c, | ||
| 498 | 0x060f9802, | ||
| 499 | 0x0800e7f1, | ||
| 500 | 0x016f21f5, | ||
| 501 | 0xf001acf0, | 484 | 0xf001acf0, |
| 502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
| 503 | 0xb3f03000, | 486 | 0xb3f00000, |
| 504 | 0x040c9850, | 487 | 0x040c9850, |
| 505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
| 506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
| 507 | 0x030d9802, | 490 | 0x010d9800, |
| 508 | 0xf1080f98, | 491 | 0xf500e7f0, |
| 509 | 0xf50200e7, | 492 | 0xf0016f21, |
| 510 | 0xf5016f21, | 493 | 0xb7f101ac, |
| 511 | 0xf4025e21, | 494 | 0xb3f04000, |
| 512 | 0x12f40601, | 495 | 0x040c9850, |
| 513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
| 514 | 0x7f21f507, | 497 | 0x0c9800bc, |
| 515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
| 516 | 0xb421f502, | 499 | 0xf1060f98, |
| 517 | 0x0000f805, | 500 | 0xf50800e7, |
| 518 | 0x00000000, | 501 | 0xf0016f21, |
| 502 | 0xa5f001ac, | ||
| 503 | 0x00b7f104, | ||
| 504 | 0x50b3f030, | ||
| 505 | 0xb6040c98, | ||
| 506 | 0xbcbb0fc4, | ||
| 507 | 0x020c9800, | ||
| 508 | 0x98030d98, | ||
| 509 | 0xe7f1080f, | ||
| 510 | 0x21f50200, | ||
| 511 | 0x21f5016f, | ||
| 512 | 0x01f4025e, | ||
| 513 | 0x0712f406, | ||
| 514 | /* 0x06ac: ctx_xfer_post */ | ||
| 515 | 0x027f21f5, | ||
| 516 | /* 0x06b0: ctx_xfer_done */ | ||
| 517 | 0x05b721f5, | ||
| 518 | 0x000000f8, | ||
| 519 | 0x00000000, | 519 | 0x00000000, |
| 520 | 0x00000000, | 520 | 0x00000000, |
| 521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h index ee8e54db8fc9..1c6e11b05df2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | |||
| @@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = { | |||
| 314 | 0x03f01200, | 314 | 0x03f01200, |
| 315 | 0x0002d000, | 315 | 0x0002d000, |
| 316 | 0x17f104bd, | 316 | 0x17f104bd, |
| 317 | 0x10fe0542, | 317 | 0x10fe0545, |
| 318 | 0x0007f100, | 318 | 0x0007f100, |
| 319 | 0x0003f007, | 319 | 0x0003f007, |
| 320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
| @@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = { | |||
| 338 | 0x02d00103, | 338 | 0x02d00103, |
| 339 | 0xf104bd00, | 339 | 0xf104bd00, |
| 340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
| 341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
| 342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
| 343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
| 344 | 0xb06821f4, | 344 | 0xf444bd34, |
| 345 | 0x0bf400f6, | 345 | 0xf6b06821, |
| 346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
| 347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
| 348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
| 349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
| 350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
| 351 | 0xb004e0b6, | 351 | 0xb60120b6, |
| 352 | 0x1bf40226, | 352 | 0x26b004e0, |
| 353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf402, |
| 354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
| 355 | 0xf1080480, | 355 | 0x80070380, |
| 356 | 0xf0010027, | 356 | 0x27f10804, |
| 357 | 0x22cf0223, | 357 | 0x23f00100, |
| 358 | 0x9534bd00, | 358 | 0x0022cf02, |
| 359 | 0x07f10825, | 359 | 0x259534bd, |
| 360 | 0x03f0c000, | 360 | 0x0007f108, |
| 361 | 0x0005d001, | 361 | 0x0103f0c0, |
| 362 | 0x07f104bd, | 362 | 0xbd0005d0, |
| 363 | 0x03f0c100, | 363 | 0x0007f104, |
| 364 | 0x0005d001, | 364 | 0x0103f0c1, |
| 365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
| 366 | 0x010f9800, | 366 | 0x000e9804, |
| 367 | 0x015021f5, | 367 | 0xf5010f98, |
| 368 | 0xbb002fbb, | 368 | 0xbb015021, |
| 369 | 0x0e98003f, | 369 | 0x3fbb002f, |
| 370 | 0x020f9801, | 370 | 0x010e9800, |
| 371 | 0x015021f5, | 371 | 0xf5020f98, |
| 372 | 0xfd050e98, | 372 | 0x98015021, |
| 373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
| 374 | 0x003ebb00, | 374 | 0x002ebb00, |
| 375 | 0x98020e98, | 375 | 0x98003ebb, |
| 376 | 0x21f5030f, | 376 | 0x0f98020e, |
| 377 | 0x0e980150, | 377 | 0x5021f503, |
| 378 | 0x00effd07, | 378 | 0x070e9801, |
| 379 | 0xbb002ebb, | 379 | 0xbb00effd, |
| 380 | 0x35b6003e, | 380 | 0x3ebb002e, |
| 381 | 0x0007f102, | 381 | 0x0235b600, |
| 382 | 0x0103f0d3, | 382 | 0xd30007f1, |
| 383 | 0xbd0003d0, | 383 | 0xd00103f0, |
| 384 | 0x0825b604, | ||
| 385 | 0xb60635b6, | ||
| 386 | 0x30b60120, | ||
| 387 | 0x0824b601, | ||
| 388 | 0xb90834b6, | ||
| 389 | 0x21f5022f, | ||
| 390 | 0x2fbb02d3, | ||
| 391 | 0x003fbb00, | ||
| 392 | 0x010007f1, | ||
| 393 | 0xd00203f0, | ||
| 394 | 0x04bd0003, | 384 | 0x04bd0003, |
| 395 | 0x29f024bd, | 385 | 0xb60825b6, |
| 396 | 0x0007f11f, | 386 | 0x20b60635, |
| 397 | 0x0203f030, | 387 | 0x0130b601, |
| 398 | 0xbd0002d0, | 388 | 0xb60824b6, |
| 399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
| 400 | 0x0031f404, | 390 | 0xd321f502, |
| 401 | 0xf00028f4, | 391 | 0x002fbb02, |
| 402 | 0x21f424d7, | 392 | 0xf1003fbb, |
| 403 | 0xf401f439, | 393 | 0xf0010007, |
| 404 | 0xf404e4b0, | 394 | 0x03d00203, |
| 405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
| 406 | 0x0627f001, | 396 | 0x1f29f024, |
| 407 | 0x12fd20bd, | 397 | 0x300007f1, |
| 408 | 0x01e4b604, | 398 | 0xd00203f0, |
| 409 | 0xfe051efd, | 399 | 0x04bd0002, |
| 410 | 0x21f50018, | 400 | /* 0x0508: main */ |
| 411 | 0x0ef405fa, | 401 | 0xf40031f4, |
| 412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
| 413 | 0x10ef94d3, | 403 | 0x3921f424, |
| 414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
| 415 | 0xf4037e21, | 405 | 0x18f404e4, |
| 416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
| 417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
| 418 | 0xf90188fe, | 408 | 0x0412fd20, |
| 419 | 0xf990f980, | 409 | 0xfd01e4b6, |
| 420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
| 421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
| 422 | 0xf104bdf0, | 412 | 0xd30ef405, |
| 423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
| 424 | 0xaacf00a3, | 414 | 0xf010ef94, |
| 425 | 0x04abc400, | 415 | 0x21f501f5, |
| 426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
| 427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
| 428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
| 429 | 0x00eecf00, | 419 | 0x80f90188, |
| 430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
| 431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
| 432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
| 433 | 0x01e7f004, | 423 | 0xa7f104bd, |
| 434 | 0x1d0007f1, | 424 | 0xa3f00200, |
| 435 | 0xd00003f0, | 425 | 0x00aacf00, |
| 436 | 0x04bd000e, | 426 | 0xf404abc4, |
| 437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
| 438 | 0x010007f1, | 428 | 0x00e7f124, |
| 439 | 0xd00003f0, | 429 | 0x00e3f01a, |
| 440 | 0x04bd000a, | 430 | 0xf100eecf, |
| 441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
| 442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
| 443 | 0x90fca0fc, | 433 | 0x0421f400, |
| 444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
| 445 | 0xf480fc00, | 435 | 0xf01d0007, |
| 446 | 0x01f80032, | 436 | 0x0ed00003, |
| 447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
| 448 | 0x9801f7f0, | 438 | 0xf104bd00, |
| 449 | 0xfebb040e, | 439 | 0xf0010007, |
| 450 | 0x02ffb904, | 440 | 0x0ad00003, |
| 451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
| 452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
| 453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
| 454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
| 455 | 0xf120f7f0, | 445 | 0x0088fe80, |
| 446 | 0x32f480fc, | ||
| 447 | /* 0x05b7: hub_barrier_done */ | ||
| 448 | 0xf001f800, | ||
| 449 | 0x0e9801f7, | ||
| 450 | 0x04febb04, | ||
| 451 | 0xf102ffb9, | ||
| 452 | 0xf09418e7, | ||
| 453 | 0x21f440e3, | ||
| 454 | /* 0x05cf: ctx_redswitch */ | ||
| 455 | 0xf000f89d, | ||
| 456 | 0x07f120f7, | ||
| 457 | 0x03f08500, | ||
| 458 | 0x000fd001, | ||
| 459 | 0xe7f004bd, | ||
| 460 | /* 0x05e1: ctx_redswitch_delay */ | ||
| 461 | 0x01e2b608, | ||
| 462 | 0xf1fd1bf4, | ||
| 463 | 0xf10800f5, | ||
| 464 | 0xf10200f5, | ||
| 456 | 0xf0850007, | 465 | 0xf0850007, |
| 457 | 0x0fd00103, | 466 | 0x0fd00103, |
| 458 | 0xf004bd00, | 467 | 0xf804bd00, |
| 459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
| 460 | 0xe2b608e7, | 469 | 0x0007f100, |
| 461 | 0xfd1bf401, | 470 | 0x0203f081, |
| 462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
| 463 | 0x0200f5f1, | 472 | 0x0711f404, |
| 464 | 0x850007f1, | 473 | 0x05cf21f5, |
| 465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
| 466 | 0x04bd000f, | 475 | 0x026a21f5, |
| 467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
| 468 | 0x07f100f8, | 477 | 0x03f047fc, |
| 469 | 0x03f08100, | 478 | 0x0002d002, |
| 470 | 0x000fd002, | 479 | 0x2cf004bd, |
| 471 | 0x11f404bd, | 480 | 0x0320b601, |
| 472 | 0xcc21f507, | 481 | 0x4afc07f1, |
| 473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
| 474 | 0x6a21f505, | 483 | 0x04bd0002, |
| 475 | 0xf124bd02, | ||
| 476 | 0xf047fc07, | ||
| 477 | 0x02d00203, | ||
| 478 | 0xf004bd00, | ||
| 479 | 0x20b6012c, | ||
| 480 | 0xfc07f103, | ||
| 481 | 0x0203f04a, | ||
| 482 | 0xbd0002d0, | ||
| 483 | 0x01acf004, | ||
| 484 | 0xf102a5f0, | ||
| 485 | 0xf00000b7, | ||
| 486 | 0x0c9850b3, | ||
| 487 | 0x0fc4b604, | ||
| 488 | 0x9800bcbb, | ||
| 489 | 0x0d98000c, | ||
| 490 | 0x00e7f001, | ||
| 491 | 0x016f21f5, | ||
| 492 | 0xf101acf0, | ||
| 493 | 0xf04000b7, | ||
| 494 | 0x0c9850b3, | ||
| 495 | 0x0fc4b604, | ||
| 496 | 0x9800bcbb, | ||
| 497 | 0x0d98010c, | ||
| 498 | 0x060f9802, | ||
| 499 | 0x0800e7f1, | ||
| 500 | 0x016f21f5, | ||
| 501 | 0xf001acf0, | 484 | 0xf001acf0, |
| 502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
| 503 | 0xb3f03000, | 486 | 0xb3f00000, |
| 504 | 0x040c9850, | 487 | 0x040c9850, |
| 505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
| 506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
| 507 | 0x030d9802, | 490 | 0x010d9800, |
| 508 | 0xf1080f98, | 491 | 0xf500e7f0, |
| 509 | 0xf50200e7, | 492 | 0xf0016f21, |
| 510 | 0xf5016f21, | 493 | 0xb7f101ac, |
| 511 | 0xf4025e21, | 494 | 0xb3f04000, |
| 512 | 0x12f40601, | 495 | 0x040c9850, |
| 513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
| 514 | 0x7f21f507, | 497 | 0x0c9800bc, |
| 515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
| 516 | 0xb421f502, | 499 | 0xf1060f98, |
| 517 | 0x0000f805, | 500 | 0xf50800e7, |
| 518 | 0x00000000, | 501 | 0xf0016f21, |
| 502 | 0xa5f001ac, | ||
| 503 | 0x00b7f104, | ||
| 504 | 0x50b3f030, | ||
| 505 | 0xb6040c98, | ||
| 506 | 0xbcbb0fc4, | ||
| 507 | 0x020c9800, | ||
| 508 | 0x98030d98, | ||
| 509 | 0xe7f1080f, | ||
| 510 | 0x21f50200, | ||
| 511 | 0x21f5016f, | ||
| 512 | 0x01f4025e, | ||
| 513 | 0x0712f406, | ||
| 514 | /* 0x06ac: ctx_xfer_post */ | ||
| 515 | 0x027f21f5, | ||
| 516 | /* 0x06b0: ctx_xfer_done */ | ||
| 517 | 0x05b721f5, | ||
| 518 | 0x000000f8, | ||
| 519 | 0x00000000, | 519 | 0x00000000, |
| 520 | 0x00000000, | 520 | 0x00000000, |
| 521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h index fbcc342f896f..84af7ec6a78e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | |||
| @@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = { | |||
| 276 | 0x02020014, | 276 | 0x02020014, |
| 277 | 0xf6120040, | 277 | 0xf6120040, |
| 278 | 0x04bd0002, | 278 | 0x04bd0002, |
| 279 | 0xfe048141, | 279 | 0xfe048441, |
| 280 | 0x00400010, | 280 | 0x00400010, |
| 281 | 0x0000f607, | 281 | 0x0000f607, |
| 282 | 0x040204bd, | 282 | 0x040204bd, |
| @@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = { | |||
| 295 | 0x01c90080, | 295 | 0x01c90080, |
| 296 | 0xbd0002f6, | 296 | 0xbd0002f6, |
| 297 | 0x0c308e04, | 297 | 0x0c308e04, |
| 298 | 0xbd24bd50, | 298 | 0x01e5f050, |
| 299 | /* 0x0383: init_unk_loop */ | 299 | 0x34bd24bd, |
| 300 | 0x7e44bd34, | 300 | /* 0x0386: init_unk_loop */ |
| 301 | 0xb0000065, | 301 | 0x657e44bd, |
| 302 | 0x0bf400f6, | 302 | 0xf6b00000, |
| 303 | 0xbb010f0e, | 303 | 0x0e0bf400, |
| 304 | 0x4ffd04f2, | 304 | 0xf2bb010f, |
| 305 | 0x0130b605, | 305 | 0x054ffd04, |
| 306 | /* 0x0398: init_unk_next */ | 306 | /* 0x039b: init_unk_next */ |
| 307 | 0xb60120b6, | 307 | 0xb60130b6, |
| 308 | 0x26b004e0, | 308 | 0xe0b60120, |
| 309 | 0xe21bf401, | 309 | 0x0126b004, |
| 310 | /* 0x03a4: init_unk_done */ | 310 | /* 0x03a7: init_unk_done */ |
| 311 | 0xb50703b5, | 311 | 0xb5e21bf4, |
| 312 | 0x00820804, | 312 | 0x04b50703, |
| 313 | 0x22cf0201, | 313 | 0x01008208, |
| 314 | 0x9534bd00, | 314 | 0x0022cf02, |
| 315 | 0x00800825, | 315 | 0x259534bd, |
| 316 | 0x05f601c0, | 316 | 0xc0008008, |
| 317 | 0x8004bd00, | 317 | 0x0005f601, |
| 318 | 0xf601c100, | 318 | 0x008004bd, |
| 319 | 0x04bd0005, | 319 | 0x05f601c1, |
| 320 | 0x98000e98, | 320 | 0x9804bd00, |
| 321 | 0x207e010f, | 321 | 0x0f98000e, |
| 322 | 0x2fbb0001, | 322 | 0x01207e01, |
| 323 | 0x003fbb00, | 323 | 0x002fbb00, |
| 324 | 0x98010e98, | 324 | 0x98003fbb, |
| 325 | 0x207e020f, | 325 | 0x0f98010e, |
| 326 | 0x0e980001, | 326 | 0x01207e02, |
| 327 | 0x00effd05, | 327 | 0x050e9800, |
| 328 | 0xbb002ebb, | 328 | 0xbb00effd, |
| 329 | 0x0e98003e, | 329 | 0x3ebb002e, |
| 330 | 0x030f9802, | 330 | 0x020e9800, |
| 331 | 0x0001207e, | 331 | 0x7e030f98, |
| 332 | 0xfd070e98, | 332 | 0x98000120, |
| 333 | 0x2ebb00ef, | 333 | 0xeffd070e, |
| 334 | 0x003ebb00, | 334 | 0x002ebb00, |
| 335 | 0x800235b6, | 335 | 0xb6003ebb, |
| 336 | 0xf601d300, | 336 | 0x00800235, |
| 337 | 0x04bd0003, | 337 | 0x03f601d3, |
| 338 | 0xb60825b6, | 338 | 0xb604bd00, |
| 339 | 0x20b60635, | 339 | 0x35b60825, |
| 340 | 0x0130b601, | 340 | 0x0120b606, |
| 341 | 0xb60824b6, | 341 | 0xb60130b6, |
| 342 | 0x2fb20834, | 342 | 0x34b60824, |
| 343 | 0x0002687e, | 343 | 0x7e2fb208, |
| 344 | 0xbb002fbb, | 344 | 0xbb000268, |
| 345 | 0x0080003f, | 345 | 0x3fbb002f, |
| 346 | 0x03f60201, | 346 | 0x01008000, |
| 347 | 0xbd04bd00, | 347 | 0x0003f602, |
| 348 | 0x1f29f024, | 348 | 0x24bd04bd, |
| 349 | 0x02300080, | 349 | 0x801f29f0, |
| 350 | 0xbd0002f6, | 350 | 0xf6023000, |
| 351 | /* 0x0445: main */ | 351 | 0x04bd0002, |
| 352 | 0x0031f404, | 352 | /* 0x0448: main */ |
| 353 | 0x0d0028f4, | 353 | 0xf40031f4, |
| 354 | 0x00377e24, | 354 | 0x240d0028, |
| 355 | 0xf401f400, | 355 | 0x0000377e, |
| 356 | 0xf404e4b0, | 356 | 0xb0f401f4, |
| 357 | 0x81fe1d18, | 357 | 0x18f404e4, |
| 358 | 0xbd060201, | 358 | 0x0181fe1d, |
| 359 | 0x0412fd20, | 359 | 0x20bd0602, |
| 360 | 0xfd01e4b6, | 360 | 0xb60412fd, |
| 361 | 0x18fe051e, | 361 | 0x1efd01e4, |
| 362 | 0x05187e00, | 362 | 0x0018fe05, |
| 363 | 0xd40ef400, | 363 | 0x00051b7e, |
| 364 | /* 0x0474: main_not_ctx_xfer */ | 364 | /* 0x0477: main_not_ctx_xfer */ |
| 365 | 0xf010ef94, | 365 | 0x94d40ef4, |
| 366 | 0xf87e01f5, | 366 | 0xf5f010ef, |
| 367 | 0x0ef40002, | 367 | 0x02f87e01, |
| 368 | /* 0x0481: ih */ | 368 | 0xc70ef400, |
| 369 | 0xfe80f9c7, | 369 | /* 0x0484: ih */ |
| 370 | 0x80f90188, | 370 | 0x88fe80f9, |
| 371 | 0xa0f990f9, | 371 | 0xf980f901, |
| 372 | 0xd0f9b0f9, | 372 | 0xf9a0f990, |
| 373 | 0xf0f9e0f9, | 373 | 0xf9d0f9b0, |
| 374 | 0x004a04bd, | 374 | 0xbdf0f9e0, |
| 375 | 0x00aacf02, | 375 | 0x02004a04, |
| 376 | 0xf404abc4, | 376 | 0xc400aacf, |
| 377 | 0x240d1f0b, | 377 | 0x0bf404ab, |
| 378 | 0xcf1a004e, | 378 | 0x4e240d1f, |
| 379 | 0x004f00ee, | 379 | 0xeecf1a00, |
| 380 | 0x00ffcf19, | 380 | 0x19004f00, |
| 381 | 0x0000047e, | 381 | 0x7e00ffcf, |
| 382 | 0x0040010e, | 382 | 0x0e000004, |
| 383 | 0x000ef61d, | 383 | 0x1d004001, |
| 384 | /* 0x04be: ih_no_fifo */ | 384 | 0xbd000ef6, |
| 385 | 0x004004bd, | 385 | /* 0x04c1: ih_no_fifo */ |
| 386 | 0x000af601, | 386 | 0x01004004, |
| 387 | 0xf0fc04bd, | 387 | 0xbd000af6, |
| 388 | 0xd0fce0fc, | 388 | 0xfcf0fc04, |
| 389 | 0xa0fcb0fc, | 389 | 0xfcd0fce0, |
| 390 | 0x80fc90fc, | 390 | 0xfca0fcb0, |
| 391 | 0xfc0088fe, | 391 | 0xfe80fc90, |
| 392 | 0x0032f480, | 392 | 0x80fc0088, |
| 393 | /* 0x04de: hub_barrier_done */ | 393 | 0xf80032f4, |
| 394 | 0x010f01f8, | 394 | /* 0x04e1: hub_barrier_done */ |
| 395 | 0xbb040e98, | 395 | 0x98010f01, |
| 396 | 0xffb204fe, | 396 | 0xfebb040e, |
| 397 | 0x4094188e, | 397 | 0x8effb204, |
| 398 | 0x00008f7e, | 398 | 0x7e409418, |
| 399 | /* 0x04f2: ctx_redswitch */ | 399 | 0xf800008f, |
| 400 | 0x200f00f8, | 400 | /* 0x04f5: ctx_redswitch */ |
| 401 | 0x80200f00, | ||
| 402 | 0xf6018500, | ||
| 403 | 0x04bd000f, | ||
| 404 | /* 0x0502: ctx_redswitch_delay */ | ||
| 405 | 0xe2b6080e, | ||
| 406 | 0xfd1bf401, | ||
| 407 | 0x0800f5f1, | ||
| 408 | 0x0200f5f1, | ||
| 401 | 0x01850080, | 409 | 0x01850080, |
| 402 | 0xbd000ff6, | 410 | 0xbd000ff6, |
| 403 | /* 0x04ff: ctx_redswitch_delay */ | 411 | /* 0x051b: ctx_xfer */ |
| 404 | 0xb6080e04, | 412 | 0x8000f804, |
| 405 | 0x1bf401e2, | 413 | 0xf6028100, |
| 406 | 0x00f5f1fd, | 414 | 0x04bd000f, |
| 407 | 0x00f5f108, | 415 | 0x7e0711f4, |
| 408 | 0x85008002, | 416 | /* 0x052b: ctx_xfer_not_load */ |
| 409 | 0x000ff601, | 417 | 0x7e0004f5, |
| 410 | 0x00f804bd, | 418 | 0xbd000216, |
| 411 | /* 0x0518: ctx_xfer */ | 419 | 0x47fc8024, |
| 412 | 0x02810080, | ||
| 413 | 0xbd000ff6, | ||
| 414 | 0x0711f404, | ||
| 415 | 0x0004f27e, | ||
| 416 | /* 0x0528: ctx_xfer_not_load */ | ||
| 417 | 0x0002167e, | ||
| 418 | 0xfc8024bd, | ||
| 419 | 0x02f60247, | ||
| 420 | 0xf004bd00, | ||
| 421 | 0x20b6012c, | ||
| 422 | 0x4afc8003, | ||
| 423 | 0x0002f602, | 420 | 0x0002f602, |
| 424 | 0xacf004bd, | 421 | 0x2cf004bd, |
| 425 | 0x02a5f001, | 422 | 0x0320b601, |
| 426 | 0x5000008b, | 423 | 0x024afc80, |
| 427 | 0xb6040c98, | 424 | 0xbd0002f6, |
| 428 | 0xbcbb0fc4, | 425 | 0x01acf004, |
| 429 | 0x000c9800, | 426 | 0x8b02a5f0, |
| 430 | 0x0e010d98, | 427 | 0x98500000, |
| 431 | 0x013d7e00, | ||
| 432 | 0x01acf000, | ||
| 433 | 0x5040008b, | ||
| 434 | 0xb6040c98, | ||
| 435 | 0xbcbb0fc4, | ||
| 436 | 0x010c9800, | ||
| 437 | 0x98020d98, | ||
| 438 | 0x004e060f, | ||
| 439 | 0x013d7e08, | ||
| 440 | 0x01acf000, | ||
| 441 | 0x8b04a5f0, | ||
| 442 | 0x98503000, | ||
| 443 | 0xc4b6040c, | 428 | 0xc4b6040c, |
| 444 | 0x00bcbb0f, | 429 | 0x00bcbb0f, |
| 445 | 0x98020c98, | 430 | 0x98000c98, |
| 446 | 0x0f98030d, | 431 | 0x000e010d, |
| 447 | 0x02004e08, | ||
| 448 | 0x00013d7e, | 432 | 0x00013d7e, |
| 449 | 0x00020a7e, | 433 | 0x8b01acf0, |
| 450 | 0xf40601f4, | 434 | 0x98504000, |
| 451 | /* 0x05b2: ctx_xfer_post */ | 435 | 0xc4b6040c, |
| 452 | 0x277e0712, | 436 | 0x00bcbb0f, |
| 453 | /* 0x05b6: ctx_xfer_done */ | 437 | 0x98010c98, |
| 454 | 0xde7e0002, | 438 | 0x0f98020d, |
| 455 | 0x00f80004, | 439 | 0x08004e06, |
| 456 | 0x00000000, | 440 | 0x00013d7e, |
| 441 | 0xf001acf0, | ||
| 442 | 0x008b04a5, | ||
| 443 | 0x0c985030, | ||
| 444 | 0x0fc4b604, | ||
| 445 | 0x9800bcbb, | ||
| 446 | 0x0d98020c, | ||
| 447 | 0x080f9803, | ||
| 448 | 0x7e02004e, | ||
| 449 | 0x7e00013d, | ||
| 450 | 0xf400020a, | ||
| 451 | 0x12f40601, | ||
| 452 | /* 0x05b5: ctx_xfer_post */ | ||
| 453 | 0x02277e07, | ||
| 454 | /* 0x05b9: ctx_xfer_done */ | ||
| 455 | 0x04e17e00, | ||
| 456 | 0x0000f800, | ||
| 457 | 0x00000000, | 457 | 0x00000000, |
| 458 | 0x00000000, | 458 | 0x00000000, |
| 459 | 0x00000000, | 459 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h index 51f5c3c6e966..11bf363a6ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | |||
| @@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = { | |||
| 289 | 0x020014fe, | 289 | 0x020014fe, |
| 290 | 0x12004002, | 290 | 0x12004002, |
| 291 | 0xbd0002f6, | 291 | 0xbd0002f6, |
| 292 | 0x05b04104, | 292 | 0x05b34104, |
| 293 | 0x400010fe, | 293 | 0x400010fe, |
| 294 | 0x00f60700, | 294 | 0x00f60700, |
| 295 | 0x0204bd00, | 295 | 0x0204bd00, |
| @@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = { | |||
| 308 | 0xc900800f, | 308 | 0xc900800f, |
| 309 | 0x0002f601, | 309 | 0x0002f601, |
| 310 | 0x308e04bd, | 310 | 0x308e04bd, |
| 311 | 0x24bd500c, | 311 | 0xe5f0500c, |
| 312 | 0x44bd34bd, | 312 | 0xbd24bd01, |
| 313 | /* 0x03b0: init_unk_loop */ | 313 | /* 0x03b3: init_unk_loop */ |
| 314 | 0x0000657e, | 314 | 0x7e44bd34, |
| 315 | 0xf400f6b0, | 315 | 0xb0000065, |
| 316 | 0x010f0e0b, | 316 | 0x0bf400f6, |
| 317 | 0xfd04f2bb, | 317 | 0xbb010f0e, |
| 318 | 0x30b6054f, | 318 | 0x4ffd04f2, |
| 319 | /* 0x03c5: init_unk_next */ | 319 | 0x0130b605, |
| 320 | 0x0120b601, | 320 | /* 0x03c8: init_unk_next */ |
| 321 | 0xb004e0b6, | 321 | 0xb60120b6, |
| 322 | 0x1bf40226, | 322 | 0x26b004e0, |
| 323 | /* 0x03d1: init_unk_done */ | 323 | 0xe21bf402, |
| 324 | 0x0703b5e2, | 324 | /* 0x03d4: init_unk_done */ |
| 325 | 0x820804b5, | 325 | 0xb50703b5, |
| 326 | 0xcf020100, | 326 | 0x00820804, |
| 327 | 0x34bd0022, | 327 | 0x22cf0201, |
| 328 | 0x80082595, | 328 | 0x9534bd00, |
| 329 | 0xf601c000, | 329 | 0x00800825, |
| 330 | 0x05f601c0, | ||
| 331 | 0x8004bd00, | ||
| 332 | 0xf601c100, | ||
| 330 | 0x04bd0005, | 333 | 0x04bd0005, |
| 331 | 0x01c10080, | 334 | 0x98000e98, |
| 332 | 0xbd0005f6, | 335 | 0x207e010f, |
| 333 | 0x000e9804, | 336 | 0x2fbb0001, |
| 334 | 0x7e010f98, | 337 | 0x003fbb00, |
| 335 | 0xbb000120, | 338 | 0x98010e98, |
| 336 | 0x3fbb002f, | 339 | 0x207e020f, |
| 337 | 0x010e9800, | 340 | 0x0e980001, |
| 338 | 0x7e020f98, | 341 | 0x00effd05, |
| 339 | 0x98000120, | 342 | 0xbb002ebb, |
| 340 | 0xeffd050e, | 343 | 0x0e98003e, |
| 341 | 0x002ebb00, | 344 | 0x030f9802, |
| 342 | 0x98003ebb, | 345 | 0x0001207e, |
| 343 | 0x0f98020e, | 346 | 0xfd070e98, |
| 344 | 0x01207e03, | 347 | 0x2ebb00ef, |
| 345 | 0x070e9800, | 348 | 0x003ebb00, |
| 346 | 0xbb00effd, | 349 | 0x800235b6, |
| 347 | 0x3ebb002e, | 350 | 0xf601d300, |
| 348 | 0x0235b600, | 351 | 0x04bd0003, |
| 349 | 0x01d30080, | 352 | 0xb60825b6, |
| 350 | 0xbd0003f6, | 353 | 0x20b60635, |
| 351 | 0x0825b604, | 354 | 0x0130b601, |
| 352 | 0xb60635b6, | 355 | 0xb60824b6, |
| 353 | 0x30b60120, | 356 | 0x2fb20834, |
| 354 | 0x0824b601, | 357 | 0x0002687e, |
| 355 | 0xb20834b6, | 358 | 0xbb002fbb, |
| 356 | 0x02687e2f, | 359 | 0x3f0f003f, |
| 357 | 0x002fbb00, | 360 | 0x501d608e, |
| 358 | 0x0f003fbb, | 361 | 0xb201e5f0, |
| 359 | 0x8effb23f, | 362 | 0x008f7eff, |
| 360 | 0xf0501d60, | 363 | 0x8e0c0f00, |
| 361 | 0x8f7e01e5, | ||
| 362 | 0x0c0f0000, | ||
| 363 | 0xa88effb2, | ||
| 364 | 0xe5f0501d, | ||
| 365 | 0x008f7e01, | ||
| 366 | 0x03147e00, | ||
| 367 | 0xb23f0f00, | ||
| 368 | 0x1d608eff, | ||
| 369 | 0x01e5f050, | ||
| 370 | 0x00008f7e, | ||
| 371 | 0xffb2000f, | ||
| 372 | 0x501d9c8e, | ||
| 373 | 0x7e01e5f0, | ||
| 374 | 0x0f00008f, | ||
| 375 | 0x03147e01, | ||
| 376 | 0x8effb200, | ||
| 377 | 0xf0501da8, | 364 | 0xf0501da8, |
| 378 | 0x8f7e01e5, | 365 | 0xffb201e5, |
| 379 | 0xff0f0000, | 366 | 0x00008f7e, |
| 380 | 0x988effb2, | 367 | 0x0003147e, |
| 368 | 0x608e3f0f, | ||
| 381 | 0xe5f0501d, | 369 | 0xe5f0501d, |
| 382 | 0x008f7e01, | 370 | 0x7effb201, |
| 383 | 0xb2020f00, | 371 | 0x0f00008f, |
| 384 | 0x1da88eff, | 372 | 0x1d9c8e00, |
| 385 | 0x01e5f050, | 373 | 0x01e5f050, |
| 386 | 0x00008f7e, | 374 | 0x8f7effb2, |
| 375 | 0x010f0000, | ||
| 387 | 0x0003147e, | 376 | 0x0003147e, |
| 388 | 0x85050498, | 377 | 0x501da88e, |
| 389 | 0x98504000, | 378 | 0xb201e5f0, |
| 390 | 0x64b60406, | 379 | 0x008f7eff, |
| 391 | 0x0056bb0f, | 380 | 0x8eff0f00, |
| 392 | /* 0x04e0: tpc_strand_init_tpc_loop */ | 381 | 0xf0501d98, |
| 393 | 0x05705eb8, | 382 | 0xffb201e5, |
| 394 | 0x00657e00, | ||
| 395 | 0xbdf6b200, | ||
| 396 | /* 0x04ed: tpc_strand_init_idx_loop */ | ||
| 397 | 0x605eb874, | ||
| 398 | 0x7fb20005, | ||
| 399 | 0x00008f7e, | ||
| 400 | 0x05885eb8, | ||
| 401 | 0x082f9500, | ||
| 402 | 0x00008f7e, | ||
| 403 | 0x058c5eb8, | ||
| 404 | 0x082f9500, | ||
| 405 | 0x00008f7e, | 383 | 0x00008f7e, |
| 406 | 0x05905eb8, | 384 | 0xa88e020f, |
| 407 | 0x00657e00, | ||
| 408 | 0x06f5b600, | ||
| 409 | 0xb601f0b6, | ||
| 410 | 0x2fbb08f4, | ||
| 411 | 0x003fbb00, | ||
| 412 | 0xb60170b6, | ||
| 413 | 0x1bf40162, | ||
| 414 | 0x0050b7bf, | ||
| 415 | 0x0142b608, | ||
| 416 | 0x0fa81bf4, | ||
| 417 | 0x8effb23f, | ||
| 418 | 0xf0501d60, | ||
| 419 | 0x8f7e01e5, | ||
| 420 | 0x0d0f0000, | ||
| 421 | 0xa88effb2, | ||
| 422 | 0xe5f0501d, | 385 | 0xe5f0501d, |
| 423 | 0x008f7e01, | 386 | 0x7effb201, |
| 424 | 0x03147e00, | 387 | 0x7e00008f, |
| 425 | 0x01008000, | 388 | 0x98000314, |
| 426 | 0x0003f602, | 389 | 0x00850504, |
| 427 | 0x24bd04bd, | 390 | 0x06985040, |
| 428 | 0x801f29f0, | 391 | 0x0f64b604, |
| 429 | 0xf6023000, | 392 | /* 0x04e3: tpc_strand_init_tpc_loop */ |
| 430 | 0x04bd0002, | 393 | 0xb80056bb, |
| 431 | /* 0x0574: main */ | 394 | 0x0005705e, |
| 432 | 0xf40031f4, | 395 | 0x0000657e, |
| 433 | 0x240d0028, | 396 | 0x74bdf6b2, |
| 434 | 0x0000377e, | 397 | /* 0x04f0: tpc_strand_init_idx_loop */ |
| 435 | 0xb0f401f4, | 398 | 0x05605eb8, |
| 436 | 0x18f404e4, | 399 | 0x7e7fb200, |
| 437 | 0x0181fe1d, | 400 | 0xb800008f, |
| 438 | 0x20bd0602, | 401 | 0x0005885e, |
| 439 | 0xb60412fd, | 402 | 0x7e082f95, |
| 440 | 0x1efd01e4, | 403 | 0xb800008f, |
| 441 | 0x0018fe05, | 404 | 0x00058c5e, |
| 442 | 0x0006477e, | 405 | 0x7e082f95, |
| 443 | /* 0x05a3: main_not_ctx_xfer */ | 406 | 0xb800008f, |
| 444 | 0x94d40ef4, | 407 | 0x0005905e, |
| 445 | 0xf5f010ef, | 408 | 0x0000657e, |
| 446 | 0x02f87e01, | 409 | 0xb606f5b6, |
| 447 | 0xc70ef400, | 410 | 0xf4b601f0, |
| 448 | /* 0x05b0: ih */ | 411 | 0x002fbb08, |
| 449 | 0x88fe80f9, | 412 | 0xb6003fbb, |
| 450 | 0xf980f901, | 413 | 0x62b60170, |
| 451 | 0xf9a0f990, | 414 | 0xbf1bf401, |
| 452 | 0xf9d0f9b0, | 415 | 0x080050b7, |
| 453 | 0xbdf0f9e0, | 416 | 0xf40142b6, |
| 454 | 0x02004a04, | 417 | 0x3f0fa81b, |
| 455 | 0xc400aacf, | 418 | 0x501d608e, |
| 456 | 0x0bf404ab, | 419 | 0xb201e5f0, |
| 457 | 0x4e240d1f, | 420 | 0x008f7eff, |
| 458 | 0xeecf1a00, | 421 | 0x8e0d0f00, |
| 459 | 0x19004f00, | 422 | 0xf0501da8, |
| 460 | 0x7e00ffcf, | 423 | 0xffb201e5, |
| 461 | 0x0e000004, | 424 | 0x00008f7e, |
| 462 | 0x1d004001, | 425 | 0x0003147e, |
| 463 | 0xbd000ef6, | 426 | 0x02010080, |
| 464 | /* 0x05ed: ih_no_fifo */ | 427 | 0xbd0003f6, |
| 465 | 0x01004004, | 428 | 0xf024bd04, |
| 466 | 0xbd000af6, | 429 | 0x00801f29, |
| 467 | 0xfcf0fc04, | 430 | 0x02f60230, |
| 468 | 0xfcd0fce0, | 431 | /* 0x0577: main */ |
| 469 | 0xfca0fcb0, | 432 | 0xf404bd00, |
| 470 | 0xfe80fc90, | 433 | 0x28f40031, |
| 471 | 0x80fc0088, | 434 | 0x7e240d00, |
| 472 | 0xf80032f4, | 435 | 0xf4000037, |
| 473 | /* 0x060d: hub_barrier_done */ | 436 | 0xe4b0f401, |
| 474 | 0x98010f01, | 437 | 0x1d18f404, |
| 475 | 0xfebb040e, | 438 | 0x020181fe, |
| 476 | 0x8effb204, | 439 | 0xfd20bd06, |
| 477 | 0x7e409418, | 440 | 0xe4b60412, |
| 478 | 0xf800008f, | 441 | 0x051efd01, |
| 479 | /* 0x0621: ctx_redswitch */ | 442 | 0x7e0018fe, |
| 480 | 0x80200f00, | 443 | 0xf400064a, |
| 444 | /* 0x05a6: main_not_ctx_xfer */ | ||
| 445 | 0xef94d40e, | ||
| 446 | 0x01f5f010, | ||
| 447 | 0x0002f87e, | ||
| 448 | /* 0x05b3: ih */ | ||
| 449 | 0xf9c70ef4, | ||
| 450 | 0x0188fe80, | ||
| 451 | 0x90f980f9, | ||
| 452 | 0xb0f9a0f9, | ||
| 453 | 0xe0f9d0f9, | ||
| 454 | 0x04bdf0f9, | ||
| 455 | 0xcf02004a, | ||
| 456 | 0xabc400aa, | ||
| 457 | 0x1f0bf404, | ||
| 458 | 0x004e240d, | ||
| 459 | 0x00eecf1a, | ||
| 460 | 0xcf19004f, | ||
| 461 | 0x047e00ff, | ||
| 462 | 0x010e0000, | ||
| 463 | 0xf61d0040, | ||
| 464 | 0x04bd000e, | ||
| 465 | /* 0x05f0: ih_no_fifo */ | ||
| 466 | 0xf6010040, | ||
| 467 | 0x04bd000a, | ||
| 468 | 0xe0fcf0fc, | ||
| 469 | 0xb0fcd0fc, | ||
| 470 | 0x90fca0fc, | ||
| 471 | 0x88fe80fc, | ||
| 472 | 0xf480fc00, | ||
| 473 | 0x01f80032, | ||
| 474 | /* 0x0610: hub_barrier_done */ | ||
| 475 | 0x0e98010f, | ||
| 476 | 0x04febb04, | ||
| 477 | 0x188effb2, | ||
| 478 | 0x8f7e4094, | ||
| 479 | 0x00f80000, | ||
| 480 | /* 0x0624: ctx_redswitch */ | ||
| 481 | 0x0080200f, | ||
| 482 | 0x0ff60185, | ||
| 483 | 0x0e04bd00, | ||
| 484 | /* 0x0631: ctx_redswitch_delay */ | ||
| 485 | 0x01e2b608, | ||
| 486 | 0xf1fd1bf4, | ||
| 487 | 0xf10800f5, | ||
| 488 | 0x800200f5, | ||
| 481 | 0xf6018500, | 489 | 0xf6018500, |
| 482 | 0x04bd000f, | 490 | 0x04bd000f, |
| 483 | /* 0x062e: ctx_redswitch_delay */ | 491 | /* 0x064a: ctx_xfer */ |
| 484 | 0xe2b6080e, | 492 | 0x008000f8, |
| 485 | 0xfd1bf401, | 493 | 0x0ff60281, |
| 486 | 0x0800f5f1, | 494 | 0x8e04bd00, |
| 487 | 0x0200f5f1, | 495 | 0xf0501dc4, |
| 488 | 0x01850080, | 496 | 0xffb201e5, |
| 489 | 0xbd000ff6, | 497 | 0x00008f7e, |
| 490 | /* 0x0647: ctx_xfer */ | 498 | 0x7e0711f4, |
| 491 | 0x8000f804, | 499 | /* 0x0667: ctx_xfer_not_load */ |
| 492 | 0xf6028100, | 500 | 0x7e000624, |
| 493 | 0x04bd000f, | 501 | 0xbd000216, |
| 494 | 0xc48effb2, | 502 | 0x47fc8024, |
| 495 | 0xe5f0501d, | ||
| 496 | 0x008f7e01, | ||
| 497 | 0x0711f400, | ||
| 498 | 0x0006217e, | ||
| 499 | /* 0x0664: ctx_xfer_not_load */ | ||
| 500 | 0x0002167e, | ||
| 501 | 0xfc8024bd, | ||
| 502 | 0x02f60247, | ||
| 503 | 0xf004bd00, | ||
| 504 | 0x20b6012c, | ||
| 505 | 0x4afc8003, | ||
| 506 | 0x0002f602, | 503 | 0x0002f602, |
| 507 | 0x0c0f04bd, | 504 | 0x2cf004bd, |
| 508 | 0xa88effb2, | 505 | 0x0320b601, |
| 509 | 0xe5f0501d, | 506 | 0x024afc80, |
| 510 | 0x008f7e01, | 507 | 0xbd0002f6, |
| 511 | 0x03147e00, | 508 | 0x8e0c0f04, |
| 512 | 0xb23f0f00, | 509 | 0xf0501da8, |
| 513 | 0x1d608eff, | 510 | 0xffb201e5, |
| 514 | 0x01e5f050, | ||
| 515 | 0x00008f7e, | 511 | 0x00008f7e, |
| 516 | 0xffb2000f, | 512 | 0x0003147e, |
| 517 | 0x501d9c8e, | 513 | 0x608e3f0f, |
| 518 | 0x7e01e5f0, | 514 | 0xe5f0501d, |
| 515 | 0x7effb201, | ||
| 519 | 0x0f00008f, | 516 | 0x0f00008f, |
| 520 | 0x03147e01, | 517 | 0x1d9c8e00, |
| 521 | 0x01fcf000, | ||
| 522 | 0xb203f0b6, | ||
| 523 | 0x1da88eff, | ||
| 524 | 0x01e5f050, | 518 | 0x01e5f050, |
| 525 | 0x00008f7e, | 519 | 0x8f7effb2, |
| 526 | 0xf001acf0, | 520 | 0x010f0000, |
| 527 | 0x008b02a5, | 521 | 0x0003147e, |
| 528 | 0x0c985000, | 522 | 0xb601fcf0, |
| 529 | 0x0fc4b604, | 523 | 0xa88e03f0, |
| 530 | 0x9800bcbb, | 524 | 0xe5f0501d, |
| 531 | 0x0d98000c, | 525 | 0x7effb201, |
| 532 | 0x7e000e01, | 526 | 0xf000008f, |
| 533 | 0xf000013d, | ||
| 534 | 0x008b01ac, | ||
| 535 | 0x0c985040, | ||
| 536 | 0x0fc4b604, | ||
| 537 | 0x9800bcbb, | ||
| 538 | 0x0d98010c, | ||
| 539 | 0x060f9802, | ||
| 540 | 0x7e08004e, | ||
| 541 | 0xf000013d, | ||
| 542 | 0xa5f001ac, | 527 | 0xa5f001ac, |
| 543 | 0x30008b04, | 528 | 0x00008b02, |
| 544 | 0x040c9850, | 529 | 0x040c9850, |
| 545 | 0xbb0fc4b6, | 530 | 0xbb0fc4b6, |
| 546 | 0x0c9800bc, | 531 | 0x0c9800bc, |
| 547 | 0x030d9802, | 532 | 0x010d9800, |
| 548 | 0x4e080f98, | 533 | 0x3d7e000e, |
| 549 | 0x3d7e0200, | 534 | 0xacf00001, |
| 550 | 0x0a7e0001, | 535 | 0x40008b01, |
| 551 | 0x147e0002, | 536 | 0x040c9850, |
| 552 | 0x01f40003, | 537 | 0xbb0fc4b6, |
| 553 | 0x1a12f406, | 538 | 0x0c9800bc, |
| 554 | /* 0x073c: ctx_xfer_post */ | 539 | 0x020d9801, |
| 555 | 0x0002277e, | 540 | 0x4e060f98, |
| 556 | 0xffb20d0f, | 541 | 0x3d7e0800, |
| 557 | 0x501da88e, | 542 | 0xacf00001, |
| 558 | 0x7e01e5f0, | 543 | 0x04a5f001, |
| 559 | 0x7e00008f, | 544 | 0x5030008b, |
| 560 | /* 0x0753: ctx_xfer_done */ | 545 | 0xb6040c98, |
| 561 | 0x7e000314, | 546 | 0xbcbb0fc4, |
| 562 | 0xf800060d, | 547 | 0x020c9800, |
| 563 | 0x00000000, | 548 | 0x98030d98, |
| 549 | 0x004e080f, | ||
| 550 | 0x013d7e02, | ||
| 551 | 0x020a7e00, | ||
| 552 | 0x03147e00, | ||
| 553 | 0x0601f400, | ||
| 554 | /* 0x073f: ctx_xfer_post */ | ||
| 555 | 0x7e1a12f4, | ||
| 556 | 0x0f000227, | ||
| 557 | 0x1da88e0d, | ||
| 558 | 0x01e5f050, | ||
| 559 | 0x8f7effb2, | ||
| 560 | 0x147e0000, | ||
| 561 | /* 0x0756: ctx_xfer_done */ | ||
| 562 | 0x107e0003, | ||
| 563 | 0x00f80006, | ||
| 564 | 0x00000000, | 564 | 0x00000000, |
| 565 | 0x00000000, | 565 | 0x00000000, |
| 566 | 0x00000000, | 566 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dda7a7d224c9..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, | |||
| 143 | static int | 143 | static int |
| 144 | gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) | 144 | gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) |
| 145 | { | 145 | { |
| 146 | struct gf100_gr *gr = (void *)object->engine; | 146 | struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); |
| 147 | union { | 147 | union { |
| 148 | struct fermi_a_zbc_color_v0 v0; | 148 | struct fermi_a_zbc_color_v0 v0; |
| 149 | } *args = data; | 149 | } *args = data; |
| @@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) | |||
| 189 | static int | 189 | static int |
| 190 | gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) | 190 | gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) |
| 191 | { | 191 | { |
| 192 | struct gf100_gr *gr = (void *)object->engine; | 192 | struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); |
| 193 | union { | 193 | union { |
| 194 | struct fermi_a_zbc_depth_v0 v0; | 194 | struct fermi_a_zbc_depth_v0 v0; |
| 195 | } *args = data; | 195 | } *args = data; |
| @@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) | |||
| 1530 | gr->ppc_nr[i] = gr->func->ppc_nr; | 1530 | gr->ppc_nr[i] = gr->func->ppc_nr; |
| 1531 | for (j = 0; j < gr->ppc_nr[i]; j++) { | 1531 | for (j = 0; j < gr->ppc_nr[i]; j++) { |
| 1532 | u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); | 1532 | u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); |
| 1533 | if (mask) | ||
| 1534 | gr->ppc_mask[i] |= (1 << j); | ||
| 1533 | gr->ppc_tpc_nr[i][j] = hweight8(mask); | 1535 | gr->ppc_tpc_nr[i][j] = hweight8(mask); |
| 1534 | } | 1536 | } |
| 1535 | } | 1537 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 4611961b1187..02e78b8d93f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | |||
| @@ -97,6 +97,7 @@ struct gf100_gr { | |||
| 97 | u8 tpc_nr[GPC_MAX]; | 97 | u8 tpc_nr[GPC_MAX]; |
| 98 | u8 tpc_total; | 98 | u8 tpc_total; |
| 99 | u8 ppc_nr[GPC_MAX]; | 99 | u8 ppc_nr[GPC_MAX]; |
| 100 | u8 ppc_mask[GPC_MAX]; | ||
| 100 | u8 ppc_tpc_nr[GPC_MAX][4]; | 101 | u8 ppc_tpc_nr[GPC_MAX][4]; |
| 101 | 102 | ||
| 102 | struct nvkm_memory *unk4188b4; | 103 | struct nvkm_memory *unk4188b4; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 895ba74057d4..1d7dd38292b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | |||
| @@ -97,7 +97,9 @@ static void * | |||
| 97 | nvkm_instobj_dtor(struct nvkm_memory *memory) | 97 | nvkm_instobj_dtor(struct nvkm_memory *memory) |
| 98 | { | 98 | { |
| 99 | struct nvkm_instobj *iobj = nvkm_instobj(memory); | 99 | struct nvkm_instobj *iobj = nvkm_instobj(memory); |
| 100 | spin_lock(&iobj->imem->lock); | ||
| 100 | list_del(&iobj->head); | 101 | list_del(&iobj->head); |
| 102 | spin_unlock(&iobj->imem->lock); | ||
| 101 | nvkm_memory_del(&iobj->parent); | 103 | nvkm_memory_del(&iobj->parent); |
| 102 | return iobj; | 104 | return iobj; |
| 103 | } | 105 | } |
| @@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, | |||
| 190 | nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); | 192 | nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); |
| 191 | iobj->parent = memory; | 193 | iobj->parent = memory; |
| 192 | iobj->imem = imem; | 194 | iobj->imem = imem; |
| 195 | spin_lock(&iobj->imem->lock); | ||
| 193 | list_add_tail(&iobj->head, &imem->list); | 196 | list_add_tail(&iobj->head, &imem->list); |
| 197 | spin_unlock(&iobj->imem->lock); | ||
| 194 | memory = &iobj->memory; | 198 | memory = &iobj->memory; |
| 195 | } | 199 | } |
| 196 | 200 | ||
| @@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, | |||
| 309 | { | 313 | { |
| 310 | nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); | 314 | nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); |
| 311 | imem->func = func; | 315 | imem->func = func; |
| 316 | spin_lock_init(&imem->lock); | ||
| 312 | INIT_LIST_HEAD(&imem->list); | 317 | INIT_LIST_HEAD(&imem->list); |
| 313 | } | 318 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index b61509e26ec9..b735173a18ff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | |||
| @@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) | |||
| 59 | duty = (uv - bios->base) * div / bios->pwm_range; | 59 | duty = (uv - bios->base) * div / bios->pwm_range; |
| 60 | 60 | ||
| 61 | nvkm_wr32(device, 0x20340, div); | 61 | nvkm_wr32(device, 0x20340, div); |
| 62 | nvkm_wr32(device, 0x20344, 0x8000000 | duty); | 62 | nvkm_wr32(device, 0x20344, 0x80000000 | duty); |
| 63 | 63 | ||
| 64 | return 0; | 64 | return 0; |
| 65 | } | 65 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d3024883b844..84d45633d28c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 221 | if (!(rdev->flags & RADEON_IS_PCIE)) | 221 | if (!(rdev->flags & RADEON_IS_PCIE)) |
| 222 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | 222 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
| 223 | 223 | ||
| 224 | /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx | ||
| 225 | * See https://bugs.freedesktop.org/show_bug.cgi?id=91268 | ||
| 226 | */ | ||
| 227 | if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) | ||
| 228 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | ||
| 229 | |||
| 224 | #ifdef CONFIG_X86_32 | 230 | #ifdef CONFIG_X86_32 |
| 225 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit | 231 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
| 226 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 | 232 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
| 227 | */ | 233 | */ |
| 228 | bo->flags &= ~RADEON_GEM_GTT_WC; | 234 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
| 229 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) | 235 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) |
| 230 | /* Don't try to enable write-combining when it can't work, or things | 236 | /* Don't try to enable write-combining when it can't work, or things |
| 231 | * may be slow | 237 | * may be slow |
| @@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 235 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ | 241 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ |
| 236 | thanks to write-combining | 242 | thanks to write-combining |
| 237 | 243 | ||
| 238 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " | 244 | if (bo->flags & RADEON_GEM_GTT_WC) |
| 239 | "better performance thanks to write-combining\n"); | 245 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " |
| 240 | bo->flags &= ~RADEON_GEM_GTT_WC; | 246 | "better performance thanks to write-combining\n"); |
| 247 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | ||
| 241 | #endif | 248 | #endif |
| 242 | 249 | ||
| 243 | radeon_ttm_placement_from_domain(bo, domain); | 250 | radeon_ttm_placement_from_domain(bo, domain); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6d80dde23400..f4f03dcc1530 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev) | |||
| 1542 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | 1542 | ret = device_create_file(rdev->dev, &dev_attr_power_method); |
| 1543 | if (ret) | 1543 | if (ret) |
| 1544 | DRM_ERROR("failed to create device file for power method\n"); | 1544 | DRM_ERROR("failed to create device file for power method\n"); |
| 1545 | if (!ret) | 1545 | rdev->pm.sysfs_initialized = true; |
| 1546 | rdev->pm.sysfs_initialized = true; | ||
| 1547 | } | 1546 | } |
| 1548 | 1547 | ||
| 1549 | mutex_lock(&rdev->pm.mutex); | 1548 | mutex_lock(&rdev->pm.mutex); |
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c index 3f5e1cf138ba..d37ba2cb886e 100644 --- a/drivers/gpu/drm/radeon/rv730_dpm.c +++ b/drivers/gpu/drm/radeon/rv730_dpm.c | |||
| @@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) | |||
| 464 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); | 464 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
| 465 | 465 | ||
| 466 | if (result != PPSMC_Result_OK) | 466 | if (result != PPSMC_Result_OK) |
| 467 | DRM_ERROR("Could not force DPM to low\n"); | 467 | DRM_DEBUG("Could not force DPM to low\n"); |
| 468 | 468 | ||
| 469 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | 469 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
| 470 | 470 | ||
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b9c770745a7a..e830c8935db0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
| @@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) | |||
| 193 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); | 193 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
| 194 | 194 | ||
| 195 | if (result != PPSMC_Result_OK) | 195 | if (result != PPSMC_Result_OK) |
| 196 | DRM_ERROR("Could not force DPM to low.\n"); | 196 | DRM_DEBUG("Could not force DPM to low.\n"); |
| 197 | 197 | ||
| 198 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | 198 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
| 199 | 199 | ||
| @@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) | |||
| 1418 | int rv770_set_sw_state(struct radeon_device *rdev) | 1418 | int rv770_set_sw_state(struct radeon_device *rdev) |
| 1419 | { | 1419 | { |
| 1420 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) | 1420 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) |
| 1421 | return -EINVAL; | 1421 | DRM_DEBUG("rv770_set_sw_state failed\n"); |
| 1422 | return 0; | 1422 | return 0; |
| 1423 | } | 1423 | } |
| 1424 | 1424 | ||
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e72bf46042e0..a82b891ae1fe 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { | |||
| 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, |
| 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, | 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, |
| 2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, | 2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, |
| 2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, | 2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, |
| 2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, | 2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, |
| 2932 | { 0, 0, 0, 0 }, | 2932 | { 0, 0, 0, 0 }, |
| 2933 | }; | 2933 | }; |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 7a9f4768591e..265064c62d49 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc) | |||
| 168 | struct drm_connector *connector; | 168 | struct drm_connector *connector; |
| 169 | 169 | ||
| 170 | drm_for_each_connector(connector, crtc->dev) { | 170 | drm_for_each_connector(connector, crtc->dev) { |
| 171 | if (connector && connector->state->crtc == crtc) { | 171 | if (connector->state->crtc == crtc) { |
| 172 | struct drm_encoder *encoder = connector->encoder; | 172 | struct drm_encoder *encoder = connector->encoder; |
| 173 | struct vc4_encoder *vc4_encoder = | 173 | struct vc4_encoder *vc4_encoder = |
| 174 | to_vc4_encoder(encoder); | 174 | to_vc4_encoder(encoder); |
| @@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 401 | dlist_next++; | 401 | dlist_next++; |
| 402 | 402 | ||
| 403 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | 403 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), |
| 404 | (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); | 404 | (u32 __iomem *)vc4_crtc->dlist - |
| 405 | (u32 __iomem *)vc4->hvs->dlist); | ||
| 405 | 406 | ||
| 406 | /* Make the next display list start after ours. */ | 407 | /* Make the next display list start after ours. */ |
| 407 | vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); | 408 | vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); |
| @@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) | |||
| 591 | * that will take too much. | 592 | * that will take too much. |
| 592 | */ | 593 | */ |
| 593 | primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); | 594 | primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); |
| 594 | if (!primary_plane) { | 595 | if (IS_ERR(primary_plane)) { |
| 595 | dev_err(dev, "failed to construct primary plane\n"); | 596 | dev_err(dev, "failed to construct primary plane\n"); |
| 596 | ret = PTR_ERR(primary_plane); | 597 | ret = PTR_ERR(primary_plane); |
| 597 | goto err; | 598 | goto err; |
| 598 | } | 599 | } |
| 599 | 600 | ||
| 600 | cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); | 601 | cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); |
| 601 | if (!cursor_plane) { | 602 | if (IS_ERR(cursor_plane)) { |
| 602 | dev_err(dev, "failed to construct cursor plane\n"); | 603 | dev_err(dev, "failed to construct cursor plane\n"); |
| 603 | ret = PTR_ERR(cursor_plane); | 604 | ret = PTR_ERR(cursor_plane); |
| 604 | goto err_primary; | 605 | goto err_primary; |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 6e730605edcc..d5db9e0f3b73 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = { | |||
| 259 | .remove = vc4_platform_drm_remove, | 259 | .remove = vc4_platform_drm_remove, |
| 260 | .driver = { | 260 | .driver = { |
| 261 | .name = "vc4-drm", | 261 | .name = "vc4-drm", |
| 262 | .owner = THIS_MODULE, | ||
| 263 | .of_match_table = vc4_of_match, | 262 | .of_match_table = vc4_of_match, |
| 264 | }, | 263 | }, |
| 265 | }; | 264 | }; |
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index ab1673f672a4..8098c5b21ba4 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c | |||
| @@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev) | |||
| 75 | for (i = 0; i < 64; i += 4) { | 75 | for (i = 0; i < 64; i += 4) { |
| 76 | DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", | 76 | DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 77 | i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", | 77 | i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", |
| 78 | ((uint32_t *)vc4->hvs->dlist)[i + 0], | 78 | readl((u32 __iomem *)vc4->hvs->dlist + i + 0), |
| 79 | ((uint32_t *)vc4->hvs->dlist)[i + 1], | 79 | readl((u32 __iomem *)vc4->hvs->dlist + i + 1), |
| 80 | ((uint32_t *)vc4->hvs->dlist)[i + 2], | 80 | readl((u32 __iomem *)vc4->hvs->dlist + i + 2), |
| 81 | ((uint32_t *)vc4->hvs->dlist)[i + 3]); | 81 | readl((u32 __iomem *)vc4->hvs->dlist + i + 3)); |
| 82 | } | 82 | } |
| 83 | } | 83 | } |
| 84 | 84 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cdd8b10c0147..887f3caad0be 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state) | |||
| 70 | return state->fb && state->crtc; | 70 | return state->fb && state->crtc; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) | 73 | static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) |
| 74 | { | 74 | { |
| 75 | struct vc4_plane_state *vc4_state; | 75 | struct vc4_plane_state *vc4_state; |
| 76 | 76 | ||
| @@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) | |||
| 97 | return &vc4_state->base; | 97 | return &vc4_state->base; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | void vc4_plane_destroy_state(struct drm_plane *plane, | 100 | static void vc4_plane_destroy_state(struct drm_plane *plane, |
| 101 | struct drm_plane_state *state) | 101 | struct drm_plane_state *state) |
| 102 | { | 102 | { |
| 103 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); | 103 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); |
| 104 | 104 | ||
| @@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane, | |||
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | /* Called during init to allocate the plane's atomic state. */ | 110 | /* Called during init to allocate the plane's atomic state. */ |
| 111 | void vc4_plane_reset(struct drm_plane *plane) | 111 | static void vc4_plane_reset(struct drm_plane *plane) |
| 112 | { | 112 | { |
| 113 | struct vc4_plane_state *vc4_state; | 113 | struct vc4_plane_state *vc4_state; |
| 114 | 114 | ||
| @@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 157 | int crtc_w = state->crtc_w; | 157 | int crtc_w = state->crtc_w; |
| 158 | int crtc_h = state->crtc_h; | 158 | int crtc_h = state->crtc_h; |
| 159 | 159 | ||
| 160 | if (state->crtc_w << 16 != state->src_w || | ||
| 161 | state->crtc_h << 16 != state->src_h) { | ||
| 162 | /* We don't support scaling yet, which involves | ||
| 163 | * allocating the LBM memory for scaling temporary | ||
| 164 | * storage, and putting filter kernels in the HVS | ||
| 165 | * context. | ||
| 166 | */ | ||
| 167 | return -EINVAL; | ||
| 168 | } | ||
| 169 | |||
| 160 | if (crtc_x < 0) { | 170 | if (crtc_x < 0) { |
| 161 | offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; | 171 | offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; |
| 162 | crtc_w += crtc_x; | 172 | crtc_w += crtc_x; |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 8b29949507d1..01a4f05c1642 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom) | |||
| 2481 | if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { | 2481 | if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { |
| 2482 | if (features->touch_max) | 2482 | if (features->touch_max) |
| 2483 | features->device_type |= WACOM_DEVICETYPE_TOUCH; | 2483 | features->device_type |= WACOM_DEVICETYPE_TOUCH; |
| 2484 | if (features->type >= INTUOSHT || features->type <= BAMBOO_PT) | 2484 | if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) |
| 2485 | features->device_type |= WACOM_DEVICETYPE_PAD; | 2485 | features->device_type |= WACOM_DEVICETYPE_PAD; |
| 2486 | 2486 | ||
| 2487 | features->x_max = 4096; | 2487 | features->x_max = 4096; |
| @@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F = | |||
| 3213 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; | 3213 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; |
| 3214 | static const struct wacom_features wacom_features_0x336 = | 3214 | static const struct wacom_features wacom_features_0x336 = |
| 3215 | { "Wacom DTU1141", 23472, 13203, 1023, 0, | 3215 | { "Wacom DTU1141", 23472, 13203, 1023, 0, |
| 3216 | DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; | 3216 | DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, |
| 3217 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; | ||
| 3217 | static const struct wacom_features wacom_features_0x57 = | 3218 | static const struct wacom_features wacom_features_0x57 = |
| 3218 | { "Wacom DTK2241", 95640, 54060, 2047, 63, | 3219 | { "Wacom DTK2241", 95640, 54060, 2047, 63, |
| 3219 | DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, | 3220 | DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 842b0043ad94..8f59f057cdf4 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -324,6 +324,7 @@ config SENSORS_APPLESMC | |||
| 324 | config SENSORS_ARM_SCPI | 324 | config SENSORS_ARM_SCPI |
| 325 | tristate "ARM SCPI Sensors" | 325 | tristate "ARM SCPI Sensors" |
| 326 | depends on ARM_SCPI_PROTOCOL | 326 | depends on ARM_SCPI_PROTOCOL |
| 327 | depends on THERMAL || !THERMAL_OF | ||
| 327 | help | 328 | help |
| 328 | This driver provides support for temperature, voltage, current | 329 | This driver provides support for temperature, voltage, current |
| 329 | and power sensors available on ARM Ltd's SCP based platforms. The | 330 | and power sensors available on ARM Ltd's SCP based platforms. The |
| @@ -1471,6 +1472,7 @@ config SENSORS_INA209 | |||
| 1471 | config SENSORS_INA2XX | 1472 | config SENSORS_INA2XX |
| 1472 | tristate "Texas Instruments INA219 and compatibles" | 1473 | tristate "Texas Instruments INA219 and compatibles" |
| 1473 | depends on I2C | 1474 | depends on I2C |
| 1475 | select REGMAP_I2C | ||
| 1474 | help | 1476 | help |
| 1475 | If you say yes here you get support for INA219, INA220, INA226, | 1477 | If you say yes here you get support for INA219, INA220, INA226, |
| 1476 | INA230, and INA231 power monitor chips. | 1478 | INA230, and INA231 power monitor chips. |
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 1f5e956941b1..0af7fd311979 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
| @@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s) | |||
| 537 | static int applesmc_init_smcreg_try(void) | 537 | static int applesmc_init_smcreg_try(void) |
| 538 | { | 538 | { |
| 539 | struct applesmc_registers *s = &smcreg; | 539 | struct applesmc_registers *s = &smcreg; |
| 540 | bool left_light_sensor, right_light_sensor; | 540 | bool left_light_sensor = 0, right_light_sensor = 0; |
| 541 | unsigned int count; | 541 | unsigned int count; |
| 542 | u8 tmp[1]; | 542 | u8 tmp[1]; |
| 543 | int ret; | 543 | int ret; |
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c index 2c1241bbf9af..7e20567bc369 100644 --- a/drivers/hwmon/scpi-hwmon.c +++ b/drivers/hwmon/scpi-hwmon.c | |||
| @@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev) | |||
| 117 | struct scpi_ops *scpi_ops; | 117 | struct scpi_ops *scpi_ops; |
| 118 | struct device *hwdev, *dev = &pdev->dev; | 118 | struct device *hwdev, *dev = &pdev->dev; |
| 119 | struct scpi_sensors *scpi_sensors; | 119 | struct scpi_sensors *scpi_sensors; |
| 120 | int ret; | 120 | int ret, idx; |
| 121 | 121 | ||
| 122 | scpi_ops = get_scpi_ops(); | 122 | scpi_ops = get_scpi_ops(); |
| 123 | if (!scpi_ops) | 123 | if (!scpi_ops) |
| @@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev) | |||
| 146 | 146 | ||
| 147 | scpi_sensors->scpi_ops = scpi_ops; | 147 | scpi_sensors->scpi_ops = scpi_ops; |
| 148 | 148 | ||
| 149 | for (i = 0; i < nr_sensors; i++) { | 149 | for (i = 0, idx = 0; i < nr_sensors; i++) { |
| 150 | struct sensor_data *sensor = &scpi_sensors->data[i]; | 150 | struct sensor_data *sensor = &scpi_sensors->data[idx]; |
| 151 | 151 | ||
| 152 | ret = scpi_ops->sensor_get_info(i, &sensor->info); | 152 | ret = scpi_ops->sensor_get_info(i, &sensor->info); |
| 153 | if (ret) | 153 | if (ret) |
| @@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev) | |||
| 183 | num_power++; | 183 | num_power++; |
| 184 | break; | 184 | break; |
| 185 | default: | 185 | default: |
| 186 | break; | 186 | continue; |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | sensor->dev_attr_input.attr.mode = S_IRUGO; | 189 | sensor->dev_attr_input.attr.mode = S_IRUGO; |
| @@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev) | |||
| 194 | sensor->dev_attr_label.show = scpi_show_label; | 194 | sensor->dev_attr_label.show = scpi_show_label; |
| 195 | sensor->dev_attr_label.attr.name = sensor->label; | 195 | sensor->dev_attr_label.attr.name = sensor->label; |
| 196 | 196 | ||
| 197 | scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr; | 197 | scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr; |
| 198 | scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr; | 198 | scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr; |
| 199 | 199 | ||
| 200 | sysfs_attr_init(scpi_sensors->attrs[i << 1]); | 200 | sysfs_attr_init(scpi_sensors->attrs[idx << 1]); |
| 201 | sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]); | 201 | sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]); |
| 202 | idx++; | ||
| 202 | } | 203 | } |
| 203 | 204 | ||
| 204 | scpi_sensors->group.attrs = scpi_sensors->attrs; | 205 | scpi_sensors->group.attrs = scpi_sensors->attrs; |
| @@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev) | |||
| 236 | 237 | ||
| 237 | zone->sensor_id = i; | 238 | zone->sensor_id = i; |
| 238 | zone->scpi_sensors = scpi_sensors; | 239 | zone->scpi_sensors = scpi_sensors; |
| 239 | zone->tzd = thermal_zone_of_sensor_register(dev, i, zone, | 240 | zone->tzd = thermal_zone_of_sensor_register(dev, |
| 240 | &scpi_sensor_ops); | 241 | sensor->info.sensor_id, zone, &scpi_sensor_ops); |
| 241 | /* | 242 | /* |
| 242 | * The call to thermal_zone_of_sensor_register returns | 243 | * The call to thermal_zone_of_sensor_register returns |
| 243 | * an error for sensors that are not associated with | 244 | * an error for sensors that are not associated with |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e24c2b680b47..7b0aa82ea38b 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -126,6 +126,7 @@ config I2C_I801 | |||
| 126 | Sunrise Point-LP (PCH) | 126 | Sunrise Point-LP (PCH) |
| 127 | DNV (SOC) | 127 | DNV (SOC) |
| 128 | Broxton (SOC) | 128 | Broxton (SOC) |
| 129 | Lewisburg (PCH) | ||
| 129 | 130 | ||
| 130 | This driver can also be built as a module. If so, the module | 131 | This driver can also be built as a module. If so, the module |
| 131 | will be called i2c-i801. | 132 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c306751ceadb..f62d69799a9c 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -62,6 +62,8 @@ | |||
| 62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes | 62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes |
| 63 | * DNV (SOC) 0x19df 32 hard yes yes yes | 63 | * DNV (SOC) 0x19df 32 hard yes yes yes |
| 64 | * Broxton (SOC) 0x5ad4 32 hard yes yes yes | 64 | * Broxton (SOC) 0x5ad4 32 hard yes yes yes |
| 65 | * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes | ||
| 66 | * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes | ||
| 65 | * | 67 | * |
| 66 | * Features supported by this driver: | 68 | * Features supported by this driver: |
| 67 | * Software PEC no | 69 | * Software PEC no |
| @@ -206,6 +208,8 @@ | |||
| 206 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 | 208 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 |
| 207 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df | 209 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df |
| 208 | #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 | 210 | #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 |
| 211 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 | ||
| 212 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 | ||
| 209 | 213 | ||
| 210 | struct i801_mux_config { | 214 | struct i801_mux_config { |
| 211 | char *gpio_chip; | 215 | char *gpio_chip; |
| @@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = { | |||
| 869 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, | 873 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, |
| 870 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, | 874 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, |
| 871 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, | 875 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, |
| 876 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, | ||
| 877 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, | ||
| 872 | { 0, } | 878 | { 0, } |
| 873 | }; | 879 | }; |
| 874 | 880 | ||
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1e4d99da4164..9bb0b056b25f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -50,6 +50,7 @@ | |||
| 50 | #include <linux/of_device.h> | 50 | #include <linux/of_device.h> |
| 51 | #include <linux/of_dma.h> | 51 | #include <linux/of_dma.h> |
| 52 | #include <linux/of_gpio.h> | 52 | #include <linux/of_gpio.h> |
| 53 | #include <linux/pinctrl/consumer.h> | ||
| 53 | #include <linux/platform_data/i2c-imx.h> | 54 | #include <linux/platform_data/i2c-imx.h> |
| 54 | #include <linux/platform_device.h> | 55 | #include <linux/platform_device.h> |
| 55 | #include <linux/sched.h> | 56 | #include <linux/sched.h> |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index e23a7b068c60..0b20449e48cf 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
| @@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c) | |||
| 662 | 662 | ||
| 663 | static void xiic_start_xfer(struct xiic_i2c *i2c) | 663 | static void xiic_start_xfer(struct xiic_i2c *i2c) |
| 664 | { | 664 | { |
| 665 | 665 | spin_lock(&i2c->lock); | |
| 666 | xiic_reinit(i2c); | ||
| 666 | __xiic_start_xfer(i2c); | 667 | __xiic_start_xfer(i2c); |
| 668 | spin_unlock(&i2c->lock); | ||
| 667 | } | 669 | } |
| 668 | 670 | ||
| 669 | static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | 671 | static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 040af5cc8143..ba8eb087f224 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev) | |||
| 715 | if (wakeirq > 0 && wakeirq != client->irq) | 715 | if (wakeirq > 0 && wakeirq != client->irq) |
| 716 | status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); | 716 | status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); |
| 717 | else if (client->irq > 0) | 717 | else if (client->irq > 0) |
| 718 | status = dev_pm_set_wake_irq(dev, wakeirq); | 718 | status = dev_pm_set_wake_irq(dev, client->irq); |
| 719 | else | 719 | else |
| 720 | status = 0; | 720 | status = 0; |
| 721 | 721 | ||
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index eea0c79111e7..4d960d3b93c0 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c | |||
| @@ -101,7 +101,7 @@ | |||
| 101 | #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ | 101 | #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ |
| 102 | 102 | ||
| 103 | /* ID Register Bit Designations (AD7793_REG_ID) */ | 103 | /* ID Register Bit Designations (AD7793_REG_ID) */ |
| 104 | #define AD7785_ID 0xB | 104 | #define AD7785_ID 0x3 |
| 105 | #define AD7792_ID 0xA | 105 | #define AD7792_ID 0xA |
| 106 | #define AD7793_ID 0xB | 106 | #define AD7793_ID 0xB |
| 107 | #define AD7794_ID 0xF | 107 | #define AD7794_ID 0xF |
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 599cde3d03a1..b10f629cc44b 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
| @@ -106,6 +106,13 @@ | |||
| 106 | 106 | ||
| 107 | #define DEFAULT_SAMPLE_TIME 1000 | 107 | #define DEFAULT_SAMPLE_TIME 1000 |
| 108 | 108 | ||
| 109 | /* V at 25°C of 696 mV */ | ||
| 110 | #define VF610_VTEMP25_3V0 950 | ||
| 111 | /* V at 25°C of 699 mV */ | ||
| 112 | #define VF610_VTEMP25_3V3 867 | ||
| 113 | /* Typical sensor slope coefficient at all temperatures */ | ||
| 114 | #define VF610_TEMP_SLOPE_COEFF 1840 | ||
| 115 | |||
| 109 | enum clk_sel { | 116 | enum clk_sel { |
| 110 | VF610_ADCIOC_BUSCLK_SET, | 117 | VF610_ADCIOC_BUSCLK_SET, |
| 111 | VF610_ADCIOC_ALTCLK_SET, | 118 | VF610_ADCIOC_ALTCLK_SET, |
| @@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) | |||
| 197 | adc_feature->clk_div = 8; | 204 | adc_feature->clk_div = 8; |
| 198 | } | 205 | } |
| 199 | 206 | ||
| 207 | adck_rate = ipg_rate / adc_feature->clk_div; | ||
| 208 | |||
| 200 | /* | 209 | /* |
| 201 | * Determine the long sample time adder value to be used based | 210 | * Determine the long sample time adder value to be used based |
| 202 | * on the default minimum sample time provided. | 211 | * on the default minimum sample time provided. |
| @@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) | |||
| 221 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode | 230 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode |
| 222 | * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles | 231 | * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles |
| 223 | */ | 232 | */ |
| 224 | adck_rate = ipg_rate / info->adc_feature.clk_div; | ||
| 225 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) | 233 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) |
| 226 | info->sample_freq_avail[i] = | 234 | info->sample_freq_avail[i] = |
| 227 | adck_rate / (6 + vf610_hw_avgs[i] * | 235 | adck_rate / (6 + vf610_hw_avgs[i] * |
| @@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev, | |||
| 663 | break; | 671 | break; |
| 664 | case IIO_TEMP: | 672 | case IIO_TEMP: |
| 665 | /* | 673 | /* |
| 666 | * Calculate in degree Celsius times 1000 | 674 | * Calculate in degree Celsius times 1000 |
| 667 | * Using sensor slope of 1.84 mV/°C and | 675 | * Using the typical sensor slope of 1.84 mV/°C |
| 668 | * V at 25°C of 696 mV | 676 | * and VREFH_ADC at 3.3V, V at 25°C of 699 mV |
| 669 | */ | 677 | */ |
| 670 | *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; | 678 | *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * |
| 679 | 1000000 / VF610_TEMP_SLOPE_COEFF; | ||
| 680 | |||
| 671 | break; | 681 | break; |
| 672 | default: | 682 | default: |
| 673 | mutex_unlock(&indio_dev->mlock); | 683 | mutex_unlock(&indio_dev->mlock); |
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 0370624a35db..02e636a1c49a 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c | |||
| @@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, | |||
| 841 | case XADC_REG_VCCINT: | 841 | case XADC_REG_VCCINT: |
| 842 | case XADC_REG_VCCAUX: | 842 | case XADC_REG_VCCAUX: |
| 843 | case XADC_REG_VREFP: | 843 | case XADC_REG_VREFP: |
| 844 | case XADC_REG_VREFN: | ||
| 844 | case XADC_REG_VCCBRAM: | 845 | case XADC_REG_VCCBRAM: |
| 845 | case XADC_REG_VCCPINT: | 846 | case XADC_REG_VCCPINT: |
| 846 | case XADC_REG_VCCPAUX: | 847 | case XADC_REG_VCCPAUX: |
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 9e4d2c18b554..81ca0081a019 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c | |||
| @@ -113,12 +113,16 @@ enum ad5064_type { | |||
| 113 | ID_AD5065, | 113 | ID_AD5065, |
| 114 | ID_AD5628_1, | 114 | ID_AD5628_1, |
| 115 | ID_AD5628_2, | 115 | ID_AD5628_2, |
| 116 | ID_AD5629_1, | ||
| 117 | ID_AD5629_2, | ||
| 116 | ID_AD5648_1, | 118 | ID_AD5648_1, |
| 117 | ID_AD5648_2, | 119 | ID_AD5648_2, |
| 118 | ID_AD5666_1, | 120 | ID_AD5666_1, |
| 119 | ID_AD5666_2, | 121 | ID_AD5666_2, |
| 120 | ID_AD5668_1, | 122 | ID_AD5668_1, |
| 121 | ID_AD5668_2, | 123 | ID_AD5668_2, |
| 124 | ID_AD5669_1, | ||
| 125 | ID_AD5669_2, | ||
| 122 | }; | 126 | }; |
| 123 | 127 | ||
| 124 | static int ad5064_write(struct ad5064_state *st, unsigned int cmd, | 128 | static int ad5064_write(struct ad5064_state *st, unsigned int cmd, |
| @@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { | |||
| 291 | { }, | 295 | { }, |
| 292 | }; | 296 | }; |
| 293 | 297 | ||
| 294 | #define AD5064_CHANNEL(chan, addr, bits) { \ | 298 | #define AD5064_CHANNEL(chan, addr, bits, _shift) { \ |
| 295 | .type = IIO_VOLTAGE, \ | 299 | .type = IIO_VOLTAGE, \ |
| 296 | .indexed = 1, \ | 300 | .indexed = 1, \ |
| 297 | .output = 1, \ | 301 | .output = 1, \ |
| @@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { | |||
| 303 | .sign = 'u', \ | 307 | .sign = 'u', \ |
| 304 | .realbits = (bits), \ | 308 | .realbits = (bits), \ |
| 305 | .storagebits = 16, \ | 309 | .storagebits = 16, \ |
| 306 | .shift = 20 - bits, \ | 310 | .shift = (_shift), \ |
| 307 | }, \ | 311 | }, \ |
| 308 | .ext_info = ad5064_ext_info, \ | 312 | .ext_info = ad5064_ext_info, \ |
| 309 | } | 313 | } |
| 310 | 314 | ||
| 311 | #define DECLARE_AD5064_CHANNELS(name, bits) \ | 315 | #define DECLARE_AD5064_CHANNELS(name, bits, shift) \ |
| 312 | const struct iio_chan_spec name[] = { \ | 316 | const struct iio_chan_spec name[] = { \ |
| 313 | AD5064_CHANNEL(0, 0, bits), \ | 317 | AD5064_CHANNEL(0, 0, bits, shift), \ |
| 314 | AD5064_CHANNEL(1, 1, bits), \ | 318 | AD5064_CHANNEL(1, 1, bits, shift), \ |
| 315 | AD5064_CHANNEL(2, 2, bits), \ | 319 | AD5064_CHANNEL(2, 2, bits, shift), \ |
| 316 | AD5064_CHANNEL(3, 3, bits), \ | 320 | AD5064_CHANNEL(3, 3, bits, shift), \ |
| 317 | AD5064_CHANNEL(4, 4, bits), \ | 321 | AD5064_CHANNEL(4, 4, bits, shift), \ |
| 318 | AD5064_CHANNEL(5, 5, bits), \ | 322 | AD5064_CHANNEL(5, 5, bits, shift), \ |
| 319 | AD5064_CHANNEL(6, 6, bits), \ | 323 | AD5064_CHANNEL(6, 6, bits, shift), \ |
| 320 | AD5064_CHANNEL(7, 7, bits), \ | 324 | AD5064_CHANNEL(7, 7, bits, shift), \ |
| 321 | } | 325 | } |
| 322 | 326 | ||
| 323 | #define DECLARE_AD5065_CHANNELS(name, bits) \ | 327 | #define DECLARE_AD5065_CHANNELS(name, bits, shift) \ |
| 324 | const struct iio_chan_spec name[] = { \ | 328 | const struct iio_chan_spec name[] = { \ |
| 325 | AD5064_CHANNEL(0, 0, bits), \ | 329 | AD5064_CHANNEL(0, 0, bits, shift), \ |
| 326 | AD5064_CHANNEL(1, 3, bits), \ | 330 | AD5064_CHANNEL(1, 3, bits, shift), \ |
| 327 | } | 331 | } |
| 328 | 332 | ||
| 329 | static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); | 333 | static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); |
| 330 | static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); | 334 | static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); |
| 331 | static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); | 335 | static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); |
| 332 | 336 | ||
| 333 | static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); | 337 | static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); |
| 334 | static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); | 338 | static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); |
| 335 | static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); | 339 | static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); |
| 340 | |||
| 341 | static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); | ||
| 342 | static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); | ||
| 336 | 343 | ||
| 337 | static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | 344 | static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { |
| 338 | [ID_AD5024] = { | 345 | [ID_AD5024] = { |
| @@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | |||
| 382 | .channels = ad5024_channels, | 389 | .channels = ad5024_channels, |
| 383 | .num_channels = 8, | 390 | .num_channels = 8, |
| 384 | }, | 391 | }, |
| 392 | [ID_AD5629_1] = { | ||
| 393 | .shared_vref = true, | ||
| 394 | .internal_vref = 2500000, | ||
| 395 | .channels = ad5629_channels, | ||
| 396 | .num_channels = 8, | ||
| 397 | }, | ||
| 398 | [ID_AD5629_2] = { | ||
| 399 | .shared_vref = true, | ||
| 400 | .internal_vref = 5000000, | ||
| 401 | .channels = ad5629_channels, | ||
| 402 | .num_channels = 8, | ||
| 403 | }, | ||
| 385 | [ID_AD5648_1] = { | 404 | [ID_AD5648_1] = { |
| 386 | .shared_vref = true, | 405 | .shared_vref = true, |
| 387 | .internal_vref = 2500000, | 406 | .internal_vref = 2500000, |
| @@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | |||
| 418 | .channels = ad5064_channels, | 437 | .channels = ad5064_channels, |
| 419 | .num_channels = 8, | 438 | .num_channels = 8, |
| 420 | }, | 439 | }, |
| 440 | [ID_AD5669_1] = { | ||
| 441 | .shared_vref = true, | ||
| 442 | .internal_vref = 2500000, | ||
| 443 | .channels = ad5669_channels, | ||
| 444 | .num_channels = 8, | ||
| 445 | }, | ||
| 446 | [ID_AD5669_2] = { | ||
| 447 | .shared_vref = true, | ||
| 448 | .internal_vref = 5000000, | ||
| 449 | .channels = ad5669_channels, | ||
| 450 | .num_channels = 8, | ||
| 451 | }, | ||
| 421 | }; | 452 | }; |
| 422 | 453 | ||
| 423 | static inline unsigned int ad5064_num_vref(struct ad5064_state *st) | 454 | static inline unsigned int ad5064_num_vref(struct ad5064_state *st) |
| @@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, | |||
| 597 | unsigned int addr, unsigned int val) | 628 | unsigned int addr, unsigned int val) |
| 598 | { | 629 | { |
| 599 | struct i2c_client *i2c = to_i2c_client(st->dev); | 630 | struct i2c_client *i2c = to_i2c_client(st->dev); |
| 631 | int ret; | ||
| 600 | 632 | ||
| 601 | st->data.i2c[0] = (cmd << 4) | addr; | 633 | st->data.i2c[0] = (cmd << 4) | addr; |
| 602 | put_unaligned_be16(val, &st->data.i2c[1]); | 634 | put_unaligned_be16(val, &st->data.i2c[1]); |
| 603 | return i2c_master_send(i2c, st->data.i2c, 3); | 635 | |
| 636 | ret = i2c_master_send(i2c, st->data.i2c, 3); | ||
| 637 | if (ret < 0) | ||
| 638 | return ret; | ||
| 639 | |||
| 640 | return 0; | ||
| 604 | } | 641 | } |
| 605 | 642 | ||
| 606 | static int ad5064_i2c_probe(struct i2c_client *i2c, | 643 | static int ad5064_i2c_probe(struct i2c_client *i2c, |
| @@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c) | |||
| 616 | } | 653 | } |
| 617 | 654 | ||
| 618 | static const struct i2c_device_id ad5064_i2c_ids[] = { | 655 | static const struct i2c_device_id ad5064_i2c_ids[] = { |
| 619 | {"ad5629-1", ID_AD5628_1}, | 656 | {"ad5629-1", ID_AD5629_1}, |
| 620 | {"ad5629-2", ID_AD5628_2}, | 657 | {"ad5629-2", ID_AD5629_2}, |
| 621 | {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ | 658 | {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ |
| 622 | {"ad5669-1", ID_AD5668_1}, | 659 | {"ad5669-1", ID_AD5669_1}, |
| 623 | {"ad5669-2", ID_AD5668_2}, | 660 | {"ad5669-2", ID_AD5669_2}, |
| 624 | {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ | 661 | {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ |
| 625 | {} | 662 | {} |
| 626 | }; | 663 | }; |
| 627 | MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); | 664 | MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); |
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index 12128d1ca570..71991b5c0658 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c | |||
| @@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev, | |||
| 50 | 50 | ||
| 51 | switch (mask) { | 51 | switch (mask) { |
| 52 | case IIO_CHAN_INFO_RAW: | 52 | case IIO_CHAN_INFO_RAW: |
| 53 | ret = i2c_smbus_read_word_data(*client, | 53 | ret = i2c_smbus_read_word_swapped(*client, |
| 54 | chan->type == IIO_TEMP ? | 54 | chan->type == IIO_TEMP ? |
| 55 | SI7020CMD_TEMP_HOLD : | 55 | SI7020CMD_TEMP_HOLD : |
| 56 | SI7020CMD_RH_HOLD); | 56 | SI7020CMD_RH_HOLD); |
| 57 | if (ret < 0) | 57 | if (ret < 0) |
| 58 | return ret; | 58 | return ret; |
| 59 | *val = ret >> 2; | 59 | *val = ret >> 2; |
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index cbe198cb3699..471ee36b9c6e 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c | |||
| @@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, | |||
| 216 | u8 *page_addr = (u8 *) (pa & PAGE_MASK); | 216 | u8 *page_addr = (u8 *) (pa & PAGE_MASK); |
| 217 | dma_addr_t start_dma_addr = dma_addr; | 217 | dma_addr_t start_dma_addr = dma_addr; |
| 218 | unsigned long irq_flags, nr_pages, i; | 218 | unsigned long irq_flags, nr_pages, i; |
| 219 | unsigned long *entry; | ||
| 219 | int rc = 0; | 220 | int rc = 0; |
| 220 | 221 | ||
| 221 | if (dma_addr < s390_domain->domain.geometry.aperture_start || | 222 | if (dma_addr < s390_domain->domain.geometry.aperture_start || |
| @@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, | |||
| 228 | 229 | ||
| 229 | spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); | 230 | spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); |
| 230 | for (i = 0; i < nr_pages; i++) { | 231 | for (i = 0; i < nr_pages; i++) { |
| 231 | dma_update_cpu_trans(s390_domain->dma_table, page_addr, | 232 | entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); |
| 232 | dma_addr, flags); | 233 | if (!entry) { |
| 234 | rc = -ENOMEM; | ||
| 235 | goto undo_cpu_trans; | ||
| 236 | } | ||
| 237 | dma_update_cpu_trans(entry, page_addr, flags); | ||
| 233 | page_addr += PAGE_SIZE; | 238 | page_addr += PAGE_SIZE; |
| 234 | dma_addr += PAGE_SIZE; | 239 | dma_addr += PAGE_SIZE; |
| 235 | } | 240 | } |
| @@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, | |||
| 242 | break; | 247 | break; |
| 243 | } | 248 | } |
| 244 | spin_unlock(&s390_domain->list_lock); | 249 | spin_unlock(&s390_domain->list_lock); |
| 250 | |||
| 251 | undo_cpu_trans: | ||
| 252 | if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { | ||
| 253 | flags = ZPCI_PTE_INVALID; | ||
| 254 | while (i-- > 0) { | ||
| 255 | page_addr -= PAGE_SIZE; | ||
| 256 | dma_addr -= PAGE_SIZE; | ||
| 257 | entry = dma_walk_cpu_trans(s390_domain->dma_table, | ||
| 258 | dma_addr); | ||
| 259 | if (!entry) | ||
| 260 | break; | ||
| 261 | dma_update_cpu_trans(entry, page_addr, flags); | ||
| 262 | } | ||
| 263 | } | ||
| 245 | spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); | 264 | spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); |
| 246 | 265 | ||
| 247 | return rc; | 266 | return rc; |
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 44a077f3a4a2..f174ce0ca361 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c | |||
| @@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs, | |||
| 84 | writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); | 84 | writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * Disable all interrupts. Leave the PPI and SGIs alone | 87 | * Deactivate and disable all SPIs. Leave the PPI and SGIs |
| 88 | * as they are enabled by redistributor registers. | 88 | * alone as they are in the redistributor registers on GICv3. |
| 89 | */ | 89 | */ |
| 90 | for (i = 32; i < gic_irqs; i += 32) | 90 | for (i = 32; i < gic_irqs; i += 32) { |
| 91 | writel_relaxed(GICD_INT_EN_CLR_X32, | 91 | writel_relaxed(GICD_INT_EN_CLR_X32, |
| 92 | base + GIC_DIST_ENABLE_CLEAR + i / 8); | 92 | base + GIC_DIST_ACTIVE_CLEAR + i / 8); |
| 93 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 94 | base + GIC_DIST_ENABLE_CLEAR + i / 8); | ||
| 95 | } | ||
| 93 | 96 | ||
| 94 | if (sync_access) | 97 | if (sync_access) |
| 95 | sync_access(); | 98 | sync_access(); |
| @@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) | |||
| 102 | /* | 105 | /* |
| 103 | * Deal with the banked PPI and SGI interrupts - disable all | 106 | * Deal with the banked PPI and SGI interrupts - disable all |
| 104 | * PPI interrupts, ensure all SGI interrupts are enabled. | 107 | * PPI interrupts, ensure all SGI interrupts are enabled. |
| 108 | * Make sure everything is deactivated. | ||
| 105 | */ | 109 | */ |
| 110 | writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); | ||
| 106 | writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); | 111 | writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); |
| 107 | writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); | 112 | writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); |
| 108 | 113 | ||
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 515c823c1c95..abf2ffaed392 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -73,9 +73,11 @@ struct gic_chip_data { | |||
| 73 | union gic_base cpu_base; | 73 | union gic_base cpu_base; |
| 74 | #ifdef CONFIG_CPU_PM | 74 | #ifdef CONFIG_CPU_PM |
| 75 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; | 75 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; |
| 76 | u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; | ||
| 76 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; | 77 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; |
| 77 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; | 78 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; |
| 78 | u32 __percpu *saved_ppi_enable; | 79 | u32 __percpu *saved_ppi_enable; |
| 80 | u32 __percpu *saved_ppi_active; | ||
| 79 | u32 __percpu *saved_ppi_conf; | 81 | u32 __percpu *saved_ppi_conf; |
| 80 | #endif | 82 | #endif |
| 81 | struct irq_domain *domain; | 83 | struct irq_domain *domain; |
| @@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr) | |||
| 566 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 568 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) |
| 567 | gic_data[gic_nr].saved_spi_enable[i] = | 569 | gic_data[gic_nr].saved_spi_enable[i] = |
| 568 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 570 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 571 | |||
| 572 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
| 573 | gic_data[gic_nr].saved_spi_active[i] = | ||
| 574 | readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 569 | } | 575 | } |
| 570 | 576 | ||
| 571 | /* | 577 | /* |
| @@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr) | |||
| 604 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | 610 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], |
| 605 | dist_base + GIC_DIST_TARGET + i * 4); | 611 | dist_base + GIC_DIST_TARGET + i * 4); |
| 606 | 612 | ||
| 607 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 613 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { |
| 614 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 615 | dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); | ||
| 608 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | 616 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], |
| 609 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | 617 | dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 618 | } | ||
| 619 | |||
| 620 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { | ||
| 621 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 622 | dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); | ||
| 623 | writel_relaxed(gic_data[gic_nr].saved_spi_active[i], | ||
| 624 | dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 625 | } | ||
| 610 | 626 | ||
| 611 | writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); | 627 | writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); |
| 612 | } | 628 | } |
| @@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr) | |||
| 631 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 647 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
| 632 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 648 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 633 | 649 | ||
| 650 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); | ||
| 651 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
| 652 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 653 | |||
| 634 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 654 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
| 635 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 655 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
| 636 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | 656 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); |
| @@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr) | |||
| 654 | return; | 674 | return; |
| 655 | 675 | ||
| 656 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 676 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
| 657 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 677 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { |
| 678 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 679 | dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); | ||
| 658 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | 680 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 681 | } | ||
| 682 | |||
| 683 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); | ||
| 684 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { | ||
| 685 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 686 | dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); | ||
| 687 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 688 | } | ||
| 659 | 689 | ||
| 660 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 690 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
| 661 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 691 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
| @@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic) | |||
| 710 | sizeof(u32)); | 740 | sizeof(u32)); |
| 711 | BUG_ON(!gic->saved_ppi_enable); | 741 | BUG_ON(!gic->saved_ppi_enable); |
| 712 | 742 | ||
| 743 | gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | ||
| 744 | sizeof(u32)); | ||
| 745 | BUG_ON(!gic->saved_ppi_active); | ||
| 746 | |||
| 713 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | 747 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, |
| 714 | sizeof(u32)); | 748 | sizeof(u32)); |
| 715 | BUG_ON(!gic->saved_ppi_conf); | 749 | BUG_ON(!gic->saved_ppi_conf); |
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..5178645ac42b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
| @@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
| 160 | } | 160 | } |
| 161 | EXPORT_SYMBOL(nvm_erase_blk); | 161 | EXPORT_SYMBOL(nvm_erase_blk); |
| 162 | 162 | ||
| 163 | static void nvm_core_free(struct nvm_dev *dev) | ||
| 164 | { | ||
| 165 | kfree(dev); | ||
| 166 | } | ||
| 167 | |||
| 168 | static int nvm_core_init(struct nvm_dev *dev) | 163 | static int nvm_core_init(struct nvm_dev *dev) |
| 169 | { | 164 | { |
| 170 | struct nvm_id *id = &dev->identity; | 165 | struct nvm_id *id = &dev->identity; |
| @@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev) | |||
| 179 | dev->sec_size = grp->csecs; | 174 | dev->sec_size = grp->csecs; |
| 180 | dev->oob_size = grp->sos; | 175 | dev->oob_size = grp->sos; |
| 181 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; | 176 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; |
| 182 | dev->addr_mode = id->ppat; | 177 | memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); |
| 183 | dev->addr_format = id->ppaf; | ||
| 184 | 178 | ||
| 185 | dev->plane_mode = NVM_PLANE_SINGLE; | 179 | dev->plane_mode = NVM_PLANE_SINGLE; |
| 186 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; | 180 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; |
| 187 | 181 | ||
| 182 | if (grp->mtype != 0) { | ||
| 183 | pr_err("nvm: memory type not supported\n"); | ||
| 184 | return -EINVAL; | ||
| 185 | } | ||
| 186 | |||
| 187 | if (grp->fmtype != 0 && grp->fmtype != 1) { | ||
| 188 | pr_err("nvm: flash type not supported\n"); | ||
| 189 | return -EINVAL; | ||
| 190 | } | ||
| 191 | |||
| 188 | if (grp->mpos & 0x020202) | 192 | if (grp->mpos & 0x020202) |
| 189 | dev->plane_mode = NVM_PLANE_DOUBLE; | 193 | dev->plane_mode = NVM_PLANE_DOUBLE; |
| 190 | if (grp->mpos & 0x040404) | 194 | if (grp->mpos & 0x040404) |
| @@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev) | |||
| 213 | 217 | ||
| 214 | if (dev->mt) | 218 | if (dev->mt) |
| 215 | dev->mt->unregister_mgr(dev); | 219 | dev->mt->unregister_mgr(dev); |
| 216 | |||
| 217 | nvm_core_free(dev); | ||
| 218 | } | 220 | } |
| 219 | 221 | ||
| 220 | static int nvm_init(struct nvm_dev *dev) | 222 | static int nvm_init(struct nvm_dev *dev) |
| 221 | { | 223 | { |
| 222 | struct nvmm_type *mt; | 224 | struct nvmm_type *mt; |
| 223 | int ret = 0; | 225 | int ret = -EINVAL; |
| 224 | 226 | ||
| 225 | if (!dev->q || !dev->ops) | 227 | if (!dev->q || !dev->ops) |
| 226 | return -EINVAL; | 228 | return ret; |
| 227 | 229 | ||
| 228 | if (dev->ops->identity(dev->q, &dev->identity)) { | 230 | if (dev->ops->identity(dev->q, &dev->identity)) { |
| 229 | pr_err("nvm: device could not be identified\n"); | 231 | pr_err("nvm: device could not be identified\n"); |
| 230 | ret = -EINVAL; | ||
| 231 | goto err; | 232 | goto err; |
| 232 | } | 233 | } |
| 233 | 234 | ||
| @@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev) | |||
| 273 | dev->nr_chnls); | 274 | dev->nr_chnls); |
| 274 | return 0; | 275 | return 0; |
| 275 | err: | 276 | err: |
| 276 | nvm_free(dev); | ||
| 277 | pr_err("nvm: failed to initialize nvm\n"); | 277 | pr_err("nvm: failed to initialize nvm\n"); |
| 278 | return ret; | 278 | return ret; |
| 279 | } | 279 | } |
| @@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
| 308 | if (ret) | 308 | if (ret) |
| 309 | goto err_init; | 309 | goto err_init; |
| 310 | 310 | ||
| 311 | down_write(&nvm_lock); | ||
| 312 | list_add(&dev->devices, &nvm_devices); | ||
| 313 | up_write(&nvm_lock); | ||
| 314 | |||
| 315 | if (dev->ops->max_phys_sect > 1) { | 311 | if (dev->ops->max_phys_sect > 1) { |
| 316 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, | 312 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, |
| 317 | "ppalist"); | 313 | "ppalist"); |
| 318 | if (!dev->ppalist_pool) { | 314 | if (!dev->ppalist_pool) { |
| 319 | pr_err("nvm: could not create ppa pool\n"); | 315 | pr_err("nvm: could not create ppa pool\n"); |
| 320 | return -ENOMEM; | 316 | ret = -ENOMEM; |
| 317 | goto err_init; | ||
| 321 | } | 318 | } |
| 322 | } else if (dev->ops->max_phys_sect > 256) { | 319 | } else if (dev->ops->max_phys_sect > 256) { |
| 323 | pr_info("nvm: max sectors supported is 256.\n"); | 320 | pr_info("nvm: max sectors supported is 256.\n"); |
| 324 | return -EINVAL; | 321 | ret = -EINVAL; |
| 322 | goto err_init; | ||
| 325 | } | 323 | } |
| 326 | 324 | ||
| 325 | down_write(&nvm_lock); | ||
| 326 | list_add(&dev->devices, &nvm_devices); | ||
| 327 | up_write(&nvm_lock); | ||
| 328 | |||
| 327 | return 0; | 329 | return 0; |
| 328 | err_init: | 330 | err_init: |
| 329 | kfree(dev); | 331 | kfree(dev); |
| @@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name) | |||
| 341 | return; | 343 | return; |
| 342 | } | 344 | } |
| 343 | 345 | ||
| 344 | nvm_exit(dev); | ||
| 345 | |||
| 346 | down_write(&nvm_lock); | 346 | down_write(&nvm_lock); |
| 347 | list_del(&dev->devices); | 347 | list_del(&dev->devices); |
| 348 | up_write(&nvm_lock); | 348 | up_write(&nvm_lock); |
| 349 | |||
| 350 | nvm_exit(dev); | ||
| 351 | kfree(dev); | ||
| 349 | } | 352 | } |
| 350 | EXPORT_SYMBOL(nvm_unregister); | 353 | EXPORT_SYMBOL(nvm_unregister); |
| 351 | 354 | ||
| @@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t) | |||
| 457 | lockdep_assert_held(&nvm_lock); | 460 | lockdep_assert_held(&nvm_lock); |
| 458 | 461 | ||
| 459 | del_gendisk(tdisk); | 462 | del_gendisk(tdisk); |
| 463 | blk_cleanup_queue(q); | ||
| 464 | |||
| 460 | if (tt->exit) | 465 | if (tt->exit) |
| 461 | tt->exit(tdisk->private_data); | 466 | tt->exit(tdisk->private_data); |
| 462 | 467 | ||
| 463 | blk_cleanup_queue(q); | ||
| 464 | |||
| 465 | put_disk(tdisk); | 468 | put_disk(tdisk); |
| 466 | 469 | ||
| 467 | list_del(&t->list); | 470 | list_del(&t->list); |
| @@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val) | |||
| 541 | if (!dev->mt) | 544 | if (!dev->mt) |
| 542 | return 0; | 545 | return 0; |
| 543 | 546 | ||
| 544 | dev->mt->free_blocks_print(dev); | 547 | dev->mt->lun_info_print(dev); |
| 545 | 548 | ||
| 546 | return 0; | 549 | return 0; |
| 547 | } | 550 | } |
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..e20e74ec6b91 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c | |||
| @@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
| 60 | lun->vlun.lun_id = i % dev->luns_per_chnl; | 60 | lun->vlun.lun_id = i % dev->luns_per_chnl; |
| 61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; | 61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; |
| 62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; | 62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; |
| 63 | lun->vlun.nr_inuse_blocks = 0; | ||
| 64 | lun->vlun.nr_bad_blocks = 0; | ||
| 63 | } | 65 | } |
| 64 | return 0; | 66 | return 0; |
| 65 | } | 67 | } |
| 66 | 68 | ||
| 67 | static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | 69 | static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, |
| 68 | void *private) | 70 | void *private) |
| 69 | { | 71 | { |
| 70 | struct gen_nvm *gn = private; | 72 | struct gen_nvm *gn = private; |
| 71 | struct gen_lun *lun = &gn->luns[lun_id]; | 73 | struct nvm_dev *dev = gn->dev; |
| 74 | struct gen_lun *lun; | ||
| 72 | struct nvm_block *blk; | 75 | struct nvm_block *blk; |
| 73 | int i; | 76 | int i; |
| 74 | 77 | ||
| 75 | if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) | 78 | ppa = dev_to_generic_addr(gn->dev, ppa); |
| 76 | return 0; | 79 | lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; |
| 80 | |||
| 81 | for (i = 0; i < nr_blocks; i++) { | ||
| 82 | if (blks[i] == 0) | ||
| 83 | continue; | ||
| 77 | 84 | ||
| 78 | i = -1; | ||
| 79 | while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { | ||
| 80 | blk = &lun->vlun.blocks[i]; | 85 | blk = &lun->vlun.blocks[i]; |
| 81 | if (!blk) { | 86 | if (!blk) { |
| 82 | pr_err("gennvm: BB data is out of bounds.\n"); | 87 | pr_err("gennvm: BB data is out of bounds.\n"); |
| @@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | |||
| 84 | } | 89 | } |
| 85 | 90 | ||
| 86 | list_move_tail(&blk->list, &lun->bb_list); | 91 | list_move_tail(&blk->list, &lun->bb_list); |
| 92 | lun->vlun.nr_bad_blocks++; | ||
| 87 | } | 93 | } |
| 88 | 94 | ||
| 89 | return 0; | 95 | return 0; |
| @@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) | |||
| 136 | list_move_tail(&blk->list, &lun->used_list); | 142 | list_move_tail(&blk->list, &lun->used_list); |
| 137 | blk->type = 1; | 143 | blk->type = 1; |
| 138 | lun->vlun.nr_free_blocks--; | 144 | lun->vlun.nr_free_blocks--; |
| 145 | lun->vlun.nr_inuse_blocks++; | ||
| 139 | } | 146 | } |
| 140 | } | 147 | } |
| 141 | 148 | ||
| @@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
| 164 | block->id = cur_block_id++; | 171 | block->id = cur_block_id++; |
| 165 | 172 | ||
| 166 | /* First block is reserved for device */ | 173 | /* First block is reserved for device */ |
| 167 | if (unlikely(lun_iter == 0 && blk_iter == 0)) | 174 | if (unlikely(lun_iter == 0 && blk_iter == 0)) { |
| 175 | lun->vlun.nr_free_blocks--; | ||
| 168 | continue; | 176 | continue; |
| 177 | } | ||
| 169 | 178 | ||
| 170 | list_add_tail(&block->list, &lun->free_list); | 179 | list_add_tail(&block->list, &lun->free_list); |
| 171 | } | 180 | } |
| 172 | 181 | ||
| 173 | if (dev->ops->get_bb_tbl) { | 182 | if (dev->ops->get_bb_tbl) { |
| 174 | ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, | 183 | struct ppa_addr ppa; |
| 175 | dev->blks_per_lun, gennvm_block_bb, gn); | 184 | |
| 185 | ppa.ppa = 0; | ||
| 186 | ppa.g.ch = lun->vlun.chnl_id; | ||
| 187 | ppa.g.lun = lun->vlun.id; | ||
| 188 | ppa = generic_to_dev_addr(dev, ppa); | ||
| 189 | |||
| 190 | ret = dev->ops->get_bb_tbl(dev->q, ppa, | ||
| 191 | dev->blks_per_lun, | ||
| 192 | gennvm_block_bb, gn); | ||
| 176 | if (ret) | 193 | if (ret) |
| 177 | pr_err("gennvm: could not read BB table\n"); | 194 | pr_err("gennvm: could not read BB table\n"); |
| 178 | } | 195 | } |
| @@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev) | |||
| 199 | if (!gn) | 216 | if (!gn) |
| 200 | return -ENOMEM; | 217 | return -ENOMEM; |
| 201 | 218 | ||
| 219 | gn->dev = dev; | ||
| 202 | gn->nr_luns = dev->nr_luns; | 220 | gn->nr_luns = dev->nr_luns; |
| 203 | dev->mp = gn; | 221 | dev->mp = gn; |
| 204 | 222 | ||
| @@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, | |||
| 254 | blk->type = 1; | 272 | blk->type = 1; |
| 255 | 273 | ||
| 256 | lun->vlun.nr_free_blocks--; | 274 | lun->vlun.nr_free_blocks--; |
| 275 | lun->vlun.nr_inuse_blocks++; | ||
| 257 | 276 | ||
| 258 | spin_unlock(&vlun->lock); | 277 | spin_unlock(&vlun->lock); |
| 259 | out: | 278 | out: |
| @@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
| 271 | case 1: | 290 | case 1: |
| 272 | list_move_tail(&blk->list, &lun->free_list); | 291 | list_move_tail(&blk->list, &lun->free_list); |
| 273 | lun->vlun.nr_free_blocks++; | 292 | lun->vlun.nr_free_blocks++; |
| 293 | lun->vlun.nr_inuse_blocks--; | ||
| 274 | blk->type = 0; | 294 | blk->type = 0; |
| 275 | break; | 295 | break; |
| 276 | case 2: | 296 | case 2: |
| 277 | list_move_tail(&blk->list, &lun->bb_list); | 297 | list_move_tail(&blk->list, &lun->bb_list); |
| 298 | lun->vlun.nr_bad_blocks++; | ||
| 299 | lun->vlun.nr_inuse_blocks--; | ||
| 278 | break; | 300 | break; |
| 279 | default: | 301 | default: |
| 280 | WARN_ON_ONCE(1); | 302 | WARN_ON_ONCE(1); |
| 281 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", | 303 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", |
| 282 | blk->id, blk->type); | 304 | blk->id, blk->type); |
| 283 | list_move_tail(&blk->list, &lun->bb_list); | 305 | list_move_tail(&blk->list, &lun->bb_list); |
| 306 | lun->vlun.nr_bad_blocks++; | ||
| 307 | lun->vlun.nr_inuse_blocks--; | ||
| 284 | } | 308 | } |
| 285 | 309 | ||
| 286 | spin_unlock(&vlun->lock); | 310 | spin_unlock(&vlun->lock); |
| @@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 292 | 316 | ||
| 293 | if (rqd->nr_pages > 1) { | 317 | if (rqd->nr_pages > 1) { |
| 294 | for (i = 0; i < rqd->nr_pages; i++) | 318 | for (i = 0; i < rqd->nr_pages; i++) |
| 295 | rqd->ppa_list[i] = addr_to_generic_mode(dev, | 319 | rqd->ppa_list[i] = dev_to_generic_addr(dev, |
| 296 | rqd->ppa_list[i]); | 320 | rqd->ppa_list[i]); |
| 297 | } else { | 321 | } else { |
| 298 | rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); | 322 | rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); |
| 299 | } | 323 | } |
| 300 | } | 324 | } |
| 301 | 325 | ||
| @@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 305 | 329 | ||
| 306 | if (rqd->nr_pages > 1) { | 330 | if (rqd->nr_pages > 1) { |
| 307 | for (i = 0; i < rqd->nr_pages; i++) | 331 | for (i = 0; i < rqd->nr_pages; i++) |
| 308 | rqd->ppa_list[i] = generic_to_addr_mode(dev, | 332 | rqd->ppa_list[i] = generic_to_dev_addr(dev, |
| 309 | rqd->ppa_list[i]); | 333 | rqd->ppa_list[i]); |
| 310 | } else { | 334 | } else { |
| 311 | rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); | 335 | rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); |
| 312 | } | 336 | } |
| 313 | } | 337 | } |
| 314 | 338 | ||
| @@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 354 | { | 378 | { |
| 355 | int i; | 379 | int i; |
| 356 | 380 | ||
| 357 | if (!dev->ops->set_bb) | 381 | if (!dev->ops->set_bb_tbl) |
| 358 | return; | 382 | return; |
| 359 | 383 | ||
| 360 | if (dev->ops->set_bb(dev->q, rqd, 1)) | 384 | if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) |
| 361 | return; | 385 | return; |
| 362 | 386 | ||
| 363 | gennvm_addr_to_generic_mode(dev, rqd); | 387 | gennvm_addr_to_generic_mode(dev, rqd); |
| @@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) | |||
| 440 | return &gn->luns[lunid].vlun; | 464 | return &gn->luns[lunid].vlun; |
| 441 | } | 465 | } |
| 442 | 466 | ||
| 443 | static void gennvm_free_blocks_print(struct nvm_dev *dev) | 467 | static void gennvm_lun_info_print(struct nvm_dev *dev) |
| 444 | { | 468 | { |
| 445 | struct gen_nvm *gn = dev->mp; | 469 | struct gen_nvm *gn = dev->mp; |
| 446 | struct gen_lun *lun; | 470 | struct gen_lun *lun; |
| 447 | unsigned int i; | 471 | unsigned int i; |
| 448 | 472 | ||
| 449 | gennvm_for_each_lun(gn, lun, i) | 473 | |
| 450 | pr_info("%s: lun%8u\t%u\n", | 474 | gennvm_for_each_lun(gn, lun, i) { |
| 451 | dev->name, i, lun->vlun.nr_free_blocks); | 475 | spin_lock(&lun->vlun.lock); |
| 476 | |||
| 477 | pr_info("%s: lun%8u\t%u\t%u\t%u\n", | ||
| 478 | dev->name, i, | ||
| 479 | lun->vlun.nr_free_blocks, | ||
| 480 | lun->vlun.nr_inuse_blocks, | ||
| 481 | lun->vlun.nr_bad_blocks); | ||
| 482 | |||
| 483 | spin_unlock(&lun->vlun.lock); | ||
| 484 | } | ||
| 452 | } | 485 | } |
| 453 | 486 | ||
| 454 | static struct nvmm_type gennvm = { | 487 | static struct nvmm_type gennvm = { |
| @@ -466,7 +499,7 @@ static struct nvmm_type gennvm = { | |||
| 466 | .erase_blk = gennvm_erase_blk, | 499 | .erase_blk = gennvm_erase_blk, |
| 467 | 500 | ||
| 468 | .get_lun = gennvm_get_lun, | 501 | .get_lun = gennvm_get_lun, |
| 469 | .free_blocks_print = gennvm_free_blocks_print, | 502 | .lun_info_print = gennvm_lun_info_print, |
| 470 | }; | 503 | }; |
| 471 | 504 | ||
| 472 | static int __init gennvm_module_init(void) | 505 | static int __init gennvm_module_init(void) |
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h | |||
| @@ -35,6 +35,8 @@ struct gen_lun { | |||
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | struct gen_nvm { | 37 | struct gen_nvm { |
| 38 | struct nvm_dev *dev; | ||
| 39 | |||
| 38 | int nr_luns; | 40 | int nr_luns; |
| 39 | struct gen_lun *luns; | 41 | struct gen_lun *luns; |
| 40 | }; | 42 | }; |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
| @@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) | |||
| 123 | return blk->id * rrpc->dev->pgs_per_blk; | 123 | return blk->id * rrpc->dev->pgs_per_blk; |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, | ||
| 127 | struct ppa_addr r) | ||
| 128 | { | ||
| 129 | struct ppa_addr l; | ||
| 130 | int secs, pgs, blks, luns; | ||
| 131 | sector_t ppa = r.ppa; | ||
| 132 | |||
| 133 | l.ppa = 0; | ||
| 134 | |||
| 135 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | ||
| 136 | l.g.sec = secs; | ||
| 137 | |||
| 138 | sector_div(ppa, dev->sec_per_pg); | ||
| 139 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
| 140 | l.g.pg = pgs; | ||
| 141 | |||
| 142 | sector_div(ppa, dev->pgs_per_blk); | ||
| 143 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
| 144 | l.g.blk = blks; | ||
| 145 | |||
| 146 | sector_div(ppa, dev->blks_per_lun); | ||
| 147 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
| 148 | l.g.lun = luns; | ||
| 149 | |||
| 150 | sector_div(ppa, dev->luns_per_chnl); | ||
| 151 | l.g.ch = ppa; | ||
| 152 | |||
| 153 | return l; | ||
| 154 | } | ||
| 155 | |||
| 126 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) | 156 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) |
| 127 | { | 157 | { |
| 128 | struct ppa_addr paddr; | 158 | struct ppa_addr paddr; |
| 129 | 159 | ||
| 130 | paddr.ppa = addr; | 160 | paddr.ppa = addr; |
| 131 | return __linear_to_generic_addr(dev, paddr); | 161 | return linear_to_generic_addr(dev, paddr); |
| 132 | } | 162 | } |
| 133 | 163 | ||
| 134 | /* requires lun->lock taken */ | 164 | /* requires lun->lock taken */ |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 917d47e290ae..3147c8d09ea8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
| @@ -112,7 +112,8 @@ struct iv_tcw_private { | |||
| 112 | * and encrypts / decrypts at the same time. | 112 | * and encrypts / decrypts at the same time. |
| 113 | */ | 113 | */ |
| 114 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, | 114 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, |
| 115 | DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; | 115 | DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, |
| 116 | DM_CRYPT_EXIT_THREAD}; | ||
| 116 | 117 | ||
| 117 | /* | 118 | /* |
| 118 | * The fields in here must be read only after initialization. | 119 | * The fields in here must be read only after initialization. |
| @@ -1203,20 +1204,18 @@ continue_locked: | |||
| 1203 | if (!RB_EMPTY_ROOT(&cc->write_tree)) | 1204 | if (!RB_EMPTY_ROOT(&cc->write_tree)) |
| 1204 | goto pop_from_list; | 1205 | goto pop_from_list; |
| 1205 | 1206 | ||
| 1207 | if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { | ||
| 1208 | spin_unlock_irq(&cc->write_thread_wait.lock); | ||
| 1209 | break; | ||
| 1210 | } | ||
| 1211 | |||
| 1206 | __set_current_state(TASK_INTERRUPTIBLE); | 1212 | __set_current_state(TASK_INTERRUPTIBLE); |
| 1207 | __add_wait_queue(&cc->write_thread_wait, &wait); | 1213 | __add_wait_queue(&cc->write_thread_wait, &wait); |
| 1208 | 1214 | ||
| 1209 | spin_unlock_irq(&cc->write_thread_wait.lock); | 1215 | spin_unlock_irq(&cc->write_thread_wait.lock); |
| 1210 | 1216 | ||
| 1211 | if (unlikely(kthread_should_stop())) { | ||
| 1212 | set_task_state(current, TASK_RUNNING); | ||
| 1213 | remove_wait_queue(&cc->write_thread_wait, &wait); | ||
| 1214 | break; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | schedule(); | 1217 | schedule(); |
| 1218 | 1218 | ||
| 1219 | set_task_state(current, TASK_RUNNING); | ||
| 1220 | spin_lock_irq(&cc->write_thread_wait.lock); | 1219 | spin_lock_irq(&cc->write_thread_wait.lock); |
| 1221 | __remove_wait_queue(&cc->write_thread_wait, &wait); | 1220 | __remove_wait_queue(&cc->write_thread_wait, &wait); |
| 1222 | goto continue_locked; | 1221 | goto continue_locked; |
| @@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti) | |||
| 1531 | if (!cc) | 1530 | if (!cc) |
| 1532 | return; | 1531 | return; |
| 1533 | 1532 | ||
| 1534 | if (cc->write_thread) | 1533 | if (cc->write_thread) { |
| 1534 | spin_lock_irq(&cc->write_thread_wait.lock); | ||
| 1535 | set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); | ||
| 1536 | wake_up_locked(&cc->write_thread_wait); | ||
| 1537 | spin_unlock_irq(&cc->write_thread_wait.lock); | ||
| 1535 | kthread_stop(cc->write_thread); | 1538 | kthread_stop(cc->write_thread); |
| 1539 | } | ||
| 1536 | 1540 | ||
| 1537 | if (cc->io_queue) | 1541 | if (cc->io_queue) |
| 1538 | destroy_workqueue(cc->io_queue); | 1542 | destroy_workqueue(cc->io_queue); |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaa6caa46a9f..cfa29f574c2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti, | |||
| 1537 | struct block_device **bdev, fmode_t *mode) | 1537 | struct block_device **bdev, fmode_t *mode) |
| 1538 | { | 1538 | { |
| 1539 | struct multipath *m = ti->private; | 1539 | struct multipath *m = ti->private; |
| 1540 | struct pgpath *pgpath; | ||
| 1541 | unsigned long flags; | 1540 | unsigned long flags; |
| 1542 | int r; | 1541 | int r; |
| 1543 | 1542 | ||
| 1544 | r = 0; | ||
| 1545 | |||
| 1546 | spin_lock_irqsave(&m->lock, flags); | 1543 | spin_lock_irqsave(&m->lock, flags); |
| 1547 | 1544 | ||
| 1548 | if (!m->current_pgpath) | 1545 | if (!m->current_pgpath) |
| 1549 | __choose_pgpath(m, 0); | 1546 | __choose_pgpath(m, 0); |
| 1550 | 1547 | ||
| 1551 | pgpath = m->current_pgpath; | 1548 | if (m->current_pgpath) { |
| 1552 | 1549 | if (!m->queue_io) { | |
| 1553 | if (pgpath) { | 1550 | *bdev = m->current_pgpath->path.dev->bdev; |
| 1554 | *bdev = pgpath->path.dev->bdev; | 1551 | *mode = m->current_pgpath->path.dev->mode; |
| 1555 | *mode = pgpath->path.dev->mode; | 1552 | r = 0; |
| 1553 | } else { | ||
| 1554 | /* pg_init has not started or completed */ | ||
| 1555 | r = -ENOTCONN; | ||
| 1556 | } | ||
| 1557 | } else { | ||
| 1558 | /* No path is available */ | ||
| 1559 | if (m->queue_if_no_path) | ||
| 1560 | r = -ENOTCONN; | ||
| 1561 | else | ||
| 1562 | r = -EIO; | ||
| 1556 | } | 1563 | } |
| 1557 | 1564 | ||
| 1558 | if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) | ||
| 1559 | r = -ENOTCONN; | ||
| 1560 | else if (!*bdev) | ||
| 1561 | r = -EIO; | ||
| 1562 | |||
| 1563 | spin_unlock_irqrestore(&m->lock, flags); | 1565 | spin_unlock_irqrestore(&m->lock, flags); |
| 1564 | 1566 | ||
| 1565 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { | 1567 | if (r == -ENOTCONN) { |
| 1566 | spin_lock_irqsave(&m->lock, flags); | 1568 | spin_lock_irqsave(&m->lock, flags); |
| 1567 | if (!m->current_pg) { | 1569 | if (!m->current_pg) { |
| 1568 | /* Path status changed, redo selection */ | 1570 | /* Path status changed, redo selection */ |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 3897b90bd462..63903a5a5d9e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2432 | case PM_WRITE: | 2432 | case PM_WRITE: |
| 2433 | if (old_mode != new_mode) | 2433 | if (old_mode != new_mode) |
| 2434 | notify_of_pool_mode_change(pool, "write"); | 2434 | notify_of_pool_mode_change(pool, "write"); |
| 2435 | pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; | ||
| 2435 | dm_pool_metadata_read_write(pool->pmd); | 2436 | dm_pool_metadata_read_write(pool->pmd); |
| 2436 | pool->process_bio = process_bio; | 2437 | pool->process_bio = process_bio; |
| 2437 | pool->process_discard = process_discard_bio; | 2438 | pool->process_discard = process_discard_bio; |
| @@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
| 4249 | { | 4250 | { |
| 4250 | struct thin_c *tc = ti->private; | 4251 | struct thin_c *tc = ti->private; |
| 4251 | struct pool *pool = tc->pool; | 4252 | struct pool *pool = tc->pool; |
| 4252 | struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); | ||
| 4253 | 4253 | ||
| 4254 | if (!pool_limits->discard_granularity) | 4254 | if (!pool->pf.discard_enabled) |
| 4255 | return; /* pool's discard support is disabled */ | 4255 | return; |
| 4256 | 4256 | ||
| 4257 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; | 4257 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; |
| 4258 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ | 4258 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e15f3565892..5df40480228b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -591,7 +591,7 @@ retry: | |||
| 591 | 591 | ||
| 592 | out: | 592 | out: |
| 593 | dm_put_live_table(md, *srcu_idx); | 593 | dm_put_live_table(md, *srcu_idx); |
| 594 | if (r == -ENOTCONN) { | 594 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { |
| 595 | msleep(10); | 595 | msleep(10); |
| 596 | goto retry; | 596 | goto retry; |
| 597 | } | 597 | } |
| @@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 603 | { | 603 | { |
| 604 | struct mapped_device *md = bdev->bd_disk->private_data; | 604 | struct mapped_device *md = bdev->bd_disk->private_data; |
| 605 | struct dm_target *tgt; | 605 | struct dm_target *tgt; |
| 606 | struct block_device *tgt_bdev = NULL; | ||
| 606 | int srcu_idx, r; | 607 | int srcu_idx, r; |
| 607 | 608 | ||
| 608 | r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); | 609 | r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); |
| 609 | if (r < 0) | 610 | if (r < 0) |
| 610 | return r; | 611 | return r; |
| 611 | 612 | ||
| @@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 620 | goto out; | 621 | goto out; |
| 621 | } | 622 | } |
| 622 | 623 | ||
| 623 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 624 | r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); |
| 624 | out: | 625 | out: |
| 625 | dm_put_live_table(md, srcu_idx); | 626 | dm_put_live_table(md, srcu_idx); |
| 626 | return r; | 627 | return r; |
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 35759a91d47d..e8f847226a19 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c | |||
| @@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev, | |||
| 1992 | (unsigned long long)pci_resource_start(pci_dev, 0)); | 1992 | (unsigned long long)pci_resource_start(pci_dev, 0)); |
| 1993 | 1993 | ||
| 1994 | pci_set_master(pci_dev); | 1994 | pci_set_master(pci_dev); |
| 1995 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1995 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1996 | if (err) { | ||
| 1996 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1997 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1997 | err = -EIO; | ||
| 1998 | goto fail_context; | 1998 | goto fail_context; |
| 1999 | } | 1999 | } |
| 2000 | 2000 | ||
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index dbc695f32760..0042803a9de7 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c | |||
| @@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev, | |||
| 1319 | dev->pci_lat, (unsigned long long)dev->base_io_addr); | 1319 | dev->pci_lat, (unsigned long long)dev->base_io_addr); |
| 1320 | 1320 | ||
| 1321 | pci_set_master(pci_dev); | 1321 | pci_set_master(pci_dev); |
| 1322 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1322 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1323 | if (err) { | ||
| 1323 | pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1324 | pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1324 | err = -EIO; | 1325 | err = -EIO; |
| 1325 | goto fail_irq; | 1326 | goto fail_irq; |
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 0ed1b6530374..1b5268f9bb24 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c | |||
| @@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci, | |||
| 890 | return err; | 890 | return err; |
| 891 | } | 891 | } |
| 892 | 892 | ||
| 893 | if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { | 893 | err = pci_set_dma_mask(pci,DMA_BIT_MASK(32)); |
| 894 | if (err) { | ||
| 894 | dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); | 895 | dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); |
| 895 | err = -EIO; | ||
| 896 | cx88_core_put(core, pci); | 896 | cx88_core_put(core, pci); |
| 897 | return err; | 897 | return err; |
| 898 | } | 898 | } |
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 9db7767d1fe0..f34c229f9b37 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c | |||
| @@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev) | |||
| 393 | if (pci_enable_device(dev->pci)) | 393 | if (pci_enable_device(dev->pci)) |
| 394 | return -EIO; | 394 | return -EIO; |
| 395 | pci_set_master(dev->pci); | 395 | pci_set_master(dev->pci); |
| 396 | if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { | 396 | err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32)); |
| 397 | if (err) { | ||
| 397 | printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); | 398 | printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); |
| 398 | return -EIO; | 399 | return -EIO; |
| 399 | } | 400 | } |
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 0de1ad5a977d..aef9acf351f6 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c | |||
| @@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev, | |||
| 1314 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); | 1314 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); |
| 1315 | 1315 | ||
| 1316 | pci_set_master(pci_dev); | 1316 | pci_set_master(pci_dev); |
| 1317 | if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { | 1317 | err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32)); |
| 1318 | if (err) { | ||
| 1318 | printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); | 1319 | printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); |
| 1319 | err = -EIO; | ||
| 1320 | goto fail_core; | 1320 | goto fail_core; |
| 1321 | } | 1321 | } |
| 1322 | dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); | 1322 | dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); |
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 60b2d462f98d..3fdbd81b5580 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c | |||
| @@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, | |||
| 810 | "%s(): board vendor 0x%x, revision 0x%x\n", | 810 | "%s(): board vendor 0x%x, revision 0x%x\n", |
| 811 | __func__, board_vendor, board_revision); | 811 | __func__, board_vendor, board_revision); |
| 812 | pci_set_master(pci_dev); | 812 | pci_set_master(pci_dev); |
| 813 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 813 | if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) { |
| 814 | dev_err(&pci_dev->dev, | 814 | dev_err(&pci_dev->dev, |
| 815 | "%s(): 32bit PCI DMA is not supported\n", __func__); | 815 | "%s(): 32bit PCI DMA is not supported\n", __func__); |
| 816 | goto pci_detect_err; | 816 | goto pci_detect_err; |
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index e79d63eb774e..f720cea80e28 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c | |||
| @@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev, | |||
| 951 | pci_name(pci_dev), dev->pci_rev, pci_dev->irq, | 951 | pci_name(pci_dev), dev->pci_rev, pci_dev->irq, |
| 952 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); | 952 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); |
| 953 | pci_set_master(pci_dev); | 953 | pci_set_master(pci_dev); |
| 954 | if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { | 954 | err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); |
| 955 | if (err) { | ||
| 955 | pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); | 956 | pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 956 | err = -EIO; | ||
| 957 | goto fail1; | 957 | goto fail1; |
| 958 | } | 958 | } |
| 959 | 959 | ||
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 8f36b48ef733..8bbd092fbe1d 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c | |||
| @@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev, | |||
| 1264 | 1264 | ||
| 1265 | pci_set_master(pci_dev); | 1265 | pci_set_master(pci_dev); |
| 1266 | /* TODO */ | 1266 | /* TODO */ |
| 1267 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1267 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1268 | if (err) { | ||
| 1268 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1269 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1269 | err = -EIO; | ||
| 1270 | goto fail_irq; | 1270 | goto fail_irq; |
| 1271 | } | 1271 | } |
| 1272 | 1272 | ||
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c index 8c5655d351d3..4e77618fbb2b 100644 --- a/drivers/media/pci/tw68/tw68-core.c +++ b/drivers/media/pci/tw68/tw68-core.c | |||
| @@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev, | |||
| 257 | dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, | 257 | dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, |
| 258 | dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); | 258 | dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); |
| 259 | pci_set_master(pci_dev); | 259 | pci_set_master(pci_dev); |
| 260 | if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { | 260 | err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); |
| 261 | if (err) { | ||
| 261 | pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); | 262 | pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 262 | err = -EIO; | ||
| 263 | goto fail1; | 263 | goto fail1; |
| 264 | } | 264 | } |
| 265 | 265 | ||
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 23b6c8e8701c..d8486168415a 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
| @@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block"); | |||
| 65 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 | 65 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 |
| 66 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) | 66 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) |
| 67 | 67 | ||
| 68 | #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ | 68 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ |
| 69 | (req->cmd_flags & REQ_META)) && \ | ||
| 70 | (rq_data_dir(req) == WRITE)) | 69 | (rq_data_dir(req) == WRITE)) |
| 71 | #define PACKED_CMD_VER 0x01 | 70 | #define PACKED_CMD_VER 0x01 |
| 72 | #define PACKED_CMD_WR 0x02 | 71 | #define PACKED_CMD_WR 0x02 |
| @@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
| 1467 | 1466 | ||
| 1468 | /* | 1467 | /* |
| 1469 | * Reliable writes are used to implement Forced Unit Access and | 1468 | * Reliable writes are used to implement Forced Unit Access and |
| 1470 | * REQ_META accesses, and are supported only on MMCs. | 1469 | * are supported only on MMCs. |
| 1471 | * | ||
| 1472 | * XXX: this really needs a good explanation of why REQ_META | ||
| 1473 | * is treated special. | ||
| 1474 | */ | 1470 | */ |
| 1475 | bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || | 1471 | bool do_rel_wr = (req->cmd_flags & REQ_FUA) && |
| 1476 | (req->cmd_flags & REQ_META)) && | ||
| 1477 | (rq_data_dir(req) == WRITE) && | 1472 | (rq_data_dir(req) == WRITE) && |
| 1478 | (md->flags & MMC_BLK_REL_WR); | 1473 | (md->flags & MMC_BLK_REL_WR); |
| 1479 | 1474 | ||
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c793fda27321..3a9a79ec4343 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card) | |||
| 1040 | return err; | 1040 | return err; |
| 1041 | } | 1041 | } |
| 1042 | 1042 | ||
| 1043 | /* Caller must hold re-tuning */ | ||
| 1044 | static int mmc_switch_status(struct mmc_card *card) | ||
| 1045 | { | ||
| 1046 | u32 status; | ||
| 1047 | int err; | ||
| 1048 | |||
| 1049 | err = mmc_send_status(card, &status); | ||
| 1050 | if (err) | ||
| 1051 | return err; | ||
| 1052 | |||
| 1053 | return mmc_switch_status_error(card->host, status); | ||
| 1054 | } | ||
| 1055 | |||
| 1043 | static int mmc_select_hs400(struct mmc_card *card) | 1056 | static int mmc_select_hs400(struct mmc_card *card) |
| 1044 | { | 1057 | { |
| 1045 | struct mmc_host *host = card->host; | 1058 | struct mmc_host *host = card->host; |
| 1059 | bool send_status = true; | ||
| 1060 | unsigned int max_dtr; | ||
| 1046 | int err = 0; | 1061 | int err = 0; |
| 1047 | u8 val; | 1062 | u8 val; |
| 1048 | 1063 | ||
| @@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card) | |||
| 1053 | host->ios.bus_width == MMC_BUS_WIDTH_8)) | 1068 | host->ios.bus_width == MMC_BUS_WIDTH_8)) |
| 1054 | return 0; | 1069 | return 0; |
| 1055 | 1070 | ||
| 1056 | /* | 1071 | if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) |
| 1057 | * Before switching to dual data rate operation for HS400, | 1072 | send_status = false; |
| 1058 | * it is required to convert from HS200 mode to HS mode. | ||
| 1059 | */ | ||
| 1060 | mmc_set_timing(card->host, MMC_TIMING_MMC_HS); | ||
| 1061 | mmc_set_bus_speed(card); | ||
| 1062 | 1073 | ||
| 1074 | /* Reduce frequency to HS frequency */ | ||
| 1075 | max_dtr = card->ext_csd.hs_max_dtr; | ||
| 1076 | mmc_set_clock(host, max_dtr); | ||
| 1077 | |||
| 1078 | /* Switch card to HS mode */ | ||
| 1063 | val = EXT_CSD_TIMING_HS | | 1079 | val = EXT_CSD_TIMING_HS | |
| 1064 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1080 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| 1065 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1081 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| 1066 | EXT_CSD_HS_TIMING, val, | 1082 | EXT_CSD_HS_TIMING, val, |
| 1067 | card->ext_csd.generic_cmd6_time, | 1083 | card->ext_csd.generic_cmd6_time, |
| 1068 | true, true, true); | 1084 | true, send_status, true); |
| 1069 | if (err) { | 1085 | if (err) { |
| 1070 | pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", | 1086 | pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", |
| 1071 | mmc_hostname(host), err); | 1087 | mmc_hostname(host), err); |
| 1072 | return err; | 1088 | return err; |
| 1073 | } | 1089 | } |
| 1074 | 1090 | ||
| 1091 | /* Set host controller to HS timing */ | ||
| 1092 | mmc_set_timing(card->host, MMC_TIMING_MMC_HS); | ||
| 1093 | |||
| 1094 | if (!send_status) { | ||
| 1095 | err = mmc_switch_status(card); | ||
| 1096 | if (err) | ||
| 1097 | goto out_err; | ||
| 1098 | } | ||
| 1099 | |||
| 1100 | /* Switch card to DDR */ | ||
| 1075 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1101 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| 1076 | EXT_CSD_BUS_WIDTH, | 1102 | EXT_CSD_BUS_WIDTH, |
| 1077 | EXT_CSD_DDR_BUS_WIDTH_8, | 1103 | EXT_CSD_DDR_BUS_WIDTH_8, |
| @@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card) | |||
| 1082 | return err; | 1108 | return err; |
| 1083 | } | 1109 | } |
| 1084 | 1110 | ||
| 1111 | /* Switch card to HS400 */ | ||
| 1085 | val = EXT_CSD_TIMING_HS400 | | 1112 | val = EXT_CSD_TIMING_HS400 | |
| 1086 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1113 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| 1087 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1114 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| 1088 | EXT_CSD_HS_TIMING, val, | 1115 | EXT_CSD_HS_TIMING, val, |
| 1089 | card->ext_csd.generic_cmd6_time, | 1116 | card->ext_csd.generic_cmd6_time, |
| 1090 | true, true, true); | 1117 | true, send_status, true); |
| 1091 | if (err) { | 1118 | if (err) { |
| 1092 | pr_err("%s: switch to hs400 failed, err:%d\n", | 1119 | pr_err("%s: switch to hs400 failed, err:%d\n", |
| 1093 | mmc_hostname(host), err); | 1120 | mmc_hostname(host), err); |
| 1094 | return err; | 1121 | return err; |
| 1095 | } | 1122 | } |
| 1096 | 1123 | ||
| 1124 | /* Set host controller to HS400 timing and frequency */ | ||
| 1097 | mmc_set_timing(host, MMC_TIMING_MMC_HS400); | 1125 | mmc_set_timing(host, MMC_TIMING_MMC_HS400); |
| 1098 | mmc_set_bus_speed(card); | 1126 | mmc_set_bus_speed(card); |
| 1099 | 1127 | ||
| 1128 | if (!send_status) { | ||
| 1129 | err = mmc_switch_status(card); | ||
| 1130 | if (err) | ||
| 1131 | goto out_err; | ||
| 1132 | } | ||
| 1133 | |||
| 1100 | return 0; | 1134 | return 0; |
| 1135 | |||
| 1136 | out_err: | ||
| 1137 | pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), | ||
| 1138 | __func__, err); | ||
| 1139 | return err; | ||
| 1101 | } | 1140 | } |
| 1102 | 1141 | ||
| 1103 | int mmc_hs200_to_hs400(struct mmc_card *card) | 1142 | int mmc_hs200_to_hs400(struct mmc_card *card) |
| @@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card) | |||
| 1105 | return mmc_select_hs400(card); | 1144 | return mmc_select_hs400(card); |
| 1106 | } | 1145 | } |
| 1107 | 1146 | ||
| 1108 | /* Caller must hold re-tuning */ | ||
| 1109 | static int mmc_switch_status(struct mmc_card *card) | ||
| 1110 | { | ||
| 1111 | u32 status; | ||
| 1112 | int err; | ||
| 1113 | |||
| 1114 | err = mmc_send_status(card, &status); | ||
| 1115 | if (err) | ||
| 1116 | return err; | ||
| 1117 | |||
| 1118 | return mmc_switch_status_error(card->host, status); | ||
| 1119 | } | ||
| 1120 | |||
| 1121 | int mmc_hs400_to_hs200(struct mmc_card *card) | 1147 | int mmc_hs400_to_hs200(struct mmc_card *card) |
| 1122 | { | 1148 | { |
| 1123 | struct mmc_host *host = card->host; | 1149 | struct mmc_host *host = card->host; |
| @@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card) | |||
| 1219 | static int mmc_select_hs200(struct mmc_card *card) | 1245 | static int mmc_select_hs200(struct mmc_card *card) |
| 1220 | { | 1246 | { |
| 1221 | struct mmc_host *host = card->host; | 1247 | struct mmc_host *host = card->host; |
| 1248 | bool send_status = true; | ||
| 1249 | unsigned int old_timing; | ||
| 1222 | int err = -EINVAL; | 1250 | int err = -EINVAL; |
| 1223 | u8 val; | 1251 | u8 val; |
| 1224 | 1252 | ||
| @@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
| 1234 | 1262 | ||
| 1235 | mmc_select_driver_type(card); | 1263 | mmc_select_driver_type(card); |
| 1236 | 1264 | ||
| 1265 | if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) | ||
| 1266 | send_status = false; | ||
| 1267 | |||
| 1237 | /* | 1268 | /* |
| 1238 | * Set the bus width(4 or 8) with host's support and | 1269 | * Set the bus width(4 or 8) with host's support and |
| 1239 | * switch to HS200 mode if bus width is set successfully. | 1270 | * switch to HS200 mode if bus width is set successfully. |
| @@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
| 1245 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1276 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| 1246 | EXT_CSD_HS_TIMING, val, | 1277 | EXT_CSD_HS_TIMING, val, |
| 1247 | card->ext_csd.generic_cmd6_time, | 1278 | card->ext_csd.generic_cmd6_time, |
| 1248 | true, true, true); | 1279 | true, send_status, true); |
| 1249 | if (!err) | 1280 | if (err) |
| 1250 | mmc_set_timing(host, MMC_TIMING_MMC_HS200); | 1281 | goto err; |
| 1282 | old_timing = host->ios.timing; | ||
| 1283 | mmc_set_timing(host, MMC_TIMING_MMC_HS200); | ||
| 1284 | if (!send_status) { | ||
| 1285 | err = mmc_switch_status(card); | ||
| 1286 | /* | ||
| 1287 | * mmc_select_timing() assumes timing has not changed if | ||
| 1288 | * it is a switch error. | ||
| 1289 | */ | ||
| 1290 | if (err == -EBADMSG) | ||
| 1291 | mmc_set_timing(host, old_timing); | ||
| 1292 | } | ||
| 1251 | } | 1293 | } |
| 1252 | err: | 1294 | err: |
| 1295 | if (err) | ||
| 1296 | pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), | ||
| 1297 | __func__, err); | ||
| 1253 | return err; | 1298 | return err; |
| 1254 | } | 1299 | } |
| 1255 | 1300 | ||
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index af71de5fda3b..1dee533634c9 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
| @@ -473,6 +473,7 @@ config MMC_DAVINCI | |||
| 473 | 473 | ||
| 474 | config MMC_GOLDFISH | 474 | config MMC_GOLDFISH |
| 475 | tristate "goldfish qemu Multimedia Card Interface support" | 475 | tristate "goldfish qemu Multimedia Card Interface support" |
| 476 | depends on HAS_DMA | ||
| 476 | depends on GOLDFISH || COMPILE_TEST | 477 | depends on GOLDFISH || COMPILE_TEST |
| 477 | help | 478 | help |
| 478 | This selects the Goldfish Multimedia card Interface emulation | 479 | This selects the Goldfish Multimedia card Interface emulation |
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 39568cc29a2a..33dfd7e72516 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c | |||
| @@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) | |||
| 1276 | int start = 0, len = 0; | 1276 | int start = 0, len = 0; |
| 1277 | int start_final = 0, len_final = 0; | 1277 | int start_final = 0, len_final = 0; |
| 1278 | u8 final_phase = 0xff; | 1278 | u8 final_phase = 0xff; |
| 1279 | struct msdc_delay_phase delay_phase; | 1279 | struct msdc_delay_phase delay_phase = { 0, }; |
| 1280 | 1280 | ||
| 1281 | if (delay == 0) { | 1281 | if (delay == 0) { |
| 1282 | dev_err(host->dev, "phase error: [map:%x]\n", delay); | 1282 | dev_err(host->dev, "phase error: [map:%x]\n", delay); |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 8cadd74e8407..ce08896b9d69 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
| @@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 805 | goto out; | 805 | goto out; |
| 806 | } else { | 806 | } else { |
| 807 | mmc->caps |= host->pdata->gpio_card_ro_invert ? | 807 | mmc->caps |= host->pdata->gpio_card_ro_invert ? |
| 808 | MMC_CAP2_RO_ACTIVE_HIGH : 0; | 808 | 0 : MMC_CAP2_RO_ACTIVE_HIGH; |
| 809 | } | 809 | } |
| 810 | 810 | ||
| 811 | if (gpio_is_valid(gpio_cd)) | 811 | if (gpio_is_valid(gpio_cd)) |
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index dc4e8446f1ff..5a99a93ed025 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #include <linux/gpio.h> | 26 | #include <linux/gpio.h> |
| 27 | 27 | ||
| 28 | #include <asm/mach-jz4740/gpio.h> | ||
| 28 | #include <asm/mach-jz4740/jz4740_nand.h> | 29 | #include <asm/mach-jz4740/jz4740_nand.h> |
| 29 | 30 | ||
| 30 | #define JZ_REG_NAND_CTRL 0x50 | 31 | #define JZ_REG_NAND_CTRL 0x50 |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index cc74142938b0..ece544efccc3 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd) | |||
| 3110 | */ | 3110 | */ |
| 3111 | static void nand_shutdown(struct mtd_info *mtd) | 3111 | static void nand_shutdown(struct mtd_info *mtd) |
| 3112 | { | 3112 | { |
| 3113 | nand_get_device(mtd, FL_SHUTDOWN); | 3113 | nand_get_device(mtd, FL_PM_SUSPENDED); |
| 3114 | } | 3114 | } |
| 3115 | 3115 | ||
| 3116 | /* Set default functions */ | 3116 | /* Set default functions */ |
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 9093577755f6..0527f485c3dc 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c | |||
| @@ -15,9 +15,7 @@ | |||
| 15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
| 16 | #include <linux/phy.h> | 16 | #include <linux/phy.h> |
| 17 | #include <net/dsa.h> | 17 | #include <net/dsa.h> |
| 18 | 18 | #include "mv88e6060.h" | |
| 19 | #define REG_PORT(p) (8 + (p)) | ||
| 20 | #define REG_GLOBAL 0x0f | ||
| 21 | 19 | ||
| 22 | static int reg_read(struct dsa_switch *ds, int addr, int reg) | 20 | static int reg_read(struct dsa_switch *ds, int addr, int reg) |
| 23 | { | 21 | { |
| @@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr) | |||
| 67 | if (bus == NULL) | 65 | if (bus == NULL) |
| 68 | return NULL; | 66 | return NULL; |
| 69 | 67 | ||
| 70 | ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); | 68 | ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID); |
| 71 | if (ret >= 0) { | 69 | if (ret >= 0) { |
| 72 | if (ret == 0x0600) | 70 | if (ret == PORT_SWITCH_ID_6060) |
| 73 | return "Marvell 88E6060 (A0)"; | 71 | return "Marvell 88E6060 (A0)"; |
| 74 | if (ret == 0x0601 || ret == 0x0602) | 72 | if (ret == PORT_SWITCH_ID_6060_R1 || |
| 73 | ret == PORT_SWITCH_ID_6060_R2) | ||
| 75 | return "Marvell 88E6060 (B0)"; | 74 | return "Marvell 88E6060 (B0)"; |
| 76 | if ((ret & 0xfff0) == 0x0600) | 75 | if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060) |
| 77 | return "Marvell 88E6060"; | 76 | return "Marvell 88E6060"; |
| 78 | } | 77 | } |
| 79 | 78 | ||
| @@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) | |||
| 87 | unsigned long timeout; | 86 | unsigned long timeout; |
| 88 | 87 | ||
| 89 | /* Set all ports to the disabled state. */ | 88 | /* Set all ports to the disabled state. */ |
| 90 | for (i = 0; i < 6; i++) { | 89 | for (i = 0; i < MV88E6060_PORTS; i++) { |
| 91 | ret = REG_READ(REG_PORT(i), 0x04); | 90 | ret = REG_READ(REG_PORT(i), PORT_CONTROL); |
| 92 | REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); | 91 | REG_WRITE(REG_PORT(i), PORT_CONTROL, |
| 92 | ret & ~PORT_CONTROL_STATE_MASK); | ||
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | /* Wait for transmit queues to drain. */ | 95 | /* Wait for transmit queues to drain. */ |
| 96 | usleep_range(2000, 4000); | 96 | usleep_range(2000, 4000); |
| 97 | 97 | ||
| 98 | /* Reset the switch. */ | 98 | /* Reset the switch. */ |
| 99 | REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); | 99 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, |
| 100 | GLOBAL_ATU_CONTROL_SWRESET | | ||
| 101 | GLOBAL_ATU_CONTROL_ATUSIZE_1024 | | ||
| 102 | GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); | ||
| 100 | 103 | ||
| 101 | /* Wait up to one second for reset to complete. */ | 104 | /* Wait up to one second for reset to complete. */ |
| 102 | timeout = jiffies + 1 * HZ; | 105 | timeout = jiffies + 1 * HZ; |
| 103 | while (time_before(jiffies, timeout)) { | 106 | while (time_before(jiffies, timeout)) { |
| 104 | ret = REG_READ(REG_GLOBAL, 0x00); | 107 | ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); |
| 105 | if ((ret & 0x8000) == 0x0000) | 108 | if (ret & GLOBAL_STATUS_INIT_READY) |
| 106 | break; | 109 | break; |
| 107 | 110 | ||
| 108 | usleep_range(1000, 2000); | 111 | usleep_range(1000, 2000); |
| @@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds) | |||
| 119 | * set the maximum frame size to 1536 bytes, and mask all | 122 | * set the maximum frame size to 1536 bytes, and mask all |
| 120 | * interrupt sources. | 123 | * interrupt sources. |
| 121 | */ | 124 | */ |
| 122 | REG_WRITE(REG_GLOBAL, 0x04, 0x0800); | 125 | REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); |
| 123 | 126 | ||
| 124 | /* Enable automatic address learning, set the address | 127 | /* Enable automatic address learning, set the address |
| 125 | * database size to 1024 entries, and set the default aging | 128 | * database size to 1024 entries, and set the default aging |
| 126 | * time to 5 minutes. | 129 | * time to 5 minutes. |
| 127 | */ | 130 | */ |
| 128 | REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); | 131 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, |
| 132 | GLOBAL_ATU_CONTROL_ATUSIZE_1024 | | ||
| 133 | GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); | ||
| 129 | 134 | ||
| 130 | return 0; | 135 | return 0; |
| 131 | } | 136 | } |
| @@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) | |||
| 139 | * state to Forwarding. Additionally, if this is the CPU | 144 | * state to Forwarding. Additionally, if this is the CPU |
| 140 | * port, enable Ingress and Egress Trailer tagging mode. | 145 | * port, enable Ingress and Egress Trailer tagging mode. |
| 141 | */ | 146 | */ |
| 142 | REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003); | 147 | REG_WRITE(addr, PORT_CONTROL, |
| 148 | dsa_is_cpu_port(ds, p) ? | ||
| 149 | PORT_CONTROL_TRAILER | | ||
| 150 | PORT_CONTROL_INGRESS_MODE | | ||
| 151 | PORT_CONTROL_STATE_FORWARDING : | ||
| 152 | PORT_CONTROL_STATE_FORWARDING); | ||
| 143 | 153 | ||
| 144 | /* Port based VLAN map: give each port its own address | 154 | /* Port based VLAN map: give each port its own address |
| 145 | * database, allow the CPU port to talk to each of the 'real' | 155 | * database, allow the CPU port to talk to each of the 'real' |
| 146 | * ports, and allow each of the 'real' ports to only talk to | 156 | * ports, and allow each of the 'real' ports to only talk to |
| 147 | * the CPU port. | 157 | * the CPU port. |
| 148 | */ | 158 | */ |
| 149 | REG_WRITE(addr, 0x06, | 159 | REG_WRITE(addr, PORT_VLAN_MAP, |
| 150 | ((p & 0xf) << 12) | | 160 | ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | |
| 151 | (dsa_is_cpu_port(ds, p) ? | 161 | (dsa_is_cpu_port(ds, p) ? |
| 152 | ds->phys_port_mask : | 162 | ds->phys_port_mask : |
| 153 | (1 << ds->dst->cpu_port))); | 163 | BIT(ds->dst->cpu_port))); |
| 154 | 164 | ||
| 155 | /* Port Association Vector: when learning source addresses | 165 | /* Port Association Vector: when learning source addresses |
| 156 | * of packets, add the address to the address database using | 166 | * of packets, add the address to the address database using |
| 157 | * a port bitmap that has only the bit for this port set and | 167 | * a port bitmap that has only the bit for this port set and |
| 158 | * the other bits clear. | 168 | * the other bits clear. |
| 159 | */ | 169 | */ |
| 160 | REG_WRITE(addr, 0x0b, 1 << p); | 170 | REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p)); |
| 161 | 171 | ||
| 162 | return 0; | 172 | return 0; |
| 163 | } | 173 | } |
| @@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds) | |||
| 177 | if (ret < 0) | 187 | if (ret < 0) |
| 178 | return ret; | 188 | return ret; |
| 179 | 189 | ||
| 180 | for (i = 0; i < 6; i++) { | 190 | for (i = 0; i < MV88E6060_PORTS; i++) { |
| 181 | ret = mv88e6060_setup_port(ds, i); | 191 | ret = mv88e6060_setup_port(ds, i); |
| 182 | if (ret < 0) | 192 | if (ret < 0) |
| 183 | return ret; | 193 | return ret; |
| @@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds) | |||
| 188 | 198 | ||
| 189 | static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) | 199 | static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) |
| 190 | { | 200 | { |
| 191 | REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); | 201 | /* Use the same MAC Address as FD Pause frames for all ports */ |
| 192 | REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); | 202 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]); |
| 193 | REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); | 203 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); |
| 204 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); | ||
| 194 | 205 | ||
| 195 | return 0; | 206 | return 0; |
| 196 | } | 207 | } |
| 197 | 208 | ||
| 198 | static int mv88e6060_port_to_phy_addr(int port) | 209 | static int mv88e6060_port_to_phy_addr(int port) |
| 199 | { | 210 | { |
| 200 | if (port >= 0 && port <= 5) | 211 | if (port >= 0 && port < MV88E6060_PORTS) |
| 201 | return port; | 212 | return port; |
| 202 | return -1; | 213 | return -1; |
| 203 | } | 214 | } |
| @@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) | |||
| 225 | return reg_write(ds, addr, regnum, val); | 236 | return reg_write(ds, addr, regnum, val); |
| 226 | } | 237 | } |
| 227 | 238 | ||
| 228 | static void mv88e6060_poll_link(struct dsa_switch *ds) | ||
| 229 | { | ||
| 230 | int i; | ||
| 231 | |||
| 232 | for (i = 0; i < DSA_MAX_PORTS; i++) { | ||
| 233 | struct net_device *dev; | ||
| 234 | int uninitialized_var(port_status); | ||
| 235 | int link; | ||
| 236 | int speed; | ||
| 237 | int duplex; | ||
| 238 | int fc; | ||
| 239 | |||
| 240 | dev = ds->ports[i]; | ||
| 241 | if (dev == NULL) | ||
| 242 | continue; | ||
| 243 | |||
| 244 | link = 0; | ||
| 245 | if (dev->flags & IFF_UP) { | ||
| 246 | port_status = reg_read(ds, REG_PORT(i), 0x00); | ||
| 247 | if (port_status < 0) | ||
| 248 | continue; | ||
| 249 | |||
| 250 | link = !!(port_status & 0x1000); | ||
| 251 | } | ||
| 252 | |||
| 253 | if (!link) { | ||
| 254 | if (netif_carrier_ok(dev)) { | ||
| 255 | netdev_info(dev, "link down\n"); | ||
| 256 | netif_carrier_off(dev); | ||
| 257 | } | ||
| 258 | continue; | ||
| 259 | } | ||
| 260 | |||
| 261 | speed = (port_status & 0x0100) ? 100 : 10; | ||
| 262 | duplex = (port_status & 0x0200) ? 1 : 0; | ||
| 263 | fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0; | ||
| 264 | |||
| 265 | if (!netif_carrier_ok(dev)) { | ||
| 266 | netdev_info(dev, | ||
| 267 | "link up, %d Mb/s, %s duplex, flow control %sabled\n", | ||
| 268 | speed, | ||
| 269 | duplex ? "full" : "half", | ||
| 270 | fc ? "en" : "dis"); | ||
| 271 | netif_carrier_on(dev); | ||
| 272 | } | ||
| 273 | } | ||
| 274 | } | ||
| 275 | |||
| 276 | static struct dsa_switch_driver mv88e6060_switch_driver = { | 239 | static struct dsa_switch_driver mv88e6060_switch_driver = { |
| 277 | .tag_protocol = DSA_TAG_PROTO_TRAILER, | 240 | .tag_protocol = DSA_TAG_PROTO_TRAILER, |
| 278 | .probe = mv88e6060_probe, | 241 | .probe = mv88e6060_probe, |
| @@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = { | |||
| 280 | .set_addr = mv88e6060_set_addr, | 243 | .set_addr = mv88e6060_set_addr, |
| 281 | .phy_read = mv88e6060_phy_read, | 244 | .phy_read = mv88e6060_phy_read, |
| 282 | .phy_write = mv88e6060_phy_write, | 245 | .phy_write = mv88e6060_phy_write, |
| 283 | .poll_link = mv88e6060_poll_link, | ||
| 284 | }; | 246 | }; |
| 285 | 247 | ||
| 286 | static int __init mv88e6060_init(void) | 248 | static int __init mv88e6060_init(void) |
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h new file mode 100644 index 000000000000..cc9b2ed4aff4 --- /dev/null +++ b/drivers/net/dsa/mv88e6060.h | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support | ||
| 3 | * Copyright (c) 2015 Neil Armstrong | ||
| 4 | * | ||
| 5 | * Based on mv88e6xxx.h | ||
| 6 | * Copyright (c) 2008 Marvell Semiconductor | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef __MV88E6060_H | ||
| 15 | #define __MV88E6060_H | ||
| 16 | |||
| 17 | #define MV88E6060_PORTS 6 | ||
| 18 | |||
| 19 | #define REG_PORT(p) (0x8 + (p)) | ||
| 20 | #define PORT_STATUS 0x00 | ||
| 21 | #define PORT_STATUS_PAUSE_EN BIT(15) | ||
| 22 | #define PORT_STATUS_MY_PAUSE BIT(14) | ||
| 23 | #define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN) | ||
| 24 | #define PORT_STATUS_RESOLVED BIT(13) | ||
| 25 | #define PORT_STATUS_LINK BIT(12) | ||
| 26 | #define PORT_STATUS_PORTMODE BIT(11) | ||
| 27 | #define PORT_STATUS_PHYMODE BIT(10) | ||
| 28 | #define PORT_STATUS_DUPLEX BIT(9) | ||
| 29 | #define PORT_STATUS_SPEED BIT(8) | ||
| 30 | #define PORT_SWITCH_ID 0x03 | ||
| 31 | #define PORT_SWITCH_ID_6060 0x0600 | ||
| 32 | #define PORT_SWITCH_ID_6060_MASK 0xfff0 | ||
| 33 | #define PORT_SWITCH_ID_6060_R1 0x0601 | ||
| 34 | #define PORT_SWITCH_ID_6060_R2 0x0602 | ||
| 35 | #define PORT_CONTROL 0x04 | ||
| 36 | #define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15) | ||
| 37 | #define PORT_CONTROL_TRAILER BIT(14) | ||
| 38 | #define PORT_CONTROL_HEADER BIT(11) | ||
| 39 | #define PORT_CONTROL_INGRESS_MODE BIT(8) | ||
| 40 | #define PORT_CONTROL_VLAN_TUNNEL BIT(7) | ||
| 41 | #define PORT_CONTROL_STATE_MASK 0x03 | ||
| 42 | #define PORT_CONTROL_STATE_DISABLED 0x00 | ||
| 43 | #define PORT_CONTROL_STATE_BLOCKING 0x01 | ||
| 44 | #define PORT_CONTROL_STATE_LEARNING 0x02 | ||
| 45 | #define PORT_CONTROL_STATE_FORWARDING 0x03 | ||
| 46 | #define PORT_VLAN_MAP 0x06 | ||
| 47 | #define PORT_VLAN_MAP_DBNUM_SHIFT 12 | ||
| 48 | #define PORT_VLAN_MAP_TABLE_MASK 0x1f | ||
| 49 | #define PORT_ASSOC_VECTOR 0x0b | ||
| 50 | #define PORT_ASSOC_VECTOR_MONITOR BIT(15) | ||
| 51 | #define PORT_ASSOC_VECTOR_PAV_MASK 0x1f | ||
| 52 | #define PORT_RX_CNTR 0x10 | ||
| 53 | #define PORT_TX_CNTR 0x11 | ||
| 54 | |||
| 55 | #define REG_GLOBAL 0x0f | ||
| 56 | #define GLOBAL_STATUS 0x00 | ||
| 57 | #define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12) | ||
| 58 | #define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12) | ||
| 59 | #define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12) | ||
| 60 | #define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12) | ||
| 61 | #define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12) | ||
| 62 | #define GLOBAL_STATUS_INIT_READY BIT(11) | ||
| 63 | #define GLOBAL_STATUS_ATU_FULL BIT(3) | ||
| 64 | #define GLOBAL_STATUS_ATU_DONE BIT(2) | ||
| 65 | #define GLOBAL_STATUS_PHY_INT BIT(1) | ||
| 66 | #define GLOBAL_STATUS_EEINT BIT(0) | ||
| 67 | #define GLOBAL_MAC_01 0x01 | ||
| 68 | #define GLOBAL_MAC_01_DIFF_ADDR BIT(8) | ||
| 69 | #define GLOBAL_MAC_23 0x02 | ||
| 70 | #define GLOBAL_MAC_45 0x03 | ||
| 71 | #define GLOBAL_CONTROL 0x04 | ||
| 72 | #define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) | ||
| 73 | #define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10) | ||
| 74 | #define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) | ||
| 75 | #define GLOBAL_CONTROL_CTRMODE BIT(8) | ||
| 76 | #define GLOBAL_CONTROL_ATU_FULL_EN BIT(3) | ||
| 77 | #define GLOBAL_CONTROL_ATU_DONE_EN BIT(2) | ||
| 78 | #define GLOBAL_CONTROL_PHYINT_EN BIT(1) | ||
| 79 | #define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) | ||
| 80 | #define GLOBAL_ATU_CONTROL 0x0a | ||
| 81 | #define GLOBAL_ATU_CONTROL_SWRESET BIT(15) | ||
| 82 | #define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14) | ||
| 83 | #define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12) | ||
| 84 | #define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12) | ||
| 85 | #define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12) | ||
| 86 | #define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4 | ||
| 87 | #define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4) | ||
| 88 | #define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4) | ||
| 89 | #define GLOBAL_ATU_OP 0x0b | ||
| 90 | #define GLOBAL_ATU_OP_BUSY BIT(15) | ||
| 91 | #define GLOBAL_ATU_OP_NOP (0 << 12) | ||
| 92 | #define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 93 | #define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 94 | #define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 95 | #define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 96 | #define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 97 | #define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY) | ||
| 98 | #define GLOBAL_ATU_DATA 0x0c | ||
| 99 | #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0 | ||
| 100 | #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 | ||
| 101 | #define GLOBAL_ATU_DATA_STATE_MASK 0x0f | ||
| 102 | #define GLOBAL_ATU_DATA_STATE_UNUSED 0x00 | ||
| 103 | #define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e | ||
| 104 | #define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f | ||
| 105 | #define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07 | ||
| 106 | #define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e | ||
| 107 | #define GLOBAL_ATU_MAC_01 0x0d | ||
| 108 | #define GLOBAL_ATU_MAC_23 0x0e | ||
| 109 | #define GLOBAL_ATU_MAC_45 0x0f | ||
| 110 | |||
| 111 | #endif | ||
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 05aa7597dab9..955d06b9cdba 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig | |||
| @@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig" | |||
| 78 | source "drivers/net/ethernet/intel/Kconfig" | 78 | source "drivers/net/ethernet/intel/Kconfig" |
| 79 | source "drivers/net/ethernet/i825xx/Kconfig" | 79 | source "drivers/net/ethernet/i825xx/Kconfig" |
| 80 | source "drivers/net/ethernet/xscale/Kconfig" | 80 | source "drivers/net/ethernet/xscale/Kconfig" |
| 81 | source "drivers/net/ethernet/icplus/Kconfig" | ||
| 82 | 81 | ||
| 83 | config JME | 82 | config JME |
| 84 | tristate "JMicron(R) PCI-Express Gigabit Ethernet support" | 83 | tristate "JMicron(R) PCI-Express Gigabit Ethernet support" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index ddfc808110a1..4a2ee98738f0 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
| @@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ | |||
| 41 | obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ | 41 | obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ |
| 42 | obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ | 42 | obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ |
| 43 | obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ | 43 | obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ |
| 44 | obj-$(CONFIG_IP1000) += icplus/ | ||
| 45 | obj-$(CONFIG_JME) += jme.o | 44 | obj-$(CONFIG_JME) += jme.o |
| 46 | obj-$(CONFIG_KORINA) += korina.o | 45 | obj-$(CONFIG_KORINA) += korina.o |
| 47 | obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o | 46 | obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o |
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index e2afabf3a465..7ccebae9cb48 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
| @@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1500 | return -ENODEV; | 1500 | return -ENODEV; |
| 1501 | } | 1501 | } |
| 1502 | 1502 | ||
| 1503 | if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) { | 1503 | err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); |
| 1504 | if (err) { | ||
| 1504 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1505 | if (pcnet32_debug & NETIF_MSG_PROBE) |
| 1505 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); | 1506 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); |
| 1506 | return -ENODEV; | 1507 | return err; |
| 1507 | } | 1508 | } |
| 1508 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { | 1509 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { |
| 1509 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1510 | if (pcnet32_debug & NETIF_MSG_PROBE) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f1d62d5dbaff..c9b036789184 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
| 13207 | 13207 | ||
| 13208 | /* VF with OLD Hypervisor or old PF do not support filtering */ | 13208 | /* VF with OLD Hypervisor or old PF do not support filtering */ |
| 13209 | if (IS_PF(bp)) { | 13209 | if (IS_PF(bp)) { |
| 13210 | if (CHIP_IS_E1x(bp)) | 13210 | if (chip_is_e1x) |
| 13211 | bp->accept_any_vlan = true; | 13211 | bp->accept_any_vlan = true; |
| 13212 | else | 13212 | else |
| 13213 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; | 13213 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index f683d97d7614..b89504405b72 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c | |||
| @@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev) | |||
| 560 | #endif | 560 | #endif |
| 561 | 561 | ||
| 562 | /* For PCI-E Advanced Error Recovery (AER) Interface */ | 562 | /* For PCI-E Advanced Error Recovery (AER) Interface */ |
| 563 | static struct pci_error_handlers liquidio_err_handler = { | 563 | static const struct pci_error_handlers liquidio_err_handler = { |
| 564 | .error_detected = liquidio_pcie_error_detected, | 564 | .error_detected = liquidio_pcie_error_detected, |
| 565 | .mmio_enabled = liquidio_pcie_mmio_enabled, | 565 | .mmio_enabled = liquidio_pcie_mmio_enabled, |
| 566 | .slot_reset = liquidio_pcie_slot_reset, | 566 | .slot_reset = liquidio_pcie_slot_reset, |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a9377727c11c..7f709cbdcd87 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -1583,8 +1583,14 @@ err_disable_device: | |||
| 1583 | static void nicvf_remove(struct pci_dev *pdev) | 1583 | static void nicvf_remove(struct pci_dev *pdev) |
| 1584 | { | 1584 | { |
| 1585 | struct net_device *netdev = pci_get_drvdata(pdev); | 1585 | struct net_device *netdev = pci_get_drvdata(pdev); |
| 1586 | struct nicvf *nic = netdev_priv(netdev); | 1586 | struct nicvf *nic; |
| 1587 | struct net_device *pnetdev = nic->pnicvf->netdev; | 1587 | struct net_device *pnetdev; |
| 1588 | |||
| 1589 | if (!netdev) | ||
| 1590 | return; | ||
| 1591 | |||
| 1592 | nic = netdev_priv(netdev); | ||
| 1593 | pnetdev = nic->pnicvf->netdev; | ||
| 1588 | 1594 | ||
| 1589 | /* Check if this Qset is assigned to different VF. | 1595 | /* Check if this Qset is assigned to different VF. |
| 1590 | * If yes, clean primary and all secondary Qsets. | 1596 | * If yes, clean primary and all secondary Qsets. |
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index f6e858d0b9d4..ebdc83247bb6 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig | |||
| @@ -17,15 +17,16 @@ config NET_VENDOR_DLINK | |||
| 17 | if NET_VENDOR_DLINK | 17 | if NET_VENDOR_DLINK |
| 18 | 18 | ||
| 19 | config DL2K | 19 | config DL2K |
| 20 | tristate "DL2000/TC902x-based Gigabit Ethernet support" | 20 | tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support" |
| 21 | depends on PCI | 21 | depends on PCI |
| 22 | select CRC32 | 22 | select CRC32 |
| 23 | ---help--- | 23 | ---help--- |
| 24 | This driver supports DL2000/TC902x-based Gigabit ethernet cards, | 24 | This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards, |
| 25 | which includes | 25 | which includes |
| 26 | D-Link DGE-550T Gigabit Ethernet Adapter. | 26 | D-Link DGE-550T Gigabit Ethernet Adapter. |
| 27 | D-Link DL2000-based Gigabit Ethernet Adapter. | 27 | D-Link DL2000-based Gigabit Ethernet Adapter. |
| 28 | Sundance/Tamarack TC902x Gigabit Ethernet Adapter. | 28 | Sundance/Tamarack TC902x Gigabit Ethernet Adapter. |
| 29 | ICPlus IP1000A-based cards | ||
| 29 | 30 | ||
| 30 | To compile this driver as a module, choose M here: the | 31 | To compile this driver as a module, choose M here: the |
| 31 | module will be called dl2k. | 32 | module will be called dl2k. |
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index cf0a5fcdaaaf..ccca4799c27b 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c | |||
| @@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 253 | if (err) | 253 | if (err) |
| 254 | goto err_out_unmap_rx; | 254 | goto err_out_unmap_rx; |
| 255 | 255 | ||
| 256 | if (np->chip_id == CHIP_IP1000A && | ||
| 257 | (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { | ||
| 258 | /* PHY magic taken from ipg driver, undocumented registers */ | ||
| 259 | mii_write(dev, np->phy_addr, 31, 0x0001); | ||
| 260 | mii_write(dev, np->phy_addr, 27, 0x01e0); | ||
| 261 | mii_write(dev, np->phy_addr, 31, 0x0002); | ||
| 262 | mii_write(dev, np->phy_addr, 27, 0xeb8e); | ||
| 263 | mii_write(dev, np->phy_addr, 31, 0x0000); | ||
| 264 | mii_write(dev, np->phy_addr, 30, 0x005e); | ||
| 265 | /* advertise 1000BASE-T half & full duplex, prefer MASTER */ | ||
| 266 | mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); | ||
| 267 | } | ||
| 268 | |||
| 256 | /* Fiber device? */ | 269 | /* Fiber device? */ |
| 257 | np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; | 270 | np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; |
| 258 | np->link_status = 0; | 271 | np->link_status = 0; |
| @@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev) | |||
| 361 | for (i = 0; i < 6; i++) | 374 | for (i = 0; i < 6; i++) |
| 362 | dev->dev_addr[i] = psrom->mac_addr[i]; | 375 | dev->dev_addr[i] = psrom->mac_addr[i]; |
| 363 | 376 | ||
| 377 | if (np->chip_id == CHIP_IP1000A) { | ||
| 378 | np->led_mode = psrom->led_mode; | ||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 364 | if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { | 382 | if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { |
| 365 | return 0; | 383 | return 0; |
| 366 | } | 384 | } |
| @@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev) | |||
| 406 | return 0; | 424 | return 0; |
| 407 | } | 425 | } |
| 408 | 426 | ||
| 427 | static void rio_set_led_mode(struct net_device *dev) | ||
| 428 | { | ||
| 429 | struct netdev_private *np = netdev_priv(dev); | ||
| 430 | void __iomem *ioaddr = np->ioaddr; | ||
| 431 | u32 mode; | ||
| 432 | |||
| 433 | if (np->chip_id != CHIP_IP1000A) | ||
| 434 | return; | ||
| 435 | |||
| 436 | mode = dr32(ASICCtrl); | ||
| 437 | mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); | ||
| 438 | |||
| 439 | if (np->led_mode & 0x01) | ||
| 440 | mode |= IPG_AC_LED_MODE; | ||
| 441 | if (np->led_mode & 0x02) | ||
| 442 | mode |= IPG_AC_LED_MODE_BIT_1; | ||
| 443 | if (np->led_mode & 0x08) | ||
| 444 | mode |= IPG_AC_LED_SPEED; | ||
| 445 | |||
| 446 | dw32(ASICCtrl, mode); | ||
| 447 | } | ||
| 448 | |||
| 409 | static int | 449 | static int |
| 410 | rio_open (struct net_device *dev) | 450 | rio_open (struct net_device *dev) |
| 411 | { | 451 | { |
| @@ -424,6 +464,8 @@ rio_open (struct net_device *dev) | |||
| 424 | GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); | 464 | GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); |
| 425 | mdelay(10); | 465 | mdelay(10); |
| 426 | 466 | ||
| 467 | rio_set_led_mode(dev); | ||
| 468 | |||
| 427 | /* DebugCtrl bit 4, 5, 9 must set */ | 469 | /* DebugCtrl bit 4, 5, 9 must set */ |
| 428 | dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); | 470 | dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); |
| 429 | 471 | ||
| @@ -433,9 +475,13 @@ rio_open (struct net_device *dev) | |||
| 433 | 475 | ||
| 434 | alloc_list (dev); | 476 | alloc_list (dev); |
| 435 | 477 | ||
| 436 | /* Get station address */ | 478 | /* Set station address */ |
| 437 | for (i = 0; i < 6; i++) | 479 | /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works |
| 438 | dw8(StationAddr0 + i, dev->dev_addr[i]); | 480 | * too. However, it doesn't work on IP1000A so we use 16-bit access. |
| 481 | */ | ||
| 482 | for (i = 0; i < 3; i++) | ||
| 483 | dw16(StationAddr0 + 2 * i, | ||
| 484 | cpu_to_le16(((u16 *)dev->dev_addr)[i])); | ||
| 439 | 485 | ||
| 440 | set_multicast (dev); | 486 | set_multicast (dev); |
| 441 | if (np->coalesce) { | 487 | if (np->coalesce) { |
| @@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status) | |||
| 780 | break; | 826 | break; |
| 781 | mdelay (1); | 827 | mdelay (1); |
| 782 | } | 828 | } |
| 829 | rio_set_led_mode(dev); | ||
| 783 | rio_free_tx (dev, 1); | 830 | rio_free_tx (dev, 1); |
| 784 | /* Reset TFDListPtr */ | 831 | /* Reset TFDListPtr */ |
| 785 | dw32(TFDListPtr0, np->tx_ring_dma + | 832 | dw32(TFDListPtr0, np->tx_ring_dma + |
| @@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status) | |||
| 799 | break; | 846 | break; |
| 800 | mdelay (1); | 847 | mdelay (1); |
| 801 | } | 848 | } |
| 849 | rio_set_led_mode(dev); | ||
| 802 | /* Let TxStartThresh stay default value */ | 850 | /* Let TxStartThresh stay default value */ |
| 803 | } | 851 | } |
| 804 | /* Maximum Collisions */ | 852 | /* Maximum Collisions */ |
| @@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status) | |||
| 965 | dev->name, int_status); | 1013 | dev->name, int_status); |
| 966 | dw16(ASICCtrl + 2, GlobalReset | HostReset); | 1014 | dw16(ASICCtrl + 2, GlobalReset | HostReset); |
| 967 | mdelay (500); | 1015 | mdelay (500); |
| 1016 | rio_set_led_mode(dev); | ||
| 968 | } | 1017 | } |
| 969 | } | 1018 | } |
| 970 | 1019 | ||
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 23c07b007069..8f4f61262d5c 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h | |||
| @@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits { | |||
| 211 | ResetBusy = 0x0400, | 211 | ResetBusy = 0x0400, |
| 212 | }; | 212 | }; |
| 213 | 213 | ||
| 214 | #define IPG_AC_LED_MODE BIT(14) | ||
| 215 | #define IPG_AC_LED_SPEED BIT(27) | ||
| 216 | #define IPG_AC_LED_MODE_BIT_1 BIT(29) | ||
| 217 | |||
| 214 | /* Transmit Frame Control bits */ | 218 | /* Transmit Frame Control bits */ |
| 215 | enum TFC_bits { | 219 | enum TFC_bits { |
| 216 | DwordAlign = 0x00000000, | 220 | DwordAlign = 0x00000000, |
| @@ -332,7 +336,10 @@ typedef struct t_SROM { | |||
| 332 | u16 asic_ctrl; /* 0x02 */ | 336 | u16 asic_ctrl; /* 0x02 */ |
| 333 | u16 sub_vendor_id; /* 0x04 */ | 337 | u16 sub_vendor_id; /* 0x04 */ |
| 334 | u16 sub_system_id; /* 0x06 */ | 338 | u16 sub_system_id; /* 0x06 */ |
| 335 | u16 reserved1[12]; /* 0x08-0x1f */ | 339 | u16 pci_base_1; /* 0x08 (IP1000A only) */ |
| 340 | u16 pci_base_2; /* 0x0a (IP1000A only) */ | ||
| 341 | u16 led_mode; /* 0x0c (IP1000A only) */ | ||
| 342 | u16 reserved1[9]; /* 0x0e-0x1f */ | ||
| 336 | u8 mac_addr[6]; /* 0x20-0x25 */ | 343 | u8 mac_addr[6]; /* 0x20-0x25 */ |
| 337 | u8 reserved2[10]; /* 0x26-0x2f */ | 344 | u8 reserved2[10]; /* 0x26-0x2f */ |
| 338 | u8 sib[204]; /* 0x30-0xfb */ | 345 | u8 sib[204]; /* 0x30-0xfb */ |
| @@ -397,6 +404,7 @@ struct netdev_private { | |||
| 397 | u16 advertising; /* NWay media advertisement */ | 404 | u16 advertising; /* NWay media advertisement */ |
| 398 | u16 negotiate; /* Negotiated media */ | 405 | u16 negotiate; /* Negotiated media */ |
| 399 | int phy_addr; /* PHY addresses. */ | 406 | int phy_addr; /* PHY addresses. */ |
| 407 | u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */ | ||
| 400 | }; | 408 | }; |
| 401 | 409 | ||
| 402 | /* The station address location in the EEPROM. */ | 410 | /* The station address location in the EEPROM. */ |
| @@ -407,10 +415,15 @@ struct netdev_private { | |||
| 407 | class_mask of the class are honored during the comparison. | 415 | class_mask of the class are honored during the comparison. |
| 408 | driver_data Data private to the driver. | 416 | driver_data Data private to the driver. |
| 409 | */ | 417 | */ |
| 418 | #define CHIP_IP1000A 1 | ||
| 410 | 419 | ||
| 411 | static const struct pci_device_id rio_pci_tbl[] = { | 420 | static const struct pci_device_id rio_pci_tbl[] = { |
| 412 | {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, | 421 | {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, |
| 413 | {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, | 422 | {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, |
| 423 | { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A }, | ||
| 424 | { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A }, | ||
| 425 | { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A }, | ||
| 426 | { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A }, | ||
| 414 | { } | 427 | { } |
| 415 | }; | 428 | }; |
| 416 | MODULE_DEVICE_TABLE (pci, rio_pci_tbl); | 429 | MODULE_DEVICE_TABLE (pci, rio_pci_tbl); |
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index f4cb8e425853..734f655c99c1 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |||
| @@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, | |||
| 1062 | static int be_set_rss_hash_opts(struct be_adapter *adapter, | 1062 | static int be_set_rss_hash_opts(struct be_adapter *adapter, |
| 1063 | struct ethtool_rxnfc *cmd) | 1063 | struct ethtool_rxnfc *cmd) |
| 1064 | { | 1064 | { |
| 1065 | struct be_rx_obj *rxo; | 1065 | int status; |
| 1066 | int status = 0, i, j; | ||
| 1067 | u8 rsstable[128]; | ||
| 1068 | u32 rss_flags = adapter->rss_info.rss_flags; | 1066 | u32 rss_flags = adapter->rss_info.rss_flags; |
| 1069 | 1067 | ||
| 1070 | if (cmd->data != L3_RSS_FLAGS && | 1068 | if (cmd->data != L3_RSS_FLAGS && |
| @@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter, | |||
| 1113 | } | 1111 | } |
| 1114 | 1112 | ||
| 1115 | if (rss_flags == adapter->rss_info.rss_flags) | 1113 | if (rss_flags == adapter->rss_info.rss_flags) |
| 1116 | return status; | 1114 | return 0; |
| 1117 | |||
| 1118 | if (be_multi_rxq(adapter)) { | ||
| 1119 | for (j = 0; j < 128; j += adapter->num_rss_qs) { | ||
| 1120 | for_all_rss_queues(adapter, rxo, i) { | ||
| 1121 | if ((j + i) >= 128) | ||
| 1122 | break; | ||
| 1123 | rsstable[j + i] = rxo->rss_id; | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | } | ||
| 1127 | 1115 | ||
| 1128 | status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, | 1116 | status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, |
| 1129 | rss_flags, 128, adapter->rss_info.rss_hkey); | 1117 | rss_flags, RSS_INDIR_TABLE_LEN, |
| 1118 | adapter->rss_info.rss_hkey); | ||
| 1130 | if (!status) | 1119 | if (!status) |
| 1131 | adapter->rss_info.rss_flags = rss_flags; | 1120 | adapter->rss_info.rss_flags = rss_flags; |
| 1132 | 1121 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eb48a977f8da..b6ad02909d6b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) | |||
| 3518 | 3518 | ||
| 3519 | netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); | 3519 | netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); |
| 3520 | rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, | 3520 | rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, |
| 3521 | 128, rss_key); | 3521 | RSS_INDIR_TABLE_LEN, rss_key); |
| 3522 | if (rc) { | 3522 | if (rc) { |
| 3523 | rss->rss_flags = RSS_ENABLE_NONE; | 3523 | rss->rss_flags = RSS_ENABLE_NONE; |
| 3524 | return rc; | 3524 | return rc; |
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig deleted file mode 100644 index 14a66e9d2e26..000000000000 --- a/drivers/net/ethernet/icplus/Kconfig +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | # | ||
| 2 | # IC Plus device configuration | ||
| 3 | # | ||
| 4 | |||
| 5 | config IP1000 | ||
| 6 | tristate "IP1000 Gigabit Ethernet support" | ||
| 7 | depends on PCI | ||
| 8 | select MII | ||
| 9 | ---help--- | ||
| 10 | This driver supports IP1000 gigabit Ethernet cards. | ||
| 11 | |||
| 12 | To compile this driver as a module, choose M here: the module | ||
| 13 | will be called ipg. This is recommended. | ||
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile deleted file mode 100644 index 5bc87c1f36aa..000000000000 --- a/drivers/net/ethernet/icplus/Makefile +++ /dev/null | |||
| @@ -1,5 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Makefile for the IC Plus device drivers | ||
| 3 | # | ||
| 4 | |||
| 5 | obj-$(CONFIG_IP1000) += ipg.o | ||
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c deleted file mode 100644 index c3b6af83f070..000000000000 --- a/drivers/net/ethernet/icplus/ipg.c +++ /dev/null | |||
| @@ -1,2300 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter | ||
| 3 | * | ||
| 4 | * Copyright (C) 2003, 2007 IC Plus Corp | ||
| 5 | * | ||
| 6 | * Original Author: | ||
| 7 | * | ||
| 8 | * Craig Rich | ||
| 9 | * Sundance Technology, Inc. | ||
| 10 | * www.sundanceti.com | ||
| 11 | * craig_rich@sundanceti.com | ||
| 12 | * | ||
| 13 | * Current Maintainer: | ||
| 14 | * | ||
| 15 | * Sorbica Shieh. | ||
| 16 | * http://www.icplus.com.tw | ||
| 17 | * sorbica@icplus.com.tw | ||
| 18 | * | ||
| 19 | * Jesse Huang | ||
| 20 | * http://www.icplus.com.tw | ||
| 21 | * jesse@icplus.com.tw | ||
| 22 | */ | ||
| 23 | |||
| 24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 25 | |||
| 26 | #include <linux/crc32.h> | ||
| 27 | #include <linux/ethtool.h> | ||
| 28 | #include <linux/interrupt.h> | ||
| 29 | #include <linux/gfp.h> | ||
| 30 | #include <linux/mii.h> | ||
| 31 | #include <linux/mutex.h> | ||
| 32 | |||
| 33 | #include <asm/div64.h> | ||
| 34 | |||
| 35 | #define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) | ||
| 36 | #define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) | ||
| 37 | #define IPG_RESET_MASK \ | ||
| 38 | (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ | ||
| 39 | IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ | ||
| 40 | IPG_AC_AUTO_INIT) | ||
| 41 | |||
| 42 | #define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) | ||
| 43 | #define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) | ||
| 44 | #define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) | ||
| 45 | |||
| 46 | #define ipg_r32(reg) ioread32(ioaddr + (reg)) | ||
| 47 | #define ipg_r16(reg) ioread16(ioaddr + (reg)) | ||
| 48 | #define ipg_r8(reg) ioread8(ioaddr + (reg)) | ||
| 49 | |||
| 50 | enum { | ||
| 51 | netdev_io_size = 128 | ||
| 52 | }; | ||
| 53 | |||
| 54 | #include "ipg.h" | ||
| 55 | #define DRV_NAME "ipg" | ||
| 56 | |||
| 57 | MODULE_AUTHOR("IC Plus Corp. 2003"); | ||
| 58 | MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); | ||
| 59 | MODULE_LICENSE("GPL"); | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Defaults | ||
| 63 | */ | ||
| 64 | #define IPG_MAX_RXFRAME_SIZE 0x0600 | ||
| 65 | #define IPG_RXFRAG_SIZE 0x0600 | ||
| 66 | #define IPG_RXSUPPORT_SIZE 0x0600 | ||
| 67 | #define IPG_IS_JUMBO false | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Variable record -- index by leading revision/length | ||
| 71 | * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN | ||
| 72 | */ | ||
| 73 | static const unsigned short DefaultPhyParam[] = { | ||
| 74 | /* 11/12/03 IP1000A v1-3 rev=0x40 */ | ||
| 75 | /*-------------------------------------------------------------------------- | ||
| 76 | (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, | ||
| 77 | 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, | ||
| 78 | 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, | ||
| 79 | --------------------------------------------------------------------------*/ | ||
| 80 | /* 12/17/03 IP1000A v1-4 rev=0x40 */ | ||
| 81 | (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
| 82 | 0x0000, | ||
| 83 | 30, 0x005e, 9, 0x0700, | ||
| 84 | /* 01/09/04 IP1000A v1-5 rev=0x41 */ | ||
| 85 | (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
| 86 | 0x0000, | ||
| 87 | 30, 0x005e, 9, 0x0700, | ||
| 88 | 0x0000 | ||
| 89 | }; | ||
| 90 | |||
| 91 | static const char * const ipg_brand_name[] = { | ||
| 92 | "IC PLUS IP1000 1000/100/10 based NIC", | ||
| 93 | "Sundance Technology ST2021 based NIC", | ||
| 94 | "Tamarack Microelectronics TC9020/9021 based NIC", | ||
| 95 | "D-Link NIC IP1000A" | ||
| 96 | }; | ||
| 97 | |||
| 98 | static const struct pci_device_id ipg_pci_tbl[] = { | ||
| 99 | { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, | ||
| 100 | { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, | ||
| 101 | { PCI_VDEVICE(DLINK, 0x9021), 2 }, | ||
| 102 | { PCI_VDEVICE(DLINK, 0x4020), 3 }, | ||
| 103 | { 0, } | ||
| 104 | }; | ||
| 105 | |||
| 106 | MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); | ||
| 107 | |||
| 108 | static inline void __iomem *ipg_ioaddr(struct net_device *dev) | ||
| 109 | { | ||
| 110 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 111 | return sp->ioaddr; | ||
| 112 | } | ||
| 113 | |||
| 114 | #ifdef IPG_DEBUG | ||
| 115 | static void ipg_dump_rfdlist(struct net_device *dev) | ||
| 116 | { | ||
| 117 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 118 | void __iomem *ioaddr = sp->ioaddr; | ||
| 119 | unsigned int i; | ||
| 120 | u32 offset; | ||
| 121 | |||
| 122 | IPG_DEBUG_MSG("_dump_rfdlist\n"); | ||
| 123 | |||
| 124 | netdev_info(dev, "rx_current = %02x\n", sp->rx_current); | ||
| 125 | netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty); | ||
| 126 | netdev_info(dev, "RFDList start address = %016lx\n", | ||
| 127 | (unsigned long)sp->rxd_map); | ||
| 128 | netdev_info(dev, "RFDListPtr register = %08x%08x\n", | ||
| 129 | ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); | ||
| 130 | |||
| 131 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
| 132 | offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; | ||
| 133 | netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n", | ||
| 134 | i, offset, (unsigned long)sp->rxd[i].next_desc); | ||
| 135 | offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; | ||
| 136 | netdev_info(dev, "%02x %04x RFS = %016lx\n", | ||
| 137 | i, offset, (unsigned long)sp->rxd[i].rfs); | ||
| 138 | offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; | ||
| 139 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
| 140 | i, offset, (unsigned long)sp->rxd[i].frag_info); | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | static void ipg_dump_tfdlist(struct net_device *dev) | ||
| 145 | { | ||
| 146 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 147 | void __iomem *ioaddr = sp->ioaddr; | ||
| 148 | unsigned int i; | ||
| 149 | u32 offset; | ||
| 150 | |||
| 151 | IPG_DEBUG_MSG("_dump_tfdlist\n"); | ||
| 152 | |||
| 153 | netdev_info(dev, "tx_current = %02x\n", sp->tx_current); | ||
| 154 | netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty); | ||
| 155 | netdev_info(dev, "TFDList start address = %016lx\n", | ||
| 156 | (unsigned long) sp->txd_map); | ||
| 157 | netdev_info(dev, "TFDListPtr register = %08x%08x\n", | ||
| 158 | ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); | ||
| 159 | |||
| 160 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
| 161 | offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; | ||
| 162 | netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n", | ||
| 163 | i, offset, (unsigned long)sp->txd[i].next_desc); | ||
| 164 | |||
| 165 | offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; | ||
| 166 | netdev_info(dev, "%02x %04x TFC = %016lx\n", | ||
| 167 | i, offset, (unsigned long) sp->txd[i].tfc); | ||
| 168 | offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; | ||
| 169 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
| 170 | i, offset, (unsigned long) sp->txd[i].frag_info); | ||
| 171 | } | ||
| 172 | } | ||
| 173 | #endif | ||
| 174 | |||
| 175 | static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) | ||
| 176 | { | ||
| 177 | ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); | ||
| 178 | ndelay(IPG_PC_PHYCTRLWAIT_NS); | ||
| 179 | } | ||
| 180 | |||
| 181 | static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) | ||
| 182 | { | ||
| 183 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); | ||
| 184 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); | ||
| 185 | } | ||
| 186 | |||
| 187 | static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
| 188 | { | ||
| 189 | phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; | ||
| 190 | |||
| 191 | ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); | ||
| 192 | } | ||
| 193 | |||
| 194 | static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
| 195 | { | ||
| 196 | ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | | ||
| 197 | phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); | ||
| 198 | } | ||
| 199 | |||
| 200 | static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
| 201 | { | ||
| 202 | u16 bit_data; | ||
| 203 | |||
| 204 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); | ||
| 205 | |||
| 206 | bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; | ||
| 207 | |||
| 208 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); | ||
| 209 | |||
| 210 | return bit_data; | ||
| 211 | } | ||
| 212 | |||
| 213 | /* | ||
| 214 | * Read a register from the Physical Layer device located | ||
| 215 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
| 216 | */ | ||
| 217 | static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) | ||
| 218 | { | ||
| 219 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
| 220 | /* | ||
| 221 | * The GMII mangement frame structure for a read is as follows: | ||
| 222 | * | ||
| 223 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
| 224 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
| 225 | * | ||
| 226 | * <32 1s> = 32 consecutive logic 1 values | ||
| 227 | * A = bit of Physical Layer device address (MSB first) | ||
| 228 | * R = bit of register address (MSB first) | ||
| 229 | * z = High impedance state | ||
| 230 | * D = bit of read data (MSB first) | ||
| 231 | * | ||
| 232 | * Transmission order is 'Preamble' field first, bits transmitted | ||
| 233 | * left to right (first to last). | ||
| 234 | */ | ||
| 235 | struct { | ||
| 236 | u32 field; | ||
| 237 | unsigned int len; | ||
| 238 | } p[] = { | ||
| 239 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
| 240 | { GMII_ST, 2 }, /* ST */ | ||
| 241 | { GMII_READ, 2 }, /* OP */ | ||
| 242 | { phy_id, 5 }, /* PHYAD */ | ||
| 243 | { phy_reg, 5 }, /* REGAD */ | ||
| 244 | { 0x0000, 2 }, /* TA */ | ||
| 245 | { 0x0000, 16 }, /* DATA */ | ||
| 246 | { 0x0000, 1 } /* IDLE */ | ||
| 247 | }; | ||
| 248 | unsigned int i, j; | ||
| 249 | u8 polarity, data; | ||
| 250 | |||
| 251 | polarity = ipg_r8(PHY_CTRL); | ||
| 252 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
| 253 | |||
| 254 | /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ | ||
| 255 | for (j = 0; j < 5; j++) { | ||
| 256 | for (i = 0; i < p[j].len; i++) { | ||
| 257 | /* For each variable length field, the MSB must be | ||
| 258 | * transmitted first. Rotate through the field bits, | ||
| 259 | * starting with the MSB, and move each bit into the | ||
| 260 | * the 1st (2^1) bit position (this is the bit position | ||
| 261 | * corresponding to the MgmtData bit of the PhyCtrl | ||
| 262 | * register for the IPG). | ||
| 263 | * | ||
| 264 | * Example: ST = 01; | ||
| 265 | * | ||
| 266 | * First write a '0' to bit 1 of the PhyCtrl | ||
| 267 | * register, then write a '1' to bit 1 of the | ||
| 268 | * PhyCtrl register. | ||
| 269 | * | ||
| 270 | * To do this, right shift the MSB of ST by the value: | ||
| 271 | * [field length - 1 - #ST bits already written] | ||
| 272 | * then left shift this result by 1. | ||
| 273 | */ | ||
| 274 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
| 275 | data &= IPG_PC_MGMTDATA; | ||
| 276 | data |= polarity | IPG_PC_MGMTDIR; | ||
| 277 | |||
| 278 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
| 279 | } | ||
| 280 | } | ||
| 281 | |||
| 282 | send_three_state(ioaddr, polarity); | ||
| 283 | |||
| 284 | read_phy_bit(ioaddr, polarity); | ||
| 285 | |||
| 286 | /* | ||
| 287 | * For a read cycle, the bits for the next two fields (TA and | ||
| 288 | * DATA) are driven by the PHY (the IPG reads these bits). | ||
| 289 | */ | ||
| 290 | for (i = 0; i < p[6].len; i++) { | ||
| 291 | p[6].field |= | ||
| 292 | (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); | ||
| 293 | } | ||
| 294 | |||
| 295 | send_three_state(ioaddr, polarity); | ||
| 296 | send_three_state(ioaddr, polarity); | ||
| 297 | send_three_state(ioaddr, polarity); | ||
| 298 | send_end(ioaddr, polarity); | ||
| 299 | |||
| 300 | /* Return the value of the DATA field. */ | ||
| 301 | return p[6].field; | ||
| 302 | } | ||
| 303 | |||
| 304 | /* | ||
| 305 | * Write to a register from the Physical Layer device located | ||
| 306 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
| 307 | */ | ||
| 308 | static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) | ||
| 309 | { | ||
| 310 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
| 311 | /* | ||
| 312 | * The GMII mangement frame structure for a read is as follows: | ||
| 313 | * | ||
| 314 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
| 315 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
| 316 | * | ||
| 317 | * <32 1s> = 32 consecutive logic 1 values | ||
| 318 | * A = bit of Physical Layer device address (MSB first) | ||
| 319 | * R = bit of register address (MSB first) | ||
| 320 | * z = High impedance state | ||
| 321 | * D = bit of write data (MSB first) | ||
| 322 | * | ||
| 323 | * Transmission order is 'Preamble' field first, bits transmitted | ||
| 324 | * left to right (first to last). | ||
| 325 | */ | ||
| 326 | struct { | ||
| 327 | u32 field; | ||
| 328 | unsigned int len; | ||
| 329 | } p[] = { | ||
| 330 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
| 331 | { GMII_ST, 2 }, /* ST */ | ||
| 332 | { GMII_WRITE, 2 }, /* OP */ | ||
| 333 | { phy_id, 5 }, /* PHYAD */ | ||
| 334 | { phy_reg, 5 }, /* REGAD */ | ||
| 335 | { 0x0002, 2 }, /* TA */ | ||
| 336 | { val & 0xffff, 16 }, /* DATA */ | ||
| 337 | { 0x0000, 1 } /* IDLE */ | ||
| 338 | }; | ||
| 339 | unsigned int i, j; | ||
| 340 | u8 polarity, data; | ||
| 341 | |||
| 342 | polarity = ipg_r8(PHY_CTRL); | ||
| 343 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
| 344 | |||
| 345 | /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ | ||
| 346 | for (j = 0; j < 7; j++) { | ||
| 347 | for (i = 0; i < p[j].len; i++) { | ||
| 348 | /* For each variable length field, the MSB must be | ||
| 349 | * transmitted first. Rotate through the field bits, | ||
| 350 | * starting with the MSB, and move each bit into the | ||
| 351 | * the 1st (2^1) bit position (this is the bit position | ||
| 352 | * corresponding to the MgmtData bit of the PhyCtrl | ||
| 353 | * register for the IPG). | ||
| 354 | * | ||
| 355 | * Example: ST = 01; | ||
| 356 | * | ||
| 357 | * First write a '0' to bit 1 of the PhyCtrl | ||
| 358 | * register, then write a '1' to bit 1 of the | ||
| 359 | * PhyCtrl register. | ||
| 360 | * | ||
| 361 | * To do this, right shift the MSB of ST by the value: | ||
| 362 | * [field length - 1 - #ST bits already written] | ||
| 363 | * then left shift this result by 1. | ||
| 364 | */ | ||
| 365 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
| 366 | data &= IPG_PC_MGMTDATA; | ||
| 367 | data |= polarity | IPG_PC_MGMTDIR; | ||
| 368 | |||
| 369 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | /* The last cycle is a tri-state, so read from the PHY. */ | ||
| 374 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); | ||
| 375 | ipg_r8(PHY_CTRL); | ||
| 376 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); | ||
| 377 | } | ||
| 378 | |||
| 379 | static void ipg_set_led_mode(struct net_device *dev) | ||
| 380 | { | ||
| 381 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 382 | void __iomem *ioaddr = sp->ioaddr; | ||
| 383 | u32 mode; | ||
| 384 | |||
| 385 | mode = ipg_r32(ASIC_CTRL); | ||
| 386 | mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); | ||
| 387 | |||
| 388 | if ((sp->led_mode & 0x03) > 1) | ||
| 389 | mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ | ||
| 390 | |||
| 391 | if ((sp->led_mode & 0x01) == 1) | ||
| 392 | mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ | ||
| 393 | |||
| 394 | if ((sp->led_mode & 0x08) == 8) | ||
| 395 | mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ | ||
| 396 | |||
| 397 | ipg_w32(mode, ASIC_CTRL); | ||
| 398 | } | ||
| 399 | |||
| 400 | static void ipg_set_phy_set(struct net_device *dev) | ||
| 401 | { | ||
| 402 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 403 | void __iomem *ioaddr = sp->ioaddr; | ||
| 404 | int physet; | ||
| 405 | |||
| 406 | physet = ipg_r8(PHY_SET); | ||
| 407 | physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); | ||
| 408 | physet |= ((sp->led_mode & 0x70) >> 4); | ||
| 409 | ipg_w8(physet, PHY_SET); | ||
| 410 | } | ||
| 411 | |||
| 412 | static int ipg_reset(struct net_device *dev, u32 resetflags) | ||
| 413 | { | ||
| 414 | /* Assert functional resets via the IPG AsicCtrl | ||
| 415 | * register as specified by the 'resetflags' input | ||
| 416 | * parameter. | ||
| 417 | */ | ||
| 418 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
| 419 | unsigned int timeout_count = 0; | ||
| 420 | |||
| 421 | IPG_DEBUG_MSG("_reset\n"); | ||
| 422 | |||
| 423 | ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); | ||
| 424 | |||
| 425 | /* Delay added to account for problem with 10Mbps reset. */ | ||
| 426 | mdelay(IPG_AC_RESETWAIT); | ||
| 427 | |||
| 428 | while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { | ||
| 429 | mdelay(IPG_AC_RESETWAIT); | ||
| 430 | if (++timeout_count > IPG_AC_RESET_TIMEOUT) | ||
| 431 | return -ETIME; | ||
| 432 | } | ||
| 433 | /* Set LED Mode in Asic Control */ | ||
| 434 | ipg_set_led_mode(dev); | ||
| 435 | |||
| 436 | /* Set PHYSet Register Value */ | ||
| 437 | ipg_set_phy_set(dev); | ||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | |||
| 441 | /* Find the GMII PHY address. */ | ||
| 442 | static int ipg_find_phyaddr(struct net_device *dev) | ||
| 443 | { | ||
| 444 | unsigned int phyaddr, i; | ||
| 445 | |||
| 446 | for (i = 0; i < 32; i++) { | ||
| 447 | u32 status; | ||
| 448 | |||
| 449 | /* Search for the correct PHY address among 32 possible. */ | ||
| 450 | phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; | ||
| 451 | |||
| 452 | /* 10/22/03 Grace change verify from GMII_PHY_STATUS to | ||
| 453 | GMII_PHY_ID1 | ||
| 454 | */ | ||
| 455 | |||
| 456 | status = mdio_read(dev, phyaddr, MII_BMSR); | ||
| 457 | |||
| 458 | if ((status != 0xFFFF) && (status != 0)) | ||
| 459 | return phyaddr; | ||
| 460 | } | ||
| 461 | |||
| 462 | return 0x1f; | ||
| 463 | } | ||
| 464 | |||
| 465 | /* | ||
| 466 | * Configure IPG based on result of IEEE 802.3 PHY | ||
| 467 | * auto-negotiation. | ||
| 468 | */ | ||
| 469 | static int ipg_config_autoneg(struct net_device *dev) | ||
| 470 | { | ||
| 471 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 472 | void __iomem *ioaddr = sp->ioaddr; | ||
| 473 | unsigned int txflowcontrol; | ||
| 474 | unsigned int rxflowcontrol; | ||
| 475 | unsigned int fullduplex; | ||
| 476 | u32 mac_ctrl_val; | ||
| 477 | u32 asicctrl; | ||
| 478 | u8 phyctrl; | ||
| 479 | const char *speed; | ||
| 480 | const char *duplex; | ||
| 481 | const char *tx_desc; | ||
| 482 | const char *rx_desc; | ||
| 483 | |||
| 484 | IPG_DEBUG_MSG("_config_autoneg\n"); | ||
| 485 | |||
| 486 | asicctrl = ipg_r32(ASIC_CTRL); | ||
| 487 | phyctrl = ipg_r8(PHY_CTRL); | ||
| 488 | mac_ctrl_val = ipg_r32(MAC_CTRL); | ||
| 489 | |||
| 490 | /* Set flags for use in resolving auto-negotiation, assuming | ||
| 491 | * non-1000Mbps, half duplex, no flow control. | ||
| 492 | */ | ||
| 493 | fullduplex = 0; | ||
| 494 | txflowcontrol = 0; | ||
| 495 | rxflowcontrol = 0; | ||
| 496 | |||
| 497 | /* To accommodate a problem in 10Mbps operation, | ||
| 498 | * set a global flag if PHY running in 10Mbps mode. | ||
| 499 | */ | ||
| 500 | sp->tenmbpsmode = 0; | ||
| 501 | |||
| 502 | /* Determine actual speed of operation. */ | ||
| 503 | switch (phyctrl & IPG_PC_LINK_SPEED) { | ||
| 504 | case IPG_PC_LINK_SPEED_10MBPS: | ||
| 505 | speed = "10Mbps"; | ||
| 506 | sp->tenmbpsmode = 1; | ||
| 507 | break; | ||
| 508 | case IPG_PC_LINK_SPEED_100MBPS: | ||
| 509 | speed = "100Mbps"; | ||
| 510 | break; | ||
| 511 | case IPG_PC_LINK_SPEED_1000MBPS: | ||
| 512 | speed = "1000Mbps"; | ||
| 513 | break; | ||
| 514 | default: | ||
| 515 | speed = "undefined!"; | ||
| 516 | return 0; | ||
| 517 | } | ||
| 518 | |||
| 519 | netdev_info(dev, "Link speed = %s\n", speed); | ||
| 520 | if (sp->tenmbpsmode == 1) | ||
| 521 | netdev_info(dev, "10Mbps operational mode enabled\n"); | ||
| 522 | |||
| 523 | if (phyctrl & IPG_PC_DUPLEX_STATUS) { | ||
| 524 | fullduplex = 1; | ||
| 525 | txflowcontrol = 1; | ||
| 526 | rxflowcontrol = 1; | ||
| 527 | } | ||
| 528 | |||
| 529 | /* Configure full duplex, and flow control. */ | ||
| 530 | if (fullduplex == 1) { | ||
| 531 | |||
| 532 | /* Configure IPG for full duplex operation. */ | ||
| 533 | |||
| 534 | duplex = "full"; | ||
| 535 | |||
| 536 | mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; | ||
| 537 | |||
| 538 | if (txflowcontrol == 1) { | ||
| 539 | tx_desc = ""; | ||
| 540 | mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
| 541 | } else { | ||
| 542 | tx_desc = "no "; | ||
| 543 | mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
| 544 | } | ||
| 545 | |||
| 546 | if (rxflowcontrol == 1) { | ||
| 547 | rx_desc = ""; | ||
| 548 | mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
| 549 | } else { | ||
| 550 | rx_desc = "no "; | ||
| 551 | mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
| 552 | } | ||
| 553 | } else { | ||
| 554 | duplex = "half"; | ||
| 555 | tx_desc = "no "; | ||
| 556 | rx_desc = "no "; | ||
| 557 | mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD & | ||
| 558 | ~IPG_MC_TX_FLOW_CONTROL_ENABLE & | ||
| 559 | ~IPG_MC_RX_FLOW_CONTROL_ENABLE); | ||
| 560 | } | ||
| 561 | |||
| 562 | netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n", | ||
| 563 | duplex, tx_desc, rx_desc); | ||
| 564 | ipg_w32(mac_ctrl_val, MAC_CTRL); | ||
| 565 | |||
| 566 | return 0; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* Determine and configure multicast operation and set | ||
| 570 | * receive mode for IPG. | ||
| 571 | */ | ||
| 572 | static void ipg_nic_set_multicast_list(struct net_device *dev) | ||
| 573 | { | ||
| 574 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
| 575 | struct netdev_hw_addr *ha; | ||
| 576 | unsigned int hashindex; | ||
| 577 | u32 hashtable[2]; | ||
| 578 | u8 receivemode; | ||
| 579 | |||
| 580 | IPG_DEBUG_MSG("_nic_set_multicast_list\n"); | ||
| 581 | |||
| 582 | receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; | ||
| 583 | |||
| 584 | if (dev->flags & IFF_PROMISC) { | ||
| 585 | /* NIC to be configured in promiscuous mode. */ | ||
| 586 | receivemode = IPG_RM_RECEIVEALLFRAMES; | ||
| 587 | } else if ((dev->flags & IFF_ALLMULTI) || | ||
| 588 | ((dev->flags & IFF_MULTICAST) && | ||
| 589 | (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { | ||
| 590 | /* NIC to be configured to receive all multicast | ||
| 591 | * frames. */ | ||
| 592 | receivemode |= IPG_RM_RECEIVEMULTICAST; | ||
| 593 | } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { | ||
| 594 | /* NIC to be configured to receive selected | ||
| 595 | * multicast addresses. */ | ||
| 596 | receivemode |= IPG_RM_RECEIVEMULTICASTHASH; | ||
| 597 | } | ||
| 598 | |||
| 599 | /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. | ||
| 600 | * The IPG applies a cyclic-redundancy-check (the same CRC | ||
| 601 | * used to calculate the frame data FCS) to the destination | ||
| 602 | * address all incoming multicast frames whose destination | ||
| 603 | * address has the multicast bit set. The least significant | ||
| 604 | * 6 bits of the CRC result are used as an addressing index | ||
| 605 | * into the hash table. If the value of the bit addressed by | ||
| 606 | * this index is a 1, the frame is passed to the host system. | ||
| 607 | */ | ||
| 608 | |||
| 609 | /* Clear hashtable. */ | ||
| 610 | hashtable[0] = 0x00000000; | ||
| 611 | hashtable[1] = 0x00000000; | ||
| 612 | |||
| 613 | /* Cycle through all multicast addresses to filter. */ | ||
| 614 | netdev_for_each_mc_addr(ha, dev) { | ||
| 615 | /* Calculate CRC result for each multicast address. */ | ||
| 616 | hashindex = crc32_le(0xffffffff, ha->addr, | ||
| 617 | ETH_ALEN); | ||
| 618 | |||
| 619 | /* Use only the least significant 6 bits. */ | ||
| 620 | hashindex = hashindex & 0x3F; | ||
| 621 | |||
| 622 | /* Within "hashtable", set bit number "hashindex" | ||
| 623 | * to a logic 1. | ||
| 624 | */ | ||
| 625 | set_bit(hashindex, (void *)hashtable); | ||
| 626 | } | ||
| 627 | |||
| 628 | /* Write the value of the hashtable, to the 4, 16 bit | ||
| 629 | * HASHTABLE IPG registers. | ||
| 630 | */ | ||
| 631 | ipg_w32(hashtable[0], HASHTABLE_0); | ||
| 632 | ipg_w32(hashtable[1], HASHTABLE_1); | ||
| 633 | |||
| 634 | ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); | ||
| 635 | |||
| 636 | IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); | ||
| 637 | } | ||
| 638 | |||
/*
 * Program the IPG's I/O registers: receive mode, frame size, DMA
 * thresholds, interrupt enables, flow-control thresholds and errata
 * workarounds. The transmitter/receiver enable state found in
 * MAC_CTRL on entry is preserved and restored at the end.
 * The register write order below is deliberate; do not reorder.
 */
static int ipg_io_config(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = ipg_ioaddr(dev);
	u32 origmacctrl;
	u32 restoremacctrl;

	IPG_DEBUG_MSG("_io_config\n");

	/* Snapshot MAC_CTRL so we can restore TX/RX enable state later. */
	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by IPG.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are
	 * enabled so we may restore MACCTRL correctly.
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set RECEIVEMODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);

	/* Program RX/TX DMA poll periods and urgent/burst thresholds. */
	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	/* Enable the interrupt sources the driver handles. */
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* IPG multi-frag frame bug workaround.
	 * Per silicon revision B3 eratta.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	/* IPG TX poll now bug workaround.
	 * Per silicon revision B3 eratta.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	/* IPG RX poll now bug workaround.
	 * Per silicon revision B3 eratta.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MACCTRL to original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}
| 727 | |||
| 728 | /* | ||
| 729 | * Create a receive buffer within system memory and update | ||
| 730 | * NIC private structure appropriately. | ||
| 731 | */ | ||
| 732 | static int ipg_get_rxbuff(struct net_device *dev, int entry) | ||
| 733 | { | ||
| 734 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 735 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
| 736 | struct sk_buff *skb; | ||
| 737 | u64 rxfragsize; | ||
| 738 | |||
| 739 | IPG_DEBUG_MSG("_get_rxbuff\n"); | ||
| 740 | |||
| 741 | skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); | ||
| 742 | if (!skb) { | ||
| 743 | sp->rx_buff[entry] = NULL; | ||
| 744 | return -ENOMEM; | ||
| 745 | } | ||
| 746 | |||
| 747 | /* Save the address of the sk_buff structure. */ | ||
| 748 | sp->rx_buff[entry] = skb; | ||
| 749 | |||
| 750 | rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, | ||
| 751 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); | ||
| 752 | |||
| 753 | /* Set the RFD fragment length. */ | ||
| 754 | rxfragsize = sp->rxfrag_size; | ||
| 755 | rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); | ||
| 756 | |||
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
/*
 * (Re)build the receive frame descriptor (RFD) ring: release any
 * buffers left from a previous run, allocate a fresh buffer per
 * descriptor, chain the descriptors into a circular list and hand the
 * ring's base address to the hardware. Returns 0 or -ENOMEM when not
 * even the first buffer could be allocated.
 */
static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		/* Unmap and free a buffer left over from a previous
		 * incarnation of the ring.
		 */
		if (sp->rx_buff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				netdev_err(dev, "No memory available for RFD list\n");
				return -ENOMEM;
			}
		}

		/* Chain to the next descriptor; the final link is
		 * fixed up to wrap to the base after the loop.
		 */
		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}
| 812 | |||
/*
 * (Re)build the transmit frame descriptor (TFD) ring: mark every
 * descriptor done, free any pending transmit buffers, chain the
 * descriptors into a circular list and hand the ring's base address
 * to the hardware.
 */
static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		/* Mark the descriptor free for software use. */
		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->tx_buff[i]) {
			dev_kfree_skb_irq(sp->tx_buff[i]);
			sp->tx_buff[i] = NULL;
		}

		/* Chain to the next descriptor; the final link is
		 * fixed up to wrap to the base after the loop.
		 */
		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->reset_current_tfd = 1;
}
| 847 | |||
| 848 | /* | ||
| 849 | * Free all transmit buffers which have already been transferred | ||
| 850 | * via DMA to the IPG. | ||
| 851 | */ | ||
| 852 | static void ipg_nic_txfree(struct net_device *dev) | ||
| 853 | { | ||
| 854 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 855 | unsigned int released, pending, dirty; | ||
| 856 | |||
| 857 | IPG_DEBUG_MSG("_nic_txfree\n"); | ||
| 858 | |||
| 859 | pending = sp->tx_current - sp->tx_dirty; | ||
| 860 | dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; | ||
| 861 | |||
| 862 | for (released = 0; released < pending; released++) { | ||
| 863 | struct sk_buff *skb = sp->tx_buff[dirty]; | ||
| 864 | struct ipg_tx *txfd = sp->txd + dirty; | ||
| 865 | |||
| 866 | IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc); | ||
| 867 | |||
| 868 | /* Look at each TFD's TFC field beginning | ||
| 869 | * at the last freed TFD up to the current TFD. | ||
| 870 | * If the TFDDone bit is set, free the associated | ||
| 871 | * buffer. | ||
| 872 | */ | ||
| 873 | if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) | ||
| 874 | break; | ||
| 875 | |||
| 876 | /* Free the transmit buffer. */ | ||
| 877 | if (skb) { | ||
| 878 | pci_unmap_single(sp->pdev, | ||
| 879 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
| 880 | skb->len, PCI_DMA_TODEVICE); | ||
| 881 | |||
| 882 | dev_kfree_skb_irq(skb); | ||
| 883 | |||
| 884 | sp->tx_buff[dirty] = NULL; | ||
| 885 | } | ||
| 886 | dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; | ||
| 887 | } | ||
| 888 | |||
| 889 | sp->tx_dirty += released; | ||
| 890 | |||
| 891 | if (netif_queue_stopped(dev) && | ||
| 892 | (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { | ||
| 893 | netif_wake_queue(dev); | ||
| 894 | } | ||
| 895 | } | ||
| 896 | |||
/*
 * Transmit watchdog: the stack saw no progress, so reset the transmit
 * side (TX logic, DMA, network and FIFO), re-program the I/O registers,
 * rebuild the TFD ring, then re-enable the transmitter.
 */
static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	/* Hardware reset first, outside the lock. */
	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0)
		netdev_info(dev, "Error during re-configuration\n");

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	/* Re-enable the transmitter, masking off reserved bits. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}
| 918 | |||
| 919 | /* | ||
| 920 | * For TxComplete interrupts, free all transmit | ||
| 921 | * buffers which have already been transferred via DMA | ||
| 922 | * to the IPG. | ||
| 923 | */ | ||
| 924 | static void ipg_nic_txcleanup(struct net_device *dev) | ||
| 925 | { | ||
| 926 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 927 | void __iomem *ioaddr = sp->ioaddr; | ||
| 928 | unsigned int i; | ||
| 929 | |||
| 930 | IPG_DEBUG_MSG("_nic_txcleanup\n"); | ||
| 931 | |||
| 932 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
| 933 | /* Reading the TXSTATUS register clears the | ||
| 934 | * TX_COMPLETE interrupt. | ||
| 935 | */ | ||
| 936 | u32 txstatusdword = ipg_r32(TX_STATUS); | ||
| 937 | |||
| 938 | IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword); | ||
| 939 | |||
| 940 | /* Check for Transmit errors. Error bits only valid if | ||
| 941 | * TX_COMPLETE bit in the TXSTATUS register is a 1. | ||
| 942 | */ | ||
| 943 | if (!(txstatusdword & IPG_TS_TX_COMPLETE)) | ||
| 944 | break; | ||
| 945 | |||
| 946 | /* If in 10Mbps mode, indicate transmit is ready. */ | ||
| 947 | if (sp->tenmbpsmode) { | ||
| 948 | netif_wake_queue(dev); | ||
| 949 | } | ||
| 950 | |||
| 951 | /* Transmit error, increment stat counters. */ | ||
| 952 | if (txstatusdword & IPG_TS_TX_ERROR) { | ||
| 953 | IPG_DEBUG_MSG("Transmit error\n"); | ||
| 954 | sp->stats.tx_errors++; | ||
| 955 | } | ||
| 956 | |||
| 957 | /* Late collision, re-enable transmitter. */ | ||
| 958 | if (txstatusdword & IPG_TS_LATE_COLLISION) { | ||
| 959 | IPG_DEBUG_MSG("Late collision on transmit\n"); | ||
| 960 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
| 961 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
| 962 | } | ||
| 963 | |||
| 964 | /* Maximum collisions, re-enable transmitter. */ | ||
| 965 | if (txstatusdword & IPG_TS_TX_MAX_COLL) { | ||
| 966 | IPG_DEBUG_MSG("Maximum collisions on transmit\n"); | ||
| 967 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
| 968 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
| 969 | } | ||
| 970 | |||
| 971 | /* Transmit underrun, reset and re-enable | ||
| 972 | * transmitter. | ||
| 973 | */ | ||
| 974 | if (txstatusdword & IPG_TS_TX_UNDERRUN) { | ||
| 975 | IPG_DEBUG_MSG("Transmitter underrun\n"); | ||
| 976 | sp->stats.tx_fifo_errors++; | ||
| 977 | ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | | ||
| 978 | IPG_AC_NETWORK | IPG_AC_FIFO); | ||
| 979 | |||
| 980 | /* Re-configure after DMA reset. */ | ||
| 981 | if (ipg_io_config(dev) < 0) { | ||
| 982 | netdev_info(dev, "Error during re-configuration\n"); | ||
| 983 | } | ||
| 984 | init_tfdlist(dev); | ||
| 985 | |||
| 986 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
| 987 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
| 988 | } | ||
| 989 | } | ||
| 990 | |||
| 991 | ipg_nic_txfree(dev); | ||
| 992 | } | ||
| 993 | |||
| 994 | /* Provides statistical information about the IPG NIC. */ | ||
| 995 | static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) | ||
| 996 | { | ||
| 997 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 998 | void __iomem *ioaddr = sp->ioaddr; | ||
| 999 | u16 temp1; | ||
| 1000 | u16 temp2; | ||
| 1001 | |||
| 1002 | IPG_DEBUG_MSG("_nic_get_stats\n"); | ||
| 1003 | |||
| 1004 | /* Check to see if the NIC has been initialized via nic_open, | ||
| 1005 | * before trying to read statistic registers. | ||
| 1006 | */ | ||
| 1007 | if (!netif_running(dev)) | ||
| 1008 | return &sp->stats; | ||
| 1009 | |||
| 1010 | sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); | ||
| 1011 | sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); | ||
| 1012 | sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); | ||
| 1013 | sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); | ||
| 1014 | temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); | ||
| 1015 | sp->stats.rx_errors += temp1; | ||
| 1016 | sp->stats.rx_missed_errors += temp1; | ||
| 1017 | temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + | ||
| 1018 | ipg_r32(IPG_LATECOLLISIONS); | ||
| 1019 | temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); | ||
| 1020 | sp->stats.collisions += temp1; | ||
| 1021 | sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); | ||
| 1022 | sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + | ||
| 1023 | ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; | ||
| 1024 | sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); | ||
| 1025 | |||
| 1026 | /* detailed tx_errors */ | ||
| 1027 | sp->stats.tx_carrier_errors += temp2; | ||
| 1028 | |||
| 1029 | /* detailed rx_errors */ | ||
| 1030 | sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + | ||
| 1031 | ipg_r16(IPG_FRAMETOOLONGERRORS); | ||
| 1032 | sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); | ||
| 1033 | |||
| 1034 | /* Unutilized IPG statistic registers. */ | ||
| 1035 | ipg_r32(IPG_MCSTFRAMESRCVDOK); | ||
| 1036 | |||
| 1037 | return &sp->stats; | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | /* Restore used receive buffers. */ | ||
| 1041 | static int ipg_nic_rxrestore(struct net_device *dev) | ||
| 1042 | { | ||
| 1043 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 1044 | const unsigned int curr = sp->rx_current; | ||
| 1045 | unsigned int dirty = sp->rx_dirty; | ||
| 1046 | |||
| 1047 | IPG_DEBUG_MSG("_nic_rxrestore\n"); | ||
| 1048 | |||
| 1049 | for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { | ||
| 1050 | unsigned int entry = dirty % IPG_RFDLIST_LENGTH; | ||
| 1051 | |||
| 1052 | /* rx_copybreak may poke hole here and there. */ | ||
| 1053 | if (sp->rx_buff[entry]) | ||
| 1054 | continue; | ||
| 1055 | |||
| 1056 | /* Generate a new receive buffer to replace the | ||
| 1057 | * current buffer (which will be released by the | ||
| 1058 | * Linux system). | ||
| 1059 | */ | ||
| 1060 | if (ipg_get_rxbuff(dev, entry) < 0) { | ||
| 1061 | IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n"); | ||
| 1062 | |||
| 1063 | break; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | /* Reset the RFS field. */ | ||
| 1067 | sp->rxd[entry].rfs = 0x0000000000000000; | ||
| 1068 | } | ||
| 1069 | sp->rx_dirty = dirty; | ||
| 1070 | |||
| 1071 | return 0; | ||
| 1072 | } | ||
| 1073 | |||
/* Jumbo frame reassembly state machine (tracked in sp->jumbo).
 * NOTE(review): this comment originally referred to "jumboindex" and
 * "jumbosize"; the code below uses jumbo->found_start and
 * jumbo->current_size -- the states appear to map as follows; confirm:
 * 1. found_start == 0, current_size == 0 : previous jumbo frame done,
 *    nothing in progress.
 * 2. found_start != 0, current_size != 0 : a jumbo frame is being
 *    reassembled and is not over size yet.
 * 3. found_start == 0, current_size != 0 : the frame exceeded the
 *    supported size; the partial frame was dropped and the remaining
 *    fragments are being discarded.
 */
/* Result codes returned by ipg_nic_rx_check_error(). */
enum {
	NORMAL_PACKET,
	ERROR_PACKET
};

/* Frame classification codes: start-of-frame contributes 1 and
 * end-of-frame contributes 10, so each combination sums to a
 * distinct value.
 */
enum {
	FRAME_NO_START_NO_END	= 0,
	FRAME_WITH_START	= 1,
	FRAME_WITH_END	= 10,
	FRAME_WITH_START_WITH_END = 11
};
| 1092 | |||
| 1093 | static void ipg_nic_rx_free_skb(struct net_device *dev) | ||
| 1094 | { | ||
| 1095 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 1096 | unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; | ||
| 1097 | |||
| 1098 | if (sp->rx_buff[entry]) { | ||
| 1099 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
| 1100 | |||
| 1101 | pci_unmap_single(sp->pdev, | ||
| 1102 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
| 1103 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 1104 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
| 1105 | sp->rx_buff[entry] = NULL; | ||
| 1106 | } | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | static int ipg_nic_rx_check_frame_type(struct net_device *dev) | ||
| 1110 | { | ||
| 1111 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 1112 | struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); | ||
| 1113 | int type = FRAME_NO_START_NO_END; | ||
| 1114 | |||
| 1115 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) | ||
| 1116 | type += FRAME_WITH_START; | ||
| 1117 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) | ||
| 1118 | type += FRAME_WITH_END; | ||
| 1119 | return type; | ||
| 1120 | } | ||
| 1121 | |||
/*
 * Inspect the current RFD's status for receive errors. On error
 * (and when IPG_DROP_ON_RX_ETH_ERRORS is set) the statistics are
 * updated and the associated receive buffer is unmapped and freed;
 * returns ERROR_PACKET in that case, NORMAL_PACKET otherwise.
 */
static int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred\n");
			sp->stats.rx_length_errors++;
		}

		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
		 * error count handled by a IPG statistic register.
		 */

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred\n");
			sp->stats.rx_frame_errors++;
		}

		/* Do nothing for IPG_RFS_RXFCSERROR, error count
		 * handled by a IPG statistic register.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			dev_kfree_skb_irq(sp->rx_buff[entry]);
			sp->rx_buff[entry] = NULL;
		}
		return ERROR_PACKET;
	}
	return NORMAL_PACKET;
}
| 1179 | |||
| 1180 | static void ipg_nic_rx_with_start_and_end(struct net_device *dev, | ||
| 1181 | struct ipg_nic_private *sp, | ||
| 1182 | struct ipg_rx *rxfd, unsigned entry) | ||
| 1183 | { | ||
| 1184 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
| 1185 | struct sk_buff *skb; | ||
| 1186 | int framelen; | ||
| 1187 | |||
| 1188 | if (jumbo->found_start) { | ||
| 1189 | dev_kfree_skb_irq(jumbo->skb); | ||
| 1190 | jumbo->found_start = 0; | ||
| 1191 | jumbo->current_size = 0; | ||
| 1192 | jumbo->skb = NULL; | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | /* 1: found error, 0 no error */ | ||
| 1196 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
| 1197 | return; | ||
| 1198 | |||
| 1199 | skb = sp->rx_buff[entry]; | ||
| 1200 | if (!skb) | ||
| 1201 | return; | ||
| 1202 | |||
| 1203 | /* accept this frame and send to upper layer */ | ||
| 1204 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
| 1205 | if (framelen > sp->rxfrag_size) | ||
| 1206 | framelen = sp->rxfrag_size; | ||
| 1207 | |||
| 1208 | skb_put(skb, framelen); | ||
| 1209 | skb->protocol = eth_type_trans(skb, dev); | ||
| 1210 | skb_checksum_none_assert(skb); | ||
| 1211 | netif_rx(skb); | ||
| 1212 | sp->rx_buff[entry] = NULL; | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | static void ipg_nic_rx_with_start(struct net_device *dev, | ||
| 1216 | struct ipg_nic_private *sp, | ||
| 1217 | struct ipg_rx *rxfd, unsigned entry) | ||
| 1218 | { | ||
| 1219 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
| 1220 | struct pci_dev *pdev = sp->pdev; | ||
| 1221 | struct sk_buff *skb; | ||
| 1222 | |||
| 1223 | /* 1: found error, 0 no error */ | ||
| 1224 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
| 1225 | return; | ||
| 1226 | |||
| 1227 | /* accept this frame and send to upper layer */ | ||
| 1228 | skb = sp->rx_buff[entry]; | ||
| 1229 | if (!skb) | ||
| 1230 | return; | ||
| 1231 | |||
| 1232 | if (jumbo->found_start) | ||
| 1233 | dev_kfree_skb_irq(jumbo->skb); | ||
| 1234 | |||
| 1235 | pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
| 1236 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 1237 | |||
| 1238 | skb_put(skb, sp->rxfrag_size); | ||
| 1239 | |||
| 1240 | jumbo->found_start = 1; | ||
| 1241 | jumbo->current_size = sp->rxfrag_size; | ||
| 1242 | jumbo->skb = skb; | ||
| 1243 | |||
| 1244 | sp->rx_buff[entry] = NULL; | ||
| 1245 | } | ||
| 1246 | |||
/*
 * Handle an RFD carrying the final fragment of a jumbo frame: copy the
 * trailing bytes into the reassembly sk_buff, deliver it to the stack
 * (unless the total length exceeds what we support), and reset the
 * jumbo state. On a receive error the partial jumbo frame is dropped.
 */
static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (!skb)
			return;

		if (jumbo->found_start) {
			int framelen, endframelen;

			/* Total frame length as reported by hardware;
			 * the end fragment supplies the remainder past
			 * what has been accumulated so far.
			 */
			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->current_size;
			if (framelen > sp->rxsupport_size)
				dev_kfree_skb_irq(jumbo->skb);
			else {
				/* NOTE(review): endframelen is assumed
				 * non-negative and within the head skb's
				 * tailroom -- confirm against the sizes
				 * used in ipg_nic_rx_with_start().
				 */
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				skb_checksum_none_assert(jumbo->skb);
				netif_rx(jumbo->skb);
			}
		}

		/* Reassembly finished either way; reset jumbo state. */
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;

		ipg_nic_rx_free_skb(dev);
	} else {
		/* Receive error: abandon the jumbo frame in progress.
		 * NOTE(review): jumbo->skb may be NULL here when no
		 * reassembly was active -- confirm dev_kfree_skb_irq()
		 * tolerates NULL on this kernel version.
		 */
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}
| 1292 | |||
| 1293 | static void ipg_nic_rx_no_start_no_end(struct net_device *dev, | ||
| 1294 | struct ipg_nic_private *sp, | ||
| 1295 | struct ipg_rx *rxfd, unsigned entry) | ||
| 1296 | { | ||
| 1297 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
| 1298 | |||
| 1299 | /* 1: found error, 0 no error */ | ||
| 1300 | if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { | ||
| 1301 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
| 1302 | |||
| 1303 | if (skb) { | ||
| 1304 | if (jumbo->found_start) { | ||
| 1305 | jumbo->current_size += sp->rxfrag_size; | ||
| 1306 | if (jumbo->current_size <= sp->rxsupport_size) { | ||
| 1307 | memcpy(skb_put(jumbo->skb, | ||
| 1308 | sp->rxfrag_size), | ||
| 1309 | skb->data, sp->rxfrag_size); | ||
| 1310 | } | ||
| 1311 | } | ||
| 1312 | ipg_nic_rx_free_skb(dev); | ||
| 1313 | } | ||
| 1314 | } else { | ||
| 1315 | dev_kfree_skb_irq(jumbo->skb); | ||
| 1316 | jumbo->found_start = 0; | ||
| 1317 | jumbo->current_size = 0; | ||
| 1318 | jumbo->skb = NULL; | ||
| 1319 | } | ||
| 1320 | } | ||
| 1321 | |||
/* Receive path used in jumbo-frame mode: walk completed RFDs and dispatch
 * each one to the proper fragment handler based on its START/END flags.
 * Always returns 0.
 */
static int ipg_nic_rx_jumbo(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;	/* used by ipg_w32/ipg_r32 */
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

	/* Process at most IPG_MAXRFDPROCESS_COUNT descriptors per call to
	 * bound the time spent in this (interrupt) context.
	 */
	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct ipg_rx *rxfd = sp->rxd + entry;

		/* Stop at the first descriptor the hardware has not
		 * completed yet.
		 */
		if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
			break;

		/* Dispatch on the fragment's position within the frame. */
		switch (ipg_nic_rx_check_frame_type(dev)) {
		case FRAME_WITH_START_WITH_END:
			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_START:
			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_END:
			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
			break;
		case FRAME_NO_START_NO_END:
			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
			break;
		}
	}

	sp->rx_current = curr;

	if (i == IPG_MAXRFDPROCESS_COUNT) {
		/* There are more RFDs to process, however the
		 * allocated amount of RFD processing time has
		 * expired. Assert Interrupt Requested to make
		 * sure we come back to process the remaining RFDs.
		 */
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
	}

	/* Replenish receive buffers for the descriptors just consumed. */
	ipg_nic_rxrestore(dev);

	return 0;
}
| 1369 | |||
| 1370 | static int ipg_nic_rx(struct net_device *dev) | ||
| 1371 | { | ||
| 1372 | /* Transfer received Ethernet frames to higher network layers. */ | ||
| 1373 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 1374 | unsigned int curr = sp->rx_current; | ||
| 1375 | void __iomem *ioaddr = sp->ioaddr; | ||
| 1376 | struct ipg_rx *rxfd; | ||
| 1377 | unsigned int i; | ||
| 1378 | |||
| 1379 | IPG_DEBUG_MSG("_nic_rx\n"); | ||
| 1380 | |||
| 1381 | #define __RFS_MASK \ | ||
| 1382 | cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) | ||
| 1383 | |||
| 1384 | for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { | ||
| 1385 | unsigned int entry = curr % IPG_RFDLIST_LENGTH; | ||
| 1386 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
| 1387 | unsigned int framelen; | ||
| 1388 | |||
| 1389 | rxfd = sp->rxd + entry; | ||
| 1390 | |||
| 1391 | if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) | ||
| 1392 | break; | ||
| 1393 | |||
| 1394 | /* Get received frame length. */ | ||
| 1395 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
| 1396 | |||
| 1397 | /* Check for jumbo frame arrival with too small | ||
| 1398 | * RXFRAG_SIZE. | ||
| 1399 | */ | ||
| 1400 | if (framelen > sp->rxfrag_size) { | ||
| 1401 | IPG_DEBUG_MSG | ||
| 1402 | ("RFS FrameLen > allocated fragment size\n"); | ||
| 1403 | |||
| 1404 | framelen = sp->rxfrag_size; | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & | ||
| 1408 | (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | | ||
| 1409 | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | | ||
| 1410 | IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { | ||
| 1411 | |||
| 1412 | IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", | ||
| 1413 | (unsigned long int) rxfd->rfs); | ||
| 1414 | |||
| 1415 | /* Increment general receive error statistic. */ | ||
| 1416 | sp->stats.rx_errors++; | ||
| 1417 | |||
| 1418 | /* Increment detailed receive error statistics. */ | ||
| 1419 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { | ||
| 1420 | IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); | ||
| 1421 | sp->stats.rx_fifo_errors++; | ||
| 1422 | } | ||
| 1423 | |||
| 1424 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { | ||
| 1425 | IPG_DEBUG_MSG("RX runt occurred\n"); | ||
| 1426 | sp->stats.rx_length_errors++; | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; | ||
| 1430 | /* Do nothing, error count handled by a IPG | ||
| 1431 | * statistic register. | ||
| 1432 | */ | ||
| 1433 | |||
| 1434 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { | ||
| 1435 | IPG_DEBUG_MSG("RX alignment error occurred\n"); | ||
| 1436 | sp->stats.rx_frame_errors++; | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; | ||
| 1440 | /* Do nothing, error count handled by a IPG | ||
| 1441 | * statistic register. | ||
| 1442 | */ | ||
| 1443 | |||
| 1444 | /* Free the memory associated with the RX | ||
| 1445 | * buffer since it is erroneous and we will | ||
| 1446 | * not pass it to higher layer processes. | ||
| 1447 | */ | ||
| 1448 | if (skb) { | ||
| 1449 | __le64 info = rxfd->frag_info; | ||
| 1450 | |||
| 1451 | pci_unmap_single(sp->pdev, | ||
| 1452 | le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, | ||
| 1453 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 1454 | |||
| 1455 | dev_kfree_skb_irq(skb); | ||
| 1456 | } | ||
| 1457 | } else { | ||
| 1458 | |||
| 1459 | /* Adjust the new buffer length to accommodate the size | ||
| 1460 | * of the received frame. | ||
| 1461 | */ | ||
| 1462 | skb_put(skb, framelen); | ||
| 1463 | |||
| 1464 | /* Set the buffer's protocol field to Ethernet. */ | ||
| 1465 | skb->protocol = eth_type_trans(skb, dev); | ||
| 1466 | |||
| 1467 | /* The IPG encountered an error with (or | ||
| 1468 | * there were no) IP/TCP/UDP checksums. | ||
| 1469 | * This may or may not indicate an invalid | ||
| 1470 | * IP/TCP/UDP frame was received. Let the | ||
| 1471 | * upper layer decide. | ||
| 1472 | */ | ||
| 1473 | skb_checksum_none_assert(skb); | ||
| 1474 | |||
| 1475 | /* Hand off frame for higher layer processing. | ||
| 1476 | * The function netif_rx() releases the sk_buff | ||
| 1477 | * when processing completes. | ||
| 1478 | */ | ||
| 1479 | netif_rx(skb); | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | /* Assure RX buffer is not reused by IPG. */ | ||
| 1483 | sp->rx_buff[entry] = NULL; | ||
| 1484 | } | ||
| 1485 | |||
| 1486 | /* | ||
| 1487 | * If there are more RFDs to process and the allocated amount of RFD | ||
| 1488 | * processing time has expired, assert Interrupt Requested to make | ||
| 1489 | * sure we come back to process the remaining RFDs. | ||
| 1490 | */ | ||
| 1491 | if (i == IPG_MAXRFDPROCESS_COUNT) | ||
| 1492 | ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); | ||
| 1493 | |||
| 1494 | #ifdef IPG_DEBUG | ||
| 1495 | /* Check if the RFD list contained no receive frame data. */ | ||
| 1496 | if (!i) | ||
| 1497 | sp->EmptyRFDListCount++; | ||
| 1498 | #endif | ||
| 1499 | while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && | ||
| 1500 | !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && | ||
| 1501 | (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { | ||
| 1502 | unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; | ||
| 1503 | |||
| 1504 | rxfd = sp->rxd + entry; | ||
| 1505 | |||
| 1506 | IPG_DEBUG_MSG("Frame requires multiple RFDs\n"); | ||
| 1507 | |||
| 1508 | /* An unexpected event, additional code needed to handle | ||
| 1509 | * properly. So for the time being, just disregard the | ||
| 1510 | * frame. | ||
| 1511 | */ | ||
| 1512 | |||
| 1513 | /* Free the memory associated with the RX | ||
| 1514 | * buffer since it is erroneous and we will | ||
| 1515 | * not pass it to higher layer processes. | ||
| 1516 | */ | ||
| 1517 | if (sp->rx_buff[entry]) { | ||
| 1518 | pci_unmap_single(sp->pdev, | ||
| 1519 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
| 1520 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 1521 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | /* Assure RX buffer is not reused by IPG. */ | ||
| 1525 | sp->rx_buff[entry] = NULL; | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | sp->rx_current = curr; | ||
| 1529 | |||
| 1530 | /* Check to see if there are a minimum number of used | ||
| 1531 | * RFDs before restoring any (should improve performance.) | ||
| 1532 | */ | ||
| 1533 | if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) | ||
| 1534 | ipg_nic_rxrestore(dev); | ||
| 1535 | |||
| 1536 | return 0; | ||
| 1537 | } | ||
| 1538 | |||
/* Delayed-work handler scheduled from the interrupt handler on a
 * HostError interrupt.  Resets the controller and rebuilds the
 * descriptor rings; reschedules itself if the hardware cannot be
 * reconfigured.  The step order (reset, ring init, I/O config) matters.
 */
static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	/*
	 * Acknowledge HostError interrupt by resetting
	 * IPG DMA and HOST.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	/* Rebuild both descriptor rings before touching I/O config. */
	init_rfdlist(dev);
	init_tfdlist(dev);

	if (ipg_io_config(dev) < 0) {
		netdev_info(dev, "Cannot recover from PCI error\n");
		/* Retry recovery in one second. */
		schedule_delayed_work(&sp->task, HZ);
	}
}
| 1559 | |||
/* Interrupt service routine (registered IRQF_SHARED).  Reading
 * INT_STATUS_ACK both fetches and acknowledges the interrupt sources and
 * disables all IPG interrupts; they are re-enabled at out_enable.
 * Returns IRQ_HANDLED when any of this device's status bits were set.
 */
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;	/* used by ipg_r16/ipg_w16 */
	unsigned int handled = 0;
	u16 status;

	IPG_DEBUG_MSG("_interrupt_handler\n");

	/* In jumbo mode, replenish RX buffers before taking the lock. */
	if (sp->is_jumbo)
		ipg_nic_rxrestore(dev);

	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge
	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
	 * IntRequested, MacControlFrame, LinkEvent) interrupts
	 * if issued. Also, all IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);

	/* Shared IRQ of remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	/* NOTE: when the device is not running, interrupts are left
	 * disabled (out_unlock skips the re-enable at out_enable).
	 */
	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not read for use by the
		 * IPG.) Try to restore all RFDs.
		 */
		ipg_nic_rxrestore(dev);

#ifdef IPG_DEBUG
		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
#endif
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames. */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
#ifdef IPG_DEBUG
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list. */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
				IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
			       (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
				IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
				IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;
#endif

		/* Jumbo and standard modes use distinct receive paths. */
		if (sp->is_jumbo)
			ipg_nic_rx_jumbo(dev);
		else
			ipg_nic_rx(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		/* Recovery needs process context; defer to a workqueue. */
		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			netdev_info(dev, "Auto-negotiation error\n");
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt\n");

out_enable:
	/* Re-enable IPG interrupts. */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}
| 1683 | |||
| 1684 | static void ipg_rx_clear(struct ipg_nic_private *sp) | ||
| 1685 | { | ||
| 1686 | unsigned int i; | ||
| 1687 | |||
| 1688 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
| 1689 | if (sp->rx_buff[i]) { | ||
| 1690 | struct ipg_rx *rxfd = sp->rxd + i; | ||
| 1691 | |||
| 1692 | dev_kfree_skb_irq(sp->rx_buff[i]); | ||
| 1693 | sp->rx_buff[i] = NULL; | ||
| 1694 | pci_unmap_single(sp->pdev, | ||
| 1695 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
| 1696 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 1697 | } | ||
| 1698 | } | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | static void ipg_tx_clear(struct ipg_nic_private *sp) | ||
| 1702 | { | ||
| 1703 | unsigned int i; | ||
| 1704 | |||
| 1705 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
| 1706 | if (sp->tx_buff[i]) { | ||
| 1707 | struct ipg_tx *txfd = sp->txd + i; | ||
| 1708 | |||
| 1709 | pci_unmap_single(sp->pdev, | ||
| 1710 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
| 1711 | sp->tx_buff[i]->len, PCI_DMA_TODEVICE); | ||
| 1712 | |||
| 1713 | dev_kfree_skb_irq(sp->tx_buff[i]); | ||
| 1714 | |||
| 1715 | sp->tx_buff[i] = NULL; | ||
| 1716 | } | ||
| 1717 | } | ||
| 1718 | } | ||
| 1719 | |||
/* ndo_open: bring the interface up.  Acquires the IRQ, allocates both
 * descriptor rings, initializes them, configures the hardware, resolves
 * autonegotiation, and starts the transmit queue.  Resources are released
 * in reverse order on failure via the goto-cleanup chain.
 * Returns 0 on success or a negative errno.
 */
static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;	/* used by ipg_w16 */
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = sp->rxsupport_size;

	/* Check for interrupt line conflicts, and request interrupt
	 * line for IPG.
	 *
	 * IMPORTANT: Disable IPG interrupts prior to registering
	 * IRQ.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		netdev_info(dev, "Error when requesting interrupt\n");
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;

	/* Allocate DMA-coherent memory for the RX and TX descriptor rings. */
	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		netdev_info(dev, "Error during configuration\n");
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		netdev_info(dev, "Error during configuration\n");
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		netdev_info(dev, "Auto-negotiation error\n");

	/* initialize JUMBO Frame control variable */
	sp->jumbo.found_start = 0;
	sp->jumbo.current_size = 0;
	sp->jumbo.skb = NULL;

	/* Enable transmit and receive operation of the IPG. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		 IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}
| 1805 | |||
| 1806 | static int ipg_nic_stop(struct net_device *dev) | ||
| 1807 | { | ||
| 1808 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 1809 | void __iomem *ioaddr = sp->ioaddr; | ||
| 1810 | struct pci_dev *pdev = sp->pdev; | ||
| 1811 | |||
| 1812 | IPG_DEBUG_MSG("_nic_stop\n"); | ||
| 1813 | |||
| 1814 | netif_stop_queue(dev); | ||
| 1815 | |||
| 1816 | IPG_DUMPTFDLIST(dev); | ||
| 1817 | |||
| 1818 | do { | ||
| 1819 | (void) ipg_r16(INT_STATUS_ACK); | ||
| 1820 | |||
| 1821 | ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); | ||
| 1822 | |||
| 1823 | synchronize_irq(pdev->irq); | ||
| 1824 | } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); | ||
| 1825 | |||
| 1826 | ipg_rx_clear(sp); | ||
| 1827 | |||
| 1828 | ipg_tx_clear(sp); | ||
| 1829 | |||
| 1830 | pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); | ||
| 1831 | pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); | ||
| 1832 | |||
| 1833 | free_irq(pdev->irq, dev); | ||
| 1834 | |||
| 1835 | return 0; | ||
| 1836 | } | ||
| 1837 | |||
/* ndo_start_xmit: build one transmit frame descriptor (TFD) for the skb
 * and kick the DMA engine.  The descriptor is fully constructed before
 * TFDDONE is cleared, which is what hands ownership to the hardware, so
 * the field-write order in this function must not be changed.
 * Always returns NETDEV_TX_OK (the skb is always consumed).
 */
static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;	/* used by ipg_w32 */
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	/* One-shot flag set elsewhere to restart from descriptor 0. */
	if (sp->reset_current_tfd) {
		sp->reset_current_tfd = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->tx_buff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));
	/*
	 * 16--17 (WordAlign) <- 3 (disable),
	 * 0--15 (FrameId) <- sp->tx_current,
	 * 24--27 (FragCount) <- 1
	 */

	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to be
	 * inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* The fragment start location within system memory is defined
	 * by the sk_buff structure's data field. The physical address
	 * of this location within the system's virtual memory space
	 * is determined using the IPG_HOST2BUS_MAP function.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The length of the fragment within system memory is defined by
	 * the sk_buff structure's len field.
	 */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	sp->tx_current++;

	/* Order descriptor writes before the doorbell MMIO write. */
	mmiowb();

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);

	/* Stop the queue when the ring is full. */
	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}
| 1948 | |||
/* Write the board-revision specific PHY register defaults from the
 * DefaultPhyParam table.  Each table record is a header word (low byte:
 * record size, high byte: board revision) followed by register/value
 * pairs; the table ends with a record of size 0.
 *
 * NOTE(review): the size appears to be counted in bytes — the write loop
 * consumes one (address, value) pair of u16s per iteration and subtracts
 * 4, and the skip path advances length/2 u16 entries.  Confirm against
 * the DefaultPhyParam definition.
 */
static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	const unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			/* Matching record: write each register/value pair. */
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				length -= 4;
			}
			break;
		} else {
			/* Skip this record and read the next header. */
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}
| 1979 | |||
| 1980 | static int read_eeprom(struct net_device *dev, int eep_addr) | ||
| 1981 | { | ||
| 1982 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
| 1983 | unsigned int i; | ||
| 1984 | int ret = 0; | ||
| 1985 | u16 value; | ||
| 1986 | |||
| 1987 | value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); | ||
| 1988 | ipg_w16(value, EEPROM_CTRL); | ||
| 1989 | |||
| 1990 | for (i = 0; i < 1000; i++) { | ||
| 1991 | u16 data; | ||
| 1992 | |||
| 1993 | mdelay(10); | ||
| 1994 | data = ipg_r16(EEPROM_CTRL); | ||
| 1995 | if (!(data & IPG_EC_EEPROM_BUSY)) { | ||
| 1996 | ret = ipg_r16(EEPROM_DATA); | ||
| 1997 | break; | ||
| 1998 | } | ||
| 1999 | } | ||
| 2000 | return ret; | ||
| 2001 | } | ||
| 2002 | |||
| 2003 | static void ipg_init_mii(struct net_device *dev) | ||
| 2004 | { | ||
| 2005 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2006 | struct mii_if_info *mii_if = &sp->mii_if; | ||
| 2007 | int phyaddr; | ||
| 2008 | |||
| 2009 | mii_if->dev = dev; | ||
| 2010 | mii_if->mdio_read = mdio_read; | ||
| 2011 | mii_if->mdio_write = mdio_write; | ||
| 2012 | mii_if->phy_id_mask = 0x1f; | ||
| 2013 | mii_if->reg_num_mask = 0x1f; | ||
| 2014 | |||
| 2015 | mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); | ||
| 2016 | |||
| 2017 | if (phyaddr != 0x1f) { | ||
| 2018 | u16 mii_phyctrl, mii_1000cr; | ||
| 2019 | |||
| 2020 | mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); | ||
| 2021 | mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | | ||
| 2022 | GMII_PHY_1000BASETCONTROL_PreferMaster; | ||
| 2023 | mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); | ||
| 2024 | |||
| 2025 | mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); | ||
| 2026 | |||
| 2027 | /* Set default phyparam */ | ||
| 2028 | ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); | ||
| 2029 | |||
| 2030 | /* Reset PHY */ | ||
| 2031 | mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; | ||
| 2032 | mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); | ||
| 2033 | |||
| 2034 | } | ||
| 2035 | } | ||
| 2036 | |||
/* One-time hardware bring-up at probe: read LED config from EEPROM,
 * reset the controller, set up MII, and program/read back the station
 * (MAC) address.  Returns 0 on success or a negative errno from the
 * reset.
 */
static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;	/* used by ipg_w16/ipg_r16 */
	unsigned int i;
	int rc;

	/* Read/Write and Reset EEPROM Value */
	/* Read LED Mode Configuration from EEPROM */
	sp->led_mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. Do not assert
	 * RST_OUT as not compatible with some PHYs.
	 */
	rc = ipg_reset(dev, IPG_RESET_MASK);
	if (rc < 0)
		goto out;

	ipg_init_mii(dev);

	/* Read MAC Address from EEPROM (three 16-bit words at offset 16). */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	/* Program the station address into the controller. */
	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set station address in ethernet_device structure, reading it
	 * back from the hardware registers byte by byte.
	 */
	dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
out:
	return rc;
}
| 2074 | |||
| 2075 | static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
| 2076 | { | ||
| 2077 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2078 | int rc; | ||
| 2079 | |||
| 2080 | mutex_lock(&sp->mii_mutex); | ||
| 2081 | rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); | ||
| 2082 | mutex_unlock(&sp->mii_mutex); | ||
| 2083 | |||
| 2084 | return rc; | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) | ||
| 2088 | { | ||
| 2089 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2090 | int err; | ||
| 2091 | |||
| 2092 | /* Function to accommodate changes to Maximum Transfer Unit | ||
| 2093 | * (or MTU) of IPG NIC. Cannot use default function since | ||
| 2094 | * the default will not allow for MTU > 1500 bytes. | ||
| 2095 | */ | ||
| 2096 | |||
| 2097 | IPG_DEBUG_MSG("_nic_change_mtu\n"); | ||
| 2098 | |||
| 2099 | /* | ||
| 2100 | * Check that the new MTU value is between 68 (14 byte header, 46 byte | ||
| 2101 | * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. | ||
| 2102 | */ | ||
| 2103 | if (new_mtu < 68 || new_mtu > 10240) | ||
| 2104 | return -EINVAL; | ||
| 2105 | |||
| 2106 | err = ipg_nic_stop(dev); | ||
| 2107 | if (err) | ||
| 2108 | return err; | ||
| 2109 | |||
| 2110 | dev->mtu = new_mtu; | ||
| 2111 | |||
| 2112 | sp->max_rxframe_size = new_mtu; | ||
| 2113 | |||
| 2114 | sp->rxfrag_size = new_mtu; | ||
| 2115 | if (sp->rxfrag_size > 4088) | ||
| 2116 | sp->rxfrag_size = 4088; | ||
| 2117 | |||
| 2118 | sp->rxsupport_size = sp->max_rxframe_size; | ||
| 2119 | |||
| 2120 | if (new_mtu > 0x0600) | ||
| 2121 | sp->is_jumbo = true; | ||
| 2122 | else | ||
| 2123 | sp->is_jumbo = false; | ||
| 2124 | |||
| 2125 | return ipg_nic_open(dev); | ||
| 2126 | } | ||
| 2127 | |||
| 2128 | static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 2129 | { | ||
| 2130 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2131 | int rc; | ||
| 2132 | |||
| 2133 | mutex_lock(&sp->mii_mutex); | ||
| 2134 | rc = mii_ethtool_gset(&sp->mii_if, cmd); | ||
| 2135 | mutex_unlock(&sp->mii_mutex); | ||
| 2136 | |||
| 2137 | return rc; | ||
| 2138 | } | ||
| 2139 | |||
| 2140 | static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 2141 | { | ||
| 2142 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2143 | int rc; | ||
| 2144 | |||
| 2145 | mutex_lock(&sp->mii_mutex); | ||
| 2146 | rc = mii_ethtool_sset(&sp->mii_if, cmd); | ||
| 2147 | mutex_unlock(&sp->mii_mutex); | ||
| 2148 | |||
| 2149 | return rc; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | static int ipg_nway_reset(struct net_device *dev) | ||
| 2153 | { | ||
| 2154 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
| 2155 | int rc; | ||
| 2156 | |||
| 2157 | mutex_lock(&sp->mii_mutex); | ||
| 2158 | rc = mii_nway_restart(&sp->mii_if); | ||
| 2159 | mutex_unlock(&sp->mii_mutex); | ||
| 2160 | |||
| 2161 | return rc; | ||
| 2162 | } | ||
| 2163 | |||
/* ethtool operations: link settings get/set and autoneg restart. */
static const struct ethtool_ops ipg_ethtool_ops = {
	.get_settings = ipg_get_settings,
	.set_settings = ipg_set_settings,
	.nway_reset   = ipg_nway_reset,
};
| 2169 | |||
/*
 * PCI remove callback: tear down the interface in the reverse order of
 * ipg_probe().  The ordering is load-bearing: the netdev must be
 * unregistered before its MMIO mapping, PCI regions and the netdev
 * memory itself are released.
 */
static void ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	/* Unmap the BAR mapping created by pci_iomap() in probe. */
	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}
| 2187 | |||
/* net_device operations wired to the device in ipg_probe(). */
static const struct net_device_ops ipg_netdev_ops = {
	.ndo_open = ipg_nic_open,
	.ndo_stop = ipg_nic_stop,
	.ndo_start_xmit = ipg_nic_hard_start_xmit,
	.ndo_get_stats = ipg_nic_get_stats,
	.ndo_set_rx_mode = ipg_nic_set_multicast_list,
	.ndo_do_ioctl = ipg_ioctl,
	.ndo_tx_timeout = ipg_tx_timeout,
	.ndo_change_mtu = ipg_nic_change_mtu,
	/* Generic helpers are sufficient for MAC address handling. */
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
| 2200 | |||
/*
 * PCI probe callback: enable the device, configure DMA masks, allocate
 * and initialize the net_device, map the MMIO register window (BAR 1),
 * run the hardware init and register the interface.  On failure,
 * resources are released in reverse acquisition order via the numbered
 * error labels at the bottom.
 */
static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;	/* index into ipg_brand_name[] */
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	/* Prefer 40-bit DMA addressing; fall back to a 32-bit mask. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			pr_err("%s: DMA config failed\n", pci_name(pdev));
			goto err_disable_0;
		}
	}

	/*
	 * Initialize net device.
	 */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	/* Default receive buffer geometry; revised by ndo_change_mtu. */
	sp->is_jumbo = IPG_IS_JUMBO;
	sp->rxfrag_size = IPG_RXFRAG_SIZE;
	sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
	sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;

	/* Declare IPG NIC functions for Ethernet device methods.
	 */
	dev->netdev_ops = &ipg_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &ipg_ethtool_ops;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	/* Map the memory-mapped register window in BAR 1. */
	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		pr_err("%s: cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	/* Deferred reset work; nothing is scheduled until an error occurs. */
	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	netdev_info(dev, "Ethernet device registered\n");
out:
	return rc;

	/* Error unwind: release in reverse order of acquisition. */
err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
| 2292 | |||
/* PCI driver glue; ipg_pci_tbl (defined elsewhere) lists supported IDs. */
static struct pci_driver ipg_pci_driver = {
	.name = IPG_DRIVER_NAME,
	.id_table = ipg_pci_tbl,
	.probe = ipg_probe,
	.remove = ipg_remove,
};

/* Registers the driver on module load and unregisters it on unload. */
module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h deleted file mode 100644 index de606281f97b..000000000000 --- a/drivers/net/ethernet/icplus/ipg.h +++ /dev/null | |||
| @@ -1,748 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Include file for Gigabit Ethernet device driver for Network | ||
| 3 | * Interface Cards (NICs) utilizing the Tamarack Microelectronics | ||
| 4 | * Inc. IPG Gigabit or Triple Speed Ethernet Media Access | ||
| 5 | * Controller. | ||
| 6 | */ | ||
| 7 | #ifndef __LINUX_IPG_H | ||
| 8 | #define __LINUX_IPG_H | ||
| 9 | |||
| 10 | #include <linux/module.h> | ||
| 11 | |||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/pci.h> | ||
| 14 | #include <linux/ioport.h> | ||
| 15 | #include <linux/errno.h> | ||
| 16 | #include <asm/io.h> | ||
| 17 | #include <linux/delay.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | #include <linux/netdevice.h> | ||
| 20 | #include <linux/etherdevice.h> | ||
| 21 | #include <linux/skbuff.h> | ||
| 22 | #include <asm/bitops.h> | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Constants | ||
| 26 | */ | ||
| 27 | |||
| 28 | /* GMII based PHY IDs */ | ||
| 29 | #define NS 0x2000 | ||
| 30 | #define MARVELL 0x0141 | ||
| 31 | #define ICPLUS_PHY 0x243 | ||
| 32 | |||
| 33 | /* NIC Physical Layer Device MII register fields. */ | ||
| 34 | #define MII_PHY_SELECTOR_IEEE8023 0x0001 | ||
| 35 | #define MII_PHY_TECHABILITYFIELD 0x1FE0 | ||
| 36 | |||
| 37 | /* GMII_PHY_1000 need to set to prefer master */ | ||
| 38 | #define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 | ||
| 39 | |||
| 40 | /* NIC Physical Layer Device GMII constants. */ | ||
| 41 | #define GMII_PREAMBLE 0xFFFFFFFF | ||
| 42 | #define GMII_ST 0x1 | ||
| 43 | #define GMII_READ 0x2 | ||
| 44 | #define GMII_WRITE 0x1 | ||
| 45 | #define GMII_TA_READ_MASK 0x1 | ||
| 46 | #define GMII_TA_WRITE 0x2 | ||
| 47 | |||
| 48 | /* I/O register offsets. */ | ||
| 49 | enum ipg_regs { | ||
| 50 | DMA_CTRL = 0x00, | ||
| 51 | RX_DMA_STATUS = 0x08, /* Unused + reserved */ | ||
| 52 | TFD_LIST_PTR_0 = 0x10, | ||
| 53 | TFD_LIST_PTR_1 = 0x14, | ||
| 54 | TX_DMA_BURST_THRESH = 0x18, | ||
| 55 | TX_DMA_URGENT_THRESH = 0x19, | ||
| 56 | TX_DMA_POLL_PERIOD = 0x1a, | ||
| 57 | RFD_LIST_PTR_0 = 0x1c, | ||
| 58 | RFD_LIST_PTR_1 = 0x20, | ||
| 59 | RX_DMA_BURST_THRESH = 0x24, | ||
| 60 | RX_DMA_URGENT_THRESH = 0x25, | ||
| 61 | RX_DMA_POLL_PERIOD = 0x26, | ||
| 62 | DEBUG_CTRL = 0x2c, | ||
| 63 | ASIC_CTRL = 0x30, | ||
| 64 | FIFO_CTRL = 0x38, /* Unused */ | ||
| 65 | FLOW_OFF_THRESH = 0x3c, | ||
| 66 | FLOW_ON_THRESH = 0x3e, | ||
| 67 | EEPROM_DATA = 0x48, | ||
| 68 | EEPROM_CTRL = 0x4a, | ||
| 69 | EXPROM_ADDR = 0x4c, /* Unused */ | ||
| 70 | EXPROM_DATA = 0x50, /* Unused */ | ||
| 71 | WAKE_EVENT = 0x51, /* Unused */ | ||
| 72 | COUNTDOWN = 0x54, /* Unused */ | ||
| 73 | INT_STATUS_ACK = 0x5a, | ||
| 74 | INT_ENABLE = 0x5c, | ||
| 75 | INT_STATUS = 0x5e, /* Unused */ | ||
| 76 | TX_STATUS = 0x60, | ||
| 77 | MAC_CTRL = 0x6c, | ||
| 78 | VLAN_TAG = 0x70, /* Unused */ | ||
| 79 | PHY_SET = 0x75, | ||
| 80 | PHY_CTRL = 0x76, | ||
| 81 | STATION_ADDRESS_0 = 0x78, | ||
| 82 | STATION_ADDRESS_1 = 0x7a, | ||
| 83 | STATION_ADDRESS_2 = 0x7c, | ||
| 84 | MAX_FRAME_SIZE = 0x86, | ||
| 85 | RECEIVE_MODE = 0x88, | ||
| 86 | HASHTABLE_0 = 0x8c, | ||
| 87 | HASHTABLE_1 = 0x90, | ||
| 88 | RMON_STATISTICS_MASK = 0x98, | ||
| 89 | STATISTICS_MASK = 0x9c, | ||
| 90 | RX_JUMBO_FRAMES = 0xbc, /* Unused */ | ||
| 91 | TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */ | ||
| 92 | IP_CHECKSUM_ERRORS = 0xc2, /* Unused */ | ||
| 93 | UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */ | ||
| 94 | TX_JUMBO_FRAMES = 0xf4 /* Unused */ | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* Ethernet MIB statistic register offsets. */ | ||
| 98 | #define IPG_OCTETRCVOK 0xA8 | ||
| 99 | #define IPG_MCSTOCTETRCVDOK 0xAC | ||
| 100 | #define IPG_BCSTOCTETRCVOK 0xB0 | ||
| 101 | #define IPG_FRAMESRCVDOK 0xB4 | ||
| 102 | #define IPG_MCSTFRAMESRCVDOK 0xB8 | ||
| 103 | #define IPG_BCSTFRAMESRCVDOK 0xBE | ||
| 104 | #define IPG_MACCONTROLFRAMESRCVD 0xC6 | ||
| 105 | #define IPG_FRAMETOOLONGERRORS 0xC8 | ||
| 106 | #define IPG_INRANGELENGTHERRORS 0xCA | ||
| 107 | #define IPG_FRAMECHECKSEQERRORS 0xCC | ||
| 108 | #define IPG_FRAMESLOSTRXERRORS 0xCE | ||
| 109 | #define IPG_OCTETXMTOK 0xD0 | ||
| 110 | #define IPG_MCSTOCTETXMTOK 0xD4 | ||
| 111 | #define IPG_BCSTOCTETXMTOK 0xD8 | ||
| 112 | #define IPG_FRAMESXMTDOK 0xDC | ||
| 113 | #define IPG_MCSTFRAMESXMTDOK 0xE0 | ||
| 114 | #define IPG_FRAMESWDEFERREDXMT 0xE4 | ||
| 115 | #define IPG_LATECOLLISIONS 0xE8 | ||
| 116 | #define IPG_MULTICOLFRAMES 0xEC | ||
| 117 | #define IPG_SINGLECOLFRAMES 0xF0 | ||
| 118 | #define IPG_BCSTFRAMESXMTDOK 0xF6 | ||
| 119 | #define IPG_CARRIERSENSEERRORS 0xF8 | ||
| 120 | #define IPG_MACCONTROLFRAMESXMTDOK 0xFA | ||
| 121 | #define IPG_FRAMESABORTXSCOLLS 0xFC | ||
| 122 | #define IPG_FRAMESWEXDEFERRAL 0xFE | ||
| 123 | |||
| 124 | /* RMON statistic register offsets. */ | ||
| 125 | #define IPG_ETHERSTATSCOLLISIONS 0x100 | ||
| 126 | #define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 | ||
| 127 | #define IPG_ETHERSTATSPKTSTRANSMIT 0x108 | ||
| 128 | #define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C | ||
| 129 | #define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 | ||
| 130 | #define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 | ||
| 131 | #define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 | ||
| 132 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C | ||
| 133 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 | ||
| 134 | #define IPG_ETHERSTATSCRCALIGNERRORS 0x124 | ||
| 135 | #define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 | ||
| 136 | #define IPG_ETHERSTATSFRAGMENTS 0x12C | ||
| 137 | #define IPG_ETHERSTATSJABBERS 0x130 | ||
| 138 | #define IPG_ETHERSTATSOCTETS 0x134 | ||
| 139 | #define IPG_ETHERSTATSPKTS 0x138 | ||
| 140 | #define IPG_ETHERSTATSPKTS64OCTESTS 0x13C | ||
| 141 | #define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 | ||
| 142 | #define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 | ||
| 143 | #define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 | ||
| 144 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C | ||
| 145 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 | ||
| 146 | |||
| 147 | /* RMON statistic register equivalents. */ | ||
| 148 | #define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 | ||
| 149 | #define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 | ||
| 150 | #define IPG_ETHERSTATSMULTICASTPKTS 0xB8 | ||
| 151 | #define IPG_ETHERSTATSBROADCASTPKTS 0xBE | ||
| 152 | #define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 | ||
| 153 | #define IPG_ETHERSTATSDROPEVENTS 0xCE | ||
| 154 | |||
| 155 | /* Serial EEPROM offsets */ | ||
| 156 | #define IPG_EEPROM_CONFIGPARAM 0x00 | ||
| 157 | #define IPG_EEPROM_ASICCTRL 0x01 | ||
| 158 | #define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 | ||
| 159 | #define IPG_EEPROM_SUBSYSTEMID 0x03 | ||
| 160 | #define IPG_EEPROM_STATIONADDRESS0 0x10 | ||
| 161 | #define IPG_EEPROM_STATIONADDRESS1 0x11 | ||
| 162 | #define IPG_EEPROM_STATIONADDRESS2 0x12 | ||
| 163 | |||
| 164 | /* Register & data structure bit masks */ | ||
| 165 | |||
| 166 | /* PCI register masks. */ | ||
| 167 | |||
| 168 | /* IOBaseAddress */ | ||
| 169 | #define IPG_PIB_RSVD_MASK 0xFFFFFE01 | ||
| 170 | #define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 | ||
| 171 | #define IPG_PIB_IOBASEADDRIND 0x00000001 | ||
| 172 | |||
| 173 | /* MemBaseAddress */ | ||
| 174 | #define IPG_PMB_RSVD_MASK 0xFFFFFE07 | ||
| 175 | #define IPG_PMB_MEMBASEADDRIND 0x00000001 | ||
| 176 | #define IPG_PMB_MEMMAPTYPE 0x00000006 | ||
| 177 | #define IPG_PMB_MEMMAPTYPE0 0x00000002 | ||
| 178 | #define IPG_PMB_MEMMAPTYPE1 0x00000004 | ||
| 179 | #define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 | ||
| 180 | |||
| 181 | /* ConfigStatus */ | ||
| 182 | #define IPG_CS_RSVD_MASK 0xFFB0 | ||
| 183 | #define IPG_CS_CAPABILITIES 0x0010 | ||
| 184 | #define IPG_CS_66MHZCAPABLE 0x0020 | ||
| 185 | #define IPG_CS_FASTBACK2BACK 0x0080 | ||
| 186 | #define IPG_CS_DATAPARITYREPORTED 0x0100 | ||
| 187 | #define IPG_CS_DEVSELTIMING 0x0600 | ||
| 188 | #define IPG_CS_SIGNALEDTARGETABORT 0x0800 | ||
| 189 | #define IPG_CS_RECEIVEDTARGETABORT 0x1000 | ||
| 190 | #define IPG_CS_RECEIVEDMASTERABORT 0x2000 | ||
| 191 | #define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 | ||
| 192 | #define IPG_CS_DETECTEDPARITYERROR 0x8000 | ||
| 193 | |||
| 194 | /* TFD data structure masks. */ | ||
| 195 | |||
| 196 | /* TFDList, TFC */ | ||
| 197 | #define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL | ||
| 198 | #define IPG_TFC_FRAMEID 0x000000000000FFFFULL | ||
| 199 | #define IPG_TFC_WORDALIGN 0x0000000000030000ULL | ||
| 200 | #define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL | ||
| 201 | #define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL | ||
| 202 | #define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL | ||
| 203 | #define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL | ||
| 204 | #define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL | ||
| 205 | #define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL | ||
| 206 | #define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL | ||
| 207 | #define IPG_TFC_TXINDICATE 0x0000000000400000ULL | ||
| 208 | #define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL | ||
| 209 | #define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL | ||
| 210 | #define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL | ||
| 211 | #define IPG_TFC_TFDDONE 0x0000000080000000ULL | ||
| 212 | #define IPG_TFC_VID 0x00000FFF00000000ULL | ||
| 213 | #define IPG_TFC_CFI 0x0000100000000000ULL | ||
| 214 | #define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL | ||
| 215 | |||
| 216 | /* TFDList, FragInfo */ | ||
| 217 | #define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL | ||
| 218 | #define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL | ||
| 219 | #define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL | ||
| 220 | |||
| 221 | /* RFD data structure masks. */ | ||
| 222 | |||
| 223 | /* RFDList, RFS */ | ||
| 224 | #define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL | ||
| 225 | #define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL | ||
| 226 | #define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL | ||
| 227 | #define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL | ||
| 228 | #define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL | ||
| 229 | #define IPG_RFS_RXFCSERROR 0x0000000000080000ULL | ||
| 230 | #define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL | ||
| 231 | #define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL | ||
| 232 | #define IPG_RFS_VLANDETECTED 0x0000000000400000ULL | ||
| 233 | #define IPG_RFS_TCPDETECTED 0x0000000000800000ULL | ||
| 234 | #define IPG_RFS_TCPERROR 0x0000000001000000ULL | ||
| 235 | #define IPG_RFS_UDPDETECTED 0x0000000002000000ULL | ||
| 236 | #define IPG_RFS_UDPERROR 0x0000000004000000ULL | ||
| 237 | #define IPG_RFS_IPDETECTED 0x0000000008000000ULL | ||
| 238 | #define IPG_RFS_IPERROR 0x0000000010000000ULL | ||
| 239 | #define IPG_RFS_FRAMESTART 0x0000000020000000ULL | ||
| 240 | #define IPG_RFS_FRAMEEND 0x0000000040000000ULL | ||
| 241 | #define IPG_RFS_RFDDONE 0x0000000080000000ULL | ||
| 242 | #define IPG_RFS_TCI 0x0000FFFF00000000ULL | ||
| 243 | |||
| 244 | /* RFDList, FragInfo */ | ||
| 245 | #define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL | ||
| 246 | #define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL | ||
| 247 | #define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL | ||
| 248 | |||
| 249 | /* I/O Register masks. */ | ||
| 250 | |||
| 251 | /* RMON Statistics Mask */ | ||
| 252 | #define IPG_RZ_ALL 0x0FFFFFFF | ||
| 253 | |||
| 254 | /* Statistics Mask */ | ||
| 255 | #define IPG_SM_ALL 0x0FFFFFFF | ||
| 256 | #define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 | ||
| 257 | #define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 | ||
| 258 | #define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 | ||
| 259 | #define IPG_SM_RXJUMBOFRAMES 0x00000008 | ||
| 260 | #define IPG_SM_TCPCHECKSUMERRORS 0x00000010 | ||
| 261 | #define IPG_SM_IPCHECKSUMERRORS 0x00000020 | ||
| 262 | #define IPG_SM_UDPCHECKSUMERRORS 0x00000040 | ||
| 263 | #define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 | ||
| 264 | #define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 | ||
| 265 | #define IPG_SM_INRANGELENGTHERRORS 0x00000200 | ||
| 266 | #define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 | ||
| 267 | #define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 | ||
| 268 | #define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 | ||
| 269 | #define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 | ||
| 270 | #define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 | ||
| 271 | #define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 | ||
| 272 | #define IPG_SM_LATECOLLISIONS 0x00010000 | ||
| 273 | #define IPG_SM_MULTICOLFRAMES 0x00020000 | ||
| 274 | #define IPG_SM_SINGLECOLFRAMES 0x00040000 | ||
| 275 | #define IPG_SM_TXJUMBOFRAMES 0x00080000 | ||
| 276 | #define IPG_SM_CARRIERSENSEERRORS 0x00100000 | ||
| 277 | #define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 | ||
| 278 | #define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 | ||
| 279 | #define IPG_SM_FRAMESWEXDEFERAL 0x00800000 | ||
| 280 | |||
| 281 | /* Countdown */ | ||
| 282 | #define IPG_CD_RSVD_MASK 0x0700FFFF | ||
| 283 | #define IPG_CD_COUNT 0x0000FFFF | ||
| 284 | #define IPG_CD_COUNTDOWNSPEED 0x01000000 | ||
| 285 | #define IPG_CD_COUNTDOWNMODE 0x02000000 | ||
| 286 | #define IPG_CD_COUNTINTENABLED 0x04000000 | ||
| 287 | |||
| 288 | /* TxDMABurstThresh */ | ||
| 289 | #define IPG_TB_RSVD_MASK 0xFF | ||
| 290 | |||
| 291 | /* TxDMAUrgentThresh */ | ||
| 292 | #define IPG_TU_RSVD_MASK 0xFF | ||
| 293 | |||
| 294 | /* TxDMAPollPeriod */ | ||
| 295 | #define IPG_TP_RSVD_MASK 0xFF | ||
| 296 | |||
| 297 | /* RxDMAUrgentThresh */ | ||
| 298 | #define IPG_RU_RSVD_MASK 0xFF | ||
| 299 | |||
| 300 | /* RxDMAPollPeriod */ | ||
| 301 | #define IPG_RP_RSVD_MASK 0xFF | ||
| 302 | |||
| 303 | /* ReceiveMode */ | ||
| 304 | #define IPG_RM_RSVD_MASK 0x3F | ||
| 305 | #define IPG_RM_RECEIVEUNICAST 0x01 | ||
| 306 | #define IPG_RM_RECEIVEMULTICAST 0x02 | ||
| 307 | #define IPG_RM_RECEIVEBROADCAST 0x04 | ||
| 308 | #define IPG_RM_RECEIVEALLFRAMES 0x08 | ||
| 309 | #define IPG_RM_RECEIVEMULTICASTHASH 0x10 | ||
| 310 | #define IPG_RM_RECEIVEIPMULTICAST 0x20 | ||
| 311 | |||
| 312 | /* PhySet */ | ||
| 313 | #define IPG_PS_MEM_LENB9B 0x01 | ||
| 314 | #define IPG_PS_MEM_LEN9 0x02 | ||
| 315 | #define IPG_PS_NON_COMPDET 0x04 | ||
| 316 | |||
| 317 | /* PhyCtrl */ | ||
| 318 | #define IPG_PC_RSVD_MASK 0xFF | ||
| 319 | #define IPG_PC_MGMTCLK_LO 0x00 | ||
| 320 | #define IPG_PC_MGMTCLK_HI 0x01 | ||
| 321 | #define IPG_PC_MGMTCLK 0x01 | ||
| 322 | #define IPG_PC_MGMTDATA 0x02 | ||
| 323 | #define IPG_PC_MGMTDIR 0x04 | ||
| 324 | #define IPG_PC_DUPLEX_POLARITY 0x08 | ||
| 325 | #define IPG_PC_DUPLEX_STATUS 0x10 | ||
| 326 | #define IPG_PC_LINK_POLARITY 0x20 | ||
| 327 | #define IPG_PC_LINK_SPEED 0xC0 | ||
| 328 | #define IPG_PC_LINK_SPEED_10MBPS 0x40 | ||
| 329 | #define IPG_PC_LINK_SPEED_100MBPS 0x80 | ||
| 330 | #define IPG_PC_LINK_SPEED_1000MBPS 0xC0 | ||
| 331 | |||
| 332 | /* DMACtrl */ | ||
| 333 | #define IPG_DC_RSVD_MASK 0xC07D9818 | ||
| 334 | #define IPG_DC_RX_DMA_COMPLETE 0x00000008 | ||
| 335 | #define IPG_DC_RX_DMA_POLL_NOW 0x00000010 | ||
| 336 | #define IPG_DC_TX_DMA_COMPLETE 0x00000800 | ||
| 337 | #define IPG_DC_TX_DMA_POLL_NOW 0x00001000 | ||
| 338 | #define IPG_DC_TX_DMA_IN_PROG 0x00008000 | ||
| 339 | #define IPG_DC_RX_EARLY_DISABLE 0x00010000 | ||
| 340 | #define IPG_DC_MWI_DISABLE 0x00040000 | ||
| 341 | #define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 | ||
| 342 | #define IPG_DC_TX_BURST_LIMIT 0x00700000 | ||
| 343 | #define IPG_DC_TARGET_ABORT 0x40000000 | ||
| 344 | #define IPG_DC_MASTER_ABORT 0x80000000 | ||
| 345 | |||
| 346 | /* ASICCtrl */ | ||
| 347 | #define IPG_AC_RSVD_MASK 0x07FFEFF2 | ||
| 348 | #define IPG_AC_EXP_ROM_SIZE 0x00000002 | ||
| 349 | #define IPG_AC_PHY_SPEED10 0x00000010 | ||
| 350 | #define IPG_AC_PHY_SPEED100 0x00000020 | ||
| 351 | #define IPG_AC_PHY_SPEED1000 0x00000040 | ||
| 352 | #define IPG_AC_PHY_MEDIA 0x00000080 | ||
| 353 | #define IPG_AC_FORCED_CFG 0x00000700 | ||
| 354 | #define IPG_AC_D3RESETDISABLE 0x00000800 | ||
| 355 | #define IPG_AC_SPEED_UP_MODE 0x00002000 | ||
| 356 | #define IPG_AC_LED_MODE 0x00004000 | ||
| 357 | #define IPG_AC_RST_OUT_POLARITY 0x00008000 | ||
| 358 | #define IPG_AC_GLOBAL_RESET 0x00010000 | ||
| 359 | #define IPG_AC_RX_RESET 0x00020000 | ||
| 360 | #define IPG_AC_TX_RESET 0x00040000 | ||
| 361 | #define IPG_AC_DMA 0x00080000 | ||
| 362 | #define IPG_AC_FIFO 0x00100000 | ||
| 363 | #define IPG_AC_NETWORK 0x00200000 | ||
| 364 | #define IPG_AC_HOST 0x00400000 | ||
| 365 | #define IPG_AC_AUTO_INIT 0x00800000 | ||
| 366 | #define IPG_AC_RST_OUT 0x01000000 | ||
| 367 | #define IPG_AC_INT_REQUEST 0x02000000 | ||
| 368 | #define IPG_AC_RESET_BUSY 0x04000000 | ||
| 369 | #define IPG_AC_LED_SPEED 0x08000000 | ||
| 370 | #define IPG_AC_LED_MODE_BIT_1 0x20000000 | ||
| 371 | |||
| 372 | /* EepromCtrl */ | ||
| 373 | #define IPG_EC_RSVD_MASK 0x83FF | ||
| 374 | #define IPG_EC_EEPROM_ADDR 0x00FF | ||
| 375 | #define IPG_EC_EEPROM_OPCODE 0x0300 | ||
| 376 | #define IPG_EC_EEPROM_SUBCOMMAD 0x0000 | ||
| 377 | #define IPG_EC_EEPROM_WRITEOPCODE 0x0100 | ||
| 378 | #define IPG_EC_EEPROM_READOPCODE 0x0200 | ||
| 379 | #define IPG_EC_EEPROM_ERASEOPCODE 0x0300 | ||
| 380 | #define IPG_EC_EEPROM_BUSY 0x8000 | ||
| 381 | |||
| 382 | /* FIFOCtrl */ | ||
| 383 | #define IPG_FC_RSVD_MASK 0xC001 | ||
| 384 | #define IPG_FC_RAM_TEST_MODE 0x0001 | ||
| 385 | #define IPG_FC_TRANSMITTING 0x4000 | ||
| 386 | #define IPG_FC_RECEIVING 0x8000 | ||
| 387 | |||
| 388 | /* TxStatus */ | ||
| 389 | #define IPG_TS_RSVD_MASK 0xFFFF00DD | ||
| 390 | #define IPG_TS_TX_ERROR 0x00000001 | ||
| 391 | #define IPG_TS_LATE_COLLISION 0x00000004 | ||
| 392 | #define IPG_TS_TX_MAX_COLL 0x00000008 | ||
| 393 | #define IPG_TS_TX_UNDERRUN 0x00000010 | ||
| 394 | #define IPG_TS_TX_IND_REQD 0x00000040 | ||
| 395 | #define IPG_TS_TX_COMPLETE 0x00000080 | ||
| 396 | #define IPG_TS_TX_FRAMEID 0xFFFF0000 | ||
| 397 | |||
| 398 | /* WakeEvent */ | ||
| 399 | #define IPG_WE_WAKE_PKT_ENABLE 0x01 | ||
| 400 | #define IPG_WE_MAGIC_PKT_ENABLE 0x02 | ||
| 401 | #define IPG_WE_LINK_EVT_ENABLE 0x04 | ||
| 402 | #define IPG_WE_WAKE_POLARITY 0x08 | ||
| 403 | #define IPG_WE_WAKE_PKT_EVT 0x10 | ||
| 404 | #define IPG_WE_MAGIC_PKT_EVT 0x20 | ||
| 405 | #define IPG_WE_LINK_EVT 0x40 | ||
| 406 | #define IPG_WE_WOL_ENABLE 0x80 | ||
| 407 | |||
| 408 | /* IntEnable */ | ||
| 409 | #define IPG_IE_RSVD_MASK 0x1FFE | ||
| 410 | #define IPG_IE_HOST_ERROR 0x0002 | ||
| 411 | #define IPG_IE_TX_COMPLETE 0x0004 | ||
| 412 | #define IPG_IE_MAC_CTRL_FRAME 0x0008 | ||
| 413 | #define IPG_IE_RX_COMPLETE 0x0010 | ||
| 414 | #define IPG_IE_RX_EARLY 0x0020 | ||
| 415 | #define IPG_IE_INT_REQUESTED 0x0040 | ||
| 416 | #define IPG_IE_UPDATE_STATS 0x0080 | ||
| 417 | #define IPG_IE_LINK_EVENT 0x0100 | ||
| 418 | #define IPG_IE_TX_DMA_COMPLETE 0x0200 | ||
| 419 | #define IPG_IE_RX_DMA_COMPLETE 0x0400 | ||
| 420 | #define IPG_IE_RFD_LIST_END 0x0800 | ||
| 421 | #define IPG_IE_RX_DMA_PRIORITY 0x1000 | ||
| 422 | |||
| 423 | /* IntStatus */ | ||
| 424 | #define IPG_IS_RSVD_MASK 0x1FFF | ||
| 425 | #define IPG_IS_INTERRUPT_STATUS 0x0001 | ||
| 426 | #define IPG_IS_HOST_ERROR 0x0002 | ||
| 427 | #define IPG_IS_TX_COMPLETE 0x0004 | ||
| 428 | #define IPG_IS_MAC_CTRL_FRAME 0x0008 | ||
| 429 | #define IPG_IS_RX_COMPLETE 0x0010 | ||
| 430 | #define IPG_IS_RX_EARLY 0x0020 | ||
| 431 | #define IPG_IS_INT_REQUESTED 0x0040 | ||
| 432 | #define IPG_IS_UPDATE_STATS 0x0080 | ||
| 433 | #define IPG_IS_LINK_EVENT 0x0100 | ||
| 434 | #define IPG_IS_TX_DMA_COMPLETE 0x0200 | ||
| 435 | #define IPG_IS_RX_DMA_COMPLETE 0x0400 | ||
| 436 | #define IPG_IS_RFD_LIST_END 0x0800 | ||
| 437 | #define IPG_IS_RX_DMA_PRIORITY 0x1000 | ||
| 438 | |||
| 439 | /* MACCtrl */ | ||
| 440 | #define IPG_MC_RSVD_MASK 0x7FE33FA3 | ||
| 441 | #define IPG_MC_IFS_SELECT 0x00000003 | ||
| 442 | #define IPG_MC_IFS_4352BIT 0x00000003 | ||
| 443 | #define IPG_MC_IFS_1792BIT 0x00000002 | ||
| 444 | #define IPG_MC_IFS_1024BIT 0x00000001 | ||
| 445 | #define IPG_MC_IFS_96BIT 0x00000000 | ||
| 446 | #define IPG_MC_DUPLEX_SELECT 0x00000020 | ||
| 447 | #define IPG_MC_DUPLEX_SELECT_FD 0x00000020 | ||
| 448 | #define IPG_MC_DUPLEX_SELECT_HD 0x00000000 | ||
| 449 | #define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 | ||
| 450 | #define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 | ||
| 451 | #define IPG_MC_RCV_FCS 0x00000200 | ||
| 452 | #define IPG_MC_FIFO_LOOPBACK 0x00000400 | ||
| 453 | #define IPG_MC_MAC_LOOPBACK 0x00000800 | ||
| 454 | #define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 | ||
| 455 | #define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 | ||
| 456 | #define IPG_MC_COLLISION_DETECT 0x00010000 | ||
| 457 | #define IPG_MC_CARRIER_SENSE 0x00020000 | ||
| 458 | #define IPG_MC_STATISTICS_ENABLE 0x00200000 | ||
| 459 | #define IPG_MC_STATISTICS_DISABLE 0x00400000 | ||
| 460 | #define IPG_MC_STATISTICS_ENABLED 0x00800000 | ||
| 461 | #define IPG_MC_TX_ENABLE 0x01000000 | ||
| 462 | #define IPG_MC_TX_DISABLE 0x02000000 | ||
| 463 | #define IPG_MC_TX_ENABLED 0x04000000 | ||
| 464 | #define IPG_MC_RX_ENABLE 0x08000000 | ||
| 465 | #define IPG_MC_RX_DISABLE 0x10000000 | ||
| 466 | #define IPG_MC_RX_ENABLED 0x20000000 | ||
| 467 | #define IPG_MC_PAUSED 0x40000000 | ||
| 468 | |||
| 469 | /* | ||
| 470 | * Tune | ||
| 471 | */ | ||
| 472 | |||
| 473 | /* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ | ||
| 474 | #define IPG_APPEND_FCS_ON_TX 1 | ||
| 475 | |||
| 476 | /* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */ | ||
| 477 | #define IPG_STRIP_FCS_ON_RX 1 | ||
| 478 | |||
| 479 | /* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with | ||
| 480 | * Ethernet errors. | ||
| 481 | */ | ||
| 482 | #define IPG_DROP_ON_RX_ETH_ERRORS 1 | ||
| 483 | |||
| 484 | /* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually | ||
| 485 | * (via TFC). | ||
| 486 | */ | ||
| 487 | #define IPG_INSERT_MANUAL_VLAN_TAG 0 | ||
| 488 | |||
| 489 | /* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ | ||
| 490 | #define IPG_ADD_IPCHECKSUM_ON_TX 0 | ||
| 491 | |||
| 492 | /* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. | ||
| 493 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
| 494 | */ | ||
| 495 | #define IPG_ADD_TCPCHECKSUM_ON_TX 0 | ||
| 496 | |||
| 497 | /* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. | ||
| 498 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
| 499 | */ | ||
| 500 | #define IPG_ADD_UDPCHECKSUM_ON_TX 0 | ||
| 501 | |||
| 502 | /* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx | ||
| 503 | * constants as desired. | ||
| 504 | */ | ||
| 505 | #define IPG_MANUAL_VLAN_VID 0xABC | ||
| 506 | #define IPG_MANUAL_VLAN_CFI 0x1 | ||
| 507 | #define IPG_MANUAL_VLAN_USERPRIORITY 0x5 | ||
| 508 | |||
| 509 | #define IPG_IO_REG_RANGE 0xFF | ||
| 510 | #define IPG_MEM_REG_RANGE 0x154 | ||
| 511 | #define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" | ||
| 512 | #define IPG_NIC_PHY_ADDRESS 0x01 | ||
| 513 | #define IPG_DMALIST_ALIGN_PAD 0x07 | ||
| 514 | #define IPG_MULTICAST_HASHTABLE_SIZE 0x40 | ||
| 515 | |||
| 516 | /* Number of milliseconds to wait after issuing a software reset. | ||
| 517 | * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. | ||
| 518 | */ | ||
| 519 | #define IPG_AC_RESETWAIT 0x05 | ||
| 520 | |||
| 521 | /* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ | ||
| 522 | #define IPG_AC_RESET_TIMEOUT 0x0A | ||
| 523 | |||
| 524 | /* Minimum number of nanoseconds used to toggle MDC clock during | ||
| 525 | * MII/GMII register access. | ||
| 526 | */ | ||
| 527 | #define IPG_PC_PHYCTRLWAIT_NS 200 | ||
| 528 | |||
| 529 | #define IPG_TFDLIST_LENGTH 0x100 | ||
| 530 | |||
| 531 | /* Number of frames between TxDMAComplete interrupt. | ||
| 532 | * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH | ||
| 533 | */ | ||
| 534 | #define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 | ||
| 535 | |||
| 536 | #define IPG_RFDLIST_LENGTH 0x100 | ||
| 537 | |||
| 538 | /* Maximum number of RFDs to process per interrupt. | ||
| 539 | * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH | ||
| 540 | */ | ||
| 541 | #define IPG_MAXRFDPROCESS_COUNT 0x80 | ||
| 542 | |||
| 543 | /* Minimum margin between last freed RFD, and current RFD. | ||
| 544 | * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH | ||
| 545 | */ | ||
| 546 | #define IPG_MINUSEDRFDSTOFREE 0x80 | ||
| 547 | |||
| 548 | /* specify the jumbo frame maximum size | ||
| 549 | * per unit is 0x600 (the rx_buffer size that one RFD can carry) | ||
| 550 | */ | ||
| 551 | #define MAX_JUMBOSIZE 0x8 /* max is 12K */ | ||
| 552 | |||
| 553 | /* Key register values loaded at driver start up. */ | ||
| 554 | |||
| 555 | /* TXDMAPollPeriod is specified in 320ns increments. | ||
| 556 | * | ||
| 557 | * Value Time | ||
| 558 | * --------------------- | ||
| 559 | * 0x00-0x01 320ns | ||
| 560 | * 0x03 ~1us | ||
| 561 | * 0x1F ~10us | ||
| 562 | * 0xFF ~82us | ||
| 563 | */ | ||
| 564 | #define IPG_TXDMAPOLLPERIOD_VALUE 0x26 | ||
| 565 | |||
| 566 | /* TxDMAUrgentThresh specifies the minimum amount of | ||
| 567 | * data in the transmit FIFO before asserting an | ||
| 568 | * urgent transmit DMA request. | ||
| 569 | * | ||
| 570 | * Value Min TxFIFO occupied space before urgent TX request | ||
| 571 | * --------------------------------------------------------------- | ||
| 572 | * 0x00-0x04 128 bytes (1024 bits) | ||
| 573 | * 0x27 1248 bytes (~10000 bits) | ||
| 574 | * 0x30 1536 bytes (12288 bits) | ||
| 575 | * 0xFF 8192 bytes (65535 bits) | ||
| 576 | */ | ||
| 577 | #define IPG_TXDMAURGENTTHRESH_VALUE 0x04 | ||
| 578 | |||
| 579 | /* TxDMABurstThresh specifies the minimum amount of | ||
| 580 | * free space in the transmit FIFO before asserting an | ||
| 581 | * transmit DMA request. | ||
| 582 | * | ||
| 583 | * Value Min TxFIFO free space before TX request | ||
| 584 | * ---------------------------------------------------- | ||
| 585 | * 0x00-0x08 256 bytes | ||
| 586 | * 0x30 1536 bytes | ||
| 587 | * 0xFF 8192 bytes | ||
| 588 | */ | ||
| 589 | #define IPG_TXDMABURSTTHRESH_VALUE 0x30 | ||
| 590 | |||
| 591 | /* RXDMAPollPeriod is specified in 320ns increments. | ||
| 592 | * | ||
| 593 | * Value Time | ||
| 594 | * --------------------- | ||
| 595 | * 0x00-0x01 320ns | ||
| 596 | * 0x03 ~1us | ||
| 597 | * 0x1F ~10us | ||
| 598 | * 0xFF ~82us | ||
| 599 | */ | ||
| 600 | #define IPG_RXDMAPOLLPERIOD_VALUE 0x01 | ||
| 601 | |||
| 602 | /* RxDMAUrgentThresh specifies the minimum amount of | ||
| 603 | * free space within the receive FIFO before asserting | ||
| 604 | * a urgent receive DMA request. | ||
| 605 | * | ||
| 606 | * Value Min RxFIFO free space before urgent RX request | ||
| 607 | * --------------------------------------------------------------- | ||
| 608 | * 0x00-0x04 128 bytes (1024 bits) | ||
| 609 | * 0x27 1248 bytes (~10000 bits) | ||
| 610 | * 0x30 1536 bytes (12288 bits) | ||
| 611 | * 0xFF 8192 bytes (65535 bits) | ||
| 612 | */ | ||
| 613 | #define IPG_RXDMAURGENTTHRESH_VALUE 0x30 | ||
| 614 | |||
| 615 | /* RxDMABurstThresh specifies the minimum amount of | ||
| 616 | * occupied space within the receive FIFO before asserting | ||
| 617 | * a receive DMA request. | ||
| 618 | * | ||
| 619 | * Value Min TxFIFO free space before TX request | ||
| 620 | * ---------------------------------------------------- | ||
| 621 | * 0x00-0x08 256 bytes | ||
| 622 | * 0x30 1536 bytes | ||
| 623 | * 0xFF 8192 bytes | ||
| 624 | */ | ||
| 625 | #define IPG_RXDMABURSTTHRESH_VALUE 0x30 | ||
| 626 | |||
| 627 | /* FlowOnThresh specifies the maximum amount of occupied | ||
| 628 | * space in the receive FIFO before a PAUSE frame with | ||
| 629 | * maximum pause time transmitted. | ||
| 630 | * | ||
| 631 | * Value Max RxFIFO occupied space before PAUSE | ||
| 632 | * --------------------------------------------------- | ||
| 633 | * 0x0000 0 bytes | ||
| 634 | * 0x0740 29,696 bytes | ||
| 635 | * 0x07FF 32,752 bytes | ||
| 636 | */ | ||
| 637 | #define IPG_FLOWONTHRESH_VALUE 0x0740 | ||
| 638 | |||
| 639 | /* FlowOffThresh specifies the minimum amount of occupied | ||
| 640 | * space in the receive FIFO before a PAUSE frame with | ||
| 641 | * zero pause time is transmitted. | ||
| 642 | * | ||
| 643 | * Value Max RxFIFO occupied space before PAUSE | ||
| 644 | * --------------------------------------------------- | ||
| 645 | * 0x0000 0 bytes | ||
| 646 | * 0x00BF 3056 bytes | ||
| 647 | * 0x07FF 32,752 bytes | ||
| 648 | */ | ||
| 649 | #define IPG_FLOWOFFTHRESH_VALUE 0x00BF | ||
| 650 | |||
| 651 | /* | ||
| 652 | * Miscellaneous macros. | ||
| 653 | */ | ||
| 654 | |||
| 655 | /* Macros for printing debug statements. */ | ||
| 656 | #ifdef IPG_DEBUG | ||
| 657 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
| 658 | do { \ | ||
| 659 | if (0) \ | ||
| 660 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
| 661 | } while (0) | ||
| 662 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
| 663 | printk(KERN_DEBUG "IPG: " fmt, ##args) | ||
| 664 | # define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) | ||
| 665 | # define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) | ||
| 666 | #else | ||
| 667 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
| 668 | do { \ | ||
| 669 | if (0) \ | ||
| 670 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
| 671 | } while (0) | ||
| 672 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
| 673 | do { \ | ||
| 674 | if (0) \ | ||
| 675 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
| 676 | } while (0) | ||
| 677 | # define IPG_DUMPRFDLIST(args) | ||
| 678 | # define IPG_DUMPTFDLIST(args) | ||
| 679 | #endif | ||
| 680 | |||
| 681 | /* | ||
| 682 | * End miscellaneous macros. | ||
| 683 | */ | ||
| 684 | |||
| 685 | /* Transmit Frame Descriptor. The IPG supports 15 fragments, | ||
| 686 | * however Linux requires only a single fragment. Note, each | ||
| 687 | * TFD field is 64 bits wide. | ||
| 688 | */ | ||
| 689 | struct ipg_tx { | ||
| 690 | __le64 next_desc; | ||
| 691 | __le64 tfc; | ||
| 692 | __le64 frag_info; | ||
| 693 | }; | ||
| 694 | |||
| 695 | /* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. | ||
| 696 | */ | ||
| 697 | struct ipg_rx { | ||
| 698 | __le64 next_desc; | ||
| 699 | __le64 rfs; | ||
| 700 | __le64 frag_info; | ||
| 701 | }; | ||
| 702 | |||
| 703 | struct ipg_jumbo { | ||
| 704 | int found_start; | ||
| 705 | int current_size; | ||
| 706 | struct sk_buff *skb; | ||
| 707 | }; | ||
| 708 | |||
| 709 | /* Structure of IPG NIC specific data. */ | ||
| 710 | struct ipg_nic_private { | ||
| 711 | void __iomem *ioaddr; | ||
| 712 | struct ipg_tx *txd; | ||
| 713 | struct ipg_rx *rxd; | ||
| 714 | dma_addr_t txd_map; | ||
| 715 | dma_addr_t rxd_map; | ||
| 716 | struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH]; | ||
| 717 | struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH]; | ||
| 718 | unsigned int tx_current; | ||
| 719 | unsigned int tx_dirty; | ||
| 720 | unsigned int rx_current; | ||
| 721 | unsigned int rx_dirty; | ||
| 722 | bool is_jumbo; | ||
| 723 | struct ipg_jumbo jumbo; | ||
| 724 | unsigned long rxfrag_size; | ||
| 725 | unsigned long rxsupport_size; | ||
| 726 | unsigned long max_rxframe_size; | ||
| 727 | unsigned int rx_buf_sz; | ||
| 728 | struct pci_dev *pdev; | ||
| 729 | struct net_device *dev; | ||
| 730 | struct net_device_stats stats; | ||
| 731 | spinlock_t lock; | ||
| 732 | int tenmbpsmode; | ||
| 733 | |||
| 734 | u16 led_mode; | ||
| 735 | u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ | ||
| 736 | |||
| 737 | struct mutex mii_mutex; | ||
| 738 | struct mii_if_info mii_if; | ||
| 739 | int reset_current_tfd; | ||
| 740 | #ifdef IPG_DEBUG | ||
| 741 | int RFDlistendCount; | ||
| 742 | int RFDListCheckedCount; | ||
| 743 | int EmptyRFDListCount; | ||
| 744 | #endif | ||
| 745 | struct delayed_work task; | ||
| 746 | }; | ||
| 747 | |||
| 748 | #endif /* __LINUX_IPG_H */ | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 85f1b1e7e505..31c491e02e69 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
| 892 | dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; | 892 | dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; |
| 893 | dev->caps.port_mask[i] = dev->caps.port_type[i]; | 893 | dev->caps.port_mask[i] = dev->caps.port_type[i]; |
| 894 | dev->caps.phys_port_id[i] = func_cap.phys_port_id; | 894 | dev->caps.phys_port_id[i] = func_cap.phys_port_id; |
| 895 | if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, | 895 | err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, |
| 896 | &dev->caps.gid_table_len[i], | 896 | &dev->caps.gid_table_len[i], |
| 897 | &dev->caps.pkey_table_len[i])) | 897 | &dev->caps.pkey_table_len[i]); |
| 898 | if (err) | ||
| 898 | goto err_mem; | 899 | goto err_mem; |
| 899 | } | 900 | } |
| 900 | 901 | ||
| @@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
| 906 | dev->caps.uar_page_size * dev->caps.num_uars, | 907 | dev->caps.uar_page_size * dev->caps.num_uars, |
| 907 | (unsigned long long) | 908 | (unsigned long long) |
| 908 | pci_resource_len(dev->persist->pdev, 2)); | 909 | pci_resource_len(dev->persist->pdev, 2)); |
| 910 | err = -ENOMEM; | ||
| 909 | goto err_mem; | 911 | goto err_mem; |
| 910 | } | 912 | } |
| 911 | 913 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 9813d34f3e5b..6fec3e993d02 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) | |||
| 4952 | struct res_counter *counter; | 4952 | struct res_counter *counter; |
| 4953 | struct res_counter *tmp; | 4953 | struct res_counter *tmp; |
| 4954 | int err; | 4954 | int err; |
| 4955 | int index; | 4955 | int *counters_arr = NULL; |
| 4956 | int i, j; | ||
| 4956 | 4957 | ||
| 4957 | err = move_all_busy(dev, slave, RES_COUNTER); | 4958 | err = move_all_busy(dev, slave, RES_COUNTER); |
| 4958 | if (err) | 4959 | if (err) |
| 4959 | mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", | 4960 | mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", |
| 4960 | slave); | 4961 | slave); |
| 4961 | 4962 | ||
| 4962 | spin_lock_irq(mlx4_tlock(dev)); | 4963 | counters_arr = kmalloc_array(dev->caps.max_counters, |
| 4963 | list_for_each_entry_safe(counter, tmp, counter_list, com.list) { | 4964 | sizeof(*counters_arr), GFP_KERNEL); |
| 4964 | if (counter->com.owner == slave) { | 4965 | if (!counters_arr) |
| 4965 | index = counter->com.res_id; | 4966 | return; |
| 4966 | rb_erase(&counter->com.node, | 4967 | |
| 4967 | &tracker->res_tree[RES_COUNTER]); | 4968 | do { |
| 4968 | list_del(&counter->com.list); | 4969 | i = 0; |
| 4969 | kfree(counter); | 4970 | j = 0; |
| 4970 | __mlx4_counter_free(dev, index); | 4971 | spin_lock_irq(mlx4_tlock(dev)); |
| 4972 | list_for_each_entry_safe(counter, tmp, counter_list, com.list) { | ||
| 4973 | if (counter->com.owner == slave) { | ||
| 4974 | counters_arr[i++] = counter->com.res_id; | ||
| 4975 | rb_erase(&counter->com.node, | ||
| 4976 | &tracker->res_tree[RES_COUNTER]); | ||
| 4977 | list_del(&counter->com.list); | ||
| 4978 | kfree(counter); | ||
| 4979 | } | ||
| 4980 | } | ||
| 4981 | spin_unlock_irq(mlx4_tlock(dev)); | ||
| 4982 | |||
| 4983 | while (j < i) { | ||
| 4984 | __mlx4_counter_free(dev, counters_arr[j++]); | ||
| 4971 | mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); | 4985 | mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); |
| 4972 | } | 4986 | } |
| 4973 | } | 4987 | } while (i); |
| 4974 | spin_unlock_irq(mlx4_tlock(dev)); | 4988 | |
| 4989 | kfree(counters_arr); | ||
| 4975 | } | 4990 | } |
| 4976 | 4991 | ||
| 4977 | static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) | 4992 | static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f2ae62dd8c09..22e72bf1ae48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb { | |||
| 334 | 334 | ||
| 335 | #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) | 335 | #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) |
| 336 | 336 | ||
| 337 | enum mlx5e_dma_map_type { | ||
| 338 | MLX5E_DMA_MAP_SINGLE, | ||
| 339 | MLX5E_DMA_MAP_PAGE | ||
| 340 | }; | ||
| 341 | |||
| 337 | struct mlx5e_sq_dma { | 342 | struct mlx5e_sq_dma { |
| 338 | dma_addr_t addr; | 343 | dma_addr_t addr; |
| 339 | u32 size; | 344 | u32 size; |
| 345 | enum mlx5e_dma_map_type type; | ||
| 340 | }; | 346 | }; |
| 341 | 347 | ||
| 342 | enum { | 348 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5fc4d2d78cdf..1e52db32c73d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) | |||
| 1332 | return err; | 1332 | return err; |
| 1333 | } | 1333 | } |
| 1334 | 1334 | ||
| 1335 | static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, | ||
| 1336 | u32 tirn) | ||
| 1337 | { | ||
| 1338 | void *in; | ||
| 1339 | int inlen; | ||
| 1340 | int err; | ||
| 1341 | |||
| 1342 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | ||
| 1343 | in = mlx5_vzalloc(inlen); | ||
| 1344 | if (!in) | ||
| 1345 | return -ENOMEM; | ||
| 1346 | |||
| 1347 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); | ||
| 1348 | |||
| 1349 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); | ||
| 1350 | |||
| 1351 | kvfree(in); | ||
| 1352 | |||
| 1353 | return err; | ||
| 1354 | } | ||
| 1355 | |||
| 1356 | static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) | ||
| 1357 | { | ||
| 1358 | int err; | ||
| 1359 | int i; | ||
| 1360 | |||
| 1361 | for (i = 0; i < MLX5E_NUM_TT; i++) { | ||
| 1362 | err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev, | ||
| 1363 | priv->tirn[i]); | ||
| 1364 | if (err) | ||
| 1365 | return err; | ||
| 1366 | } | ||
| 1367 | |||
| 1368 | return 0; | ||
| 1369 | } | ||
| 1370 | |||
| 1335 | static int mlx5e_set_dev_port_mtu(struct net_device *netdev) | 1371 | static int mlx5e_set_dev_port_mtu(struct net_device *netdev) |
| 1336 | { | 1372 | { |
| 1337 | struct mlx5e_priv *priv = netdev_priv(netdev); | 1373 | struct mlx5e_priv *priv = netdev_priv(netdev); |
| @@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev) | |||
| 1376 | goto err_clear_state_opened_flag; | 1412 | goto err_clear_state_opened_flag; |
| 1377 | } | 1413 | } |
| 1378 | 1414 | ||
| 1415 | err = mlx5e_refresh_tirs_self_loopback_enable(priv); | ||
| 1416 | if (err) { | ||
| 1417 | netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", | ||
| 1418 | __func__, err); | ||
| 1419 | goto err_close_channels; | ||
| 1420 | } | ||
| 1421 | |||
| 1379 | mlx5e_update_carrier(priv); | 1422 | mlx5e_update_carrier(priv); |
| 1380 | mlx5e_redirect_rqts(priv); | 1423 | mlx5e_redirect_rqts(priv); |
| 1381 | 1424 | ||
| @@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev) | |||
| 1383 | 1426 | ||
| 1384 | return 0; | 1427 | return 0; |
| 1385 | 1428 | ||
| 1429 | err_close_channels: | ||
| 1430 | mlx5e_close_channels(priv); | ||
| 1386 | err_clear_state_opened_flag: | 1431 | err_clear_state_opened_flag: |
| 1387 | clear_bit(MLX5E_STATE_OPENED, &priv->state); | 1432 | clear_bit(MLX5E_STATE_OPENED, &priv->state); |
| 1388 | return err; | 1433 | return err; |
| @@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 1856 | 1901 | ||
| 1857 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); | 1902 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); |
| 1858 | 1903 | ||
| 1904 | max_mtu = MLX5E_HW2SW_MTU(max_mtu); | ||
| 1905 | |||
| 1859 | if (new_mtu > max_mtu) { | 1906 | if (new_mtu > max_mtu) { |
| 1860 | netdev_err(netdev, | 1907 | netdev_err(netdev, |
| 1861 | "%s: Bad MTU (%d) > (%d) Max\n", | 1908 | "%s: Bad MTU (%d) > (%d) Max\n", |
| @@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) | |||
| 1909 | "Not creating net device, some required device capabilities are missing\n"); | 1956 | "Not creating net device, some required device capabilities are missing\n"); |
| 1910 | return -ENOTSUPP; | 1957 | return -ENOTSUPP; |
| 1911 | } | 1958 | } |
| 1959 | if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) | ||
| 1960 | mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); | ||
| 1961 | |||
| 1912 | return 0; | 1962 | return 0; |
| 1913 | } | 1963 | } |
| 1914 | 1964 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index cd8f85a251d7..1341b1d3c421 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, | 64 | static inline void mlx5e_tx_dma_unmap(struct device *pdev, |
| 65 | u32 *size) | 65 | struct mlx5e_sq_dma *dma) |
| 66 | { | 66 | { |
| 67 | sq->dma_fifo_pc--; | 67 | switch (dma->type) { |
| 68 | *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; | 68 | case MLX5E_DMA_MAP_SINGLE: |
| 69 | *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; | 69 | dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE); |
| 70 | } | 70 | break; |
| 71 | 71 | case MLX5E_DMA_MAP_PAGE: | |
| 72 | static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) | 72 | dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE); |
| 73 | { | 73 | break; |
| 74 | dma_addr_t addr; | 74 | default: |
| 75 | u32 size; | 75 | WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n"); |
| 76 | int i; | ||
| 77 | |||
| 78 | for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { | ||
| 79 | mlx5e_dma_pop_last_pushed(sq, &addr, &size); | ||
| 80 | dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); | ||
| 81 | } | 76 | } |
| 82 | } | 77 | } |
| 83 | 78 | ||
| 84 | static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, | 79 | static inline void mlx5e_dma_push(struct mlx5e_sq *sq, |
| 85 | u32 size) | 80 | dma_addr_t addr, |
| 81 | u32 size, | ||
| 82 | enum mlx5e_dma_map_type map_type) | ||
| 86 | { | 83 | { |
| 87 | sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; | 84 | sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; |
| 88 | sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; | 85 | sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; |
| 86 | sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type; | ||
| 89 | sq->dma_fifo_pc++; | 87 | sq->dma_fifo_pc++; |
| 90 | } | 88 | } |
| 91 | 89 | ||
| 92 | static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, | 90 | static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i) |
| 93 | u32 *size) | ||
| 94 | { | 91 | { |
| 95 | *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; | 92 | return &sq->dma_fifo[i & sq->dma_fifo_mask]; |
| 96 | *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; | 93 | } |
| 94 | |||
| 95 | static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) | ||
| 96 | { | ||
| 97 | int i; | ||
| 98 | |||
| 99 | for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { | ||
| 100 | struct mlx5e_sq_dma *last_pushed_dma = | ||
| 101 | mlx5e_dma_get(sq, --sq->dma_fifo_pc); | ||
| 102 | |||
| 103 | mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); | ||
| 104 | } | ||
| 97 | } | 105 | } |
| 98 | 106 | ||
| 99 | u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, | 107 | u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, |
| @@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, | |||
| 118 | */ | 126 | */ |
| 119 | #define MLX5E_MIN_INLINE ETH_HLEN | 127 | #define MLX5E_MIN_INLINE ETH_HLEN |
| 120 | 128 | ||
| 121 | if (bf && (skb_headlen(skb) <= sq->max_inline)) | 129 | if (bf) { |
| 122 | return skb_headlen(skb); | 130 | u16 ihs = skb_headlen(skb); |
| 131 | |||
| 132 | if (skb_vlan_tag_present(skb)) | ||
| 133 | ihs += VLAN_HLEN; | ||
| 134 | |||
| 135 | if (ihs <= sq->max_inline) | ||
| 136 | return skb_headlen(skb); | ||
| 137 | } | ||
| 123 | 138 | ||
| 124 | return MLX5E_MIN_INLINE; | 139 | return MLX5E_MIN_INLINE; |
| 125 | } | 140 | } |
| @@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 218 | dseg->lkey = sq->mkey_be; | 233 | dseg->lkey = sq->mkey_be; |
| 219 | dseg->byte_count = cpu_to_be32(headlen); | 234 | dseg->byte_count = cpu_to_be32(headlen); |
| 220 | 235 | ||
| 221 | mlx5e_dma_push(sq, dma_addr, headlen); | 236 | mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE); |
| 222 | MLX5E_TX_SKB_CB(skb)->num_dma++; | 237 | MLX5E_TX_SKB_CB(skb)->num_dma++; |
| 223 | 238 | ||
| 224 | dseg++; | 239 | dseg++; |
| @@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 237 | dseg->lkey = sq->mkey_be; | 252 | dseg->lkey = sq->mkey_be; |
| 238 | dseg->byte_count = cpu_to_be32(fsz); | 253 | dseg->byte_count = cpu_to_be32(fsz); |
| 239 | 254 | ||
| 240 | mlx5e_dma_push(sq, dma_addr, fsz); | 255 | mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); |
| 241 | MLX5E_TX_SKB_CB(skb)->num_dma++; | 256 | MLX5E_TX_SKB_CB(skb)->num_dma++; |
| 242 | 257 | ||
| 243 | dseg++; | 258 | dseg++; |
| @@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) | |||
| 353 | } | 368 | } |
| 354 | 369 | ||
| 355 | for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { | 370 | for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { |
| 356 | dma_addr_t addr; | 371 | struct mlx5e_sq_dma *dma = |
| 357 | u32 size; | 372 | mlx5e_dma_get(sq, dma_fifo_cc++); |
| 358 | 373 | ||
| 359 | mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); | 374 | mlx5e_tx_dma_unmap(sq->pdev, dma); |
| 360 | dma_fifo_cc++; | ||
| 361 | dma_unmap_single(sq->pdev, addr, size, | ||
| 362 | DMA_TO_DEVICE); | ||
| 363 | } | 375 | } |
| 364 | 376 | ||
| 365 | npkts++; | 377 | npkts++; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b4f21232019a..79ef799f88ab 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -7429,15 +7429,15 @@ process_pkt: | |||
| 7429 | 7429 | ||
| 7430 | rtl8169_rx_vlan_tag(desc, skb); | 7430 | rtl8169_rx_vlan_tag(desc, skb); |
| 7431 | 7431 | ||
| 7432 | if (skb->pkt_type == PACKET_MULTICAST) | ||
| 7433 | dev->stats.multicast++; | ||
| 7434 | |||
| 7432 | napi_gro_receive(&tp->napi, skb); | 7435 | napi_gro_receive(&tp->napi, skb); |
| 7433 | 7436 | ||
| 7434 | u64_stats_update_begin(&tp->rx_stats.syncp); | 7437 | u64_stats_update_begin(&tp->rx_stats.syncp); |
| 7435 | tp->rx_stats.packets++; | 7438 | tp->rx_stats.packets++; |
| 7436 | tp->rx_stats.bytes += pkt_size; | 7439 | tp->rx_stats.bytes += pkt_size; |
| 7437 | u64_stats_update_end(&tp->rx_stats.syncp); | 7440 | u64_stats_update_end(&tp->rx_stats.syncp); |
| 7438 | |||
| 7439 | if (skb->pkt_type == PACKET_MULTICAST) | ||
| 7440 | dev->stats.multicast++; | ||
| 7441 | } | 7441 | } |
| 7442 | release_descriptor: | 7442 | release_descriptor: |
| 7443 | desc->opts2 = 0; | 7443 | desc->opts2 = 0; |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aa7b2083cb53..ee8d1ec61fab 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev) | |||
| 408 | /* Interrupt enable: */ | 408 | /* Interrupt enable: */ |
| 409 | /* Frame receive */ | 409 | /* Frame receive */ |
| 410 | ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); | 410 | ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); |
| 411 | /* Receive FIFO full warning */ | ||
| 412 | ravb_write(ndev, RIC1_RFWE, RIC1); | ||
| 413 | /* Receive FIFO full error, descriptor empty */ | 411 | /* Receive FIFO full error, descriptor empty */ |
| 414 | ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); | 412 | ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); |
| 415 | /* Frame transmitted, timestamp FIFO updated */ | 413 | /* Frame transmitted, timestamp FIFO updated */ |
| @@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) | |||
| 733 | ((tis & tic) & BIT(q))) { | 731 | ((tis & tic) & BIT(q))) { |
| 734 | if (napi_schedule_prep(&priv->napi[q])) { | 732 | if (napi_schedule_prep(&priv->napi[q])) { |
| 735 | /* Mask RX and TX interrupts */ | 733 | /* Mask RX and TX interrupts */ |
| 736 | ravb_write(ndev, ric0 & ~BIT(q), RIC0); | 734 | ric0 &= ~BIT(q); |
| 737 | ravb_write(ndev, tic & ~BIT(q), TIC); | 735 | tic &= ~BIT(q); |
| 736 | ravb_write(ndev, ric0, RIC0); | ||
| 737 | ravb_write(ndev, tic, TIC); | ||
| 738 | __napi_schedule(&priv->napi[q]); | 738 | __napi_schedule(&priv->napi[q]); |
| 739 | } else { | 739 | } else { |
| 740 | netdev_warn(ndev, | 740 | netdev_warn(ndev, |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index d288f1c928de..a3c42a376741 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -3422,7 +3422,7 @@ out: | |||
| 3422 | * with our request for slot reset the mmio_enabled callback will never be | 3422 | * with our request for slot reset the mmio_enabled callback will never be |
| 3423 | * called, and the link_reset callback is not used by AER or EEH mechanisms. | 3423 | * called, and the link_reset callback is not used by AER or EEH mechanisms. |
| 3424 | */ | 3424 | */ |
| 3425 | static struct pci_error_handlers efx_err_handlers = { | 3425 | static const struct pci_error_handlers efx_err_handlers = { |
| 3426 | .error_detected = efx_io_error_detected, | 3426 | .error_detected = efx_io_error_detected, |
| 3427 | .slot_reset = efx_io_slot_reset, | 3427 | .slot_reset = efx_io_slot_reset, |
| 3428 | .resume = efx_io_resume, | 3428 | .resume = efx_io_resume, |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index c860c9007e49..219a99b7a631 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
| @@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) | |||
| 809 | 809 | ||
| 810 | static int smsc911x_phy_reset(struct smsc911x_data *pdata) | 810 | static int smsc911x_phy_reset(struct smsc911x_data *pdata) |
| 811 | { | 811 | { |
| 812 | struct phy_device *phy_dev = pdata->phy_dev; | ||
| 813 | unsigned int temp; | 812 | unsigned int temp; |
| 814 | unsigned int i = 100000; | 813 | unsigned int i = 100000; |
| 815 | 814 | ||
| 816 | BUG_ON(!phy_dev); | 815 | temp = smsc911x_reg_read(pdata, PMT_CTRL); |
| 817 | BUG_ON(!phy_dev->bus); | 816 | smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_); |
| 818 | |||
| 819 | SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); | ||
| 820 | smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); | ||
| 821 | do { | 817 | do { |
| 822 | msleep(1); | 818 | msleep(1); |
| 823 | temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, | 819 | temp = smsc911x_reg_read(pdata, PMT_CTRL); |
| 824 | MII_BMCR); | 820 | } while ((i--) && (temp & PMT_CTRL_PHY_RST_)); |
| 825 | } while ((i--) && (temp & BMCR_RESET)); | ||
| 826 | 821 | ||
| 827 | if (temp & BMCR_RESET) { | 822 | if (unlikely(temp & PMT_CTRL_PHY_RST_)) { |
| 828 | SMSC_WARN(pdata, hw, "PHY reset failed to complete"); | 823 | SMSC_WARN(pdata, hw, "PHY reset failed to complete"); |
| 829 | return -EIO; | 824 | return -EIO; |
| 830 | } | 825 | } |
| @@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev) | |||
| 2296 | } | 2291 | } |
| 2297 | 2292 | ||
| 2298 | /* Reset the LAN911x */ | 2293 | /* Reset the LAN911x */ |
| 2299 | if (smsc911x_soft_reset(pdata)) | 2294 | if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata)) |
| 2300 | return -ENODEV; | 2295 | return -ENODEV; |
| 2301 | 2296 | ||
| 2302 | dev->flags |= IFF_MULTICAST; | 2297 | dev->flags |= IFF_MULTICAST; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 9d89bdbf029f..82de68b1a452 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | |||
| @@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) | |||
| 337 | QSGMII_PHY_RX_SIGNAL_DETECT_EN | | 337 | QSGMII_PHY_RX_SIGNAL_DETECT_EN | |
| 338 | QSGMII_PHY_TX_DRIVER_EN | | 338 | QSGMII_PHY_TX_DRIVER_EN | |
| 339 | QSGMII_PHY_QSGMII_EN | | 339 | QSGMII_PHY_QSGMII_EN | |
| 340 | 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | | 340 | 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | |
| 341 | 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | | 341 | 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET | |
| 342 | 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | | 342 | 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET | |
| 343 | 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | | 343 | 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET | |
| 344 | 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); | 344 | 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET); |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | plat_dat->has_gmac = true; | 347 | plat_dat->has_gmac = true; |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index ae68afd50a15..f38696ceee74 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
| @@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability"); | |||
| 345 | */ | 345 | */ |
| 346 | VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); | 346 | VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); |
| 347 | 347 | ||
| 348 | #define VAL_PKT_LEN_DEF 0 | ||
| 349 | /* ValPktLen[] is used for setting the checksum offload ability of NIC. | ||
| 350 | 0: Receive frame with invalid layer 2 length (Default) | ||
| 351 | 1: Drop frame with invalid layer 2 length | ||
| 352 | */ | ||
| 353 | VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); | ||
| 354 | |||
| 355 | #define WOL_OPT_DEF 0 | 348 | #define WOL_OPT_DEF 0 |
| 356 | #define WOL_OPT_MIN 0 | 349 | #define WOL_OPT_MIN 0 |
| 357 | #define WOL_OPT_MAX 7 | 350 | #define WOL_OPT_MAX 7 |
| @@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index, | |||
| 494 | 487 | ||
| 495 | velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); | 488 | velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); |
| 496 | velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); | 489 | velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); |
| 497 | velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); | ||
| 498 | velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); | 490 | velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); |
| 499 | velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); | 491 | velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); |
| 500 | opts->numrx = (opts->numrx & ~3); | 492 | opts->numrx = (opts->numrx & ~3); |
| @@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
| 2055 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; | 2047 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; |
| 2056 | struct sk_buff *skb; | 2048 | struct sk_buff *skb; |
| 2057 | 2049 | ||
| 2058 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { | 2050 | if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) { |
| 2059 | VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); | 2051 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) |
| 2052 | VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); | ||
| 2060 | stats->rx_length_errors++; | 2053 | stats->rx_length_errors++; |
| 2061 | return -EINVAL; | 2054 | return -EINVAL; |
| 2062 | } | 2055 | } |
| @@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
| 2069 | dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, | 2062 | dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, |
| 2070 | vptr->rx.buf_sz, DMA_FROM_DEVICE); | 2063 | vptr->rx.buf_sz, DMA_FROM_DEVICE); |
| 2071 | 2064 | ||
| 2072 | /* | ||
| 2073 | * Drop frame not meeting IEEE 802.3 | ||
| 2074 | */ | ||
| 2075 | |||
| 2076 | if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { | ||
| 2077 | if (rd->rdesc0.RSR & RSR_RL) { | ||
| 2078 | stats->rx_length_errors++; | ||
| 2079 | return -EINVAL; | ||
| 2080 | } | ||
| 2081 | } | ||
| 2082 | |||
| 2083 | velocity_rx_csum(rd, skb); | 2065 | velocity_rx_csum(rd, skb); |
| 2084 | 2066 | ||
| 2085 | if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { | 2067 | if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { |
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index bb8b5304d851..b103adb8d62e 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c | |||
| @@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid) | |||
| 599 | FJES_CMD_REQ_RES_CODE_BUSY) && | 599 | FJES_CMD_REQ_RES_CODE_BUSY) && |
| 600 | (timeout > 0)) { | 600 | (timeout > 0)) { |
| 601 | msleep(200 + hw->my_epid * 20); | 601 | msleep(200 + hw->my_epid * 20); |
| 602 | timeout -= (200 + hw->my_epid * 20); | 602 | timeout -= (200 + hw->my_epid * 20); |
| 603 | 603 | ||
| 604 | res_buf->unshare_buffer.length = 0; | 604 | res_buf->unshare_buffer.length = 0; |
| 605 | res_buf->unshare_buffer.code = 0; | 605 | res_buf->unshare_buffer.code = 0; |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index d50887e3df6d..8c48bb2a94ea 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
| @@ -254,7 +254,7 @@ acct: | |||
| 254 | } | 254 | } |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, | 257 | static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, |
| 258 | bool local) | 258 | bool local) |
| 259 | { | 259 | { |
| 260 | struct ipvl_dev *ipvlan = addr->master; | 260 | struct ipvl_dev *ipvlan = addr->master; |
| @@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, | |||
| 262 | unsigned int len; | 262 | unsigned int len; |
| 263 | rx_handler_result_t ret = RX_HANDLER_CONSUMED; | 263 | rx_handler_result_t ret = RX_HANDLER_CONSUMED; |
| 264 | bool success = false; | 264 | bool success = false; |
| 265 | struct sk_buff *skb = *pskb; | ||
| 265 | 266 | ||
| 266 | len = skb->len + ETH_HLEN; | 267 | len = skb->len + ETH_HLEN; |
| 267 | if (unlikely(!(dev->flags & IFF_UP))) { | 268 | if (unlikely(!(dev->flags & IFF_UP))) { |
| @@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, | |||
| 273 | if (!skb) | 274 | if (!skb) |
| 274 | goto out; | 275 | goto out; |
| 275 | 276 | ||
| 277 | *pskb = skb; | ||
| 276 | skb->dev = dev; | 278 | skb->dev = dev; |
| 277 | skb->pkt_type = PACKET_HOST; | 279 | skb->pkt_type = PACKET_HOST; |
| 278 | 280 | ||
| @@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) | |||
| 486 | 488 | ||
| 487 | addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); | 489 | addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); |
| 488 | if (addr) | 490 | if (addr) |
| 489 | return ipvlan_rcv_frame(addr, skb, true); | 491 | return ipvlan_rcv_frame(addr, &skb, true); |
| 490 | 492 | ||
| 491 | out: | 493 | out: |
| 492 | skb->dev = ipvlan->phy_dev; | 494 | skb->dev = ipvlan->phy_dev; |
| @@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) | |||
| 506 | if (lyr3h) { | 508 | if (lyr3h) { |
| 507 | addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); | 509 | addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); |
| 508 | if (addr) | 510 | if (addr) |
| 509 | return ipvlan_rcv_frame(addr, skb, true); | 511 | return ipvlan_rcv_frame(addr, &skb, true); |
| 510 | } | 512 | } |
| 511 | skb = skb_share_check(skb, GFP_ATOMIC); | 513 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 512 | if (!skb) | 514 | if (!skb) |
| @@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, | |||
| 589 | 591 | ||
| 590 | addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); | 592 | addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); |
| 591 | if (addr) | 593 | if (addr) |
| 592 | ret = ipvlan_rcv_frame(addr, skb, false); | 594 | ret = ipvlan_rcv_frame(addr, pskb, false); |
| 593 | 595 | ||
| 594 | out: | 596 | out: |
| 595 | return ret; | 597 | return ret; |
| @@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, | |||
| 626 | 628 | ||
| 627 | addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); | 629 | addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); |
| 628 | if (addr) | 630 | if (addr) |
| 629 | ret = ipvlan_rcv_frame(addr, skb, false); | 631 | ret = ipvlan_rcv_frame(addr, pskb, false); |
| 630 | } | 632 | } |
| 631 | 633 | ||
| 632 | return ret; | 634 | return ret; |
| @@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) | |||
| 651 | WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", | 653 | WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", |
| 652 | port->mode); | 654 | port->mode); |
| 653 | kfree_skb(skb); | 655 | kfree_skb(skb); |
| 654 | return NET_RX_DROP; | 656 | return RX_HANDLER_CONSUMED; |
| 655 | } | 657 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 86f6c6292c27..06c8bfeaccd6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
| 415 | skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); | 415 | skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); |
| 416 | if (!skb) | 416 | if (!skb) |
| 417 | return RX_HANDLER_CONSUMED; | 417 | return RX_HANDLER_CONSUMED; |
| 418 | *pskb = skb; | ||
| 418 | eth = eth_hdr(skb); | 419 | eth = eth_hdr(skb); |
| 419 | macvlan_forward_source(skb, port, eth->h_source); | 420 | macvlan_forward_source(skb, port, eth->h_source); |
| 420 | src = macvlan_hash_lookup(port, eth->h_source); | 421 | src = macvlan_hash_lookup(port, eth->h_source); |
| @@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
| 456 | goto out; | 457 | goto out; |
| 457 | } | 458 | } |
| 458 | 459 | ||
| 460 | *pskb = skb; | ||
| 459 | skb->dev = dev; | 461 | skb->dev = dev; |
| 460 | skb->pkt_type = PACKET_HOST; | 462 | skb->pkt_type = PACKET_HOST; |
| 461 | 463 | ||
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index fabf11d32d27..2d020a3ec0b5 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c | |||
| @@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = { | |||
| 308 | .flags = PHY_HAS_INTERRUPT, | 308 | .flags = PHY_HAS_INTERRUPT, |
| 309 | .config_aneg = genphy_config_aneg, | 309 | .config_aneg = genphy_config_aneg, |
| 310 | .read_status = genphy_read_status, | 310 | .read_status = genphy_read_status, |
| 311 | .ack_interrupt = at803x_ack_interrupt, | ||
| 312 | .config_intr = at803x_config_intr, | ||
| 311 | .driver = { | 313 | .driver = { |
| 312 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
| 313 | }, | 315 | }, |
| @@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = { | |||
| 327 | .flags = PHY_HAS_INTERRUPT, | 329 | .flags = PHY_HAS_INTERRUPT, |
| 328 | .config_aneg = genphy_config_aneg, | 330 | .config_aneg = genphy_config_aneg, |
| 329 | .read_status = genphy_read_status, | 331 | .read_status = genphy_read_status, |
| 332 | .ack_interrupt = at803x_ack_interrupt, | ||
| 333 | .config_intr = at803x_config_intr, | ||
| 330 | .driver = { | 334 | .driver = { |
| 331 | .owner = THIS_MODULE, | 335 | .owner = THIS_MODULE, |
| 332 | }, | 336 | }, |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5de8d5827536..0240552b50f3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
| @@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = { | |||
| 1154 | .driver = { .owner = THIS_MODULE }, | 1154 | .driver = { .owner = THIS_MODULE }, |
| 1155 | }, | 1155 | }, |
| 1156 | { | 1156 | { |
| 1157 | .phy_id = MARVELL_PHY_ID_88E1540, | ||
| 1158 | .phy_id_mask = MARVELL_PHY_ID_MASK, | ||
| 1159 | .name = "Marvell 88E1540", | ||
| 1160 | .features = PHY_GBIT_FEATURES, | ||
| 1161 | .flags = PHY_HAS_INTERRUPT, | ||
| 1162 | .config_aneg = &m88e1510_config_aneg, | ||
| 1163 | .read_status = &marvell_read_status, | ||
| 1164 | .ack_interrupt = &marvell_ack_interrupt, | ||
| 1165 | .config_intr = &marvell_config_intr, | ||
| 1166 | .did_interrupt = &m88e1121_did_interrupt, | ||
| 1167 | .resume = &genphy_resume, | ||
| 1168 | .suspend = &genphy_suspend, | ||
| 1169 | .driver = { .owner = THIS_MODULE }, | ||
| 1170 | }, | ||
| 1171 | { | ||
| 1157 | .phy_id = MARVELL_PHY_ID_88E3016, | 1172 | .phy_id = MARVELL_PHY_ID_88E3016, |
| 1158 | .phy_id_mask = MARVELL_PHY_ID_MASK, | 1173 | .phy_id_mask = MARVELL_PHY_ID_MASK, |
| 1159 | .name = "Marvell 88E3016", | 1174 | .name = "Marvell 88E3016", |
| @@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { | |||
| 1186 | { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, | 1201 | { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, |
| 1187 | { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, | 1202 | { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, |
| 1188 | { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, | 1203 | { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, |
| 1204 | { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, | ||
| 1189 | { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, | 1205 | { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, |
| 1190 | { } | 1206 | { } |
| 1191 | }; | 1207 | }; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index adb48abafc87..48ce6ef400fe 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -863,6 +863,9 @@ void phy_state_machine(struct work_struct *work) | |||
| 863 | needs_aneg = true; | 863 | needs_aneg = true; |
| 864 | break; | 864 | break; |
| 865 | case PHY_NOLINK: | 865 | case PHY_NOLINK: |
| 866 | if (phy_interrupt_is_valid(phydev)) | ||
| 867 | break; | ||
| 868 | |||
| 866 | err = phy_read_status(phydev); | 869 | err = phy_read_status(phydev); |
| 867 | if (err) | 870 | if (err) |
| 868 | break; | 871 | break; |
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 76cad712ddb2..dd295dbaa074 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c | |||
| @@ -66,6 +66,7 @@ | |||
| 66 | #define PHY_ID_VSC8244 0x000fc6c0 | 66 | #define PHY_ID_VSC8244 0x000fc6c0 |
| 67 | #define PHY_ID_VSC8514 0x00070670 | 67 | #define PHY_ID_VSC8514 0x00070670 |
| 68 | #define PHY_ID_VSC8574 0x000704a0 | 68 | #define PHY_ID_VSC8574 0x000704a0 |
| 69 | #define PHY_ID_VSC8601 0x00070420 | ||
| 69 | #define PHY_ID_VSC8662 0x00070660 | 70 | #define PHY_ID_VSC8662 0x00070660 |
| 70 | #define PHY_ID_VSC8221 0x000fc550 | 71 | #define PHY_ID_VSC8221 0x000fc550 |
| 71 | #define PHY_ID_VSC8211 0x000fc4b0 | 72 | #define PHY_ID_VSC8211 0x000fc4b0 |
| @@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev) | |||
| 133 | (phydev->drv->phy_id == PHY_ID_VSC8234 || | 134 | (phydev->drv->phy_id == PHY_ID_VSC8234 || |
| 134 | phydev->drv->phy_id == PHY_ID_VSC8244 || | 135 | phydev->drv->phy_id == PHY_ID_VSC8244 || |
| 135 | phydev->drv->phy_id == PHY_ID_VSC8514 || | 136 | phydev->drv->phy_id == PHY_ID_VSC8514 || |
| 136 | phydev->drv->phy_id == PHY_ID_VSC8574) ? | 137 | phydev->drv->phy_id == PHY_ID_VSC8574 || |
| 138 | phydev->drv->phy_id == PHY_ID_VSC8601) ? | ||
| 137 | MII_VSC8244_IMASK_MASK : | 139 | MII_VSC8244_IMASK_MASK : |
| 138 | MII_VSC8221_IMASK_MASK); | 140 | MII_VSC8221_IMASK_MASK); |
| 139 | else { | 141 | else { |
| @@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = { | |||
| 272 | .config_intr = &vsc82xx_config_intr, | 274 | .config_intr = &vsc82xx_config_intr, |
| 273 | .driver = { .owner = THIS_MODULE,}, | 275 | .driver = { .owner = THIS_MODULE,}, |
| 274 | }, { | 276 | }, { |
| 277 | .phy_id = PHY_ID_VSC8601, | ||
| 278 | .name = "Vitesse VSC8601", | ||
| 279 | .phy_id_mask = 0x000ffff0, | ||
| 280 | .features = PHY_GBIT_FEATURES, | ||
| 281 | .flags = PHY_HAS_INTERRUPT, | ||
| 282 | .config_init = &genphy_config_init, | ||
| 283 | .config_aneg = &genphy_config_aneg, | ||
| 284 | .read_status = &genphy_read_status, | ||
| 285 | .ack_interrupt = &vsc824x_ack_interrupt, | ||
| 286 | .config_intr = &vsc82xx_config_intr, | ||
| 287 | .driver = { .owner = THIS_MODULE,}, | ||
| 288 | }, { | ||
| 275 | .phy_id = PHY_ID_VSC8662, | 289 | .phy_id = PHY_ID_VSC8662, |
| 276 | .name = "Vitesse VSC8662", | 290 | .name = "Vitesse VSC8662", |
| 277 | .phy_id_mask = 0x000ffff0, | 291 | .phy_id_mask = 0x000ffff0, |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c78d3cb1b464..3da70bf9936a 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -696,6 +696,11 @@ static const struct usb_device_id products[] = { | |||
| 696 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | 696 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), |
| 697 | .driver_info = (kernel_ulong_t) &wwan_info, | 697 | .driver_info = (kernel_ulong_t) &wwan_info, |
| 698 | }, { | 698 | }, { |
| 699 | /* Dell DW5580 modules */ | ||
| 700 | USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM, | ||
| 701 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 702 | .driver_info = (kernel_ulong_t)&wwan_info, | ||
| 703 | }, { | ||
| 699 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, | 704 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, |
| 700 | USB_CDC_PROTO_NONE), | 705 | USB_CDC_PROTO_NONE), |
| 701 | .driver_info = (unsigned long) &cdc_info, | 706 | .driver_info = (unsigned long) &cdc_info, |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 46f4caddccbe..899ea4288197 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -2157,12 +2157,13 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
| 2157 | if (!netdev_mc_empty(netdev)) { | 2157 | if (!netdev_mc_empty(netdev)) { |
| 2158 | new_table = vmxnet3_copy_mc(netdev); | 2158 | new_table = vmxnet3_copy_mc(netdev); |
| 2159 | if (new_table) { | 2159 | if (new_table) { |
| 2160 | rxConf->mfTableLen = cpu_to_le16( | 2160 | size_t sz = netdev_mc_count(netdev) * ETH_ALEN; |
| 2161 | netdev_mc_count(netdev) * ETH_ALEN); | 2161 | |
| 2162 | rxConf->mfTableLen = cpu_to_le16(sz); | ||
| 2162 | new_table_pa = dma_map_single( | 2163 | new_table_pa = dma_map_single( |
| 2163 | &adapter->pdev->dev, | 2164 | &adapter->pdev->dev, |
| 2164 | new_table, | 2165 | new_table, |
| 2165 | rxConf->mfTableLen, | 2166 | sz, |
| 2166 | PCI_DMA_TODEVICE); | 2167 | PCI_DMA_TODEVICE); |
| 2167 | } | 2168 | } |
| 2168 | 2169 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3f859a55c035..4c58c83dc225 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040300 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040400 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..9202d1a468d0 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
| @@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl { | |||
| 93 | __le16 cdw14[6]; | 93 | __le16 cdw14[6]; |
| 94 | }; | 94 | }; |
| 95 | 95 | ||
| 96 | struct nvme_nvm_bbtbl { | 96 | struct nvme_nvm_getbbtbl { |
| 97 | __u8 opcode; | 97 | __u8 opcode; |
| 98 | __u8 flags; | 98 | __u8 flags; |
| 99 | __u16 command_id; | 99 | __u16 command_id; |
| @@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl { | |||
| 101 | __u64 rsvd[2]; | 101 | __u64 rsvd[2]; |
| 102 | __le64 prp1; | 102 | __le64 prp1; |
| 103 | __le64 prp2; | 103 | __le64 prp2; |
| 104 | __le32 prp1_len; | 104 | __le64 spba; |
| 105 | __le32 prp2_len; | 105 | __u32 rsvd4[4]; |
| 106 | __le32 lbb; | 106 | }; |
| 107 | __u32 rsvd11[3]; | 107 | |
| 108 | struct nvme_nvm_setbbtbl { | ||
| 109 | __u8 opcode; | ||
| 110 | __u8 flags; | ||
| 111 | __u16 command_id; | ||
| 112 | __le32 nsid; | ||
| 113 | __le64 rsvd[2]; | ||
| 114 | __le64 prp1; | ||
| 115 | __le64 prp2; | ||
| 116 | __le64 spba; | ||
| 117 | __le16 nlb; | ||
| 118 | __u8 value; | ||
| 119 | __u8 rsvd3; | ||
| 120 | __u32 rsvd4[3]; | ||
| 108 | }; | 121 | }; |
| 109 | 122 | ||
| 110 | struct nvme_nvm_erase_blk { | 123 | struct nvme_nvm_erase_blk { |
| @@ -129,8 +142,8 @@ struct nvme_nvm_command { | |||
| 129 | struct nvme_nvm_hb_rw hb_rw; | 142 | struct nvme_nvm_hb_rw hb_rw; |
| 130 | struct nvme_nvm_ph_rw ph_rw; | 143 | struct nvme_nvm_ph_rw ph_rw; |
| 131 | struct nvme_nvm_l2ptbl l2p; | 144 | struct nvme_nvm_l2ptbl l2p; |
| 132 | struct nvme_nvm_bbtbl get_bb; | 145 | struct nvme_nvm_getbbtbl get_bb; |
| 133 | struct nvme_nvm_bbtbl set_bb; | 146 | struct nvme_nvm_setbbtbl set_bb; |
| 134 | struct nvme_nvm_erase_blk erase; | 147 | struct nvme_nvm_erase_blk erase; |
| 135 | }; | 148 | }; |
| 136 | }; | 149 | }; |
| @@ -142,11 +155,13 @@ struct nvme_nvm_id_group { | |||
| 142 | __u8 num_ch; | 155 | __u8 num_ch; |
| 143 | __u8 num_lun; | 156 | __u8 num_lun; |
| 144 | __u8 num_pln; | 157 | __u8 num_pln; |
| 158 | __u8 rsvd1; | ||
| 145 | __le16 num_blk; | 159 | __le16 num_blk; |
| 146 | __le16 num_pg; | 160 | __le16 num_pg; |
| 147 | __le16 fpg_sz; | 161 | __le16 fpg_sz; |
| 148 | __le16 csecs; | 162 | __le16 csecs; |
| 149 | __le16 sos; | 163 | __le16 sos; |
| 164 | __le16 rsvd2; | ||
| 150 | __le32 trdt; | 165 | __le32 trdt; |
| 151 | __le32 trdm; | 166 | __le32 trdm; |
| 152 | __le32 tprt; | 167 | __le32 tprt; |
| @@ -154,8 +169,9 @@ struct nvme_nvm_id_group { | |||
| 154 | __le32 tbet; | 169 | __le32 tbet; |
| 155 | __le32 tbem; | 170 | __le32 tbem; |
| 156 | __le32 mpos; | 171 | __le32 mpos; |
| 172 | __le32 mccap; | ||
| 157 | __le16 cpar; | 173 | __le16 cpar; |
| 158 | __u8 reserved[913]; | 174 | __u8 reserved[906]; |
| 159 | } __packed; | 175 | } __packed; |
| 160 | 176 | ||
| 161 | struct nvme_nvm_addr_format { | 177 | struct nvme_nvm_addr_format { |
| @@ -178,15 +194,28 @@ struct nvme_nvm_id { | |||
| 178 | __u8 ver_id; | 194 | __u8 ver_id; |
| 179 | __u8 vmnt; | 195 | __u8 vmnt; |
| 180 | __u8 cgrps; | 196 | __u8 cgrps; |
| 181 | __u8 res[5]; | 197 | __u8 res; |
| 182 | __le32 cap; | 198 | __le32 cap; |
| 183 | __le32 dom; | 199 | __le32 dom; |
| 184 | struct nvme_nvm_addr_format ppaf; | 200 | struct nvme_nvm_addr_format ppaf; |
| 185 | __u8 ppat; | 201 | __u8 resv[228]; |
| 186 | __u8 resv[223]; | ||
| 187 | struct nvme_nvm_id_group groups[4]; | 202 | struct nvme_nvm_id_group groups[4]; |
| 188 | } __packed; | 203 | } __packed; |
| 189 | 204 | ||
| 205 | struct nvme_nvm_bb_tbl { | ||
| 206 | __u8 tblid[4]; | ||
| 207 | __le16 verid; | ||
| 208 | __le16 revid; | ||
| 209 | __le32 rvsd1; | ||
| 210 | __le32 tblks; | ||
| 211 | __le32 tfact; | ||
| 212 | __le32 tgrown; | ||
| 213 | __le32 tdresv; | ||
| 214 | __le32 thresv; | ||
| 215 | __le32 rsvd2[8]; | ||
| 216 | __u8 blk[0]; | ||
| 217 | }; | ||
| 218 | |||
| 190 | /* | 219 | /* |
| 191 | * Check we didn't inadvertently grow the command struct | 220 | * Check we didn't inadvertently grow the command struct |
| 192 | */ | 221 | */ |
| @@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void) | |||
| 195 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); | 224 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); |
| 196 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); | 225 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); |
| 197 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); | 226 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); |
| 198 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); | 227 | BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); |
| 228 | BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); | ||
| 199 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); | 229 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); |
| 200 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); | 230 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); |
| 201 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); | 231 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); |
| 202 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); | 232 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); |
| 203 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); | 233 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); |
| 234 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); | ||
| 204 | } | 235 | } |
| 205 | 236 | ||
| 206 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | 237 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) |
| @@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
| 234 | dst->tbet = le32_to_cpu(src->tbet); | 265 | dst->tbet = le32_to_cpu(src->tbet); |
| 235 | dst->tbem = le32_to_cpu(src->tbem); | 266 | dst->tbem = le32_to_cpu(src->tbem); |
| 236 | dst->mpos = le32_to_cpu(src->mpos); | 267 | dst->mpos = le32_to_cpu(src->mpos); |
| 268 | dst->mccap = le32_to_cpu(src->mccap); | ||
| 237 | 269 | ||
| 238 | dst->cpar = le16_to_cpu(src->cpar); | 270 | dst->cpar = le16_to_cpu(src->cpar); |
| 239 | } | 271 | } |
| @@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
| 244 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | 276 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) |
| 245 | { | 277 | { |
| 246 | struct nvme_ns *ns = q->queuedata; | 278 | struct nvme_ns *ns = q->queuedata; |
| 279 | struct nvme_dev *dev = ns->dev; | ||
| 247 | struct nvme_nvm_id *nvme_nvm_id; | 280 | struct nvme_nvm_id *nvme_nvm_id; |
| 248 | struct nvme_nvm_command c = {}; | 281 | struct nvme_nvm_command c = {}; |
| 249 | int ret; | 282 | int ret; |
| @@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
| 256 | if (!nvme_nvm_id) | 289 | if (!nvme_nvm_id) |
| 257 | return -ENOMEM; | 290 | return -ENOMEM; |
| 258 | 291 | ||
| 259 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, | 292 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
| 260 | sizeof(struct nvme_nvm_id)); | 293 | nvme_nvm_id, sizeof(struct nvme_nvm_id)); |
| 261 | if (ret) { | 294 | if (ret) { |
| 262 | ret = -EIO; | 295 | ret = -EIO; |
| 263 | goto out; | 296 | goto out; |
| @@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
| 268 | nvm_id->cgrps = nvme_nvm_id->cgrps; | 301 | nvm_id->cgrps = nvme_nvm_id->cgrps; |
| 269 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); | 302 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); |
| 270 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); | 303 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); |
| 304 | memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, | ||
| 305 | sizeof(struct nvme_nvm_addr_format)); | ||
| 271 | 306 | ||
| 272 | ret = init_grps(nvm_id, nvme_nvm_id); | 307 | ret = init_grps(nvm_id, nvme_nvm_id); |
| 273 | out: | 308 | out: |
| @@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
| 281 | struct nvme_ns *ns = q->queuedata; | 316 | struct nvme_ns *ns = q->queuedata; |
| 282 | struct nvme_dev *dev = ns->dev; | 317 | struct nvme_dev *dev = ns->dev; |
| 283 | struct nvme_nvm_command c = {}; | 318 | struct nvme_nvm_command c = {}; |
| 284 | u32 len = queue_max_hw_sectors(q) << 9; | 319 | u32 len = queue_max_hw_sectors(dev->admin_q) << 9; |
| 285 | u32 nlb_pr_rq = len / sizeof(u64); | 320 | u32 nlb_pr_rq = len / sizeof(u64); |
| 286 | u64 cmd_slba = slba; | 321 | u64 cmd_slba = slba; |
| 287 | void *entries; | 322 | void *entries; |
| @@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
| 299 | c.l2p.slba = cpu_to_le64(cmd_slba); | 334 | c.l2p.slba = cpu_to_le64(cmd_slba); |
| 300 | c.l2p.nlb = cpu_to_le32(cmd_nlb); | 335 | c.l2p.nlb = cpu_to_le32(cmd_nlb); |
| 301 | 336 | ||
| 302 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, | 337 | ret = nvme_submit_sync_cmd(dev->admin_q, |
| 303 | entries, len); | 338 | (struct nvme_command *)&c, entries, len); |
| 304 | if (ret) { | 339 | if (ret) { |
| 305 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", | 340 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", |
| 306 | ret); | 341 | ret); |
| @@ -322,43 +357,82 @@ out: | |||
| 322 | return ret; | 357 | return ret; |
| 323 | } | 358 | } |
| 324 | 359 | ||
| 325 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, | 360 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, |
| 326 | unsigned int nr_blocks, | 361 | int nr_blocks, nvm_bb_update_fn *update_bbtbl, |
| 327 | nvm_bb_update_fn *update_bbtbl, void *priv) | 362 | void *priv) |
| 328 | { | 363 | { |
| 329 | struct nvme_ns *ns = q->queuedata; | 364 | struct nvme_ns *ns = q->queuedata; |
| 330 | struct nvme_dev *dev = ns->dev; | 365 | struct nvme_dev *dev = ns->dev; |
| 331 | struct nvme_nvm_command c = {}; | 366 | struct nvme_nvm_command c = {}; |
| 332 | void *bb_bitmap; | 367 | struct nvme_nvm_bb_tbl *bb_tbl; |
| 333 | u16 bb_bitmap_size; | 368 | int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; |
| 334 | int ret = 0; | 369 | int ret = 0; |
| 335 | 370 | ||
| 336 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; | 371 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; |
| 337 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); | 372 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); |
| 338 | c.get_bb.lbb = cpu_to_le32(lunid); | 373 | c.get_bb.spba = cpu_to_le64(ppa.ppa); |
| 339 | bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; | ||
| 340 | bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); | ||
| 341 | if (!bb_bitmap) | ||
| 342 | return -ENOMEM; | ||
| 343 | 374 | ||
| 344 | bitmap_zero(bb_bitmap, nr_blocks); | 375 | bb_tbl = kzalloc(tblsz, GFP_KERNEL); |
| 376 | if (!bb_tbl) | ||
| 377 | return -ENOMEM; | ||
| 345 | 378 | ||
| 346 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, | 379 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
| 347 | bb_bitmap_size); | 380 | bb_tbl, tblsz); |
| 348 | if (ret) { | 381 | if (ret) { |
| 349 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); | 382 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); |
| 350 | ret = -EIO; | 383 | ret = -EIO; |
| 351 | goto out; | 384 | goto out; |
| 352 | } | 385 | } |
| 353 | 386 | ||
| 354 | ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); | 387 | if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' || |
| 388 | bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { | ||
| 389 | dev_err(dev->dev, "bbt format mismatch\n"); | ||
| 390 | ret = -EINVAL; | ||
| 391 | goto out; | ||
| 392 | } | ||
| 393 | |||
| 394 | if (le16_to_cpu(bb_tbl->verid) != 1) { | ||
| 395 | ret = -EINVAL; | ||
| 396 | dev_err(dev->dev, "bbt version not supported\n"); | ||
| 397 | goto out; | ||
| 398 | } | ||
| 399 | |||
| 400 | if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { | ||
| 401 | ret = -EINVAL; | ||
| 402 | dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)", | ||
| 403 | le32_to_cpu(bb_tbl->tblks), nr_blocks); | ||
| 404 | goto out; | ||
| 405 | } | ||
| 406 | |||
| 407 | ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); | ||
| 355 | if (ret) { | 408 | if (ret) { |
| 356 | ret = -EINTR; | 409 | ret = -EINTR; |
| 357 | goto out; | 410 | goto out; |
| 358 | } | 411 | } |
| 359 | 412 | ||
| 360 | out: | 413 | out: |
| 361 | kfree(bb_bitmap); | 414 | kfree(bb_tbl); |
| 415 | return ret; | ||
| 416 | } | ||
| 417 | |||
| 418 | static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, | ||
| 419 | int type) | ||
| 420 | { | ||
| 421 | struct nvme_ns *ns = q->queuedata; | ||
| 422 | struct nvme_dev *dev = ns->dev; | ||
| 423 | struct nvme_nvm_command c = {}; | ||
| 424 | int ret = 0; | ||
| 425 | |||
| 426 | c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; | ||
| 427 | c.set_bb.nsid = cpu_to_le32(ns->ns_id); | ||
| 428 | c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); | ||
| 429 | c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); | ||
| 430 | c.set_bb.value = type; | ||
| 431 | |||
| 432 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, | ||
| 433 | NULL, 0); | ||
| 434 | if (ret) | ||
| 435 | dev_err(dev->dev, "set bad block table failed (%d)\n", ret); | ||
| 362 | return ret; | 436 | return ret; |
| 363 | } | 437 | } |
| 364 | 438 | ||
| @@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { | |||
| 474 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, | 548 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, |
| 475 | 549 | ||
| 476 | .get_bb_tbl = nvme_nvm_get_bb_tbl, | 550 | .get_bb_tbl = nvme_nvm_get_bb_tbl, |
| 551 | .set_bb_tbl = nvme_nvm_set_bb_tbl, | ||
| 477 | 552 | ||
| 478 | .submit_io = nvme_nvm_submit_io, | 553 | .submit_io = nvme_nvm_submit_io, |
| 479 | .erase_block = nvme_nvm_erase_block, | 554 | .erase_block = nvme_nvm_erase_block, |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..f3b53af789ef 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 896 | goto retry_cmd; | 896 | goto retry_cmd; |
| 897 | } | 897 | } |
| 898 | if (blk_integrity_rq(req)) { | 898 | if (blk_integrity_rq(req)) { |
| 899 | if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) | 899 | if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { |
| 900 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 901 | dma_dir); | ||
| 900 | goto error_cmd; | 902 | goto error_cmd; |
| 903 | } | ||
| 901 | 904 | ||
| 902 | sg_init_table(iod->meta_sg, 1); | 905 | sg_init_table(iod->meta_sg, 1); |
| 903 | if (blk_rq_map_integrity_sg( | 906 | if (blk_rq_map_integrity_sg( |
| 904 | req->q, req->bio, iod->meta_sg) != 1) | 907 | req->q, req->bio, iod->meta_sg) != 1) { |
| 908 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 909 | dma_dir); | ||
| 905 | goto error_cmd; | 910 | goto error_cmd; |
| 911 | } | ||
| 906 | 912 | ||
| 907 | if (rq_data_dir(req)) | 913 | if (rq_data_dir(req)) |
| 908 | nvme_dif_remap(req, nvme_dif_prep); | 914 | nvme_dif_remap(req, nvme_dif_prep); |
| 909 | 915 | ||
| 910 | if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) | 916 | if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { |
| 917 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 918 | dma_dir); | ||
| 911 | goto error_cmd; | 919 | goto error_cmd; |
| 920 | } | ||
| 912 | } | 921 | } |
| 913 | } | 922 | } |
| 914 | 923 | ||
| @@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) | |||
| 968 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) | 977 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) |
| 969 | return; | 978 | return; |
| 970 | 979 | ||
| 971 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | 980 | if (likely(nvmeq->cq_vector >= 0)) |
| 981 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | ||
| 972 | nvmeq->cq_head = head; | 982 | nvmeq->cq_head = head; |
| 973 | nvmeq->cq_phase = phase; | 983 | nvmeq->cq_phase = phase; |
| 974 | 984 | ||
| @@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
| 1727 | u32 aqa; | 1737 | u32 aqa; |
| 1728 | u64 cap = lo_hi_readq(&dev->bar->cap); | 1738 | u64 cap = lo_hi_readq(&dev->bar->cap); |
| 1729 | struct nvme_queue *nvmeq; | 1739 | struct nvme_queue *nvmeq; |
| 1730 | unsigned page_shift = PAGE_SHIFT; | 1740 | /* |
| 1741 | * default to a 4K page size, with the intention to update this | ||
| 1742 | * path in the future to accomodate architectures with differing | ||
| 1743 | * kernel and IO page sizes. | ||
| 1744 | */ | ||
| 1745 | unsigned page_shift = 12; | ||
| 1731 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; | 1746 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; |
| 1732 | unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; | ||
| 1733 | 1747 | ||
| 1734 | if (page_shift < dev_page_min) { | 1748 | if (page_shift < dev_page_min) { |
| 1735 | dev_err(dev->dev, | 1749 | dev_err(dev->dev, |
| @@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
| 1738 | 1 << page_shift); | 1752 | 1 << page_shift); |
| 1739 | return -ENODEV; | 1753 | return -ENODEV; |
| 1740 | } | 1754 | } |
| 1741 | if (page_shift > dev_page_max) { | ||
| 1742 | dev_info(dev->dev, | ||
| 1743 | "Device maximum page size (%u) smaller than " | ||
| 1744 | "host (%u); enabling work-around\n", | ||
| 1745 | 1 << dev_page_max, 1 << page_shift); | ||
| 1746 | page_shift = dev_page_max; | ||
| 1747 | } | ||
| 1748 | 1755 | ||
| 1749 | dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? | 1756 | dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? |
| 1750 | NVME_CAP_NSSRC(cap) : 0; | 1757 | NVME_CAP_NSSRC(cap) : 0; |
| @@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) | |||
| 2268 | if (dev->max_hw_sectors) { | 2275 | if (dev->max_hw_sectors) { |
| 2269 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); | 2276 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); |
| 2270 | blk_queue_max_segments(ns->queue, | 2277 | blk_queue_max_segments(ns->queue, |
| 2271 | ((dev->max_hw_sectors << 9) / dev->page_size) + 1); | 2278 | (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); |
| 2272 | } | 2279 | } |
| 2273 | if (dev->stripe_size) | 2280 | if (dev->stripe_size) |
| 2274 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); | 2281 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); |
| @@ -2787,6 +2794,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) | |||
| 2787 | { | 2794 | { |
| 2788 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; | 2795 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; |
| 2789 | nvme_put_dq(dq); | 2796 | nvme_put_dq(dq); |
| 2797 | |||
| 2798 | spin_lock_irq(&nvmeq->q_lock); | ||
| 2799 | nvme_process_cq(nvmeq); | ||
| 2800 | spin_unlock_irq(&nvmeq->q_lock); | ||
| 2790 | } | 2801 | } |
| 2791 | 2802 | ||
| 2792 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, | 2803 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, |
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 540f077c37ea..02a7452bdf23 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
| @@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 440 | ret, pp->io); | 440 | ret, pp->io); |
| 441 | continue; | 441 | continue; |
| 442 | } | 442 | } |
| 443 | pp->io_base = pp->io->start; | ||
| 444 | break; | 443 | break; |
| 445 | case IORESOURCE_MEM: | 444 | case IORESOURCE_MEM: |
| 446 | pp->mem = win->res; | 445 | pp->mem = win->res; |
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 35457ecd8e70..163671a4f798 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c | |||
| @@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = { | |||
| 111 | .link_up = hisi_pcie_link_up, | 111 | .link_up = hisi_pcie_link_up, |
| 112 | }; | 112 | }; |
| 113 | 113 | ||
| 114 | static int __init hisi_add_pcie_port(struct pcie_port *pp, | 114 | static int hisi_add_pcie_port(struct pcie_port *pp, |
| 115 | struct platform_device *pdev) | 115 | struct platform_device *pdev) |
| 116 | { | 116 | { |
| 117 | int ret; | 117 | int ret; |
| @@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp, | |||
| 139 | return 0; | 139 | return 0; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | static int __init hisi_pcie_probe(struct platform_device *pdev) | 142 | static int hisi_pcie_probe(struct platform_device *pdev) |
| 143 | { | 143 | { |
| 144 | struct hisi_pcie *hisi_pcie; | 144 | struct hisi_pcie *hisi_pcie; |
| 145 | struct pcie_port *pp; | 145 | struct pcie_port *pp; |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 92618686604c..eead54cd01b2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev, | |||
| 216 | if (ret) | 216 | if (ret) |
| 217 | return ret; | 217 | return ret; |
| 218 | 218 | ||
| 219 | if (node >= MAX_NUMNODES || !node_online(node)) | 219 | if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES) |
| 220 | return -EINVAL; | ||
| 221 | |||
| 222 | if (node != NUMA_NO_NODE && !node_online(node)) | ||
| 220 | return -EINVAL; | 223 | return -EINVAL; |
| 221 | 224 | ||
| 222 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); | 225 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fd2f03fa53f3..d390fc1475ec 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) | |||
| 337 | } | 337 | } |
| 338 | #endif | 338 | #endif |
| 339 | 339 | ||
| 340 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); | ||
| 341 | |||
| 342 | #endif /* DRIVERS_PCI_H */ | 340 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e735c728e3b3..edb1984201e9 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev) | |||
| 1685 | { | 1685 | { |
| 1686 | struct device *bridge = pci_get_host_bridge_device(dev); | 1686 | struct device *bridge = pci_get_host_bridge_device(dev); |
| 1687 | 1687 | ||
| 1688 | if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { | 1688 | if (IS_ENABLED(CONFIG_OF) && |
| 1689 | if (bridge->parent) | 1689 | bridge->parent && bridge->parent->of_node) { |
| 1690 | of_dma_configure(&dev->dev, bridge->parent->of_node); | 1690 | of_dma_configure(&dev->dev, bridge->parent->of_node); |
| 1691 | } else if (has_acpi_companion(bridge)) { | 1691 | } else if (has_acpi_companion(bridge)) { |
| 1692 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); | 1692 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); |
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8b3130f22b42..9e03d158f411 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
| @@ -1478,6 +1478,8 @@ module_init(remoteproc_init); | |||
| 1478 | 1478 | ||
| 1479 | static void __exit remoteproc_exit(void) | 1479 | static void __exit remoteproc_exit(void) |
| 1480 | { | 1480 | { |
| 1481 | ida_destroy(&rproc_dev_index); | ||
| 1482 | |||
| 1481 | rproc_exit_debugfs(); | 1483 | rproc_exit_debugfs(); |
| 1482 | } | 1484 | } |
| 1483 | module_exit(remoteproc_exit); | 1485 | module_exit(remoteproc_exit); |
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 9d30809bb407..916af5096f57 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c | |||
| @@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf, | |||
| 156 | char buf[10]; | 156 | char buf[10]; |
| 157 | int ret; | 157 | int ret; |
| 158 | 158 | ||
| 159 | if (count > sizeof(buf)) | 159 | if (count < 1 || count > sizeof(buf)) |
| 160 | return count; | 160 | return count; |
| 161 | 161 | ||
| 162 | ret = copy_from_user(buf, user_buf, count); | 162 | ret = copy_from_user(buf, user_buf, count); |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 188006c55ce0..aa705bb4748c 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
| @@ -15,9 +15,6 @@ | |||
| 15 | #include <linux/i2c.h> | 15 | #include <linux/i2c.h> |
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/of_device.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | #include <linux/pm_wakeirq.h> | ||
| 21 | #include <linux/rtc/ds1307.h> | 18 | #include <linux/rtc/ds1307.h> |
| 22 | #include <linux/rtc.h> | 19 | #include <linux/rtc.h> |
| 23 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| @@ -117,7 +114,6 @@ struct ds1307 { | |||
| 117 | #define HAS_ALARM 1 /* bit 1 == irq claimed */ | 114 | #define HAS_ALARM 1 /* bit 1 == irq claimed */ |
| 118 | struct i2c_client *client; | 115 | struct i2c_client *client; |
| 119 | struct rtc_device *rtc; | 116 | struct rtc_device *rtc; |
| 120 | int wakeirq; | ||
| 121 | s32 (*read_block_data)(const struct i2c_client *client, u8 command, | 117 | s32 (*read_block_data)(const struct i2c_client *client, u8 command, |
| 122 | u8 length, u8 *values); | 118 | u8 length, u8 *values); |
| 123 | s32 (*write_block_data)(const struct i2c_client *client, u8 command, | 119 | s32 (*write_block_data)(const struct i2c_client *client, u8 command, |
| @@ -1138,7 +1134,10 @@ read_rtc: | |||
| 1138 | bin2bcd(tmp)); | 1134 | bin2bcd(tmp)); |
| 1139 | } | 1135 | } |
| 1140 | 1136 | ||
| 1141 | device_set_wakeup_capable(&client->dev, want_irq); | 1137 | if (want_irq) { |
| 1138 | device_set_wakeup_capable(&client->dev, true); | ||
| 1139 | set_bit(HAS_ALARM, &ds1307->flags); | ||
| 1140 | } | ||
| 1142 | ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, | 1141 | ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, |
| 1143 | rtc_ops, THIS_MODULE); | 1142 | rtc_ops, THIS_MODULE); |
| 1144 | if (IS_ERR(ds1307->rtc)) { | 1143 | if (IS_ERR(ds1307->rtc)) { |
| @@ -1146,43 +1145,19 @@ read_rtc: | |||
| 1146 | } | 1145 | } |
| 1147 | 1146 | ||
| 1148 | if (want_irq) { | 1147 | if (want_irq) { |
| 1149 | struct device_node *node = client->dev.of_node; | ||
| 1150 | |||
| 1151 | err = devm_request_threaded_irq(&client->dev, | 1148 | err = devm_request_threaded_irq(&client->dev, |
| 1152 | client->irq, NULL, irq_handler, | 1149 | client->irq, NULL, irq_handler, |
| 1153 | IRQF_SHARED | IRQF_ONESHOT, | 1150 | IRQF_SHARED | IRQF_ONESHOT, |
| 1154 | ds1307->rtc->name, client); | 1151 | ds1307->rtc->name, client); |
| 1155 | if (err) { | 1152 | if (err) { |
| 1156 | client->irq = 0; | 1153 | client->irq = 0; |
| 1154 | device_set_wakeup_capable(&client->dev, false); | ||
| 1155 | clear_bit(HAS_ALARM, &ds1307->flags); | ||
| 1157 | dev_err(&client->dev, "unable to request IRQ!\n"); | 1156 | dev_err(&client->dev, "unable to request IRQ!\n"); |
| 1158 | goto no_irq; | 1157 | } else |
| 1159 | } | 1158 | dev_dbg(&client->dev, "got IRQ %d\n", client->irq); |
| 1160 | |||
| 1161 | set_bit(HAS_ALARM, &ds1307->flags); | ||
| 1162 | dev_dbg(&client->dev, "got IRQ %d\n", client->irq); | ||
| 1163 | |||
| 1164 | /* Currently supported by OF code only! */ | ||
| 1165 | if (!node) | ||
| 1166 | goto no_irq; | ||
| 1167 | |||
| 1168 | err = of_irq_get(node, 1); | ||
| 1169 | if (err <= 0) { | ||
| 1170 | if (err == -EPROBE_DEFER) | ||
| 1171 | goto exit; | ||
| 1172 | goto no_irq; | ||
| 1173 | } | ||
| 1174 | ds1307->wakeirq = err; | ||
| 1175 | |||
| 1176 | err = dev_pm_set_dedicated_wake_irq(&client->dev, | ||
| 1177 | ds1307->wakeirq); | ||
| 1178 | if (err) { | ||
| 1179 | dev_err(&client->dev, "unable to setup wakeIRQ %d!\n", | ||
| 1180 | err); | ||
| 1181 | goto exit; | ||
| 1182 | } | ||
| 1183 | } | 1159 | } |
| 1184 | 1160 | ||
| 1185 | no_irq: | ||
| 1186 | if (chip->nvram_size) { | 1161 | if (chip->nvram_size) { |
| 1187 | 1162 | ||
| 1188 | ds1307->nvram = devm_kzalloc(&client->dev, | 1163 | ds1307->nvram = devm_kzalloc(&client->dev, |
| @@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client) | |||
| 1226 | { | 1201 | { |
| 1227 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | 1202 | struct ds1307 *ds1307 = i2c_get_clientdata(client); |
| 1228 | 1203 | ||
| 1229 | if (ds1307->wakeirq) | ||
| 1230 | dev_pm_clear_wake_irq(&client->dev); | ||
| 1231 | |||
| 1232 | if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) | 1204 | if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) |
| 1233 | sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); | 1205 | sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); |
| 1234 | 1206 | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 548a18916a31..a831d18596a5 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void) | |||
| 1080 | free_page((unsigned long)sei_page); | 1080 | free_page((unsigned long)sei_page); |
| 1081 | } | 1081 | } |
| 1082 | 1082 | ||
| 1083 | int chsc_enable_facility(int operation_code) | 1083 | int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code) |
| 1084 | { | 1084 | { |
| 1085 | unsigned long flags; | ||
| 1086 | int ret; | 1085 | int ret; |
| 1087 | struct { | ||
| 1088 | struct chsc_header request; | ||
| 1089 | u8 reserved1:4; | ||
| 1090 | u8 format:4; | ||
| 1091 | u8 reserved2; | ||
| 1092 | u16 operation_code; | ||
| 1093 | u32 reserved3; | ||
| 1094 | u32 reserved4; | ||
| 1095 | u32 operation_data_area[252]; | ||
| 1096 | struct chsc_header response; | ||
| 1097 | u32 reserved5:4; | ||
| 1098 | u32 format2:4; | ||
| 1099 | u32 reserved6:24; | ||
| 1100 | } __attribute__ ((packed)) *sda_area; | ||
| 1101 | 1086 | ||
| 1102 | spin_lock_irqsave(&chsc_page_lock, flags); | ||
| 1103 | memset(chsc_page, 0, PAGE_SIZE); | ||
| 1104 | sda_area = chsc_page; | ||
| 1105 | sda_area->request.length = 0x0400; | 1087 | sda_area->request.length = 0x0400; |
| 1106 | sda_area->request.code = 0x0031; | 1088 | sda_area->request.code = 0x0031; |
| 1107 | sda_area->operation_code = operation_code; | 1089 | sda_area->operation_code = operation_code; |
| @@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code) | |||
| 1119 | default: | 1101 | default: |
| 1120 | ret = chsc_error_from_response(sda_area->response.code); | 1102 | ret = chsc_error_from_response(sda_area->response.code); |
| 1121 | } | 1103 | } |
| 1104 | out: | ||
| 1105 | return ret; | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | int chsc_enable_facility(int operation_code) | ||
| 1109 | { | ||
| 1110 | struct chsc_sda_area *sda_area; | ||
| 1111 | unsigned long flags; | ||
| 1112 | int ret; | ||
| 1113 | |||
| 1114 | spin_lock_irqsave(&chsc_page_lock, flags); | ||
| 1115 | memset(chsc_page, 0, PAGE_SIZE); | ||
| 1116 | sda_area = chsc_page; | ||
| 1117 | |||
| 1118 | ret = __chsc_enable_facility(sda_area, operation_code); | ||
| 1122 | if (ret != 0) | 1119 | if (ret != 0) |
| 1123 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 1120 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
| 1124 | operation_code, sda_area->response.code); | 1121 | operation_code, sda_area->response.code); |
| 1125 | out: | 1122 | |
| 1126 | spin_unlock_irqrestore(&chsc_page_lock, flags); | 1123 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
| 1127 | return ret; | 1124 | return ret; |
| 1128 | } | 1125 | } |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 76c9b50700b2..0de134c3a204 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
| @@ -115,6 +115,20 @@ struct chsc_scpd { | |||
| 115 | u8 data[PAGE_SIZE - 20]; | 115 | u8 data[PAGE_SIZE - 20]; |
| 116 | } __attribute__ ((packed)); | 116 | } __attribute__ ((packed)); |
| 117 | 117 | ||
| 118 | struct chsc_sda_area { | ||
| 119 | struct chsc_header request; | ||
| 120 | u8 :4; | ||
| 121 | u8 format:4; | ||
| 122 | u8 :8; | ||
| 123 | u16 operation_code; | ||
| 124 | u32 :32; | ||
| 125 | u32 :32; | ||
| 126 | u32 operation_data_area[252]; | ||
| 127 | struct chsc_header response; | ||
| 128 | u32 :4; | ||
| 129 | u32 format2:4; | ||
| 130 | u32 :24; | ||
| 131 | } __packed __aligned(PAGE_SIZE); | ||
| 118 | 132 | ||
| 119 | extern int chsc_get_ssd_info(struct subchannel_id schid, | 133 | extern int chsc_get_ssd_info(struct subchannel_id schid, |
| 120 | struct chsc_ssd_info *ssd); | 134 | struct chsc_ssd_info *ssd); |
| @@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void); | |||
| 122 | extern int chsc_init(void); | 136 | extern int chsc_init(void); |
| 123 | extern void chsc_init_cleanup(void); | 137 | extern void chsc_init_cleanup(void); |
| 124 | 138 | ||
| 139 | int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code); | ||
| 125 | extern int chsc_enable_facility(int); | 140 | extern int chsc_enable_facility(int); |
| 126 | struct channel_subsystem; | 141 | struct channel_subsystem; |
| 127 | extern int chsc_secm(struct channel_subsystem *, int); | 142 | extern int chsc_secm(struct channel_subsystem *, int); |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b5620e818d6b..690b8547e828 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) | |||
| 925 | 925 | ||
| 926 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | 926 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) |
| 927 | { | 927 | { |
| 928 | static struct chsc_sda_area sda_area __initdata; | ||
| 928 | struct subchannel_id schid; | 929 | struct subchannel_id schid; |
| 929 | struct schib schib; | 930 | struct schib schib; |
| 930 | 931 | ||
| 931 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; | 932 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
| 932 | if (!schid.one) | 933 | if (!schid.one) |
| 933 | return -ENODEV; | 934 | return -ENODEV; |
| 935 | |||
| 936 | if (schid.ssid) { | ||
| 937 | /* | ||
| 938 | * Firmware should have already enabled MSS but whoever started | ||
| 939 | * the kernel might have initiated a channel subsystem reset. | ||
| 940 | * Ensure that MSS is enabled. | ||
| 941 | */ | ||
| 942 | memset(&sda_area, 0, sizeof(sda_area)); | ||
| 943 | if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS)) | ||
| 944 | return -ENODEV; | ||
| 945 | } | ||
| 934 | if (stsch_err(schid, &schib)) | 946 | if (stsch_err(schid, &schib)) |
| 935 | return -ENODEV; | 947 | return -ENODEV; |
| 936 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | 948 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) |
| 937 | return -ENODEV; | 949 | return -ENODEV; |
| 938 | if (!schib.pmcw.dnv) | 950 | if (!schib.pmcw.dnv) |
| 939 | return -ENODEV; | 951 | return -ENODEV; |
| 952 | |||
| 953 | iplinfo->ssid = schid.ssid; | ||
| 940 | iplinfo->devno = schib.pmcw.dev; | 954 | iplinfo->devno = schib.pmcw.dev; |
| 941 | iplinfo->is_qdio = schib.pmcw.qf; | 955 | iplinfo->is_qdio = schib.pmcw.qf; |
| 942 | return 0; | 956 | return 0; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2ee3053bdc12..489e703dc82d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) | |||
| 702 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; | 702 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; |
| 703 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; | 703 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; |
| 704 | } else { | 704 | } else { |
| 705 | #ifdef CONFIG_SMP | ||
| 706 | css->global_pgid.pgid_high.cpu_addr = stap(); | 705 | css->global_pgid.pgid_high.cpu_addr = stap(); |
| 707 | #else | ||
| 708 | css->global_pgid.pgid_high.cpu_addr = 0; | ||
| 709 | #endif | ||
| 710 | } | 706 | } |
| 711 | get_cpu_id(&cpu_id); | 707 | get_cpu_id(&cpu_id); |
| 712 | css->global_pgid.cpu_id = cpu_id.ident; | 708 | css->global_pgid.cpu_id = cpu_id.ident; |
| 713 | css->global_pgid.cpu_model = cpu_id.machine; | 709 | css->global_pgid.cpu_model = cpu_id.machine; |
| 714 | css->global_pgid.tod_high = tod_high; | 710 | css->global_pgid.tod_high = tod_high; |
| 715 | |||
| 716 | } | 711 | } |
| 717 | 712 | ||
| 718 | static void | 713 | static void |
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 57f710b3c8a4..b8ab18676e69 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile | |||
| @@ -3,6 +3,9 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | ap-objs := ap_bus.o | 5 | ap-objs := ap_bus.o |
| 6 | obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o | 6 | # zcrypt_api depends on ap |
| 7 | obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o | 7 | obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o |
| 8 | # msgtype* depend on zcrypt_api | ||
| 8 | obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o | 9 | obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o |
| 10 | # adapter drivers depend on ap, zcrypt_api and msgtype* | ||
| 11 | obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o | ||
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9cb3dfbcaddb..61f768518a34 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
| @@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL; | |||
| 74 | static struct ap_config_info *ap_configuration; | 74 | static struct ap_config_info *ap_configuration; |
| 75 | static DEFINE_SPINLOCK(ap_device_list_lock); | 75 | static DEFINE_SPINLOCK(ap_device_list_lock); |
| 76 | static LIST_HEAD(ap_device_list); | 76 | static LIST_HEAD(ap_device_list); |
| 77 | static bool initialised; | ||
| 77 | 78 | ||
| 78 | /* | 79 | /* |
| 79 | * Workqueue timer for bus rescan. | 80 | * Workqueue timer for bus rescan. |
| @@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, | |||
| 1384 | { | 1385 | { |
| 1385 | struct device_driver *drv = &ap_drv->driver; | 1386 | struct device_driver *drv = &ap_drv->driver; |
| 1386 | 1387 | ||
| 1388 | if (!initialised) | ||
| 1389 | return -ENODEV; | ||
| 1390 | |||
| 1387 | drv->bus = &ap_bus_type; | 1391 | drv->bus = &ap_bus_type; |
| 1388 | drv->probe = ap_device_probe; | 1392 | drv->probe = ap_device_probe; |
| 1389 | drv->remove = ap_device_remove; | 1393 | drv->remove = ap_device_remove; |
| @@ -1808,6 +1812,7 @@ int __init ap_module_init(void) | |||
| 1808 | goto out_pm; | 1812 | goto out_pm; |
| 1809 | 1813 | ||
| 1810 | queue_work(system_long_wq, &ap_scan_work); | 1814 | queue_work(system_long_wq, &ap_scan_work); |
| 1815 | initialised = true; | ||
| 1811 | 1816 | ||
| 1812 | return 0; | 1817 | return 0; |
| 1813 | 1818 | ||
| @@ -1837,6 +1842,7 @@ void ap_module_exit(void) | |||
| 1837 | { | 1842 | { |
| 1838 | int i; | 1843 | int i; |
| 1839 | 1844 | ||
| 1845 | initialised = false; | ||
| 1840 | ap_reset_domain(); | 1846 | ap_reset_domain(); |
| 1841 | ap_poll_thread_stop(); | 1847 | ap_poll_thread_stop(); |
| 1842 | del_timer_sync(&ap_config_timer); | 1848 | del_timer_sync(&ap_config_timer); |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index a9603ebbc1f8..9f8fa42c062c 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
| @@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister); | |||
| 317 | 317 | ||
| 318 | void zcrypt_msgtype_register(struct zcrypt_ops *zops) | 318 | void zcrypt_msgtype_register(struct zcrypt_ops *zops) |
| 319 | { | 319 | { |
| 320 | if (zops->owner) { | 320 | spin_lock_bh(&zcrypt_ops_list_lock); |
| 321 | spin_lock_bh(&zcrypt_ops_list_lock); | 321 | list_add_tail(&zops->list, &zcrypt_ops_list); |
| 322 | list_add_tail(&zops->list, &zcrypt_ops_list); | 322 | spin_unlock_bh(&zcrypt_ops_list_lock); |
| 323 | spin_unlock_bh(&zcrypt_ops_list_lock); | ||
| 324 | } | ||
| 325 | } | 323 | } |
| 326 | EXPORT_SYMBOL(zcrypt_msgtype_register); | 324 | EXPORT_SYMBOL(zcrypt_msgtype_register); |
| 327 | 325 | ||
| @@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant) | |||
| 342 | spin_lock_bh(&zcrypt_ops_list_lock); | 340 | spin_lock_bh(&zcrypt_ops_list_lock); |
| 343 | list_for_each_entry(zops, &zcrypt_ops_list, list) { | 341 | list_for_each_entry(zops, &zcrypt_ops_list, list) { |
| 344 | if ((zops->variant == variant) && | 342 | if ((zops->variant == variant) && |
| 345 | (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) { | 343 | (!strncmp(zops->name, name, sizeof(zops->name)))) { |
| 346 | found = 1; | 344 | found = 1; |
| 347 | break; | 345 | break; |
| 348 | } | 346 | } |
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 750876891931..38618f05ad92 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h | |||
| @@ -96,6 +96,7 @@ struct zcrypt_ops { | |||
| 96 | struct list_head list; /* zcrypt ops list. */ | 96 | struct list_head list; /* zcrypt ops list. */ |
| 97 | struct module *owner; | 97 | struct module *owner; |
| 98 | int variant; | 98 | int variant; |
| 99 | char name[128]; | ||
| 99 | }; | 100 | }; |
| 100 | 101 | ||
| 101 | struct zcrypt_device { | 102 | struct zcrypt_device { |
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 71ceee9137a8..74edf2934e7c 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c | |||
| @@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = { | |||
| 513 | .rsa_modexpo = zcrypt_cex2a_modexpo, | 513 | .rsa_modexpo = zcrypt_cex2a_modexpo, |
| 514 | .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, | 514 | .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, |
| 515 | .owner = THIS_MODULE, | 515 | .owner = THIS_MODULE, |
| 516 | .name = MSGTYPE50_NAME, | ||
| 516 | .variant = MSGTYPE50_VARIANT_DEFAULT, | 517 | .variant = MSGTYPE50_VARIANT_DEFAULT, |
| 517 | }; | 518 | }; |
| 518 | 519 | ||
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 74762214193b..9a2dd472c1cc 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c | |||
| @@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, | |||
| 1119 | */ | 1119 | */ |
| 1120 | static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { | 1120 | static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { |
| 1121 | .owner = THIS_MODULE, | 1121 | .owner = THIS_MODULE, |
| 1122 | .name = MSGTYPE06_NAME, | ||
| 1122 | .variant = MSGTYPE06_VARIANT_NORNG, | 1123 | .variant = MSGTYPE06_VARIANT_NORNG, |
| 1123 | .rsa_modexpo = zcrypt_msgtype6_modexpo, | 1124 | .rsa_modexpo = zcrypt_msgtype6_modexpo, |
| 1124 | .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, | 1125 | .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, |
| @@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { | |||
| 1127 | 1128 | ||
| 1128 | static struct zcrypt_ops zcrypt_msgtype6_ops = { | 1129 | static struct zcrypt_ops zcrypt_msgtype6_ops = { |
| 1129 | .owner = THIS_MODULE, | 1130 | .owner = THIS_MODULE, |
| 1131 | .name = MSGTYPE06_NAME, | ||
| 1130 | .variant = MSGTYPE06_VARIANT_DEFAULT, | 1132 | .variant = MSGTYPE06_VARIANT_DEFAULT, |
| 1131 | .rsa_modexpo = zcrypt_msgtype6_modexpo, | 1133 | .rsa_modexpo = zcrypt_msgtype6_modexpo, |
| 1132 | .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, | 1134 | .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, |
| @@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = { | |||
| 1136 | 1138 | ||
| 1137 | static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { | 1139 | static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { |
| 1138 | .owner = THIS_MODULE, | 1140 | .owner = THIS_MODULE, |
| 1141 | .name = MSGTYPE06_NAME, | ||
| 1139 | .variant = MSGTYPE06_VARIANT_EP11, | 1142 | .variant = MSGTYPE06_VARIANT_EP11, |
| 1140 | .rsa_modexpo = NULL, | 1143 | .rsa_modexpo = NULL, |
| 1141 | .rsa_modexpo_crt = NULL, | 1144 | .rsa_modexpo_crt = NULL, |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3ba2e9564b9a..81af294f15a7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, | |||
| 902 | return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); | 902 | return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); |
| 903 | } | 903 | } |
| 904 | 904 | ||
| 905 | CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); | 905 | CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); |
| 906 | CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); | 906 | CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); |
| 907 | CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); | 907 | CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); |
| 908 | 908 | ||
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 25abd4eb7d10..91a003011acf 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
| @@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = { | |||
| 34 | 34 | ||
| 35 | static int __init sh_pm_runtime_init(void) | 35 | static int __init sh_pm_runtime_init(void) |
| 36 | { | 36 | { |
| 37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | 37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { |
| 38 | if (!of_find_compatible_node(NULL, NULL, | 38 | if (!of_find_compatible_node(NULL, NULL, |
| 39 | "renesas,cpg-mstp-clocks")) | 39 | "renesas,cpg-mstp-clocks")) |
| 40 | return 0; | 40 | return 0; |
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 9d5068248aa0..0a4ea809a61b 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig | |||
| @@ -23,6 +23,7 @@ config MTK_PMIC_WRAP | |||
| 23 | config MTK_SCPSYS | 23 | config MTK_SCPSYS |
| 24 | bool "MediaTek SCPSYS Support" | 24 | bool "MediaTek SCPSYS Support" |
| 25 | depends on ARCH_MEDIATEK || COMPILE_TEST | 25 | depends on ARCH_MEDIATEK || COMPILE_TEST |
| 26 | default ARM64 && ARCH_MEDIATEK | ||
| 26 | select REGMAP | 27 | select REGMAP |
| 27 | select MTK_INFRACFG | 28 | select MTK_INFRACFG |
| 28 | select PM_GENERIC_DOMAINS if PM | 29 | select PM_GENERIC_DOMAINS if PM |
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index f3a0b6a4b54e..8c03a80b482d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c | |||
| @@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev) | |||
| 1179 | 1179 | ||
| 1180 | block++; | 1180 | block++; |
| 1181 | if (!block->size) | 1181 | if (!block->size) |
| 1182 | return 0; | 1182 | continue; |
| 1183 | 1183 | ||
| 1184 | dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", | 1184 | dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", |
| 1185 | block->phys, block->virt, block->size); | 1185 | block->phys, block->virt, block->size); |
| @@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev, | |||
| 1519 | 1519 | ||
| 1520 | for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { | 1520 | for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { |
| 1521 | if (knav_acc_firmwares[i]) { | 1521 | if (knav_acc_firmwares[i]) { |
| 1522 | ret = request_firmware(&fw, | 1522 | ret = request_firmware_direct(&fw, |
| 1523 | knav_acc_firmwares[i], | 1523 | knav_acc_firmwares[i], |
| 1524 | kdev->dev); | 1524 | kdev->dev); |
| 1525 | if (!ret) { | 1525 | if (!ret) { |
| 1526 | found = true; | 1526 | found = true; |
| 1527 | break; | 1527 | break; |
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 6d5b38d69578..9d7f0004d2d7 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig | |||
| @@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig" | |||
| 18 | source "drivers/staging/iio/trigger/Kconfig" | 18 | source "drivers/staging/iio/trigger/Kconfig" |
| 19 | 19 | ||
| 20 | config IIO_DUMMY_EVGEN | 20 | config IIO_DUMMY_EVGEN |
| 21 | tristate | 21 | tristate |
| 22 | select IRQ_WORK | ||
| 22 | 23 | ||
| 23 | config IIO_SIMPLE_DUMMY | 24 | config IIO_SIMPLE_DUMMY |
| 24 | tristate "An example driver with no hardware requirements" | 25 | tristate "An example driver with no hardware requirements" |
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c index d11c54b72186..b51f237cd817 100644 --- a/drivers/staging/iio/adc/lpc32xx_adc.c +++ b/drivers/staging/iio/adc/lpc32xx_adc.c | |||
| @@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, | |||
| 76 | 76 | ||
| 77 | if (mask == IIO_CHAN_INFO_RAW) { | 77 | if (mask == IIO_CHAN_INFO_RAW) { |
| 78 | mutex_lock(&indio_dev->mlock); | 78 | mutex_lock(&indio_dev->mlock); |
| 79 | clk_enable(info->clk); | 79 | clk_prepare_enable(info->clk); |
| 80 | /* Measurement setup */ | 80 | /* Measurement setup */ |
| 81 | __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, | 81 | __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, |
| 82 | LPC32XX_ADC_SELECT(info->adc_base)); | 82 | LPC32XX_ADC_SELECT(info->adc_base)); |
| @@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, | |||
| 84 | __raw_writel(AD_PDN_CTRL | AD_STROBE, | 84 | __raw_writel(AD_PDN_CTRL | AD_STROBE, |
| 85 | LPC32XX_ADC_CTRL(info->adc_base)); | 85 | LPC32XX_ADC_CTRL(info->adc_base)); |
| 86 | wait_for_completion(&info->completion); /* set by ISR */ | 86 | wait_for_completion(&info->completion); /* set by ISR */ |
| 87 | clk_disable(info->clk); | 87 | clk_disable_unprepare(info->clk); |
| 88 | *val = info->value; | 88 | *val = info->value; |
| 89 | mutex_unlock(&indio_dev->mlock); | 89 | mutex_unlock(&indio_dev->mlock); |
| 90 | 90 | ||
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c index e10c6ffa698a..9568bdb6319b 100644 --- a/drivers/staging/wilc1000/coreconfigurator.c +++ b/drivers/staging/wilc1000/coreconfigurator.c | |||
| @@ -13,12 +13,8 @@ | |||
| 13 | #include "wilc_wlan.h" | 13 | #include "wilc_wlan.h" |
| 14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/etherdevice.h> | ||
| 17 | #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ | 16 | #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ |
| 18 | BEACON_INTERVAL_LEN + CAP_INFO_LEN) | 17 | BEACON_INTERVAL_LEN + CAP_INFO_LEN) |
| 19 | #define ADDR1 4 | ||
| 20 | #define ADDR2 10 | ||
| 21 | #define ADDR3 16 | ||
| 22 | 18 | ||
| 23 | /* Basic Frame Type Codes (2-bit) */ | 19 | /* Basic Frame Type Codes (2-bit) */ |
| 24 | enum basic_frame_type { | 20 | enum basic_frame_type { |
| @@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header) | |||
| 175 | return ((header[1] & 0x02) >> 1); | 171 | return ((header[1] & 0x02) >> 1); |
| 176 | } | 172 | } |
| 177 | 173 | ||
| 174 | /* This function extracts the MAC Address in 'address1' field of the MAC */ | ||
| 175 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 176 | static inline void get_address1(u8 *pu8msa, u8 *addr) | ||
| 177 | { | ||
| 178 | memcpy(addr, pu8msa + 4, 6); | ||
| 179 | } | ||
| 180 | |||
| 181 | /* This function extracts the MAC Address in 'address2' field of the MAC */ | ||
| 182 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 183 | static inline void get_address2(u8 *pu8msa, u8 *addr) | ||
| 184 | { | ||
| 185 | memcpy(addr, pu8msa + 10, 6); | ||
| 186 | } | ||
| 187 | |||
| 188 | /* This function extracts the MAC Address in 'address3' field of the MAC */ | ||
| 189 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 190 | static inline void get_address3(u8 *pu8msa, u8 *addr) | ||
| 191 | { | ||
| 192 | memcpy(addr, pu8msa + 16, 6); | ||
| 193 | } | ||
| 194 | |||
| 178 | /* This function extracts the BSSID from the incoming WLAN packet based on */ | 195 | /* This function extracts the BSSID from the incoming WLAN packet based on */ |
| 179 | /* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ | 196 | /* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */ |
| 180 | /* variable. */ | 197 | /* variable. */ |
| 181 | static inline void get_BSSID(u8 *data, u8 *bssid) | 198 | static inline void get_BSSID(u8 *data, u8 *bssid) |
| 182 | { | 199 | { |
| 183 | if (get_from_ds(data) == 1) | 200 | if (get_from_ds(data) == 1) |
| 184 | /* | 201 | get_address2(data, bssid); |
| 185 | * Extract the MAC Address in 'address2' field of the MAC | ||
| 186 | * header and update the MAC Address in the allocated 'data' | ||
| 187 | * variable. | ||
| 188 | */ | ||
| 189 | ether_addr_copy(data, bssid + ADDR2); | ||
| 190 | else if (get_to_ds(data) == 1) | 202 | else if (get_to_ds(data) == 1) |
| 191 | /* | 203 | get_address1(data, bssid); |
| 192 | * Extract the MAC Address in 'address1' field of the MAC | ||
| 193 | * header and update the MAC Address in the allocated 'data' | ||
| 194 | * variable. | ||
| 195 | */ | ||
| 196 | ether_addr_copy(data, bssid + ADDR1); | ||
| 197 | else | 204 | else |
| 198 | /* | 205 | get_address3(data, bssid); |
| 199 | * Extract the MAC Address in 'address3' field of the MAC | ||
| 200 | * header and update the MAC Address in the allocated 'data' | ||
| 201 | * variable. | ||
| 202 | */ | ||
| 203 | ether_addr_copy(data, bssid + ADDR3); | ||
| 204 | } | 206 | } |
| 205 | 207 | ||
| 206 | /* This function extracts the SSID from a beacon/probe response frame */ | 208 | /* This function extracts the SSID from a beacon/probe response frame */ |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 342a07c58d89..72204fbf2bb1 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -4074,6 +4074,17 @@ reject: | |||
| 4074 | return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); | 4074 | return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); |
| 4075 | } | 4075 | } |
| 4076 | 4076 | ||
| 4077 | static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) | ||
| 4078 | { | ||
| 4079 | bool ret; | ||
| 4080 | |||
| 4081 | spin_lock_bh(&conn->state_lock); | ||
| 4082 | ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); | ||
| 4083 | spin_unlock_bh(&conn->state_lock); | ||
| 4084 | |||
| 4085 | return ret; | ||
| 4086 | } | ||
| 4087 | |||
| 4077 | int iscsi_target_rx_thread(void *arg) | 4088 | int iscsi_target_rx_thread(void *arg) |
| 4078 | { | 4089 | { |
| 4079 | int ret, rc; | 4090 | int ret, rc; |
| @@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4091 | * incoming iscsi/tcp socket I/O, and/or failing the connection. | 4102 | * incoming iscsi/tcp socket I/O, and/or failing the connection. |
| 4092 | */ | 4103 | */ |
| 4093 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); | 4104 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); |
| 4094 | if (rc < 0) | 4105 | if (rc < 0 || iscsi_target_check_conn_state(conn)) |
| 4095 | return 0; | 4106 | return 0; |
| 4096 | 4107 | ||
| 4097 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { | 4108 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { |
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 5c964c09c89f..9fc9117d0f22 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
| @@ -388,6 +388,7 @@ err: | |||
| 388 | if (login->login_complete) { | 388 | if (login->login_complete) { |
| 389 | if (conn->rx_thread && conn->rx_thread_active) { | 389 | if (conn->rx_thread && conn->rx_thread_active) { |
| 390 | send_sig(SIGINT, conn->rx_thread, 1); | 390 | send_sig(SIGINT, conn->rx_thread, 1); |
| 391 | complete(&conn->rx_login_comp); | ||
| 391 | kthread_stop(conn->rx_thread); | 392 | kthread_stop(conn->rx_thread); |
| 392 | } | 393 | } |
| 393 | if (conn->tx_thread && conn->tx_thread_active) { | 394 | if (conn->tx_thread && conn->tx_thread_active) { |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 51d1734d5390..2cbea2af7cd0 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
| @@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) | |||
| 208 | if (!pl) { | 208 | if (!pl) { |
| 209 | pr_err("Unable to allocate memory for" | 209 | pr_err("Unable to allocate memory for" |
| 210 | " struct iscsi_param_list.\n"); | 210 | " struct iscsi_param_list.\n"); |
| 211 | return -1 ; | 211 | return -ENOMEM; |
| 212 | } | 212 | } |
| 213 | INIT_LIST_HEAD(&pl->param_list); | 213 | INIT_LIST_HEAD(&pl->param_list); |
| 214 | INIT_LIST_HEAD(&pl->extra_response_list); | 214 | INIT_LIST_HEAD(&pl->extra_response_list); |
| @@ -578,7 +578,7 @@ int iscsi_copy_param_list( | |||
| 578 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); | 578 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); |
| 579 | if (!param_list) { | 579 | if (!param_list) { |
| 580 | pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); | 580 | pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); |
| 581 | return -1; | 581 | return -ENOMEM; |
| 582 | } | 582 | } |
| 583 | INIT_LIST_HEAD(¶m_list->param_list); | 583 | INIT_LIST_HEAD(¶m_list->param_list); |
| 584 | INIT_LIST_HEAD(¶m_list->extra_response_list); | 584 | INIT_LIST_HEAD(¶m_list->extra_response_list); |
| @@ -629,7 +629,7 @@ int iscsi_copy_param_list( | |||
| 629 | 629 | ||
| 630 | err_out: | 630 | err_out: |
| 631 | iscsi_release_param_list(param_list); | 631 | iscsi_release_param_list(param_list); |
| 632 | return -1; | 632 | return -ENOMEM; |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) | 635 | static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) |
| @@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response( | |||
| 729 | if (!extra_response) { | 729 | if (!extra_response) { |
| 730 | pr_err("Unable to allocate memory for" | 730 | pr_err("Unable to allocate memory for" |
| 731 | " struct iscsi_extra_response.\n"); | 731 | " struct iscsi_extra_response.\n"); |
| 732 | return -1; | 732 | return -ENOMEM; |
| 733 | } | 733 | } |
| 734 | INIT_LIST_HEAD(&extra_response->er_list); | 734 | INIT_LIST_HEAD(&extra_response->er_list); |
| 735 | 735 | ||
| @@ -1370,7 +1370,7 @@ int iscsi_decode_text_input( | |||
| 1370 | tmpbuf = kzalloc(length + 1, GFP_KERNEL); | 1370 | tmpbuf = kzalloc(length + 1, GFP_KERNEL); |
| 1371 | if (!tmpbuf) { | 1371 | if (!tmpbuf) { |
| 1372 | pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); | 1372 | pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); |
| 1373 | return -1; | 1373 | return -ENOMEM; |
| 1374 | } | 1374 | } |
| 1375 | 1375 | ||
| 1376 | memcpy(tmpbuf, textbuf, length); | 1376 | memcpy(tmpbuf, textbuf, length); |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 0b4b2a67d9f9..98698d875742 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
| @@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o | |||
| 371 | return 0; | 371 | return 0; |
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) | 374 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, |
| 375 | int *post_ret) | ||
| 375 | { | 376 | { |
| 376 | unsigned char *buf, *addr; | 377 | unsigned char *buf, *addr; |
| 377 | struct scatterlist *sg; | 378 | struct scatterlist *sg; |
| @@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd) | |||
| 437 | cmd->data_direction); | 438 | cmd->data_direction); |
| 438 | } | 439 | } |
| 439 | 440 | ||
| 440 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) | 441 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, |
| 442 | int *post_ret) | ||
| 441 | { | 443 | { |
| 442 | struct se_device *dev = cmd->se_dev; | 444 | struct se_device *dev = cmd->se_dev; |
| 443 | 445 | ||
| @@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) | |||
| 447 | * sent to the backend driver. | 449 | * sent to the backend driver. |
| 448 | */ | 450 | */ |
| 449 | spin_lock_irq(&cmd->t_state_lock); | 451 | spin_lock_irq(&cmd->t_state_lock); |
| 450 | if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) | 452 | if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { |
| 451 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; | 453 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; |
| 454 | *post_ret = 1; | ||
| 455 | } | ||
| 452 | spin_unlock_irq(&cmd->t_state_lock); | 456 | spin_unlock_irq(&cmd->t_state_lock); |
| 453 | 457 | ||
| 454 | /* | 458 | /* |
| @@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) | |||
| 460 | return TCM_NO_SENSE; | 464 | return TCM_NO_SENSE; |
| 461 | } | 465 | } |
| 462 | 466 | ||
| 463 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) | 467 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, |
| 468 | int *post_ret) | ||
| 464 | { | 469 | { |
| 465 | struct se_device *dev = cmd->se_dev; | 470 | struct se_device *dev = cmd->se_dev; |
| 466 | struct scatterlist *write_sg = NULL, *sg; | 471 | struct scatterlist *write_sg = NULL, *sg; |
| @@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes | |||
| 556 | 561 | ||
| 557 | if (block_size < PAGE_SIZE) { | 562 | if (block_size < PAGE_SIZE) { |
| 558 | sg_set_page(&write_sg[i], m.page, block_size, | 563 | sg_set_page(&write_sg[i], m.page, block_size, |
| 559 | block_size); | 564 | m.piter.sg->offset + block_size); |
| 560 | } else { | 565 | } else { |
| 561 | sg_miter_next(&m); | 566 | sg_miter_next(&m); |
| 562 | sg_set_page(&write_sg[i], m.page, block_size, | 567 | sg_set_page(&write_sg[i], m.page, block_size, |
| 563 | 0); | 568 | m.piter.sg->offset); |
| 564 | } | 569 | } |
| 565 | len -= block_size; | 570 | len -= block_size; |
| 566 | i++; | 571 | i++; |
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 273c72b2b83d..81a6b3e07687 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c | |||
| @@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) | |||
| 246 | char str[sizeof(dev->t10_wwn.model)+1]; | 246 | char str[sizeof(dev->t10_wwn.model)+1]; |
| 247 | 247 | ||
| 248 | /* scsiLuProductId */ | 248 | /* scsiLuProductId */ |
| 249 | for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) | 249 | for (i = 0; i < sizeof(dev->t10_wwn.model); i++) |
| 250 | str[i] = ISPRINT(dev->t10_wwn.model[i]) ? | 250 | str[i] = ISPRINT(dev->t10_wwn.model[i]) ? |
| 251 | dev->t10_wwn.model[i] : ' '; | 251 | dev->t10_wwn.model[i] : ' '; |
| 252 | str[i] = '\0'; | 252 | str[i] = '\0'; |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 5b2820312310..28fb3016370f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
| @@ -130,6 +130,9 @@ void core_tmr_abort_task( | |||
| 130 | if (tmr->ref_task_tag != ref_tag) | 130 | if (tmr->ref_task_tag != ref_tag) |
| 131 | continue; | 131 | continue; |
| 132 | 132 | ||
| 133 | if (!kref_get_unless_zero(&se_cmd->cmd_kref)) | ||
| 134 | continue; | ||
| 135 | |||
| 133 | printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", | 136 | printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", |
| 134 | se_cmd->se_tfo->get_fabric_name(), ref_tag); | 137 | se_cmd->se_tfo->get_fabric_name(), ref_tag); |
| 135 | 138 | ||
| @@ -139,13 +142,15 @@ void core_tmr_abort_task( | |||
| 139 | " skipping\n", ref_tag); | 142 | " skipping\n", ref_tag); |
| 140 | spin_unlock(&se_cmd->t_state_lock); | 143 | spin_unlock(&se_cmd->t_state_lock); |
| 141 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 144 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 145 | |||
| 146 | target_put_sess_cmd(se_cmd); | ||
| 147 | |||
| 142 | goto out; | 148 | goto out; |
| 143 | } | 149 | } |
| 144 | se_cmd->transport_state |= CMD_T_ABORTED; | 150 | se_cmd->transport_state |= CMD_T_ABORTED; |
| 145 | spin_unlock(&se_cmd->t_state_lock); | 151 | spin_unlock(&se_cmd->t_state_lock); |
| 146 | 152 | ||
| 147 | list_del_init(&se_cmd->se_cmd_list); | 153 | list_del_init(&se_cmd->se_cmd_list); |
| 148 | kref_get(&se_cmd->cmd_kref); | ||
| 149 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 154 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 150 | 155 | ||
| 151 | cancel_work_sync(&se_cmd->work); | 156 | cancel_work_sync(&se_cmd->work); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5bacc7b5ed6d..4fdcee2006d1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) | |||
| 1658 | void transport_generic_request_failure(struct se_cmd *cmd, | 1658 | void transport_generic_request_failure(struct se_cmd *cmd, |
| 1659 | sense_reason_t sense_reason) | 1659 | sense_reason_t sense_reason) |
| 1660 | { | 1660 | { |
| 1661 | int ret = 0; | 1661 | int ret = 0, post_ret = 0; |
| 1662 | 1662 | ||
| 1663 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" | 1663 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" |
| 1664 | " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); | 1664 | " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); |
| @@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, | |||
| 1680 | */ | 1680 | */ |
| 1681 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | 1681 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
| 1682 | cmd->transport_complete_callback) | 1682 | cmd->transport_complete_callback) |
| 1683 | cmd->transport_complete_callback(cmd, false); | 1683 | cmd->transport_complete_callback(cmd, false, &post_ret); |
| 1684 | 1684 | ||
| 1685 | switch (sense_reason) { | 1685 | switch (sense_reason) { |
| 1686 | case TCM_NON_EXISTENT_LUN: | 1686 | case TCM_NON_EXISTENT_LUN: |
| @@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work) | |||
| 2068 | */ | 2068 | */ |
| 2069 | if (cmd->transport_complete_callback) { | 2069 | if (cmd->transport_complete_callback) { |
| 2070 | sense_reason_t rc; | 2070 | sense_reason_t rc; |
| 2071 | bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); | ||
| 2072 | bool zero_dl = !(cmd->data_length); | ||
| 2073 | int post_ret = 0; | ||
| 2071 | 2074 | ||
| 2072 | rc = cmd->transport_complete_callback(cmd, true); | 2075 | rc = cmd->transport_complete_callback(cmd, true, &post_ret); |
| 2073 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { | 2076 | if (!rc && !post_ret) { |
| 2074 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | 2077 | if (caw && zero_dl) |
| 2075 | !cmd->data_length) | ||
| 2076 | goto queue_rsp; | 2078 | goto queue_rsp; |
| 2077 | 2079 | ||
| 2078 | return; | 2080 | return; |
| @@ -2507,23 +2509,24 @@ out: | |||
| 2507 | EXPORT_SYMBOL(target_get_sess_cmd); | 2509 | EXPORT_SYMBOL(target_get_sess_cmd); |
| 2508 | 2510 | ||
| 2509 | static void target_release_cmd_kref(struct kref *kref) | 2511 | static void target_release_cmd_kref(struct kref *kref) |
| 2510 | __releases(&se_cmd->se_sess->sess_cmd_lock) | ||
| 2511 | { | 2512 | { |
| 2512 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2513 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
| 2513 | struct se_session *se_sess = se_cmd->se_sess; | 2514 | struct se_session *se_sess = se_cmd->se_sess; |
| 2515 | unsigned long flags; | ||
| 2514 | 2516 | ||
| 2517 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | ||
| 2515 | if (list_empty(&se_cmd->se_cmd_list)) { | 2518 | if (list_empty(&se_cmd->se_cmd_list)) { |
| 2516 | spin_unlock(&se_sess->sess_cmd_lock); | 2519 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2517 | se_cmd->se_tfo->release_cmd(se_cmd); | 2520 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2518 | return; | 2521 | return; |
| 2519 | } | 2522 | } |
| 2520 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | 2523 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { |
| 2521 | spin_unlock(&se_sess->sess_cmd_lock); | 2524 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2522 | complete(&se_cmd->cmd_wait_comp); | 2525 | complete(&se_cmd->cmd_wait_comp); |
| 2523 | return; | 2526 | return; |
| 2524 | } | 2527 | } |
| 2525 | list_del(&se_cmd->se_cmd_list); | 2528 | list_del(&se_cmd->se_cmd_list); |
| 2526 | spin_unlock(&se_sess->sess_cmd_lock); | 2529 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2527 | 2530 | ||
| 2528 | se_cmd->se_tfo->release_cmd(se_cmd); | 2531 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2529 | } | 2532 | } |
| @@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) | |||
| 2539 | se_cmd->se_tfo->release_cmd(se_cmd); | 2542 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2540 | return 1; | 2543 | return 1; |
| 2541 | } | 2544 | } |
| 2542 | return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, | 2545 | return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); |
| 2543 | &se_sess->sess_cmd_lock); | ||
| 2544 | } | 2546 | } |
| 2545 | EXPORT_SYMBOL(target_put_sess_cmd); | 2547 | EXPORT_SYMBOL(target_put_sess_cmd); |
| 2546 | 2548 | ||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 937cebf76633..5e6d6cb348fc 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
| 638 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | 638 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) |
| 639 | return 0; | 639 | return 0; |
| 640 | 640 | ||
| 641 | if (!time_after(cmd->deadline, jiffies)) | 641 | if (!time_after(jiffies, cmd->deadline)) |
| 642 | return 0; | 642 | return 0; |
| 643 | 643 | ||
| 644 | set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); | 644 | set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); |
| @@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd) | |||
| 1101 | 1101 | ||
| 1102 | static const struct target_backend_ops tcmu_ops = { | 1102 | static const struct target_backend_ops tcmu_ops = { |
| 1103 | .name = "user", | 1103 | .name = "user", |
| 1104 | .inquiry_prod = "USER", | ||
| 1105 | .inquiry_rev = TCMU_VERSION, | ||
| 1106 | .owner = THIS_MODULE, | 1104 | .owner = THIS_MODULE, |
| 1107 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, | 1105 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
| 1108 | .attach_hba = tcmu_attach_hba, | 1106 | .attach_hba = tcmu_attach_hba, |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c463c89b90ef..8cc4ac64a91c 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -382,7 +382,7 @@ endmenu | |||
| 382 | 382 | ||
| 383 | config QCOM_SPMI_TEMP_ALARM | 383 | config QCOM_SPMI_TEMP_ALARM |
| 384 | tristate "Qualcomm SPMI PMIC Temperature Alarm" | 384 | tristate "Qualcomm SPMI PMIC Temperature Alarm" |
| 385 | depends on OF && (SPMI || COMPILE_TEST) && IIO | 385 | depends on OF && SPMI && IIO |
| 386 | select REGMAP_SPMI | 386 | select REGMAP_SPMI |
| 387 | help | 387 | help |
| 388 | This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) | 388 | This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) |
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c8fe3cac2e0e..c5547bd711db 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c | |||
| @@ -55,6 +55,7 @@ | |||
| 55 | #define TEMPSENSE2_PANIC_VALUE_SHIFT 16 | 55 | #define TEMPSENSE2_PANIC_VALUE_SHIFT 16 |
| 56 | #define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 | 56 | #define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 |
| 57 | 57 | ||
| 58 | #define OCOTP_MEM0 0x0480 | ||
| 58 | #define OCOTP_ANA1 0x04e0 | 59 | #define OCOTP_ANA1 0x04e0 |
| 59 | 60 | ||
| 60 | /* The driver supports 1 passive trip point and 1 critical trip point */ | 61 | /* The driver supports 1 passive trip point and 1 critical trip point */ |
| @@ -64,12 +65,6 @@ enum imx_thermal_trip { | |||
| 64 | IMX_TRIP_NUM, | 65 | IMX_TRIP_NUM, |
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | /* | ||
| 68 | * It defines the temperature in millicelsius for passive trip point | ||
| 69 | * that will trigger cooling action when crossed. | ||
| 70 | */ | ||
| 71 | #define IMX_TEMP_PASSIVE 85000 | ||
| 72 | |||
| 73 | #define IMX_POLLING_DELAY 2000 /* millisecond */ | 68 | #define IMX_POLLING_DELAY 2000 /* millisecond */ |
| 74 | #define IMX_PASSIVE_DELAY 1000 | 69 | #define IMX_PASSIVE_DELAY 1000 |
| 75 | 70 | ||
| @@ -100,12 +95,14 @@ struct imx_thermal_data { | |||
| 100 | u32 c1, c2; /* See formula in imx_get_sensor_data() */ | 95 | u32 c1, c2; /* See formula in imx_get_sensor_data() */ |
| 101 | int temp_passive; | 96 | int temp_passive; |
| 102 | int temp_critical; | 97 | int temp_critical; |
| 98 | int temp_max; | ||
| 103 | int alarm_temp; | 99 | int alarm_temp; |
| 104 | int last_temp; | 100 | int last_temp; |
| 105 | bool irq_enabled; | 101 | bool irq_enabled; |
| 106 | int irq; | 102 | int irq; |
| 107 | struct clk *thermal_clk; | 103 | struct clk *thermal_clk; |
| 108 | const struct thermal_soc_data *socdata; | 104 | const struct thermal_soc_data *socdata; |
| 105 | const char *temp_grade; | ||
| 109 | }; | 106 | }; |
| 110 | 107 | ||
| 111 | static void imx_set_panic_temp(struct imx_thermal_data *data, | 108 | static void imx_set_panic_temp(struct imx_thermal_data *data, |
| @@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, | |||
| 285 | { | 282 | { |
| 286 | struct imx_thermal_data *data = tz->devdata; | 283 | struct imx_thermal_data *data = tz->devdata; |
| 287 | 284 | ||
| 285 | /* do not allow changing critical threshold */ | ||
| 288 | if (trip == IMX_TRIP_CRITICAL) | 286 | if (trip == IMX_TRIP_CRITICAL) |
| 289 | return -EPERM; | 287 | return -EPERM; |
| 290 | 288 | ||
| 291 | if (temp < 0 || temp > IMX_TEMP_PASSIVE) | 289 | /* do not allow passive to be set higher than critical */ |
| 290 | if (temp < 0 || temp > data->temp_critical) | ||
| 292 | return -EINVAL; | 291 | return -EINVAL; |
| 293 | 292 | ||
| 294 | data->temp_passive = temp; | 293 | data->temp_passive = temp; |
| @@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev) | |||
| 404 | data->c1 = temp64; | 403 | data->c1 = temp64; |
| 405 | data->c2 = n1 * data->c1 + 1000 * t1; | 404 | data->c2 = n1 * data->c1 + 1000 * t1; |
| 406 | 405 | ||
| 407 | /* | 406 | /* use OTP for thermal grade */ |
| 408 | * Set the default passive cooling trip point, | 407 | ret = regmap_read(map, OCOTP_MEM0, &val); |
| 409 | * can be changed from userspace. | 408 | if (ret) { |
| 410 | */ | 409 | dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret); |
| 411 | data->temp_passive = IMX_TEMP_PASSIVE; | 410 | return ret; |
| 411 | } | ||
| 412 | |||
| 413 | /* The maximum die temp is specified by the Temperature Grade */ | ||
| 414 | switch ((val >> 6) & 0x3) { | ||
| 415 | case 0: /* Commercial (0 to 95C) */ | ||
| 416 | data->temp_grade = "Commercial"; | ||
| 417 | data->temp_max = 95000; | ||
| 418 | break; | ||
| 419 | case 1: /* Extended Commercial (-20 to 105C) */ | ||
| 420 | data->temp_grade = "Extended Commercial"; | ||
| 421 | data->temp_max = 105000; | ||
| 422 | break; | ||
| 423 | case 2: /* Industrial (-40 to 105C) */ | ||
| 424 | data->temp_grade = "Industrial"; | ||
| 425 | data->temp_max = 105000; | ||
| 426 | break; | ||
| 427 | case 3: /* Automotive (-40 to 125C) */ | ||
| 428 | data->temp_grade = "Automotive"; | ||
| 429 | data->temp_max = 125000; | ||
| 430 | break; | ||
| 431 | } | ||
| 412 | 432 | ||
| 413 | /* | 433 | /* |
| 414 | * The maximum die temperature set to 20 C higher than | 434 | * Set the critical trip point at 5C under max |
| 415 | * IMX_TEMP_PASSIVE. | 435 | * Set the passive trip point at 10C under max (can change via sysfs) |
| 416 | */ | 436 | */ |
| 417 | data->temp_critical = 1000 * 20 + data->temp_passive; | 437 | data->temp_critical = data->temp_max - (1000 * 5); |
| 438 | data->temp_passive = data->temp_max - (1000 * 10); | ||
| 418 | 439 | ||
| 419 | return 0; | 440 | return 0; |
| 420 | } | 441 | } |
| @@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev) | |||
| 551 | return ret; | 572 | return ret; |
| 552 | } | 573 | } |
| 553 | 574 | ||
| 575 | dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" | ||
| 576 | " critical:%dC passive:%dC\n", data->temp_grade, | ||
| 577 | data->temp_max / 1000, data->temp_critical / 1000, | ||
| 578 | data->temp_passive / 1000); | ||
| 579 | |||
| 554 | /* Enable measurements at ~ 10 Hz */ | 580 | /* Enable measurements at ~ 10 Hz */ |
| 555 | regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); | 581 | regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); |
| 556 | measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ | 582 | measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 42b7d4253b94..be4eedcb839a 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
| @@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void) | |||
| 964 | 964 | ||
| 965 | np = of_find_node_by_name(NULL, "thermal-zones"); | 965 | np = of_find_node_by_name(NULL, "thermal-zones"); |
| 966 | if (!np) { | 966 | if (!np) { |
| 967 | pr_err("unable to find thermal zones\n"); | 967 | pr_debug("unable to find thermal zones\n"); |
| 968 | return; | 968 | return; |
| 969 | } | 969 | } |
| 970 | 970 | ||
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index f0fbea386869..1246aa6fcab0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c | |||
| @@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, | |||
| 174 | /** | 174 | /** |
| 175 | * pid_controller() - PID controller | 175 | * pid_controller() - PID controller |
| 176 | * @tz: thermal zone we are operating in | 176 | * @tz: thermal zone we are operating in |
| 177 | * @current_temp: the current temperature in millicelsius | ||
| 178 | * @control_temp: the target temperature in millicelsius | 177 | * @control_temp: the target temperature in millicelsius |
| 179 | * @max_allocatable_power: maximum allocatable power for this thermal zone | 178 | * @max_allocatable_power: maximum allocatable power for this thermal zone |
| 180 | * | 179 | * |
| @@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, | |||
| 191 | * Return: The power budget for the next period. | 190 | * Return: The power budget for the next period. |
| 192 | */ | 191 | */ |
| 193 | static u32 pid_controller(struct thermal_zone_device *tz, | 192 | static u32 pid_controller(struct thermal_zone_device *tz, |
| 194 | int current_temp, | ||
| 195 | int control_temp, | 193 | int control_temp, |
| 196 | u32 max_allocatable_power) | 194 | u32 max_allocatable_power) |
| 197 | { | 195 | { |
| @@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, | |||
| 211 | true); | 209 | true); |
| 212 | } | 210 | } |
| 213 | 211 | ||
| 214 | err = control_temp - current_temp; | 212 | err = control_temp - tz->temperature; |
| 215 | err = int_to_frac(err); | 213 | err = int_to_frac(err); |
| 216 | 214 | ||
| 217 | /* Calculate the proportional term */ | 215 | /* Calculate the proportional term */ |
| @@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, | |||
| 332 | } | 330 | } |
| 333 | 331 | ||
| 334 | static int allocate_power(struct thermal_zone_device *tz, | 332 | static int allocate_power(struct thermal_zone_device *tz, |
| 335 | int current_temp, | ||
| 336 | int control_temp) | 333 | int control_temp) |
| 337 | { | 334 | { |
| 338 | struct thermal_instance *instance; | 335 | struct thermal_instance *instance; |
| @@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz, | |||
| 418 | i++; | 415 | i++; |
| 419 | } | 416 | } |
| 420 | 417 | ||
| 421 | power_range = pid_controller(tz, current_temp, control_temp, | 418 | power_range = pid_controller(tz, control_temp, max_allocatable_power); |
| 422 | max_allocatable_power); | ||
| 423 | 419 | ||
| 424 | divvy_up_power(weighted_req_power, max_power, num_actors, | 420 | divvy_up_power(weighted_req_power, max_power, num_actors, |
| 425 | total_weighted_req_power, power_range, granted_power, | 421 | total_weighted_req_power, power_range, granted_power, |
| @@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz, | |||
| 444 | trace_thermal_power_allocator(tz, req_power, total_req_power, | 440 | trace_thermal_power_allocator(tz, req_power, total_req_power, |
| 445 | granted_power, total_granted_power, | 441 | granted_power, total_granted_power, |
| 446 | num_actors, power_range, | 442 | num_actors, power_range, |
| 447 | max_allocatable_power, current_temp, | 443 | max_allocatable_power, tz->temperature, |
| 448 | control_temp - current_temp); | 444 | control_temp - tz->temperature); |
| 449 | 445 | ||
| 450 | kfree(req_power); | 446 | kfree(req_power); |
| 451 | unlock: | 447 | unlock: |
| @@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz) | |||
| 612 | static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) | 608 | static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) |
| 613 | { | 609 | { |
| 614 | int ret; | 610 | int ret; |
| 615 | int switch_on_temp, control_temp, current_temp; | 611 | int switch_on_temp, control_temp; |
| 616 | struct power_allocator_params *params = tz->governor_data; | 612 | struct power_allocator_params *params = tz->governor_data; |
| 617 | 613 | ||
| 618 | /* | 614 | /* |
| @@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) | |||
| 622 | if (trip != params->trip_max_desired_temperature) | 618 | if (trip != params->trip_max_desired_temperature) |
| 623 | return 0; | 619 | return 0; |
| 624 | 620 | ||
| 625 | ret = thermal_zone_get_temp(tz, ¤t_temp); | ||
| 626 | if (ret) { | ||
| 627 | dev_warn(&tz->device, "Failed to get temperature: %d\n", ret); | ||
| 628 | return ret; | ||
| 629 | } | ||
| 630 | |||
| 631 | ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, | 621 | ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, |
| 632 | &switch_on_temp); | 622 | &switch_on_temp); |
| 633 | if (!ret && (current_temp < switch_on_temp)) { | 623 | if (!ret && (tz->temperature < switch_on_temp)) { |
| 634 | tz->passive = 0; | 624 | tz->passive = 0; |
| 635 | reset_pid_controller(params); | 625 | reset_pid_controller(params); |
| 636 | allow_maximum_power(tz); | 626 | allow_maximum_power(tz); |
| @@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) | |||
| 648 | return ret; | 638 | return ret; |
| 649 | } | 639 | } |
| 650 | 640 | ||
| 651 | return allocate_power(tz, current_temp, control_temp); | 641 | return allocate_power(tz, control_temp); |
| 652 | } | 642 | } |
| 653 | 643 | ||
| 654 | static struct thermal_governor thermal_gov_power_allocator = { | 644 | static struct thermal_governor thermal_gov_power_allocator = { |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 5d4ae7d705e0..13d01edc7a04 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
| @@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) | |||
| 361 | /* | 361 | /* |
| 362 | * platform functions | 362 | * platform functions |
| 363 | */ | 363 | */ |
| 364 | static int rcar_thermal_remove(struct platform_device *pdev) | ||
| 365 | { | ||
| 366 | struct rcar_thermal_common *common = platform_get_drvdata(pdev); | ||
| 367 | struct device *dev = &pdev->dev; | ||
| 368 | struct rcar_thermal_priv *priv; | ||
| 369 | |||
| 370 | rcar_thermal_for_each_priv(priv, common) { | ||
| 371 | if (rcar_has_irq_support(priv)) | ||
| 372 | rcar_thermal_irq_disable(priv); | ||
| 373 | thermal_zone_device_unregister(priv->zone); | ||
| 374 | } | ||
| 375 | |||
| 376 | pm_runtime_put(dev); | ||
| 377 | pm_runtime_disable(dev); | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 364 | static int rcar_thermal_probe(struct platform_device *pdev) | 382 | static int rcar_thermal_probe(struct platform_device *pdev) |
| 365 | { | 383 | { |
| 366 | struct rcar_thermal_common *common; | 384 | struct rcar_thermal_common *common; |
| @@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
| 377 | if (!common) | 395 | if (!common) |
| 378 | return -ENOMEM; | 396 | return -ENOMEM; |
| 379 | 397 | ||
| 398 | platform_set_drvdata(pdev, common); | ||
| 399 | |||
| 380 | INIT_LIST_HEAD(&common->head); | 400 | INIT_LIST_HEAD(&common->head); |
| 381 | spin_lock_init(&common->lock); | 401 | spin_lock_init(&common->lock); |
| 382 | common->dev = dev; | 402 | common->dev = dev; |
| @@ -454,43 +474,16 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
| 454 | rcar_thermal_common_write(common, ENR, enr_bits); | 474 | rcar_thermal_common_write(common, ENR, enr_bits); |
| 455 | } | 475 | } |
| 456 | 476 | ||
| 457 | platform_set_drvdata(pdev, common); | ||
| 458 | |||
| 459 | dev_info(dev, "%d sensor probed\n", i); | 477 | dev_info(dev, "%d sensor probed\n", i); |
| 460 | 478 | ||
| 461 | return 0; | 479 | return 0; |
| 462 | 480 | ||
| 463 | error_unregister: | 481 | error_unregister: |
| 464 | rcar_thermal_for_each_priv(priv, common) { | 482 | rcar_thermal_remove(pdev); |
| 465 | if (rcar_has_irq_support(priv)) | ||
| 466 | rcar_thermal_irq_disable(priv); | ||
| 467 | thermal_zone_device_unregister(priv->zone); | ||
| 468 | } | ||
| 469 | |||
| 470 | pm_runtime_put(dev); | ||
| 471 | pm_runtime_disable(dev); | ||
| 472 | 483 | ||
| 473 | return ret; | 484 | return ret; |
| 474 | } | 485 | } |
| 475 | 486 | ||
| 476 | static int rcar_thermal_remove(struct platform_device *pdev) | ||
| 477 | { | ||
| 478 | struct rcar_thermal_common *common = platform_get_drvdata(pdev); | ||
| 479 | struct device *dev = &pdev->dev; | ||
| 480 | struct rcar_thermal_priv *priv; | ||
| 481 | |||
| 482 | rcar_thermal_for_each_priv(priv, common) { | ||
| 483 | if (rcar_has_irq_support(priv)) | ||
| 484 | rcar_thermal_irq_disable(priv); | ||
| 485 | thermal_zone_device_unregister(priv->zone); | ||
| 486 | } | ||
| 487 | |||
| 488 | pm_runtime_put(dev); | ||
| 489 | pm_runtime_disable(dev); | ||
| 490 | |||
| 491 | return 0; | ||
| 492 | } | ||
| 493 | |||
| 494 | static const struct of_device_id rcar_thermal_dt_ids[] = { | 487 | static const struct of_device_id rcar_thermal_dt_ids[] = { |
| 495 | { .compatible = "renesas,rcar-thermal", }, | 488 | { .compatible = "renesas,rcar-thermal", }, |
| 496 | {}, | 489 | {}, |
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 9787e8aa509f..e845841ab036 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd | 2 | * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd | ||
| 5 | * Caesar Wang <wxt@rock-chips.com> | ||
| 6 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, | 8 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. | 9 | * version 2, as published by the Free Software Foundation. |
| @@ -45,17 +48,50 @@ enum tshut_polarity { | |||
| 45 | }; | 48 | }; |
| 46 | 49 | ||
| 47 | /** | 50 | /** |
| 48 | * The system has three Temperature Sensors. channel 0 is reserved, | 51 | * The system has two Temperature Sensors. |
| 49 | * channel 1 is for CPU, and channel 2 is for GPU. | 52 | * sensor0 is for CPU, and sensor1 is for GPU. |
| 50 | */ | 53 | */ |
| 51 | enum sensor_id { | 54 | enum sensor_id { |
| 52 | SENSOR_CPU = 1, | 55 | SENSOR_CPU = 0, |
| 53 | SENSOR_GPU, | 56 | SENSOR_GPU, |
| 54 | }; | 57 | }; |
| 55 | 58 | ||
| 59 | /** | ||
| 60 | * The conversion table has the adc value and temperature. | ||
| 61 | * ADC_DECREMENT is the adc value decremnet.(e.g. v2_code_table) | ||
| 62 | * ADC_INCREMNET is the adc value incremnet.(e.g. v3_code_table) | ||
| 63 | */ | ||
| 64 | enum adc_sort_mode { | ||
| 65 | ADC_DECREMENT = 0, | ||
| 66 | ADC_INCREMENT, | ||
| 67 | }; | ||
| 68 | |||
| 69 | /** | ||
| 70 | * The max sensors is two in rockchip SoCs. | ||
| 71 | * Two sensors: CPU and GPU sensor. | ||
| 72 | */ | ||
| 73 | #define SOC_MAX_SENSORS 2 | ||
| 74 | |||
| 75 | struct chip_tsadc_table { | ||
| 76 | const struct tsadc_table *id; | ||
| 77 | |||
| 78 | /* the array table size*/ | ||
| 79 | unsigned int length; | ||
| 80 | |||
| 81 | /* that analogic mask data */ | ||
| 82 | u32 data_mask; | ||
| 83 | |||
| 84 | /* the sort mode is adc value that increment or decrement in table */ | ||
| 85 | enum adc_sort_mode mode; | ||
| 86 | }; | ||
| 87 | |||
| 56 | struct rockchip_tsadc_chip { | 88 | struct rockchip_tsadc_chip { |
| 89 | /* The sensor id of chip correspond to the ADC channel */ | ||
| 90 | int chn_id[SOC_MAX_SENSORS]; | ||
| 91 | int chn_num; | ||
| 92 | |||
| 57 | /* The hardware-controlled tshut property */ | 93 | /* The hardware-controlled tshut property */ |
| 58 | long tshut_temp; | 94 | int tshut_temp; |
| 59 | enum tshut_mode tshut_mode; | 95 | enum tshut_mode tshut_mode; |
| 60 | enum tshut_polarity tshut_polarity; | 96 | enum tshut_polarity tshut_polarity; |
| 61 | 97 | ||
| @@ -65,37 +101,40 @@ struct rockchip_tsadc_chip { | |||
| 65 | void (*control)(void __iomem *reg, bool on); | 101 | void (*control)(void __iomem *reg, bool on); |
| 66 | 102 | ||
| 67 | /* Per-sensor methods */ | 103 | /* Per-sensor methods */ |
| 68 | int (*get_temp)(int chn, void __iomem *reg, int *temp); | 104 | int (*get_temp)(struct chip_tsadc_table table, |
| 69 | void (*set_tshut_temp)(int chn, void __iomem *reg, long temp); | 105 | int chn, void __iomem *reg, int *temp); |
| 106 | void (*set_tshut_temp)(struct chip_tsadc_table table, | ||
| 107 | int chn, void __iomem *reg, int temp); | ||
| 70 | void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); | 108 | void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); |
| 109 | |||
| 110 | /* Per-table methods */ | ||
| 111 | struct chip_tsadc_table table; | ||
| 71 | }; | 112 | }; |
| 72 | 113 | ||
| 73 | struct rockchip_thermal_sensor { | 114 | struct rockchip_thermal_sensor { |
| 74 | struct rockchip_thermal_data *thermal; | 115 | struct rockchip_thermal_data *thermal; |
| 75 | struct thermal_zone_device *tzd; | 116 | struct thermal_zone_device *tzd; |
| 76 | enum sensor_id id; | 117 | int id; |
| 77 | }; | 118 | }; |
| 78 | 119 | ||
| 79 | #define NUM_SENSORS 2 /* Ignore unused sensor 0 */ | ||
| 80 | |||
| 81 | struct rockchip_thermal_data { | 120 | struct rockchip_thermal_data { |
| 82 | const struct rockchip_tsadc_chip *chip; | 121 | const struct rockchip_tsadc_chip *chip; |
| 83 | struct platform_device *pdev; | 122 | struct platform_device *pdev; |
| 84 | struct reset_control *reset; | 123 | struct reset_control *reset; |
| 85 | 124 | ||
| 86 | struct rockchip_thermal_sensor sensors[NUM_SENSORS]; | 125 | struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS]; |
| 87 | 126 | ||
| 88 | struct clk *clk; | 127 | struct clk *clk; |
| 89 | struct clk *pclk; | 128 | struct clk *pclk; |
| 90 | 129 | ||
| 91 | void __iomem *regs; | 130 | void __iomem *regs; |
| 92 | 131 | ||
| 93 | long tshut_temp; | 132 | int tshut_temp; |
| 94 | enum tshut_mode tshut_mode; | 133 | enum tshut_mode tshut_mode; |
| 95 | enum tshut_polarity tshut_polarity; | 134 | enum tshut_polarity tshut_polarity; |
| 96 | }; | 135 | }; |
| 97 | 136 | ||
| 98 | /* TSADC V2 Sensor info define: */ | 137 | /* TSADC Sensor info define: */ |
| 99 | #define TSADCV2_AUTO_CON 0x04 | 138 | #define TSADCV2_AUTO_CON 0x04 |
| 100 | #define TSADCV2_INT_EN 0x08 | 139 | #define TSADCV2_INT_EN 0x08 |
| 101 | #define TSADCV2_INT_PD 0x0c | 140 | #define TSADCV2_INT_PD 0x0c |
| @@ -117,6 +156,8 @@ struct rockchip_thermal_data { | |||
| 117 | #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) | 156 | #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) |
| 118 | 157 | ||
| 119 | #define TSADCV2_DATA_MASK 0xfff | 158 | #define TSADCV2_DATA_MASK 0xfff |
| 159 | #define TSADCV3_DATA_MASK 0x3ff | ||
| 160 | |||
| 120 | #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 | 161 | #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 |
| 121 | #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 | 162 | #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 |
| 122 | #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ | 163 | #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ |
| @@ -124,7 +165,7 @@ struct rockchip_thermal_data { | |||
| 124 | 165 | ||
| 125 | struct tsadc_table { | 166 | struct tsadc_table { |
| 126 | u32 code; | 167 | u32 code; |
| 127 | long temp; | 168 | int temp; |
| 128 | }; | 169 | }; |
| 129 | 170 | ||
| 130 | static const struct tsadc_table v2_code_table[] = { | 171 | static const struct tsadc_table v2_code_table[] = { |
| @@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = { | |||
| 165 | {3421, 125000}, | 206 | {3421, 125000}, |
| 166 | }; | 207 | }; |
| 167 | 208 | ||
| 168 | static u32 rk_tsadcv2_temp_to_code(long temp) | 209 | static const struct tsadc_table v3_code_table[] = { |
| 210 | {0, -40000}, | ||
| 211 | {106, -40000}, | ||
| 212 | {108, -35000}, | ||
| 213 | {110, -30000}, | ||
| 214 | {112, -25000}, | ||
| 215 | {114, -20000}, | ||
| 216 | {116, -15000}, | ||
| 217 | {118, -10000}, | ||
| 218 | {120, -5000}, | ||
| 219 | {122, 0}, | ||
| 220 | {124, 5000}, | ||
| 221 | {126, 10000}, | ||
| 222 | {128, 15000}, | ||
| 223 | {130, 20000}, | ||
| 224 | {132, 25000}, | ||
| 225 | {134, 30000}, | ||
| 226 | {136, 35000}, | ||
| 227 | {138, 40000}, | ||
| 228 | {140, 45000}, | ||
| 229 | {142, 50000}, | ||
| 230 | {144, 55000}, | ||
| 231 | {146, 60000}, | ||
| 232 | {148, 65000}, | ||
| 233 | {150, 70000}, | ||
| 234 | {152, 75000}, | ||
| 235 | {154, 80000}, | ||
| 236 | {156, 85000}, | ||
| 237 | {158, 90000}, | ||
| 238 | {160, 95000}, | ||
| 239 | {162, 100000}, | ||
| 240 | {163, 105000}, | ||
| 241 | {165, 110000}, | ||
| 242 | {167, 115000}, | ||
| 243 | {169, 120000}, | ||
| 244 | {171, 125000}, | ||
| 245 | {TSADCV3_DATA_MASK, 125000}, | ||
| 246 | }; | ||
| 247 | |||
| 248 | static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, | ||
| 249 | int temp) | ||
| 169 | { | 250 | { |
| 170 | int high, low, mid; | 251 | int high, low, mid; |
| 171 | 252 | ||
| 172 | low = 0; | 253 | low = 0; |
| 173 | high = ARRAY_SIZE(v2_code_table) - 1; | 254 | high = table.length - 1; |
| 174 | mid = (high + low) / 2; | 255 | mid = (high + low) / 2; |
| 175 | 256 | ||
| 176 | if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp) | 257 | if (temp < table.id[low].temp || temp > table.id[high].temp) |
| 177 | return 0; | 258 | return 0; |
| 178 | 259 | ||
| 179 | while (low <= high) { | 260 | while (low <= high) { |
| 180 | if (temp == v2_code_table[mid].temp) | 261 | if (temp == table.id[mid].temp) |
| 181 | return v2_code_table[mid].code; | 262 | return table.id[mid].code; |
| 182 | else if (temp < v2_code_table[mid].temp) | 263 | else if (temp < table.id[mid].temp) |
| 183 | high = mid - 1; | 264 | high = mid - 1; |
| 184 | else | 265 | else |
| 185 | low = mid + 1; | 266 | low = mid + 1; |
| @@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp) | |||
| 189 | return 0; | 270 | return 0; |
| 190 | } | 271 | } |
| 191 | 272 | ||
| 192 | static int rk_tsadcv2_code_to_temp(u32 code, int *temp) | 273 | static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, |
| 274 | int *temp) | ||
| 193 | { | 275 | { |
| 194 | unsigned int low = 1; | 276 | unsigned int low = 1; |
| 195 | unsigned int high = ARRAY_SIZE(v2_code_table) - 1; | 277 | unsigned int high = table.length - 1; |
| 196 | unsigned int mid = (low + high) / 2; | 278 | unsigned int mid = (low + high) / 2; |
| 197 | unsigned int num; | 279 | unsigned int num; |
| 198 | unsigned long denom; | 280 | unsigned long denom; |
| 199 | 281 | ||
| 200 | BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2); | 282 | WARN_ON(table.length < 2); |
| 201 | 283 | ||
| 202 | code &= TSADCV2_DATA_MASK; | 284 | switch (table.mode) { |
| 203 | if (code < v2_code_table[high].code) | 285 | case ADC_DECREMENT: |
| 204 | return -EAGAIN; /* Incorrect reading */ | 286 | code &= table.data_mask; |
| 205 | 287 | if (code < table.id[high].code) | |
| 206 | while (low <= high) { | 288 | return -EAGAIN; /* Incorrect reading */ |
| 207 | if (code >= v2_code_table[mid].code && | 289 | |
| 208 | code < v2_code_table[mid - 1].code) | 290 | while (low <= high) { |
| 209 | break; | 291 | if (code >= table.id[mid].code && |
| 210 | else if (code < v2_code_table[mid].code) | 292 | code < table.id[mid - 1].code) |
| 211 | low = mid + 1; | 293 | break; |
| 212 | else | 294 | else if (code < table.id[mid].code) |
| 213 | high = mid - 1; | 295 | low = mid + 1; |
| 214 | mid = (low + high) / 2; | 296 | else |
| 297 | high = mid - 1; | ||
| 298 | |||
| 299 | mid = (low + high) / 2; | ||
| 300 | } | ||
| 301 | break; | ||
| 302 | case ADC_INCREMENT: | ||
| 303 | code &= table.data_mask; | ||
| 304 | if (code < table.id[low].code) | ||
| 305 | return -EAGAIN; /* Incorrect reading */ | ||
| 306 | |||
| 307 | while (low <= high) { | ||
| 308 | if (code >= table.id[mid - 1].code && | ||
| 309 | code < table.id[mid].code) | ||
| 310 | break; | ||
| 311 | else if (code > table.id[mid].code) | ||
| 312 | low = mid + 1; | ||
| 313 | else | ||
| 314 | high = mid - 1; | ||
| 315 | |||
| 316 | mid = (low + high) / 2; | ||
| 317 | } | ||
| 318 | break; | ||
| 319 | default: | ||
| 320 | pr_err("Invalid the conversion table\n"); | ||
| 215 | } | 321 | } |
| 216 | 322 | ||
| 217 | /* | 323 | /* |
| @@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp) | |||
| 220 | * temperature between 2 table entries is linear and interpolate | 326 | * temperature between 2 table entries is linear and interpolate |
| 221 | * to produce less granular result. | 327 | * to produce less granular result. |
| 222 | */ | 328 | */ |
| 223 | num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp; | 329 | num = table.id[mid].temp - v2_code_table[mid - 1].temp; |
| 224 | num *= v2_code_table[mid - 1].code - code; | 330 | num *= abs(table.id[mid - 1].code - code); |
| 225 | denom = v2_code_table[mid - 1].code - v2_code_table[mid].code; | 331 | denom = abs(table.id[mid - 1].code - table.id[mid].code); |
| 226 | *temp = v2_code_table[mid - 1].temp + (num / denom); | 332 | *temp = table.id[mid - 1].temp + (num / denom); |
| 227 | 333 | ||
| 228 | return 0; | 334 | return 0; |
| 229 | } | 335 | } |
| 230 | 336 | ||
| 231 | /** | 337 | /** |
| 232 | * rk_tsadcv2_initialize - initialize TASDC Controller | 338 | * rk_tsadcv2_initialize - initialize TASDC Controller. |
| 233 | * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between | 339 | * |
| 234 | * every two accessing of TSADC in normal operation. | 340 | * (1) Set TSADC_V2_AUTO_PERIOD: |
| 235 | * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between | 341 | * Configure the interleave between every two accessing of |
| 236 | * every two accessing of TSADC after the temperature is higher | 342 | * TSADC in normal operation. |
| 237 | * than COM_SHUT or COM_INT. | 343 | * |
| 238 | * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE, | 344 | * (2) Set TSADCV2_AUTO_PERIOD_HT: |
| 239 | * if the temperature is higher than COMP_INT or COMP_SHUT for | 345 | * Configure the interleave between every two accessing of |
| 240 | * "debounce" times, TSADC controller will generate interrupt or TSHUT. | 346 | * TSADC after the temperature is higher than COM_SHUT or COM_INT. |
| 347 | * | ||
| 348 | * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE: | ||
| 349 | * If the temperature is higher than COMP_INT or COMP_SHUT for | ||
| 350 | * "debounce" times, TSADC controller will generate interrupt or TSHUT. | ||
| 241 | */ | 351 | */ |
| 242 | static void rk_tsadcv2_initialize(void __iomem *regs, | 352 | static void rk_tsadcv2_initialize(void __iomem *regs, |
| 243 | enum tshut_polarity tshut_polarity) | 353 | enum tshut_polarity tshut_polarity) |
| @@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable) | |||
| 279 | writel_relaxed(val, regs + TSADCV2_AUTO_CON); | 389 | writel_relaxed(val, regs + TSADCV2_AUTO_CON); |
| 280 | } | 390 | } |
| 281 | 391 | ||
| 282 | static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp) | 392 | static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, |
| 393 | int chn, void __iomem *regs, int *temp) | ||
| 283 | { | 394 | { |
| 284 | u32 val; | 395 | u32 val; |
| 285 | 396 | ||
| 286 | val = readl_relaxed(regs + TSADCV2_DATA(chn)); | 397 | val = readl_relaxed(regs + TSADCV2_DATA(chn)); |
| 287 | 398 | ||
| 288 | return rk_tsadcv2_code_to_temp(val, temp); | 399 | return rk_tsadcv2_code_to_temp(table, val, temp); |
| 289 | } | 400 | } |
| 290 | 401 | ||
| 291 | static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp) | 402 | static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, |
| 403 | int chn, void __iomem *regs, int temp) | ||
| 292 | { | 404 | { |
| 293 | u32 tshut_value, val; | 405 | u32 tshut_value, val; |
| 294 | 406 | ||
| 295 | tshut_value = rk_tsadcv2_temp_to_code(temp); | 407 | tshut_value = rk_tsadcv2_temp_to_code(table, temp); |
| 296 | writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); | 408 | writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); |
| 297 | 409 | ||
| 298 | /* TSHUT will be valid */ | 410 | /* TSHUT will be valid */ |
| @@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, | |||
| 318 | } | 430 | } |
| 319 | 431 | ||
| 320 | static const struct rockchip_tsadc_chip rk3288_tsadc_data = { | 432 | static const struct rockchip_tsadc_chip rk3288_tsadc_data = { |
| 433 | .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */ | ||
| 434 | .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */ | ||
| 435 | .chn_num = 2, /* two channels for tsadc */ | ||
| 436 | |||
| 321 | .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ | 437 | .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ |
| 322 | .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ | 438 | .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ |
| 323 | .tshut_temp = 95000, | 439 | .tshut_temp = 95000, |
| @@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { | |||
| 328 | .get_temp = rk_tsadcv2_get_temp, | 444 | .get_temp = rk_tsadcv2_get_temp, |
| 329 | .set_tshut_temp = rk_tsadcv2_tshut_temp, | 445 | .set_tshut_temp = rk_tsadcv2_tshut_temp, |
| 330 | .set_tshut_mode = rk_tsadcv2_tshut_mode, | 446 | .set_tshut_mode = rk_tsadcv2_tshut_mode, |
| 447 | |||
| 448 | .table = { | ||
| 449 | .id = v2_code_table, | ||
| 450 | .length = ARRAY_SIZE(v2_code_table), | ||
| 451 | .data_mask = TSADCV2_DATA_MASK, | ||
| 452 | .mode = ADC_DECREMENT, | ||
| 453 | }, | ||
| 454 | }; | ||
| 455 | |||
| 456 | static const struct rockchip_tsadc_chip rk3368_tsadc_data = { | ||
| 457 | .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ | ||
| 458 | .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ | ||
| 459 | .chn_num = 2, /* two channels for tsadc */ | ||
| 460 | |||
| 461 | .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ | ||
| 462 | .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ | ||
| 463 | .tshut_temp = 95000, | ||
| 464 | |||
| 465 | .initialize = rk_tsadcv2_initialize, | ||
| 466 | .irq_ack = rk_tsadcv2_irq_ack, | ||
| 467 | .control = rk_tsadcv2_control, | ||
| 468 | .get_temp = rk_tsadcv2_get_temp, | ||
| 469 | .set_tshut_temp = rk_tsadcv2_tshut_temp, | ||
| 470 | .set_tshut_mode = rk_tsadcv2_tshut_mode, | ||
| 471 | |||
| 472 | .table = { | ||
| 473 | .id = v3_code_table, | ||
| 474 | .length = ARRAY_SIZE(v3_code_table), | ||
| 475 | .data_mask = TSADCV3_DATA_MASK, | ||
| 476 | .mode = ADC_INCREMENT, | ||
| 477 | }, | ||
| 331 | }; | 478 | }; |
| 332 | 479 | ||
| 333 | static const struct of_device_id of_rockchip_thermal_match[] = { | 480 | static const struct of_device_id of_rockchip_thermal_match[] = { |
| @@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { | |||
| 335 | .compatible = "rockchip,rk3288-tsadc", | 482 | .compatible = "rockchip,rk3288-tsadc", |
| 336 | .data = (void *)&rk3288_tsadc_data, | 483 | .data = (void *)&rk3288_tsadc_data, |
| 337 | }, | 484 | }, |
| 485 | { | ||
| 486 | .compatible = "rockchip,rk3368-tsadc", | ||
| 487 | .data = (void *)&rk3368_tsadc_data, | ||
| 488 | }, | ||
| 338 | { /* end */ }, | 489 | { /* end */ }, |
| 339 | }; | 490 | }; |
| 340 | MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); | 491 | MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); |
| @@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) | |||
| 357 | 508 | ||
| 358 | thermal->chip->irq_ack(thermal->regs); | 509 | thermal->chip->irq_ack(thermal->regs); |
| 359 | 510 | ||
| 360 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) | 511 | for (i = 0; i < thermal->chip->chn_num; i++) |
| 361 | thermal_zone_device_update(thermal->sensors[i].tzd); | 512 | thermal_zone_device_update(thermal->sensors[i].tzd); |
| 362 | 513 | ||
| 363 | return IRQ_HANDLED; | 514 | return IRQ_HANDLED; |
| @@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) | |||
| 370 | const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; | 521 | const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; |
| 371 | int retval; | 522 | int retval; |
| 372 | 523 | ||
| 373 | retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); | 524 | retval = tsadc->get_temp(tsadc->table, |
| 525 | sensor->id, thermal->regs, out_temp); | ||
| 374 | dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", | 526 | dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", |
| 375 | sensor->id, *out_temp, retval); | 527 | sensor->id, *out_temp, retval); |
| 376 | 528 | ||
| @@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev, | |||
| 389 | 541 | ||
| 390 | if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { | 542 | if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { |
| 391 | dev_warn(dev, | 543 | dev_warn(dev, |
| 392 | "Missing tshut temp property, using default %ld\n", | 544 | "Missing tshut temp property, using default %d\n", |
| 393 | thermal->chip->tshut_temp); | 545 | thermal->chip->tshut_temp); |
| 394 | thermal->tshut_temp = thermal->chip->tshut_temp; | 546 | thermal->tshut_temp = thermal->chip->tshut_temp; |
| 395 | } else { | 547 | } else { |
| @@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev, | |||
| 397 | } | 549 | } |
| 398 | 550 | ||
| 399 | if (thermal->tshut_temp > INT_MAX) { | 551 | if (thermal->tshut_temp > INT_MAX) { |
| 400 | dev_err(dev, "Invalid tshut temperature specified: %ld\n", | 552 | dev_err(dev, "Invalid tshut temperature specified: %d\n", |
| 401 | thermal->tshut_temp); | 553 | thermal->tshut_temp); |
| 402 | return -ERANGE; | 554 | return -ERANGE; |
| 403 | } | 555 | } |
| @@ -442,13 +594,14 @@ static int | |||
| 442 | rockchip_thermal_register_sensor(struct platform_device *pdev, | 594 | rockchip_thermal_register_sensor(struct platform_device *pdev, |
| 443 | struct rockchip_thermal_data *thermal, | 595 | struct rockchip_thermal_data *thermal, |
| 444 | struct rockchip_thermal_sensor *sensor, | 596 | struct rockchip_thermal_sensor *sensor, |
| 445 | enum sensor_id id) | 597 | int id) |
| 446 | { | 598 | { |
| 447 | const struct rockchip_tsadc_chip *tsadc = thermal->chip; | 599 | const struct rockchip_tsadc_chip *tsadc = thermal->chip; |
| 448 | int error; | 600 | int error; |
| 449 | 601 | ||
| 450 | tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); | 602 | tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); |
| 451 | tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp); | 603 | tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, |
| 604 | thermal->tshut_temp); | ||
| 452 | 605 | ||
| 453 | sensor->thermal = thermal; | 606 | sensor->thermal = thermal; |
| 454 | sensor->id = id; | 607 | sensor->id = id; |
| @@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) | |||
| 481 | const struct of_device_id *match; | 634 | const struct of_device_id *match; |
| 482 | struct resource *res; | 635 | struct resource *res; |
| 483 | int irq; | 636 | int irq; |
| 484 | int i; | 637 | int i, j; |
| 485 | int error; | 638 | int error; |
| 486 | 639 | ||
| 487 | match = of_match_node(of_rockchip_thermal_match, np); | 640 | match = of_match_node(of_rockchip_thermal_match, np); |
| @@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev) | |||
| 556 | 709 | ||
| 557 | thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); | 710 | thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); |
| 558 | 711 | ||
| 559 | error = rockchip_thermal_register_sensor(pdev, thermal, | 712 | for (i = 0; i < thermal->chip->chn_num; i++) { |
| 560 | &thermal->sensors[0], | 713 | error = rockchip_thermal_register_sensor(pdev, thermal, |
| 561 | SENSOR_CPU); | 714 | &thermal->sensors[i], |
| 562 | if (error) { | 715 | thermal->chip->chn_id[i]); |
| 563 | dev_err(&pdev->dev, | 716 | if (error) { |
| 564 | "failed to register CPU thermal sensor: %d\n", error); | 717 | dev_err(&pdev->dev, |
| 565 | goto err_disable_pclk; | 718 | "failed to register sensor[%d] : error = %d\n", |
| 566 | } | 719 | i, error); |
| 567 | 720 | for (j = 0; j < i; j++) | |
| 568 | error = rockchip_thermal_register_sensor(pdev, thermal, | 721 | thermal_zone_of_sensor_unregister(&pdev->dev, |
| 569 | &thermal->sensors[1], | 722 | thermal->sensors[j].tzd); |
| 570 | SENSOR_GPU); | 723 | goto err_disable_pclk; |
| 571 | if (error) { | 724 | } |
| 572 | dev_err(&pdev->dev, | ||
| 573 | "failed to register GPU thermal sensor: %d\n", error); | ||
| 574 | goto err_unregister_cpu_sensor; | ||
| 575 | } | 725 | } |
| 576 | 726 | ||
| 577 | error = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 727 | error = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
| @@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev) | |||
| 581 | if (error) { | 731 | if (error) { |
| 582 | dev_err(&pdev->dev, | 732 | dev_err(&pdev->dev, |
| 583 | "failed to request tsadc irq: %d\n", error); | 733 | "failed to request tsadc irq: %d\n", error); |
| 584 | goto err_unregister_gpu_sensor; | 734 | goto err_unregister_sensor; |
| 585 | } | 735 | } |
| 586 | 736 | ||
| 587 | thermal->chip->control(thermal->regs, true); | 737 | thermal->chip->control(thermal->regs, true); |
| 588 | 738 | ||
| 589 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) | 739 | for (i = 0; i < thermal->chip->chn_num; i++) |
| 590 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); | 740 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); |
| 591 | 741 | ||
| 592 | platform_set_drvdata(pdev, thermal); | 742 | platform_set_drvdata(pdev, thermal); |
| 593 | 743 | ||
| 594 | return 0; | 744 | return 0; |
| 595 | 745 | ||
| 596 | err_unregister_gpu_sensor: | 746 | err_unregister_sensor: |
| 597 | thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); | 747 | while (i--) |
| 598 | err_unregister_cpu_sensor: | 748 | thermal_zone_of_sensor_unregister(&pdev->dev, |
| 599 | thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); | 749 | thermal->sensors[i].tzd); |
| 750 | |||
| 600 | err_disable_pclk: | 751 | err_disable_pclk: |
| 601 | clk_disable_unprepare(thermal->pclk); | 752 | clk_disable_unprepare(thermal->pclk); |
| 602 | err_disable_clk: | 753 | err_disable_clk: |
| @@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev) | |||
| 610 | struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); | 761 | struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); |
| 611 | int i; | 762 | int i; |
| 612 | 763 | ||
| 613 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { | 764 | for (i = 0; i < thermal->chip->chn_num; i++) { |
| 614 | struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; | 765 | struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; |
| 615 | 766 | ||
| 616 | rockchip_thermal_toggle_sensor(sensor, false); | 767 | rockchip_thermal_toggle_sensor(sensor, false); |
| @@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) | |||
| 631 | struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); | 782 | struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); |
| 632 | int i; | 783 | int i; |
| 633 | 784 | ||
| 634 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) | 785 | for (i = 0; i < thermal->chip->chn_num; i++) |
| 635 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); | 786 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); |
| 636 | 787 | ||
| 637 | thermal->chip->control(thermal->regs, false); | 788 | thermal->chip->control(thermal->regs, false); |
| @@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) | |||
| 663 | 814 | ||
| 664 | thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); | 815 | thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); |
| 665 | 816 | ||
| 666 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { | 817 | for (i = 0; i < thermal->chip->chn_num; i++) { |
| 667 | enum sensor_id id = thermal->sensors[i].id; | 818 | int id = thermal->sensors[i].id; |
| 668 | 819 | ||
| 669 | thermal->chip->set_tshut_mode(id, thermal->regs, | 820 | thermal->chip->set_tshut_mode(id, thermal->regs, |
| 670 | thermal->tshut_mode); | 821 | thermal->tshut_mode); |
| 671 | thermal->chip->set_tshut_temp(id, thermal->regs, | 822 | thermal->chip->set_tshut_temp(thermal->chip->table, |
| 823 | id, thermal->regs, | ||
| 672 | thermal->tshut_temp); | 824 | thermal->tshut_temp); |
| 673 | } | 825 | } |
| 674 | 826 | ||
| 675 | thermal->chip->control(thermal->regs, true); | 827 | thermal->chip->control(thermal->regs, true); |
| 676 | 828 | ||
| 677 | for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) | 829 | for (i = 0; i < thermal->chip->chn_num; i++) |
| 678 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); | 830 | rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); |
| 679 | 831 | ||
| 680 | pinctrl_pm_select_default_state(dev); | 832 | pinctrl_pm_select_default_state(dev); |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 13844261cd5f..ed776149261e 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
| @@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty, | |||
| 169 | { | 169 | { |
| 170 | struct n_tty_data *ldata = tty->disc_data; | 170 | struct n_tty_data *ldata = tty->disc_data; |
| 171 | 171 | ||
| 172 | tty_audit_add_data(tty, to, n, ldata->icanon); | 172 | tty_audit_add_data(tty, from, n, ldata->icanon); |
| 173 | return copy_to_user(to, from, n); | 173 | return copy_to_user(to, from, n); |
| 174 | } | 174 | } |
| 175 | 175 | ||
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index c0533a57ec53..910bfee5a88b 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c | |||
| @@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port) | |||
| 60 | spin_unlock_irqrestore(&up->port.lock, flags); | 60 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 61 | return 1; | 61 | return 1; |
| 62 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(fsl8250_handle_irq); | ||
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index e6f5e12a2d83..6412f1455beb 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig | |||
| @@ -373,6 +373,7 @@ config SERIAL_8250_MID | |||
| 373 | depends on SERIAL_8250 && PCI | 373 | depends on SERIAL_8250 && PCI |
| 374 | select HSU_DMA if SERIAL_8250_DMA | 374 | select HSU_DMA if SERIAL_8250_DMA |
| 375 | select HSU_DMA_PCI if X86_INTEL_MID | 375 | select HSU_DMA_PCI if X86_INTEL_MID |
| 376 | select RATIONAL | ||
| 376 | help | 377 | help |
| 377 | Selecting this option will enable handling of the extra features | 378 | Selecting this option will enable handling of the extra features |
| 378 | present on the UART found on Intel Medfield SOC and various other | 379 | present on the UART found on Intel Medfield SOC and various other |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 1aec4404062d..f38beb28e7ae 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
| @@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART | |||
| 1539 | tristate "Freescale lpuart serial port support" | 1539 | tristate "Freescale lpuart serial port support" |
| 1540 | depends on HAS_DMA | 1540 | depends on HAS_DMA |
| 1541 | select SERIAL_CORE | 1541 | select SERIAL_CORE |
| 1542 | select SERIAL_EARLYCON | ||
| 1543 | help | 1542 | help |
| 1544 | Support for the on-chip lpuart on some Freescale SOCs. | 1543 | Support for the on-chip lpuart on some Freescale SOCs. |
| 1545 | 1544 | ||
| @@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE | |||
| 1547 | bool "Console on Freescale lpuart serial port" | 1546 | bool "Console on Freescale lpuart serial port" |
| 1548 | depends on SERIAL_FSL_LPUART=y | 1547 | depends on SERIAL_FSL_LPUART=y |
| 1549 | select SERIAL_CORE_CONSOLE | 1548 | select SERIAL_CORE_CONSOLE |
| 1549 | select SERIAL_EARLYCON | ||
| 1550 | help | 1550 | help |
| 1551 | If you have enabled the lpuart serial port on the Freescale SoCs, | 1551 | If you have enabled the lpuart serial port on the Freescale SoCs, |
| 1552 | you can make it the console by answering Y to this option. | 1552 | you can make it the console by answering Y to this option. |
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 681e0f3d5e0e..a1c0a89d9c7f 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c | |||
| @@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port) | |||
| 474 | 474 | ||
| 475 | /* register irq and enable rx interrupts */ | 475 | /* register irq and enable rx interrupts */ |
| 476 | ret = request_irq(port->irq, bcm_uart_interrupt, 0, | 476 | ret = request_irq(port->irq, bcm_uart_interrupt, 0, |
| 477 | bcm_uart_type(port), port); | 477 | dev_name(port->dev), port); |
| 478 | if (ret) | 478 | if (ret) |
| 479 | return ret; | 479 | return ret; |
| 480 | bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); | 480 | bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); |
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index 6813e316e9ff..2f80bc7e44fb 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c | |||
| @@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev) | |||
| 894 | up->regi_ser = of_iomap(np, 0); | 894 | up->regi_ser = of_iomap(np, 0); |
| 895 | up->port.dev = &pdev->dev; | 895 | up->port.dev = &pdev->dev; |
| 896 | 896 | ||
| 897 | up->gpios = mctrl_gpio_init(&pdev->dev, 0); | 897 | up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0); |
| 898 | if (IS_ERR(up->gpios)) | 898 | if (IS_ERR(up->gpios)) |
| 899 | return PTR_ERR(up->gpios); | 899 | return PTR_ERR(up->gpios); |
| 900 | 900 | ||
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 90ca082935f6..3d245cd3d8e6 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c | |||
| @@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, | |||
| 265 | * | 265 | * |
| 266 | * Audit @data of @size from @tty, if necessary. | 266 | * Audit @data of @size from @tty, if necessary. |
| 267 | */ | 267 | */ |
| 268 | void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 268 | void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 269 | size_t size, unsigned icanon) | 269 | size_t size, unsigned icanon) |
| 270 | { | 270 | { |
| 271 | struct tty_audit_buf *buf; | 271 | struct tty_audit_buf *buf; |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 0c41dbcb90b8..bcc8e1e8bb72 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch) | |||
| 1282 | int was_stopped = tty->stopped; | 1282 | int was_stopped = tty->stopped; |
| 1283 | 1283 | ||
| 1284 | if (tty->ops->send_xchar) { | 1284 | if (tty->ops->send_xchar) { |
| 1285 | down_read(&tty->termios_rwsem); | ||
| 1285 | tty->ops->send_xchar(tty, ch); | 1286 | tty->ops->send_xchar(tty, ch); |
| 1287 | up_read(&tty->termios_rwsem); | ||
| 1286 | return 0; | 1288 | return 0; |
| 1287 | } | 1289 | } |
| 1288 | 1290 | ||
| 1289 | if (tty_write_lock(tty, 0) < 0) | 1291 | if (tty_write_lock(tty, 0) < 0) |
| 1290 | return -ERESTARTSYS; | 1292 | return -ERESTARTSYS; |
| 1291 | 1293 | ||
| 1294 | down_read(&tty->termios_rwsem); | ||
| 1292 | if (was_stopped) | 1295 | if (was_stopped) |
| 1293 | start_tty(tty); | 1296 | start_tty(tty); |
| 1294 | tty->ops->write(tty, &ch, 1); | 1297 | tty->ops->write(tty, &ch, 1); |
| 1295 | if (was_stopped) | 1298 | if (was_stopped) |
| 1296 | stop_tty(tty); | 1299 | stop_tty(tty); |
| 1300 | up_read(&tty->termios_rwsem); | ||
| 1297 | tty_write_unlock(tty); | 1301 | tty_write_unlock(tty); |
| 1298 | return 0; | 1302 | return 0; |
| 1299 | } | 1303 | } |
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 9c5aebfe7053..1445dd39aa62 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
| @@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | |||
| 1147 | spin_unlock_irq(&tty->flow_lock); | 1147 | spin_unlock_irq(&tty->flow_lock); |
| 1148 | break; | 1148 | break; |
| 1149 | case TCIOFF: | 1149 | case TCIOFF: |
| 1150 | down_read(&tty->termios_rwsem); | ||
| 1151 | if (STOP_CHAR(tty) != __DISABLED_CHAR) | 1150 | if (STOP_CHAR(tty) != __DISABLED_CHAR) |
| 1152 | retval = tty_send_xchar(tty, STOP_CHAR(tty)); | 1151 | retval = tty_send_xchar(tty, STOP_CHAR(tty)); |
| 1153 | up_read(&tty->termios_rwsem); | ||
| 1154 | break; | 1152 | break; |
| 1155 | case TCION: | 1153 | case TCION: |
| 1156 | down_read(&tty->termios_rwsem); | ||
| 1157 | if (START_CHAR(tty) != __DISABLED_CHAR) | 1154 | if (START_CHAR(tty) != __DISABLED_CHAR) |
| 1158 | retval = tty_send_xchar(tty, START_CHAR(tty)); | 1155 | retval = tty_send_xchar(tty, START_CHAR(tty)); |
| 1159 | up_read(&tty->termios_rwsem); | ||
| 1160 | break; | 1156 | break; |
| 1161 | default: | 1157 | default: |
| 1162 | return -EINVAL; | 1158 | return -EINVAL; |
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 5af8f1874c1a..629e3c865072 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
| 592 | 592 | ||
| 593 | /* Restart the work queue in case no characters kick it off. Safe if | 593 | /* Restart the work queue in case no characters kick it off. Safe if |
| 594 | already running */ | 594 | already running */ |
| 595 | schedule_work(&tty->port->buf.work); | 595 | tty_buffer_restart_work(tty->port); |
| 596 | 596 | ||
| 597 | tty_unlock(tty); | 597 | tty_unlock(tty); |
| 598 | return retval; | 598 | return retval; |
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 6ccbf60cdd5c..5a048b7b92e8 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c | |||
| @@ -84,6 +84,12 @@ struct ci_hdrc_imx_data { | |||
| 84 | struct imx_usbmisc_data *usbmisc_data; | 84 | struct imx_usbmisc_data *usbmisc_data; |
| 85 | bool supports_runtime_pm; | 85 | bool supports_runtime_pm; |
| 86 | bool in_lpm; | 86 | bool in_lpm; |
| 87 | /* SoC before i.mx6 (except imx23/imx28) needs three clks */ | ||
| 88 | bool need_three_clks; | ||
| 89 | struct clk *clk_ipg; | ||
| 90 | struct clk *clk_ahb; | ||
| 91 | struct clk *clk_per; | ||
| 92 | /* --------------------------------- */ | ||
| 87 | }; | 93 | }; |
| 88 | 94 | ||
| 89 | /* Common functions shared by usbmisc drivers */ | 95 | /* Common functions shared by usbmisc drivers */ |
| @@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) | |||
| 135 | } | 141 | } |
| 136 | 142 | ||
| 137 | /* End of common functions shared by usbmisc drivers*/ | 143 | /* End of common functions shared by usbmisc drivers*/ |
| 144 | static int imx_get_clks(struct device *dev) | ||
| 145 | { | ||
| 146 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 147 | int ret = 0; | ||
| 148 | |||
| 149 | data->clk_ipg = devm_clk_get(dev, "ipg"); | ||
| 150 | if (IS_ERR(data->clk_ipg)) { | ||
| 151 | /* If the platform only needs one clocks */ | ||
| 152 | data->clk = devm_clk_get(dev, NULL); | ||
| 153 | if (IS_ERR(data->clk)) { | ||
| 154 | ret = PTR_ERR(data->clk); | ||
| 155 | dev_err(dev, | ||
| 156 | "Failed to get clks, err=%ld,%ld\n", | ||
| 157 | PTR_ERR(data->clk), PTR_ERR(data->clk_ipg)); | ||
| 158 | return ret; | ||
| 159 | } | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | data->clk_ahb = devm_clk_get(dev, "ahb"); | ||
| 164 | if (IS_ERR(data->clk_ahb)) { | ||
| 165 | ret = PTR_ERR(data->clk_ahb); | ||
| 166 | dev_err(dev, | ||
| 167 | "Failed to get ahb clock, err=%d\n", ret); | ||
| 168 | return ret; | ||
| 169 | } | ||
| 170 | |||
| 171 | data->clk_per = devm_clk_get(dev, "per"); | ||
| 172 | if (IS_ERR(data->clk_per)) { | ||
| 173 | ret = PTR_ERR(data->clk_per); | ||
| 174 | dev_err(dev, | ||
| 175 | "Failed to get per clock, err=%d\n", ret); | ||
| 176 | return ret; | ||
| 177 | } | ||
| 178 | |||
| 179 | data->need_three_clks = true; | ||
| 180 | return ret; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int imx_prepare_enable_clks(struct device *dev) | ||
| 184 | { | ||
| 185 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 186 | int ret = 0; | ||
| 187 | |||
| 188 | if (data->need_three_clks) { | ||
| 189 | ret = clk_prepare_enable(data->clk_ipg); | ||
| 190 | if (ret) { | ||
| 191 | dev_err(dev, | ||
| 192 | "Failed to prepare/enable ipg clk, err=%d\n", | ||
| 193 | ret); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | ret = clk_prepare_enable(data->clk_ahb); | ||
| 198 | if (ret) { | ||
| 199 | dev_err(dev, | ||
| 200 | "Failed to prepare/enable ahb clk, err=%d\n", | ||
| 201 | ret); | ||
| 202 | clk_disable_unprepare(data->clk_ipg); | ||
| 203 | return ret; | ||
| 204 | } | ||
| 205 | |||
| 206 | ret = clk_prepare_enable(data->clk_per); | ||
| 207 | if (ret) { | ||
| 208 | dev_err(dev, | ||
| 209 | "Failed to prepare/enable per clk, err=%d\n", | ||
| 210 | ret); | ||
| 211 | clk_disable_unprepare(data->clk_ahb); | ||
| 212 | clk_disable_unprepare(data->clk_ipg); | ||
| 213 | return ret; | ||
| 214 | } | ||
| 215 | } else { | ||
| 216 | ret = clk_prepare_enable(data->clk); | ||
| 217 | if (ret) { | ||
| 218 | dev_err(dev, | ||
| 219 | "Failed to prepare/enable clk, err=%d\n", | ||
| 220 | ret); | ||
| 221 | return ret; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | return ret; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void imx_disable_unprepare_clks(struct device *dev) | ||
| 229 | { | ||
| 230 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 231 | |||
| 232 | if (data->need_three_clks) { | ||
| 233 | clk_disable_unprepare(data->clk_per); | ||
| 234 | clk_disable_unprepare(data->clk_ahb); | ||
| 235 | clk_disable_unprepare(data->clk_ipg); | ||
| 236 | } else { | ||
| 237 | clk_disable_unprepare(data->clk); | ||
| 238 | } | ||
| 239 | } | ||
| 138 | 240 | ||
| 139 | static int ci_hdrc_imx_probe(struct platform_device *pdev) | 241 | static int ci_hdrc_imx_probe(struct platform_device *pdev) |
| 140 | { | 242 | { |
| @@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 145 | .flags = CI_HDRC_SET_NON_ZERO_TTHA, | 247 | .flags = CI_HDRC_SET_NON_ZERO_TTHA, |
| 146 | }; | 248 | }; |
| 147 | int ret; | 249 | int ret; |
| 148 | const struct of_device_id *of_id = | 250 | const struct of_device_id *of_id; |
| 149 | of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); | 251 | const struct ci_hdrc_imx_platform_flag *imx_platform_flag; |
| 150 | const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; | 252 | |
| 253 | of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); | ||
| 254 | if (!of_id) | ||
| 255 | return -ENODEV; | ||
| 256 | |||
| 257 | imx_platform_flag = of_id->data; | ||
| 151 | 258 | ||
| 152 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | 259 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); |
| 153 | if (!data) | 260 | if (!data) |
| 154 | return -ENOMEM; | 261 | return -ENOMEM; |
| 155 | 262 | ||
| 263 | platform_set_drvdata(pdev, data); | ||
| 156 | data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); | 264 | data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); |
| 157 | if (IS_ERR(data->usbmisc_data)) | 265 | if (IS_ERR(data->usbmisc_data)) |
| 158 | return PTR_ERR(data->usbmisc_data); | 266 | return PTR_ERR(data->usbmisc_data); |
| 159 | 267 | ||
| 160 | data->clk = devm_clk_get(&pdev->dev, NULL); | 268 | ret = imx_get_clks(&pdev->dev); |
| 161 | if (IS_ERR(data->clk)) { | 269 | if (ret) |
| 162 | dev_err(&pdev->dev, | 270 | return ret; |
| 163 | "Failed to get clock, err=%ld\n", PTR_ERR(data->clk)); | ||
| 164 | return PTR_ERR(data->clk); | ||
| 165 | } | ||
| 166 | 271 | ||
| 167 | ret = clk_prepare_enable(data->clk); | 272 | ret = imx_prepare_enable_clks(&pdev->dev); |
| 168 | if (ret) { | 273 | if (ret) |
| 169 | dev_err(&pdev->dev, | ||
| 170 | "Failed to prepare or enable clock, err=%d\n", ret); | ||
| 171 | return ret; | 274 | return ret; |
| 172 | } | ||
| 173 | 275 | ||
| 174 | data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); | 276 | data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); |
| 175 | if (IS_ERR(data->phy)) { | 277 | if (IS_ERR(data->phy)) { |
| @@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 212 | goto disable_device; | 314 | goto disable_device; |
| 213 | } | 315 | } |
| 214 | 316 | ||
| 215 | platform_set_drvdata(pdev, data); | ||
| 216 | |||
| 217 | if (data->supports_runtime_pm) { | 317 | if (data->supports_runtime_pm) { |
| 218 | pm_runtime_set_active(&pdev->dev); | 318 | pm_runtime_set_active(&pdev->dev); |
| 219 | pm_runtime_enable(&pdev->dev); | 319 | pm_runtime_enable(&pdev->dev); |
| @@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 226 | disable_device: | 326 | disable_device: |
| 227 | ci_hdrc_remove_device(data->ci_pdev); | 327 | ci_hdrc_remove_device(data->ci_pdev); |
| 228 | err_clk: | 328 | err_clk: |
| 229 | clk_disable_unprepare(data->clk); | 329 | imx_disable_unprepare_clks(&pdev->dev); |
| 230 | return ret; | 330 | return ret; |
| 231 | } | 331 | } |
| 232 | 332 | ||
| @@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) | |||
| 240 | pm_runtime_put_noidle(&pdev->dev); | 340 | pm_runtime_put_noidle(&pdev->dev); |
| 241 | } | 341 | } |
| 242 | ci_hdrc_remove_device(data->ci_pdev); | 342 | ci_hdrc_remove_device(data->ci_pdev); |
| 243 | clk_disable_unprepare(data->clk); | 343 | imx_disable_unprepare_clks(&pdev->dev); |
| 244 | 344 | ||
| 245 | return 0; | 345 | return 0; |
| 246 | } | 346 | } |
| @@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev) | |||
| 252 | 352 | ||
| 253 | dev_dbg(dev, "at %s\n", __func__); | 353 | dev_dbg(dev, "at %s\n", __func__); |
| 254 | 354 | ||
| 255 | clk_disable_unprepare(data->clk); | 355 | imx_disable_unprepare_clks(dev); |
| 256 | data->in_lpm = true; | 356 | data->in_lpm = true; |
| 257 | 357 | ||
| 258 | return 0; | 358 | return 0; |
| @@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev) | |||
| 270 | return 0; | 370 | return 0; |
| 271 | } | 371 | } |
| 272 | 372 | ||
| 273 | ret = clk_prepare_enable(data->clk); | 373 | ret = imx_prepare_enable_clks(dev); |
| 274 | if (ret) | 374 | if (ret) |
| 275 | return ret; | 375 | return ret; |
| 276 | 376 | ||
| @@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev) | |||
| 285 | return 0; | 385 | return 0; |
| 286 | 386 | ||
| 287 | clk_disable: | 387 | clk_disable: |
| 288 | clk_disable_unprepare(data->clk); | 388 | imx_disable_unprepare_clks(dev); |
| 289 | return ret; | 389 | return ret; |
| 290 | } | 390 | } |
| 291 | 391 | ||
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index 080b7be3daf0..58c8485a0715 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c | |||
| @@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf, | |||
| 322 | return -EINVAL; | 322 | return -EINVAL; |
| 323 | 323 | ||
| 324 | pm_runtime_get_sync(ci->dev); | 324 | pm_runtime_get_sync(ci->dev); |
| 325 | disable_irq(ci->irq); | ||
| 325 | ci_role_stop(ci); | 326 | ci_role_stop(ci); |
| 326 | ret = ci_role_start(ci, role); | 327 | ret = ci_role_start(ci, role); |
| 328 | enable_irq(ci->irq); | ||
| 327 | pm_runtime_put_sync(ci->dev); | 329 | pm_runtime_put_sync(ci->dev); |
| 328 | 330 | ||
| 329 | return ret ? ret : count; | 331 | return ret ? ret : count; |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 8223fe73ea85..391a1225b0ba 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
| @@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget, | |||
| 1751 | return retval; | 1751 | return retval; |
| 1752 | } | 1752 | } |
| 1753 | 1753 | ||
| 1754 | static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci) | ||
| 1755 | { | ||
| 1756 | if (!ci_otg_is_fsm_mode(ci)) | ||
| 1757 | return; | ||
| 1758 | |||
| 1759 | mutex_lock(&ci->fsm.lock); | ||
| 1760 | if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) { | ||
| 1761 | ci->fsm.a_bidl_adis_tmout = 1; | ||
| 1762 | ci_hdrc_otg_fsm_start(ci); | ||
| 1763 | } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) { | ||
| 1764 | ci->fsm.protocol = PROTO_UNDEF; | ||
| 1765 | ci->fsm.otg->state = OTG_STATE_UNDEFINED; | ||
| 1766 | } | ||
| 1767 | mutex_unlock(&ci->fsm.lock); | ||
| 1768 | } | ||
| 1769 | |||
| 1754 | /** | 1770 | /** |
| 1755 | * ci_udc_stop: unregister a gadget driver | 1771 | * ci_udc_stop: unregister a gadget driver |
| 1756 | */ | 1772 | */ |
| @@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget) | |||
| 1775 | ci->driver = NULL; | 1791 | ci->driver = NULL; |
| 1776 | spin_unlock_irqrestore(&ci->lock, flags); | 1792 | spin_unlock_irqrestore(&ci->lock, flags); |
| 1777 | 1793 | ||
| 1794 | ci_udc_stop_for_otg_fsm(ci); | ||
| 1778 | return 0; | 1795 | return 0; |
| 1779 | } | 1796 | } |
| 1780 | 1797 | ||
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index fcea4eb36eee..ab8b027e8cc8 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c | |||
| @@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev) | |||
| 500 | { | 500 | { |
| 501 | struct resource *res; | 501 | struct resource *res; |
| 502 | struct imx_usbmisc *data; | 502 | struct imx_usbmisc *data; |
| 503 | struct of_device_id *tmp_dev; | 503 | const struct of_device_id *of_id; |
| 504 | |||
| 505 | of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev); | ||
| 506 | if (!of_id) | ||
| 507 | return -ENODEV; | ||
| 504 | 508 | ||
| 505 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | 509 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); |
| 506 | if (!data) | 510 | if (!data) |
| @@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev) | |||
| 513 | if (IS_ERR(data->base)) | 517 | if (IS_ERR(data->base)) |
| 514 | return PTR_ERR(data->base); | 518 | return PTR_ERR(data->base); |
| 515 | 519 | ||
| 516 | tmp_dev = (struct of_device_id *) | 520 | data->ops = (const struct usbmisc_ops *)of_id->data; |
| 517 | of_match_device(usbmisc_imx_dt_ids, &pdev->dev); | ||
| 518 | data->ops = (const struct usbmisc_ops *)tmp_dev->data; | ||
| 519 | platform_set_drvdata(pdev, data); | 521 | platform_set_drvdata(pdev, data); |
| 520 | 522 | ||
| 521 | return 0; | 523 | return 0; |
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 433bbc34a8a4..071964c7847f 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c | |||
| @@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock) | |||
| 884 | 884 | ||
| 885 | add_wait_queue(&usblp->wwait, &waita); | 885 | add_wait_queue(&usblp->wwait, &waita); |
| 886 | for (;;) { | 886 | for (;;) { |
| 887 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 888 | if (mutex_lock_interruptible(&usblp->mut)) { | 887 | if (mutex_lock_interruptible(&usblp->mut)) { |
| 889 | rc = -EINTR; | 888 | rc = -EINTR; |
| 890 | break; | 889 | break; |
| 891 | } | 890 | } |
| 891 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 892 | rc = usblp_wtest(usblp, nonblock); | 892 | rc = usblp_wtest(usblp, nonblock); |
| 893 | mutex_unlock(&usblp->mut); | 893 | mutex_unlock(&usblp->mut); |
| 894 | if (rc <= 0) | 894 | if (rc <= 0) |
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index a99c89e78126..dd280108758f 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
| @@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB | |||
| 77 | 77 | ||
| 78 | config USB_OTG_FSM | 78 | config USB_OTG_FSM |
| 79 | tristate "USB 2.0 OTG FSM implementation" | 79 | tristate "USB 2.0 OTG FSM implementation" |
| 80 | depends on USB | 80 | depends on USB && USB_OTG |
| 81 | select USB_OTG | ||
| 82 | select USB_PHY | 81 | select USB_PHY |
| 83 | help | 82 | help |
| 84 | Implements OTG Finite State Machine as specified in On-The-Go | 83 | Implements OTG Finite State Machine as specified in On-The-Go |
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index e79baf73c234..571c21727ff9 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c | |||
| @@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) | |||
| 324 | */ | 324 | */ |
| 325 | static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) | 325 | static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) |
| 326 | { | 326 | { |
| 327 | if (hsotg->lx_state == DWC2_L2) { | 327 | if (hsotg->bus_suspended) { |
| 328 | hsotg->flags.b.port_suspend_change = 1; | 328 | hsotg->flags.b.port_suspend_change = 1; |
| 329 | usb_hcd_resume_root_hub(hsotg->priv); | 329 | usb_hcd_resume_root_hub(hsotg->priv); |
| 330 | } else { | ||
| 331 | hsotg->flags.b.port_l1_change = 1; | ||
| 332 | } | 330 | } |
| 331 | |||
| 332 | if (hsotg->lx_state == DWC2_L1) | ||
| 333 | hsotg->flags.b.port_l1_change = 1; | ||
| 333 | } | 334 | } |
| 334 | 335 | ||
| 335 | /** | 336 | /** |
| @@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data) | |||
| 1428 | dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", | 1429 | dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", |
| 1429 | dwc2_readl(hsotg->regs + HPRT0)); | 1430 | dwc2_readl(hsotg->regs + HPRT0)); |
| 1430 | 1431 | ||
| 1431 | hsotg->bus_suspended = 0; | ||
| 1432 | dwc2_hcd_rem_wakeup(hsotg); | 1432 | dwc2_hcd_rem_wakeup(hsotg); |
| 1433 | hsotg->bus_suspended = 0; | ||
| 1433 | 1434 | ||
| 1434 | /* Change to L0 state */ | 1435 | /* Change to L0 state */ |
| 1435 | hsotg->lx_state = DWC2_L0; | 1436 | hsotg->lx_state = DWC2_L0; |
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 5859b0fa19ee..e61d773cf65e 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
| @@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = { | |||
| 108 | .host_ls_low_power_phy_clk = -1, | 108 | .host_ls_low_power_phy_clk = -1, |
| 109 | .ts_dline = -1, | 109 | .ts_dline = -1, |
| 110 | .reload_ctl = -1, | 110 | .reload_ctl = -1, |
| 111 | .ahbcfg = 0x7, /* INCR16 */ | 111 | .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << |
| 112 | GAHBCFG_HBSTLEN_SHIFT, | ||
| 112 | .uframe_sched = -1, | 113 | .uframe_sched = -1, |
| 113 | .external_id_pin_ctl = -1, | 114 | .external_id_pin_ctl = -1, |
| 114 | .hibernation = -1, | 115 | .hibernation = -1, |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 77a622cb48ab..009d83048c8c 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
| @@ -34,6 +34,8 @@ | |||
| 34 | #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 | 34 | #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 |
| 35 | #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 | 35 | #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 |
| 36 | #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 | 36 | #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 |
| 37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa | ||
| 38 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | ||
| 37 | 39 | ||
| 38 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | 40 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
| 39 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; | 41 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
| @@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
| 210 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, | 212 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, |
| 211 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, | 213 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, |
| 212 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, | 214 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, |
| 215 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, | ||
| 216 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | ||
| 213 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 217 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
| 214 | { } /* Terminating Entry */ | 218 | { } /* Terminating Entry */ |
| 215 | }; | 219 | }; |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 55ba447fdf8b..e24a01cc98df 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc) | |||
| 2744 | } | 2744 | } |
| 2745 | 2745 | ||
| 2746 | dwc->gadget.ops = &dwc3_gadget_ops; | 2746 | dwc->gadget.ops = &dwc3_gadget_ops; |
| 2747 | dwc->gadget.max_speed = USB_SPEED_SUPER; | ||
| 2748 | dwc->gadget.speed = USB_SPEED_UNKNOWN; | 2747 | dwc->gadget.speed = USB_SPEED_UNKNOWN; |
| 2749 | dwc->gadget.sg_supported = true; | 2748 | dwc->gadget.sg_supported = true; |
| 2750 | dwc->gadget.name = "dwc3-gadget"; | 2749 | dwc->gadget.name = "dwc3-gadget"; |
| 2751 | 2750 | ||
| 2752 | /* | 2751 | /* |
| 2752 | * FIXME We might be setting max_speed to <SUPER, however versions | ||
| 2753 | * <2.20a of dwc3 have an issue with metastability (documented | ||
| 2754 | * elsewhere in this driver) which tells us we can't set max speed to | ||
| 2755 | * anything lower than SUPER. | ||
| 2756 | * | ||
| 2757 | * Because gadget.max_speed is only used by composite.c and function | ||
| 2758 | * drivers (i.e. it won't go into dwc3's registers) we are allowing this | ||
| 2759 | * to happen so we avoid sending SuperSpeed Capability descriptor | ||
| 2760 | * together with our BOS descriptor as that could confuse host into | ||
| 2761 | * thinking we can handle super speed. | ||
| 2762 | * | ||
| 2763 | * Note that, in fact, we won't even support GetBOS requests when speed | ||
| 2764 | * is less than super speed because we don't have means, yet, to tell | ||
| 2765 | * composite.c that we are USB 2.0 + LPM ECN. | ||
| 2766 | */ | ||
| 2767 | if (dwc->revision < DWC3_REVISION_220A) | ||
| 2768 | dwc3_trace(trace_dwc3_gadget, | ||
| 2769 | "Changing max_speed on rev %08x\n", | ||
| 2770 | dwc->revision); | ||
| 2771 | |||
| 2772 | dwc->gadget.max_speed = dwc->maximum_speed; | ||
| 2773 | |||
| 2774 | /* | ||
| 2753 | * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize | 2775 | * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize |
| 2754 | * on ep out. | 2776 | * on ep out. |
| 2755 | */ | 2777 | */ |
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 23933bdf2d9d..ddc3aad886b7 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c | |||
| @@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev, | |||
| 329 | for (i = 0; i < loop->qlen && result == 0; i++) { | 329 | for (i = 0; i < loop->qlen && result == 0; i++) { |
| 330 | result = -ENOMEM; | 330 | result = -ENOMEM; |
| 331 | 331 | ||
| 332 | in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); | 332 | in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC); |
| 333 | if (!in_req) | 333 | if (!in_req) |
| 334 | goto fail; | 334 | goto fail; |
| 335 | 335 | ||
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index f0f2b066ac08..f92f5aff0dd5 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
| @@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) | |||
| 1633 | spin_lock(&udc->lock); | 1633 | spin_lock(&udc->lock); |
| 1634 | 1634 | ||
| 1635 | int_enb = usba_int_enb_get(udc); | 1635 | int_enb = usba_int_enb_get(udc); |
| 1636 | status = usba_readl(udc, INT_STA) & int_enb; | 1636 | status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED); |
| 1637 | DBG(DBG_INT, "irq, status=%#08x\n", status); | 1637 | DBG(DBG_INT, "irq, status=%#08x\n", status); |
| 1638 | 1638 | ||
| 1639 | if (status & USBA_DET_SUSPEND) { | 1639 | if (status & USBA_DET_SUSPEND) { |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 5d2d7e954bd4..0230965fb78c 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 782 | status |= USB_PORT_STAT_SUSPEND; | 782 | status |= USB_PORT_STAT_SUSPEND; |
| 783 | } | 783 | } |
| 784 | } | 784 | } |
| 785 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 | 785 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && |
| 786 | && (raw_port_status & PORT_POWER) | 786 | (raw_port_status & PORT_POWER)) { |
| 787 | && (bus_state->suspended_ports & (1 << wIndex))) { | 787 | if (bus_state->suspended_ports & (1 << wIndex)) { |
| 788 | bus_state->suspended_ports &= ~(1 << wIndex); | 788 | bus_state->suspended_ports &= ~(1 << wIndex); |
| 789 | if (hcd->speed < HCD_USB3) | 789 | if (hcd->speed < HCD_USB3) |
| 790 | bus_state->port_c_suspend |= 1 << wIndex; | 790 | bus_state->port_c_suspend |= 1 << wIndex; |
| 791 | } | ||
| 792 | bus_state->resume_done[wIndex] = 0; | ||
| 793 | clear_bit(wIndex, &bus_state->resuming_ports); | ||
| 791 | } | 794 | } |
| 792 | if (raw_port_status & PORT_CONNECT) { | 795 | if (raw_port_status & PORT_CONNECT) { |
| 793 | status |= USB_PORT_STAT_CONNECTION; | 796 | status |= USB_PORT_STAT_CONNECTION; |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fa836251ca21..6c5e8133cf87 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -3896,28 +3896,6 @@ cleanup: | |||
| 3896 | return ret; | 3896 | return ret; |
| 3897 | } | 3897 | } |
| 3898 | 3898 | ||
| 3899 | static int ep_ring_is_processing(struct xhci_hcd *xhci, | ||
| 3900 | int slot_id, unsigned int ep_index) | ||
| 3901 | { | ||
| 3902 | struct xhci_virt_device *xdev; | ||
| 3903 | struct xhci_ring *ep_ring; | ||
| 3904 | struct xhci_ep_ctx *ep_ctx; | ||
| 3905 | struct xhci_virt_ep *xep; | ||
| 3906 | dma_addr_t hw_deq; | ||
| 3907 | |||
| 3908 | xdev = xhci->devs[slot_id]; | ||
| 3909 | xep = &xhci->devs[slot_id]->eps[ep_index]; | ||
| 3910 | ep_ring = xep->ring; | ||
| 3911 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | ||
| 3912 | |||
| 3913 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING) | ||
| 3914 | return 0; | ||
| 3915 | |||
| 3916 | hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; | ||
| 3917 | return (hw_deq != | ||
| 3918 | xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue)); | ||
| 3919 | } | ||
| 3920 | |||
| 3921 | /* | 3899 | /* |
| 3922 | * Check transfer ring to guarantee there is enough room for the urb. | 3900 | * Check transfer ring to guarantee there is enough room for the urb. |
| 3923 | * Update ISO URB start_frame and interval. | 3901 | * Update ISO URB start_frame and interval. |
| @@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 3983 | } | 3961 | } |
| 3984 | 3962 | ||
| 3985 | /* Calculate the start frame and put it in urb->start_frame. */ | 3963 | /* Calculate the start frame and put it in urb->start_frame. */ |
| 3986 | if (HCC_CFC(xhci->hcc_params) && | 3964 | if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { |
| 3987 | ep_ring_is_processing(xhci, slot_id, ep_index)) { | 3965 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == |
| 3988 | urb->start_frame = xep->next_frame_id; | 3966 | EP_STATE_RUNNING) { |
| 3989 | goto skip_start_over; | 3967 | urb->start_frame = xep->next_frame_id; |
| 3968 | goto skip_start_over; | ||
| 3969 | } | ||
| 3990 | } | 3970 | } |
| 3991 | 3971 | ||
| 3992 | start_frame = readl(&xhci->run_regs->microframe_index); | 3972 | start_frame = readl(&xhci->run_regs->microframe_index); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6e7dc6f93978..dfa44d3e8eee 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci) | |||
| 175 | command |= CMD_RESET; | 175 | command |= CMD_RESET; |
| 176 | writel(command, &xhci->op_regs->command); | 176 | writel(command, &xhci->op_regs->command); |
| 177 | 177 | ||
| 178 | /* Existing Intel xHCI controllers require a delay of 1 mS, | ||
| 179 | * after setting the CMD_RESET bit, and before accessing any | ||
| 180 | * HC registers. This allows the HC to complete the | ||
| 181 | * reset operation and be ready for HC register access. | ||
| 182 | * Without this delay, the subsequent HC register access, | ||
| 183 | * may result in a system hang very rarely. | ||
| 184 | */ | ||
| 185 | if (xhci->quirks & XHCI_INTEL_HOST) | ||
| 186 | udelay(1000); | ||
| 187 | |||
| 178 | ret = xhci_handshake(&xhci->op_regs->command, | 188 | ret = xhci_handshake(&xhci->op_regs->command, |
| 179 | CMD_RESET, 0, 10 * 1000 * 1000); | 189 | CMD_RESET, 0, 10 * 1000 * 1000); |
| 180 | if (ret) | 190 | if (ret) |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ba13529cbd52..18cfc0a361cb 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev) | |||
| 132 | /*-------------------------------------------------------------------------*/ | 132 | /*-------------------------------------------------------------------------*/ |
| 133 | 133 | ||
| 134 | #ifndef CONFIG_BLACKFIN | 134 | #ifndef CONFIG_BLACKFIN |
| 135 | static int musb_ulpi_read(struct usb_phy *phy, u32 offset) | 135 | static int musb_ulpi_read(struct usb_phy *phy, u32 reg) |
| 136 | { | 136 | { |
| 137 | void __iomem *addr = phy->io_priv; | 137 | void __iomem *addr = phy->io_priv; |
| 138 | int i = 0; | 138 | int i = 0; |
| @@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset) | |||
| 151 | * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. | 151 | * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. |
| 152 | */ | 152 | */ |
| 153 | 153 | ||
| 154 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); | 154 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); |
| 155 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, | 155 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, |
| 156 | MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); | 156 | MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); |
| 157 | 157 | ||
| @@ -176,7 +176,7 @@ out: | |||
| 176 | return ret; | 176 | return ret; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) | 179 | static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg) |
| 180 | { | 180 | { |
| 181 | void __iomem *addr = phy->io_priv; | 181 | void __iomem *addr = phy->io_priv; |
| 182 | int i = 0; | 182 | int i = 0; |
| @@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) | |||
| 191 | power &= ~MUSB_POWER_SUSPENDM; | 191 | power &= ~MUSB_POWER_SUSPENDM; |
| 192 | musb_writeb(addr, MUSB_POWER, power); | 192 | musb_writeb(addr, MUSB_POWER, power); |
| 193 | 193 | ||
| 194 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); | 194 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); |
| 195 | musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); | 195 | musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val); |
| 196 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); | 196 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); |
| 197 | 197 | ||
| 198 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) | 198 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) |
| @@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt); | |||
| 1668 | static bool use_dma = 1; | 1668 | static bool use_dma = 1; |
| 1669 | 1669 | ||
| 1670 | /* "modprobe ... use_dma=0" etc */ | 1670 | /* "modprobe ... use_dma=0" etc */ |
| 1671 | module_param(use_dma, bool, 0); | 1671 | module_param(use_dma, bool, 0644); |
| 1672 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | 1672 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); |
| 1673 | 1673 | ||
| 1674 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) | 1674 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 26c65e66cc0f..795a45b1b25b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
| 112 | struct musb *musb = ep->musb; | 112 | struct musb *musb = ep->musb; |
| 113 | void __iomem *epio = ep->regs; | 113 | void __iomem *epio = ep->regs; |
| 114 | u16 csr; | 114 | u16 csr; |
| 115 | u16 lastcsr = 0; | ||
| 116 | int retries = 1000; | 115 | int retries = 1000; |
| 117 | 116 | ||
| 118 | csr = musb_readw(epio, MUSB_TXCSR); | 117 | csr = musb_readw(epio, MUSB_TXCSR); |
| 119 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 118 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
| 120 | if (csr != lastcsr) | ||
| 121 | dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | ||
| 122 | lastcsr = csr; | ||
| 123 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; | 119 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; |
| 124 | musb_writew(epio, MUSB_TXCSR, csr); | 120 | musb_writew(epio, MUSB_TXCSR, csr); |
| 125 | csr = musb_readw(epio, MUSB_TXCSR); | 121 | csr = musb_readw(epio, MUSB_TXCSR); |
| 126 | if (WARN(retries-- < 1, | 122 | |
| 123 | /* | ||
| 124 | * FIXME: sometimes the tx fifo flush failed, it has been | ||
| 125 | * observed during device disconnect on AM335x. | ||
| 126 | * | ||
| 127 | * To reproduce the issue, ensure tx urb(s) are queued when | ||
| 128 | * unplug the usb device which is connected to AM335x usb | ||
| 129 | * host port. | ||
| 130 | * | ||
| 131 | * I found using a usb-ethernet device and running iperf | ||
| 132 | * (client on AM335x) has very high chance to trigger it. | ||
| 133 | * | ||
| 134 | * Better to turn on dev_dbg() in musb_cleanup_urb() with | ||
| 135 | * CPPI enabled to see the issue when aborting the tx channel. | ||
| 136 | */ | ||
| 137 | if (dev_WARN_ONCE(musb->controller, retries-- < 1, | ||
| 127 | "Could not flush host TX%d fifo: csr: %04x\n", | 138 | "Could not flush host TX%d fifo: csr: %04x\n", |
| 128 | ep->epnum, csr)) | 139 | ep->epnum, csr)) |
| 129 | return; | 140 | return; |
| 130 | mdelay(1); | ||
| 131 | } | 141 | } |
| 132 | } | 142 | } |
| 133 | 143 | ||
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 173132416170..22e8ecb6bfbd 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
| @@ -21,7 +21,6 @@ config AB8500_USB | |||
| 21 | config FSL_USB2_OTG | 21 | config FSL_USB2_OTG |
| 22 | bool "Freescale USB OTG Transceiver Driver" | 22 | bool "Freescale USB OTG Transceiver Driver" |
| 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM | 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM |
| 24 | select USB_OTG | ||
| 25 | select USB_PHY | 24 | select USB_PHY |
| 26 | help | 25 | help |
| 27 | Enable this to support Freescale USB OTG transceiver. | 26 | Enable this to support Freescale USB OTG transceiver. |
| @@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY | |||
| 168 | 167 | ||
| 169 | config USB_MV_OTG | 168 | config USB_MV_OTG |
| 170 | tristate "Marvell USB OTG support" | 169 | tristate "Marvell USB OTG support" |
| 171 | depends on USB_EHCI_MV && USB_MV_UDC && PM | 170 | depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG |
| 172 | select USB_OTG | ||
| 173 | select USB_PHY | 171 | select USB_PHY |
| 174 | help | 172 | help |
| 175 | Say Y here if you want to build Marvell USB OTG transciever | 173 | Say Y here if you want to build Marvell USB OTG transciever |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 4d863ebc117c..b7536af777ab 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
| @@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev) | |||
| 452 | struct clk *clk; | 452 | struct clk *clk; |
| 453 | struct mxs_phy *mxs_phy; | 453 | struct mxs_phy *mxs_phy; |
| 454 | int ret; | 454 | int ret; |
| 455 | const struct of_device_id *of_id = | 455 | const struct of_device_id *of_id; |
| 456 | of_match_device(mxs_phy_dt_ids, &pdev->dev); | ||
| 457 | struct device_node *np = pdev->dev.of_node; | 456 | struct device_node *np = pdev->dev.of_node; |
| 458 | 457 | ||
| 458 | of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); | ||
| 459 | if (!of_id) | ||
| 460 | return -ENODEV; | ||
| 461 | |||
| 459 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 462 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 460 | base = devm_ioremap_resource(&pdev->dev, res); | 463 | base = devm_ioremap_resource(&pdev->dev, res); |
| 461 | if (IS_ERR(base)) | 464 | if (IS_ERR(base)) |
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 1270906ccb95..c4bf2de6d14e 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c | |||
| @@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
| 105 | extcon = extcon_get_extcon_dev(config->extcon); | 105 | extcon = extcon_get_extcon_dev(config->extcon); |
| 106 | if (!extcon) | 106 | if (!extcon) |
| 107 | return -EPROBE_DEFER; | 107 | return -EPROBE_DEFER; |
| 108 | otg_dev->extcon = extcon; | ||
| 109 | 108 | ||
| 110 | otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); | 109 | otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); |
| 111 | if (!otg_dev) | 110 | if (!otg_dev) |
| @@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
| 115 | if (IS_ERR(otg_dev->base)) | 114 | if (IS_ERR(otg_dev->base)) |
| 116 | return PTR_ERR(otg_dev->base); | 115 | return PTR_ERR(otg_dev->base); |
| 117 | 116 | ||
| 117 | otg_dev->extcon = extcon; | ||
| 118 | otg_dev->id_nb.notifier_call = omap_otg_id_notifier; | 118 | otg_dev->id_nb.notifier_call = omap_otg_id_notifier; |
| 119 | otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; | 119 | otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; |
| 120 | 120 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 685fef71d3d1..f2280606b73c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 161 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 | 161 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 |
| 162 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 | 162 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 |
| 163 | #define NOVATELWIRELESS_PRODUCT_E371 0x9011 | 163 | #define NOVATELWIRELESS_PRODUCT_E371 0x9011 |
| 164 | #define NOVATELWIRELESS_PRODUCT_U620L 0x9022 | ||
| 164 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 | 165 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 |
| 165 | #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 | 166 | #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 |
| 166 | 167 | ||
| @@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 354 | /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * | 355 | /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * |
| 355 | * It seems to contain a Qualcomm QSC6240/6290 chipset */ | 356 | * It seems to contain a Qualcomm QSC6240/6290 chipset */ |
| 356 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 | 357 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 |
| 358 | #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 | ||
| 357 | 359 | ||
| 358 | /* iBall 3.5G connect wireless modem */ | 360 | /* iBall 3.5G connect wireless modem */ |
| 359 | #define IBALL_3_5G_CONNECT 0x9605 | 361 | #define IBALL_3_5G_CONNECT 0x9605 |
| @@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = { | |||
| 519 | .sendsetup = BIT(0) | BIT(1), | 521 | .sendsetup = BIT(0) | BIT(1), |
| 520 | }; | 522 | }; |
| 521 | 523 | ||
| 524 | static const struct option_blacklist_info four_g_w100_blacklist = { | ||
| 525 | .sendsetup = BIT(1) | BIT(2), | ||
| 526 | .reserved = BIT(3), | ||
| 527 | }; | ||
| 528 | |||
| 522 | static const struct option_blacklist_info alcatel_x200_blacklist = { | 529 | static const struct option_blacklist_info alcatel_x200_blacklist = { |
| 523 | .sendsetup = BIT(0) | BIT(1), | 530 | .sendsetup = BIT(0) | BIT(1), |
| 524 | .reserved = BIT(4), | 531 | .reserved = BIT(4), |
| @@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 1052 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, | 1059 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, |
| 1053 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, | 1060 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, |
| 1054 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, | 1061 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, |
| 1062 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) }, | ||
| 1055 | 1063 | ||
| 1056 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 1064 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
| 1057 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 1065 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
| @@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = { | |||
| 1641 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), | 1649 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), |
| 1642 | .driver_info = (kernel_ulong_t)&four_g_w14_blacklist | 1650 | .driver_info = (kernel_ulong_t)&four_g_w14_blacklist |
| 1643 | }, | 1651 | }, |
| 1652 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), | ||
| 1653 | .driver_info = (kernel_ulong_t)&four_g_w100_blacklist | ||
| 1654 | }, | ||
| 1644 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, | 1655 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, |
| 1645 | { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, | 1656 | { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, |
| 1646 | { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, | 1657 | { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 5022fcfa0260..9919d2a9faf2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #define DRIVER_AUTHOR "Qualcomm Inc" | 22 | #define DRIVER_AUTHOR "Qualcomm Inc" |
| 23 | #define DRIVER_DESC "Qualcomm USB Serial driver" | 23 | #define DRIVER_DESC "Qualcomm USB Serial driver" |
| 24 | 24 | ||
| 25 | #define QUECTEL_EC20_PID 0x9215 | ||
| 26 | |||
| 25 | /* standard device layouts supported by this driver */ | 27 | /* standard device layouts supported by this driver */ |
| 26 | enum qcserial_layouts { | 28 | enum qcserial_layouts { |
| 27 | QCSERIAL_G2K = 0, /* Gobi 2000 */ | 29 | QCSERIAL_G2K = 0, /* Gobi 2000 */ |
| @@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = { | |||
| 171 | }; | 173 | }; |
| 172 | MODULE_DEVICE_TABLE(usb, id_table); | 174 | MODULE_DEVICE_TABLE(usb, id_table); |
| 173 | 175 | ||
| 176 | static int handle_quectel_ec20(struct device *dev, int ifnum) | ||
| 177 | { | ||
| 178 | int altsetting = 0; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Quectel EC20 Mini PCIe LTE module layout: | ||
| 182 | * 0: DM/DIAG (use libqcdm from ModemManager for communication) | ||
| 183 | * 1: NMEA | ||
| 184 | * 2: AT-capable modem port | ||
| 185 | * 3: Modem interface | ||
| 186 | * 4: NDIS | ||
| 187 | */ | ||
| 188 | switch (ifnum) { | ||
| 189 | case 0: | ||
| 190 | dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n"); | ||
| 191 | break; | ||
| 192 | case 1: | ||
| 193 | dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n"); | ||
| 194 | break; | ||
| 195 | case 2: | ||
| 196 | case 3: | ||
| 197 | dev_dbg(dev, "Quectel EC20 Modem port found\n"); | ||
| 198 | break; | ||
| 199 | case 4: | ||
| 200 | /* Don't claim the QMI/net interface */ | ||
| 201 | altsetting = -1; | ||
| 202 | break; | ||
| 203 | } | ||
| 204 | |||
| 205 | return altsetting; | ||
| 206 | } | ||
| 207 | |||
| 174 | static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | 208 | static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
| 175 | { | 209 | { |
| 176 | struct usb_host_interface *intf = serial->interface->cur_altsetting; | 210 | struct usb_host_interface *intf = serial->interface->cur_altsetting; |
| @@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 181 | int altsetting = -1; | 215 | int altsetting = -1; |
| 182 | bool sendsetup = false; | 216 | bool sendsetup = false; |
| 183 | 217 | ||
| 218 | /* we only support vendor specific functions */ | ||
| 219 | if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) | ||
| 220 | goto done; | ||
| 221 | |||
| 184 | nintf = serial->dev->actconfig->desc.bNumInterfaces; | 222 | nintf = serial->dev->actconfig->desc.bNumInterfaces; |
| 185 | dev_dbg(dev, "Num Interfaces = %d\n", nintf); | 223 | dev_dbg(dev, "Num Interfaces = %d\n", nintf); |
| 186 | ifnum = intf->desc.bInterfaceNumber; | 224 | ifnum = intf->desc.bInterfaceNumber; |
| @@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 240 | altsetting = -1; | 278 | altsetting = -1; |
| 241 | break; | 279 | break; |
| 242 | case QCSERIAL_G2K: | 280 | case QCSERIAL_G2K: |
| 281 | /* handle non-standard layouts */ | ||
| 282 | if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) { | ||
| 283 | altsetting = handle_quectel_ec20(dev, ifnum); | ||
| 284 | goto done; | ||
| 285 | } | ||
| 286 | |||
| 243 | /* | 287 | /* |
| 244 | * Gobi 2K+ USB layout: | 288 | * Gobi 2K+ USB layout: |
| 245 | * 0: QMI/net | 289 | * 0: QMI/net |
| @@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 301 | break; | 345 | break; |
| 302 | case QCSERIAL_HWI: | 346 | case QCSERIAL_HWI: |
| 303 | /* | 347 | /* |
| 304 | * Huawei layout: | 348 | * Huawei devices map functions by subclass + protocol |
| 305 | * 0: AT-capable modem port | 349 | * instead of interface numbers. The protocol identify |
| 306 | * 1: DM/DIAG | 350 | * a specific function, while the subclass indicate a |
| 307 | * 2: AT-capable modem port | 351 | * specific firmware source |
| 308 | * 3: CCID-compatible PCSC interface | 352 | * |
| 309 | * 4: QMI/net | 353 | * This is a blacklist of functions known to be |
| 310 | * 5: NMEA | 354 | * non-serial. The rest are assumed to be serial and |
| 355 | * will be handled by this driver | ||
| 311 | */ | 356 | */ |
| 312 | switch (ifnum) { | 357 | switch (intf->desc.bInterfaceProtocol) { |
| 313 | case 0: | 358 | /* QMI combined (qmi_wwan) */ |
| 314 | case 2: | 359 | case 0x07: |
| 315 | dev_dbg(dev, "Modem port found\n"); | 360 | case 0x37: |
| 316 | break; | 361 | case 0x67: |
| 317 | case 1: | 362 | /* QMI data (qmi_wwan) */ |
| 318 | dev_dbg(dev, "DM/DIAG interface found\n"); | 363 | case 0x08: |
| 319 | break; | 364 | case 0x38: |
| 320 | case 5: | 365 | case 0x68: |
| 321 | dev_dbg(dev, "NMEA GPS interface found\n"); | 366 | /* QMI control (qmi_wwan) */ |
| 322 | break; | 367 | case 0x09: |
| 323 | default: | 368 | case 0x39: |
| 324 | /* don't claim any unsupported interface */ | 369 | case 0x69: |
| 370 | /* NCM like (huawei_cdc_ncm) */ | ||
| 371 | case 0x16: | ||
| 372 | case 0x46: | ||
| 373 | case 0x76: | ||
| 325 | altsetting = -1; | 374 | altsetting = -1; |
| 326 | break; | 375 | break; |
| 376 | default: | ||
| 377 | dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n", | ||
| 378 | intf->desc.bInterfaceClass, | ||
| 379 | intf->desc.bInterfaceSubClass, | ||
| 380 | intf->desc.bInterfaceProtocol); | ||
| 327 | } | 381 | } |
| 328 | break; | 382 | break; |
| 329 | default: | 383 | default: |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e9da41d9fe7f..2694df2f4559 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = { | |||
| 159 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, | 159 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, |
| 160 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, | 160 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
| 161 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, | 161 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
| 162 | { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, | ||
| 162 | { } /* terminator */ | 163 | { } /* terminator */ |
| 163 | }; | 164 | }; |
| 164 | 165 | ||
| @@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = { | |||
| 191 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, | 192 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, |
| 192 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, | 193 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
| 193 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, | 194 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
| 195 | { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, | ||
| 194 | { } /* terminator */ | 196 | { } /* terminator */ |
| 195 | }; | 197 | }; |
| 196 | 198 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..98f35c656c02 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
| @@ -56,6 +56,10 @@ | |||
| 56 | #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID | 56 | #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID |
| 57 | #define ABBOTT_STRIP_PORT_ID 0x3420 | 57 | #define ABBOTT_STRIP_PORT_ID 0x3420 |
| 58 | 58 | ||
| 59 | /* Honeywell vendor and product IDs */ | ||
| 60 | #define HONEYWELL_VENDOR_ID 0x10ac | ||
| 61 | #define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */ | ||
| 62 | |||
| 59 | /* Commands */ | 63 | /* Commands */ |
| 60 | #define TI_GET_VERSION 0x01 | 64 | #define TI_GET_VERSION 0x01 |
| 61 | #define TI_GET_PORT_STATUS 0x02 | 65 | #define TI_GET_PORT_STATUS 0x02 |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 7a8a6c6952e9..1c427beffadd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
| @@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG | |||
| 446 | 446 | ||
| 447 | config IMX2_WDT | 447 | config IMX2_WDT |
| 448 | tristate "IMX2+ Watchdog" | 448 | tristate "IMX2+ Watchdog" |
| 449 | depends on ARCH_MXC | 449 | depends on ARCH_MXC || ARCH_LAYERSCAPE |
| 450 | select REGMAP_MMIO | 450 | select REGMAP_MMIO |
| 451 | select WATCHDOG_CORE | 451 | select WATCHDOG_CORE |
| 452 | help | 452 | help |
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 6ad9df948711..b751f43d76ed 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c | |||
| @@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev) | |||
| 123 | 123 | ||
| 124 | reg = readl(wdt_base + WDT_MODE); | 124 | reg = readl(wdt_base + WDT_MODE); |
| 125 | reg &= ~WDT_MODE_EN; | 125 | reg &= ~WDT_MODE_EN; |
| 126 | reg |= WDT_MODE_KEY; | ||
| 126 | iowrite32(reg, wdt_base + WDT_MODE); | 127 | iowrite32(reg, wdt_base + WDT_MODE); |
| 127 | 128 | ||
| 128 | return 0; | 129 | return 0; |
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index d96bee017fd3..6f17c935a6cf 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c | |||
| @@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, | |||
| 205 | 205 | ||
| 206 | static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) | 206 | static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) |
| 207 | { | 207 | { |
| 208 | struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); | 208 | struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); |
| 209 | void __iomem *base = wdev->base; | 209 | void __iomem *base = wdev->base; |
| 210 | u32 value; | 210 | u32 value; |
| 211 | 211 | ||
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 4224b3ec83a5..313cd1c6fda0 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
| @@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT; | |||
| 80 | 80 | ||
| 81 | static DEFINE_SPINLOCK(io_lock); | 81 | static DEFINE_SPINLOCK(io_lock); |
| 82 | static void __iomem *wdt_base; | 82 | static void __iomem *wdt_base; |
| 83 | struct clk *wdt_clk; | 83 | static struct clk *wdt_clk; |
| 84 | 84 | ||
| 85 | static int pnx4008_wdt_start(struct watchdog_device *wdd) | 85 | static int pnx4008_wdt_start(struct watchdog_device *wdd) |
| 86 | { | 86 | { |
| @@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) | |||
| 161 | if (IS_ERR(wdt_clk)) | 161 | if (IS_ERR(wdt_clk)) |
| 162 | return PTR_ERR(wdt_clk); | 162 | return PTR_ERR(wdt_clk); |
| 163 | 163 | ||
| 164 | ret = clk_enable(wdt_clk); | 164 | ret = clk_prepare_enable(wdt_clk); |
| 165 | if (ret) | 165 | if (ret) |
| 166 | return ret; | 166 | return ret; |
| 167 | 167 | ||
| @@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) | |||
| 184 | return 0; | 184 | return 0; |
| 185 | 185 | ||
| 186 | disable_clk: | 186 | disable_clk: |
| 187 | clk_disable(wdt_clk); | 187 | clk_disable_unprepare(wdt_clk); |
| 188 | return ret; | 188 | return ret; |
| 189 | } | 189 | } |
| 190 | 190 | ||
| @@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev) | |||
| 192 | { | 192 | { |
| 193 | watchdog_unregister_device(&pnx4008_wdd); | 193 | watchdog_unregister_device(&pnx4008_wdd); |
| 194 | 194 | ||
| 195 | clk_disable(wdt_clk); | 195 | clk_disable_unprepare(wdt_clk); |
| 196 | 196 | ||
| 197 | return 0; | 197 | return 0; |
| 198 | } | 198 | } |
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 7f97cdd53f29..9ec57608da82 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c | |||
| @@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd, | |||
| 140 | { | 140 | { |
| 141 | wdd->timeout = timeout; | 141 | wdd->timeout = timeout; |
| 142 | 142 | ||
| 143 | if (watchdog_active(wdd)) | 143 | if (watchdog_active(wdd)) { |
| 144 | tegra_wdt_stop(wdd); | ||
| 144 | return tegra_wdt_start(wdd); | 145 | return tegra_wdt_start(wdd); |
| 146 | } | ||
| 145 | 147 | ||
| 146 | return 0; | 148 | return 0; |
| 147 | } | 149 | } |
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 91bf55a20024..20e2bba10400 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c | |||
| @@ -224,7 +224,7 @@ static int wdt_keepalive(void) | |||
| 224 | 224 | ||
| 225 | static int wdt_set_timeout(int t) | 225 | static int wdt_set_timeout(int t) |
| 226 | { | 226 | { |
| 227 | int tmrval; | 227 | unsigned int tmrval; |
| 228 | 228 | ||
| 229 | /* | 229 | /* |
| 230 | * Convert seconds to watchdog counter time units, rounding up. | 230 | * Convert seconds to watchdog counter time units, rounding up. |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 849500e4e14d..524c22146429 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
| 40 | #include <asm/idle.h> | 40 | #include <asm/idle.h> |
| 41 | #include <asm/io_apic.h> | 41 | #include <asm/io_apic.h> |
| 42 | #include <asm/i8259.h> | ||
| 42 | #include <asm/xen/pci.h> | 43 | #include <asm/xen/pci.h> |
| 43 | #endif | 44 | #endif |
| 44 | #include <asm/sync_bitops.h> | 45 | #include <asm/sync_bitops.h> |
| @@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) | |||
| 420 | return xen_allocate_irq_dynamic(); | 421 | return xen_allocate_irq_dynamic(); |
| 421 | 422 | ||
| 422 | /* Legacy IRQ descriptors are already allocated by the arch. */ | 423 | /* Legacy IRQ descriptors are already allocated by the arch. */ |
| 423 | if (gsi < NR_IRQS_LEGACY) | 424 | if (gsi < nr_legacy_irqs()) |
| 424 | irq = gsi; | 425 | irq = gsi; |
| 425 | else | 426 | else |
| 426 | irq = irq_alloc_desc_at(gsi, -1); | 427 | irq = irq_alloc_desc_at(gsi, -1); |
| @@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq) | |||
| 446 | kfree(info); | 447 | kfree(info); |
| 447 | 448 | ||
| 448 | /* Legacy IRQ descriptors are managed by the arch. */ | 449 | /* Legacy IRQ descriptors are managed by the arch. */ |
| 449 | if (irq < NR_IRQS_LEGACY) | 450 | if (irq < nr_legacy_irqs()) |
| 450 | return; | 451 | return; |
| 451 | 452 | ||
| 452 | irq_free_desc(irq); | 453 | irq_free_desc(irq); |
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 00f40f051d95..38272ad24551 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c | |||
| @@ -49,6 +49,8 @@ | |||
| 49 | #include <linux/init.h> | 49 | #include <linux/init.h> |
| 50 | #include <linux/mutex.h> | 50 | #include <linux/mutex.h> |
| 51 | #include <linux/cpu.h> | 51 | #include <linux/cpu.h> |
| 52 | #include <linux/mm.h> | ||
| 53 | #include <linux/vmalloc.h> | ||
| 52 | 54 | ||
| 53 | #include <xen/xen.h> | 55 | #include <xen/xen.h> |
| 54 | #include <xen/events.h> | 56 | #include <xen/events.h> |
| @@ -58,10 +60,10 @@ | |||
| 58 | struct per_user_data { | 60 | struct per_user_data { |
| 59 | struct mutex bind_mutex; /* serialize bind/unbind operations */ | 61 | struct mutex bind_mutex; /* serialize bind/unbind operations */ |
| 60 | struct rb_root evtchns; | 62 | struct rb_root evtchns; |
| 63 | unsigned int nr_evtchns; | ||
| 61 | 64 | ||
| 62 | /* Notification ring, accessed via /dev/xen/evtchn. */ | 65 | /* Notification ring, accessed via /dev/xen/evtchn. */ |
| 63 | #define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) | 66 | unsigned int ring_size; |
| 64 | #define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) | ||
| 65 | evtchn_port_t *ring; | 67 | evtchn_port_t *ring; |
| 66 | unsigned int ring_cons, ring_prod, ring_overflow; | 68 | unsigned int ring_cons, ring_prod, ring_overflow; |
| 67 | struct mutex ring_cons_mutex; /* protect against concurrent readers */ | 69 | struct mutex ring_cons_mutex; /* protect against concurrent readers */ |
| @@ -80,10 +82,41 @@ struct user_evtchn { | |||
| 80 | bool enabled; | 82 | bool enabled; |
| 81 | }; | 83 | }; |
| 82 | 84 | ||
| 85 | static evtchn_port_t *evtchn_alloc_ring(unsigned int size) | ||
| 86 | { | ||
| 87 | evtchn_port_t *ring; | ||
| 88 | size_t s = size * sizeof(*ring); | ||
| 89 | |||
| 90 | ring = kmalloc(s, GFP_KERNEL); | ||
| 91 | if (!ring) | ||
| 92 | ring = vmalloc(s); | ||
| 93 | |||
| 94 | return ring; | ||
| 95 | } | ||
| 96 | |||
| 97 | static void evtchn_free_ring(evtchn_port_t *ring) | ||
| 98 | { | ||
| 99 | kvfree(ring); | ||
| 100 | } | ||
| 101 | |||
| 102 | static unsigned int evtchn_ring_offset(struct per_user_data *u, | ||
| 103 | unsigned int idx) | ||
| 104 | { | ||
| 105 | return idx & (u->ring_size - 1); | ||
| 106 | } | ||
| 107 | |||
| 108 | static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u, | ||
| 109 | unsigned int idx) | ||
| 110 | { | ||
| 111 | return u->ring + evtchn_ring_offset(u, idx); | ||
| 112 | } | ||
| 113 | |||
| 83 | static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) | 114 | static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) |
| 84 | { | 115 | { |
| 85 | struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; | 116 | struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; |
| 86 | 117 | ||
| 118 | u->nr_evtchns++; | ||
| 119 | |||
| 87 | while (*new) { | 120 | while (*new) { |
| 88 | struct user_evtchn *this; | 121 | struct user_evtchn *this; |
| 89 | 122 | ||
| @@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) | |||
| 107 | 140 | ||
| 108 | static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) | 141 | static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) |
| 109 | { | 142 | { |
| 143 | u->nr_evtchns--; | ||
| 110 | rb_erase(&evtchn->node, &u->evtchns); | 144 | rb_erase(&evtchn->node, &u->evtchns); |
| 111 | kfree(evtchn); | 145 | kfree(evtchn); |
| 112 | } | 146 | } |
| @@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) | |||
| 144 | 178 | ||
| 145 | spin_lock(&u->ring_prod_lock); | 179 | spin_lock(&u->ring_prod_lock); |
| 146 | 180 | ||
| 147 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { | 181 | if ((u->ring_prod - u->ring_cons) < u->ring_size) { |
| 148 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; | 182 | *evtchn_ring_entry(u, u->ring_prod) = evtchn->port; |
| 149 | wmb(); /* Ensure ring contents visible */ | 183 | wmb(); /* Ensure ring contents visible */ |
| 150 | if (u->ring_cons == u->ring_prod++) { | 184 | if (u->ring_cons == u->ring_prod++) { |
| 151 | wake_up_interruptible(&u->evtchn_wait); | 185 | wake_up_interruptible(&u->evtchn_wait); |
| @@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, | |||
| 200 | } | 234 | } |
| 201 | 235 | ||
| 202 | /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ | 236 | /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ |
| 203 | if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { | 237 | if (((c ^ p) & u->ring_size) != 0) { |
| 204 | bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * | 238 | bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) * |
| 205 | sizeof(evtchn_port_t); | 239 | sizeof(evtchn_port_t); |
| 206 | bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); | 240 | bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t); |
| 207 | } else { | 241 | } else { |
| 208 | bytes1 = (p - c) * sizeof(evtchn_port_t); | 242 | bytes1 = (p - c) * sizeof(evtchn_port_t); |
| 209 | bytes2 = 0; | 243 | bytes2 = 0; |
| @@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, | |||
| 219 | 253 | ||
| 220 | rc = -EFAULT; | 254 | rc = -EFAULT; |
| 221 | rmb(); /* Ensure that we see the port before we copy it. */ | 255 | rmb(); /* Ensure that we see the port before we copy it. */ |
| 222 | if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || | 256 | if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) || |
| 223 | ((bytes2 != 0) && | 257 | ((bytes2 != 0) && |
| 224 | copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) | 258 | copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) |
| 225 | goto unlock_out; | 259 | goto unlock_out; |
| @@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, | |||
| 278 | return rc; | 312 | return rc; |
| 279 | } | 313 | } |
| 280 | 314 | ||
| 315 | static int evtchn_resize_ring(struct per_user_data *u) | ||
| 316 | { | ||
| 317 | unsigned int new_size; | ||
| 318 | evtchn_port_t *new_ring, *old_ring; | ||
| 319 | unsigned int p, c; | ||
| 320 | |||
| 321 | /* | ||
| 322 | * Ensure the ring is large enough to capture all possible | ||
| 323 | * events. i.e., one free slot for each bound event. | ||
| 324 | */ | ||
| 325 | if (u->nr_evtchns <= u->ring_size) | ||
| 326 | return 0; | ||
| 327 | |||
| 328 | if (u->ring_size == 0) | ||
| 329 | new_size = 64; | ||
| 330 | else | ||
| 331 | new_size = 2 * u->ring_size; | ||
| 332 | |||
| 333 | new_ring = evtchn_alloc_ring(new_size); | ||
| 334 | if (!new_ring) | ||
| 335 | return -ENOMEM; | ||
| 336 | |||
| 337 | old_ring = u->ring; | ||
| 338 | |||
| 339 | /* | ||
| 340 | * Access to the ring contents is serialized by either the | ||
| 341 | * prod /or/ cons lock so take both when resizing. | ||
| 342 | */ | ||
| 343 | mutex_lock(&u->ring_cons_mutex); | ||
| 344 | spin_lock_irq(&u->ring_prod_lock); | ||
| 345 | |||
| 346 | /* | ||
| 347 | * Copy the old ring contents to the new ring. | ||
| 348 | * | ||
| 349 | * If the ring contents crosses the end of the current ring, | ||
| 350 | * it needs to be copied in two chunks. | ||
| 351 | * | ||
| 352 | * +---------+ +------------------+ | ||
| 353 | * |34567 12| -> | 1234567 | | ||
| 354 | * +-----p-c-+ +------------------+ | ||
| 355 | */ | ||
| 356 | p = evtchn_ring_offset(u, u->ring_prod); | ||
| 357 | c = evtchn_ring_offset(u, u->ring_cons); | ||
| 358 | if (p < c) { | ||
| 359 | memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); | ||
| 360 | memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); | ||
| 361 | } else | ||
| 362 | memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); | ||
| 363 | |||
| 364 | u->ring = new_ring; | ||
| 365 | u->ring_size = new_size; | ||
| 366 | |||
| 367 | spin_unlock_irq(&u->ring_prod_lock); | ||
| 368 | mutex_unlock(&u->ring_cons_mutex); | ||
| 369 | |||
| 370 | evtchn_free_ring(old_ring); | ||
| 371 | |||
| 372 | return 0; | ||
| 373 | } | ||
| 374 | |||
| 281 | static int evtchn_bind_to_user(struct per_user_data *u, int port) | 375 | static int evtchn_bind_to_user(struct per_user_data *u, int port) |
| 282 | { | 376 | { |
| 283 | struct user_evtchn *evtchn; | 377 | struct user_evtchn *evtchn; |
| @@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) | |||
| 305 | if (rc < 0) | 399 | if (rc < 0) |
| 306 | goto err; | 400 | goto err; |
| 307 | 401 | ||
| 402 | rc = evtchn_resize_ring(u); | ||
| 403 | if (rc < 0) | ||
| 404 | goto err; | ||
| 405 | |||
| 308 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, | 406 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, |
| 309 | u->name, evtchn); | 407 | u->name, evtchn); |
| 310 | if (rc < 0) | 408 | if (rc < 0) |
| @@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp) | |||
| 503 | 601 | ||
| 504 | init_waitqueue_head(&u->evtchn_wait); | 602 | init_waitqueue_head(&u->evtchn_wait); |
| 505 | 603 | ||
| 506 | u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL); | ||
| 507 | if (u->ring == NULL) { | ||
| 508 | kfree(u->name); | ||
| 509 | kfree(u); | ||
| 510 | return -ENOMEM; | ||
| 511 | } | ||
| 512 | |||
| 513 | mutex_init(&u->bind_mutex); | 604 | mutex_init(&u->bind_mutex); |
| 514 | mutex_init(&u->ring_cons_mutex); | 605 | mutex_init(&u->ring_cons_mutex); |
| 515 | spin_lock_init(&u->ring_prod_lock); | 606 | spin_lock_init(&u->ring_prod_lock); |
| @@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp) | |||
| 532 | evtchn_unbind_from_user(u, evtchn); | 623 | evtchn_unbind_from_user(u, evtchn); |
| 533 | } | 624 | } |
| 534 | 625 | ||
| 535 | free_page((unsigned long)u->ring); | 626 | evtchn_free_ring(u->ring); |
| 536 | kfree(u->name); | 627 | kfree(u->name); |
| 537 | kfree(u); | 628 | kfree(u); |
| 538 | 629 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ea0b3b2a91d..1be5dd048622 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
| @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
| 804 | 804 | ||
| 805 | vma->vm_ops = &gntdev_vmops; | 805 | vma->vm_ops = &gntdev_vmops; |
| 806 | 806 | ||
| 807 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; | 807 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; |
| 808 | 808 | ||
| 809 | if (use_ptemod) | 809 | if (use_ptemod) |
| 810 | vma->vm_flags |= VM_DONTCOPY; | 810 | vma->vm_flags |= VM_DONTCOPY; |
diff --git a/fs/Kconfig b/fs/Kconfig index da3f32f1a4e4..6ce72d8d1ee1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
| @@ -46,6 +46,12 @@ config FS_DAX | |||
| 46 | or if unsure, say N. Saying Y will increase the size of the kernel | 46 | or if unsure, say N. Saying Y will increase the size of the kernel |
| 47 | by about 5kB. | 47 | by about 5kB. |
| 48 | 48 | ||
| 49 | config FS_DAX_PMD | ||
| 50 | bool | ||
| 51 | default FS_DAX | ||
| 52 | depends on FS_DAX | ||
| 53 | depends on BROKEN | ||
| 54 | |||
| 49 | endif # BLOCK | 55 | endif # BLOCK |
| 50 | 56 | ||
| 51 | # Posix ACL utility routines | 57 | # Posix ACL utility routines |
diff --git a/fs/block_dev.c b/fs/block_dev.c index bb0dfb1c7af1..c25639e907bd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, | |||
| 390 | struct page *page) | 390 | struct page *page) |
| 391 | { | 391 | { |
| 392 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 392 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
| 393 | int result = -EOPNOTSUPP; | ||
| 394 | |||
| 393 | if (!ops->rw_page || bdev_get_integrity(bdev)) | 395 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
| 394 | return -EOPNOTSUPP; | 396 | return result; |
| 395 | return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); | 397 | |
| 398 | result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); | ||
| 399 | if (result) | ||
| 400 | return result; | ||
| 401 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); | ||
| 402 | blk_queue_exit(bdev->bd_queue); | ||
| 403 | return result; | ||
| 396 | } | 404 | } |
| 397 | EXPORT_SYMBOL_GPL(bdev_read_page); | 405 | EXPORT_SYMBOL_GPL(bdev_read_page); |
| 398 | 406 | ||
| @@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, | |||
| 421 | int result; | 429 | int result; |
| 422 | int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; | 430 | int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; |
| 423 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 431 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
| 432 | |||
| 424 | if (!ops->rw_page || bdev_get_integrity(bdev)) | 433 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
| 425 | return -EOPNOTSUPP; | 434 | return -EOPNOTSUPP; |
| 435 | result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); | ||
| 436 | if (result) | ||
| 437 | return result; | ||
| 438 | |||
| 426 | set_page_writeback(page); | 439 | set_page_writeback(page); |
| 427 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); | 440 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); |
| 428 | if (result) | 441 | if (result) |
| 429 | end_page_writeback(page); | 442 | end_page_writeback(page); |
| 430 | else | 443 | else |
| 431 | unlock_page(page); | 444 | unlock_page(page); |
| 445 | blk_queue_exit(bdev->bd_queue); | ||
| 432 | return result; | 446 | return result; |
| 433 | } | 447 | } |
| 434 | EXPORT_SYMBOL_GPL(bdev_write_page); | 448 | EXPORT_SYMBOL_GPL(bdev_write_page); |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6dcdb2ec9211..d453d62ab0c6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
| @@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
| 355 | 355 | ||
| 356 | index = srcu_read_lock(&fs_info->subvol_srcu); | 356 | index = srcu_read_lock(&fs_info->subvol_srcu); |
| 357 | 357 | ||
| 358 | root = btrfs_read_fs_root_no_name(fs_info, &root_key); | 358 | root = btrfs_get_fs_root(fs_info, &root_key, false); |
| 359 | if (IS_ERR(root)) { | 359 | if (IS_ERR(root)) { |
| 360 | srcu_read_unlock(&fs_info->subvol_srcu, index); | 360 | srcu_read_unlock(&fs_info->subvol_srcu, index); |
| 361 | ret = PTR_ERR(root); | 361 | ret = PTR_ERR(root); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8c58191249cc..35489e7129a7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, | |||
| 3416 | struct btrfs_block_group_cache *btrfs_lookup_block_group( | 3416 | struct btrfs_block_group_cache *btrfs_lookup_block_group( |
| 3417 | struct btrfs_fs_info *info, | 3417 | struct btrfs_fs_info *info, |
| 3418 | u64 bytenr); | 3418 | u64 bytenr); |
| 3419 | void btrfs_get_block_group(struct btrfs_block_group_cache *cache); | ||
| 3419 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache); | 3420 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache); |
| 3420 | int get_block_group_index(struct btrfs_block_group_cache *cache); | 3421 | int get_block_group_index(struct btrfs_block_group_cache *cache); |
| 3421 | struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | 3422 | struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, |
| @@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
| 3479 | struct btrfs_root *root, u64 bytes_used, | 3480 | struct btrfs_root *root, u64 bytes_used, |
| 3480 | u64 type, u64 chunk_objectid, u64 chunk_offset, | 3481 | u64 type, u64 chunk_objectid, u64 chunk_offset, |
| 3481 | u64 size); | 3482 | u64 size); |
| 3483 | struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( | ||
| 3484 | struct btrfs_fs_info *fs_info, | ||
| 3485 | const u64 chunk_offset); | ||
| 3482 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | 3486 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, |
| 3483 | struct btrfs_root *root, u64 group_start, | 3487 | struct btrfs_root *root, u64 group_start, |
| 3484 | struct extent_map *em); | 3488 | struct extent_map *em); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index acf3ed11cfb6..4b89680a1923 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) | |||
| 124 | return (cache->flags & bits) == bits; | 124 | return (cache->flags & bits) == bits; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) | 127 | void btrfs_get_block_group(struct btrfs_block_group_cache *cache) |
| 128 | { | 128 | { |
| 129 | atomic_inc(&cache->count); | 129 | atomic_inc(&cache->count); |
| 130 | } | 130 | } |
| @@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
| 5915 | set_extent_dirty(info->pinned_extents, | 5915 | set_extent_dirty(info->pinned_extents, |
| 5916 | bytenr, bytenr + num_bytes - 1, | 5916 | bytenr, bytenr + num_bytes - 1, |
| 5917 | GFP_NOFS | __GFP_NOFAIL); | 5917 | GFP_NOFS | __GFP_NOFAIL); |
| 5918 | /* | ||
| 5919 | * No longer have used bytes in this block group, queue | ||
| 5920 | * it for deletion. | ||
| 5921 | */ | ||
| 5922 | if (old_val == 0) { | ||
| 5923 | spin_lock(&info->unused_bgs_lock); | ||
| 5924 | if (list_empty(&cache->bg_list)) { | ||
| 5925 | btrfs_get_block_group(cache); | ||
| 5926 | list_add_tail(&cache->bg_list, | ||
| 5927 | &info->unused_bgs); | ||
| 5928 | } | ||
| 5929 | spin_unlock(&info->unused_bgs_lock); | ||
| 5930 | } | ||
| 5931 | } | 5918 | } |
| 5932 | 5919 | ||
| 5933 | spin_lock(&trans->transaction->dirty_bgs_lock); | 5920 | spin_lock(&trans->transaction->dirty_bgs_lock); |
| @@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
| 5939 | } | 5926 | } |
| 5940 | spin_unlock(&trans->transaction->dirty_bgs_lock); | 5927 | spin_unlock(&trans->transaction->dirty_bgs_lock); |
| 5941 | 5928 | ||
| 5929 | /* | ||
| 5930 | * No longer have used bytes in this block group, queue it for | ||
| 5931 | * deletion. We do this after adding the block group to the | ||
| 5932 | * dirty list to avoid races between cleaner kthread and space | ||
| 5933 | * cache writeout. | ||
| 5934 | */ | ||
| 5935 | if (!alloc && old_val == 0) { | ||
| 5936 | spin_lock(&info->unused_bgs_lock); | ||
| 5937 | if (list_empty(&cache->bg_list)) { | ||
| 5938 | btrfs_get_block_group(cache); | ||
| 5939 | list_add_tail(&cache->bg_list, | ||
| 5940 | &info->unused_bgs); | ||
| 5941 | } | ||
| 5942 | spin_unlock(&info->unused_bgs_lock); | ||
| 5943 | } | ||
| 5944 | |||
| 5942 | btrfs_put_block_group(cache); | 5945 | btrfs_put_block_group(cache); |
| 5943 | total -= num_bytes; | 5946 | total -= num_bytes; |
| 5944 | bytenr += num_bytes; | 5947 | bytenr += num_bytes; |
| @@ -8105,21 +8108,47 @@ reada: | |||
| 8105 | } | 8108 | } |
| 8106 | 8109 | ||
| 8107 | /* | 8110 | /* |
| 8108 | * TODO: Modify related function to add related node/leaf to dirty_extent_root, | 8111 | * These may not be seen by the usual inc/dec ref code so we have to |
| 8109 | * for later qgroup accounting. | 8112 | * add them here. |
| 8110 | * | ||
| 8111 | * Current, this function does nothing. | ||
| 8112 | */ | 8113 | */ |
| 8114 | static int record_one_subtree_extent(struct btrfs_trans_handle *trans, | ||
| 8115 | struct btrfs_root *root, u64 bytenr, | ||
| 8116 | u64 num_bytes) | ||
| 8117 | { | ||
| 8118 | struct btrfs_qgroup_extent_record *qrecord; | ||
| 8119 | struct btrfs_delayed_ref_root *delayed_refs; | ||
| 8120 | |||
| 8121 | qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS); | ||
| 8122 | if (!qrecord) | ||
| 8123 | return -ENOMEM; | ||
| 8124 | |||
| 8125 | qrecord->bytenr = bytenr; | ||
| 8126 | qrecord->num_bytes = num_bytes; | ||
| 8127 | qrecord->old_roots = NULL; | ||
| 8128 | |||
| 8129 | delayed_refs = &trans->transaction->delayed_refs; | ||
| 8130 | spin_lock(&delayed_refs->lock); | ||
| 8131 | if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord)) | ||
| 8132 | kfree(qrecord); | ||
| 8133 | spin_unlock(&delayed_refs->lock); | ||
| 8134 | |||
| 8135 | return 0; | ||
| 8136 | } | ||
| 8137 | |||
| 8113 | static int account_leaf_items(struct btrfs_trans_handle *trans, | 8138 | static int account_leaf_items(struct btrfs_trans_handle *trans, |
| 8114 | struct btrfs_root *root, | 8139 | struct btrfs_root *root, |
| 8115 | struct extent_buffer *eb) | 8140 | struct extent_buffer *eb) |
| 8116 | { | 8141 | { |
| 8117 | int nr = btrfs_header_nritems(eb); | 8142 | int nr = btrfs_header_nritems(eb); |
| 8118 | int i, extent_type; | 8143 | int i, extent_type, ret; |
| 8119 | struct btrfs_key key; | 8144 | struct btrfs_key key; |
| 8120 | struct btrfs_file_extent_item *fi; | 8145 | struct btrfs_file_extent_item *fi; |
| 8121 | u64 bytenr, num_bytes; | 8146 | u64 bytenr, num_bytes; |
| 8122 | 8147 | ||
| 8148 | /* We can be called directly from walk_up_proc() */ | ||
| 8149 | if (!root->fs_info->quota_enabled) | ||
| 8150 | return 0; | ||
| 8151 | |||
| 8123 | for (i = 0; i < nr; i++) { | 8152 | for (i = 0; i < nr; i++) { |
| 8124 | btrfs_item_key_to_cpu(eb, &key, i); | 8153 | btrfs_item_key_to_cpu(eb, &key, i); |
| 8125 | 8154 | ||
| @@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans, | |||
| 8138 | continue; | 8167 | continue; |
| 8139 | 8168 | ||
| 8140 | num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); | 8169 | num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); |
| 8170 | |||
| 8171 | ret = record_one_subtree_extent(trans, root, bytenr, num_bytes); | ||
| 8172 | if (ret) | ||
| 8173 | return ret; | ||
| 8141 | } | 8174 | } |
| 8142 | return 0; | 8175 | return 0; |
| 8143 | } | 8176 | } |
| @@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root, | |||
| 8206 | 8239 | ||
| 8207 | /* | 8240 | /* |
| 8208 | * root_eb is the subtree root and is locked before this function is called. | 8241 | * root_eb is the subtree root and is locked before this function is called. |
| 8209 | * TODO: Modify this function to mark all (including complete shared node) | ||
| 8210 | * to dirty_extent_root to allow it get accounted in qgroup. | ||
| 8211 | */ | 8242 | */ |
| 8212 | static int account_shared_subtree(struct btrfs_trans_handle *trans, | 8243 | static int account_shared_subtree(struct btrfs_trans_handle *trans, |
| 8213 | struct btrfs_root *root, | 8244 | struct btrfs_root *root, |
| @@ -8285,6 +8316,11 @@ walk_down: | |||
| 8285 | btrfs_tree_read_lock(eb); | 8316 | btrfs_tree_read_lock(eb); |
| 8286 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); | 8317 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
| 8287 | path->locks[level] = BTRFS_READ_LOCK_BLOCKING; | 8318 | path->locks[level] = BTRFS_READ_LOCK_BLOCKING; |
| 8319 | |||
| 8320 | ret = record_one_subtree_extent(trans, root, child_bytenr, | ||
| 8321 | root->nodesize); | ||
| 8322 | if (ret) | ||
| 8323 | goto out; | ||
| 8288 | } | 8324 | } |
| 8289 | 8325 | ||
| 8290 | if (level == 0) { | 8326 | if (level == 0) { |
| @@ -10256,6 +10292,47 @@ out: | |||
| 10256 | return ret; | 10292 | return ret; |
| 10257 | } | 10293 | } |
| 10258 | 10294 | ||
| 10295 | struct btrfs_trans_handle * | ||
| 10296 | btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info, | ||
| 10297 | const u64 chunk_offset) | ||
| 10298 | { | ||
| 10299 | struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree; | ||
| 10300 | struct extent_map *em; | ||
| 10301 | struct map_lookup *map; | ||
| 10302 | unsigned int num_items; | ||
| 10303 | |||
| 10304 | read_lock(&em_tree->lock); | ||
| 10305 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); | ||
| 10306 | read_unlock(&em_tree->lock); | ||
| 10307 | ASSERT(em && em->start == chunk_offset); | ||
| 10308 | |||
| 10309 | /* | ||
| 10310 | * We need to reserve 3 + N units from the metadata space info in order | ||
| 10311 | * to remove a block group (done at btrfs_remove_chunk() and at | ||
| 10312 | * btrfs_remove_block_group()), which are used for: | ||
| 10313 | * | ||
| 10314 | * 1 unit for adding the free space inode's orphan (located in the tree | ||
| 10315 | * of tree roots). | ||
| 10316 | * 1 unit for deleting the block group item (located in the extent | ||
| 10317 | * tree). | ||
| 10318 | * 1 unit for deleting the free space item (located in tree of tree | ||
| 10319 | * roots). | ||
| 10320 | * N units for deleting N device extent items corresponding to each | ||
| 10321 | * stripe (located in the device tree). | ||
| 10322 | * | ||
| 10323 | * In order to remove a block group we also need to reserve units in the | ||
| 10324 | * system space info in order to update the chunk tree (update one or | ||
| 10325 | * more device items and remove one chunk item), but this is done at | ||
| 10326 | * btrfs_remove_chunk() through a call to check_system_chunk(). | ||
| 10327 | */ | ||
| 10328 | map = (struct map_lookup *)em->bdev; | ||
| 10329 | num_items = 3 + map->num_stripes; | ||
| 10330 | free_extent_map(em); | ||
| 10331 | |||
| 10332 | return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, | ||
| 10333 | num_items, 1); | ||
| 10334 | } | ||
| 10335 | |||
| 10259 | /* | 10336 | /* |
| 10260 | * Process the unused_bgs list and remove any that don't have any allocated | 10337 | * Process the unused_bgs list and remove any that don't have any allocated |
| 10261 | * space inside of them. | 10338 | * space inside of them. |
| @@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) | |||
| 10322 | * Want to do this before we do anything else so we can recover | 10399 | * Want to do this before we do anything else so we can recover |
| 10323 | * properly if we fail to join the transaction. | 10400 | * properly if we fail to join the transaction. |
| 10324 | */ | 10401 | */ |
| 10325 | /* 1 for btrfs_orphan_reserve_metadata() */ | 10402 | trans = btrfs_start_trans_remove_block_group(fs_info, |
| 10326 | trans = btrfs_start_transaction(root, 1); | 10403 | block_group->key.objectid); |
| 10327 | if (IS_ERR(trans)) { | 10404 | if (IS_ERR(trans)) { |
| 10328 | btrfs_dec_block_group_ro(root, block_group); | 10405 | btrfs_dec_block_group_ro(root, block_group); |
| 10329 | ret = PTR_ERR(trans); | 10406 | ret = PTR_ERR(trans); |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 977e715f0bf2..72e73461c064 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -1882,8 +1882,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 1882 | struct btrfs_log_ctx ctx; | 1882 | struct btrfs_log_ctx ctx; |
| 1883 | int ret = 0; | 1883 | int ret = 0; |
| 1884 | bool full_sync = 0; | 1884 | bool full_sync = 0; |
| 1885 | const u64 len = end - start + 1; | 1885 | u64 len; |
| 1886 | 1886 | ||
| 1887 | /* | ||
| 1888 | * The range length can be represented by u64, we have to do the typecasts | ||
| 1889 | * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() | ||
| 1890 | */ | ||
| 1891 | len = (u64)end - (u64)start + 1; | ||
| 1887 | trace_btrfs_sync_file(file, datasync); | 1892 | trace_btrfs_sync_file(file, datasync); |
| 1888 | 1893 | ||
| 1889 | /* | 1894 | /* |
| @@ -2071,8 +2076,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 2071 | } | 2076 | } |
| 2072 | } | 2077 | } |
| 2073 | if (!full_sync) { | 2078 | if (!full_sync) { |
| 2074 | ret = btrfs_wait_ordered_range(inode, start, | 2079 | ret = btrfs_wait_ordered_range(inode, start, len); |
| 2075 | end - start + 1); | ||
| 2076 | if (ret) { | 2080 | if (ret) { |
| 2077 | btrfs_end_transaction(trans, root); | 2081 | btrfs_end_transaction(trans, root); |
| 2078 | goto out; | 2082 | goto out; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 994490d5fa64..a70c5790f8f5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |||
| 4046 | */ | 4046 | */ |
| 4047 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) | 4047 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) |
| 4048 | { | 4048 | { |
| 4049 | struct btrfs_trans_handle *trans; | ||
| 4050 | struct btrfs_root *root = BTRFS_I(dir)->root; | 4049 | struct btrfs_root *root = BTRFS_I(dir)->root; |
| 4051 | int ret; | ||
| 4052 | 4050 | ||
| 4053 | /* | 4051 | /* |
| 4054 | * 1 for the possible orphan item | 4052 | * 1 for the possible orphan item |
| @@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) | |||
| 4057 | * 1 for the inode ref | 4055 | * 1 for the inode ref |
| 4058 | * 1 for the inode | 4056 | * 1 for the inode |
| 4059 | */ | 4057 | */ |
| 4060 | trans = btrfs_start_transaction(root, 5); | 4058 | return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); |
| 4061 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | ||
| 4062 | return trans; | ||
| 4063 | |||
| 4064 | if (PTR_ERR(trans) == -ENOSPC) { | ||
| 4065 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); | ||
| 4066 | |||
| 4067 | trans = btrfs_start_transaction(root, 0); | ||
| 4068 | if (IS_ERR(trans)) | ||
| 4069 | return trans; | ||
| 4070 | ret = btrfs_cond_migrate_bytes(root->fs_info, | ||
| 4071 | &root->fs_info->trans_block_rsv, | ||
| 4072 | num_bytes, 5); | ||
| 4073 | if (ret) { | ||
| 4074 | btrfs_end_transaction(trans, root); | ||
| 4075 | return ERR_PTR(ret); | ||
| 4076 | } | ||
| 4077 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 4078 | trans->bytes_reserved = num_bytes; | ||
| 4079 | } | ||
| 4080 | return trans; | ||
| 4081 | } | 4059 | } |
| 4082 | 4060 | ||
| 4083 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | 4061 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 93e12c18ffd7..5279fdae7142 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
| @@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, | |||
| 993 | mutex_lock(&fs_info->qgroup_ioctl_lock); | 993 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
| 994 | if (!fs_info->quota_root) | 994 | if (!fs_info->quota_root) |
| 995 | goto out; | 995 | goto out; |
| 996 | spin_lock(&fs_info->qgroup_lock); | ||
| 997 | fs_info->quota_enabled = 0; | 996 | fs_info->quota_enabled = 0; |
| 998 | fs_info->pending_quota_state = 0; | 997 | fs_info->pending_quota_state = 0; |
| 998 | btrfs_qgroup_wait_for_completion(fs_info); | ||
| 999 | spin_lock(&fs_info->qgroup_lock); | ||
| 999 | quota_root = fs_info->quota_root; | 1000 | quota_root = fs_info->quota_root; |
| 1000 | fs_info->quota_root = NULL; | 1001 | fs_info->quota_root = NULL; |
| 1001 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; | 1002 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; |
| @@ -1461,6 +1462,8 @@ struct btrfs_qgroup_extent_record | |||
| 1461 | struct btrfs_qgroup_extent_record *entry; | 1462 | struct btrfs_qgroup_extent_record *entry; |
| 1462 | u64 bytenr = record->bytenr; | 1463 | u64 bytenr = record->bytenr; |
| 1463 | 1464 | ||
| 1465 | assert_spin_locked(&delayed_refs->lock); | ||
| 1466 | |||
| 1464 | while (*p) { | 1467 | while (*p) { |
| 1465 | parent_node = *p; | 1468 | parent_node = *p; |
| 1466 | entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, | 1469 | entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2907a77fb1f6..b091d94ceef6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -3432,7 +3432,9 @@ out: | |||
| 3432 | static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, | 3432 | static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, |
| 3433 | struct btrfs_device *scrub_dev, | 3433 | struct btrfs_device *scrub_dev, |
| 3434 | u64 chunk_offset, u64 length, | 3434 | u64 chunk_offset, u64 length, |
| 3435 | u64 dev_offset, int is_dev_replace) | 3435 | u64 dev_offset, |
| 3436 | struct btrfs_block_group_cache *cache, | ||
| 3437 | int is_dev_replace) | ||
| 3436 | { | 3438 | { |
| 3437 | struct btrfs_mapping_tree *map_tree = | 3439 | struct btrfs_mapping_tree *map_tree = |
| 3438 | &sctx->dev_root->fs_info->mapping_tree; | 3440 | &sctx->dev_root->fs_info->mapping_tree; |
| @@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, | |||
| 3445 | em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); | 3447 | em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); |
| 3446 | read_unlock(&map_tree->map_tree.lock); | 3448 | read_unlock(&map_tree->map_tree.lock); |
| 3447 | 3449 | ||
| 3448 | if (!em) | 3450 | if (!em) { |
| 3449 | return -EINVAL; | 3451 | /* |
| 3452 | * Might have been an unused block group deleted by the cleaner | ||
| 3453 | * kthread or relocation. | ||
| 3454 | */ | ||
| 3455 | spin_lock(&cache->lock); | ||
| 3456 | if (!cache->removed) | ||
| 3457 | ret = -EINVAL; | ||
| 3458 | spin_unlock(&cache->lock); | ||
| 3459 | |||
| 3460 | return ret; | ||
| 3461 | } | ||
| 3450 | 3462 | ||
| 3451 | map = (struct map_lookup *)em->bdev; | 3463 | map = (struct map_lookup *)em->bdev; |
| 3452 | if (em->start != chunk_offset) | 3464 | if (em->start != chunk_offset) |
| @@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3483 | u64 length; | 3495 | u64 length; |
| 3484 | u64 chunk_offset; | 3496 | u64 chunk_offset; |
| 3485 | int ret = 0; | 3497 | int ret = 0; |
| 3498 | int ro_set; | ||
| 3486 | int slot; | 3499 | int slot; |
| 3487 | struct extent_buffer *l; | 3500 | struct extent_buffer *l; |
| 3488 | struct btrfs_key key; | 3501 | struct btrfs_key key; |
| @@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3568 | scrub_pause_on(fs_info); | 3581 | scrub_pause_on(fs_info); |
| 3569 | ret = btrfs_inc_block_group_ro(root, cache); | 3582 | ret = btrfs_inc_block_group_ro(root, cache); |
| 3570 | scrub_pause_off(fs_info); | 3583 | scrub_pause_off(fs_info); |
| 3571 | if (ret) { | 3584 | |
| 3585 | if (ret == 0) { | ||
| 3586 | ro_set = 1; | ||
| 3587 | } else if (ret == -ENOSPC) { | ||
| 3588 | /* | ||
| 3589 | * btrfs_inc_block_group_ro return -ENOSPC when it | ||
| 3590 | * failed in creating new chunk for metadata. | ||
| 3591 | * It is not a problem for scrub/replace, because | ||
| 3592 | * metadata are always cowed, and our scrub paused | ||
| 3593 | * commit_transactions. | ||
| 3594 | */ | ||
| 3595 | ro_set = 0; | ||
| 3596 | } else { | ||
| 3597 | btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n", | ||
| 3598 | ret); | ||
| 3572 | btrfs_put_block_group(cache); | 3599 | btrfs_put_block_group(cache); |
| 3573 | break; | 3600 | break; |
| 3574 | } | 3601 | } |
| @@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3577 | dev_replace->cursor_left = found_key.offset; | 3604 | dev_replace->cursor_left = found_key.offset; |
| 3578 | dev_replace->item_needs_writeback = 1; | 3605 | dev_replace->item_needs_writeback = 1; |
| 3579 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, | 3606 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, |
| 3580 | found_key.offset, is_dev_replace); | 3607 | found_key.offset, cache, is_dev_replace); |
| 3581 | 3608 | ||
| 3582 | /* | 3609 | /* |
| 3583 | * flush, submit all pending read and write bios, afterwards | 3610 | * flush, submit all pending read and write bios, afterwards |
| @@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3611 | 3638 | ||
| 3612 | scrub_pause_off(fs_info); | 3639 | scrub_pause_off(fs_info); |
| 3613 | 3640 | ||
| 3614 | btrfs_dec_block_group_ro(root, cache); | 3641 | if (ro_set) |
| 3642 | btrfs_dec_block_group_ro(root, cache); | ||
| 3643 | |||
| 3644 | /* | ||
| 3645 | * We might have prevented the cleaner kthread from deleting | ||
| 3646 | * this block group if it was already unused because we raced | ||
| 3647 | * and set it to RO mode first. So add it back to the unused | ||
| 3648 | * list, otherwise it might not ever be deleted unless a manual | ||
| 3649 | * balance is triggered or it becomes used and unused again. | ||
| 3650 | */ | ||
| 3651 | spin_lock(&cache->lock); | ||
| 3652 | if (!cache->removed && !cache->ro && cache->reserved == 0 && | ||
| 3653 | btrfs_block_group_used(&cache->item) == 0) { | ||
| 3654 | spin_unlock(&cache->lock); | ||
| 3655 | spin_lock(&fs_info->unused_bgs_lock); | ||
| 3656 | if (list_empty(&cache->bg_list)) { | ||
| 3657 | btrfs_get_block_group(cache); | ||
| 3658 | list_add_tail(&cache->bg_list, | ||
| 3659 | &fs_info->unused_bgs); | ||
| 3660 | } | ||
| 3661 | spin_unlock(&fs_info->unused_bgs_lock); | ||
| 3662 | } else { | ||
| 3663 | spin_unlock(&cache->lock); | ||
| 3664 | } | ||
| 3615 | 3665 | ||
| 3616 | btrfs_put_block_group(cache); | 3666 | btrfs_put_block_group(cache); |
| 3617 | if (ret) | 3667 | if (ret) |
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index c8c3d70c31ff..8b72b005bfb9 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c | |||
| @@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void) | |||
| 898 | } | 898 | } |
| 899 | 899 | ||
| 900 | root = btrfs_alloc_dummy_root(); | 900 | root = btrfs_alloc_dummy_root(); |
| 901 | if (!root) | 901 | if (IS_ERR(root)) { |
| 902 | ret = PTR_ERR(root); | ||
| 902 | goto out; | 903 | goto out; |
| 904 | } | ||
| 903 | 905 | ||
| 904 | root->fs_info = btrfs_alloc_dummy_fs_info(); | 906 | root->fs_info = btrfs_alloc_dummy_fs_info(); |
| 905 | if (!root->fs_info) | 907 | if (!root->fs_info) |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 418c6a2ad7d8..3367a3c6f214 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -592,6 +592,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, | |||
| 592 | return start_transaction(root, num_items, TRANS_START, | 592 | return start_transaction(root, num_items, TRANS_START, |
| 593 | BTRFS_RESERVE_FLUSH_ALL); | 593 | BTRFS_RESERVE_FLUSH_ALL); |
| 594 | } | 594 | } |
| 595 | struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( | ||
| 596 | struct btrfs_root *root, | ||
| 597 | unsigned int num_items, | ||
| 598 | int min_factor) | ||
| 599 | { | ||
| 600 | struct btrfs_trans_handle *trans; | ||
| 601 | u64 num_bytes; | ||
| 602 | int ret; | ||
| 603 | |||
| 604 | trans = btrfs_start_transaction(root, num_items); | ||
| 605 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | ||
| 606 | return trans; | ||
| 607 | |||
| 608 | trans = btrfs_start_transaction(root, 0); | ||
| 609 | if (IS_ERR(trans)) | ||
| 610 | return trans; | ||
| 611 | |||
| 612 | num_bytes = btrfs_calc_trans_metadata_size(root, num_items); | ||
| 613 | ret = btrfs_cond_migrate_bytes(root->fs_info, | ||
| 614 | &root->fs_info->trans_block_rsv, | ||
| 615 | num_bytes, | ||
| 616 | min_factor); | ||
| 617 | if (ret) { | ||
| 618 | btrfs_end_transaction(trans, root); | ||
| 619 | return ERR_PTR(ret); | ||
| 620 | } | ||
| 621 | |||
| 622 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
| 623 | trans->bytes_reserved = num_bytes; | ||
| 624 | |||
| 625 | return trans; | ||
| 626 | } | ||
| 595 | 627 | ||
| 596 | struct btrfs_trans_handle *btrfs_start_transaction_lflush( | 628 | struct btrfs_trans_handle *btrfs_start_transaction_lflush( |
| 597 | struct btrfs_root *root, | 629 | struct btrfs_root *root, |
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index b05b2f64d913..0da21ca9b3fb 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 185 | struct btrfs_root *root); | 185 | struct btrfs_root *root); |
| 186 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, | 186 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, |
| 187 | unsigned int num_items); | 187 | unsigned int num_items); |
| 188 | struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( | ||
| 189 | struct btrfs_root *root, | ||
| 190 | unsigned int num_items, | ||
| 191 | int min_factor); | ||
| 188 | struct btrfs_trans_handle *btrfs_start_transaction_lflush( | 192 | struct btrfs_trans_handle *btrfs_start_transaction_lflush( |
| 189 | struct btrfs_root *root, | 193 | struct btrfs_root *root, |
| 190 | unsigned int num_items); | 194 | unsigned int num_items); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a6df8fdc1312..456452206609 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, | |||
| 1973 | if (srcdev->writeable) { | 1973 | if (srcdev->writeable) { |
| 1974 | fs_devices->rw_devices--; | 1974 | fs_devices->rw_devices--; |
| 1975 | /* zero out the old super if it is writable */ | 1975 | /* zero out the old super if it is writable */ |
| 1976 | btrfs_scratch_superblocks(srcdev->bdev, | 1976 | btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); |
| 1977 | rcu_str_deref(srcdev->name)); | ||
| 1978 | } | 1977 | } |
| 1979 | 1978 | ||
| 1980 | if (srcdev->bdev) | 1979 | if (srcdev->bdev) |
| @@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, | |||
| 2024 | btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); | 2023 | btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); |
| 2025 | 2024 | ||
| 2026 | if (tgtdev->bdev) { | 2025 | if (tgtdev->bdev) { |
| 2027 | btrfs_scratch_superblocks(tgtdev->bdev, | 2026 | btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); |
| 2028 | rcu_str_deref(tgtdev->name)); | ||
| 2029 | fs_info->fs_devices->open_devices--; | 2027 | fs_info->fs_devices->open_devices--; |
| 2030 | } | 2028 | } |
| 2031 | fs_info->fs_devices->num_devices--; | 2029 | fs_info->fs_devices->num_devices--; |
| @@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) | |||
| 2853 | if (ret) | 2851 | if (ret) |
| 2854 | return ret; | 2852 | return ret; |
| 2855 | 2853 | ||
| 2856 | trans = btrfs_start_transaction(root, 0); | 2854 | trans = btrfs_start_trans_remove_block_group(root->fs_info, |
| 2855 | chunk_offset); | ||
| 2857 | if (IS_ERR(trans)) { | 2856 | if (IS_ERR(trans)) { |
| 2858 | ret = PTR_ERR(trans); | 2857 | ret = PTR_ERR(trans); |
| 2859 | btrfs_std_error(root->fs_info, ret, NULL); | 2858 | btrfs_std_error(root->fs_info, ret, NULL); |
| @@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type, | |||
| 3123 | return 1; | 3122 | return 1; |
| 3124 | } | 3123 | } |
| 3125 | 3124 | ||
| 3126 | static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, | 3125 | static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, |
| 3127 | struct btrfs_balance_args *bargs) | 3126 | struct btrfs_balance_args *bargs) |
| 3128 | { | 3127 | { |
| 3129 | struct btrfs_block_group_cache *cache; | 3128 | struct btrfs_block_group_cache *cache; |
| @@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, | |||
| 3156 | return ret; | 3155 | return ret; |
| 3157 | } | 3156 | } |
| 3158 | 3157 | ||
| 3159 | static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, | 3158 | static int chunk_usage_filter(struct btrfs_fs_info *fs_info, |
| 3160 | u64 chunk_offset, struct btrfs_balance_args *bargs) | 3159 | u64 chunk_offset, struct btrfs_balance_args *bargs) |
| 3161 | { | 3160 | { |
| 3162 | struct btrfs_block_group_cache *cache; | 3161 | struct btrfs_block_group_cache *cache; |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ec5712372732..d5c84f6b1353 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
| @@ -382,7 +382,7 @@ struct map_lookup { | |||
| 382 | #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) | 382 | #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) |
| 383 | #define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) | 383 | #define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) |
| 384 | #define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) | 384 | #define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) |
| 385 | #define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8) | 385 | #define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10) |
| 386 | 386 | ||
| 387 | #define BTRFS_BALANCE_ARGS_MASK \ | 387 | #define BTRFS_BALANCE_ARGS_MASK \ |
| 388 | (BTRFS_BALANCE_ARGS_PROFILES | \ | 388 | (BTRFS_BALANCE_ARGS_PROFILES | \ |
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 7a6b02f72787..c0f3da3926a0 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
| @@ -879,7 +879,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) | |||
| 879 | loff_t pos, eof; | 879 | loff_t pos, eof; |
| 880 | size_t len; | 880 | size_t len; |
| 881 | void *data; | 881 | void *data; |
| 882 | int ret; | 882 | int ret = -ENOBUFS; |
| 883 | 883 | ||
| 884 | ASSERT(op != NULL); | 884 | ASSERT(op != NULL); |
| 885 | ASSERT(page != NULL); | 885 | ASSERT(page != NULL); |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index c81ce7f200a6..a7a1b218f308 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
| @@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = { | |||
| 1636 | .iterate = configfs_readdir, | 1636 | .iterate = configfs_readdir, |
| 1637 | }; | 1637 | }; |
| 1638 | 1638 | ||
| 1639 | /** | ||
| 1640 | * configfs_register_group - creates a parent-child relation between two groups | ||
| 1641 | * @parent_group: parent group | ||
| 1642 | * @group: child group | ||
| 1643 | * | ||
| 1644 | * link groups, creates dentry for the child and attaches it to the | ||
| 1645 | * parent dentry. | ||
| 1646 | * | ||
| 1647 | * Return: 0 on success, negative errno code on error | ||
| 1648 | */ | ||
| 1649 | int configfs_register_group(struct config_group *parent_group, | ||
| 1650 | struct config_group *group) | ||
| 1651 | { | ||
| 1652 | struct configfs_subsystem *subsys = parent_group->cg_subsys; | ||
| 1653 | struct dentry *parent; | ||
| 1654 | int ret; | ||
| 1655 | |||
| 1656 | mutex_lock(&subsys->su_mutex); | ||
| 1657 | link_group(parent_group, group); | ||
| 1658 | mutex_unlock(&subsys->su_mutex); | ||
| 1659 | |||
| 1660 | parent = parent_group->cg_item.ci_dentry; | ||
| 1661 | |||
| 1662 | mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); | ||
| 1663 | ret = create_default_group(parent_group, group); | ||
| 1664 | if (!ret) { | ||
| 1665 | spin_lock(&configfs_dirent_lock); | ||
| 1666 | configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); | ||
| 1667 | spin_unlock(&configfs_dirent_lock); | ||
| 1668 | } | ||
| 1669 | mutex_unlock(&d_inode(parent)->i_mutex); | ||
| 1670 | return ret; | ||
| 1671 | } | ||
| 1672 | EXPORT_SYMBOL(configfs_register_group); | ||
| 1673 | |||
| 1674 | /** | ||
| 1675 | * configfs_unregister_group() - unregisters a child group from its parent | ||
| 1676 | * @group: parent group to be unregistered | ||
| 1677 | * | ||
| 1678 | * Undoes configfs_register_group() | ||
| 1679 | */ | ||
| 1680 | void configfs_unregister_group(struct config_group *group) | ||
| 1681 | { | ||
| 1682 | struct configfs_subsystem *subsys = group->cg_subsys; | ||
| 1683 | struct dentry *dentry = group->cg_item.ci_dentry; | ||
| 1684 | struct dentry *parent = group->cg_item.ci_parent->ci_dentry; | ||
| 1685 | |||
| 1686 | mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); | ||
| 1687 | spin_lock(&configfs_dirent_lock); | ||
| 1688 | configfs_detach_prep(dentry, NULL); | ||
| 1689 | spin_unlock(&configfs_dirent_lock); | ||
| 1690 | |||
| 1691 | configfs_detach_group(&group->cg_item); | ||
| 1692 | d_inode(dentry)->i_flags |= S_DEAD; | ||
| 1693 | dont_mount(dentry); | ||
| 1694 | d_delete(dentry); | ||
| 1695 | mutex_unlock(&d_inode(parent)->i_mutex); | ||
| 1696 | |||
| 1697 | dput(dentry); | ||
| 1698 | |||
| 1699 | mutex_lock(&subsys->su_mutex); | ||
| 1700 | unlink_group(group); | ||
| 1701 | mutex_unlock(&subsys->su_mutex); | ||
| 1702 | } | ||
| 1703 | EXPORT_SYMBOL(configfs_unregister_group); | ||
| 1704 | |||
| 1705 | /** | ||
| 1706 | * configfs_register_default_group() - allocates and registers a child group | ||
| 1707 | * @parent_group: parent group | ||
| 1708 | * @name: child group name | ||
| 1709 | * @item_type: child item type description | ||
| 1710 | * | ||
| 1711 | * boilerplate to allocate and register a child group with its parent. We need | ||
| 1712 | * kzalloc'ed memory because child's default_group is initially empty. | ||
| 1713 | * | ||
| 1714 | * Return: allocated config group or ERR_PTR() on error | ||
| 1715 | */ | ||
| 1716 | struct config_group * | ||
| 1717 | configfs_register_default_group(struct config_group *parent_group, | ||
| 1718 | const char *name, | ||
| 1719 | struct config_item_type *item_type) | ||
| 1720 | { | ||
| 1721 | int ret; | ||
| 1722 | struct config_group *group; | ||
| 1723 | |||
| 1724 | group = kzalloc(sizeof(*group), GFP_KERNEL); | ||
| 1725 | if (!group) | ||
| 1726 | return ERR_PTR(-ENOMEM); | ||
| 1727 | config_group_init_type_name(group, name, item_type); | ||
| 1728 | |||
| 1729 | ret = configfs_register_group(parent_group, group); | ||
| 1730 | if (ret) { | ||
| 1731 | kfree(group); | ||
| 1732 | return ERR_PTR(ret); | ||
| 1733 | } | ||
| 1734 | return group; | ||
| 1735 | } | ||
| 1736 | EXPORT_SYMBOL(configfs_register_default_group); | ||
| 1737 | |||
| 1738 | /** | ||
| 1739 | * configfs_unregister_default_group() - unregisters and frees a child group | ||
| 1740 | * @group: the group to act on | ||
| 1741 | */ | ||
| 1742 | void configfs_unregister_default_group(struct config_group *group) | ||
| 1743 | { | ||
| 1744 | configfs_unregister_group(group); | ||
| 1745 | kfree(group); | ||
| 1746 | } | ||
| 1747 | EXPORT_SYMBOL(configfs_unregister_default_group); | ||
| 1748 | |||
| 1639 | int configfs_register_subsystem(struct configfs_subsystem *subsys) | 1749 | int configfs_register_subsystem(struct configfs_subsystem *subsys) |
| 1640 | { | 1750 | { |
| 1641 | int err; | 1751 | int err; |
| @@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 541 | unsigned long pfn; | 541 | unsigned long pfn; |
| 542 | int result = 0; | 542 | int result = 0; |
| 543 | 543 | ||
| 544 | /* dax pmd mappings are broken wrt gup and fork */ | ||
| 545 | if (!IS_ENABLED(CONFIG_FS_DAX_PMD)) | ||
| 546 | return VM_FAULT_FALLBACK; | ||
| 547 | |||
| 544 | /* Fall back to PTEs if we're going to COW */ | 548 | /* Fall back to PTEs if we're going to COW */ |
| 545 | if (write && !(vma->vm_flags & VM_SHARED)) | 549 | if (write && !(vma->vm_flags & VM_SHARED)) |
| 546 | return VM_FAULT_FALLBACK; | 550 | return VM_FAULT_FALLBACK; |
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 3a71cea68420..748d35afc902 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb) | |||
| 569 | /* Fall through */ | 569 | /* Fall through */ |
| 570 | case Opt_dax: | 570 | case Opt_dax: |
| 571 | #ifdef CONFIG_FS_DAX | 571 | #ifdef CONFIG_FS_DAX |
| 572 | ext2_msg(sb, KERN_WARNING, | ||
| 573 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); | ||
| 572 | set_opt(sbi->s_mount_opt, DAX); | 574 | set_opt(sbi->s_mount_opt, DAX); |
| 573 | #else | 575 | #else |
| 574 | ext2_msg(sb, KERN_INFO, "dax option not supported"); | 576 | ext2_msg(sb, KERN_INFO, "dax option not supported"); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 753f4e68b820..c9ab67da6e5a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, | |||
| 1664 | } | 1664 | } |
| 1665 | sbi->s_jquota_fmt = m->mount_opt; | 1665 | sbi->s_jquota_fmt = m->mount_opt; |
| 1666 | #endif | 1666 | #endif |
| 1667 | #ifndef CONFIG_FS_DAX | ||
| 1668 | } else if (token == Opt_dax) { | 1667 | } else if (token == Opt_dax) { |
| 1668 | #ifdef CONFIG_FS_DAX | ||
| 1669 | ext4_msg(sb, KERN_WARNING, | ||
| 1670 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); | ||
| 1671 | sbi->s_mount_opt |= m->mount_opt; | ||
| 1672 | #else | ||
| 1669 | ext4_msg(sb, KERN_INFO, "dax option not supported"); | 1673 | ext4_msg(sb, KERN_INFO, "dax option not supported"); |
| 1670 | return -1; | 1674 | return -1; |
| 1671 | #endif | 1675 | #endif |
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 4afc4d9d2e41..8b2127ffb226 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c | |||
| @@ -610,9 +610,9 @@ parse_record: | |||
| 610 | int status = fat_parse_long(inode, &cpos, &bh, &de, | 610 | int status = fat_parse_long(inode, &cpos, &bh, &de, |
| 611 | &unicode, &nr_slots); | 611 | &unicode, &nr_slots); |
| 612 | if (status < 0) { | 612 | if (status < 0) { |
| 613 | ctx->pos = cpos; | 613 | bh = NULL; |
| 614 | ret = status; | 614 | ret = status; |
| 615 | goto out; | 615 | goto end_of_dir; |
| 616 | } else if (status == PARSE_INVALID) | 616 | } else if (status == PARSE_INVALID) |
| 617 | goto record_end; | 617 | goto record_end; |
| 618 | else if (status == PARSE_NOT_LONGNAME) | 618 | else if (status == PARSE_NOT_LONGNAME) |
| @@ -654,8 +654,9 @@ parse_record: | |||
| 654 | fill_len = short_len; | 654 | fill_len = short_len; |
| 655 | 655 | ||
| 656 | start_filldir: | 656 | start_filldir: |
| 657 | if (!fake_offset) | 657 | ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); |
| 658 | ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); | 658 | if (fake_offset && ctx->pos < 2) |
| 659 | ctx->pos = 2; | ||
| 659 | 660 | ||
| 660 | if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { | 661 | if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { |
| 661 | if (!dir_emit_dot(file, ctx)) | 662 | if (!dir_emit_dot(file, ctx)) |
| @@ -681,14 +682,19 @@ record_end: | |||
| 681 | fake_offset = 0; | 682 | fake_offset = 0; |
| 682 | ctx->pos = cpos; | 683 | ctx->pos = cpos; |
| 683 | goto get_new; | 684 | goto get_new; |
| 685 | |||
| 684 | end_of_dir: | 686 | end_of_dir: |
| 685 | ctx->pos = cpos; | 687 | if (fake_offset && cpos < 2) |
| 688 | ctx->pos = 2; | ||
| 689 | else | ||
| 690 | ctx->pos = cpos; | ||
| 686 | fill_failed: | 691 | fill_failed: |
| 687 | brelse(bh); | 692 | brelse(bh); |
| 688 | if (unicode) | 693 | if (unicode) |
| 689 | __putname(unicode); | 694 | __putname(unicode); |
| 690 | out: | 695 | out: |
| 691 | mutex_unlock(&sbi->s_lock); | 696 | mutex_unlock(&sbi->s_lock); |
| 697 | |||
| 692 | return ret; | 698 | return ret; |
| 693 | } | 699 | } |
| 694 | 700 | ||
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 316adb968b65..de4bdfac0cec 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page) | |||
| 332 | * truncation is indicated by end of range being LLONG_MAX | 332 | * truncation is indicated by end of range being LLONG_MAX |
| 333 | * In this case, we first scan the range and release found pages. | 333 | * In this case, we first scan the range and release found pages. |
| 334 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv | 334 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv |
| 335 | * maps and global counts. | 335 | * maps and global counts. Page faults can not race with truncation |
| 336 | * in this routine. hugetlb_no_page() prevents page faults in the | ||
| 337 | * truncated range. It checks i_size before allocation, and again after | ||
| 338 | * with the page table lock for the page held. The same lock must be | ||
| 339 | * acquired to unmap a page. | ||
| 336 | * hole punch is indicated if end is not LLONG_MAX | 340 | * hole punch is indicated if end is not LLONG_MAX |
| 337 | * In the hole punch case we scan the range and release found pages. | 341 | * In the hole punch case we scan the range and release found pages. |
| 338 | * Only when releasing a page is the associated region/reserv map | 342 | * Only when releasing a page is the associated region/reserv map |
| 339 | * deleted. The region/reserv map for ranges without associated | 343 | * deleted. The region/reserv map for ranges without associated |
| 340 | * pages are not modified. | 344 | * pages are not modified. Page faults can race with hole punch. |
| 345 | * This is indicated if we find a mapped page. | ||
| 341 | * Note: If the passed end of range value is beyond the end of file, but | 346 | * Note: If the passed end of range value is beyond the end of file, but |
| 342 | * not LLONG_MAX this routine still performs a hole punch operation. | 347 | * not LLONG_MAX this routine still performs a hole punch operation. |
| 343 | */ | 348 | */ |
| @@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
| 361 | next = start; | 366 | next = start; |
| 362 | while (next < end) { | 367 | while (next < end) { |
| 363 | /* | 368 | /* |
| 364 | * Make sure to never grab more pages that we | 369 | * Don't grab more pages than the number left in the range. |
| 365 | * might possibly need. | ||
| 366 | */ | 370 | */ |
| 367 | if (end - next < lookup_nr) | 371 | if (end - next < lookup_nr) |
| 368 | lookup_nr = end - next; | 372 | lookup_nr = end - next; |
| 369 | 373 | ||
| 370 | /* | 374 | /* |
| 371 | * This pagevec_lookup() may return pages past 'end', | 375 | * When no more pages are found, we are done. |
| 372 | * so we must check for page->index > end. | ||
| 373 | */ | 376 | */ |
| 374 | if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { | 377 | if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) |
| 375 | if (next == start) | 378 | break; |
| 376 | break; | ||
| 377 | next = start; | ||
| 378 | continue; | ||
| 379 | } | ||
| 380 | 379 | ||
| 381 | for (i = 0; i < pagevec_count(&pvec); ++i) { | 380 | for (i = 0; i < pagevec_count(&pvec); ++i) { |
| 382 | struct page *page = pvec.pages[i]; | 381 | struct page *page = pvec.pages[i]; |
| 383 | u32 hash; | 382 | u32 hash; |
| 384 | 383 | ||
| 384 | /* | ||
| 385 | * The page (index) could be beyond end. This is | ||
| 386 | * only possible in the punch hole case as end is | ||
| 387 | * max page offset in the truncate case. | ||
| 388 | */ | ||
| 389 | next = page->index; | ||
| 390 | if (next >= end) | ||
| 391 | break; | ||
| 392 | |||
| 385 | hash = hugetlb_fault_mutex_hash(h, current->mm, | 393 | hash = hugetlb_fault_mutex_hash(h, current->mm, |
| 386 | &pseudo_vma, | 394 | &pseudo_vma, |
| 387 | mapping, next, 0); | 395 | mapping, next, 0); |
| 388 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 396 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
| 389 | 397 | ||
| 390 | lock_page(page); | 398 | lock_page(page); |
| 391 | if (page->index >= end) { | 399 | if (likely(!page_mapped(page))) { |
| 392 | unlock_page(page); | ||
| 393 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | ||
| 394 | next = end; /* we are done */ | ||
| 395 | break; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * If page is mapped, it was faulted in after being | ||
| 400 | * unmapped. Do nothing in this race case. In the | ||
| 401 | * normal case page is not mapped. | ||
| 402 | */ | ||
| 403 | if (!page_mapped(page)) { | ||
| 404 | bool rsv_on_error = !PagePrivate(page); | 400 | bool rsv_on_error = !PagePrivate(page); |
| 405 | /* | 401 | /* |
| 406 | * We must free the huge page and remove | 402 | * We must free the huge page and remove |
| @@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
| 421 | hugetlb_fix_reserve_counts( | 417 | hugetlb_fix_reserve_counts( |
| 422 | inode, rsv_on_error); | 418 | inode, rsv_on_error); |
| 423 | } | 419 | } |
| 420 | } else { | ||
| 421 | /* | ||
| 422 | * If page is mapped, it was faulted in after | ||
| 423 | * being unmapped. It indicates a race between | ||
| 424 | * hole punch and page fault. Do nothing in | ||
| 425 | * this case. Getting here in a truncate | ||
| 426 | * operation is a bug. | ||
| 427 | */ | ||
| 428 | BUG_ON(truncate_op); | ||
| 424 | } | 429 | } |
| 425 | 430 | ||
| 426 | if (page->index > next) | ||
| 427 | next = page->index; | ||
| 428 | |||
| 429 | ++next; | ||
| 430 | unlock_page(page); | 431 | unlock_page(page); |
| 431 | |||
| 432 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 432 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
| 433 | } | 433 | } |
| 434 | ++next; | ||
| 434 | huge_pagevec_release(&pvec); | 435 | huge_pagevec_release(&pvec); |
| 436 | cond_resched(); | ||
| 435 | } | 437 | } |
| 436 | 438 | ||
| 437 | if (truncate_op) | 439 | if (truncate_op) |
| @@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, | |||
| 647 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) | 649 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) |
| 648 | i_size_write(inode, offset + len); | 650 | i_size_write(inode, offset + len); |
| 649 | inode->i_ctime = CURRENT_TIME; | 651 | inode->i_ctime = CURRENT_TIME; |
| 650 | spin_lock(&inode->i_lock); | ||
| 651 | inode->i_private = NULL; | ||
| 652 | spin_unlock(&inode->i_lock); | ||
| 653 | out: | 652 | out: |
| 654 | mutex_unlock(&inode->i_mutex); | 653 | mutex_unlock(&inode->i_mutex); |
| 655 | return error; | 654 | return error; |
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 79b113048eac..0a3f9b594602 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c | |||
| @@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg | |||
| 525 | switch (rqdata.cmd) { | 525 | switch (rqdata.cmd) { |
| 526 | case NCP_LOCK_EX: | 526 | case NCP_LOCK_EX: |
| 527 | case NCP_LOCK_SH: | 527 | case NCP_LOCK_SH: |
| 528 | if (rqdata.timeout < 0) | ||
| 529 | return -EINVAL; | ||
| 528 | if (rqdata.timeout == 0) | 530 | if (rqdata.timeout == 0) |
| 529 | rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; | 531 | rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; |
| 530 | else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) | 532 | else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) |
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 646cdac73488..beac58b0e09c 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
| @@ -78,7 +78,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) | |||
| 78 | 78 | ||
| 79 | p = xdr_inline_decode(xdr, nbytes); | 79 | p = xdr_inline_decode(xdr, nbytes); |
| 80 | if (unlikely(p == NULL)) | 80 | if (unlikely(p == NULL)) |
| 81 | printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n"); | 81 | printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed " |
| 82 | "or truncated request.\n"); | ||
| 82 | return p; | 83 | return p; |
| 83 | } | 84 | } |
| 84 | 85 | ||
| @@ -889,6 +890,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
| 889 | struct cb_compound_hdr_arg hdr_arg = { 0 }; | 890 | struct cb_compound_hdr_arg hdr_arg = { 0 }; |
| 890 | struct cb_compound_hdr_res hdr_res = { NULL }; | 891 | struct cb_compound_hdr_res hdr_res = { NULL }; |
| 891 | struct xdr_stream xdr_in, xdr_out; | 892 | struct xdr_stream xdr_in, xdr_out; |
| 893 | struct xdr_buf *rq_arg = &rqstp->rq_arg; | ||
| 892 | __be32 *p, status; | 894 | __be32 *p, status; |
| 893 | struct cb_process_state cps = { | 895 | struct cb_process_state cps = { |
| 894 | .drc_status = 0, | 896 | .drc_status = 0, |
| @@ -900,7 +902,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
| 900 | 902 | ||
| 901 | dprintk("%s: start\n", __func__); | 903 | dprintk("%s: start\n", __func__); |
| 902 | 904 | ||
| 903 | xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); | 905 | rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len; |
| 906 | xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base); | ||
| 904 | 907 | ||
| 905 | p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); | 908 | p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); |
| 906 | xdr_init_encode(&xdr_out, &rqstp->rq_res, p); | 909 | xdr_init_encode(&xdr_out, &rqstp->rq_res, p); |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 326d9e10d833..31b0a52223a7 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
| @@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, | |||
| 618 | nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); | 618 | nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); |
| 619 | nfs_vmtruncate(inode, attr->ia_size); | 619 | nfs_vmtruncate(inode, attr->ia_size); |
| 620 | } | 620 | } |
| 621 | nfs_update_inode(inode, fattr); | 621 | if (fattr->valid) |
| 622 | nfs_update_inode(inode, fattr); | ||
| 623 | else | ||
| 624 | NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; | ||
| 622 | spin_unlock(&inode->i_lock); | 625 | spin_unlock(&inode->i_lock); |
| 623 | } | 626 | } |
| 624 | EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); | 627 | EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); |
| @@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
| 1824 | if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) | 1827 | if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) |
| 1825 | nfsi->attr_gencount = fattr->gencount; | 1828 | nfsi->attr_gencount = fattr->gencount; |
| 1826 | } | 1829 | } |
| 1827 | invalid &= ~NFS_INO_INVALID_ATTR; | 1830 | |
| 1831 | /* Don't declare attrcache up to date if there were no attrs! */ | ||
| 1832 | if (fattr->valid != 0) | ||
| 1833 | invalid &= ~NFS_INO_INVALID_ATTR; | ||
| 1834 | |||
| 1828 | /* Don't invalidate the data if we were to blame */ | 1835 | /* Don't invalidate the data if we were to blame */ |
| 1829 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) | 1836 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) |
| 1830 | || S_ISLNK(inode->i_mode))) | 1837 | || S_ISLNK(inode->i_mode))) |
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 3e92a3cde15d..6b1ce9825430 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include "pnfs.h" | 14 | #include "pnfs.h" |
| 15 | #include "internal.h" | 15 | #include "internal.h" |
| 16 | 16 | ||
| 17 | #define NFSDBG_FACILITY NFSDBG_PNFS | 17 | #define NFSDBG_FACILITY NFSDBG_PROC |
| 18 | 18 | ||
| 19 | static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, | 19 | static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, |
| 20 | fmode_t fmode) | 20 | fmode_t fmode) |
| @@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f, | |||
| 284 | .dst_fh = NFS_FH(dst_inode), | 284 | .dst_fh = NFS_FH(dst_inode), |
| 285 | .src_offset = src_offset, | 285 | .src_offset = src_offset, |
| 286 | .dst_offset = dst_offset, | 286 | .dst_offset = dst_offset, |
| 287 | .count = count, | ||
| 287 | .dst_bitmask = server->cache_consistency_bitmask, | 288 | .dst_bitmask = server->cache_consistency_bitmask, |
| 288 | }; | 289 | }; |
| 289 | struct nfs42_clone_res res = { | 290 | struct nfs42_clone_res res = { |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 223bedda64ae..10410e8b5853 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
| @@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) | |||
| 33 | return ret; | 33 | return ret; |
| 34 | idr_preload(GFP_KERNEL); | 34 | idr_preload(GFP_KERNEL); |
| 35 | spin_lock(&nn->nfs_client_lock); | 35 | spin_lock(&nn->nfs_client_lock); |
| 36 | ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT); | 36 | ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT); |
| 37 | if (ret >= 0) | 37 | if (ret >= 0) |
| 38 | clp->cl_cb_ident = ret; | 38 | clp->cl_cb_ident = ret; |
| 39 | spin_unlock(&nn->nfs_client_lock); | 39 | spin_unlock(&nn->nfs_client_lock); |
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 4aa571956cd6..db9b5fea5b3e 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/file.h> | 7 | #include <linux/file.h> |
| 8 | #include <linux/falloc.h> | 8 | #include <linux/falloc.h> |
| 9 | #include <linux/nfs_fs.h> | 9 | #include <linux/nfs_fs.h> |
| 10 | #include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */ | ||
| 10 | #include "delegation.h" | 11 | #include "delegation.h" |
| 11 | #include "internal.h" | 12 | #include "internal.h" |
| 12 | #include "iostat.h" | 13 | #include "iostat.h" |
| @@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, | |||
| 203 | struct fd src_file; | 204 | struct fd src_file; |
| 204 | struct inode *src_inode; | 205 | struct inode *src_inode; |
| 205 | unsigned int bs = server->clone_blksize; | 206 | unsigned int bs = server->clone_blksize; |
| 207 | bool same_inode = false; | ||
| 206 | int ret; | 208 | int ret; |
| 207 | 209 | ||
| 208 | /* dst file must be opened for writing */ | 210 | /* dst file must be opened for writing */ |
| @@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, | |||
| 221 | 223 | ||
| 222 | src_inode = file_inode(src_file.file); | 224 | src_inode = file_inode(src_file.file); |
| 223 | 225 | ||
| 224 | /* src and dst must be different files */ | ||
| 225 | ret = -EINVAL; | ||
| 226 | if (src_inode == dst_inode) | 226 | if (src_inode == dst_inode) |
| 227 | goto out_fput; | 227 | same_inode = true; |
| 228 | 228 | ||
| 229 | /* src file must be opened for reading */ | 229 | /* src file must be opened for reading */ |
| 230 | if (!(src_file.file->f_mode & FMODE_READ)) | 230 | if (!(src_file.file->f_mode & FMODE_READ)) |
| @@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, | |||
| 249 | goto out_fput; | 249 | goto out_fput; |
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | /* verify if ranges are overlapped within the same file */ | ||
| 253 | if (same_inode) { | ||
| 254 | if (dst_off + count > src_off && dst_off < src_off + count) | ||
| 255 | goto out_fput; | ||
| 256 | } | ||
| 257 | |||
| 252 | /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ | 258 | /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ |
| 253 | if (dst_inode < src_inode) { | 259 | if (same_inode) { |
| 260 | mutex_lock(&src_inode->i_mutex); | ||
| 261 | } else if (dst_inode < src_inode) { | ||
| 254 | mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); | 262 | mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); |
| 255 | mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); | 263 | mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); |
| 256 | } else { | 264 | } else { |
| @@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, | |||
| 275 | truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); | 283 | truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); |
| 276 | 284 | ||
| 277 | out_unlock: | 285 | out_unlock: |
| 278 | if (dst_inode < src_inode) { | 286 | if (same_inode) { |
| 287 | mutex_unlock(&src_inode->i_mutex); | ||
| 288 | } else if (dst_inode < src_inode) { | ||
| 279 | mutex_unlock(&src_inode->i_mutex); | 289 | mutex_unlock(&src_inode->i_mutex); |
| 280 | mutex_unlock(&dst_inode->i_mutex); | 290 | mutex_unlock(&dst_inode->i_mutex); |
| 281 | } else { | 291 | } else { |
| @@ -291,46 +301,31 @@ out_drop_write: | |||
| 291 | 301 | ||
| 292 | static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) | 302 | static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) |
| 293 | { | 303 | { |
| 294 | struct nfs_ioctl_clone_range_args args; | 304 | struct btrfs_ioctl_clone_range_args args; |
| 295 | 305 | ||
| 296 | if (copy_from_user(&args, argp, sizeof(args))) | 306 | if (copy_from_user(&args, argp, sizeof(args))) |
| 297 | return -EFAULT; | 307 | return -EFAULT; |
| 298 | 308 | ||
| 299 | return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count); | 309 | return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset, |
| 300 | } | 310 | args.dest_offset, args.src_length); |
| 301 | #else | ||
| 302 | static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, | ||
| 303 | u64 src_off, u64 dst_off, u64 count) | ||
| 304 | { | ||
| 305 | return -ENOTTY; | ||
| 306 | } | ||
| 307 | |||
| 308 | static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) | ||
| 309 | { | ||
| 310 | return -ENOTTY; | ||
| 311 | } | 311 | } |
| 312 | #endif /* CONFIG_NFS_V4_2 */ | ||
| 313 | 312 | ||
| 314 | long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 313 | long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 315 | { | 314 | { |
| 316 | void __user *argp = (void __user *)arg; | 315 | void __user *argp = (void __user *)arg; |
| 317 | 316 | ||
| 318 | switch (cmd) { | 317 | switch (cmd) { |
| 319 | case NFS_IOC_CLONE: | 318 | case BTRFS_IOC_CLONE: |
| 320 | return nfs42_ioctl_clone(file, arg, 0, 0, 0); | 319 | return nfs42_ioctl_clone(file, arg, 0, 0, 0); |
| 321 | case NFS_IOC_CLONE_RANGE: | 320 | case BTRFS_IOC_CLONE_RANGE: |
| 322 | return nfs42_ioctl_clone_range(file, argp); | 321 | return nfs42_ioctl_clone_range(file, argp); |
| 323 | } | 322 | } |
| 324 | 323 | ||
| 325 | return -ENOTTY; | 324 | return -ENOTTY; |
| 326 | } | 325 | } |
| 326 | #endif /* CONFIG_NFS_V4_2 */ | ||
| 327 | 327 | ||
| 328 | const struct file_operations nfs4_file_operations = { | 328 | const struct file_operations nfs4_file_operations = { |
| 329 | #ifdef CONFIG_NFS_V4_2 | ||
| 330 | .llseek = nfs4_file_llseek, | ||
| 331 | #else | ||
| 332 | .llseek = nfs_file_llseek, | ||
| 333 | #endif | ||
| 334 | .read_iter = nfs_file_read, | 329 | .read_iter = nfs_file_read, |
| 335 | .write_iter = nfs_file_write, | 330 | .write_iter = nfs_file_write, |
| 336 | .mmap = nfs_file_mmap, | 331 | .mmap = nfs_file_mmap, |
| @@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = { | |||
| 342 | .flock = nfs_flock, | 337 | .flock = nfs_flock, |
| 343 | .splice_read = nfs_file_splice_read, | 338 | .splice_read = nfs_file_splice_read, |
| 344 | .splice_write = iter_file_splice_write, | 339 | .splice_write = iter_file_splice_write, |
| 345 | #ifdef CONFIG_NFS_V4_2 | ||
| 346 | .fallocate = nfs42_fallocate, | ||
| 347 | #endif /* CONFIG_NFS_V4_2 */ | ||
| 348 | .check_flags = nfs_check_flags, | 340 | .check_flags = nfs_check_flags, |
| 349 | .setlease = simple_nosetlease, | 341 | .setlease = simple_nosetlease, |
| 350 | #ifdef CONFIG_COMPAT | 342 | #ifdef CONFIG_NFS_V4_2 |
| 343 | .llseek = nfs4_file_llseek, | ||
| 344 | .fallocate = nfs42_fallocate, | ||
| 351 | .unlocked_ioctl = nfs4_ioctl, | 345 | .unlocked_ioctl = nfs4_ioctl, |
| 352 | #else | ||
| 353 | .compat_ioctl = nfs4_ioctl, | 346 | .compat_ioctl = nfs4_ioctl, |
| 354 | #endif /* CONFIG_COMPAT */ | 347 | #else |
| 348 | .llseek = nfs_file_llseek, | ||
| 349 | #endif | ||
| 355 | }; | 350 | }; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 765a03559363..89818036f035 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) | |||
| 7866 | spin_unlock(&inode->i_lock); | 7866 | spin_unlock(&inode->i_lock); |
| 7867 | goto out_restart; | 7867 | goto out_restart; |
| 7868 | } | 7868 | } |
| 7869 | if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) | 7869 | if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN) |
| 7870 | goto out_restart; | 7870 | goto out_restart; |
| 7871 | out: | 7871 | out: |
| 7872 | dprintk("<-- %s\n", __func__); | 7872 | dprintk("<-- %s\n", __func__); |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index dfed4f5c8fcc..4e4441216804 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st | |||
| 3615 | status = 0; | 3615 | status = 0; |
| 3616 | if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) | 3616 | if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) |
| 3617 | goto out; | 3617 | goto out; |
| 3618 | bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS; | ||
| 3618 | status = -EIO; | 3619 | status = -EIO; |
| 3619 | /* Ignore borken servers that return unrequested attrs */ | 3620 | /* Ignore borken servers that return unrequested attrs */ |
| 3620 | if (unlikely(res == NULL)) | 3621 | if (unlikely(res == NULL)) |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 93496c059837..5a8ae2125b50 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
| @@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
| 872 | 872 | ||
| 873 | dprintk("--> %s\n", __func__); | 873 | dprintk("--> %s\n", __func__); |
| 874 | 874 | ||
| 875 | lgp = kzalloc(sizeof(*lgp), gfp_flags); | 875 | /* |
| 876 | if (lgp == NULL) | 876 | * Synchronously retrieve layout information from server and |
| 877 | return NULL; | 877 | * store in lseg. If we race with a concurrent seqid morphing |
| 878 | * op, then re-send the LAYOUTGET. | ||
| 879 | */ | ||
| 880 | do { | ||
| 881 | lgp = kzalloc(sizeof(*lgp), gfp_flags); | ||
| 882 | if (lgp == NULL) | ||
| 883 | return NULL; | ||
| 884 | |||
| 885 | i_size = i_size_read(ino); | ||
| 886 | |||
| 887 | lgp->args.minlength = PAGE_CACHE_SIZE; | ||
| 888 | if (lgp->args.minlength > range->length) | ||
| 889 | lgp->args.minlength = range->length; | ||
| 890 | if (range->iomode == IOMODE_READ) { | ||
| 891 | if (range->offset >= i_size) | ||
| 892 | lgp->args.minlength = 0; | ||
| 893 | else if (i_size - range->offset < lgp->args.minlength) | ||
| 894 | lgp->args.minlength = i_size - range->offset; | ||
| 895 | } | ||
| 896 | lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; | ||
| 897 | lgp->args.range = *range; | ||
| 898 | lgp->args.type = server->pnfs_curr_ld->id; | ||
| 899 | lgp->args.inode = ino; | ||
| 900 | lgp->args.ctx = get_nfs_open_context(ctx); | ||
| 901 | lgp->gfp_flags = gfp_flags; | ||
| 902 | lgp->cred = lo->plh_lc_cred; | ||
| 878 | 903 | ||
| 879 | i_size = i_size_read(ino); | 904 | lseg = nfs4_proc_layoutget(lgp, gfp_flags); |
| 905 | } while (lseg == ERR_PTR(-EAGAIN)); | ||
| 880 | 906 | ||
| 881 | lgp->args.minlength = PAGE_CACHE_SIZE; | ||
| 882 | if (lgp->args.minlength > range->length) | ||
| 883 | lgp->args.minlength = range->length; | ||
| 884 | if (range->iomode == IOMODE_READ) { | ||
| 885 | if (range->offset >= i_size) | ||
| 886 | lgp->args.minlength = 0; | ||
| 887 | else if (i_size - range->offset < lgp->args.minlength) | ||
| 888 | lgp->args.minlength = i_size - range->offset; | ||
| 889 | } | ||
| 890 | lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; | ||
| 891 | lgp->args.range = *range; | ||
| 892 | lgp->args.type = server->pnfs_curr_ld->id; | ||
| 893 | lgp->args.inode = ino; | ||
| 894 | lgp->args.ctx = get_nfs_open_context(ctx); | ||
| 895 | lgp->gfp_flags = gfp_flags; | ||
| 896 | lgp->cred = lo->plh_lc_cred; | ||
| 897 | |||
| 898 | /* Synchronously retrieve layout information from server and | ||
| 899 | * store in lseg. | ||
| 900 | */ | ||
| 901 | lseg = nfs4_proc_layoutget(lgp, gfp_flags); | ||
| 902 | if (IS_ERR(lseg)) { | 907 | if (IS_ERR(lseg)) { |
| 903 | switch (PTR_ERR(lseg)) { | 908 | switch (PTR_ERR(lseg)) { |
| 904 | case -ENOMEM: | 909 | case -ENOMEM: |
| @@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) | |||
| 1687 | /* existing state ID, make sure the sequence number matches. */ | 1692 | /* existing state ID, make sure the sequence number matches. */ |
| 1688 | if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { | 1693 | if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { |
| 1689 | dprintk("%s forget reply due to sequence\n", __func__); | 1694 | dprintk("%s forget reply due to sequence\n", __func__); |
| 1695 | status = -EAGAIN; | ||
| 1690 | goto out_forget_reply; | 1696 | goto out_forget_reply; |
| 1691 | } | 1697 | } |
| 1692 | pnfs_set_layout_stateid(lo, &res->stateid, false); | 1698 | pnfs_set_layout_stateid(lo, &res->stateid, false); |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 3b48ac25d8a7..a03f6f433075 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir, | |||
| 372 | mlog_errno(status); | 372 | mlog_errno(status); |
| 373 | goto leave; | 373 | goto leave; |
| 374 | } | 374 | } |
| 375 | /* update inode->i_mode after mask with "umask". */ | ||
| 376 | inode->i_mode = mode; | ||
| 375 | 377 | ||
| 376 | handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, | 378 | handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, |
| 377 | S_ISDIR(mode), | 379 | S_ISDIR(mode), |
diff --git a/fs/splice.c b/fs/splice.c index 801c21cd77fe..4cf700d50b40 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des | |||
| 809 | */ | 809 | */ |
| 810 | static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) | 810 | static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) |
| 811 | { | 811 | { |
| 812 | /* | ||
| 813 | * Check for signal early to make process killable when there are | ||
| 814 | * always buffers available | ||
| 815 | */ | ||
| 816 | if (signal_pending(current)) | ||
| 817 | return -ERESTARTSYS; | ||
| 818 | |||
| 812 | while (!pipe->nrbufs) { | 819 | while (!pipe->nrbufs) { |
| 813 | if (!pipe->writers) | 820 | if (!pipe->writers) |
| 814 | return 0; | 821 | return 0; |
| @@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, | |||
| 884 | 891 | ||
| 885 | splice_from_pipe_begin(sd); | 892 | splice_from_pipe_begin(sd); |
| 886 | do { | 893 | do { |
| 894 | cond_resched(); | ||
| 887 | ret = splice_from_pipe_next(pipe, sd); | 895 | ret = splice_from_pipe_next(pipe, sd); |
| 888 | if (ret > 0) | 896 | if (ret > 0) |
| 889 | ret = splice_from_pipe_feed(pipe, sd, actor); | 897 | ret = splice_from_pipe_feed(pipe, sd, actor); |
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 590ad9206e3f..02fa1dcc5969 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
| @@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) | |||
| 162 | inode->i_fop = &sysv_dir_operations; | 162 | inode->i_fop = &sysv_dir_operations; |
| 163 | inode->i_mapping->a_ops = &sysv_aops; | 163 | inode->i_mapping->a_ops = &sysv_aops; |
| 164 | } else if (S_ISLNK(inode->i_mode)) { | 164 | } else if (S_ISLNK(inode->i_mode)) { |
| 165 | if (inode->i_blocks) { | 165 | inode->i_op = &sysv_symlink_inode_operations; |
| 166 | inode->i_op = &sysv_symlink_inode_operations; | 166 | inode->i_mapping->a_ops = &sysv_aops; |
| 167 | inode->i_mapping->a_ops = &sysv_aops; | ||
| 168 | } else { | ||
| 169 | inode->i_op = &simple_symlink_inode_operations; | ||
| 170 | inode->i_link = (char *)SYSV_I(inode)->i_data; | ||
| 171 | nd_terminate_link(inode->i_link, inode->i_size, | ||
| 172 | sizeof(SYSV_I(inode)->i_data) - 1); | ||
| 173 | } | ||
| 174 | } else | 167 | } else |
| 175 | init_special_inode(inode, inode->i_mode, rdev); | 168 | init_special_inode(inode, inode->i_mode, rdev); |
| 176 | } | 169 | } |
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index e67aeac2aee0..4b74c97d297a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h | |||
| @@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, | |||
| 136 | 136 | ||
| 137 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state); | 137 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state); |
| 138 | 138 | ||
| 139 | void | ||
| 140 | drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); | ||
| 141 | |||
| 139 | int __must_check drm_atomic_check_only(struct drm_atomic_state *state); | 142 | int __must_check drm_atomic_check_only(struct drm_atomic_state *state); |
| 140 | int __must_check drm_atomic_commit(struct drm_atomic_state *state); | 143 | int __must_check drm_atomic_commit(struct drm_atomic_state *state); |
| 141 | int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); | 144 | int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 9c747cb14ad8..d2f41477f8ae 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
| @@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, | |||
| 342 | struct irq_phys_map *map, bool level); | 342 | struct irq_phys_map *map, bool level); |
| 343 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | 343 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); |
| 344 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); | 344 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); |
| 345 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); | ||
| 346 | struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, | 345 | struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, |
| 347 | int virt_irq, int irq); | 346 | int virt_irq, int irq); |
| 348 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); | 347 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); |
| 348 | bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); | ||
| 349 | 349 | ||
| 350 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) | 350 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) |
| 351 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) | 351 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3fe27f8d91f0..c0d2b7927c1f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
| 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 795 | struct scsi_ioctl_command __user *); | 795 | struct scsi_ioctl_command __user *); |
| 796 | 796 | ||
| 797 | extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
| 798 | extern void blk_queue_exit(struct request_queue *q); | ||
| 797 | extern void blk_start_queue(struct request_queue *q); | 799 | extern void blk_start_queue(struct request_queue *q); |
| 798 | extern void blk_stop_queue(struct request_queue *q); | 800 | extern void blk_stop_queue(struct request_queue *q); |
| 799 | extern void blk_sync_queue(struct request_queue *q); | 801 | extern void blk_sync_queue(struct request_queue *q); |
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index a8a335b7fce0..758a029011b1 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
| @@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro | |||
| 197 | int configfs_register_subsystem(struct configfs_subsystem *subsys); | 197 | int configfs_register_subsystem(struct configfs_subsystem *subsys); |
| 198 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); | 198 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); |
| 199 | 199 | ||
| 200 | int configfs_register_group(struct config_group *parent_group, | ||
| 201 | struct config_group *group); | ||
| 202 | void configfs_unregister_group(struct config_group *group); | ||
| 203 | |||
| 204 | struct config_group * | ||
| 205 | configfs_register_default_group(struct config_group *parent_group, | ||
| 206 | const char *name, | ||
| 207 | struct config_item_type *item_type); | ||
| 208 | void configfs_unregister_default_group(struct config_group *group); | ||
| 209 | |||
| 200 | /* These functions can sleep and can alloc with GFP_KERNEL */ | 210 | /* These functions can sleep and can alloc with GFP_KERNEL */ |
| 201 | /* WARNING: These cannot be called underneath configfs callbacks!! */ | 211 | /* WARNING: These cannot be called underneath configfs callbacks!! */ |
| 202 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); | 212 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 6523109e136d..8942af0813e3 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) | |||
| 271 | 271 | ||
| 272 | static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) | 272 | static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) |
| 273 | { | 273 | { |
| 274 | return gfp_flags & __GFP_DIRECT_RECLAIM; | 274 | return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | #ifdef CONFIG_HIGHMEM | 277 | #ifdef CONFIG_HIGHMEM |
diff --git a/include/linux/kref.h b/include/linux/kref.h index 484604d184be..e15828fd71f1 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/atomic.h> | 19 | #include <linux/atomic.h> |
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
| 22 | #include <linux/spinlock.h> | ||
| 23 | 22 | ||
| 24 | struct kref { | 23 | struct kref { |
| 25 | atomic_t refcount; | 24 | atomic_t refcount; |
| @@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) | |||
| 99 | return kref_sub(kref, 1, release); | 98 | return kref_sub(kref, 1, release); |
| 100 | } | 99 | } |
| 101 | 100 | ||
| 102 | /** | ||
| 103 | * kref_put_spinlock_irqsave - decrement refcount for object. | ||
| 104 | * @kref: object. | ||
| 105 | * @release: pointer to the function that will clean up the object when the | ||
| 106 | * last reference to the object is released. | ||
| 107 | * This pointer is required, and it is not acceptable to pass kfree | ||
| 108 | * in as this function. | ||
| 109 | * @lock: lock to take in release case | ||
| 110 | * | ||
| 111 | * Behaves identical to kref_put with one exception. If the reference count | ||
| 112 | * drops to zero, the lock will be taken atomically wrt dropping the reference | ||
| 113 | * count. The release function has to call spin_unlock() without _irqrestore. | ||
| 114 | */ | ||
| 115 | static inline int kref_put_spinlock_irqsave(struct kref *kref, | ||
| 116 | void (*release)(struct kref *kref), | ||
| 117 | spinlock_t *lock) | ||
| 118 | { | ||
| 119 | unsigned long flags; | ||
| 120 | |||
| 121 | WARN_ON(release == NULL); | ||
| 122 | if (atomic_add_unless(&kref->refcount, -1, 1)) | ||
| 123 | return 0; | ||
| 124 | spin_lock_irqsave(lock, flags); | ||
| 125 | if (atomic_dec_and_test(&kref->refcount)) { | ||
| 126 | release(kref); | ||
| 127 | local_irq_restore(flags); | ||
| 128 | return 1; | ||
| 129 | } | ||
| 130 | spin_unlock_irqrestore(lock, flags); | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline int kref_put_mutex(struct kref *kref, | 101 | static inline int kref_put_mutex(struct kref *kref, |
| 135 | void (*release)(struct kref *kref), | 102 | void (*release)(struct kref *kref), |
| 136 | struct mutex *lock) | 103 | struct mutex *lock) |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5706a2108f0a..c923350ca20a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) | |||
| 460 | (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ | 460 | (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ |
| 461 | idx++) | 461 | idx++) |
| 462 | 462 | ||
| 463 | static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) | ||
| 464 | { | ||
| 465 | struct kvm_vcpu *vcpu; | ||
| 466 | int i; | ||
| 467 | |||
| 468 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
| 469 | if (vcpu->vcpu_id == id) | ||
| 470 | return vcpu; | ||
| 471 | return NULL; | ||
| 472 | } | ||
| 473 | |||
| 463 | #define kvm_for_each_memslot(memslot, slots) \ | 474 | #define kvm_for_each_memslot(memslot, slots) \ |
| 464 | for (memslot = &slots->memslots[0]; \ | 475 | for (memslot = &slots->memslots[0]; \ |
| 465 | memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ | 476 | memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 69c9057e1ab8..3db5552b17d5 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
| @@ -58,7 +58,6 @@ enum { | |||
| 58 | struct nvm_id_group { | 58 | struct nvm_id_group { |
| 59 | u8 mtype; | 59 | u8 mtype; |
| 60 | u8 fmtype; | 60 | u8 fmtype; |
| 61 | u16 res16; | ||
| 62 | u8 num_ch; | 61 | u8 num_ch; |
| 63 | u8 num_lun; | 62 | u8 num_lun; |
| 64 | u8 num_pln; | 63 | u8 num_pln; |
| @@ -74,9 +73,9 @@ struct nvm_id_group { | |||
| 74 | u32 tbet; | 73 | u32 tbet; |
| 75 | u32 tbem; | 74 | u32 tbem; |
| 76 | u32 mpos; | 75 | u32 mpos; |
| 76 | u32 mccap; | ||
| 77 | u16 cpar; | 77 | u16 cpar; |
| 78 | u8 res[913]; | 78 | }; |
| 79 | } __packed; | ||
| 80 | 79 | ||
| 81 | struct nvm_addr_format { | 80 | struct nvm_addr_format { |
| 82 | u8 ch_offset; | 81 | u8 ch_offset; |
| @@ -91,19 +90,15 @@ struct nvm_addr_format { | |||
| 91 | u8 pg_len; | 90 | u8 pg_len; |
| 92 | u8 sect_offset; | 91 | u8 sect_offset; |
| 93 | u8 sect_len; | 92 | u8 sect_len; |
| 94 | u8 res[4]; | ||
| 95 | }; | 93 | }; |
| 96 | 94 | ||
| 97 | struct nvm_id { | 95 | struct nvm_id { |
| 98 | u8 ver_id; | 96 | u8 ver_id; |
| 99 | u8 vmnt; | 97 | u8 vmnt; |
| 100 | u8 cgrps; | 98 | u8 cgrps; |
| 101 | u8 res[5]; | ||
| 102 | u32 cap; | 99 | u32 cap; |
| 103 | u32 dom; | 100 | u32 dom; |
| 104 | struct nvm_addr_format ppaf; | 101 | struct nvm_addr_format ppaf; |
| 105 | u8 ppat; | ||
| 106 | u8 resv[224]; | ||
| 107 | struct nvm_id_group groups[4]; | 102 | struct nvm_id_group groups[4]; |
| 108 | } __packed; | 103 | } __packed; |
| 109 | 104 | ||
| @@ -123,39 +118,28 @@ struct nvm_tgt_instance { | |||
| 123 | #define NVM_VERSION_MINOR 0 | 118 | #define NVM_VERSION_MINOR 0 |
| 124 | #define NVM_VERSION_PATCH 0 | 119 | #define NVM_VERSION_PATCH 0 |
| 125 | 120 | ||
| 126 | #define NVM_SEC_BITS (8) | ||
| 127 | #define NVM_PL_BITS (6) | ||
| 128 | #define NVM_PG_BITS (16) | ||
| 129 | #define NVM_BLK_BITS (16) | 121 | #define NVM_BLK_BITS (16) |
| 130 | #define NVM_LUN_BITS (10) | 122 | #define NVM_PG_BITS (16) |
| 123 | #define NVM_SEC_BITS (8) | ||
| 124 | #define NVM_PL_BITS (8) | ||
| 125 | #define NVM_LUN_BITS (8) | ||
| 131 | #define NVM_CH_BITS (8) | 126 | #define NVM_CH_BITS (8) |
| 132 | 127 | ||
| 133 | struct ppa_addr { | 128 | struct ppa_addr { |
| 129 | /* Generic structure for all addresses */ | ||
| 134 | union { | 130 | union { |
| 135 | /* Channel-based PPA format in nand 4x2x2x2x8x10 */ | ||
| 136 | struct { | ||
| 137 | u64 ch : 4; | ||
| 138 | u64 sec : 2; /* 4 sectors per page */ | ||
| 139 | u64 pl : 2; /* 4 planes per LUN */ | ||
| 140 | u64 lun : 2; /* 4 LUNs per channel */ | ||
| 141 | u64 pg : 8; /* 256 pages per block */ | ||
| 142 | u64 blk : 10;/* 1024 blocks per plane */ | ||
| 143 | u64 resved : 36; | ||
| 144 | } chnl; | ||
| 145 | |||
| 146 | /* Generic structure for all addresses */ | ||
| 147 | struct { | 131 | struct { |
| 132 | u64 blk : NVM_BLK_BITS; | ||
| 133 | u64 pg : NVM_PG_BITS; | ||
| 148 | u64 sec : NVM_SEC_BITS; | 134 | u64 sec : NVM_SEC_BITS; |
| 149 | u64 pl : NVM_PL_BITS; | 135 | u64 pl : NVM_PL_BITS; |
| 150 | u64 pg : NVM_PG_BITS; | ||
| 151 | u64 blk : NVM_BLK_BITS; | ||
| 152 | u64 lun : NVM_LUN_BITS; | 136 | u64 lun : NVM_LUN_BITS; |
| 153 | u64 ch : NVM_CH_BITS; | 137 | u64 ch : NVM_CH_BITS; |
| 154 | } g; | 138 | } g; |
| 155 | 139 | ||
| 156 | u64 ppa; | 140 | u64 ppa; |
| 157 | }; | 141 | }; |
| 158 | } __packed; | 142 | }; |
| 159 | 143 | ||
| 160 | struct nvm_rq { | 144 | struct nvm_rq { |
| 161 | struct nvm_tgt_instance *ins; | 145 | struct nvm_tgt_instance *ins; |
| @@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |||
| 191 | struct nvm_block; | 175 | struct nvm_block; |
| 192 | 176 | ||
| 193 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | 177 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); |
| 194 | typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); | 178 | typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); |
| 195 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); | 179 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); |
| 196 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, | 180 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, |
| 197 | nvm_l2p_update_fn *, void *); | 181 | nvm_l2p_update_fn *, void *); |
| 198 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, | 182 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, |
| 199 | nvm_bb_update_fn *, void *); | 183 | nvm_bb_update_fn *, void *); |
| 200 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); | 184 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); |
| 201 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); | 185 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); |
| @@ -210,7 +194,7 @@ struct nvm_dev_ops { | |||
| 210 | nvm_id_fn *identity; | 194 | nvm_id_fn *identity; |
| 211 | nvm_get_l2p_tbl_fn *get_l2p_tbl; | 195 | nvm_get_l2p_tbl_fn *get_l2p_tbl; |
| 212 | nvm_op_bb_tbl_fn *get_bb_tbl; | 196 | nvm_op_bb_tbl_fn *get_bb_tbl; |
| 213 | nvm_op_set_bb_fn *set_bb; | 197 | nvm_op_set_bb_fn *set_bb_tbl; |
| 214 | 198 | ||
| 215 | nvm_submit_io_fn *submit_io; | 199 | nvm_submit_io_fn *submit_io; |
| 216 | nvm_erase_blk_fn *erase_block; | 200 | nvm_erase_blk_fn *erase_block; |
| @@ -220,7 +204,7 @@ struct nvm_dev_ops { | |||
| 220 | nvm_dev_dma_alloc_fn *dev_dma_alloc; | 204 | nvm_dev_dma_alloc_fn *dev_dma_alloc; |
| 221 | nvm_dev_dma_free_fn *dev_dma_free; | 205 | nvm_dev_dma_free_fn *dev_dma_free; |
| 222 | 206 | ||
| 223 | uint8_t max_phys_sect; | 207 | unsigned int max_phys_sect; |
| 224 | }; | 208 | }; |
| 225 | 209 | ||
| 226 | struct nvm_lun { | 210 | struct nvm_lun { |
| @@ -229,7 +213,9 @@ struct nvm_lun { | |||
| 229 | int lun_id; | 213 | int lun_id; |
| 230 | int chnl_id; | 214 | int chnl_id; |
| 231 | 215 | ||
| 216 | unsigned int nr_inuse_blocks; /* Number of used blocks */ | ||
| 232 | unsigned int nr_free_blocks; /* Number of unused blocks */ | 217 | unsigned int nr_free_blocks; /* Number of unused blocks */ |
| 218 | unsigned int nr_bad_blocks; /* Number of bad blocks */ | ||
| 233 | struct nvm_block *blocks; | 219 | struct nvm_block *blocks; |
| 234 | 220 | ||
| 235 | spinlock_t lock; | 221 | spinlock_t lock; |
| @@ -263,8 +249,7 @@ struct nvm_dev { | |||
| 263 | int blks_per_lun; | 249 | int blks_per_lun; |
| 264 | int sec_size; | 250 | int sec_size; |
| 265 | int oob_size; | 251 | int oob_size; |
| 266 | int addr_mode; | 252 | struct nvm_addr_format ppaf; |
| 267 | struct nvm_addr_format addr_format; | ||
| 268 | 253 | ||
| 269 | /* Calculated/Cached values. These do not reflect the actual usable | 254 | /* Calculated/Cached values. These do not reflect the actual usable |
| 270 | * blocks at run-time. | 255 | * blocks at run-time. |
| @@ -290,118 +275,45 @@ struct nvm_dev { | |||
| 290 | char name[DISK_NAME_LEN]; | 275 | char name[DISK_NAME_LEN]; |
| 291 | }; | 276 | }; |
| 292 | 277 | ||
| 293 | /* fallback conversion */ | 278 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, |
| 294 | static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, | 279 | struct ppa_addr r) |
| 295 | struct ppa_addr r) | ||
| 296 | { | ||
| 297 | struct ppa_addr l; | ||
| 298 | |||
| 299 | l.ppa = r.g.sec + | ||
| 300 | r.g.pg * dev->sec_per_pg + | ||
| 301 | r.g.blk * (dev->pgs_per_blk * | ||
| 302 | dev->sec_per_pg) + | ||
| 303 | r.g.lun * (dev->blks_per_lun * | ||
| 304 | dev->pgs_per_blk * | ||
| 305 | dev->sec_per_pg) + | ||
| 306 | r.g.ch * (dev->blks_per_lun * | ||
| 307 | dev->pgs_per_blk * | ||
| 308 | dev->luns_per_chnl * | ||
| 309 | dev->sec_per_pg); | ||
| 310 | |||
| 311 | return l; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* fallback conversion */ | ||
| 315 | static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, | ||
| 316 | struct ppa_addr r) | ||
| 317 | { | 280 | { |
| 318 | struct ppa_addr l; | 281 | struct ppa_addr l; |
| 319 | int secs, pgs, blks, luns; | ||
| 320 | sector_t ppa = r.ppa; | ||
| 321 | 282 | ||
| 322 | l.ppa = 0; | 283 | l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; |
| 323 | 284 | l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; | |
| 324 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | 285 | l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; |
| 325 | l.g.sec = secs; | 286 | l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; |
| 326 | 287 | l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; | |
| 327 | sector_div(ppa, dev->sec_per_pg); | 288 | l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; |
| 328 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
| 329 | l.g.pg = pgs; | ||
| 330 | |||
| 331 | sector_div(ppa, dev->pgs_per_blk); | ||
| 332 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
| 333 | l.g.blk = blks; | ||
| 334 | |||
| 335 | sector_div(ppa, dev->blks_per_lun); | ||
| 336 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
| 337 | l.g.lun = luns; | ||
| 338 | |||
| 339 | sector_div(ppa, dev->luns_per_chnl); | ||
| 340 | l.g.ch = ppa; | ||
| 341 | 289 | ||
| 342 | return l; | 290 | return l; |
| 343 | } | 291 | } |
| 344 | 292 | ||
| 345 | static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) | 293 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, |
| 294 | struct ppa_addr r) | ||
| 346 | { | 295 | { |
| 347 | struct ppa_addr l; | 296 | struct ppa_addr l; |
| 348 | 297 | ||
| 349 | l.ppa = 0; | 298 | /* |
| 350 | 299 | * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. | |
| 351 | l.chnl.sec = r.g.sec; | 300 | */ |
| 352 | l.chnl.pl = r.g.pl; | 301 | l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & |
| 353 | l.chnl.pg = r.g.pg; | 302 | (((1 << dev->ppaf.blk_len) - 1)); |
| 354 | l.chnl.blk = r.g.blk; | 303 | l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & |
| 355 | l.chnl.lun = r.g.lun; | 304 | (((1 << dev->ppaf.pg_len) - 1)); |
| 356 | l.chnl.ch = r.g.ch; | 305 | l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & |
| 357 | 306 | (((1 << dev->ppaf.sect_len) - 1)); | |
| 358 | return l; | 307 | l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & |
| 359 | } | 308 | (((1 << dev->ppaf.pln_len) - 1)); |
| 360 | 309 | l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & | |
| 361 | static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) | 310 | (((1 << dev->ppaf.lun_len) - 1)); |
| 362 | { | 311 | l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & |
| 363 | struct ppa_addr l; | 312 | (((1 << dev->ppaf.ch_len) - 1)); |
| 364 | |||
| 365 | l.ppa = 0; | ||
| 366 | |||
| 367 | l.g.sec = r.chnl.sec; | ||
| 368 | l.g.pl = r.chnl.pl; | ||
| 369 | l.g.pg = r.chnl.pg; | ||
| 370 | l.g.blk = r.chnl.blk; | ||
| 371 | l.g.lun = r.chnl.lun; | ||
| 372 | l.g.ch = r.chnl.ch; | ||
| 373 | 313 | ||
| 374 | return l; | 314 | return l; |
| 375 | } | 315 | } |
| 376 | 316 | ||
| 377 | static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, | ||
| 378 | struct ppa_addr gppa) | ||
| 379 | { | ||
| 380 | switch (dev->addr_mode) { | ||
| 381 | case NVM_ADDRMODE_LINEAR: | ||
| 382 | return __linear_to_generic_addr(dev, gppa); | ||
| 383 | case NVM_ADDRMODE_CHANNEL: | ||
| 384 | return __chnl_to_generic_addr(gppa); | ||
| 385 | default: | ||
| 386 | BUG(); | ||
| 387 | } | ||
| 388 | return gppa; | ||
| 389 | } | ||
| 390 | |||
| 391 | static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, | ||
| 392 | struct ppa_addr gppa) | ||
| 393 | { | ||
| 394 | switch (dev->addr_mode) { | ||
| 395 | case NVM_ADDRMODE_LINEAR: | ||
| 396 | return __generic_to_linear_addr(dev, gppa); | ||
| 397 | case NVM_ADDRMODE_CHANNEL: | ||
| 398 | return __generic_to_chnl_addr(gppa); | ||
| 399 | default: | ||
| 400 | BUG(); | ||
| 401 | } | ||
| 402 | return gppa; | ||
| 403 | } | ||
| 404 | |||
| 405 | static inline int ppa_empty(struct ppa_addr ppa_addr) | 317 | static inline int ppa_empty(struct ppa_addr ppa_addr) |
| 406 | { | 318 | { |
| 407 | return (ppa_addr.ppa == ADDR_EMPTY); | 319 | return (ppa_addr.ppa == ADDR_EMPTY); |
| @@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); | |||
| 468 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, | 380 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, |
| 469 | unsigned long); | 381 | unsigned long); |
| 470 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | 382 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); |
| 471 | typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); | 383 | typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); |
| 472 | 384 | ||
| 473 | struct nvmm_type { | 385 | struct nvmm_type { |
| 474 | const char *name; | 386 | const char *name; |
| @@ -492,7 +404,7 @@ struct nvmm_type { | |||
| 492 | nvmm_get_lun_fn *get_lun; | 404 | nvmm_get_lun_fn *get_lun; |
| 493 | 405 | ||
| 494 | /* Statistics */ | 406 | /* Statistics */ |
| 495 | nvmm_free_blocks_print_fn *free_blocks_print; | 407 | nvmm_lun_info_print_fn *lun_info_print; |
| 496 | struct list_head list; | 408 | struct list_head list; |
| 497 | }; | 409 | }; |
| 498 | 410 | ||
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index e6982ac3200d..a57f0dfb6db7 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
| 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 | 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 |
| 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 | 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 |
| 19 | #define MARVELL_PHY_ID_88E1540 0x01410eb0 | ||
| 19 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | 20 | #define MARVELL_PHY_ID_88E3016 0x01410e60 |
| 20 | 21 | ||
| 21 | /* struct phy_device dev_flags definitions */ | 22 | /* struct phy_device dev_flags definitions */ |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dd2097455a2e..1565324eb620 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | |||
| 453 | u8 lro_cap[0x1]; | 453 | u8 lro_cap[0x1]; |
| 454 | u8 lro_psh_flag[0x1]; | 454 | u8 lro_psh_flag[0x1]; |
| 455 | u8 lro_time_stamp[0x1]; | 455 | u8 lro_time_stamp[0x1]; |
| 456 | u8 reserved_0[0x6]; | 456 | u8 reserved_0[0x3]; |
| 457 | u8 self_lb_en_modifiable[0x1]; | ||
| 458 | u8 reserved_1[0x2]; | ||
| 457 | u8 max_lso_cap[0x5]; | 459 | u8 max_lso_cap[0x5]; |
| 458 | u8 reserved_1[0x4]; | 460 | u8 reserved_2[0x4]; |
| 459 | u8 rss_ind_tbl_cap[0x4]; | 461 | u8 rss_ind_tbl_cap[0x4]; |
| 460 | u8 reserved_2[0x3]; | 462 | u8 reserved_3[0x3]; |
| 461 | u8 tunnel_lso_const_out_ip_id[0x1]; | 463 | u8 tunnel_lso_const_out_ip_id[0x1]; |
| 462 | u8 reserved_3[0x2]; | 464 | u8 reserved_4[0x2]; |
| 463 | u8 tunnel_statless_gre[0x1]; | 465 | u8 tunnel_statless_gre[0x1]; |
| 464 | u8 tunnel_stateless_vxlan[0x1]; | 466 | u8 tunnel_stateless_vxlan[0x1]; |
| 465 | 467 | ||
| 466 | u8 reserved_4[0x20]; | 468 | u8 reserved_5[0x20]; |
| 467 | 469 | ||
| 468 | u8 reserved_5[0x10]; | 470 | u8 reserved_6[0x10]; |
| 469 | u8 lro_min_mss_size[0x10]; | 471 | u8 lro_min_mss_size[0x10]; |
| 470 | 472 | ||
| 471 | u8 reserved_6[0x120]; | 473 | u8 reserved_7[0x120]; |
| 472 | 474 | ||
| 473 | u8 lro_timer_supported_periods[4][0x20]; | 475 | u8 lro_timer_supported_periods[4][0x20]; |
| 474 | 476 | ||
| 475 | u8 reserved_7[0x600]; | 477 | u8 reserved_8[0x600]; |
| 476 | }; | 478 | }; |
| 477 | 479 | ||
| 478 | struct mlx5_ifc_roce_cap_bits { | 480 | struct mlx5_ifc_roce_cap_bits { |
| @@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits { | |||
| 4051 | }; | 4053 | }; |
| 4052 | 4054 | ||
| 4053 | struct mlx5_ifc_modify_tir_bitmask_bits { | 4055 | struct mlx5_ifc_modify_tir_bitmask_bits { |
| 4054 | u8 reserved[0x20]; | 4056 | u8 reserved_0[0x20]; |
| 4055 | 4057 | ||
| 4056 | u8 reserved1[0x1f]; | 4058 | u8 reserved_1[0x1b]; |
| 4059 | u8 self_lb_en[0x1]; | ||
| 4060 | u8 reserved_2[0x3]; | ||
| 4057 | u8 lro[0x1]; | 4061 | u8 lro[0x1]; |
| 4058 | }; | 4062 | }; |
| 4059 | 4063 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d20891465247..67bfac1abfc1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -2068,20 +2068,23 @@ struct pcpu_sw_netstats { | |||
| 2068 | struct u64_stats_sync syncp; | 2068 | struct u64_stats_sync syncp; |
| 2069 | }; | 2069 | }; |
| 2070 | 2070 | ||
| 2071 | #define netdev_alloc_pcpu_stats(type) \ | 2071 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
| 2072 | ({ \ | 2072 | ({ \ |
| 2073 | typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ | 2073 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ |
| 2074 | if (pcpu_stats) { \ | 2074 | if (pcpu_stats) { \ |
| 2075 | int __cpu; \ | 2075 | int __cpu; \ |
| 2076 | for_each_possible_cpu(__cpu) { \ | 2076 | for_each_possible_cpu(__cpu) { \ |
| 2077 | typeof(type) *stat; \ | 2077 | typeof(type) *stat; \ |
| 2078 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | 2078 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
| 2079 | u64_stats_init(&stat->syncp); \ | 2079 | u64_stats_init(&stat->syncp); \ |
| 2080 | } \ | 2080 | } \ |
| 2081 | } \ | 2081 | } \ |
| 2082 | pcpu_stats; \ | 2082 | pcpu_stats; \ |
| 2083 | }) | 2083 | }) |
| 2084 | 2084 | ||
| 2085 | #define netdev_alloc_pcpu_stats(type) \ | ||
| 2086 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL); | ||
| 2087 | |||
| 2085 | #include <linux/notifier.h> | 2088 | #include <linux/notifier.h> |
| 2086 | 2089 | ||
| 2087 | /* netdevice notifier chain. Please remember to update the rtnetlink | 2090 | /* netdevice notifier chain. Please remember to update the rtnetlink |
| @@ -3854,6 +3857,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev) | |||
| 3854 | return dev->priv_flags & IFF_EBRIDGE; | 3857 | return dev->priv_flags & IFF_EBRIDGE; |
| 3855 | } | 3858 | } |
| 3856 | 3859 | ||
| 3860 | static inline bool netif_is_bridge_port(const struct net_device *dev) | ||
| 3861 | { | ||
| 3862 | return dev->priv_flags & IFF_BRIDGE_PORT; | ||
| 3863 | } | ||
| 3864 | |||
| 3857 | static inline bool netif_is_ovs_master(const struct net_device *dev) | 3865 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
| 3858 | { | 3866 | { |
| 3859 | return dev->priv_flags & IFF_OPENVSWITCH; | 3867 | return dev->priv_flags & IFF_OPENVSWITCH; |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 48bb01edcf30..0e1f433cc4b7 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
| @@ -421,7 +421,7 @@ extern void ip_set_free(void *members); | |||
| 421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); | 421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); |
| 422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); | 422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); |
| 423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], | 423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], |
| 424 | size_t len); | 424 | size_t len, size_t align); |
| 425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], | 425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], |
| 426 | struct ip_set_ext *ext); | 426 | struct ip_set_ext *ext); |
| 427 | 427 | ||
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 187feabe557c..5fcd375ef175 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h | |||
| @@ -5,10 +5,13 @@ | |||
| 5 | #include <linux/netdevice.h> | 5 | #include <linux/netdevice.h> |
| 6 | 6 | ||
| 7 | #ifdef CONFIG_NETFILTER_INGRESS | 7 | #ifdef CONFIG_NETFILTER_INGRESS |
| 8 | static inline int nf_hook_ingress_active(struct sk_buff *skb) | 8 | static inline bool nf_hook_ingress_active(const struct sk_buff *skb) |
| 9 | { | 9 | { |
| 10 | return nf_hook_list_active(&skb->dev->nf_hooks_ingress, | 10 | #ifdef HAVE_JUMP_LABEL |
| 11 | NFPROTO_NETDEV, NF_NETDEV_INGRESS); | 11 | if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) |
| 12 | return false; | ||
| 13 | #endif | ||
| 14 | return !list_empty(&skb->dev->nf_hooks_ingress); | ||
| 12 | } | 15 | } |
| 13 | 16 | ||
| 14 | static inline int nf_hook_ingress(struct sk_buff *skb) | 17 | static inline int nf_hook_ingress(struct sk_buff *skb) |
| @@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb) | |||
| 16 | struct nf_hook_state state; | 19 | struct nf_hook_state state; |
| 17 | 20 | ||
| 18 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, | 21 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, |
| 19 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, | 22 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, |
| 20 | skb->dev, NULL, dev_net(skb->dev), NULL); | 23 | skb->dev, NULL, NULL, dev_net(skb->dev), NULL); |
| 21 | return nf_hook_slow(skb, &state); | 24 | return nf_hook_slow(skb, &state); |
| 22 | } | 25 | } |
| 23 | 26 | ||
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 570d630f98ae..11bbae44f4cb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -251,6 +251,7 @@ struct nfs4_layoutget { | |||
| 251 | struct nfs4_layoutget_res res; | 251 | struct nfs4_layoutget_res res; |
| 252 | struct rpc_cred *cred; | 252 | struct rpc_cred *cred; |
| 253 | gfp_t gfp_flags; | 253 | gfp_t gfp_flags; |
| 254 | long timeout; | ||
| 254 | }; | 255 | }; |
| 255 | 256 | ||
| 256 | struct nfs4_getdeviceinfo_args { | 257 | struct nfs4_getdeviceinfo_args { |
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 36112cdd665a..b90d8ec57c1f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
| @@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np, | |||
| 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
| 81 | const char *name) | 81 | const char *name) |
| 82 | { | 82 | { |
| 83 | return NULL; | 83 | return ERR_PTR(-ENODEV); |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
diff --git a/include/linux/pci.h b/include/linux/pci.h index e828e7b4afec..6ae25aae88fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -412,9 +412,18 @@ struct pci_host_bridge { | |||
| 412 | void (*release_fn)(struct pci_host_bridge *); | 412 | void (*release_fn)(struct pci_host_bridge *); |
| 413 | void *release_data; | 413 | void *release_data; |
| 414 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ | 414 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ |
| 415 | /* Resource alignment requirements */ | ||
| 416 | resource_size_t (*align_resource)(struct pci_dev *dev, | ||
| 417 | const struct resource *res, | ||
| 418 | resource_size_t start, | ||
| 419 | resource_size_t size, | ||
| 420 | resource_size_t align); | ||
| 415 | }; | 421 | }; |
| 416 | 422 | ||
| 417 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) | 423 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) |
| 424 | |||
| 425 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); | ||
| 426 | |||
| 418 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | 427 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, |
| 419 | void (*release_fn)(struct pci_host_bridge *), | 428 | void (*release_fn)(struct pci_host_bridge *), |
| 420 | void *release_data); | 429 | void *release_data); |
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index 80af3cd35ae4..72ce932c69b2 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h | |||
| @@ -71,7 +71,7 @@ struct scpi_ops { | |||
| 71 | int (*sensor_get_value)(u16, u32 *); | 71 | int (*sensor_get_value)(u16, u32 *); |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | #if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) | 74 | #if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) |
| 75 | struct scpi_ops *get_scpi_ops(void); | 75 | struct scpi_ops *get_scpi_ops(void); |
| 76 | #else | 76 | #else |
| 77 | static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } | 77 | static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } |
diff --git a/include/linux/signal.h b/include/linux/signal.h index ab1e0392b5ac..92557bbce7e7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); | |||
| 239 | extern void set_current_blocked(sigset_t *); | 239 | extern void set_current_blocked(sigset_t *); |
| 240 | extern void __set_current_blocked(const sigset_t *); | 240 | extern void __set_current_blocked(const sigset_t *); |
| 241 | extern int show_unhandled_signals; | 241 | extern int show_unhandled_signals; |
| 242 | extern int sigsuspend(sigset_t *); | ||
| 243 | 242 | ||
| 244 | struct sigaction { | 243 | struct sigaction { |
| 245 | #ifndef __ARCH_HAS_IRIX_SIGACTION | 244 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 7c82e3b307a3..2037a861e367 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -158,6 +158,24 @@ size_t ksize(const void *); | |||
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | /* | 160 | /* |
| 161 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 162 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 163 | * aligned buffers. | ||
| 164 | */ | ||
| 165 | #ifndef ARCH_SLAB_MINALIGN | ||
| 166 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 167 | #endif | ||
| 168 | |||
| 169 | /* | ||
| 170 | * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned | ||
| 171 | * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN | ||
| 172 | * aligned pointers. | ||
| 173 | */ | ||
| 174 | #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) | ||
| 175 | #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) | ||
| 176 | #define __assume_page_alignment __assume_aligned(PAGE_SIZE) | ||
| 177 | |||
| 178 | /* | ||
| 161 | * Kmalloc array related definitions | 179 | * Kmalloc array related definitions |
| 162 | */ | 180 | */ |
| 163 | 181 | ||
| @@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size) | |||
| 286 | } | 304 | } |
| 287 | #endif /* !CONFIG_SLOB */ | 305 | #endif /* !CONFIG_SLOB */ |
| 288 | 306 | ||
| 289 | void *__kmalloc(size_t size, gfp_t flags); | 307 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; |
| 290 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); | 308 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; |
| 291 | void kmem_cache_free(struct kmem_cache *, void *); | 309 | void kmem_cache_free(struct kmem_cache *, void *); |
| 292 | 310 | ||
| 293 | /* | 311 | /* |
| @@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *); | |||
| 298 | * Note that interrupts must be enabled when calling these functions. | 316 | * Note that interrupts must be enabled when calling these functions. |
| 299 | */ | 317 | */ |
| 300 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 318 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
| 301 | bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 319 | int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
| 302 | 320 | ||
| 303 | #ifdef CONFIG_NUMA | 321 | #ifdef CONFIG_NUMA |
| 304 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 322 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; |
| 305 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 323 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; |
| 306 | #else | 324 | #else |
| 307 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | 325 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) |
| 308 | { | 326 | { |
| @@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | |||
| 316 | #endif | 334 | #endif |
| 317 | 335 | ||
| 318 | #ifdef CONFIG_TRACING | 336 | #ifdef CONFIG_TRACING |
| 319 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | 337 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; |
| 320 | 338 | ||
| 321 | #ifdef CONFIG_NUMA | 339 | #ifdef CONFIG_NUMA |
| 322 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | 340 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| 323 | gfp_t gfpflags, | 341 | gfp_t gfpflags, |
| 324 | int node, size_t size); | 342 | int node, size_t size) __assume_slab_alignment; |
| 325 | #else | 343 | #else |
| 326 | static __always_inline void * | 344 | static __always_inline void * |
| 327 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | 345 | kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| @@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
| 354 | } | 372 | } |
| 355 | #endif /* CONFIG_TRACING */ | 373 | #endif /* CONFIG_TRACING */ |
| 356 | 374 | ||
| 357 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); | 375 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 358 | 376 | ||
| 359 | #ifdef CONFIG_TRACING | 377 | #ifdef CONFIG_TRACING |
| 360 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | 378 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 361 | #else | 379 | #else |
| 362 | static __always_inline void * | 380 | static __always_inline void * |
| 363 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | 381 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
| @@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 482 | return __kmalloc_node(size, flags, node); | 500 | return __kmalloc_node(size, flags, node); |
| 483 | } | 501 | } |
| 484 | 502 | ||
| 485 | /* | ||
| 486 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 487 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 488 | * aligned buffers. | ||
| 489 | */ | ||
| 490 | #ifndef ARCH_SLAB_MINALIGN | ||
| 491 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 492 | #endif | ||
| 493 | |||
| 494 | struct memcg_cache_array { | 503 | struct memcg_cache_array { |
| 495 | struct rcu_head rcu; | 504 | struct rcu_head rcu; |
| 496 | struct kmem_cache *entries[0]; | 505 | struct kmem_cache *entries[0]; |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a156b82dd14c..c2b66a277e98 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename, | |||
| 524 | asmlinkage long sys_lchown(const char __user *filename, | 524 | asmlinkage long sys_lchown(const char __user *filename, |
| 525 | uid_t user, gid_t group); | 525 | uid_t user, gid_t group); |
| 526 | asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); | 526 | asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); |
| 527 | #ifdef CONFIG_UID16 | 527 | #ifdef CONFIG_HAVE_UID16 |
| 528 | asmlinkage long sys_chown16(const char __user *filename, | 528 | asmlinkage long sys_chown16(const char __user *filename, |
| 529 | old_uid_t user, old_gid_t group); | 529 | old_uid_t user, old_gid_t group); |
| 530 | asmlinkage long sys_lchown16(const char __user *filename, | 530 | asmlinkage long sys_lchown16(const char __user *filename, |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 4014a59828fc..613c29bd6baf 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister( | |||
| 438 | static inline int thermal_zone_bind_cooling_device( | 438 | static inline int thermal_zone_bind_cooling_device( |
| 439 | struct thermal_zone_device *tz, int trip, | 439 | struct thermal_zone_device *tz, int trip, |
| 440 | struct thermal_cooling_device *cdev, | 440 | struct thermal_cooling_device *cdev, |
| 441 | unsigned long upper, unsigned long lower) | 441 | unsigned long upper, unsigned long lower, |
| 442 | unsigned int weight) | ||
| 442 | { return -ENODEV; } | 443 | { return -ENODEV; } |
| 443 | static inline int thermal_zone_unbind_cooling_device( | 444 | static inline int thermal_zone_unbind_cooling_device( |
| 444 | struct thermal_zone_device *tz, int trip, | 445 | struct thermal_zone_device *tz, int trip, |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 5b04b0a5375b..5e31f1b99037 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); | |||
| 607 | 607 | ||
| 608 | /* tty_audit.c */ | 608 | /* tty_audit.c */ |
| 609 | #ifdef CONFIG_AUDIT | 609 | #ifdef CONFIG_AUDIT |
| 610 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 610 | extern void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 611 | size_t size, unsigned icanon); | 611 | size_t size, unsigned icanon); |
| 612 | extern void tty_audit_exit(void); | 612 | extern void tty_audit_exit(void); |
| 613 | extern void tty_audit_fork(struct signal_struct *sig); | 613 | extern void tty_audit_fork(struct signal_struct *sig); |
| @@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); | |||
| 615 | extern void tty_audit_push(struct tty_struct *tty); | 615 | extern void tty_audit_push(struct tty_struct *tty); |
| 616 | extern int tty_audit_push_current(void); | 616 | extern int tty_audit_push_current(void); |
| 617 | #else | 617 | #else |
| 618 | static inline void tty_audit_add_data(struct tty_struct *tty, | 618 | static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 619 | unsigned char *data, size_t size, unsigned icanon) | 619 | size_t size, unsigned icanon) |
| 620 | { | 620 | { |
| 621 | } | 621 | } |
| 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) | 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) |
diff --git a/include/linux/types.h b/include/linux/types.h index 70d8500bddf1..70dd3dfde631 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t; | |||
| 35 | 35 | ||
| 36 | typedef unsigned long uintptr_t; | 36 | typedef unsigned long uintptr_t; |
| 37 | 37 | ||
| 38 | #ifdef CONFIG_UID16 | 38 | #ifdef CONFIG_HAVE_UID16 |
| 39 | /* This is defined by include/asm-{arch}/posix_types.h */ | 39 | /* This is defined by include/asm-{arch}/posix_types.h */ |
| 40 | typedef __kernel_old_uid_t old_uid_t; | 40 | typedef __kernel_old_uid_t old_uid_t; |
| 41 | typedef __kernel_old_gid_t old_gid_t; | 41 | typedef __kernel_old_gid_t old_gid_t; |
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index aaf9700fc9e5..fb961a576abe 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
| @@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) | |||
| 167 | 167 | ||
| 168 | static inline u32 rt6_get_cookie(const struct rt6_info *rt) | 168 | static inline u32 rt6_get_cookie(const struct rt6_info *rt) |
| 169 | { | 169 | { |
| 170 | if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE)) | 170 | if (rt->rt6i_flags & RTF_PCPU || |
| 171 | (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from)) | ||
| 171 | rt = (struct rt6_info *)(rt->dst.from); | 172 | rt = (struct rt6_info *)(rt->dst.from); |
| 172 | 173 | ||
| 173 | return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; | 174 | return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index aaee6fa02cf1..ff788b665277 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
| @@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
| 90 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); | 90 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
| 91 | 91 | ||
| 92 | if (net_xmit_eval(err) == 0) { | 92 | if (net_xmit_eval(err) == 0) { |
| 93 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | 93 | struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); |
| 94 | u64_stats_update_begin(&tstats->syncp); | 94 | u64_stats_update_begin(&tstats->syncp); |
| 95 | tstats->tx_bytes += pkt_len; | 95 | tstats->tx_bytes += pkt_len; |
| 96 | tstats->tx_packets++; | 96 | tstats->tx_packets++; |
| 97 | u64_stats_update_end(&tstats->syncp); | 97 | u64_stats_update_end(&tstats->syncp); |
| 98 | put_cpu_ptr(tstats); | ||
| 98 | } else { | 99 | } else { |
| 99 | stats->tx_errors++; | 100 | stats->tx_errors++; |
| 100 | stats->tx_aborted_errors++; | 101 | stats->tx_aborted_errors++; |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index f6dafec9102c..62a750a6a8f8 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
| @@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err, | |||
| 287 | struct pcpu_sw_netstats __percpu *stats) | 287 | struct pcpu_sw_netstats __percpu *stats) |
| 288 | { | 288 | { |
| 289 | if (err > 0) { | 289 | if (err > 0) { |
| 290 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); | 290 | struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats); |
| 291 | 291 | ||
| 292 | u64_stats_update_begin(&tstats->syncp); | 292 | u64_stats_update_begin(&tstats->syncp); |
| 293 | tstats->tx_bytes += err; | 293 | tstats->tx_bytes += err; |
| 294 | tstats->tx_packets++; | 294 | tstats->tx_packets++; |
| 295 | u64_stats_update_end(&tstats->syncp); | 295 | u64_stats_update_end(&tstats->syncp); |
| 296 | put_cpu_ptr(tstats); | ||
| 296 | } else if (err < 0) { | 297 | } else if (err < 0) { |
| 297 | err_stats->tx_errors++; | 298 | err_stats->tx_errors++; |
| 298 | err_stats->tx_aborted_errors++; | 299 | err_stats->tx_aborted_errors++; |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c9149cc0a02d..4bd7508bedc9 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
| @@ -618,6 +618,8 @@ struct nft_expr_ops { | |||
| 618 | void (*eval)(const struct nft_expr *expr, | 618 | void (*eval)(const struct nft_expr *expr, |
| 619 | struct nft_regs *regs, | 619 | struct nft_regs *regs, |
| 620 | const struct nft_pktinfo *pkt); | 620 | const struct nft_pktinfo *pkt); |
| 621 | int (*clone)(struct nft_expr *dst, | ||
| 622 | const struct nft_expr *src); | ||
| 621 | unsigned int size; | 623 | unsigned int size; |
| 622 | 624 | ||
| 623 | int (*init)(const struct nft_ctx *ctx, | 625 | int (*init)(const struct nft_ctx *ctx, |
| @@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr); | |||
| 660 | int nft_expr_dump(struct sk_buff *skb, unsigned int attr, | 662 | int nft_expr_dump(struct sk_buff *skb, unsigned int attr, |
| 661 | const struct nft_expr *expr); | 663 | const struct nft_expr *expr); |
| 662 | 664 | ||
| 663 | static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | 665 | static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) |
| 664 | { | 666 | { |
| 667 | int err; | ||
| 668 | |||
| 665 | __module_get(src->ops->type->owner); | 669 | __module_get(src->ops->type->owner); |
| 666 | memcpy(dst, src, src->ops->size); | 670 | if (src->ops->clone) { |
| 671 | dst->ops = src->ops; | ||
| 672 | err = src->ops->clone(dst, src); | ||
| 673 | if (err < 0) | ||
| 674 | return err; | ||
| 675 | } else { | ||
| 676 | memcpy(dst, src, src->ops->size); | ||
| 677 | } | ||
| 678 | return 0; | ||
| 667 | } | 679 | } |
| 668 | 680 | ||
| 669 | /** | 681 | /** |
diff --git a/include/net/sock.h b/include/net/sock.h index bbf7c2cf15b4..7f89e4ba18d1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -2226,6 +2226,31 @@ static inline bool sk_listener(const struct sock *sk) | |||
| 2226 | return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); | 2226 | return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); |
| 2227 | } | 2227 | } |
| 2228 | 2228 | ||
| 2229 | /** | ||
| 2230 | * sk_state_load - read sk->sk_state for lockless contexts | ||
| 2231 | * @sk: socket pointer | ||
| 2232 | * | ||
| 2233 | * Paired with sk_state_store(). Used in places we do not hold socket lock : | ||
| 2234 | * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... | ||
| 2235 | */ | ||
| 2236 | static inline int sk_state_load(const struct sock *sk) | ||
| 2237 | { | ||
| 2238 | return smp_load_acquire(&sk->sk_state); | ||
| 2239 | } | ||
| 2240 | |||
| 2241 | /** | ||
| 2242 | * sk_state_store - update sk->sk_state | ||
| 2243 | * @sk: socket pointer | ||
| 2244 | * @newstate: new state | ||
| 2245 | * | ||
| 2246 | * Paired with sk_state_load(). Should be used in contexts where | ||
| 2247 | * state change might impact lockless readers. | ||
| 2248 | */ | ||
| 2249 | static inline void sk_state_store(struct sock *sk, int newstate) | ||
| 2250 | { | ||
| 2251 | smp_store_release(&sk->sk_state, newstate); | ||
| 2252 | } | ||
| 2253 | |||
| 2229 | void sock_enable_timestamp(struct sock *sk, int flag); | 2254 | void sock_enable_timestamp(struct sock *sk, int flag); |
| 2230 | int sock_get_timestamp(struct sock *, struct timeval __user *); | 2255 | int sock_get_timestamp(struct sock *, struct timeval __user *); |
| 2231 | int sock_get_timestampns(struct sock *, struct timespec __user *); | 2256 | int sock_get_timestampns(struct sock *, struct timespec __user *); |
diff --git a/include/net/switchdev.h b/include/net/switchdev.h index bc865e244efe..1d22ce9f352e 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h | |||
| @@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb, | |||
| 323 | struct net_device *filter_dev, | 323 | struct net_device *filter_dev, |
| 324 | int idx) | 324 | int idx) |
| 325 | { | 325 | { |
| 326 | return -EOPNOTSUPP; | 326 | return idx; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static inline void switchdev_port_fwd_mark_set(struct net_device *dev, | 329 | static inline void switchdev_port_fwd_mark_set(struct net_device *dev, |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0a2c74008e53..aabf0aca0171 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
| @@ -474,7 +474,7 @@ struct se_cmd { | |||
| 474 | struct completion cmd_wait_comp; | 474 | struct completion cmd_wait_comp; |
| 475 | const struct target_core_fabric_ops *se_tfo; | 475 | const struct target_core_fabric_ops *se_tfo; |
| 476 | sense_reason_t (*execute_cmd)(struct se_cmd *); | 476 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
| 477 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); | 477 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); |
| 478 | void *protocol_data; | 478 | void *protocol_data; |
| 479 | 479 | ||
| 480 | unsigned char *t_task_cdb; | 480 | unsigned char *t_task_cdb; |
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 654bae3f1a38..5e6296160361 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h | |||
| @@ -33,17 +33,6 @@ | |||
| 33 | 33 | ||
| 34 | #define NFS_PIPE_DIRNAME "nfs" | 34 | #define NFS_PIPE_DIRNAME "nfs" |
| 35 | 35 | ||
| 36 | /* NFS ioctls */ | ||
| 37 | /* Let's follow btrfs lead on CLONE to avoid messing userspace */ | ||
| 38 | #define NFS_IOC_CLONE _IOW(0x94, 9, int) | ||
| 39 | #define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int) | ||
| 40 | |||
| 41 | struct nfs_ioctl_clone_range_args { | ||
| 42 | __s64 src_fd; | ||
| 43 | __u64 src_off, count; | ||
| 44 | __u64 dst_off; | ||
| 45 | }; | ||
| 46 | |||
| 47 | /* | 36 | /* |
| 48 | * NFS stats. The good thing with these values is that NFSv3 errors are | 37 | * NFS stats. The good thing with these values is that NFSv3 errors are |
| 49 | * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which | 38 | * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which |
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 6e5344112419..db545cbcdb89 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
| @@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod, | |||
| 294 | 294 | ||
| 295 | for (reloc = obj->relocs; reloc->name; reloc++) { | 295 | for (reloc = obj->relocs; reloc->name; reloc++) { |
| 296 | if (!klp_is_module(obj)) { | 296 | if (!klp_is_module(obj)) { |
| 297 | |||
| 298 | #if defined(CONFIG_RANDOMIZE_BASE) | ||
| 299 | /* If KASLR has been enabled, adjust old value accordingly */ | ||
| 300 | if (kaslr_enabled()) | ||
| 301 | reloc->val += kaslr_offset(); | ||
| 302 | #endif | ||
| 297 | ret = klp_verify_vmlinux_symbol(reloc->name, | 303 | ret = klp_verify_vmlinux_symbol(reloc->name, |
| 298 | reloc->val); | 304 | reloc->val); |
| 299 | if (ret) | 305 | if (ret) |
diff --git a/kernel/panic.c b/kernel/panic.c index 4579dbb7ed87..4b150bc0c6c1 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -152,8 +152,11 @@ void panic(const char *fmt, ...) | |||
| 152 | * We may have ended up stopping the CPU holding the lock (in | 152 | * We may have ended up stopping the CPU holding the lock (in |
| 153 | * smp_send_stop()) while still having some valuable data in the console | 153 | * smp_send_stop()) while still having some valuable data in the console |
| 154 | * buffer. Try to acquire the lock then release it regardless of the | 154 | * buffer. Try to acquire the lock then release it regardless of the |
| 155 | * result. The release will also print the buffers out. | 155 | * result. The release will also print the buffers out. Locks debug |
| 156 | * should be disabled to avoid reporting bad unlock balance when | ||
| 157 | * panic() is not being callled from OOPS. | ||
| 156 | */ | 158 | */ |
| 159 | debug_locks_off(); | ||
| 157 | console_trylock(); | 160 | console_trylock(); |
| 158 | console_unlock(); | 161 | console_unlock(); |
| 159 | 162 | ||
diff --git a/kernel/pid.c b/kernel/pid.c index ca368793808e..78b3d9f80d44 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
| @@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type) | |||
| 467 | rcu_read_lock(); | 467 | rcu_read_lock(); |
| 468 | if (type != PIDTYPE_PID) | 468 | if (type != PIDTYPE_PID) |
| 469 | task = task->group_leader; | 469 | task = task->group_leader; |
| 470 | pid = get_pid(task->pids[type].pid); | 470 | pid = get_pid(rcu_dereference(task->pids[type].pid)); |
| 471 | rcu_read_unlock(); | 471 | rcu_read_unlock(); |
| 472 | return pid; | 472 | return pid; |
| 473 | } | 473 | } |
| @@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | |||
| 528 | if (likely(pid_alive(task))) { | 528 | if (likely(pid_alive(task))) { |
| 529 | if (type != PIDTYPE_PID) | 529 | if (type != PIDTYPE_PID) |
| 530 | task = task->group_leader; | 530 | task = task->group_leader; |
| 531 | nr = pid_nr_ns(task->pids[type].pid, ns); | 531 | nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); |
| 532 | } | 532 | } |
| 533 | rcu_read_unlock(); | 533 | rcu_read_unlock(); |
| 534 | 534 | ||
diff --git a/kernel/signal.c b/kernel/signal.c index c0b01fe24bbd..f3f1f7a972fd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause) | |||
| 3503 | 3503 | ||
| 3504 | #endif | 3504 | #endif |
| 3505 | 3505 | ||
| 3506 | int sigsuspend(sigset_t *set) | 3506 | static int sigsuspend(sigset_t *set) |
| 3507 | { | 3507 | { |
| 3508 | current->saved_sigmask = current->blocked; | 3508 | current->saved_sigmask = current->blocked; |
| 3509 | set_current_blocked(set); | 3509 | set_current_blocked(set); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 75f1d05ea82d..9c6045a27ba3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event) | |||
| 1887 | return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; | 1887 | return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; |
| 1888 | } | 1888 | } |
| 1889 | 1889 | ||
| 1890 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | ||
| 1891 | { | ||
| 1892 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; | ||
| 1893 | cpu_buffer->reader_page->read = 0; | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | static void rb_inc_iter(struct ring_buffer_iter *iter) | 1890 | static void rb_inc_iter(struct ring_buffer_iter *iter) |
| 1897 | { | 1891 | { |
| 1898 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1892 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| @@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
| 2803 | 2797 | ||
| 2804 | event = __rb_reserve_next(cpu_buffer, &info); | 2798 | event = __rb_reserve_next(cpu_buffer, &info); |
| 2805 | 2799 | ||
| 2806 | if (unlikely(PTR_ERR(event) == -EAGAIN)) | 2800 | if (unlikely(PTR_ERR(event) == -EAGAIN)) { |
| 2801 | if (info.add_timestamp) | ||
| 2802 | info.length -= RB_LEN_TIME_EXTEND; | ||
| 2807 | goto again; | 2803 | goto again; |
| 2804 | } | ||
| 2808 | 2805 | ||
| 2809 | if (!event) | 2806 | if (!event) |
| 2810 | goto out_fail; | 2807 | goto out_fail; |
| @@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 3626 | 3623 | ||
| 3627 | /* Finally update the reader page to the new head */ | 3624 | /* Finally update the reader page to the new head */ |
| 3628 | cpu_buffer->reader_page = reader; | 3625 | cpu_buffer->reader_page = reader; |
| 3629 | rb_reset_reader_page(cpu_buffer); | 3626 | cpu_buffer->reader_page->read = 0; |
| 3630 | 3627 | ||
| 3631 | if (overwrite != cpu_buffer->last_overrun) { | 3628 | if (overwrite != cpu_buffer->last_overrun) { |
| 3632 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; | 3629 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; |
| @@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 3636 | goto again; | 3633 | goto again; |
| 3637 | 3634 | ||
| 3638 | out: | 3635 | out: |
| 3636 | /* Update the read_stamp on the first event */ | ||
| 3637 | if (reader && reader->read == 0) | ||
| 3638 | cpu_buffer->read_stamp = reader->page->time_stamp; | ||
| 3639 | |||
| 3639 | arch_spin_unlock(&cpu_buffer->lock); | 3640 | arch_spin_unlock(&cpu_buffer->lock); |
| 3640 | local_irq_restore(flags); | 3641 | local_irq_restore(flags); |
| 3641 | 3642 | ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c29ddebc8705..62fe06bb7d04 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma, | |||
| 2009 | /* | 2009 | /* |
| 2010 | * Be somewhat over-protective like KSM for now! | 2010 | * Be somewhat over-protective like KSM for now! |
| 2011 | */ | 2011 | */ |
| 2012 | if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) | 2012 | if (*vm_flags & VM_NO_THP) |
| 2013 | return -EINVAL; | 2013 | return -EINVAL; |
| 2014 | *vm_flags &= ~VM_NOHUGEPAGE; | 2014 | *vm_flags &= ~VM_NOHUGEPAGE; |
| 2015 | *vm_flags |= VM_HUGEPAGE; | 2015 | *vm_flags |= VM_HUGEPAGE; |
| @@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma, | |||
| 2025 | /* | 2025 | /* |
| 2026 | * Be somewhat over-protective like KSM for now! | 2026 | * Be somewhat over-protective like KSM for now! |
| 2027 | */ | 2027 | */ |
| 2028 | if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) | 2028 | if (*vm_flags & VM_NO_THP) |
| 2029 | return -EINVAL; | 2029 | return -EINVAL; |
| 2030 | *vm_flags &= ~VM_HUGEPAGE; | 2030 | *vm_flags &= ~VM_HUGEPAGE; |
| 2031 | *vm_flags |= VM_NOHUGEPAGE; | 2031 | *vm_flags |= VM_NOHUGEPAGE; |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d41b21bce6a0..bc0a8d8b8f42 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
| 20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 22 | #include <linux/kmemleak.h> | ||
| 22 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
| 23 | #include <linux/memory.h> | 24 | #include <linux/memory.h> |
| 24 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
| @@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size) | |||
| 444 | 445 | ||
| 445 | if (ret) { | 446 | if (ret) { |
| 446 | find_vm_area(addr)->flags |= VM_KASAN; | 447 | find_vm_area(addr)->flags |= VM_KASAN; |
| 448 | kmemleak_ignore(ret); | ||
| 447 | return 0; | 449 | return 0; |
| 448 | } | 450 | } |
| 449 | 451 | ||
diff --git a/mm/memory.c b/mm/memory.c index deb679c31f2a..c387430f06c3 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3015 | } else { | 3015 | } else { |
| 3016 | /* | 3016 | /* |
| 3017 | * The fault handler has no page to lock, so it holds | 3017 | * The fault handler has no page to lock, so it holds |
| 3018 | * i_mmap_lock for write to protect against truncate. | 3018 | * i_mmap_lock for read to protect against truncate. |
| 3019 | */ | 3019 | */ |
| 3020 | i_mmap_unlock_write(vma->vm_file->f_mapping); | 3020 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
| 3021 | } | 3021 | } |
| 3022 | goto uncharge_out; | 3022 | goto uncharge_out; |
| 3023 | } | 3023 | } |
| @@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3031 | } else { | 3031 | } else { |
| 3032 | /* | 3032 | /* |
| 3033 | * The fault handler has no page to lock, so it holds | 3033 | * The fault handler has no page to lock, so it holds |
| 3034 | * i_mmap_lock for write to protect against truncate. | 3034 | * i_mmap_lock for read to protect against truncate. |
| 3035 | */ | 3035 | */ |
| 3036 | i_mmap_unlock_write(vma->vm_file->f_mapping); | 3036 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
| 3037 | } | 3037 | } |
| 3038 | return ret; | 3038 | return ret; |
| 3039 | uncharge_out: | 3039 | uncharge_out: |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2c90357c34ea..3e4d65445fa7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
| 1542 | for (;;) { | 1542 | for (;;) { |
| 1543 | unsigned long now = jiffies; | 1543 | unsigned long now = jiffies; |
| 1544 | unsigned long dirty, thresh, bg_thresh; | 1544 | unsigned long dirty, thresh, bg_thresh; |
| 1545 | unsigned long m_dirty, m_thresh, m_bg_thresh; | 1545 | unsigned long m_dirty = 0; /* stop bogus uninit warnings */ |
| 1546 | unsigned long m_thresh = 0; | ||
| 1547 | unsigned long m_bg_thresh = 0; | ||
| 1546 | 1548 | ||
| 1547 | /* | 1549 | /* |
| 1548 | * Unstable writes are a feature of certain networked | 1550 | * Unstable writes are a feature of certain networked |
| @@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | |||
| 3419 | } | 3419 | } |
| 3420 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 3420 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 3421 | 3421 | ||
| 3422 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 3422 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 3423 | void **p) | 3423 | void **p) |
| 3424 | { | 3424 | { |
| 3425 | return __kmem_cache_alloc_bulk(s, flags, size, p); | 3425 | return __kmem_cache_alloc_bulk(s, flags, size, p); |
| @@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
| 170 | * may be allocated or freed using these operations. | 170 | * may be allocated or freed using these operations. |
| 171 | */ | 171 | */ |
| 172 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 172 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
| 173 | bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 173 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
| 174 | 174 | ||
| 175 | #ifdef CONFIG_MEMCG_KMEM | 175 | #ifdef CONFIG_MEMCG_KMEM |
| 176 | /* | 176 | /* |
diff --git a/mm/slab_common.c b/mm/slab_common.c index d88e97c10a2e..3c6a86b4ec25 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) | |||
| 112 | kmem_cache_free(s, p[i]); | 112 | kmem_cache_free(s, p[i]); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | 115 | int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, |
| 116 | void **p) | 116 | void **p) |
| 117 | { | 117 | { |
| 118 | size_t i; | 118 | size_t i; |
| @@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | |||
| 121 | void *x = p[i] = kmem_cache_alloc(s, flags); | 121 | void *x = p[i] = kmem_cache_alloc(s, flags); |
| 122 | if (!x) { | 122 | if (!x) { |
| 123 | __kmem_cache_free_bulk(s, i, p); | 123 | __kmem_cache_free_bulk(s, i, p); |
| 124 | return false; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | } | 126 | } |
| 127 | return true; | 127 | return i; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | #ifdef CONFIG_MEMCG_KMEM | 130 | #ifdef CONFIG_MEMCG_KMEM |
| @@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | |||
| 617 | } | 617 | } |
| 618 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 618 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 619 | 619 | ||
| 620 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 620 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 621 | void **p) | 621 | void **p) |
| 622 | { | 622 | { |
| 623 | return __kmem_cache_alloc_bulk(s, flags, size, p); | 623 | return __kmem_cache_alloc_bulk(s, flags, size, p); |
| @@ -1065,11 +1065,15 @@ bad: | |||
| 1065 | return 0; | 1065 | return 0; |
| 1066 | } | 1066 | } |
| 1067 | 1067 | ||
| 1068 | /* Supports checking bulk free of a constructed freelist */ | ||
| 1068 | static noinline struct kmem_cache_node *free_debug_processing( | 1069 | static noinline struct kmem_cache_node *free_debug_processing( |
| 1069 | struct kmem_cache *s, struct page *page, void *object, | 1070 | struct kmem_cache *s, struct page *page, |
| 1071 | void *head, void *tail, int bulk_cnt, | ||
| 1070 | unsigned long addr, unsigned long *flags) | 1072 | unsigned long addr, unsigned long *flags) |
| 1071 | { | 1073 | { |
| 1072 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1074 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
| 1075 | void *object = head; | ||
| 1076 | int cnt = 0; | ||
| 1073 | 1077 | ||
| 1074 | spin_lock_irqsave(&n->list_lock, *flags); | 1078 | spin_lock_irqsave(&n->list_lock, *flags); |
| 1075 | slab_lock(page); | 1079 | slab_lock(page); |
| @@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing( | |||
| 1077 | if (!check_slab(s, page)) | 1081 | if (!check_slab(s, page)) |
| 1078 | goto fail; | 1082 | goto fail; |
| 1079 | 1083 | ||
| 1084 | next_object: | ||
| 1085 | cnt++; | ||
| 1086 | |||
| 1080 | if (!check_valid_pointer(s, page, object)) { | 1087 | if (!check_valid_pointer(s, page, object)) { |
| 1081 | slab_err(s, page, "Invalid object pointer 0x%p", object); | 1088 | slab_err(s, page, "Invalid object pointer 0x%p", object); |
| 1082 | goto fail; | 1089 | goto fail; |
| @@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing( | |||
| 1107 | if (s->flags & SLAB_STORE_USER) | 1114 | if (s->flags & SLAB_STORE_USER) |
| 1108 | set_track(s, object, TRACK_FREE, addr); | 1115 | set_track(s, object, TRACK_FREE, addr); |
| 1109 | trace(s, page, object, 0); | 1116 | trace(s, page, object, 0); |
| 1117 | /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ | ||
| 1110 | init_object(s, object, SLUB_RED_INACTIVE); | 1118 | init_object(s, object, SLUB_RED_INACTIVE); |
| 1119 | |||
| 1120 | /* Reached end of constructed freelist yet? */ | ||
| 1121 | if (object != tail) { | ||
| 1122 | object = get_freepointer(s, object); | ||
| 1123 | goto next_object; | ||
| 1124 | } | ||
| 1111 | out: | 1125 | out: |
| 1126 | if (cnt != bulk_cnt) | ||
| 1127 | slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", | ||
| 1128 | bulk_cnt, cnt); | ||
| 1129 | |||
| 1112 | slab_unlock(page); | 1130 | slab_unlock(page); |
| 1113 | /* | 1131 | /* |
| 1114 | * Keep node_lock to preserve integrity | 1132 | * Keep node_lock to preserve integrity |
| @@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size, | |||
| 1204 | 1222 | ||
| 1205 | return flags; | 1223 | return flags; |
| 1206 | } | 1224 | } |
| 1207 | #else | 1225 | #else /* !CONFIG_SLUB_DEBUG */ |
| 1208 | static inline void setup_object_debug(struct kmem_cache *s, | 1226 | static inline void setup_object_debug(struct kmem_cache *s, |
| 1209 | struct page *page, void *object) {} | 1227 | struct page *page, void *object) {} |
| 1210 | 1228 | ||
| @@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s, | |||
| 1212 | struct page *page, void *object, unsigned long addr) { return 0; } | 1230 | struct page *page, void *object, unsigned long addr) { return 0; } |
| 1213 | 1231 | ||
| 1214 | static inline struct kmem_cache_node *free_debug_processing( | 1232 | static inline struct kmem_cache_node *free_debug_processing( |
| 1215 | struct kmem_cache *s, struct page *page, void *object, | 1233 | struct kmem_cache *s, struct page *page, |
| 1234 | void *head, void *tail, int bulk_cnt, | ||
| 1216 | unsigned long addr, unsigned long *flags) { return NULL; } | 1235 | unsigned long addr, unsigned long *flags) { return NULL; } |
| 1217 | 1236 | ||
| 1218 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1237 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
| @@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, | |||
| 1273 | return memcg_kmem_get_cache(s, flags); | 1292 | return memcg_kmem_get_cache(s, flags); |
| 1274 | } | 1293 | } |
| 1275 | 1294 | ||
| 1276 | static inline void slab_post_alloc_hook(struct kmem_cache *s, | 1295 | static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, |
| 1277 | gfp_t flags, void *object) | 1296 | size_t size, void **p) |
| 1278 | { | 1297 | { |
| 1298 | size_t i; | ||
| 1299 | |||
| 1279 | flags &= gfp_allowed_mask; | 1300 | flags &= gfp_allowed_mask; |
| 1280 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); | 1301 | for (i = 0; i < size; i++) { |
| 1281 | kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); | 1302 | void *object = p[i]; |
| 1303 | |||
| 1304 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); | ||
| 1305 | kmemleak_alloc_recursive(object, s->object_size, 1, | ||
| 1306 | s->flags, flags); | ||
| 1307 | kasan_slab_alloc(s, object); | ||
| 1308 | } | ||
| 1282 | memcg_kmem_put_cache(s); | 1309 | memcg_kmem_put_cache(s); |
| 1283 | kasan_slab_alloc(s, object); | ||
| 1284 | } | 1310 | } |
| 1285 | 1311 | ||
| 1286 | static inline void slab_free_hook(struct kmem_cache *s, void *x) | 1312 | static inline void slab_free_hook(struct kmem_cache *s, void *x) |
| @@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) | |||
| 1308 | kasan_slab_free(s, x); | 1334 | kasan_slab_free(s, x); |
| 1309 | } | 1335 | } |
| 1310 | 1336 | ||
| 1337 | static inline void slab_free_freelist_hook(struct kmem_cache *s, | ||
| 1338 | void *head, void *tail) | ||
| 1339 | { | ||
| 1340 | /* | ||
| 1341 | * Compiler cannot detect this function can be removed if slab_free_hook() | ||
| 1342 | * evaluates to nothing. Thus, catch all relevant config debug options here. | ||
| 1343 | */ | ||
| 1344 | #if defined(CONFIG_KMEMCHECK) || \ | ||
| 1345 | defined(CONFIG_LOCKDEP) || \ | ||
| 1346 | defined(CONFIG_DEBUG_KMEMLEAK) || \ | ||
| 1347 | defined(CONFIG_DEBUG_OBJECTS_FREE) || \ | ||
| 1348 | defined(CONFIG_KASAN) | ||
| 1349 | |||
| 1350 | void *object = head; | ||
| 1351 | void *tail_obj = tail ? : head; | ||
| 1352 | |||
| 1353 | do { | ||
| 1354 | slab_free_hook(s, object); | ||
| 1355 | } while ((object != tail_obj) && | ||
| 1356 | (object = get_freepointer(s, object))); | ||
| 1357 | #endif | ||
| 1358 | } | ||
| 1359 | |||
| 1311 | static void setup_object(struct kmem_cache *s, struct page *page, | 1360 | static void setup_object(struct kmem_cache *s, struct page *page, |
| 1312 | void *object) | 1361 | void *object) |
| 1313 | { | 1362 | { |
| @@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) | |||
| 2295 | * And if we were unable to get a new slab from the partial slab lists then | 2344 | * And if we were unable to get a new slab from the partial slab lists then |
| 2296 | * we need to allocate a new slab. This is the slowest path since it involves | 2345 | * we need to allocate a new slab. This is the slowest path since it involves |
| 2297 | * a call to the page allocator and the setup of a new slab. | 2346 | * a call to the page allocator and the setup of a new slab. |
| 2347 | * | ||
| 2348 | * Version of __slab_alloc to use when we know that interrupts are | ||
| 2349 | * already disabled (which is the case for bulk allocation). | ||
| 2298 | */ | 2350 | */ |
| 2299 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | 2351 | static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| 2300 | unsigned long addr, struct kmem_cache_cpu *c) | 2352 | unsigned long addr, struct kmem_cache_cpu *c) |
| 2301 | { | 2353 | { |
| 2302 | void *freelist; | 2354 | void *freelist; |
| 2303 | struct page *page; | 2355 | struct page *page; |
| 2304 | unsigned long flags; | ||
| 2305 | |||
| 2306 | local_irq_save(flags); | ||
| 2307 | #ifdef CONFIG_PREEMPT | ||
| 2308 | /* | ||
| 2309 | * We may have been preempted and rescheduled on a different | ||
| 2310 | * cpu before disabling interrupts. Need to reload cpu area | ||
| 2311 | * pointer. | ||
| 2312 | */ | ||
| 2313 | c = this_cpu_ptr(s->cpu_slab); | ||
| 2314 | #endif | ||
| 2315 | 2356 | ||
| 2316 | page = c->page; | 2357 | page = c->page; |
| 2317 | if (!page) | 2358 | if (!page) |
| @@ -2369,7 +2410,6 @@ load_freelist: | |||
| 2369 | VM_BUG_ON(!c->page->frozen); | 2410 | VM_BUG_ON(!c->page->frozen); |
| 2370 | c->freelist = get_freepointer(s, freelist); | 2411 | c->freelist = get_freepointer(s, freelist); |
| 2371 | c->tid = next_tid(c->tid); | 2412 | c->tid = next_tid(c->tid); |
| 2372 | local_irq_restore(flags); | ||
| 2373 | return freelist; | 2413 | return freelist; |
| 2374 | 2414 | ||
| 2375 | new_slab: | 2415 | new_slab: |
| @@ -2386,7 +2426,6 @@ new_slab: | |||
| 2386 | 2426 | ||
| 2387 | if (unlikely(!freelist)) { | 2427 | if (unlikely(!freelist)) { |
| 2388 | slab_out_of_memory(s, gfpflags, node); | 2428 | slab_out_of_memory(s, gfpflags, node); |
| 2389 | local_irq_restore(flags); | ||
| 2390 | return NULL; | 2429 | return NULL; |
| 2391 | } | 2430 | } |
| 2392 | 2431 | ||
| @@ -2402,11 +2441,35 @@ new_slab: | |||
| 2402 | deactivate_slab(s, page, get_freepointer(s, freelist)); | 2441 | deactivate_slab(s, page, get_freepointer(s, freelist)); |
| 2403 | c->page = NULL; | 2442 | c->page = NULL; |
| 2404 | c->freelist = NULL; | 2443 | c->freelist = NULL; |
| 2405 | local_irq_restore(flags); | ||
| 2406 | return freelist; | 2444 | return freelist; |
| 2407 | } | 2445 | } |
| 2408 | 2446 | ||
| 2409 | /* | 2447 | /* |
| 2448 | * Another one that disabled interrupt and compensates for possible | ||
| 2449 | * cpu changes by refetching the per cpu area pointer. | ||
| 2450 | */ | ||
| 2451 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | ||
| 2452 | unsigned long addr, struct kmem_cache_cpu *c) | ||
| 2453 | { | ||
| 2454 | void *p; | ||
| 2455 | unsigned long flags; | ||
| 2456 | |||
| 2457 | local_irq_save(flags); | ||
| 2458 | #ifdef CONFIG_PREEMPT | ||
| 2459 | /* | ||
| 2460 | * We may have been preempted and rescheduled on a different | ||
| 2461 | * cpu before disabling interrupts. Need to reload cpu area | ||
| 2462 | * pointer. | ||
| 2463 | */ | ||
| 2464 | c = this_cpu_ptr(s->cpu_slab); | ||
| 2465 | #endif | ||
| 2466 | |||
| 2467 | p = ___slab_alloc(s, gfpflags, node, addr, c); | ||
| 2468 | local_irq_restore(flags); | ||
| 2469 | return p; | ||
| 2470 | } | ||
| 2471 | |||
| 2472 | /* | ||
| 2410 | * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) | 2473 | * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) |
| 2411 | * have the fastpath folded into their functions. So no function call | 2474 | * have the fastpath folded into their functions. So no function call |
| 2412 | * overhead for requests that can be satisfied on the fastpath. | 2475 | * overhead for requests that can be satisfied on the fastpath. |
| @@ -2419,7 +2482,7 @@ new_slab: | |||
| 2419 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, | 2482 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, |
| 2420 | gfp_t gfpflags, int node, unsigned long addr) | 2483 | gfp_t gfpflags, int node, unsigned long addr) |
| 2421 | { | 2484 | { |
| 2422 | void **object; | 2485 | void *object; |
| 2423 | struct kmem_cache_cpu *c; | 2486 | struct kmem_cache_cpu *c; |
| 2424 | struct page *page; | 2487 | struct page *page; |
| 2425 | unsigned long tid; | 2488 | unsigned long tid; |
| @@ -2498,7 +2561,7 @@ redo: | |||
| 2498 | if (unlikely(gfpflags & __GFP_ZERO) && object) | 2561 | if (unlikely(gfpflags & __GFP_ZERO) && object) |
| 2499 | memset(object, 0, s->object_size); | 2562 | memset(object, 0, s->object_size); |
| 2500 | 2563 | ||
| 2501 | slab_post_alloc_hook(s, gfpflags, object); | 2564 | slab_post_alloc_hook(s, gfpflags, 1, &object); |
| 2502 | 2565 | ||
| 2503 | return object; | 2566 | return object; |
| 2504 | } | 2567 | } |
| @@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace); | |||
| 2569 | * handling required then we can return immediately. | 2632 | * handling required then we can return immediately. |
| 2570 | */ | 2633 | */ |
| 2571 | static void __slab_free(struct kmem_cache *s, struct page *page, | 2634 | static void __slab_free(struct kmem_cache *s, struct page *page, |
| 2572 | void *x, unsigned long addr) | 2635 | void *head, void *tail, int cnt, |
| 2636 | unsigned long addr) | ||
| 2637 | |||
| 2573 | { | 2638 | { |
| 2574 | void *prior; | 2639 | void *prior; |
| 2575 | void **object = (void *)x; | ||
| 2576 | int was_frozen; | 2640 | int was_frozen; |
| 2577 | struct page new; | 2641 | struct page new; |
| 2578 | unsigned long counters; | 2642 | unsigned long counters; |
| @@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2582 | stat(s, FREE_SLOWPATH); | 2646 | stat(s, FREE_SLOWPATH); |
| 2583 | 2647 | ||
| 2584 | if (kmem_cache_debug(s) && | 2648 | if (kmem_cache_debug(s) && |
| 2585 | !(n = free_debug_processing(s, page, x, addr, &flags))) | 2649 | !(n = free_debug_processing(s, page, head, tail, cnt, |
| 2650 | addr, &flags))) | ||
| 2586 | return; | 2651 | return; |
| 2587 | 2652 | ||
| 2588 | do { | 2653 | do { |
| @@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2592 | } | 2657 | } |
| 2593 | prior = page->freelist; | 2658 | prior = page->freelist; |
| 2594 | counters = page->counters; | 2659 | counters = page->counters; |
| 2595 | set_freepointer(s, object, prior); | 2660 | set_freepointer(s, tail, prior); |
| 2596 | new.counters = counters; | 2661 | new.counters = counters; |
| 2597 | was_frozen = new.frozen; | 2662 | was_frozen = new.frozen; |
| 2598 | new.inuse--; | 2663 | new.inuse -= cnt; |
| 2599 | if ((!new.inuse || !prior) && !was_frozen) { | 2664 | if ((!new.inuse || !prior) && !was_frozen) { |
| 2600 | 2665 | ||
| 2601 | if (kmem_cache_has_cpu_partial(s) && !prior) { | 2666 | if (kmem_cache_has_cpu_partial(s) && !prior) { |
| @@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2626 | 2691 | ||
| 2627 | } while (!cmpxchg_double_slab(s, page, | 2692 | } while (!cmpxchg_double_slab(s, page, |
| 2628 | prior, counters, | 2693 | prior, counters, |
| 2629 | object, new.counters, | 2694 | head, new.counters, |
| 2630 | "__slab_free")); | 2695 | "__slab_free")); |
| 2631 | 2696 | ||
| 2632 | if (likely(!n)) { | 2697 | if (likely(!n)) { |
| @@ -2691,15 +2756,20 @@ slab_empty: | |||
| 2691 | * | 2756 | * |
| 2692 | * If fastpath is not possible then fall back to __slab_free where we deal | 2757 | * If fastpath is not possible then fall back to __slab_free where we deal |
| 2693 | * with all sorts of special processing. | 2758 | * with all sorts of special processing. |
| 2759 | * | ||
| 2760 | * Bulk free of a freelist with several objects (all pointing to the | ||
| 2761 | * same page) possible by specifying head and tail ptr, plus objects | ||
| 2762 | * count (cnt). Bulk free indicated by tail pointer being set. | ||
| 2694 | */ | 2763 | */ |
| 2695 | static __always_inline void slab_free(struct kmem_cache *s, | 2764 | static __always_inline void slab_free(struct kmem_cache *s, struct page *page, |
| 2696 | struct page *page, void *x, unsigned long addr) | 2765 | void *head, void *tail, int cnt, |
| 2766 | unsigned long addr) | ||
| 2697 | { | 2767 | { |
| 2698 | void **object = (void *)x; | 2768 | void *tail_obj = tail ? : head; |
| 2699 | struct kmem_cache_cpu *c; | 2769 | struct kmem_cache_cpu *c; |
| 2700 | unsigned long tid; | 2770 | unsigned long tid; |
| 2701 | 2771 | ||
| 2702 | slab_free_hook(s, x); | 2772 | slab_free_freelist_hook(s, head, tail); |
| 2703 | 2773 | ||
| 2704 | redo: | 2774 | redo: |
| 2705 | /* | 2775 | /* |
| @@ -2718,19 +2788,19 @@ redo: | |||
| 2718 | barrier(); | 2788 | barrier(); |
| 2719 | 2789 | ||
| 2720 | if (likely(page == c->page)) { | 2790 | if (likely(page == c->page)) { |
| 2721 | set_freepointer(s, object, c->freelist); | 2791 | set_freepointer(s, tail_obj, c->freelist); |
| 2722 | 2792 | ||
| 2723 | if (unlikely(!this_cpu_cmpxchg_double( | 2793 | if (unlikely(!this_cpu_cmpxchg_double( |
| 2724 | s->cpu_slab->freelist, s->cpu_slab->tid, | 2794 | s->cpu_slab->freelist, s->cpu_slab->tid, |
| 2725 | c->freelist, tid, | 2795 | c->freelist, tid, |
| 2726 | object, next_tid(tid)))) { | 2796 | head, next_tid(tid)))) { |
| 2727 | 2797 | ||
| 2728 | note_cmpxchg_failure("slab_free", s, tid); | 2798 | note_cmpxchg_failure("slab_free", s, tid); |
| 2729 | goto redo; | 2799 | goto redo; |
| 2730 | } | 2800 | } |
| 2731 | stat(s, FREE_FASTPATH); | 2801 | stat(s, FREE_FASTPATH); |
| 2732 | } else | 2802 | } else |
| 2733 | __slab_free(s, page, x, addr); | 2803 | __slab_free(s, page, head, tail_obj, cnt, addr); |
| 2734 | 2804 | ||
| 2735 | } | 2805 | } |
| 2736 | 2806 | ||
| @@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
| 2739 | s = cache_from_obj(s, x); | 2809 | s = cache_from_obj(s, x); |
| 2740 | if (!s) | 2810 | if (!s) |
| 2741 | return; | 2811 | return; |
| 2742 | slab_free(s, virt_to_head_page(x), x, _RET_IP_); | 2812 | slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); |
| 2743 | trace_kmem_cache_free(_RET_IP_, x); | 2813 | trace_kmem_cache_free(_RET_IP_, x); |
| 2744 | } | 2814 | } |
| 2745 | EXPORT_SYMBOL(kmem_cache_free); | 2815 | EXPORT_SYMBOL(kmem_cache_free); |
| 2746 | 2816 | ||
| 2747 | /* Note that interrupts must be enabled when calling this function. */ | 2817 | struct detached_freelist { |
| 2748 | void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | ||
| 2749 | { | ||
| 2750 | struct kmem_cache_cpu *c; | ||
| 2751 | struct page *page; | 2818 | struct page *page; |
| 2752 | int i; | 2819 | void *tail; |
| 2820 | void *freelist; | ||
| 2821 | int cnt; | ||
| 2822 | }; | ||
| 2753 | 2823 | ||
| 2754 | local_irq_disable(); | 2824 | /* |
| 2755 | c = this_cpu_ptr(s->cpu_slab); | 2825 | * This function progressively scans the array with free objects (with |
| 2826 | * a limited look ahead) and extract objects belonging to the same | ||
| 2827 | * page. It builds a detached freelist directly within the given | ||
| 2828 | * page/objects. This can happen without any need for | ||
| 2829 | * synchronization, because the objects are owned by running process. | ||
| 2830 | * The freelist is build up as a single linked list in the objects. | ||
| 2831 | * The idea is, that this detached freelist can then be bulk | ||
| 2832 | * transferred to the real freelist(s), but only requiring a single | ||
| 2833 | * synchronization primitive. Look ahead in the array is limited due | ||
| 2834 | * to performance reasons. | ||
| 2835 | */ | ||
| 2836 | static int build_detached_freelist(struct kmem_cache *s, size_t size, | ||
| 2837 | void **p, struct detached_freelist *df) | ||
| 2838 | { | ||
| 2839 | size_t first_skipped_index = 0; | ||
| 2840 | int lookahead = 3; | ||
| 2841 | void *object; | ||
| 2756 | 2842 | ||
| 2757 | for (i = 0; i < size; i++) { | 2843 | /* Always re-init detached_freelist */ |
| 2758 | void *object = p[i]; | 2844 | df->page = NULL; |
| 2759 | 2845 | ||
| 2760 | BUG_ON(!object); | 2846 | do { |
| 2761 | /* kmem cache debug support */ | 2847 | object = p[--size]; |
| 2762 | s = cache_from_obj(s, object); | 2848 | } while (!object && size); |
| 2763 | if (unlikely(!s)) | ||
| 2764 | goto exit; | ||
| 2765 | slab_free_hook(s, object); | ||
| 2766 | 2849 | ||
| 2767 | page = virt_to_head_page(object); | 2850 | if (!object) |
| 2851 | return 0; | ||
| 2768 | 2852 | ||
| 2769 | if (c->page == page) { | 2853 | /* Start new detached freelist */ |
| 2770 | /* Fastpath: local CPU free */ | 2854 | set_freepointer(s, object, NULL); |
| 2771 | set_freepointer(s, object, c->freelist); | 2855 | df->page = virt_to_head_page(object); |
| 2772 | c->freelist = object; | 2856 | df->tail = object; |
| 2773 | } else { | 2857 | df->freelist = object; |
| 2774 | c->tid = next_tid(c->tid); | 2858 | p[size] = NULL; /* mark object processed */ |
| 2775 | local_irq_enable(); | 2859 | df->cnt = 1; |
| 2776 | /* Slowpath: overhead locked cmpxchg_double_slab */ | 2860 | |
| 2777 | __slab_free(s, page, object, _RET_IP_); | 2861 | while (size) { |
| 2778 | local_irq_disable(); | 2862 | object = p[--size]; |
| 2779 | c = this_cpu_ptr(s->cpu_slab); | 2863 | if (!object) |
| 2864 | continue; /* Skip processed objects */ | ||
| 2865 | |||
| 2866 | /* df->page is always set at this point */ | ||
| 2867 | if (df->page == virt_to_head_page(object)) { | ||
| 2868 | /* Opportunity build freelist */ | ||
| 2869 | set_freepointer(s, object, df->freelist); | ||
| 2870 | df->freelist = object; | ||
| 2871 | df->cnt++; | ||
| 2872 | p[size] = NULL; /* mark object processed */ | ||
| 2873 | |||
| 2874 | continue; | ||
| 2780 | } | 2875 | } |
| 2876 | |||
| 2877 | /* Limit look ahead search */ | ||
| 2878 | if (!--lookahead) | ||
| 2879 | break; | ||
| 2880 | |||
| 2881 | if (!first_skipped_index) | ||
| 2882 | first_skipped_index = size + 1; | ||
| 2781 | } | 2883 | } |
| 2782 | exit: | 2884 | |
| 2783 | c->tid = next_tid(c->tid); | 2885 | return first_skipped_index; |
| 2784 | local_irq_enable(); | 2886 | } |
| 2887 | |||
| 2888 | |||
| 2889 | /* Note that interrupts must be enabled when calling this function. */ | ||
| 2890 | void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) | ||
| 2891 | { | ||
| 2892 | if (WARN_ON(!size)) | ||
| 2893 | return; | ||
| 2894 | |||
| 2895 | do { | ||
| 2896 | struct detached_freelist df; | ||
| 2897 | struct kmem_cache *s; | ||
| 2898 | |||
| 2899 | /* Support for memcg */ | ||
| 2900 | s = cache_from_obj(orig_s, p[size - 1]); | ||
| 2901 | |||
| 2902 | size = build_detached_freelist(s, size, p, &df); | ||
| 2903 | if (unlikely(!df.page)) | ||
| 2904 | continue; | ||
| 2905 | |||
| 2906 | slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); | ||
| 2907 | } while (likely(size)); | ||
| 2785 | } | 2908 | } |
| 2786 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 2909 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 2787 | 2910 | ||
| 2788 | /* Note that interrupts must be enabled when calling this function. */ | 2911 | /* Note that interrupts must be enabled when calling this function. */ |
| 2789 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 2912 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 2790 | void **p) | 2913 | void **p) |
| 2791 | { | 2914 | { |
| 2792 | struct kmem_cache_cpu *c; | 2915 | struct kmem_cache_cpu *c; |
| 2793 | int i; | 2916 | int i; |
| 2794 | 2917 | ||
| 2918 | /* memcg and kmem_cache debug support */ | ||
| 2919 | s = slab_pre_alloc_hook(s, flags); | ||
| 2920 | if (unlikely(!s)) | ||
| 2921 | return false; | ||
| 2795 | /* | 2922 | /* |
| 2796 | * Drain objects in the per cpu slab, while disabling local | 2923 | * Drain objects in the per cpu slab, while disabling local |
| 2797 | * IRQs, which protects against PREEMPT and interrupts | 2924 | * IRQs, which protects against PREEMPT and interrupts |
| @@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | |||
| 2804 | void *object = c->freelist; | 2931 | void *object = c->freelist; |
| 2805 | 2932 | ||
| 2806 | if (unlikely(!object)) { | 2933 | if (unlikely(!object)) { |
| 2807 | local_irq_enable(); | ||
| 2808 | /* | 2934 | /* |
| 2809 | * Invoking slow path likely have side-effect | 2935 | * Invoking slow path likely have side-effect |
| 2810 | * of re-populating per CPU c->freelist | 2936 | * of re-populating per CPU c->freelist |
| 2811 | */ | 2937 | */ |
| 2812 | p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, | 2938 | p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, |
| 2813 | _RET_IP_, c); | 2939 | _RET_IP_, c); |
| 2814 | if (unlikely(!p[i])) { | 2940 | if (unlikely(!p[i])) |
| 2815 | __kmem_cache_free_bulk(s, i, p); | 2941 | goto error; |
| 2816 | return false; | 2942 | |
| 2817 | } | ||
| 2818 | local_irq_disable(); | ||
| 2819 | c = this_cpu_ptr(s->cpu_slab); | 2943 | c = this_cpu_ptr(s->cpu_slab); |
| 2820 | continue; /* goto for-loop */ | 2944 | continue; /* goto for-loop */ |
| 2821 | } | 2945 | } |
| 2822 | |||
| 2823 | /* kmem_cache debug support */ | ||
| 2824 | s = slab_pre_alloc_hook(s, flags); | ||
| 2825 | if (unlikely(!s)) { | ||
| 2826 | __kmem_cache_free_bulk(s, i, p); | ||
| 2827 | c->tid = next_tid(c->tid); | ||
| 2828 | local_irq_enable(); | ||
| 2829 | return false; | ||
| 2830 | } | ||
| 2831 | |||
| 2832 | c->freelist = get_freepointer(s, object); | 2946 | c->freelist = get_freepointer(s, object); |
| 2833 | p[i] = object; | 2947 | p[i] = object; |
| 2834 | |||
| 2835 | /* kmem_cache debug support */ | ||
| 2836 | slab_post_alloc_hook(s, flags, object); | ||
| 2837 | } | 2948 | } |
| 2838 | c->tid = next_tid(c->tid); | 2949 | c->tid = next_tid(c->tid); |
| 2839 | local_irq_enable(); | 2950 | local_irq_enable(); |
| @@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | |||
| 2846 | memset(p[j], 0, s->object_size); | 2957 | memset(p[j], 0, s->object_size); |
| 2847 | } | 2958 | } |
| 2848 | 2959 | ||
| 2849 | return true; | 2960 | /* memcg and kmem_cache debug support */ |
| 2961 | slab_post_alloc_hook(s, flags, size, p); | ||
| 2962 | return i; | ||
| 2963 | error: | ||
| 2964 | local_irq_enable(); | ||
| 2965 | slab_post_alloc_hook(s, flags, i, p); | ||
| 2966 | __kmem_cache_free_bulk(s, i, p); | ||
| 2967 | return 0; | ||
| 2850 | } | 2968 | } |
| 2851 | EXPORT_SYMBOL(kmem_cache_alloc_bulk); | 2969 | EXPORT_SYMBOL(kmem_cache_alloc_bulk); |
| 2852 | 2970 | ||
| @@ -3511,7 +3629,7 @@ void kfree(const void *x) | |||
| 3511 | __free_kmem_pages(page, compound_order(page)); | 3629 | __free_kmem_pages(page, compound_order(page)); |
| 3512 | return; | 3630 | return; |
| 3513 | } | 3631 | } |
| 3514 | slab_free(page->slab_cache, page, object, _RET_IP_); | 3632 | slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); |
| 3515 | } | 3633 | } |
| 3516 | EXPORT_SYMBOL(kfree); | 3634 | EXPORT_SYMBOL(kfree); |
| 3517 | 3635 | ||
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d04563480c94..8e3c9c5a3042 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
| 1443 | vmap_debug_free_range(va->va_start, va->va_end); | 1443 | vmap_debug_free_range(va->va_start, va->va_end); |
| 1444 | kasan_free_shadow(vm); | 1444 | kasan_free_shadow(vm); |
| 1445 | free_unmap_vmap_area(va); | 1445 | free_unmap_vmap_area(va); |
| 1446 | vm->size -= PAGE_SIZE; | ||
| 1447 | 1446 | ||
| 1448 | return vm; | 1447 | return vm; |
| 1449 | } | 1448 | } |
| @@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
| 1468 | return; | 1467 | return; |
| 1469 | } | 1468 | } |
| 1470 | 1469 | ||
| 1471 | debug_check_no_locks_freed(addr, area->size); | 1470 | debug_check_no_locks_freed(addr, get_vm_area_size(area)); |
| 1472 | debug_check_no_obj_freed(addr, area->size); | 1471 | debug_check_no_obj_freed(addr, get_vm_area_size(area)); |
| 1473 | 1472 | ||
| 1474 | if (deallocate_pages) { | 1473 | if (deallocate_pages) { |
| 1475 | int i; | 1474 | int i; |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 496b27588493..e2ed69850489 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
| @@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp) | |||
| 30 | skb->pkt_type = PACKET_HOST; | 30 | skb->pkt_type = PACKET_HOST; |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { | 33 | if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) && |
| 34 | !netif_is_macvlan_port(vlan_dev) && | ||
| 35 | !netif_is_bridge_port(vlan_dev)) { | ||
| 34 | unsigned int offset = skb->data - skb_mac_header(skb); | 36 | unsigned int offset = skb->data - skb_mac_header(skb); |
| 35 | 37 | ||
| 36 | /* | 38 | /* |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index f7e8dee64fc8..5f3f64553179 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
| @@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state) | |||
| 48 | 48 | ||
| 49 | p->state = state; | 49 | p->state = state; |
| 50 | err = switchdev_port_attr_set(p->dev, &attr); | 50 | err = switchdev_port_attr_set(p->dev, &attr); |
| 51 | if (err) | 51 | if (err && err != -EOPNOTSUPP) |
| 52 | br_warn(p->br, "error setting offload STP state on port %u(%s)\n", | 52 | br_warn(p->br, "error setting offload STP state on port %u(%s)\n", |
| 53 | (unsigned int) p->port_no, p->dev->name); | 53 | (unsigned int) p->port_no, p->dev->name); |
| 54 | } | 54 | } |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index fa53d7a89f48..5396ff08af32 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
| @@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p) | |||
| 50 | p->config_pending = 0; | 50 | p->config_pending = 0; |
| 51 | 51 | ||
| 52 | err = switchdev_port_attr_set(p->dev, &attr); | 52 | err = switchdev_port_attr_set(p->dev, &attr); |
| 53 | if (err) | 53 | if (err && err != -EOPNOTSUPP) |
| 54 | netdev_err(p->dev, "failed to set HW ageing time\n"); | 54 | netdev_err(p->dev, "failed to set HW ageing time\n"); |
| 55 | } | 55 | } |
| 56 | 56 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index ab9b8d0d115e..ae00b894e675 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2403,17 +2403,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) | |||
| 2403 | { | 2403 | { |
| 2404 | static const netdev_features_t null_features = 0; | 2404 | static const netdev_features_t null_features = 0; |
| 2405 | struct net_device *dev = skb->dev; | 2405 | struct net_device *dev = skb->dev; |
| 2406 | const char *driver = ""; | 2406 | const char *name = ""; |
| 2407 | 2407 | ||
| 2408 | if (!net_ratelimit()) | 2408 | if (!net_ratelimit()) |
| 2409 | return; | 2409 | return; |
| 2410 | 2410 | ||
| 2411 | if (dev && dev->dev.parent) | 2411 | if (dev) { |
| 2412 | driver = dev_driver_string(dev->dev.parent); | 2412 | if (dev->dev.parent) |
| 2413 | 2413 | name = dev_driver_string(dev->dev.parent); | |
| 2414 | else | ||
| 2415 | name = netdev_name(dev); | ||
| 2416 | } | ||
| 2414 | WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " | 2417 | WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " |
| 2415 | "gso_type=%d ip_summed=%d\n", | 2418 | "gso_type=%d ip_summed=%d\n", |
| 2416 | driver, dev ? &dev->features : &null_features, | 2419 | name, dev ? &dev->features : &null_features, |
| 2417 | skb->sk ? &skb->sk->sk_route_caps : &null_features, | 2420 | skb->sk ? &skb->sk->sk_route_caps : &null_features, |
| 2418 | skb->len, skb->data_len, skb_shinfo(skb)->gso_size, | 2421 | skb->len, skb->data_len, skb_shinfo(skb)->gso_size, |
| 2419 | skb_shinfo(skb)->gso_type, skb->ip_summed); | 2422 | skb_shinfo(skb)->gso_type, skb->ip_summed); |
| @@ -6426,11 +6429,16 @@ int __netdev_update_features(struct net_device *dev) | |||
| 6426 | 6429 | ||
| 6427 | if (dev->netdev_ops->ndo_set_features) | 6430 | if (dev->netdev_ops->ndo_set_features) |
| 6428 | err = dev->netdev_ops->ndo_set_features(dev, features); | 6431 | err = dev->netdev_ops->ndo_set_features(dev, features); |
| 6432 | else | ||
| 6433 | err = 0; | ||
| 6429 | 6434 | ||
| 6430 | if (unlikely(err < 0)) { | 6435 | if (unlikely(err < 0)) { |
| 6431 | netdev_err(dev, | 6436 | netdev_err(dev, |
| 6432 | "set_features() failed (%d); wanted %pNF, left %pNF\n", | 6437 | "set_features() failed (%d); wanted %pNF, left %pNF\n", |
| 6433 | err, &features, &dev->features); | 6438 | err, &features, &dev->features); |
| 6439 | /* return non-0 since some features might have changed and | ||
| 6440 | * it's better to fire a spurious notification than miss it | ||
| 6441 | */ | ||
| 6434 | return -1; | 6442 | return -1; |
| 6435 | } | 6443 | } |
| 6436 | 6444 | ||
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 1aa8437ed6c4..e6af42da28d9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh) | |||
| 857 | struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); | 857 | struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); |
| 858 | /* keep skb alive even if arp_queue overflows */ | 858 | /* keep skb alive even if arp_queue overflows */ |
| 859 | if (skb) | 859 | if (skb) |
| 860 | skb = skb_copy(skb, GFP_ATOMIC); | 860 | skb = skb_clone(skb, GFP_ATOMIC); |
| 861 | write_unlock(&neigh->lock); | 861 | write_unlock(&neigh->lock); |
| 862 | neigh->ops->solicit(neigh, skb); | 862 | neigh->ops->solicit(neigh, skb); |
| 863 | atomic_inc(&neigh->probes); | 863 | atomic_inc(&neigh->probes); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 504bd17b7456..34ba7a08876d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) | |||
| 1045 | return 0; | 1045 | return 0; |
| 1046 | } | 1046 | } |
| 1047 | 1047 | ||
| 1048 | static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, | ||
| 1049 | struct net_device *dev) | ||
| 1050 | { | ||
| 1051 | const struct rtnl_link_stats64 *stats; | ||
| 1052 | struct rtnl_link_stats64 temp; | ||
| 1053 | struct nlattr *attr; | ||
| 1054 | |||
| 1055 | stats = dev_get_stats(dev, &temp); | ||
| 1056 | |||
| 1057 | attr = nla_reserve(skb, IFLA_STATS, | ||
| 1058 | sizeof(struct rtnl_link_stats)); | ||
| 1059 | if (!attr) | ||
| 1060 | return -EMSGSIZE; | ||
| 1061 | |||
| 1062 | copy_rtnl_link_stats(nla_data(attr), stats); | ||
| 1063 | |||
| 1064 | attr = nla_reserve(skb, IFLA_STATS64, | ||
| 1065 | sizeof(struct rtnl_link_stats64)); | ||
| 1066 | if (!attr) | ||
| 1067 | return -EMSGSIZE; | ||
| 1068 | |||
| 1069 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
| 1070 | |||
| 1071 | return 0; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, | ||
| 1075 | struct net_device *dev, | ||
| 1076 | int vfs_num, | ||
| 1077 | struct nlattr *vfinfo) | ||
| 1078 | { | ||
| 1079 | struct ifla_vf_rss_query_en vf_rss_query_en; | ||
| 1080 | struct ifla_vf_link_state vf_linkstate; | ||
| 1081 | struct ifla_vf_spoofchk vf_spoofchk; | ||
| 1082 | struct ifla_vf_tx_rate vf_tx_rate; | ||
| 1083 | struct ifla_vf_stats vf_stats; | ||
| 1084 | struct ifla_vf_trust vf_trust; | ||
| 1085 | struct ifla_vf_vlan vf_vlan; | ||
| 1086 | struct ifla_vf_rate vf_rate; | ||
| 1087 | struct nlattr *vf, *vfstats; | ||
| 1088 | struct ifla_vf_mac vf_mac; | ||
| 1089 | struct ifla_vf_info ivi; | ||
| 1090 | |||
| 1091 | /* Not all SR-IOV capable drivers support the | ||
| 1092 | * spoofcheck and "RSS query enable" query. Preset to | ||
| 1093 | * -1 so the user space tool can detect that the driver | ||
| 1094 | * didn't report anything. | ||
| 1095 | */ | ||
| 1096 | ivi.spoofchk = -1; | ||
| 1097 | ivi.rss_query_en = -1; | ||
| 1098 | ivi.trusted = -1; | ||
| 1099 | memset(ivi.mac, 0, sizeof(ivi.mac)); | ||
| 1100 | /* The default value for VF link state is "auto" | ||
| 1101 | * IFLA_VF_LINK_STATE_AUTO which equals zero | ||
| 1102 | */ | ||
| 1103 | ivi.linkstate = 0; | ||
| 1104 | if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi)) | ||
| 1105 | return 0; | ||
| 1106 | |||
| 1107 | vf_mac.vf = | ||
| 1108 | vf_vlan.vf = | ||
| 1109 | vf_rate.vf = | ||
| 1110 | vf_tx_rate.vf = | ||
| 1111 | vf_spoofchk.vf = | ||
| 1112 | vf_linkstate.vf = | ||
| 1113 | vf_rss_query_en.vf = | ||
| 1114 | vf_trust.vf = ivi.vf; | ||
| 1115 | |||
| 1116 | memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); | ||
| 1117 | vf_vlan.vlan = ivi.vlan; | ||
| 1118 | vf_vlan.qos = ivi.qos; | ||
| 1119 | vf_tx_rate.rate = ivi.max_tx_rate; | ||
| 1120 | vf_rate.min_tx_rate = ivi.min_tx_rate; | ||
| 1121 | vf_rate.max_tx_rate = ivi.max_tx_rate; | ||
| 1122 | vf_spoofchk.setting = ivi.spoofchk; | ||
| 1123 | vf_linkstate.link_state = ivi.linkstate; | ||
| 1124 | vf_rss_query_en.setting = ivi.rss_query_en; | ||
| 1125 | vf_trust.setting = ivi.trusted; | ||
| 1126 | vf = nla_nest_start(skb, IFLA_VF_INFO); | ||
| 1127 | if (!vf) { | ||
| 1128 | nla_nest_cancel(skb, vfinfo); | ||
| 1129 | return -EMSGSIZE; | ||
| 1130 | } | ||
| 1131 | if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || | ||
| 1132 | nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || | ||
| 1133 | nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), | ||
| 1134 | &vf_rate) || | ||
| 1135 | nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), | ||
| 1136 | &vf_tx_rate) || | ||
| 1137 | nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), | ||
| 1138 | &vf_spoofchk) || | ||
| 1139 | nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), | ||
| 1140 | &vf_linkstate) || | ||
| 1141 | nla_put(skb, IFLA_VF_RSS_QUERY_EN, | ||
| 1142 | sizeof(vf_rss_query_en), | ||
| 1143 | &vf_rss_query_en) || | ||
| 1144 | nla_put(skb, IFLA_VF_TRUST, | ||
| 1145 | sizeof(vf_trust), &vf_trust)) | ||
| 1146 | return -EMSGSIZE; | ||
| 1147 | memset(&vf_stats, 0, sizeof(vf_stats)); | ||
| 1148 | if (dev->netdev_ops->ndo_get_vf_stats) | ||
| 1149 | dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, | ||
| 1150 | &vf_stats); | ||
| 1151 | vfstats = nla_nest_start(skb, IFLA_VF_STATS); | ||
| 1152 | if (!vfstats) { | ||
| 1153 | nla_nest_cancel(skb, vf); | ||
| 1154 | nla_nest_cancel(skb, vfinfo); | ||
| 1155 | return -EMSGSIZE; | ||
| 1156 | } | ||
| 1157 | if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS, | ||
| 1158 | vf_stats.rx_packets) || | ||
| 1159 | nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS, | ||
| 1160 | vf_stats.tx_packets) || | ||
| 1161 | nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES, | ||
| 1162 | vf_stats.rx_bytes) || | ||
| 1163 | nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES, | ||
| 1164 | vf_stats.tx_bytes) || | ||
| 1165 | nla_put_u64(skb, IFLA_VF_STATS_BROADCAST, | ||
| 1166 | vf_stats.broadcast) || | ||
| 1167 | nla_put_u64(skb, IFLA_VF_STATS_MULTICAST, | ||
| 1168 | vf_stats.multicast)) | ||
| 1169 | return -EMSGSIZE; | ||
| 1170 | nla_nest_end(skb, vfstats); | ||
| 1171 | nla_nest_end(skb, vf); | ||
| 1172 | return 0; | ||
| 1173 | } | ||
| 1174 | |||
| 1175 | static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) | ||
| 1176 | { | ||
| 1177 | struct rtnl_link_ifmap map = { | ||
| 1178 | .mem_start = dev->mem_start, | ||
| 1179 | .mem_end = dev->mem_end, | ||
| 1180 | .base_addr = dev->base_addr, | ||
| 1181 | .irq = dev->irq, | ||
| 1182 | .dma = dev->dma, | ||
| 1183 | .port = dev->if_port, | ||
| 1184 | }; | ||
| 1185 | if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) | ||
| 1186 | return -EMSGSIZE; | ||
| 1187 | |||
| 1188 | return 0; | ||
| 1189 | } | ||
| 1190 | |||
| 1048 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | 1191 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, |
| 1049 | int type, u32 pid, u32 seq, u32 change, | 1192 | int type, u32 pid, u32 seq, u32 change, |
| 1050 | unsigned int flags, u32 ext_filter_mask) | 1193 | unsigned int flags, u32 ext_filter_mask) |
| 1051 | { | 1194 | { |
| 1052 | struct ifinfomsg *ifm; | 1195 | struct ifinfomsg *ifm; |
| 1053 | struct nlmsghdr *nlh; | 1196 | struct nlmsghdr *nlh; |
| 1054 | struct rtnl_link_stats64 temp; | 1197 | struct nlattr *af_spec; |
| 1055 | const struct rtnl_link_stats64 *stats; | ||
| 1056 | struct nlattr *attr, *af_spec; | ||
| 1057 | struct rtnl_af_ops *af_ops; | 1198 | struct rtnl_af_ops *af_ops; |
| 1058 | struct net_device *upper_dev = netdev_master_upper_dev_get(dev); | 1199 | struct net_device *upper_dev = netdev_master_upper_dev_get(dev); |
| 1059 | 1200 | ||
| @@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 1096 | nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) | 1237 | nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) |
| 1097 | goto nla_put_failure; | 1238 | goto nla_put_failure; |
| 1098 | 1239 | ||
| 1099 | if (1) { | 1240 | if (rtnl_fill_link_ifmap(skb, dev)) |
| 1100 | struct rtnl_link_ifmap map = { | 1241 | goto nla_put_failure; |
| 1101 | .mem_start = dev->mem_start, | ||
| 1102 | .mem_end = dev->mem_end, | ||
| 1103 | .base_addr = dev->base_addr, | ||
| 1104 | .irq = dev->irq, | ||
| 1105 | .dma = dev->dma, | ||
| 1106 | .port = dev->if_port, | ||
| 1107 | }; | ||
| 1108 | if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) | ||
| 1109 | goto nla_put_failure; | ||
| 1110 | } | ||
| 1111 | 1242 | ||
| 1112 | if (dev->addr_len) { | 1243 | if (dev->addr_len) { |
| 1113 | if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || | 1244 | if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || |
| @@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 1124 | if (rtnl_phys_switch_id_fill(skb, dev)) | 1255 | if (rtnl_phys_switch_id_fill(skb, dev)) |
| 1125 | goto nla_put_failure; | 1256 | goto nla_put_failure; |
| 1126 | 1257 | ||
| 1127 | attr = nla_reserve(skb, IFLA_STATS, | 1258 | if (rtnl_fill_stats(skb, dev)) |
| 1128 | sizeof(struct rtnl_link_stats)); | ||
| 1129 | if (attr == NULL) | ||
| 1130 | goto nla_put_failure; | ||
| 1131 | |||
| 1132 | stats = dev_get_stats(dev, &temp); | ||
| 1133 | copy_rtnl_link_stats(nla_data(attr), stats); | ||
| 1134 | |||
| 1135 | attr = nla_reserve(skb, IFLA_STATS64, | ||
| 1136 | sizeof(struct rtnl_link_stats64)); | ||
| 1137 | if (attr == NULL) | ||
| 1138 | goto nla_put_failure; | 1259 | goto nla_put_failure; |
| 1139 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
| 1140 | 1260 | ||
| 1141 | if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && | 1261 | if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && |
| 1142 | nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) | 1262 | nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) |
| 1143 | goto nla_put_failure; | 1263 | goto nla_put_failure; |
| 1144 | 1264 | ||
| 1145 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent | 1265 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent && |
| 1146 | && (ext_filter_mask & RTEXT_FILTER_VF)) { | 1266 | ext_filter_mask & RTEXT_FILTER_VF) { |
| 1147 | int i; | 1267 | int i; |
| 1148 | 1268 | struct nlattr *vfinfo; | |
| 1149 | struct nlattr *vfinfo, *vf, *vfstats; | ||
| 1150 | int num_vfs = dev_num_vf(dev->dev.parent); | 1269 | int num_vfs = dev_num_vf(dev->dev.parent); |
| 1151 | 1270 | ||
| 1152 | vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); | 1271 | vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); |
| 1153 | if (!vfinfo) | 1272 | if (!vfinfo) |
| 1154 | goto nla_put_failure; | 1273 | goto nla_put_failure; |
| 1155 | for (i = 0; i < num_vfs; i++) { | 1274 | for (i = 0; i < num_vfs; i++) { |
| 1156 | struct ifla_vf_info ivi; | 1275 | if (rtnl_fill_vfinfo(skb, dev, i, vfinfo)) |
| 1157 | struct ifla_vf_mac vf_mac; | ||
| 1158 | struct ifla_vf_vlan vf_vlan; | ||
| 1159 | struct ifla_vf_rate vf_rate; | ||
| 1160 | struct ifla_vf_tx_rate vf_tx_rate; | ||
| 1161 | struct ifla_vf_spoofchk vf_spoofchk; | ||
| 1162 | struct ifla_vf_link_state vf_linkstate; | ||
| 1163 | struct ifla_vf_rss_query_en vf_rss_query_en; | ||
| 1164 | struct ifla_vf_stats vf_stats; | ||
| 1165 | struct ifla_vf_trust vf_trust; | ||
| 1166 | |||
| 1167 | /* | ||
| 1168 | * Not all SR-IOV capable drivers support the | ||
| 1169 | * spoofcheck and "RSS query enable" query. Preset to | ||
| 1170 | * -1 so the user space tool can detect that the driver | ||
| 1171 | * didn't report anything. | ||
| 1172 | */ | ||
| 1173 | ivi.spoofchk = -1; | ||
| 1174 | ivi.rss_query_en = -1; | ||
| 1175 | ivi.trusted = -1; | ||
| 1176 | memset(ivi.mac, 0, sizeof(ivi.mac)); | ||
| 1177 | /* The default value for VF link state is "auto" | ||
| 1178 | * IFLA_VF_LINK_STATE_AUTO which equals zero | ||
| 1179 | */ | ||
| 1180 | ivi.linkstate = 0; | ||
| 1181 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | ||
| 1182 | break; | ||
| 1183 | vf_mac.vf = | ||
| 1184 | vf_vlan.vf = | ||
| 1185 | vf_rate.vf = | ||
| 1186 | vf_tx_rate.vf = | ||
| 1187 | vf_spoofchk.vf = | ||
| 1188 | vf_linkstate.vf = | ||
| 1189 | vf_rss_query_en.vf = | ||
| 1190 | vf_trust.vf = ivi.vf; | ||
| 1191 | |||
| 1192 | memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); | ||
| 1193 | vf_vlan.vlan = ivi.vlan; | ||
| 1194 | vf_vlan.qos = ivi.qos; | ||
| 1195 | vf_tx_rate.rate = ivi.max_tx_rate; | ||
| 1196 | vf_rate.min_tx_rate = ivi.min_tx_rate; | ||
| 1197 | vf_rate.max_tx_rate = ivi.max_tx_rate; | ||
| 1198 | vf_spoofchk.setting = ivi.spoofchk; | ||
| 1199 | vf_linkstate.link_state = ivi.linkstate; | ||
| 1200 | vf_rss_query_en.setting = ivi.rss_query_en; | ||
| 1201 | vf_trust.setting = ivi.trusted; | ||
| 1202 | vf = nla_nest_start(skb, IFLA_VF_INFO); | ||
| 1203 | if (!vf) { | ||
| 1204 | nla_nest_cancel(skb, vfinfo); | ||
| 1205 | goto nla_put_failure; | ||
| 1206 | } | ||
| 1207 | if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || | ||
| 1208 | nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || | ||
| 1209 | nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), | ||
| 1210 | &vf_rate) || | ||
| 1211 | nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), | ||
| 1212 | &vf_tx_rate) || | ||
| 1213 | nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), | ||
| 1214 | &vf_spoofchk) || | ||
| 1215 | nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), | ||
| 1216 | &vf_linkstate) || | ||
| 1217 | nla_put(skb, IFLA_VF_RSS_QUERY_EN, | ||
| 1218 | sizeof(vf_rss_query_en), | ||
| 1219 | &vf_rss_query_en) || | ||
| 1220 | nla_put(skb, IFLA_VF_TRUST, | ||
| 1221 | sizeof(vf_trust), &vf_trust)) | ||
| 1222 | goto nla_put_failure; | 1276 | goto nla_put_failure; |
| 1223 | memset(&vf_stats, 0, sizeof(vf_stats)); | ||
| 1224 | if (dev->netdev_ops->ndo_get_vf_stats) | ||
| 1225 | dev->netdev_ops->ndo_get_vf_stats(dev, i, | ||
| 1226 | &vf_stats); | ||
| 1227 | vfstats = nla_nest_start(skb, IFLA_VF_STATS); | ||
| 1228 | if (!vfstats) { | ||
| 1229 | nla_nest_cancel(skb, vf); | ||
| 1230 | nla_nest_cancel(skb, vfinfo); | ||
| 1231 | goto nla_put_failure; | ||
| 1232 | } | ||
| 1233 | if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS, | ||
| 1234 | vf_stats.rx_packets) || | ||
| 1235 | nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS, | ||
| 1236 | vf_stats.tx_packets) || | ||
| 1237 | nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES, | ||
| 1238 | vf_stats.rx_bytes) || | ||
| 1239 | nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES, | ||
| 1240 | vf_stats.tx_bytes) || | ||
| 1241 | nla_put_u64(skb, IFLA_VF_STATS_BROADCAST, | ||
| 1242 | vf_stats.broadcast) || | ||
| 1243 | nla_put_u64(skb, IFLA_VF_STATS_MULTICAST, | ||
| 1244 | vf_stats.multicast)) | ||
| 1245 | goto nla_put_failure; | ||
| 1246 | nla_nest_end(skb, vfstats); | ||
| 1247 | nla_nest_end(skb, vf); | ||
| 1248 | } | 1277 | } |
| 1278 | |||
| 1249 | nla_nest_end(skb, vfinfo); | 1279 | nla_nest_end(skb, vfinfo); |
| 1250 | } | 1280 | } |
| 1251 | 1281 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index aa41e6dd6429..152b9c70e252 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -4268,7 +4268,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | |||
| 4268 | return NULL; | 4268 | return NULL; |
| 4269 | } | 4269 | } |
| 4270 | 4270 | ||
| 4271 | memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); | 4271 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len, |
| 4272 | 2 * ETH_ALEN); | ||
| 4272 | skb->mac_header += VLAN_HLEN; | 4273 | skb->mac_header += VLAN_HLEN; |
| 4273 | return skb; | 4274 | return skb; |
| 4274 | } | 4275 | } |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 1feb15f23de8..46b9c887bede 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
| @@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data) | |||
| 563 | int max_retries, thresh; | 563 | int max_retries, thresh; |
| 564 | u8 defer_accept; | 564 | u8 defer_accept; |
| 565 | 565 | ||
| 566 | if (sk_listener->sk_state != TCP_LISTEN) | 566 | if (sk_state_load(sk_listener) != TCP_LISTEN) |
| 567 | goto drop; | 567 | goto drop; |
| 568 | 568 | ||
| 569 | max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; | 569 | max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; |
| @@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog) | |||
| 749 | * It is OK, because this socket enters to hash table only | 749 | * It is OK, because this socket enters to hash table only |
| 750 | * after validation is complete. | 750 | * after validation is complete. |
| 751 | */ | 751 | */ |
| 752 | sk->sk_state = TCP_LISTEN; | 752 | sk_state_store(sk, TCP_LISTEN); |
| 753 | if (!sk->sk_prot->get_port(sk, inet->inet_num)) { | 753 | if (!sk->sk_prot->get_port(sk, inet->inet_num)) { |
| 754 | inet->inet_sport = htons(inet->inet_num); | 754 | inet->inet_sport = htons(inet->inet_num); |
| 755 | 755 | ||
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 657d2307f031..b3ca21b2ba9b 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
| @@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct, | |||
| 45 | struct net *net = nf_ct_net(ct); | 45 | struct net *net = nf_ct_net(ct); |
| 46 | const struct nf_conn *master = ct->master; | 46 | const struct nf_conn *master = ct->master; |
| 47 | struct nf_conntrack_expect *other_exp; | 47 | struct nf_conntrack_expect *other_exp; |
| 48 | struct nf_conntrack_tuple t; | 48 | struct nf_conntrack_tuple t = {}; |
| 49 | const struct nf_ct_pptp_master *ct_pptp_info; | 49 | const struct nf_ct_pptp_master *ct_pptp_info; |
| 50 | const struct nf_nat_pptp *nat_pptp_info; | 50 | const struct nf_nat_pptp *nat_pptp_info; |
| 51 | struct nf_nat_range range; | 51 | struct nf_nat_range range; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 8c0d0bdc2a7c..63e5be0abd86 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
| @@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, | |||
| 406 | ip_select_ident(net, skb, NULL); | 406 | ip_select_ident(net, skb, NULL); |
| 407 | 407 | ||
| 408 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 408 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
| 409 | skb->transport_header += iphlen; | ||
| 410 | if (iph->protocol == IPPROTO_ICMP && | ||
| 411 | length >= iphlen + sizeof(struct icmphdr)) | ||
| 412 | icmp_out_count(net, ((struct icmphdr *) | ||
| 413 | skb_transport_header(skb))->type); | ||
| 409 | } | 414 | } |
| 410 | if (iph->protocol == IPPROTO_ICMP) | ||
| 411 | icmp_out_count(net, ((struct icmphdr *) | ||
| 412 | skb_transport_header(skb))->type); | ||
| 413 | 415 | ||
| 414 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, | 416 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, |
| 415 | net, sk, skb, NULL, rt->dst.dev, | 417 | net, sk, skb, NULL, rt->dst.dev, |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0cfa7c0c1e80..c1728771cf89 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
| 451 | unsigned int mask; | 451 | unsigned int mask; |
| 452 | struct sock *sk = sock->sk; | 452 | struct sock *sk = sock->sk; |
| 453 | const struct tcp_sock *tp = tcp_sk(sk); | 453 | const struct tcp_sock *tp = tcp_sk(sk); |
| 454 | int state; | ||
| 454 | 455 | ||
| 455 | sock_rps_record_flow(sk); | 456 | sock_rps_record_flow(sk); |
| 456 | 457 | ||
| 457 | sock_poll_wait(file, sk_sleep(sk), wait); | 458 | sock_poll_wait(file, sk_sleep(sk), wait); |
| 458 | if (sk->sk_state == TCP_LISTEN) | 459 | |
| 460 | state = sk_state_load(sk); | ||
| 461 | if (state == TCP_LISTEN) | ||
| 459 | return inet_csk_listen_poll(sk); | 462 | return inet_csk_listen_poll(sk); |
| 460 | 463 | ||
| 461 | /* Socket is not locked. We are protected from async events | 464 | /* Socket is not locked. We are protected from async events |
| @@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
| 492 | * NOTE. Check for TCP_CLOSE is added. The goal is to prevent | 495 | * NOTE. Check for TCP_CLOSE is added. The goal is to prevent |
| 493 | * blocking on fresh not-connected or disconnected socket. --ANK | 496 | * blocking on fresh not-connected or disconnected socket. --ANK |
| 494 | */ | 497 | */ |
| 495 | if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) | 498 | if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) |
| 496 | mask |= POLLHUP; | 499 | mask |= POLLHUP; |
| 497 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 500 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
| 498 | mask |= POLLIN | POLLRDNORM | POLLRDHUP; | 501 | mask |= POLLIN | POLLRDNORM | POLLRDHUP; |
| 499 | 502 | ||
| 500 | /* Connected or passive Fast Open socket? */ | 503 | /* Connected or passive Fast Open socket? */ |
| 501 | if (sk->sk_state != TCP_SYN_SENT && | 504 | if (state != TCP_SYN_SENT && |
| 502 | (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { | 505 | (state != TCP_SYN_RECV || tp->fastopen_rsk)) { |
| 503 | int target = sock_rcvlowat(sk, 0, INT_MAX); | 506 | int target = sock_rcvlowat(sk, 0, INT_MAX); |
| 504 | 507 | ||
| 505 | if (tp->urg_seq == tp->copied_seq && | 508 | if (tp->urg_seq == tp->copied_seq && |
| @@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
| 507 | tp->urg_data) | 510 | tp->urg_data) |
| 508 | target++; | 511 | target++; |
| 509 | 512 | ||
| 510 | /* Potential race condition. If read of tp below will | ||
| 511 | * escape above sk->sk_state, we can be illegally awaken | ||
| 512 | * in SYN_* states. */ | ||
| 513 | if (tp->rcv_nxt - tp->copied_seq >= target) | 513 | if (tp->rcv_nxt - tp->copied_seq >= target) |
| 514 | mask |= POLLIN | POLLRDNORM; | 514 | mask |= POLLIN | POLLRDNORM; |
| 515 | 515 | ||
| @@ -1934,7 +1934,7 @@ void tcp_set_state(struct sock *sk, int state) | |||
| 1934 | /* Change state AFTER socket is unhashed to avoid closed | 1934 | /* Change state AFTER socket is unhashed to avoid closed |
| 1935 | * socket sitting in hash tables. | 1935 | * socket sitting in hash tables. |
| 1936 | */ | 1936 | */ |
| 1937 | sk->sk_state = state; | 1937 | sk_state_store(sk, state); |
| 1938 | 1938 | ||
| 1939 | #ifdef STATE_TRACE | 1939 | #ifdef STATE_TRACE |
| 1940 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); | 1940 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); |
| @@ -2644,7 +2644,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
| 2644 | if (sk->sk_type != SOCK_STREAM) | 2644 | if (sk->sk_type != SOCK_STREAM) |
| 2645 | return; | 2645 | return; |
| 2646 | 2646 | ||
| 2647 | info->tcpi_state = sk->sk_state; | 2647 | info->tcpi_state = sk_state_load(sk); |
| 2648 | |||
| 2648 | info->tcpi_ca_state = icsk->icsk_ca_state; | 2649 | info->tcpi_ca_state = icsk->icsk_ca_state; |
| 2649 | info->tcpi_retransmits = icsk->icsk_retransmits; | 2650 | info->tcpi_retransmits = icsk->icsk_retransmits; |
| 2650 | info->tcpi_probes = icsk->icsk_probes_out; | 2651 | info->tcpi_probes = icsk->icsk_probes_out; |
| @@ -2672,7 +2673,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
| 2672 | info->tcpi_snd_mss = tp->mss_cache; | 2673 | info->tcpi_snd_mss = tp->mss_cache; |
| 2673 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; | 2674 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
| 2674 | 2675 | ||
| 2675 | if (sk->sk_state == TCP_LISTEN) { | 2676 | if (info->tcpi_state == TCP_LISTEN) { |
| 2676 | info->tcpi_unacked = sk->sk_ack_backlog; | 2677 | info->tcpi_unacked = sk->sk_ack_backlog; |
| 2677 | info->tcpi_sacked = sk->sk_max_ack_backlog; | 2678 | info->tcpi_sacked = sk->sk_max_ack_backlog; |
| 2678 | } else { | 2679 | } else { |
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 479f34946177..b31604086edd 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
| @@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, | |||
| 21 | { | 21 | { |
| 22 | struct tcp_info *info = _info; | 22 | struct tcp_info *info = _info; |
| 23 | 23 | ||
| 24 | if (sk->sk_state == TCP_LISTEN) { | 24 | if (sk_state_load(sk) == TCP_LISTEN) { |
| 25 | r->idiag_rqueue = sk->sk_ack_backlog; | 25 | r->idiag_rqueue = sk->sk_ack_backlog; |
| 26 | r->idiag_wqueue = sk->sk_max_ack_backlog; | 26 | r->idiag_wqueue = sk->sk_max_ack_backlog; |
| 27 | } else if (sk->sk_type == SOCK_STREAM) { | 27 | } else if (sk->sk_type == SOCK_STREAM) { |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 950e28c0cdf2..ba09016d1bfd 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) | |||
| 2158 | __u16 destp = ntohs(inet->inet_dport); | 2158 | __u16 destp = ntohs(inet->inet_dport); |
| 2159 | __u16 srcp = ntohs(inet->inet_sport); | 2159 | __u16 srcp = ntohs(inet->inet_sport); |
| 2160 | int rx_queue; | 2160 | int rx_queue; |
| 2161 | int state; | ||
| 2161 | 2162 | ||
| 2162 | if (icsk->icsk_pending == ICSK_TIME_RETRANS || | 2163 | if (icsk->icsk_pending == ICSK_TIME_RETRANS || |
| 2163 | icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || | 2164 | icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || |
| @@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) | |||
| 2175 | timer_expires = jiffies; | 2176 | timer_expires = jiffies; |
| 2176 | } | 2177 | } |
| 2177 | 2178 | ||
| 2178 | if (sk->sk_state == TCP_LISTEN) | 2179 | state = sk_state_load(sk); |
| 2180 | if (state == TCP_LISTEN) | ||
| 2179 | rx_queue = sk->sk_ack_backlog; | 2181 | rx_queue = sk->sk_ack_backlog; |
| 2180 | else | 2182 | else |
| 2181 | /* | 2183 | /* Because we don't lock the socket, |
| 2182 | * because we dont lock socket, we might find a transient negative value | 2184 | * we might find a transient negative value. |
| 2183 | */ | 2185 | */ |
| 2184 | rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); | 2186 | rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); |
| 2185 | 2187 | ||
| 2186 | seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " | 2188 | seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " |
| 2187 | "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", | 2189 | "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", |
| 2188 | i, src, srcp, dest, destp, sk->sk_state, | 2190 | i, src, srcp, dest, destp, state, |
| 2189 | tp->write_seq - tp->snd_una, | 2191 | tp->write_seq - tp->snd_una, |
| 2190 | rx_queue, | 2192 | rx_queue, |
| 2191 | timer_active, | 2193 | timer_active, |
| @@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) | |||
| 2199 | jiffies_to_clock_t(icsk->icsk_ack.ato), | 2201 | jiffies_to_clock_t(icsk->icsk_ack.ato), |
| 2200 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, | 2202 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, |
| 2201 | tp->snd_cwnd, | 2203 | tp->snd_cwnd, |
| 2202 | sk->sk_state == TCP_LISTEN ? | 2204 | state == TCP_LISTEN ? |
| 2203 | (fastopenq ? fastopenq->max_qlen : 0) : | 2205 | fastopenq->max_qlen : |
| 2204 | (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); | 2206 | (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); |
| 2205 | } | 2207 | } |
| 2206 | 2208 | ||
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 124338a39e29..5ee56d0a8699 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -1651,7 +1651,6 @@ out: | |||
| 1651 | if (!err) { | 1651 | if (!err) { |
| 1652 | ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); | 1652 | ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); |
| 1653 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); | 1653 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); |
| 1654 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len); | ||
| 1655 | } else { | 1654 | } else { |
| 1656 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); | 1655 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); |
| 1657 | } | 1656 | } |
| @@ -2015,7 +2014,6 @@ out: | |||
| 2015 | if (!err) { | 2014 | if (!err) { |
| 2016 | ICMP6MSGOUT_INC_STATS(net, idev, type); | 2015 | ICMP6MSGOUT_INC_STATS(net, idev, type); |
| 2017 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); | 2016 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); |
| 2018 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len); | ||
| 2019 | } else | 2017 | } else |
| 2020 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); | 2018 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); |
| 2021 | 2019 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c8bc9b4ac328..6f01fe122abd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -404,6 +404,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
| 404 | } | 404 | } |
| 405 | } | 405 | } |
| 406 | 406 | ||
| 407 | static bool __rt6_check_expired(const struct rt6_info *rt) | ||
| 408 | { | ||
| 409 | if (rt->rt6i_flags & RTF_EXPIRES) | ||
| 410 | return time_after(jiffies, rt->dst.expires); | ||
| 411 | else | ||
| 412 | return false; | ||
| 413 | } | ||
| 414 | |||
| 407 | static bool rt6_check_expired(const struct rt6_info *rt) | 415 | static bool rt6_check_expired(const struct rt6_info *rt) |
| 408 | { | 416 | { |
| 409 | if (rt->rt6i_flags & RTF_EXPIRES) { | 417 | if (rt->rt6i_flags & RTF_EXPIRES) { |
| @@ -1252,7 +1260,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie) | |||
| 1252 | 1260 | ||
| 1253 | static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) | 1261 | static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) |
| 1254 | { | 1262 | { |
| 1255 | if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && | 1263 | if (!__rt6_check_expired(rt) && |
| 1264 | rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && | ||
| 1256 | rt6_check((struct rt6_info *)(rt->dst.from), cookie)) | 1265 | rt6_check((struct rt6_info *)(rt->dst.from), cookie)) |
| 1257 | return &rt->dst; | 1266 | return &rt->dst; |
| 1258 | else | 1267 | else |
| @@ -1272,7 +1281,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
| 1272 | 1281 | ||
| 1273 | rt6_dst_from_metrics_check(rt); | 1282 | rt6_dst_from_metrics_check(rt); |
| 1274 | 1283 | ||
| 1275 | if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE)) | 1284 | if (rt->rt6i_flags & RTF_PCPU || |
| 1285 | (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from)) | ||
| 1276 | return rt6_dst_from_check(rt, cookie); | 1286 | return rt6_dst_from_check(rt, cookie); |
| 1277 | else | 1287 | else |
| 1278 | return rt6_check(rt, cookie); | 1288 | return rt6_check(rt, cookie); |
| @@ -1322,6 +1332,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) | |||
| 1322 | rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); | 1332 | rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); |
| 1323 | } | 1333 | } |
| 1324 | 1334 | ||
| 1335 | static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) | ||
| 1336 | { | ||
| 1337 | return !(rt->rt6i_flags & RTF_CACHE) && | ||
| 1338 | (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node); | ||
| 1339 | } | ||
| 1340 | |||
| 1325 | static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | 1341 | static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, |
| 1326 | const struct ipv6hdr *iph, u32 mtu) | 1342 | const struct ipv6hdr *iph, u32 mtu) |
| 1327 | { | 1343 | { |
| @@ -1335,7 +1351,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
| 1335 | if (mtu >= dst_mtu(dst)) | 1351 | if (mtu >= dst_mtu(dst)) |
| 1336 | return; | 1352 | return; |
| 1337 | 1353 | ||
| 1338 | if (rt6->rt6i_flags & RTF_CACHE) { | 1354 | if (!rt6_cache_allowed_for_pmtu(rt6)) { |
| 1339 | rt6_do_update_pmtu(rt6, mtu); | 1355 | rt6_do_update_pmtu(rt6, mtu); |
| 1340 | } else { | 1356 | } else { |
| 1341 | const struct in6_addr *daddr, *saddr; | 1357 | const struct in6_addr *daddr, *saddr; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5baa8e754e41..c5429a636f1a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1690,6 +1690,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1690 | const struct tcp_sock *tp = tcp_sk(sp); | 1690 | const struct tcp_sock *tp = tcp_sk(sp); |
| 1691 | const struct inet_connection_sock *icsk = inet_csk(sp); | 1691 | const struct inet_connection_sock *icsk = inet_csk(sp); |
| 1692 | const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; | 1692 | const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; |
| 1693 | int rx_queue; | ||
| 1694 | int state; | ||
| 1693 | 1695 | ||
| 1694 | dest = &sp->sk_v6_daddr; | 1696 | dest = &sp->sk_v6_daddr; |
| 1695 | src = &sp->sk_v6_rcv_saddr; | 1697 | src = &sp->sk_v6_rcv_saddr; |
| @@ -1710,6 +1712,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1710 | timer_expires = jiffies; | 1712 | timer_expires = jiffies; |
| 1711 | } | 1713 | } |
| 1712 | 1714 | ||
| 1715 | state = sk_state_load(sp); | ||
| 1716 | if (state == TCP_LISTEN) | ||
| 1717 | rx_queue = sp->sk_ack_backlog; | ||
| 1718 | else | ||
| 1719 | /* Because we don't lock the socket, | ||
| 1720 | * we might find a transient negative value. | ||
| 1721 | */ | ||
| 1722 | rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); | ||
| 1723 | |||
| 1713 | seq_printf(seq, | 1724 | seq_printf(seq, |
| 1714 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 1725 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
| 1715 | "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", | 1726 | "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", |
| @@ -1718,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1718 | src->s6_addr32[2], src->s6_addr32[3], srcp, | 1729 | src->s6_addr32[2], src->s6_addr32[3], srcp, |
| 1719 | dest->s6_addr32[0], dest->s6_addr32[1], | 1730 | dest->s6_addr32[0], dest->s6_addr32[1], |
| 1720 | dest->s6_addr32[2], dest->s6_addr32[3], destp, | 1731 | dest->s6_addr32[2], dest->s6_addr32[3], destp, |
| 1721 | sp->sk_state, | 1732 | state, |
| 1722 | tp->write_seq-tp->snd_una, | 1733 | tp->write_seq - tp->snd_una, |
| 1723 | (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), | 1734 | rx_queue, |
| 1724 | timer_active, | 1735 | timer_active, |
| 1725 | jiffies_delta_to_clock_t(timer_expires - jiffies), | 1736 | jiffies_delta_to_clock_t(timer_expires - jiffies), |
| 1726 | icsk->icsk_retransmits, | 1737 | icsk->icsk_retransmits, |
| @@ -1732,7 +1743,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1732 | jiffies_to_clock_t(icsk->icsk_ack.ato), | 1743 | jiffies_to_clock_t(icsk->icsk_ack.ato), |
| 1733 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, | 1744 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, |
| 1734 | tp->snd_cwnd, | 1745 | tp->snd_cwnd, |
| 1735 | sp->sk_state == TCP_LISTEN ? | 1746 | state == TCP_LISTEN ? |
| 1736 | fastopenq->max_qlen : | 1747 | fastopenq->max_qlen : |
| 1737 | (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) | 1748 | (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) |
| 1738 | ); | 1749 | ); |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index e22349ea7256..4692782b5280 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE | |||
| 869 | depends on IPV6 || IPV6=n | 869 | depends on IPV6 || IPV6=n |
| 870 | depends on !NF_CONNTRACK || NF_CONNTRACK | 870 | depends on !NF_CONNTRACK || NF_CONNTRACK |
| 871 | select NF_DUP_IPV4 | 871 | select NF_DUP_IPV4 |
| 872 | select NF_DUP_IPV6 if IP6_NF_IPTABLES | 872 | select NF_DUP_IPV6 if IP6_NF_IPTABLES != n |
| 873 | ---help--- | 873 | ---help--- |
| 874 | This option adds a "TEE" target with which a packet can be cloned and | 874 | This option adds a "TEE" target with which a packet can be cloned and |
| 875 | this clone be rerouted to another nexthop. | 875 | this clone be rerouted to another nexthop. |
| @@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY | |||
| 882 | depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n | 882 | depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n |
| 883 | depends on IP_NF_MANGLE | 883 | depends on IP_NF_MANGLE |
| 884 | select NF_DEFRAG_IPV4 | 884 | select NF_DEFRAG_IPV4 |
| 885 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | 885 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n |
| 886 | help | 886 | help |
| 887 | This option adds a `TPROXY' target, which is somewhat similar to | 887 | This option adds a `TPROXY' target, which is somewhat similar to |
| 888 | REDIRECT. It can only be used in the mangle table and is useful | 888 | REDIRECT. It can only be used in the mangle table and is useful |
| @@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET | |||
| 1375 | depends on IPV6 || IPV6=n | 1375 | depends on IPV6 || IPV6=n |
| 1376 | depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n | 1376 | depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n |
| 1377 | select NF_DEFRAG_IPV4 | 1377 | select NF_DEFRAG_IPV4 |
| 1378 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | 1378 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n |
| 1379 | help | 1379 | help |
| 1380 | This option adds a `socket' match, which can be used to match | 1380 | This option adds a `socket' match, which can be used to match |
| 1381 | packets for which a TCP or UDP socket lookup finds a valid socket. | 1381 | packets for which a TCP or UDP socket lookup finds a valid socket. |
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index d05e759ed0fa..b0bc475f641e 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #define mtype_gc IPSET_TOKEN(MTYPE, _gc) | 33 | #define mtype_gc IPSET_TOKEN(MTYPE, _gc) |
| 34 | #define mtype MTYPE | 34 | #define mtype MTYPE |
| 35 | 35 | ||
| 36 | #define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id)) | 36 | #define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id))) |
| 37 | 37 | ||
| 38 | static void | 38 | static void |
| 39 | mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) | 39 | mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) |
| @@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set) | |||
| 67 | del_timer_sync(&map->gc); | 67 | del_timer_sync(&map->gc); |
| 68 | 68 | ||
| 69 | ip_set_free(map->members); | 69 | ip_set_free(map->members); |
| 70 | if (set->dsize) { | 70 | if (set->dsize && set->extensions & IPSET_EXT_DESTROY) |
| 71 | if (set->extensions & IPSET_EXT_DESTROY) | 71 | mtype_ext_cleanup(set); |
| 72 | mtype_ext_cleanup(set); | 72 | ip_set_free(map); |
| 73 | ip_set_free(map->extensions); | ||
| 74 | } | ||
| 75 | kfree(map); | ||
| 76 | 73 | ||
| 77 | set->data = NULL; | 74 | set->data = NULL; |
| 78 | } | 75 | } |
| @@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) | |||
| 92 | { | 89 | { |
| 93 | const struct mtype *map = set->data; | 90 | const struct mtype *map = set->data; |
| 94 | struct nlattr *nested; | 91 | struct nlattr *nested; |
| 92 | size_t memsize = sizeof(*map) + map->memsize; | ||
| 95 | 93 | ||
| 96 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); | 94 | nested = ipset_nest_start(skb, IPSET_ATTR_DATA); |
| 97 | if (!nested) | 95 | if (!nested) |
| 98 | goto nla_put_failure; | 96 | goto nla_put_failure; |
| 99 | if (mtype_do_head(skb, map) || | 97 | if (mtype_do_head(skb, map) || |
| 100 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 98 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || |
| 101 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, | 99 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) |
| 102 | htonl(sizeof(*map) + | ||
| 103 | map->memsize + | ||
| 104 | set->dsize * map->elements))) | ||
| 105 | goto nla_put_failure; | 100 | goto nla_put_failure; |
| 106 | if (unlikely(ip_set_put_flags(skb, set))) | 101 | if (unlikely(ip_set_put_flags(skb, set))) |
| 107 | goto nla_put_failure; | 102 | goto nla_put_failure; |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index 64a564334418..4783efff0bde 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c | |||
| @@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip"); | |||
| 41 | /* Type structure */ | 41 | /* Type structure */ |
| 42 | struct bitmap_ip { | 42 | struct bitmap_ip { |
| 43 | void *members; /* the set members */ | 43 | void *members; /* the set members */ |
| 44 | void *extensions; /* data extensions */ | ||
| 45 | u32 first_ip; /* host byte order, included in range */ | 44 | u32 first_ip; /* host byte order, included in range */ |
| 46 | u32 last_ip; /* host byte order, included in range */ | 45 | u32 last_ip; /* host byte order, included in range */ |
| 47 | u32 elements; /* number of max elements in the set */ | 46 | u32 elements; /* number of max elements in the set */ |
| @@ -49,6 +48,8 @@ struct bitmap_ip { | |||
| 49 | size_t memsize; /* members size */ | 48 | size_t memsize; /* members size */ |
| 50 | u8 netmask; /* subnet netmask */ | 49 | u8 netmask; /* subnet netmask */ |
| 51 | struct timer_list gc; /* garbage collection */ | 50 | struct timer_list gc; /* garbage collection */ |
| 51 | unsigned char extensions[0] /* data extensions */ | ||
| 52 | __aligned(__alignof__(u64)); | ||
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 54 | /* ADT structure for generic function args */ | 55 | /* ADT structure for generic function args */ |
| @@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map, | |||
| 224 | map->members = ip_set_alloc(map->memsize); | 225 | map->members = ip_set_alloc(map->memsize); |
| 225 | if (!map->members) | 226 | if (!map->members) |
| 226 | return false; | 227 | return false; |
| 227 | if (set->dsize) { | ||
| 228 | map->extensions = ip_set_alloc(set->dsize * elements); | ||
| 229 | if (!map->extensions) { | ||
| 230 | kfree(map->members); | ||
| 231 | return false; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | map->first_ip = first_ip; | 228 | map->first_ip = first_ip; |
| 235 | map->last_ip = last_ip; | 229 | map->last_ip = last_ip; |
| 236 | map->elements = elements; | 230 | map->elements = elements; |
| @@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], | |||
| 316 | pr_debug("hosts %u, elements %llu\n", | 310 | pr_debug("hosts %u, elements %llu\n", |
| 317 | hosts, (unsigned long long)elements); | 311 | hosts, (unsigned long long)elements); |
| 318 | 312 | ||
| 319 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 313 | set->dsize = ip_set_elem_len(set, tb, 0, 0); |
| 314 | map = ip_set_alloc(sizeof(*map) + elements * set->dsize); | ||
| 320 | if (!map) | 315 | if (!map) |
| 321 | return -ENOMEM; | 316 | return -ENOMEM; |
| 322 | 317 | ||
| 323 | map->memsize = bitmap_bytes(0, elements - 1); | 318 | map->memsize = bitmap_bytes(0, elements - 1); |
| 324 | set->variant = &bitmap_ip; | 319 | set->variant = &bitmap_ip; |
| 325 | set->dsize = ip_set_elem_len(set, tb, 0); | ||
| 326 | if (!init_map_ip(set, map, first_ip, last_ip, | 320 | if (!init_map_ip(set, map, first_ip, last_ip, |
| 327 | elements, hosts, netmask)) { | 321 | elements, hosts, netmask)) { |
| 328 | kfree(map); | 322 | kfree(map); |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 1430535118fb..29dde208381d 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
| @@ -47,24 +47,26 @@ enum { | |||
| 47 | /* Type structure */ | 47 | /* Type structure */ |
| 48 | struct bitmap_ipmac { | 48 | struct bitmap_ipmac { |
| 49 | void *members; /* the set members */ | 49 | void *members; /* the set members */ |
| 50 | void *extensions; /* MAC + data extensions */ | ||
| 51 | u32 first_ip; /* host byte order, included in range */ | 50 | u32 first_ip; /* host byte order, included in range */ |
| 52 | u32 last_ip; /* host byte order, included in range */ | 51 | u32 last_ip; /* host byte order, included in range */ |
| 53 | u32 elements; /* number of max elements in the set */ | 52 | u32 elements; /* number of max elements in the set */ |
| 54 | size_t memsize; /* members size */ | 53 | size_t memsize; /* members size */ |
| 55 | struct timer_list gc; /* garbage collector */ | 54 | struct timer_list gc; /* garbage collector */ |
| 55 | unsigned char extensions[0] /* MAC + data extensions */ | ||
| 56 | __aligned(__alignof__(u64)); | ||
| 56 | }; | 57 | }; |
| 57 | 58 | ||
| 58 | /* ADT structure for generic function args */ | 59 | /* ADT structure for generic function args */ |
| 59 | struct bitmap_ipmac_adt_elem { | 60 | struct bitmap_ipmac_adt_elem { |
| 61 | unsigned char ether[ETH_ALEN] __aligned(2); | ||
| 60 | u16 id; | 62 | u16 id; |
| 61 | unsigned char *ether; | 63 | u16 add_mac; |
| 62 | }; | 64 | }; |
| 63 | 65 | ||
| 64 | struct bitmap_ipmac_elem { | 66 | struct bitmap_ipmac_elem { |
| 65 | unsigned char ether[ETH_ALEN]; | 67 | unsigned char ether[ETH_ALEN]; |
| 66 | unsigned char filled; | 68 | unsigned char filled; |
| 67 | } __attribute__ ((aligned)); | 69 | } __aligned(__alignof__(u64)); |
| 68 | 70 | ||
| 69 | static inline u32 | 71 | static inline u32 |
| 70 | ip_to_id(const struct bitmap_ipmac *m, u32 ip) | 72 | ip_to_id(const struct bitmap_ipmac *m, u32 ip) |
| @@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip) | |||
| 72 | return ip - m->first_ip; | 74 | return ip - m->first_ip; |
| 73 | } | 75 | } |
| 74 | 76 | ||
| 75 | static inline struct bitmap_ipmac_elem * | 77 | #define get_elem(extensions, id, dsize) \ |
| 76 | get_elem(void *extensions, u16 id, size_t dsize) | 78 | (struct bitmap_ipmac_elem *)(extensions + (id) * (dsize)) |
| 77 | { | 79 | |
| 78 | return (struct bitmap_ipmac_elem *)(extensions + id * dsize); | 80 | #define get_const_elem(extensions, id, dsize) \ |
| 79 | } | 81 | (const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize)) |
| 80 | 82 | ||
| 81 | /* Common functions */ | 83 | /* Common functions */ |
| 82 | 84 | ||
| @@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e, | |||
| 88 | 90 | ||
| 89 | if (!test_bit(e->id, map->members)) | 91 | if (!test_bit(e->id, map->members)) |
| 90 | return 0; | 92 | return 0; |
| 91 | elem = get_elem(map->extensions, e->id, dsize); | 93 | elem = get_const_elem(map->extensions, e->id, dsize); |
| 92 | if (elem->filled == MAC_FILLED) | 94 | if (e->add_mac && elem->filled == MAC_FILLED) |
| 93 | return !e->ether || | 95 | return ether_addr_equal(e->ether, elem->ether); |
| 94 | ether_addr_equal(e->ether, elem->ether); | ||
| 95 | /* Trigger kernel to fill out the ethernet address */ | 96 | /* Trigger kernel to fill out the ethernet address */ |
| 96 | return -EAGAIN; | 97 | return -EAGAIN; |
| 97 | } | 98 | } |
| @@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize) | |||
| 103 | 104 | ||
| 104 | if (!test_bit(id, map->members)) | 105 | if (!test_bit(id, map->members)) |
| 105 | return 0; | 106 | return 0; |
| 106 | elem = get_elem(map->extensions, id, dsize); | 107 | elem = get_const_elem(map->extensions, id, dsize); |
| 107 | /* Timer not started for the incomplete elements */ | 108 | /* Timer not started for the incomplete elements */ |
| 108 | return elem->filled == MAC_FILLED; | 109 | return elem->filled == MAC_FILLED; |
| 109 | } | 110 | } |
| @@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout, | |||
| 133 | * and we can reuse it later when MAC is filled out, | 134 | * and we can reuse it later when MAC is filled out, |
| 134 | * possibly by the kernel | 135 | * possibly by the kernel |
| 135 | */ | 136 | */ |
| 136 | if (e->ether) | 137 | if (e->add_mac) |
| 137 | ip_set_timeout_set(timeout, t); | 138 | ip_set_timeout_set(timeout, t); |
| 138 | else | 139 | else |
| 139 | *timeout = t; | 140 | *timeout = t; |
| @@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e, | |||
| 150 | elem = get_elem(map->extensions, e->id, dsize); | 151 | elem = get_elem(map->extensions, e->id, dsize); |
| 151 | if (test_bit(e->id, map->members)) { | 152 | if (test_bit(e->id, map->members)) { |
| 152 | if (elem->filled == MAC_FILLED) { | 153 | if (elem->filled == MAC_FILLED) { |
| 153 | if (e->ether && | 154 | if (e->add_mac && |
| 154 | (flags & IPSET_FLAG_EXIST) && | 155 | (flags & IPSET_FLAG_EXIST) && |
| 155 | !ether_addr_equal(e->ether, elem->ether)) { | 156 | !ether_addr_equal(e->ether, elem->ether)) { |
| 156 | /* memcpy isn't atomic */ | 157 | /* memcpy isn't atomic */ |
| @@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e, | |||
| 159 | ether_addr_copy(elem->ether, e->ether); | 160 | ether_addr_copy(elem->ether, e->ether); |
| 160 | } | 161 | } |
| 161 | return IPSET_ADD_FAILED; | 162 | return IPSET_ADD_FAILED; |
| 162 | } else if (!e->ether) | 163 | } else if (!e->add_mac) |
| 163 | /* Already added without ethernet address */ | 164 | /* Already added without ethernet address */ |
| 164 | return IPSET_ADD_FAILED; | 165 | return IPSET_ADD_FAILED; |
| 165 | /* Fill the MAC address and trigger the timer activation */ | 166 | /* Fill the MAC address and trigger the timer activation */ |
| @@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e, | |||
| 168 | ether_addr_copy(elem->ether, e->ether); | 169 | ether_addr_copy(elem->ether, e->ether); |
| 169 | elem->filled = MAC_FILLED; | 170 | elem->filled = MAC_FILLED; |
| 170 | return IPSET_ADD_START_STORED_TIMEOUT; | 171 | return IPSET_ADD_START_STORED_TIMEOUT; |
| 171 | } else if (e->ether) { | 172 | } else if (e->add_mac) { |
| 172 | /* We can store MAC too */ | 173 | /* We can store MAC too */ |
| 173 | ether_addr_copy(elem->ether, e->ether); | 174 | ether_addr_copy(elem->ether, e->ether); |
| 174 | elem->filled = MAC_FILLED; | 175 | elem->filled = MAC_FILLED; |
| @@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map, | |||
| 191 | u32 id, size_t dsize) | 192 | u32 id, size_t dsize) |
| 192 | { | 193 | { |
| 193 | const struct bitmap_ipmac_elem *elem = | 194 | const struct bitmap_ipmac_elem *elem = |
| 194 | get_elem(map->extensions, id, dsize); | 195 | get_const_elem(map->extensions, id, dsize); |
| 195 | 196 | ||
| 196 | return nla_put_ipaddr4(skb, IPSET_ATTR_IP, | 197 | return nla_put_ipaddr4(skb, IPSET_ATTR_IP, |
| 197 | htonl(map->first_ip + id)) || | 198 | htonl(map->first_ip + id)) || |
| @@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, | |||
| 213 | { | 214 | { |
| 214 | struct bitmap_ipmac *map = set->data; | 215 | struct bitmap_ipmac *map = set->data; |
| 215 | ipset_adtfn adtfn = set->variant->adt[adt]; | 216 | ipset_adtfn adtfn = set->variant->adt[adt]; |
| 216 | struct bitmap_ipmac_adt_elem e = { .id = 0 }; | 217 | struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 }; |
| 217 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); | 218 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); |
| 218 | u32 ip; | 219 | u32 ip; |
| 219 | 220 | ||
| @@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, | |||
| 231 | return -EINVAL; | 232 | return -EINVAL; |
| 232 | 233 | ||
| 233 | e.id = ip_to_id(map, ip); | 234 | e.id = ip_to_id(map, ip); |
| 234 | e.ether = eth_hdr(skb)->h_source; | 235 | memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); |
| 235 | 236 | ||
| 236 | return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); | 237 | return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); |
| 237 | } | 238 | } |
| @@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 265 | return -IPSET_ERR_BITMAP_RANGE; | 266 | return -IPSET_ERR_BITMAP_RANGE; |
| 266 | 267 | ||
| 267 | e.id = ip_to_id(map, ip); | 268 | e.id = ip_to_id(map, ip); |
| 268 | if (tb[IPSET_ATTR_ETHER]) | 269 | if (tb[IPSET_ATTR_ETHER]) { |
| 269 | e.ether = nla_data(tb[IPSET_ATTR_ETHER]); | 270 | memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN); |
| 270 | else | 271 | e.add_mac = 1; |
| 271 | e.ether = NULL; | 272 | } |
| 272 | |||
| 273 | ret = adtfn(set, &e, &ext, &ext, flags); | 273 | ret = adtfn(set, &e, &ext, &ext, flags); |
| 274 | 274 | ||
| 275 | return ip_set_eexist(ret, flags) ? 0 : ret; | 275 | return ip_set_eexist(ret, flags) ? 0 : ret; |
| @@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, | |||
| 300 | map->members = ip_set_alloc(map->memsize); | 300 | map->members = ip_set_alloc(map->memsize); |
| 301 | if (!map->members) | 301 | if (!map->members) |
| 302 | return false; | 302 | return false; |
| 303 | if (set->dsize) { | ||
| 304 | map->extensions = ip_set_alloc(set->dsize * elements); | ||
| 305 | if (!map->extensions) { | ||
| 306 | kfree(map->members); | ||
| 307 | return false; | ||
| 308 | } | ||
| 309 | } | ||
| 310 | map->first_ip = first_ip; | 303 | map->first_ip = first_ip; |
| 311 | map->last_ip = last_ip; | 304 | map->last_ip = last_ip; |
| 312 | map->elements = elements; | 305 | map->elements = elements; |
| @@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], | |||
| 361 | if (elements > IPSET_BITMAP_MAX_RANGE + 1) | 354 | if (elements > IPSET_BITMAP_MAX_RANGE + 1) |
| 362 | return -IPSET_ERR_BITMAP_RANGE_SIZE; | 355 | return -IPSET_ERR_BITMAP_RANGE_SIZE; |
| 363 | 356 | ||
| 364 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 357 | set->dsize = ip_set_elem_len(set, tb, |
| 358 | sizeof(struct bitmap_ipmac_elem), | ||
| 359 | __alignof__(struct bitmap_ipmac_elem)); | ||
| 360 | map = ip_set_alloc(sizeof(*map) + elements * set->dsize); | ||
| 365 | if (!map) | 361 | if (!map) |
| 366 | return -ENOMEM; | 362 | return -ENOMEM; |
| 367 | 363 | ||
| 368 | map->memsize = bitmap_bytes(0, elements - 1); | 364 | map->memsize = bitmap_bytes(0, elements - 1); |
| 369 | set->variant = &bitmap_ipmac; | 365 | set->variant = &bitmap_ipmac; |
| 370 | set->dsize = ip_set_elem_len(set, tb, | ||
| 371 | sizeof(struct bitmap_ipmac_elem)); | ||
| 372 | if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { | 366 | if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { |
| 373 | kfree(map); | 367 | kfree(map); |
| 374 | return -ENOMEM; | 368 | return -ENOMEM; |
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 5338ccd5da46..7f0c733358a4 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c | |||
| @@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port"); | |||
| 35 | /* Type structure */ | 35 | /* Type structure */ |
| 36 | struct bitmap_port { | 36 | struct bitmap_port { |
| 37 | void *members; /* the set members */ | 37 | void *members; /* the set members */ |
| 38 | void *extensions; /* data extensions */ | ||
| 39 | u16 first_port; /* host byte order, included in range */ | 38 | u16 first_port; /* host byte order, included in range */ |
| 40 | u16 last_port; /* host byte order, included in range */ | 39 | u16 last_port; /* host byte order, included in range */ |
| 41 | u32 elements; /* number of max elements in the set */ | 40 | u32 elements; /* number of max elements in the set */ |
| 42 | size_t memsize; /* members size */ | 41 | size_t memsize; /* members size */ |
| 43 | struct timer_list gc; /* garbage collection */ | 42 | struct timer_list gc; /* garbage collection */ |
| 43 | unsigned char extensions[0] /* data extensions */ | ||
| 44 | __aligned(__alignof__(u64)); | ||
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 46 | /* ADT structure for generic function args */ | 47 | /* ADT structure for generic function args */ |
| @@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map, | |||
| 209 | map->members = ip_set_alloc(map->memsize); | 210 | map->members = ip_set_alloc(map->memsize); |
| 210 | if (!map->members) | 211 | if (!map->members) |
| 211 | return false; | 212 | return false; |
| 212 | if (set->dsize) { | ||
| 213 | map->extensions = ip_set_alloc(set->dsize * map->elements); | ||
| 214 | if (!map->extensions) { | ||
| 215 | kfree(map->members); | ||
| 216 | return false; | ||
| 217 | } | ||
| 218 | } | ||
| 219 | map->first_port = first_port; | 213 | map->first_port = first_port; |
| 220 | map->last_port = last_port; | 214 | map->last_port = last_port; |
| 221 | set->timeout = IPSET_NO_TIMEOUT; | 215 | set->timeout = IPSET_NO_TIMEOUT; |
| @@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], | |||
| 232 | { | 226 | { |
| 233 | struct bitmap_port *map; | 227 | struct bitmap_port *map; |
| 234 | u16 first_port, last_port; | 228 | u16 first_port, last_port; |
| 229 | u32 elements; | ||
| 235 | 230 | ||
| 236 | if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || | 231 | if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || |
| 237 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || | 232 | !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || |
| @@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], | |||
| 248 | last_port = tmp; | 243 | last_port = tmp; |
| 249 | } | 244 | } |
| 250 | 245 | ||
| 251 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 246 | elements = last_port - first_port + 1; |
| 247 | set->dsize = ip_set_elem_len(set, tb, 0, 0); | ||
| 248 | map = ip_set_alloc(sizeof(*map) + elements * set->dsize); | ||
| 252 | if (!map) | 249 | if (!map) |
| 253 | return -ENOMEM; | 250 | return -ENOMEM; |
| 254 | 251 | ||
| 255 | map->elements = last_port - first_port + 1; | 252 | map->elements = elements; |
| 256 | map->memsize = bitmap_bytes(0, map->elements); | 253 | map->memsize = bitmap_bytes(0, map->elements); |
| 257 | set->variant = &bitmap_port; | 254 | set->variant = &bitmap_port; |
| 258 | set->dsize = ip_set_elem_len(set, tb, 0); | ||
| 259 | if (!init_map_port(set, map, first_port, last_port)) { | 255 | if (!init_map_port(set, map, first_port, last_port)) { |
| 260 | kfree(map); | 256 | kfree(map); |
| 261 | return -ENOMEM; | 257 | return -ENOMEM; |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 69ab9c2634e1..54f3d7cb23e6 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[]) | |||
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | size_t | 366 | size_t |
| 367 | ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len) | 367 | ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, |
| 368 | size_t align) | ||
| 368 | { | 369 | { |
| 369 | enum ip_set_ext_id id; | 370 | enum ip_set_ext_id id; |
| 370 | size_t offset = len; | ||
| 371 | u32 cadt_flags = 0; | 371 | u32 cadt_flags = 0; |
| 372 | 372 | ||
| 373 | if (tb[IPSET_ATTR_CADT_FLAGS]) | 373 | if (tb[IPSET_ATTR_CADT_FLAGS]) |
| 374 | cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); | 374 | cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); |
| 375 | if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) | 375 | if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) |
| 376 | set->flags |= IPSET_CREATE_FLAG_FORCEADD; | 376 | set->flags |= IPSET_CREATE_FLAG_FORCEADD; |
| 377 | if (!align) | ||
| 378 | align = 1; | ||
| 377 | for (id = 0; id < IPSET_EXT_ID_MAX; id++) { | 379 | for (id = 0; id < IPSET_EXT_ID_MAX; id++) { |
| 378 | if (!add_extension(id, cadt_flags, tb)) | 380 | if (!add_extension(id, cadt_flags, tb)) |
| 379 | continue; | 381 | continue; |
| 380 | offset = ALIGN(offset, ip_set_extensions[id].align); | 382 | len = ALIGN(len, ip_set_extensions[id].align); |
| 381 | set->offset[id] = offset; | 383 | set->offset[id] = len; |
| 382 | set->extensions |= ip_set_extensions[id].type; | 384 | set->extensions |= ip_set_extensions[id].type; |
| 383 | offset += ip_set_extensions[id].len; | 385 | len += ip_set_extensions[id].len; |
| 384 | } | 386 | } |
| 385 | return offset; | 387 | return ALIGN(len, align); |
| 386 | } | 388 | } |
| 387 | EXPORT_SYMBOL_GPL(ip_set_elem_len); | 389 | EXPORT_SYMBOL_GPL(ip_set_elem_len); |
| 388 | 390 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 691b54fcaf2a..e5336ab36d67 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
| @@ -72,8 +72,9 @@ struct hbucket { | |||
| 72 | DECLARE_BITMAP(used, AHASH_MAX_TUNED); | 72 | DECLARE_BITMAP(used, AHASH_MAX_TUNED); |
| 73 | u8 size; /* size of the array */ | 73 | u8 size; /* size of the array */ |
| 74 | u8 pos; /* position of the first free entry */ | 74 | u8 pos; /* position of the first free entry */ |
| 75 | unsigned char value[0]; /* the array of the values */ | 75 | unsigned char value[0] /* the array of the values */ |
| 76 | } __attribute__ ((aligned)); | 76 | __aligned(__alignof__(u64)); |
| 77 | }; | ||
| 77 | 78 | ||
| 78 | /* The hash table: the table size stored here in order to make resizing easy */ | 79 | /* The hash table: the table size stored here in order to make resizing easy */ |
| 79 | struct htable { | 80 | struct htable { |
| @@ -475,7 +476,7 @@ static void | |||
| 475 | mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) | 476 | mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) |
| 476 | { | 477 | { |
| 477 | struct htable *t; | 478 | struct htable *t; |
| 478 | struct hbucket *n; | 479 | struct hbucket *n, *tmp; |
| 479 | struct mtype_elem *data; | 480 | struct mtype_elem *data; |
| 480 | u32 i, j, d; | 481 | u32 i, j, d; |
| 481 | #ifdef IP_SET_HASH_WITH_NETS | 482 | #ifdef IP_SET_HASH_WITH_NETS |
| @@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) | |||
| 510 | } | 511 | } |
| 511 | } | 512 | } |
| 512 | if (d >= AHASH_INIT_SIZE) { | 513 | if (d >= AHASH_INIT_SIZE) { |
| 513 | struct hbucket *tmp = kzalloc(sizeof(*tmp) + | 514 | if (d >= n->size) { |
| 514 | (n->size - AHASH_INIT_SIZE) * dsize, | 515 | rcu_assign_pointer(hbucket(t, i), NULL); |
| 515 | GFP_ATOMIC); | 516 | kfree_rcu(n, rcu); |
| 517 | continue; | ||
| 518 | } | ||
| 519 | tmp = kzalloc(sizeof(*tmp) + | ||
| 520 | (n->size - AHASH_INIT_SIZE) * dsize, | ||
| 521 | GFP_ATOMIC); | ||
| 516 | if (!tmp) | 522 | if (!tmp) |
| 517 | /* Still try to delete expired elements */ | 523 | /* Still try to delete expired elements */ |
| 518 | continue; | 524 | continue; |
| @@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) | |||
| 522 | continue; | 528 | continue; |
| 523 | data = ahash_data(n, j, dsize); | 529 | data = ahash_data(n, j, dsize); |
| 524 | memcpy(tmp->value + d * dsize, data, dsize); | 530 | memcpy(tmp->value + d * dsize, data, dsize); |
| 525 | set_bit(j, tmp->used); | 531 | set_bit(d, tmp->used); |
| 526 | d++; | 532 | d++; |
| 527 | } | 533 | } |
| 528 | tmp->pos = d; | 534 | tmp->pos = d; |
| @@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, | |||
| 1323 | #endif | 1329 | #endif |
| 1324 | set->variant = &IPSET_TOKEN(HTYPE, 4_variant); | 1330 | set->variant = &IPSET_TOKEN(HTYPE, 4_variant); |
| 1325 | set->dsize = ip_set_elem_len(set, tb, | 1331 | set->dsize = ip_set_elem_len(set, tb, |
| 1326 | sizeof(struct IPSET_TOKEN(HTYPE, 4_elem))); | 1332 | sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)), |
| 1333 | __alignof__(struct IPSET_TOKEN(HTYPE, 4_elem))); | ||
| 1327 | #ifndef IP_SET_PROTO_UNDEF | 1334 | #ifndef IP_SET_PROTO_UNDEF |
| 1328 | } else { | 1335 | } else { |
| 1329 | set->variant = &IPSET_TOKEN(HTYPE, 6_variant); | 1336 | set->variant = &IPSET_TOKEN(HTYPE, 6_variant); |
| 1330 | set->dsize = ip_set_elem_len(set, tb, | 1337 | set->dsize = ip_set_elem_len(set, tb, |
| 1331 | sizeof(struct IPSET_TOKEN(HTYPE, 6_elem))); | 1338 | sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)), |
| 1339 | __alignof__(struct IPSET_TOKEN(HTYPE, 6_elem))); | ||
| 1332 | } | 1340 | } |
| 1333 | #endif | 1341 | #endif |
| 1334 | if (tb[IPSET_ATTR_TIMEOUT]) { | 1342 | if (tb[IPSET_ATTR_TIMEOUT]) { |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 5a30ce6e8c90..bbede95c9f68 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
| @@ -31,7 +31,7 @@ struct set_elem { | |||
| 31 | struct rcu_head rcu; | 31 | struct rcu_head rcu; |
| 32 | struct list_head list; | 32 | struct list_head list; |
| 33 | ip_set_id_t id; | 33 | ip_set_id_t id; |
| 34 | }; | 34 | } __aligned(__alignof__(u64)); |
| 35 | 35 | ||
| 36 | struct set_adt_elem { | 36 | struct set_adt_elem { |
| 37 | ip_set_id_t id; | 37 | ip_set_id_t id; |
| @@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], | |||
| 618 | size = IP_SET_LIST_MIN_SIZE; | 618 | size = IP_SET_LIST_MIN_SIZE; |
| 619 | 619 | ||
| 620 | set->variant = &set_variant; | 620 | set->variant = &set_variant; |
| 621 | set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem)); | 621 | set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem), |
| 622 | __alignof__(struct set_elem)); | ||
| 622 | if (!init_list_set(net, set, size)) | 623 | if (!init_list_set(net, set, size)) |
| 623 | return -ENOMEM; | 624 | return -ENOMEM; |
| 624 | if (tb[IPSET_ATTR_TIMEOUT]) { | 625 | if (tb[IPSET_ATTR_TIMEOUT]) { |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 1e24fff53e4b..f57b4dcdb233 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in | |||
| 1176 | struct ip_vs_protocol *pp; | 1176 | struct ip_vs_protocol *pp; |
| 1177 | struct ip_vs_proto_data *pd; | 1177 | struct ip_vs_proto_data *pd; |
| 1178 | struct ip_vs_conn *cp; | 1178 | struct ip_vs_conn *cp; |
| 1179 | struct sock *sk; | ||
| 1179 | 1180 | ||
| 1180 | EnterFunction(11); | 1181 | EnterFunction(11); |
| 1181 | 1182 | ||
| @@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in | |||
| 1183 | if (skb->ipvs_property) | 1184 | if (skb->ipvs_property) |
| 1184 | return NF_ACCEPT; | 1185 | return NF_ACCEPT; |
| 1185 | 1186 | ||
| 1187 | sk = skb_to_full_sk(skb); | ||
| 1186 | /* Bad... Do not break raw sockets */ | 1188 | /* Bad... Do not break raw sockets */ |
| 1187 | if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && | 1189 | if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT && |
| 1188 | af == AF_INET)) { | 1190 | af == AF_INET)) { |
| 1189 | struct sock *sk = skb->sk; | ||
| 1190 | struct inet_sock *inet = inet_sk(skb->sk); | ||
| 1191 | 1191 | ||
| 1192 | if (inet && sk->sk_family == PF_INET && inet->nodefrag) | 1192 | if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag) |
| 1193 | return NF_ACCEPT; | 1193 | return NF_ACCEPT; |
| 1194 | } | 1194 | } |
| 1195 | 1195 | ||
| @@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int | |||
| 1681 | struct ip_vs_conn *cp; | 1681 | struct ip_vs_conn *cp; |
| 1682 | int ret, pkts; | 1682 | int ret, pkts; |
| 1683 | int conn_reuse_mode; | 1683 | int conn_reuse_mode; |
| 1684 | struct sock *sk; | ||
| 1684 | 1685 | ||
| 1685 | /* Already marked as IPVS request or reply? */ | 1686 | /* Already marked as IPVS request or reply? */ |
| 1686 | if (skb->ipvs_property) | 1687 | if (skb->ipvs_property) |
| @@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int | |||
| 1708 | ip_vs_fill_iph_skb(af, skb, false, &iph); | 1709 | ip_vs_fill_iph_skb(af, skb, false, &iph); |
| 1709 | 1710 | ||
| 1710 | /* Bad... Do not break raw sockets */ | 1711 | /* Bad... Do not break raw sockets */ |
| 1711 | if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && | 1712 | sk = skb_to_full_sk(skb); |
| 1713 | if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT && | ||
| 1712 | af == AF_INET)) { | 1714 | af == AF_INET)) { |
| 1713 | struct sock *sk = skb->sk; | ||
| 1714 | struct inet_sock *inet = inet_sk(skb->sk); | ||
| 1715 | 1715 | ||
| 1716 | if (inet && sk->sk_family == PF_INET && inet->nodefrag) | 1716 | if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag) |
| 1717 | return NF_ACCEPT; | 1717 | return NF_ACCEPT; |
| 1718 | } | 1718 | } |
| 1719 | 1719 | ||
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 06eb48fceb42..740cce4685ac 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
| 825 | struct net *net = sock_net(ctnl); | 825 | struct net *net = sock_net(ctnl); |
| 826 | struct nfnl_log_net *log = nfnl_log_pernet(net); | 826 | struct nfnl_log_net *log = nfnl_log_pernet(net); |
| 827 | int ret = 0; | 827 | int ret = 0; |
| 828 | u16 flags; | 828 | u16 flags = 0; |
| 829 | 829 | ||
| 830 | if (nfula[NFULA_CFG_CMD]) { | 830 | if (nfula[NFULA_CFG_CMD]) { |
| 831 | u_int8_t pf = nfmsg->nfgen_family; | 831 | u_int8_t pf = nfmsg->nfgen_family; |
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index 1067fb4c1ffa..c7808fc19719 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c | |||
| @@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr, | |||
| 47 | local_bh_enable(); | 47 | local_bh_enable(); |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) | 50 | static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter, |
| 51 | struct nft_counter *total) | ||
| 51 | { | 52 | { |
| 52 | struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); | 53 | const struct nft_counter_percpu *cpu_stats; |
| 53 | struct nft_counter_percpu *cpu_stats; | ||
| 54 | struct nft_counter total; | ||
| 55 | u64 bytes, packets; | 54 | u64 bytes, packets; |
| 56 | unsigned int seq; | 55 | unsigned int seq; |
| 57 | int cpu; | 56 | int cpu; |
| 58 | 57 | ||
| 59 | memset(&total, 0, sizeof(total)); | 58 | memset(total, 0, sizeof(*total)); |
| 60 | for_each_possible_cpu(cpu) { | 59 | for_each_possible_cpu(cpu) { |
| 61 | cpu_stats = per_cpu_ptr(priv->counter, cpu); | 60 | cpu_stats = per_cpu_ptr(counter, cpu); |
| 62 | do { | 61 | do { |
| 63 | seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | 62 | seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); |
| 64 | bytes = cpu_stats->counter.bytes; | 63 | bytes = cpu_stats->counter.bytes; |
| 65 | packets = cpu_stats->counter.packets; | 64 | packets = cpu_stats->counter.packets; |
| 66 | } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); | 65 | } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); |
| 67 | 66 | ||
| 68 | total.packets += packets; | 67 | total->packets += packets; |
| 69 | total.bytes += bytes; | 68 | total->bytes += bytes; |
| 70 | } | 69 | } |
| 70 | } | ||
| 71 | |||
| 72 | static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) | ||
| 73 | { | ||
| 74 | struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); | ||
| 75 | struct nft_counter total; | ||
| 76 | |||
| 77 | nft_counter_fetch(priv->counter, &total); | ||
| 71 | 78 | ||
| 72 | if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || | 79 | if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || |
| 73 | nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) | 80 | nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) |
| @@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx, | |||
| 118 | free_percpu(priv->counter); | 125 | free_percpu(priv->counter); |
| 119 | } | 126 | } |
| 120 | 127 | ||
| 128 | static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src) | ||
| 129 | { | ||
| 130 | struct nft_counter_percpu_priv *priv = nft_expr_priv(src); | ||
| 131 | struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst); | ||
| 132 | struct nft_counter_percpu __percpu *cpu_stats; | ||
| 133 | struct nft_counter_percpu *this_cpu; | ||
| 134 | struct nft_counter total; | ||
| 135 | |||
| 136 | nft_counter_fetch(priv->counter, &total); | ||
| 137 | |||
| 138 | cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu, | ||
| 139 | GFP_ATOMIC); | ||
| 140 | if (cpu_stats == NULL) | ||
| 141 | return -ENOMEM; | ||
| 142 | |||
| 143 | preempt_disable(); | ||
| 144 | this_cpu = this_cpu_ptr(cpu_stats); | ||
| 145 | this_cpu->counter.packets = total.packets; | ||
| 146 | this_cpu->counter.bytes = total.bytes; | ||
| 147 | preempt_enable(); | ||
| 148 | |||
| 149 | priv_clone->counter = cpu_stats; | ||
| 150 | return 0; | ||
| 151 | } | ||
| 152 | |||
| 121 | static struct nft_expr_type nft_counter_type; | 153 | static struct nft_expr_type nft_counter_type; |
| 122 | static const struct nft_expr_ops nft_counter_ops = { | 154 | static const struct nft_expr_ops nft_counter_ops = { |
| 123 | .type = &nft_counter_type, | 155 | .type = &nft_counter_type, |
| @@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = { | |||
| 126 | .init = nft_counter_init, | 158 | .init = nft_counter_init, |
| 127 | .destroy = nft_counter_destroy, | 159 | .destroy = nft_counter_destroy, |
| 128 | .dump = nft_counter_dump, | 160 | .dump = nft_counter_dump, |
| 161 | .clone = nft_counter_clone, | ||
| 129 | }; | 162 | }; |
| 130 | 163 | ||
| 131 | static struct nft_expr_type nft_counter_type __read_mostly = { | 164 | static struct nft_expr_type nft_counter_type __read_mostly = { |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 513a8ef60a59..9dec3bd1b63c 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
| @@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, | |||
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | ext = nft_set_elem_ext(set, elem); | 52 | ext = nft_set_elem_ext(set, elem); |
| 53 | if (priv->expr != NULL) | 53 | if (priv->expr != NULL && |
| 54 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr); | 54 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) |
| 55 | return NULL; | ||
| 55 | 56 | ||
| 56 | return elem; | 57 | return elem; |
| 57 | } | 58 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index af399cac5205..1cf928fb573e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk) | |||
| 1741 | kfree_rcu(po->rollover, rcu); | 1741 | kfree_rcu(po->rollover, rcu); |
| 1742 | } | 1742 | } |
| 1743 | 1743 | ||
| 1744 | static bool packet_extra_vlan_len_allowed(const struct net_device *dev, | ||
| 1745 | struct sk_buff *skb) | ||
| 1746 | { | ||
| 1747 | /* Earlier code assumed this would be a VLAN pkt, double-check | ||
| 1748 | * this now that we have the actual packet in hand. We can only | ||
| 1749 | * do this check on Ethernet devices. | ||
| 1750 | */ | ||
| 1751 | if (unlikely(dev->type != ARPHRD_ETHER)) | ||
| 1752 | return false; | ||
| 1753 | |||
| 1754 | skb_reset_mac_header(skb); | ||
| 1755 | return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); | ||
| 1756 | } | ||
| 1757 | |||
| 1744 | static const struct proto_ops packet_ops; | 1758 | static const struct proto_ops packet_ops; |
| 1745 | 1759 | ||
| 1746 | static const struct proto_ops packet_ops_spkt; | 1760 | static const struct proto_ops packet_ops_spkt; |
| @@ -1902,18 +1916,10 @@ retry: | |||
| 1902 | goto retry; | 1916 | goto retry; |
| 1903 | } | 1917 | } |
| 1904 | 1918 | ||
| 1905 | if (len > (dev->mtu + dev->hard_header_len + extra_len)) { | 1919 | if (len > (dev->mtu + dev->hard_header_len + extra_len) && |
| 1906 | /* Earlier code assumed this would be a VLAN pkt, | 1920 | !packet_extra_vlan_len_allowed(dev, skb)) { |
| 1907 | * double-check this now that we have the actual | 1921 | err = -EMSGSIZE; |
| 1908 | * packet in hand. | 1922 | goto out_unlock; |
| 1909 | */ | ||
| 1910 | struct ethhdr *ehdr; | ||
| 1911 | skb_reset_mac_header(skb); | ||
| 1912 | ehdr = eth_hdr(skb); | ||
| 1913 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | ||
| 1914 | err = -EMSGSIZE; | ||
| 1915 | goto out_unlock; | ||
| 1916 | } | ||
| 1917 | } | 1923 | } |
| 1918 | 1924 | ||
| 1919 | skb->protocol = proto; | 1925 | skb->protocol = proto; |
| @@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len) | |||
| 2332 | return false; | 2338 | return false; |
| 2333 | } | 2339 | } |
| 2334 | 2340 | ||
| 2341 | static void tpacket_set_protocol(const struct net_device *dev, | ||
| 2342 | struct sk_buff *skb) | ||
| 2343 | { | ||
| 2344 | if (dev->type == ARPHRD_ETHER) { | ||
| 2345 | skb_reset_mac_header(skb); | ||
| 2346 | skb->protocol = eth_hdr(skb)->h_proto; | ||
| 2347 | } | ||
| 2348 | } | ||
| 2349 | |||
| 2335 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | 2350 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
| 2336 | void *frame, struct net_device *dev, int size_max, | 2351 | void *frame, struct net_device *dev, int size_max, |
| 2337 | __be16 proto, unsigned char *addr, int hlen) | 2352 | __be16 proto, unsigned char *addr, int hlen) |
| @@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
| 2368 | skb_reserve(skb, hlen); | 2383 | skb_reserve(skb, hlen); |
| 2369 | skb_reset_network_header(skb); | 2384 | skb_reset_network_header(skb); |
| 2370 | 2385 | ||
| 2371 | if (!packet_use_direct_xmit(po)) | ||
| 2372 | skb_probe_transport_header(skb, 0); | ||
| 2373 | if (unlikely(po->tp_tx_has_off)) { | 2386 | if (unlikely(po->tp_tx_has_off)) { |
| 2374 | int off_min, off_max, off; | 2387 | int off_min, off_max, off; |
| 2375 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | 2388 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); |
| @@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
| 2415 | dev->hard_header_len); | 2428 | dev->hard_header_len); |
| 2416 | if (unlikely(err)) | 2429 | if (unlikely(err)) |
| 2417 | return err; | 2430 | return err; |
| 2431 | if (!skb->protocol) | ||
| 2432 | tpacket_set_protocol(dev, skb); | ||
| 2418 | 2433 | ||
| 2419 | data += dev->hard_header_len; | 2434 | data += dev->hard_header_len; |
| 2420 | to_write -= dev->hard_header_len; | 2435 | to_write -= dev->hard_header_len; |
| @@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
| 2449 | len = ((to_write > len_max) ? len_max : to_write); | 2464 | len = ((to_write > len_max) ? len_max : to_write); |
| 2450 | } | 2465 | } |
| 2451 | 2466 | ||
| 2467 | skb_probe_transport_header(skb, 0); | ||
| 2468 | |||
| 2452 | return tp_len; | 2469 | return tp_len; |
| 2453 | } | 2470 | } |
| 2454 | 2471 | ||
| @@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
| 2493 | if (unlikely(!(dev->flags & IFF_UP))) | 2510 | if (unlikely(!(dev->flags & IFF_UP))) |
| 2494 | goto out_put; | 2511 | goto out_put; |
| 2495 | 2512 | ||
| 2496 | reserve = dev->hard_header_len + VLAN_HLEN; | 2513 | if (po->sk.sk_socket->type == SOCK_RAW) |
| 2514 | reserve = dev->hard_header_len; | ||
| 2497 | size_max = po->tx_ring.frame_size | 2515 | size_max = po->tx_ring.frame_size |
| 2498 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); | 2516 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
| 2499 | 2517 | ||
| 2500 | if (size_max > dev->mtu + reserve) | 2518 | if (size_max > dev->mtu + reserve + VLAN_HLEN) |
| 2501 | size_max = dev->mtu + reserve; | 2519 | size_max = dev->mtu + reserve + VLAN_HLEN; |
| 2502 | 2520 | ||
| 2503 | do { | 2521 | do { |
| 2504 | ph = packet_current_frame(po, &po->tx_ring, | 2522 | ph = packet_current_frame(po, &po->tx_ring, |
| @@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
| 2525 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, | 2543 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, |
| 2526 | addr, hlen); | 2544 | addr, hlen); |
| 2527 | if (likely(tp_len >= 0) && | 2545 | if (likely(tp_len >= 0) && |
| 2528 | tp_len > dev->mtu + dev->hard_header_len) { | 2546 | tp_len > dev->mtu + reserve && |
| 2529 | struct ethhdr *ehdr; | 2547 | !packet_extra_vlan_len_allowed(dev, skb)) |
| 2530 | /* Earlier code assumed this would be a VLAN pkt, | 2548 | tp_len = -EMSGSIZE; |
| 2531 | * double-check this now that we have the actual | ||
| 2532 | * packet in hand. | ||
| 2533 | */ | ||
| 2534 | 2549 | ||
| 2535 | skb_reset_mac_header(skb); | ||
| 2536 | ehdr = eth_hdr(skb); | ||
| 2537 | if (ehdr->h_proto != htons(ETH_P_8021Q)) | ||
| 2538 | tp_len = -EMSGSIZE; | ||
| 2539 | } | ||
| 2540 | if (unlikely(tp_len < 0)) { | 2550 | if (unlikely(tp_len < 0)) { |
| 2541 | if (po->tp_loss) { | 2551 | if (po->tp_loss) { |
| 2542 | __packet_set_status(po, ph, | 2552 | __packet_set_status(po, ph, |
| @@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2765 | 2775 | ||
| 2766 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); | 2776 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); |
| 2767 | 2777 | ||
| 2768 | if (!gso_type && (len > dev->mtu + reserve + extra_len)) { | 2778 | if (!gso_type && (len > dev->mtu + reserve + extra_len) && |
| 2769 | /* Earlier code assumed this would be a VLAN pkt, | 2779 | !packet_extra_vlan_len_allowed(dev, skb)) { |
| 2770 | * double-check this now that we have the actual | 2780 | err = -EMSGSIZE; |
| 2771 | * packet in hand. | 2781 | goto out_free; |
| 2772 | */ | ||
| 2773 | struct ethhdr *ehdr; | ||
| 2774 | skb_reset_mac_header(skb); | ||
| 2775 | ehdr = eth_hdr(skb); | ||
| 2776 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | ||
| 2777 | err = -EMSGSIZE; | ||
| 2778 | goto out_free; | ||
| 2779 | } | ||
| 2780 | } | 2782 | } |
| 2781 | 2783 | ||
| 2782 | skb->protocol = proto; | 2784 | skb->protocol = proto; |
| @@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2807 | len += vnet_hdr_len; | 2809 | len += vnet_hdr_len; |
| 2808 | } | 2810 | } |
| 2809 | 2811 | ||
| 2810 | if (!packet_use_direct_xmit(po)) | 2812 | skb_probe_transport_header(skb, reserve); |
| 2811 | skb_probe_transport_header(skb, reserve); | 2813 | |
| 2812 | if (unlikely(extra_len == 4)) | 2814 | if (unlikely(extra_len == 4)) |
| 2813 | skb->no_fcs = 1; | 2815 | skb->no_fcs = 1; |
| 2814 | 2816 | ||
| @@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4107 | err = -EINVAL; | 4109 | err = -EINVAL; |
| 4108 | if (unlikely((int)req->tp_block_size <= 0)) | 4110 | if (unlikely((int)req->tp_block_size <= 0)) |
| 4109 | goto out; | 4111 | goto out; |
| 4110 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) | 4112 | if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) |
| 4111 | goto out; | 4113 | goto out; |
| 4112 | if (po->tp_version >= TPACKET_V3 && | 4114 | if (po->tp_version >= TPACKET_V3 && |
| 4113 | (int)(req->tp_block_size - | 4115 | (int)(req->tp_block_size - |
| @@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4119 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) | 4121 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
| 4120 | goto out; | 4122 | goto out; |
| 4121 | 4123 | ||
| 4122 | rb->frames_per_block = req->tp_block_size/req->tp_frame_size; | 4124 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; |
| 4123 | if (unlikely(rb->frames_per_block <= 0)) | 4125 | if (unlikely(rb->frames_per_block == 0)) |
| 4124 | goto out; | 4126 | goto out; |
| 4125 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | 4127 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != |
| 4126 | req->tp_frame_nr)) | 4128 | req->tp_frame_nr)) |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 4f15b7d730e1..1543e39f47c3 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, | |||
| 809 | if (!has_sha1) | 809 | if (!has_sha1) |
| 810 | return -EINVAL; | 810 | return -EINVAL; |
| 811 | 811 | ||
| 812 | memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], | 812 | for (i = 0; i < hmacs->shmac_num_idents; i++) |
| 813 | hmacs->shmac_num_idents * sizeof(__u16)); | 813 | ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]); |
| 814 | ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + | 814 | ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + |
| 815 | hmacs->shmac_num_idents * sizeof(__u16)); | 815 | hmacs->shmac_num_idents * sizeof(__u16)); |
| 816 | return 0; | 816 | return 0; |
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 229956bf8457..95f82d8d4888 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
| @@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) | |||
| 353 | { | 353 | { |
| 354 | struct rpc_xprt *xprt = req->rq_xprt; | 354 | struct rpc_xprt *xprt = req->rq_xprt; |
| 355 | struct svc_serv *bc_serv = xprt->bc_serv; | 355 | struct svc_serv *bc_serv = xprt->bc_serv; |
| 356 | struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf; | ||
| 356 | 357 | ||
| 357 | spin_lock(&xprt->bc_pa_lock); | 358 | spin_lock(&xprt->bc_pa_lock); |
| 358 | list_del(&req->rq_bc_pa_list); | 359 | list_del(&req->rq_bc_pa_list); |
| 359 | xprt_dec_alloc_count(xprt, 1); | 360 | xprt_dec_alloc_count(xprt, 1); |
| 360 | spin_unlock(&xprt->bc_pa_lock); | 361 | spin_unlock(&xprt->bc_pa_lock); |
| 361 | 362 | ||
| 363 | if (copied <= rq_rcv_buf->head[0].iov_len) { | ||
| 364 | rq_rcv_buf->head[0].iov_len = copied; | ||
| 365 | rq_rcv_buf->page_len = 0; | ||
| 366 | } else { | ||
| 367 | rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len; | ||
| 368 | } | ||
| 369 | |||
| 362 | req->rq_private_buf.len = copied; | 370 | req->rq_private_buf.len = copied; |
| 363 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | 371 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); |
| 364 | 372 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index bc5b7b5032ca..7fccf9675df8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
| 1363 | memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); | 1363 | memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); |
| 1364 | memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); | 1364 | memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); |
| 1365 | memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); | 1365 | memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); |
| 1366 | rqstp->rq_arg.len = req->rq_private_buf.len; | ||
| 1366 | 1367 | ||
| 1367 | /* reset result send buffer "put" position */ | 1368 | /* reset result send buffer "put" position */ |
| 1368 | resv->iov_len = 0; | 1369 | resv->iov_len = 0; |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index aaa0b58d6aba..955ec152cb71 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -441,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion) | |||
| 441 | if (state == TCP_LISTEN) | 441 | if (state == TCP_LISTEN) |
| 442 | unix_release_sock(skb->sk, 1); | 442 | unix_release_sock(skb->sk, 1); |
| 443 | /* passed fds are erased in the kfree_skb hook */ | 443 | /* passed fds are erased in the kfree_skb hook */ |
| 444 | UNIXCB(skb).consumed = skb->len; | ||
| 444 | kfree_skb(skb); | 445 | kfree_skb(skb); |
| 445 | } | 446 | } |
| 446 | 447 | ||
| @@ -1799,6 +1800,7 @@ alloc_skb: | |||
| 1799 | * this - does no harm | 1800 | * this - does no harm |
| 1800 | */ | 1801 | */ |
| 1801 | consume_skb(newskb); | 1802 | consume_skb(newskb); |
| 1803 | newskb = NULL; | ||
| 1802 | } | 1804 | } |
| 1803 | 1805 | ||
| 1804 | if (skb_append_pagefrags(skb, page, offset, size)) { | 1806 | if (skb_append_pagefrags(skb, page, offset, size)) { |
| @@ -1811,8 +1813,11 @@ alloc_skb: | |||
| 1811 | skb->truesize += size; | 1813 | skb->truesize += size; |
| 1812 | atomic_add(size, &sk->sk_wmem_alloc); | 1814 | atomic_add(size, &sk->sk_wmem_alloc); |
| 1813 | 1815 | ||
| 1814 | if (newskb) | 1816 | if (newskb) { |
| 1817 | spin_lock(&other->sk_receive_queue.lock); | ||
| 1815 | __skb_queue_tail(&other->sk_receive_queue, newskb); | 1818 | __skb_queue_tail(&other->sk_receive_queue, newskb); |
| 1819 | spin_unlock(&other->sk_receive_queue.lock); | ||
| 1820 | } | ||
| 1816 | 1821 | ||
| 1817 | unix_state_unlock(other); | 1822 | unix_state_unlock(other); |
| 1818 | mutex_unlock(&unix_sk(other)->readlock); | 1823 | mutex_unlock(&unix_sk(other)->readlock); |
| @@ -2072,6 +2077,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) | |||
| 2072 | 2077 | ||
| 2073 | do { | 2078 | do { |
| 2074 | int chunk; | 2079 | int chunk; |
| 2080 | bool drop_skb; | ||
| 2075 | struct sk_buff *skb, *last; | 2081 | struct sk_buff *skb, *last; |
| 2076 | 2082 | ||
| 2077 | unix_state_lock(sk); | 2083 | unix_state_lock(sk); |
| @@ -2152,7 +2158,11 @@ unlock: | |||
| 2152 | } | 2158 | } |
| 2153 | 2159 | ||
| 2154 | chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); | 2160 | chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); |
| 2161 | skb_get(skb); | ||
| 2155 | chunk = state->recv_actor(skb, skip, chunk, state); | 2162 | chunk = state->recv_actor(skb, skip, chunk, state); |
| 2163 | drop_skb = !unix_skb_len(skb); | ||
| 2164 | /* skb is only safe to use if !drop_skb */ | ||
| 2165 | consume_skb(skb); | ||
| 2156 | if (chunk < 0) { | 2166 | if (chunk < 0) { |
| 2157 | if (copied == 0) | 2167 | if (copied == 0) |
| 2158 | copied = -EFAULT; | 2168 | copied = -EFAULT; |
| @@ -2161,6 +2171,18 @@ unlock: | |||
| 2161 | copied += chunk; | 2171 | copied += chunk; |
| 2162 | size -= chunk; | 2172 | size -= chunk; |
| 2163 | 2173 | ||
| 2174 | if (drop_skb) { | ||
| 2175 | /* the skb was touched by a concurrent reader; | ||
| 2176 | * we should not expect anything from this skb | ||
| 2177 | * anymore and assume it invalid - we can be | ||
| 2178 | * sure it was dropped from the socket queue | ||
| 2179 | * | ||
| 2180 | * let's report a short read | ||
| 2181 | */ | ||
| 2182 | err = 0; | ||
| 2183 | break; | ||
| 2184 | } | ||
| 2185 | |||
| 2164 | /* Mark read part of skb as used */ | 2186 | /* Mark read part of skb as used */ |
| 2165 | if (!(flags & MSG_PEEK)) { | 2187 | if (!(flags & MSG_PEEK)) { |
| 2166 | UNIXCB(skb).consumed += chunk; | 2188 | UNIXCB(skb).consumed += chunk; |
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 79b4596b5f9a..edd638b5825f 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
| @@ -67,10 +67,13 @@ HOSTLOADLIBES_lathist += -lelf | |||
| 67 | # point this to your LLVM backend with bpf support | 67 | # point this to your LLVM backend with bpf support |
| 68 | LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc | 68 | LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc |
| 69 | 69 | ||
| 70 | # asm/sysreg.h inline assmbly used by it is incompatible with llvm. | ||
| 71 | # But, ehere is not easy way to fix it, so just exclude it since it is | ||
| 72 | # useless for BPF samples. | ||
| 70 | $(obj)/%.o: $(src)/%.c | 73 | $(obj)/%.o: $(src)/%.c |
| 71 | clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ | 74 | clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ |
| 72 | -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ | 75 | -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ |
| 73 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ | 76 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ |
| 74 | clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ | 77 | clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ |
| 75 | -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ | 78 | -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ |
| 76 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s | 79 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s |
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 125b906cd1d4..638a38e1b419 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
| @@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version(); | |||
| 2711 | 2711 | ||
| 2712 | # generate a sequence of code that will splice in highlighting information | 2712 | # generate a sequence of code that will splice in highlighting information |
| 2713 | # using the s// operator. | 2713 | # using the s// operator. |
| 2714 | foreach my $k (keys @highlights) { | 2714 | for (my $k = 0; $k < @highlights; $k++) { |
| 2715 | my $pattern = $highlights[$k][0]; | 2715 | my $pattern = $highlights[$k][0]; |
| 2716 | my $result = $highlights[$k][1]; | 2716 | my $result = $highlights[$k][1]; |
| 2717 | # print STDERR "scanning pattern:$pattern, highlight:($result)\n"; | 2717 | # print STDERR "scanning pattern:$pattern, highlight:($result)\n"; |
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 927db9f35ad6..696ccfa08d10 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c | |||
| @@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) | |||
| 845 | size_t datalen = prep->datalen; | 845 | size_t datalen = prep->datalen; |
| 846 | int ret = 0; | 846 | int ret = 0; |
| 847 | 847 | ||
| 848 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | ||
| 849 | return -ENOKEY; | ||
| 848 | if (datalen <= 0 || datalen > 32767 || !prep->data) | 850 | if (datalen <= 0 || datalen > 32767 || !prep->data) |
| 849 | return -EINVAL; | 851 | return -EINVAL; |
| 850 | 852 | ||
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 903dace648a1..16dec53184b6 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
| @@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu) | |||
| 1007 | */ | 1007 | */ |
| 1008 | static int trusted_update(struct key *key, struct key_preparsed_payload *prep) | 1008 | static int trusted_update(struct key *key, struct key_preparsed_payload *prep) |
| 1009 | { | 1009 | { |
| 1010 | struct trusted_key_payload *p = key->payload.data[0]; | 1010 | struct trusted_key_payload *p; |
| 1011 | struct trusted_key_payload *new_p; | 1011 | struct trusted_key_payload *new_p; |
| 1012 | struct trusted_key_options *new_o; | 1012 | struct trusted_key_options *new_o; |
| 1013 | size_t datalen = prep->datalen; | 1013 | size_t datalen = prep->datalen; |
| 1014 | char *datablob; | 1014 | char *datablob; |
| 1015 | int ret = 0; | 1015 | int ret = 0; |
| 1016 | 1016 | ||
| 1017 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | ||
| 1018 | return -ENOKEY; | ||
| 1019 | p = key->payload.data[0]; | ||
| 1017 | if (!p->migratable) | 1020 | if (!p->migratable) |
| 1018 | return -EPERM; | 1021 | return -EPERM; |
| 1019 | if (datalen <= 0 || datalen > 32767 || !prep->data) | 1022 | if (datalen <= 0 || datalen > 32767 || !prep->data) |
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 28cb30f80256..8705d79b2c6f 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c | |||
| @@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) | |||
| 120 | 120 | ||
| 121 | if (ret == 0) { | 121 | if (ret == 0) { |
| 122 | /* attach the new data, displacing the old */ | 122 | /* attach the new data, displacing the old */ |
| 123 | zap = key->payload.data[0]; | 123 | if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) |
| 124 | zap = key->payload.data[0]; | ||
| 125 | else | ||
| 126 | zap = NULL; | ||
| 124 | rcu_assign_keypointer(key, upayload); | 127 | rcu_assign_keypointer(key, upayload); |
| 125 | key->expiry = 0; | 128 | key->expiry = 0; |
| 126 | } | 129 | } |
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 18643bf9894d..456e1a9bcfde 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c | |||
| @@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, | |||
| 638 | { | 638 | { |
| 639 | struct avtab_node *node; | 639 | struct avtab_node *node; |
| 640 | 640 | ||
| 641 | if (!ctab || !key || !avd || !xperms) | 641 | if (!ctab || !key || !avd) |
| 642 | return; | 642 | return; |
| 643 | 643 | ||
| 644 | for (node = avtab_search_node(ctab, key); node; | 644 | for (node = avtab_search_node(ctab, key); node; |
| @@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, | |||
| 657 | if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == | 657 | if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == |
| 658 | (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) | 658 | (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) |
| 659 | avd->auditallow |= node->datum.u.data; | 659 | avd->auditallow |= node->datum.u.data; |
| 660 | if ((node->key.specified & AVTAB_ENABLED) && | 660 | if (xperms && (node->key.specified & AVTAB_ENABLED) && |
| 661 | (node->key.specified & AVTAB_XPERMS)) | 661 | (node->key.specified & AVTAB_XPERMS)) |
| 662 | services_compute_xperms_drivers(xperms, node); | 662 | services_compute_xperms_drivers(xperms, node); |
| 663 | } | 663 | } |
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 5d99436dfcae..0cda05c72f50 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c | |||
| @@ -12,9 +12,11 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); | |||
| 12 | MODULE_LICENSE("GPL v2"); | 12 | MODULE_LICENSE("GPL v2"); |
| 13 | 13 | ||
| 14 | #define OUI_WEISS 0x001c6a | 14 | #define OUI_WEISS 0x001c6a |
| 15 | #define OUI_LOUD 0x000ff2 | ||
| 15 | 16 | ||
| 16 | #define DICE_CATEGORY_ID 0x04 | 17 | #define DICE_CATEGORY_ID 0x04 |
| 17 | #define WEISS_CATEGORY_ID 0x00 | 18 | #define WEISS_CATEGORY_ID 0x00 |
| 19 | #define LOUD_CATEGORY_ID 0x10 | ||
| 18 | 20 | ||
| 19 | static int dice_interface_check(struct fw_unit *unit) | 21 | static int dice_interface_check(struct fw_unit *unit) |
| 20 | { | 22 | { |
| @@ -57,6 +59,8 @@ static int dice_interface_check(struct fw_unit *unit) | |||
| 57 | } | 59 | } |
| 58 | if (vendor == OUI_WEISS) | 60 | if (vendor == OUI_WEISS) |
| 59 | category = WEISS_CATEGORY_ID; | 61 | category = WEISS_CATEGORY_ID; |
| 62 | else if (vendor == OUI_LOUD) | ||
| 63 | category = LOUD_CATEGORY_ID; | ||
| 60 | else | 64 | else |
| 61 | category = DICE_CATEGORY_ID; | 65 | category = DICE_CATEGORY_ID; |
| 62 | if (device->config_rom[3] != ((vendor << 8) | category) || | 66 | if (device->config_rom[3] != ((vendor << 8) | category) || |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8a7fbdcb4072..963f82430938 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -312,6 +312,10 @@ enum { | |||
| 312 | (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\ | 312 | (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\ |
| 313 | AZX_DCAPS_I915_POWERWELL) | 313 | AZX_DCAPS_I915_POWERWELL) |
| 314 | 314 | ||
| 315 | #define AZX_DCAPS_INTEL_BROXTON \ | ||
| 316 | (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\ | ||
| 317 | AZX_DCAPS_I915_POWERWELL) | ||
| 318 | |||
| 315 | /* quirks for ATI SB / AMD Hudson */ | 319 | /* quirks for ATI SB / AMD Hudson */ |
| 316 | #define AZX_DCAPS_PRESET_ATI_SB \ | 320 | #define AZX_DCAPS_PRESET_ATI_SB \ |
| 317 | (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\ | 321 | (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\ |
| @@ -2124,6 +2128,9 @@ static const struct pci_device_id azx_ids[] = { | |||
| 2124 | /* Sunrise Point-LP */ | 2128 | /* Sunrise Point-LP */ |
| 2125 | { PCI_DEVICE(0x8086, 0x9d70), | 2129 | { PCI_DEVICE(0x8086, 0x9d70), |
| 2126 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 2130 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
| 2131 | /* Broxton-P(Apollolake) */ | ||
| 2132 | { PCI_DEVICE(0x8086, 0x5a98), | ||
| 2133 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, | ||
| 2127 | /* Haswell */ | 2134 | /* Haswell */ |
| 2128 | { PCI_DEVICE(0x8086, 0x0a0c), | 2135 | { PCI_DEVICE(0x8086, 0x0a0c), |
| 2129 | .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, | 2136 | .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 60cd9e700909..bdb6f226d006 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -2378,7 +2378,8 @@ static int patch_generic_hdmi(struct hda_codec *codec) | |||
| 2378 | * can cover the codec power request, and so need not set this flag. | 2378 | * can cover the codec power request, and so need not set this flag. |
| 2379 | * For previous platforms, there is no such power well feature. | 2379 | * For previous platforms, there is no such power well feature. |
| 2380 | */ | 2380 | */ |
| 2381 | if (is_valleyview_plus(codec) || is_skylake(codec)) | 2381 | if (is_valleyview_plus(codec) || is_skylake(codec) || |
| 2382 | is_broxton(codec)) | ||
| 2382 | codec->core.link_power_control = 1; | 2383 | codec->core.link_power_control = 1; |
| 2383 | 2384 | ||
| 2384 | if (is_haswell_plus(codec) || is_valleyview_plus(codec)) { | 2385 | if (is_haswell_plus(codec) || is_valleyview_plus(codec)) { |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f7b065f9ac4..9bedf7c85e29 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -1759,6 +1759,7 @@ enum { | |||
| 1759 | ALC882_FIXUP_NO_PRIMARY_HP, | 1759 | ALC882_FIXUP_NO_PRIMARY_HP, |
| 1760 | ALC887_FIXUP_ASUS_BASS, | 1760 | ALC887_FIXUP_ASUS_BASS, |
| 1761 | ALC887_FIXUP_BASS_CHMAP, | 1761 | ALC887_FIXUP_BASS_CHMAP, |
| 1762 | ALC882_FIXUP_DISABLE_AAMIX, | ||
| 1762 | }; | 1763 | }; |
| 1763 | 1764 | ||
| 1764 | static void alc889_fixup_coef(struct hda_codec *codec, | 1765 | static void alc889_fixup_coef(struct hda_codec *codec, |
| @@ -1920,6 +1921,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec, | |||
| 1920 | 1921 | ||
| 1921 | static void alc_fixup_bass_chmap(struct hda_codec *codec, | 1922 | static void alc_fixup_bass_chmap(struct hda_codec *codec, |
| 1922 | const struct hda_fixup *fix, int action); | 1923 | const struct hda_fixup *fix, int action); |
| 1924 | static void alc_fixup_disable_aamix(struct hda_codec *codec, | ||
| 1925 | const struct hda_fixup *fix, int action); | ||
| 1923 | 1926 | ||
| 1924 | static const struct hda_fixup alc882_fixups[] = { | 1927 | static const struct hda_fixup alc882_fixups[] = { |
| 1925 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { | 1928 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { |
| @@ -2151,6 +2154,10 @@ static const struct hda_fixup alc882_fixups[] = { | |||
| 2151 | .type = HDA_FIXUP_FUNC, | 2154 | .type = HDA_FIXUP_FUNC, |
| 2152 | .v.func = alc_fixup_bass_chmap, | 2155 | .v.func = alc_fixup_bass_chmap, |
| 2153 | }, | 2156 | }, |
| 2157 | [ALC882_FIXUP_DISABLE_AAMIX] = { | ||
| 2158 | .type = HDA_FIXUP_FUNC, | ||
| 2159 | .v.func = alc_fixup_disable_aamix, | ||
| 2160 | }, | ||
| 2154 | }; | 2161 | }; |
| 2155 | 2162 | ||
| 2156 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { | 2163 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
| @@ -2218,6 +2225,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
| 2218 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), | 2225 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), |
| 2219 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | 2226 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
| 2220 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), | 2227 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), |
| 2228 | SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX), | ||
| 2221 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2229 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
| 2222 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2230 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
| 2223 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2231 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
| @@ -4587,6 +4595,7 @@ enum { | |||
| 4587 | ALC292_FIXUP_DISABLE_AAMIX, | 4595 | ALC292_FIXUP_DISABLE_AAMIX, |
| 4588 | ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, | 4596 | ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 4589 | ALC275_FIXUP_DELL_XPS, | 4597 | ALC275_FIXUP_DELL_XPS, |
| 4598 | ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, | ||
| 4590 | }; | 4599 | }; |
| 4591 | 4600 | ||
| 4592 | static const struct hda_fixup alc269_fixups[] = { | 4601 | static const struct hda_fixup alc269_fixups[] = { |
| @@ -5167,6 +5176,17 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 5167 | {} | 5176 | {} |
| 5168 | } | 5177 | } |
| 5169 | }, | 5178 | }, |
| 5179 | [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { | ||
| 5180 | .type = HDA_FIXUP_VERBS, | ||
| 5181 | .v.verbs = (const struct hda_verb[]) { | ||
| 5182 | /* Disable pass-through path for FRONT 14h */ | ||
| 5183 | {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, | ||
| 5184 | {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, | ||
| 5185 | {} | ||
| 5186 | }, | ||
| 5187 | .chained = true, | ||
| 5188 | .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE | ||
| 5189 | }, | ||
| 5170 | }; | 5190 | }; |
| 5171 | 5191 | ||
| 5172 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 5192 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
| @@ -5180,8 +5200,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5180 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), | 5200 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), |
| 5181 | SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), | 5201 | SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), |
| 5182 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), | 5202 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), |
| 5203 | SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), | ||
| 5183 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 5204 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
| 5184 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), | 5205 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), |
| 5206 | SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), | ||
| 5185 | SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), | 5207 | SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), |
| 5186 | SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), | 5208 | SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), |
| 5187 | SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), | 5209 | SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), |
| @@ -5204,6 +5226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5204 | SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), | 5226 | SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
| 5205 | SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), | 5227 | SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
| 5206 | SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), | 5228 | SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
| 5229 | SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | ||
| 5207 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5230 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 5208 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5231 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 5209 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 5232 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 826122d8acee..2c7c5eb8b1e9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -3110,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec, | |||
| 3110 | spec->gpio_led = 0x08; | 3110 | spec->gpio_led = 0x08; |
| 3111 | } | 3111 | } |
| 3112 | 3112 | ||
| 3113 | static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin) | ||
| 3114 | { | ||
| 3115 | unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin); | ||
| 3116 | |||
| 3117 | /* count line-out, too, as BIOS sets often so */ | ||
| 3118 | return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE && | ||
| 3119 | (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || | ||
| 3120 | get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT); | ||
| 3121 | } | ||
| 3122 | |||
| 3123 | static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin) | ||
| 3124 | { | ||
| 3125 | unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin); | ||
| 3126 | |||
| 3127 | /* It was changed in the BIOS to just satisfy MS DTM. | ||
| 3128 | * Lets turn it back into slaved HP | ||
| 3129 | */ | ||
| 3130 | pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | | ||
| 3131 | (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT); | ||
| 3132 | pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) | | ||
| 3133 | 0x1f; | ||
| 3134 | snd_hda_codec_set_pincfg(codec, pin, pin_cfg); | ||
| 3135 | } | ||
| 3113 | 3136 | ||
| 3114 | static void stac92hd71bxx_fixup_hp(struct hda_codec *codec, | 3137 | static void stac92hd71bxx_fixup_hp(struct hda_codec *codec, |
| 3115 | const struct hda_fixup *fix, int action) | 3138 | const struct hda_fixup *fix, int action) |
| @@ -3119,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec, | |||
| 3119 | if (action != HDA_FIXUP_ACT_PRE_PROBE) | 3142 | if (action != HDA_FIXUP_ACT_PRE_PROBE) |
| 3120 | return; | 3143 | return; |
| 3121 | 3144 | ||
| 3122 | if (hp_blike_system(codec->core.subsystem_id)) { | 3145 | /* when both output A and F are assigned, these are supposedly |
| 3123 | unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f); | 3146 | * dock and built-in headphones; fix both pin configs |
| 3124 | if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || | 3147 | */ |
| 3125 | get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER || | 3148 | if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) { |
| 3126 | get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) { | 3149 | fixup_hp_headphone(codec, 0x0a); |
| 3127 | /* It was changed in the BIOS to just satisfy MS DTM. | 3150 | fixup_hp_headphone(codec, 0x0f); |
| 3128 | * Lets turn it back into slaved HP | ||
| 3129 | */ | ||
| 3130 | pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | ||
| 3131 | | (AC_JACK_HP_OUT << | ||
| 3132 | AC_DEFCFG_DEVICE_SHIFT); | ||
| 3133 | pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | ||
| 3134 | | AC_DEFCFG_SEQUENCE))) | ||
| 3135 | | 0x1f; | ||
| 3136 | snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg); | ||
| 3137 | } | ||
| 3138 | } | 3151 | } |
| 3139 | 3152 | ||
| 3140 | if (find_mute_led_cfg(codec, 1)) | 3153 | if (find_mute_led_cfg(codec, 1)) |
diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 7661616f3636..5b4c58c3e2c5 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c | |||
| @@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint { | |||
| 174 | u8 running_status_length; | 174 | u8 running_status_length; |
| 175 | } ports[0x10]; | 175 | } ports[0x10]; |
| 176 | u8 seen_f5; | 176 | u8 seen_f5; |
| 177 | bool in_sysex; | ||
| 178 | u8 last_cin; | ||
| 177 | u8 error_resubmit; | 179 | u8 error_resubmit; |
| 178 | int current_port; | 180 | int current_port; |
| 179 | }; | 181 | }; |
| @@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input( | |||
| 468 | } | 470 | } |
| 469 | 471 | ||
| 470 | /* | 472 | /* |
| 473 | * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4 | ||
| 474 | * but the previously seen CIN, but still with three data bytes. | ||
| 475 | */ | ||
| 476 | static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep, | ||
| 477 | uint8_t *buffer, int buffer_length) | ||
| 478 | { | ||
| 479 | unsigned int i, cin, length; | ||
| 480 | |||
| 481 | for (i = 0; i + 3 < buffer_length; i += 4) { | ||
| 482 | if (buffer[i] == 0 && i > 0) | ||
| 483 | break; | ||
| 484 | cin = buffer[i] & 0x0f; | ||
| 485 | if (ep->in_sysex && | ||
| 486 | cin == ep->last_cin && | ||
| 487 | (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0) | ||
| 488 | cin = 0x4; | ||
| 489 | #if 0 | ||
| 490 | if (buffer[i + 1] == 0x90) { | ||
| 491 | /* | ||
| 492 | * Either a corrupted running status or a real note-on | ||
| 493 | * message; impossible to detect reliably. | ||
| 494 | */ | ||
| 495 | } | ||
| 496 | #endif | ||
| 497 | length = snd_usbmidi_cin_length[cin]; | ||
| 498 | snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length); | ||
| 499 | ep->in_sysex = cin == 0x4; | ||
| 500 | if (!ep->in_sysex) | ||
| 501 | ep->last_cin = cin; | ||
| 502 | } | ||
| 503 | } | ||
| 504 | |||
| 505 | /* | ||
| 471 | * CME protocol: like the standard protocol, but SysEx commands are sent as a | 506 | * CME protocol: like the standard protocol, but SysEx commands are sent as a |
| 472 | * single USB packet preceded by a 0x0F byte. | 507 | * single USB packet preceded by a 0x0F byte. |
| 473 | */ | 508 | */ |
| @@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = { | |||
| 660 | .output_packet = snd_usbmidi_output_standard_packet, | 695 | .output_packet = snd_usbmidi_output_standard_packet, |
| 661 | }; | 696 | }; |
| 662 | 697 | ||
| 698 | static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = { | ||
| 699 | .input = ch345_broken_sysex_input, | ||
| 700 | .output = snd_usbmidi_standard_output, | ||
| 701 | .output_packet = snd_usbmidi_output_standard_packet, | ||
| 702 | }; | ||
| 703 | |||
| 663 | /* | 704 | /* |
| 664 | * AKAI MPD16 protocol: | 705 | * AKAI MPD16 protocol: |
| 665 | * | 706 | * |
| @@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi, | |||
| 1341 | * Various chips declare a packet size larger than 4 bytes, but | 1382 | * Various chips declare a packet size larger than 4 bytes, but |
| 1342 | * do not actually work with larger packets: | 1383 | * do not actually work with larger packets: |
| 1343 | */ | 1384 | */ |
| 1385 | case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */ | ||
| 1344 | case USB_ID(0x0a92, 0x1020): /* ESI M4U */ | 1386 | case USB_ID(0x0a92, 0x1020): /* ESI M4U */ |
| 1345 | case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */ | 1387 | case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */ |
| 1346 | case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ | 1388 | case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ |
| @@ -2378,6 +2420,10 @@ int snd_usbmidi_create(struct snd_card *card, | |||
| 2378 | 2420 | ||
| 2379 | err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); | 2421 | err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); |
| 2380 | break; | 2422 | break; |
| 2423 | case QUIRK_MIDI_CH345: | ||
| 2424 | umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops; | ||
| 2425 | err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); | ||
| 2426 | break; | ||
| 2381 | default: | 2427 | default: |
| 2382 | dev_err(&umidi->dev->dev, "invalid quirk type %d\n", | 2428 | dev_err(&umidi->dev->dev, "invalid quirk type %d\n", |
| 2383 | quirk->type); | 2429 | quirk->type); |
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 1a1e2e4df35e..c60a776e815d 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
| @@ -2829,6 +2829,17 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
| 2829 | .idProduct = 0x1020, | 2829 | .idProduct = 0x1020, |
| 2830 | }, | 2830 | }, |
| 2831 | 2831 | ||
| 2832 | /* QinHeng devices */ | ||
| 2833 | { | ||
| 2834 | USB_DEVICE(0x1a86, 0x752d), | ||
| 2835 | .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { | ||
| 2836 | .vendor_name = "QinHeng", | ||
| 2837 | .product_name = "CH345", | ||
| 2838 | .ifnum = 1, | ||
| 2839 | .type = QUIRK_MIDI_CH345 | ||
| 2840 | } | ||
| 2841 | }, | ||
| 2842 | |||
| 2832 | /* KeithMcMillen Stringport */ | 2843 | /* KeithMcMillen Stringport */ |
| 2833 | { | 2844 | { |
| 2834 | USB_DEVICE(0x1f38, 0x0001), | 2845 | USB_DEVICE(0x1f38, 0x0001), |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5ca80e7d30cd..7016ad898187 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -538,6 +538,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, | |||
| 538 | [QUIRK_MIDI_CME] = create_any_midi_quirk, | 538 | [QUIRK_MIDI_CME] = create_any_midi_quirk, |
| 539 | [QUIRK_MIDI_AKAI] = create_any_midi_quirk, | 539 | [QUIRK_MIDI_AKAI] = create_any_midi_quirk, |
| 540 | [QUIRK_MIDI_FTDI] = create_any_midi_quirk, | 540 | [QUIRK_MIDI_FTDI] = create_any_midi_quirk, |
| 541 | [QUIRK_MIDI_CH345] = create_any_midi_quirk, | ||
| 541 | [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, | 542 | [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, |
| 542 | [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, | 543 | [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, |
| 543 | [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, | 544 | [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, |
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 15a12715bd05..b665d85555cb 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h | |||
| @@ -95,6 +95,7 @@ enum quirk_type { | |||
| 95 | QUIRK_MIDI_AKAI, | 95 | QUIRK_MIDI_AKAI, |
| 96 | QUIRK_MIDI_US122L, | 96 | QUIRK_MIDI_US122L, |
| 97 | QUIRK_MIDI_FTDI, | 97 | QUIRK_MIDI_FTDI, |
| 98 | QUIRK_MIDI_CH345, | ||
| 98 | QUIRK_AUDIO_STANDARD_INTERFACE, | 99 | QUIRK_AUDIO_STANDARD_INTERFACE, |
| 99 | QUIRK_AUDIO_FIXED_ENDPOINT, | 100 | QUIRK_AUDIO_FIXED_ENDPOINT, |
| 100 | QUIRK_AUDIO_EDIROL_UAXX, | 101 | QUIRK_AUDIO_EDIROL_UAXX, |
diff --git a/tools/Makefile b/tools/Makefile index d6f307dfb1a3..7dc820a8c1f1 100644 --- a/tools/Makefile +++ b/tools/Makefile | |||
| @@ -32,6 +32,10 @@ help: | |||
| 32 | @echo ' from the kernel command line to build and install one of' | 32 | @echo ' from the kernel command line to build and install one of' |
| 33 | @echo ' the tools above' | 33 | @echo ' the tools above' |
| 34 | @echo '' | 34 | @echo '' |
| 35 | @echo ' $$ make tools/all' | ||
| 36 | @echo '' | ||
| 37 | @echo ' builds all tools.' | ||
| 38 | @echo '' | ||
| 35 | @echo ' $$ make tools/install' | 39 | @echo ' $$ make tools/install' |
| 36 | @echo '' | 40 | @echo '' |
| 37 | @echo ' installs all tools.' | 41 | @echo ' installs all tools.' |
| @@ -77,6 +81,11 @@ tmon: FORCE | |||
| 77 | freefall: FORCE | 81 | freefall: FORCE |
| 78 | $(call descend,laptop/$@) | 82 | $(call descend,laptop/$@) |
| 79 | 83 | ||
| 84 | all: acpi cgroup cpupower hv firewire lguest \ | ||
| 85 | perf selftests turbostat usb \ | ||
| 86 | virtio vm net x86_energy_perf_policy \ | ||
| 87 | tmon freefall | ||
| 88 | |||
| 80 | acpi_install: | 89 | acpi_install: |
| 81 | $(call descend,power/$(@:_install=),install) | 90 | $(call descend,power/$(@:_install=),install) |
| 82 | 91 | ||
| @@ -101,7 +110,7 @@ freefall_install: | |||
| 101 | install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ | 110 | install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ |
| 102 | perf_install selftests_install turbostat_install usb_install \ | 111 | perf_install selftests_install turbostat_install usb_install \ |
| 103 | virtio_install vm_install net_install x86_energy_perf_policy_install \ | 112 | virtio_install vm_install net_install x86_energy_perf_policy_install \ |
| 104 | tmon freefall_install | 113 | tmon_install freefall_install |
| 105 | 114 | ||
| 106 | acpi_clean: | 115 | acpi_clean: |
| 107 | $(call descend,power/acpi,clean) | 116 | $(call descend,power/acpi,clean) |
diff --git a/tools/net/Makefile b/tools/net/Makefile index ee577ea03ba5..ddf888010652 100644 --- a/tools/net/Makefile +++ b/tools/net/Makefile | |||
| @@ -4,6 +4,9 @@ CC = gcc | |||
| 4 | LEX = flex | 4 | LEX = flex |
| 5 | YACC = bison | 5 | YACC = bison |
| 6 | 6 | ||
| 7 | CFLAGS += -Wall -O2 | ||
| 8 | CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include | ||
| 9 | |||
| 7 | %.yacc.c: %.y | 10 | %.yacc.c: %.y |
| 8 | $(YACC) -o $@ -d $< | 11 | $(YACC) -o $@ -d $< |
| 9 | 12 | ||
| @@ -12,15 +15,13 @@ YACC = bison | |||
| 12 | 15 | ||
| 13 | all : bpf_jit_disasm bpf_dbg bpf_asm | 16 | all : bpf_jit_disasm bpf_dbg bpf_asm |
| 14 | 17 | ||
| 15 | bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' | 18 | bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm' |
| 16 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl | 19 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl |
| 17 | bpf_jit_disasm : bpf_jit_disasm.o | 20 | bpf_jit_disasm : bpf_jit_disasm.o |
| 18 | 21 | ||
| 19 | bpf_dbg : CFLAGS = -Wall -O2 | ||
| 20 | bpf_dbg : LDLIBS = -lreadline | 22 | bpf_dbg : LDLIBS = -lreadline |
| 21 | bpf_dbg : bpf_dbg.o | 23 | bpf_dbg : bpf_dbg.o |
| 22 | 24 | ||
| 23 | bpf_asm : CFLAGS = -Wall -O2 -I. | ||
| 24 | bpf_asm : LDLIBS = | 25 | bpf_asm : LDLIBS = |
| 25 | bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o | 26 | bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o |
| 26 | bpf_exp.lex.o : bpf_exp.yacc.c | 27 | bpf_exp.lex.o : bpf_exp.yacc.c |
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 0a945d2e8ca5..99d127fe9c35 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c | |||
| @@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 675 | .fork = perf_event__repipe, | 675 | .fork = perf_event__repipe, |
| 676 | .exit = perf_event__repipe, | 676 | .exit = perf_event__repipe, |
| 677 | .lost = perf_event__repipe, | 677 | .lost = perf_event__repipe, |
| 678 | .lost_samples = perf_event__repipe, | ||
| 678 | .aux = perf_event__repipe, | 679 | .aux = perf_event__repipe, |
| 679 | .itrace_start = perf_event__repipe, | 680 | .itrace_start = perf_event__repipe, |
| 680 | .context_switch = perf_event__repipe, | 681 | .context_switch = perf_event__repipe, |
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 2853ad2bd435..f256fac1e722 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | struct report { | 44 | struct report { |
| 45 | struct perf_tool tool; | 45 | struct perf_tool tool; |
| 46 | struct perf_session *session; | 46 | struct perf_session *session; |
| 47 | bool force, use_tui, use_gtk, use_stdio; | 47 | bool use_tui, use_gtk, use_stdio; |
| 48 | bool hide_unresolved; | 48 | bool hide_unresolved; |
| 49 | bool dont_use_callchains; | 49 | bool dont_use_callchains; |
| 50 | bool show_full_info; | 50 | bool show_full_info; |
| @@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 678 | "file", "vmlinux pathname"), | 678 | "file", "vmlinux pathname"), |
| 679 | OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, | 679 | OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, |
| 680 | "file", "kallsyms pathname"), | 680 | "file", "kallsyms pathname"), |
| 681 | OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), | 681 | OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), |
| 682 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, | 682 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, |
| 683 | "load module symbols - WARNING: use only with -k and LIVE kernel"), | 683 | "load module symbols - WARNING: use only with -k and LIVE kernel"), |
| 684 | OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, | 684 | OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, |
| @@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 832 | } | 832 | } |
| 833 | 833 | ||
| 834 | file.path = input_name; | 834 | file.path = input_name; |
| 835 | file.force = report.force; | 835 | file.force = symbol_conf.force; |
| 836 | 836 | ||
| 837 | repeat: | 837 | repeat: |
| 838 | session = perf_session__new(&file, false, &report.tool); | 838 | session = perf_session__new(&file, false, &report.tool); |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index e5afb8936040..fa9eb92c9e24 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
| @@ -1430,7 +1430,6 @@ close_file_and_continue: | |||
| 1430 | 1430 | ||
| 1431 | struct popup_action { | 1431 | struct popup_action { |
| 1432 | struct thread *thread; | 1432 | struct thread *thread; |
| 1433 | struct dso *dso; | ||
| 1434 | struct map_symbol ms; | 1433 | struct map_symbol ms; |
| 1435 | int socket; | 1434 | int socket; |
| 1436 | 1435 | ||
| @@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act, | |||
| 1565 | return 0; | 1564 | return 0; |
| 1566 | 1565 | ||
| 1567 | act->ms.map = map; | 1566 | act->ms.map = map; |
| 1568 | act->dso = map->dso; | ||
| 1569 | act->fn = do_zoom_dso; | 1567 | act->fn = do_zoom_dso; |
| 1570 | return 1; | 1568 | return 1; |
| 1571 | } | 1569 | } |
| @@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1827 | 1825 | ||
| 1828 | while (1) { | 1826 | while (1) { |
| 1829 | struct thread *thread = NULL; | 1827 | struct thread *thread = NULL; |
| 1830 | struct dso *dso = NULL; | ||
| 1831 | struct map *map = NULL; | 1828 | struct map *map = NULL; |
| 1832 | int choice = 0; | 1829 | int choice = 0; |
| 1833 | int socked_id = -1; | 1830 | int socked_id = -1; |
| @@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1839 | if (browser->he_selection != NULL) { | 1836 | if (browser->he_selection != NULL) { |
| 1840 | thread = hist_browser__selected_thread(browser); | 1837 | thread = hist_browser__selected_thread(browser); |
| 1841 | map = browser->selection->map; | 1838 | map = browser->selection->map; |
| 1842 | if (map) | ||
| 1843 | dso = map->dso; | ||
| 1844 | socked_id = browser->he_selection->socket; | 1839 | socked_id = browser->he_selection->socket; |
| 1845 | } | 1840 | } |
| 1846 | switch (key) { | 1841 | switch (key) { |
| @@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1874 | hist_browser__dump(browser); | 1869 | hist_browser__dump(browser); |
| 1875 | continue; | 1870 | continue; |
| 1876 | case 'd': | 1871 | case 'd': |
| 1877 | actions->dso = dso; | 1872 | actions->ms.map = map; |
| 1878 | do_zoom_dso(browser, actions); | 1873 | do_zoom_dso(browser, actions); |
| 1879 | continue; | 1874 | continue; |
| 1880 | case 'V': | 1875 | case 'V': |
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index d909459fb54c..217b5a60e2ab 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c | |||
| @@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = { | |||
| 76 | .exit = perf_event__exit_del_thread, | 76 | .exit = perf_event__exit_del_thread, |
| 77 | .attr = perf_event__process_attr, | 77 | .attr = perf_event__process_attr, |
| 78 | .build_id = perf_event__process_build_id, | 78 | .build_id = perf_event__process_build_id, |
| 79 | .ordered_events = true, | ||
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | int build_id__sprintf(const u8 *build_id, int len, char *bf) | 82 | int build_id__sprintf(const u8 *build_id, int len, char *bf) |
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 7c0c08386a1d..425df5c86c9c 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c | |||
| @@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root, | |||
| 933 | /* Add new node and rebalance tree */ | 933 | /* Add new node and rebalance tree */ |
| 934 | rb_link_node(&dso->rb_node, parent, p); | 934 | rb_link_node(&dso->rb_node, parent, p); |
| 935 | rb_insert_color(&dso->rb_node, root); | 935 | rb_insert_color(&dso->rb_node, root); |
| 936 | dso->root = root; | ||
| 936 | } | 937 | } |
| 937 | return NULL; | 938 | return NULL; |
| 938 | } | 939 | } |
| @@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root, | |||
| 945 | 946 | ||
| 946 | void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) | 947 | void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) |
| 947 | { | 948 | { |
| 949 | struct rb_root *root = dso->root; | ||
| 950 | |||
| 948 | if (name == NULL) | 951 | if (name == NULL) |
| 949 | return; | 952 | return; |
| 950 | 953 | ||
| 951 | if (dso->long_name_allocated) | 954 | if (dso->long_name_allocated) |
| 952 | free((char *)dso->long_name); | 955 | free((char *)dso->long_name); |
| 953 | 956 | ||
| 957 | if (root) { | ||
| 958 | rb_erase(&dso->rb_node, root); | ||
| 959 | /* | ||
| 960 | * __dso__findlink_by_longname() isn't guaranteed to add it | ||
| 961 | * back, so a clean removal is required here. | ||
| 962 | */ | ||
| 963 | RB_CLEAR_NODE(&dso->rb_node); | ||
| 964 | dso->root = NULL; | ||
| 965 | } | ||
| 966 | |||
| 954 | dso->long_name = name; | 967 | dso->long_name = name; |
| 955 | dso->long_name_len = strlen(name); | 968 | dso->long_name_len = strlen(name); |
| 956 | dso->long_name_allocated = name_allocated; | 969 | dso->long_name_allocated = name_allocated; |
| 970 | |||
| 971 | if (root) | ||
| 972 | __dso__findlink_by_longname(root, dso, NULL); | ||
| 957 | } | 973 | } |
| 958 | 974 | ||
| 959 | void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) | 975 | void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) |
| @@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name) | |||
| 1046 | dso->kernel = DSO_TYPE_USER; | 1062 | dso->kernel = DSO_TYPE_USER; |
| 1047 | dso->needs_swap = DSO_SWAP__UNSET; | 1063 | dso->needs_swap = DSO_SWAP__UNSET; |
| 1048 | RB_CLEAR_NODE(&dso->rb_node); | 1064 | RB_CLEAR_NODE(&dso->rb_node); |
| 1065 | dso->root = NULL; | ||
| 1049 | INIT_LIST_HEAD(&dso->node); | 1066 | INIT_LIST_HEAD(&dso->node); |
| 1050 | INIT_LIST_HEAD(&dso->data.open_entry); | 1067 | INIT_LIST_HEAD(&dso->data.open_entry); |
| 1051 | pthread_mutex_init(&dso->lock, NULL); | 1068 | pthread_mutex_init(&dso->lock, NULL); |
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index fc8db9c764ac..45ec4d0a50ed 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h | |||
| @@ -135,6 +135,7 @@ struct dso { | |||
| 135 | pthread_mutex_t lock; | 135 | pthread_mutex_t lock; |
| 136 | struct list_head node; | 136 | struct list_head node; |
| 137 | struct rb_node rb_node; /* rbtree node sorted by long name */ | 137 | struct rb_node rb_node; /* rbtree node sorted by long name */ |
| 138 | struct rb_root *root; /* root of rbtree that rb_node is in */ | ||
| 138 | struct rb_root symbols[MAP__NR_TYPES]; | 139 | struct rb_root symbols[MAP__NR_TYPES]; |
| 139 | struct rb_root symbol_names[MAP__NR_TYPES]; | 140 | struct rb_root symbol_names[MAP__NR_TYPES]; |
| 140 | struct { | 141 | struct { |
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 5ef90be2a249..8b303ff20289 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
| @@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos) | |||
| 91 | 91 | ||
| 92 | list_for_each_entry_safe(pos, n, &dsos->head, node) { | 92 | list_for_each_entry_safe(pos, n, &dsos->head, node) { |
| 93 | RB_CLEAR_NODE(&pos->rb_node); | 93 | RB_CLEAR_NODE(&pos->rb_node); |
| 94 | pos->root = NULL; | ||
| 94 | list_del_init(&pos->node); | 95 | list_del_init(&pos->node); |
| 95 | dso__put(pos); | 96 | dso__put(pos); |
| 96 | } | 97 | } |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index bd8f03de5e40..05012bb178d7 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
| @@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1183 | container_of(pf, struct trace_event_finder, pf); | 1183 | container_of(pf, struct trace_event_finder, pf); |
| 1184 | struct perf_probe_point *pp = &pf->pev->point; | 1184 | struct perf_probe_point *pp = &pf->pev->point; |
| 1185 | struct probe_trace_event *tev; | 1185 | struct probe_trace_event *tev; |
| 1186 | struct perf_probe_arg *args; | 1186 | struct perf_probe_arg *args = NULL; |
| 1187 | int ret, i; | 1187 | int ret, i; |
| 1188 | 1188 | ||
| 1189 | /* Check number of tevs */ | 1189 | /* Check number of tevs */ |
| @@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1198 | ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, | 1198 | ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, |
| 1199 | pp->retprobe, pp->function, &tev->point); | 1199 | pp->retprobe, pp->function, &tev->point); |
| 1200 | if (ret < 0) | 1200 | if (ret < 0) |
| 1201 | return ret; | 1201 | goto end; |
| 1202 | 1202 | ||
| 1203 | tev->point.realname = strdup(dwarf_diename(sc_die)); | 1203 | tev->point.realname = strdup(dwarf_diename(sc_die)); |
| 1204 | if (!tev->point.realname) | 1204 | if (!tev->point.realname) { |
| 1205 | return -ENOMEM; | 1205 | ret = -ENOMEM; |
| 1206 | goto end; | ||
| 1207 | } | ||
| 1206 | 1208 | ||
| 1207 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | 1209 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, |
| 1208 | tev->point.offset); | 1210 | tev->point.offset); |
| 1209 | 1211 | ||
| 1210 | /* Expand special probe argument if exist */ | 1212 | /* Expand special probe argument if exist */ |
| 1211 | args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); | 1213 | args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); |
| 1212 | if (args == NULL) | 1214 | if (args == NULL) { |
| 1213 | return -ENOMEM; | 1215 | ret = -ENOMEM; |
| 1216 | goto end; | ||
| 1217 | } | ||
| 1214 | 1218 | ||
| 1215 | ret = expand_probe_args(sc_die, pf, args); | 1219 | ret = expand_probe_args(sc_die, pf, args); |
| 1216 | if (ret < 0) | 1220 | if (ret < 0) |
| @@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1234 | } | 1238 | } |
| 1235 | 1239 | ||
| 1236 | end: | 1240 | end: |
| 1241 | if (ret) { | ||
| 1242 | clear_probe_trace_event(tev); | ||
| 1243 | tf->ntevs--; | ||
| 1244 | } | ||
| 1237 | free(args); | 1245 | free(args); |
| 1238 | return ret; | 1246 | return ret; |
| 1239 | } | 1247 | } |
| @@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, | |||
| 1246 | struct trace_event_finder tf = { | 1254 | struct trace_event_finder tf = { |
| 1247 | .pf = {.pev = pev, .callback = add_probe_trace_event}, | 1255 | .pf = {.pev = pev, .callback = add_probe_trace_event}, |
| 1248 | .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; | 1256 | .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; |
| 1249 | int ret; | 1257 | int ret, i; |
| 1250 | 1258 | ||
| 1251 | /* Allocate result tevs array */ | 1259 | /* Allocate result tevs array */ |
| 1252 | *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); | 1260 | *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); |
| @@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, | |||
| 1258 | 1266 | ||
| 1259 | ret = debuginfo__find_probes(dbg, &tf.pf); | 1267 | ret = debuginfo__find_probes(dbg, &tf.pf); |
| 1260 | if (ret < 0) { | 1268 | if (ret < 0) { |
| 1269 | for (i = 0; i < tf.ntevs; i++) | ||
| 1270 | clear_probe_trace_event(&tf.tevs[i]); | ||
| 1261 | zfree(tevs); | 1271 | zfree(tevs); |
| 1262 | return ret; | 1272 | return ret; |
| 1263 | } | 1273 | } |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b4cc7662677e..cd08027a6d2c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, | |||
| 654 | struct map_groups *kmaps = map__kmaps(map); | 654 | struct map_groups *kmaps = map__kmaps(map); |
| 655 | struct map *curr_map; | 655 | struct map *curr_map; |
| 656 | struct symbol *pos; | 656 | struct symbol *pos; |
| 657 | int count = 0, moved = 0; | 657 | int count = 0; |
| 658 | struct rb_root old_root = dso->symbols[map->type]; | ||
| 658 | struct rb_root *root = &dso->symbols[map->type]; | 659 | struct rb_root *root = &dso->symbols[map->type]; |
| 659 | struct rb_node *next = rb_first(root); | 660 | struct rb_node *next = rb_first(root); |
| 660 | 661 | ||
| 661 | if (!kmaps) | 662 | if (!kmaps) |
| 662 | return -1; | 663 | return -1; |
| 663 | 664 | ||
| 665 | *root = RB_ROOT; | ||
| 666 | |||
| 664 | while (next) { | 667 | while (next) { |
| 665 | char *module; | 668 | char *module; |
| 666 | 669 | ||
| 667 | pos = rb_entry(next, struct symbol, rb_node); | 670 | pos = rb_entry(next, struct symbol, rb_node); |
| 668 | next = rb_next(&pos->rb_node); | 671 | next = rb_next(&pos->rb_node); |
| 669 | 672 | ||
| 673 | rb_erase_init(&pos->rb_node, &old_root); | ||
| 674 | |||
| 670 | module = strchr(pos->name, '\t'); | 675 | module = strchr(pos->name, '\t'); |
| 671 | if (module) | 676 | if (module) |
| 672 | *module = '\0'; | 677 | *module = '\0'; |
| @@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, | |||
| 674 | curr_map = map_groups__find(kmaps, map->type, pos->start); | 679 | curr_map = map_groups__find(kmaps, map->type, pos->start); |
| 675 | 680 | ||
| 676 | if (!curr_map || (filter && filter(curr_map, pos))) { | 681 | if (!curr_map || (filter && filter(curr_map, pos))) { |
| 677 | rb_erase_init(&pos->rb_node, root); | ||
| 678 | symbol__delete(pos); | 682 | symbol__delete(pos); |
| 679 | } else { | 683 | continue; |
| 680 | pos->start -= curr_map->start - curr_map->pgoff; | ||
| 681 | if (pos->end) | ||
| 682 | pos->end -= curr_map->start - curr_map->pgoff; | ||
| 683 | if (curr_map->dso != map->dso) { | ||
| 684 | rb_erase_init(&pos->rb_node, root); | ||
| 685 | symbols__insert( | ||
| 686 | &curr_map->dso->symbols[curr_map->type], | ||
| 687 | pos); | ||
| 688 | ++moved; | ||
| 689 | } else { | ||
| 690 | ++count; | ||
| 691 | } | ||
| 692 | } | 684 | } |
| 685 | |||
| 686 | pos->start -= curr_map->start - curr_map->pgoff; | ||
| 687 | if (pos->end) | ||
| 688 | pos->end -= curr_map->start - curr_map->pgoff; | ||
| 689 | symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); | ||
| 690 | ++count; | ||
| 693 | } | 691 | } |
| 694 | 692 | ||
| 695 | /* Symbols have been adjusted */ | 693 | /* Symbols have been adjusted */ |
| 696 | dso->adjust_symbols = 1; | 694 | dso->adjust_symbols = 1; |
| 697 | 695 | ||
| 698 | return count + moved; | 696 | return count; |
| 699 | } | 697 | } |
| 700 | 698 | ||
| 701 | /* | 699 | /* |
| @@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
| 1438 | if (lstat(dso->name, &st) < 0) | 1436 | if (lstat(dso->name, &st) < 0) |
| 1439 | goto out; | 1437 | goto out; |
| 1440 | 1438 | ||
| 1441 | if (st.st_uid && (st.st_uid != geteuid())) { | 1439 | if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { |
| 1442 | pr_warning("File %s not owned by current user or root, " | 1440 | pr_warning("File %s not owned by current user or root, " |
| 1443 | "ignoring it.\n", dso->name); | 1441 | "ignoring it (use -f to override).\n", dso->name); |
| 1444 | goto out; | 1442 | goto out; |
| 1445 | } | 1443 | } |
| 1446 | 1444 | ||
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 40073c60b83d..dcd786e364f2 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
| @@ -84,6 +84,7 @@ struct symbol_conf { | |||
| 84 | unsigned short priv_size; | 84 | unsigned short priv_size; |
| 85 | unsigned short nr_events; | 85 | unsigned short nr_events; |
| 86 | bool try_vmlinux_path, | 86 | bool try_vmlinux_path, |
| 87 | force, | ||
| 87 | ignore_vmlinux, | 88 | ignore_vmlinux, |
| 88 | ignore_vmlinux_buildid, | 89 | ignore_vmlinux_buildid, |
| 89 | show_kernel_path, | 90 | show_kernel_path, |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d8e4b20b6d54..0dac7e05a6ac 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void) | |||
| 1173 | unsigned long long msr; | 1173 | unsigned long long msr; |
| 1174 | unsigned int ratio; | 1174 | unsigned int ratio; |
| 1175 | 1175 | ||
| 1176 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | 1176 | get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); |
| 1177 | 1177 | ||
| 1178 | fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); | 1178 | fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); |
| 1179 | 1179 | ||
| 1180 | ratio = (msr >> 40) & 0xFF; | 1180 | ratio = (msr >> 40) & 0xFF; |
| 1181 | fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", | 1181 | fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", |
| @@ -1807,7 +1807,7 @@ void check_permissions() | |||
| 1807 | * | 1807 | * |
| 1808 | * MSR_SMI_COUNT 0x00000034 | 1808 | * MSR_SMI_COUNT 0x00000034 |
| 1809 | * | 1809 | * |
| 1810 | * MSR_NHM_PLATFORM_INFO 0x000000ce | 1810 | * MSR_PLATFORM_INFO 0x000000ce |
| 1811 | * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | 1811 | * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
| 1812 | * | 1812 | * |
| 1813 | * MSR_PKG_C3_RESIDENCY 0x000003f8 | 1813 | * MSR_PKG_C3_RESIDENCY 0x000003f8 |
| @@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) | |||
| 1876 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); | 1876 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); |
| 1877 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; | 1877 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; |
| 1878 | 1878 | ||
| 1879 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | 1879 | get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); |
| 1880 | base_ratio = (msr >> 8) & 0xFF; | 1880 | base_ratio = (msr >> 8) & 0xFF; |
| 1881 | 1881 | ||
| 1882 | base_hz = base_ratio * bclk * 1000000; | 1882 | base_hz = base_ratio * bclk * 1000000; |
diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README index 3224a049b196..0558bb9ce0a6 100644 --- a/tools/testing/selftests/futex/README +++ b/tools/testing/selftests/futex/README | |||
| @@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or | |||
| 27 | o Where possible, any helper functions or other package-wide code shall be | 27 | o Where possible, any helper functions or other package-wide code shall be |
| 28 | implemented in header files, avoiding the need to compile intermediate object | 28 | implemented in header files, avoiding the need to compile intermediate object |
| 29 | files. | 29 | files. |
| 30 | o External dependendencies shall remain as minimal as possible. Currently gcc | 30 | o External dependencies shall remain as minimal as possible. Currently gcc |
| 31 | and glibc are the only dependencies. | 31 | and glibc are the only dependencies. |
| 32 | o Tests return 0 for success and < 0 for failure. | 32 | o Tests return 0 for success and < 0 for failure. |
| 33 | 33 | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index e38cc54942db..882fe83a3554 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
| @@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) | |||
| 492 | pid_t parent = getppid(); | 492 | pid_t parent = getppid(); |
| 493 | int fd; | 493 | int fd; |
| 494 | void *map1, *map2; | 494 | void *map1, *map2; |
| 495 | int page_size = sysconf(_SC_PAGESIZE); | ||
| 496 | |||
| 497 | ASSERT_LT(0, page_size); | ||
| 495 | 498 | ||
| 496 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 499 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
| 497 | ASSERT_EQ(0, ret); | 500 | ASSERT_EQ(0, ret); |
| @@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) | |||
| 504 | 507 | ||
| 505 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 508 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
| 506 | map1 = (void *)syscall(sysno, | 509 | map1 = (void *)syscall(sysno, |
| 507 | NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE); | 510 | NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); |
| 508 | EXPECT_NE(MAP_FAILED, map1); | 511 | EXPECT_NE(MAP_FAILED, map1); |
| 509 | /* mmap2() should never return. */ | 512 | /* mmap2() should never return. */ |
| 510 | map2 = (void *)syscall(sysno, | 513 | map2 = (void *)syscall(sysno, |
| 511 | NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); | 514 | NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); |
| 512 | EXPECT_EQ(MAP_FAILED, map2); | 515 | EXPECT_EQ(MAP_FAILED, map2); |
| 513 | 516 | ||
| 514 | /* The test failed, so clean up the resources. */ | 517 | /* The test failed, so clean up the resources. */ |
| 515 | munmap(map1, PAGE_SIZE); | 518 | munmap(map1, page_size); |
| 516 | munmap(map2, PAGE_SIZE); | 519 | munmap(map2, page_size); |
| 517 | close(fd); | 520 | close(fd); |
| 518 | } | 521 | } |
| 519 | 522 | ||
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index bcf5ec760eb9..5a6016224bb9 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c | |||
| @@ -128,6 +128,7 @@ static const char * const page_flag_names[] = { | |||
| 128 | [KPF_THP] = "t:thp", | 128 | [KPF_THP] = "t:thp", |
| 129 | [KPF_BALLOON] = "o:balloon", | 129 | [KPF_BALLOON] = "o:balloon", |
| 130 | [KPF_ZERO_PAGE] = "z:zero_page", | 130 | [KPF_ZERO_PAGE] = "z:zero_page", |
| 131 | [KPF_IDLE] = "i:idle_page", | ||
| 131 | 132 | ||
| 132 | [KPF_RESERVED] = "r:reserved", | 133 | [KPF_RESERVED] = "r:reserved", |
| 133 | [KPF_MLOCKED] = "m:mlocked", | 134 | [KPF_MLOCKED] = "m:mlocked", |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 21a0ab2d8919..69bca185c471 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
| @@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 221 | kvm_timer_update_state(vcpu); | 221 | kvm_timer_update_state(vcpu); |
| 222 | 222 | ||
| 223 | /* | 223 | /* |
| 224 | * If we enter the guest with the virtual input level to the VGIC | 224 | * If we enter the guest with the virtual input level to the VGIC |
| 225 | * asserted, then we have already told the VGIC what we need to, and | 225 | * asserted, then we have already told the VGIC what we need to, and |
| 226 | * we don't need to exit from the guest until the guest deactivates | 226 | * we don't need to exit from the guest until the guest deactivates |
| 227 | * the already injected interrupt, so therefore we should set the | 227 | * the already injected interrupt, so therefore we should set the |
| 228 | * hardware active state to prevent unnecessary exits from the guest. | 228 | * hardware active state to prevent unnecessary exits from the guest. |
| 229 | * | 229 | * |
| 230 | * Conversely, if the virtual input level is deasserted, then always | 230 | * Also, if we enter the guest with the virtual timer interrupt active, |
| 231 | * clear the hardware active state to ensure that hardware interrupts | 231 | * then it must be active on the physical distributor, because we set |
| 232 | * from the timer triggers a guest exit. | 232 | * the HW bit and the guest must be able to deactivate the virtual and |
| 233 | */ | 233 | * physical interrupt at the same time. |
| 234 | if (timer->irq.level) | 234 | * |
| 235 | * Conversely, if the virtual input level is deasserted and the virtual | ||
| 236 | * interrupt is not active, then always clear the hardware active state | ||
| 237 | * to ensure that hardware interrupts from the timer triggers a guest | ||
| 238 | * exit. | ||
| 239 | */ | ||
| 240 | if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) | ||
| 235 | phys_active = true; | 241 | phys_active = true; |
| 236 | else | 242 | else |
| 237 | phys_active = false; | 243 | phys_active = false; |
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 533538385d5d..65461f821a75 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
| @@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu) | |||
| 1096 | vgic_set_lr(vcpu, lr_nr, vlr); | 1096 | vgic_set_lr(vcpu, lr_nr, vlr); |
| 1097 | } | 1097 | } |
| 1098 | 1098 | ||
| 1099 | static bool dist_active_irq(struct kvm_vcpu *vcpu) | ||
| 1100 | { | ||
| 1101 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 1102 | |||
| 1103 | return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) | ||
| 1107 | { | ||
| 1108 | int i; | ||
| 1109 | |||
| 1110 | for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { | ||
| 1111 | struct vgic_lr vlr = vgic_get_lr(vcpu, i); | ||
| 1112 | |||
| 1113 | if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) | ||
| 1114 | return true; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | return dist_active_irq(vcpu); | ||
| 1118 | } | ||
| 1119 | |||
| 1099 | /* | 1120 | /* |
| 1100 | * An interrupt may have been disabled after being made pending on the | 1121 | * An interrupt may have been disabled after being made pending on the |
| 1101 | * CPU interface (the classic case is a timer running while we're | 1122 | * CPU interface (the classic case is a timer running while we're |
| @@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 1248 | * may have been serviced from another vcpu. In all cases, | 1269 | * may have been serviced from another vcpu. In all cases, |
| 1249 | * move along. | 1270 | * move along. |
| 1250 | */ | 1271 | */ |
| 1251 | if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) | 1272 | if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) |
| 1252 | goto epilog; | 1273 | goto epilog; |
| 1253 | 1274 | ||
| 1254 | /* SGIs */ | 1275 | /* SGIs */ |
| @@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
| 1396 | static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) | 1417 | static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) |
| 1397 | { | 1418 | { |
| 1398 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1419 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 1399 | struct irq_phys_map *map; | ||
| 1400 | bool phys_active; | ||
| 1401 | bool level_pending; | 1420 | bool level_pending; |
| 1402 | int ret; | ||
| 1403 | 1421 | ||
| 1404 | if (!(vlr.state & LR_HW)) | 1422 | if (!(vlr.state & LR_HW)) |
| 1405 | return false; | 1423 | return false; |
| 1406 | 1424 | ||
| 1407 | map = vgic_irq_map_search(vcpu, vlr.irq); | 1425 | if (vlr.state & LR_STATE_ACTIVE) |
| 1408 | BUG_ON(!map); | 1426 | return false; |
| 1409 | |||
| 1410 | ret = irq_get_irqchip_state(map->irq, | ||
| 1411 | IRQCHIP_STATE_ACTIVE, | ||
| 1412 | &phys_active); | ||
| 1413 | |||
| 1414 | WARN_ON(ret); | ||
| 1415 | |||
| 1416 | if (phys_active) | ||
| 1417 | return 0; | ||
| 1418 | 1427 | ||
| 1419 | spin_lock(&dist->lock); | 1428 | spin_lock(&dist->lock); |
| 1420 | level_pending = process_queued_irq(vcpu, lr, vlr); | 1429 | level_pending = process_queued_irq(vcpu, lr, vlr); |
| @@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
| 1479 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); | 1488 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
| 1480 | } | 1489 | } |
| 1481 | 1490 | ||
| 1482 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) | ||
| 1483 | { | ||
| 1484 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 1485 | |||
| 1486 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
| 1487 | return 0; | ||
| 1488 | |||
| 1489 | return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | |||
| 1493 | void vgic_kick_vcpus(struct kvm *kvm) | 1491 | void vgic_kick_vcpus(struct kvm *kvm) |
| 1494 | { | 1492 | { |
| 1495 | struct kvm_vcpu *vcpu; | 1493 | struct kvm_vcpu *vcpu; |
