diff options
236 files changed, 2120 insertions, 1402 deletions
@@ -33,6 +33,7 @@ Björn Steinbrink <B.Steinbrink@gmx.de> | |||
33 | Brian Avery <b.avery@hp.com> | 33 | Brian Avery <b.avery@hp.com> |
34 | Brian King <brking@us.ibm.com> | 34 | Brian King <brking@us.ibm.com> |
35 | Christoph Hellwig <hch@lst.de> | 35 | Christoph Hellwig <hch@lst.de> |
36 | Christophe Ricard <christophe.ricard@gmail.com> | ||
36 | Corey Minyard <minyard@acm.org> | 37 | Corey Minyard <minyard@acm.org> |
37 | Damian Hobson-Garcia <dhobsong@igel.co.jp> | 38 | Damian Hobson-Garcia <dhobsong@igel.co.jp> |
38 | David Brownell <david-b@pacbell.net> | 39 | David Brownell <david-b@pacbell.net> |
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index fad63136ee3e..2f659129694b 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt | |||
@@ -386,7 +386,7 @@ used. First phase is to "prepare" anything needed, including various checks, | |||
386 | memory allocation, etc. The goal is to handle the stuff that is not unlikely | 386 | memory allocation, etc. The goal is to handle the stuff that is not unlikely |
387 | to fail here. The second phase is to "commit" the actual changes. | 387 | to fail here. The second phase is to "commit" the actual changes. |
388 | 388 | ||
389 | Switchdev provides an inftrastructure for sharing items (for example memory | 389 | Switchdev provides an infrastructure for sharing items (for example memory |
390 | allocations) between the two phases. | 390 | allocations) between the two phases. |
391 | 391 | ||
392 | The object created by a driver in "prepare" phase and it is queued up by: | 392 | The object created by a driver in "prepare" phase and it is queued up by: |
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt new file mode 100644 index 000000000000..06afac252f5b --- /dev/null +++ b/Documentation/x86/topology.txt | |||
@@ -0,0 +1,208 @@ | |||
1 | x86 Topology | ||
2 | ============ | ||
3 | |||
4 | This documents and clarifies the main aspects of x86 topology modelling and | ||
5 | representation in the kernel. Update/change when doing changes to the | ||
6 | respective code. | ||
7 | |||
8 | The architecture-agnostic topology definitions are in | ||
9 | Documentation/cputopology.txt. This file holds x86-specific | ||
10 | differences/specialities which do not necessarily apply to the generic | ||
11 | definitions. Thus, the way to read up on Linux topology on x86 is to start | ||
12 | with the generic one and look at this one in parallel for the x86 specifics. | ||
13 | |||
14 | Needless to say, code should use the generic functions - this file is *only* | ||
15 | here to *document* the inner workings of x86 topology. | ||
16 | |||
17 | Started by Thomas Gleixner <tglx@linutronix.de> and Borislav Petkov <bp@alien8.de>. | ||
18 | |||
19 | The main aim of the topology facilities is to present adequate interfaces to | ||
20 | code which needs to know/query/use the structure of the running system wrt | ||
21 | threads, cores, packages, etc. | ||
22 | |||
23 | The kernel does not care about the concept of physical sockets because a | ||
24 | socket has no relevance to software. It's an electromechanical component. In | ||
25 | the past a socket always contained a single package (see below), but with the | ||
26 | advent of Multi Chip Modules (MCM) a socket can hold more than one package. So | ||
27 | there might be still references to sockets in the code, but they are of | ||
28 | historical nature and should be cleaned up. | ||
29 | |||
30 | The topology of a system is described in the units of: | ||
31 | |||
32 | - packages | ||
33 | - cores | ||
34 | - threads | ||
35 | |||
36 | * Package: | ||
37 | |||
38 | Packages contain a number of cores plus shared resources, e.g. DRAM | ||
39 | controller, shared caches etc. | ||
40 | |||
41 | AMD nomenclature for package is 'Node'. | ||
42 | |||
43 | Package-related topology information in the kernel: | ||
44 | |||
45 | - cpuinfo_x86.x86_max_cores: | ||
46 | |||
47 | The number of cores in a package. This information is retrieved via CPUID. | ||
48 | |||
49 | - cpuinfo_x86.phys_proc_id: | ||
50 | |||
51 | The physical ID of the package. This information is retrieved via CPUID | ||
52 | and deduced from the APIC IDs of the cores in the package. | ||
53 | |||
54 | - cpuinfo_x86.logical_proc_id: | ||
55 | |||
56 | The logical ID of the package. As we do not trust BIOSes to enumerate the | ||
57 | packages in a consistent way, we introduced the concept of logical package | ||
58 | ID so we can sanely calculate the number of maximum possible packages in | ||
59 | the system and have the packages enumerated linearly. | ||
60 | |||
61 | - topology_max_packages(): | ||
62 | |||
63 | The maximum possible number of packages in the system. Helpful for per | ||
64 | package facilities to preallocate per package information. | ||
65 | |||
66 | |||
67 | * Cores: | ||
68 | |||
69 | A core consists of 1 or more threads. It does not matter whether the threads | ||
70 | are SMT- or CMT-type threads. | ||
71 | |||
72 | AMDs nomenclature for a CMT core is "Compute Unit". The kernel always uses | ||
73 | "core". | ||
74 | |||
75 | Core-related topology information in the kernel: | ||
76 | |||
77 | - smp_num_siblings: | ||
78 | |||
79 | The number of threads in a core. The number of threads in a package can be | ||
80 | calculated by: | ||
81 | |||
82 | threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings | ||
83 | |||
84 | |||
85 | * Threads: | ||
86 | |||
87 | A thread is a single scheduling unit. It's the equivalent to a logical Linux | ||
88 | CPU. | ||
89 | |||
90 | AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always | ||
91 | uses "thread". | ||
92 | |||
93 | Thread-related topology information in the kernel: | ||
94 | |||
95 | - topology_core_cpumask(): | ||
96 | |||
97 | The cpumask contains all online threads in the package to which a thread | ||
98 | belongs. | ||
99 | |||
100 | The number of online threads is also printed in /proc/cpuinfo "siblings." | ||
101 | |||
102 | - topology_sibling_mask(): | ||
103 | |||
104 | The cpumask contains all online threads in the core to which a thread | ||
105 | belongs. | ||
106 | |||
107 | - topology_logical_package_id(): | ||
108 | |||
109 | The logical package ID to which a thread belongs. | ||
110 | |||
111 | - topology_physical_package_id(): | ||
112 | |||
113 | The physical package ID to which a thread belongs. | ||
114 | |||
115 | - topology_core_id(): | ||
116 | |||
117 | The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo | ||
118 | "core_id." | ||
119 | |||
120 | |||
121 | |||
122 | System topology examples | ||
123 | |||
124 | Note: | ||
125 | |||
126 | The alternative Linux CPU enumeration depends on how the BIOS enumerates the | ||
127 | threads. Many BIOSes enumerate all threads 0 first and then all threads 1. | ||
128 | That has the "advantage" that the logical Linux CPU numbers of threads 0 stay | ||
129 | the same whether threads are enabled or not. That's merely an implementation | ||
130 | detail and has no practical impact. | ||
131 | |||
132 | 1) Single Package, Single Core | ||
133 | |||
134 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
135 | |||
136 | 2) Single Package, Dual Core | ||
137 | |||
138 | a) One thread per core | ||
139 | |||
140 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
141 | -> [core 1] -> [thread 0] -> Linux CPU 1 | ||
142 | |||
143 | b) Two threads per core | ||
144 | |||
145 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
146 | -> [thread 1] -> Linux CPU 1 | ||
147 | -> [core 1] -> [thread 0] -> Linux CPU 2 | ||
148 | -> [thread 1] -> Linux CPU 3 | ||
149 | |||
150 | Alternative enumeration: | ||
151 | |||
152 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
153 | -> [thread 1] -> Linux CPU 2 | ||
154 | -> [core 1] -> [thread 0] -> Linux CPU 1 | ||
155 | -> [thread 1] -> Linux CPU 3 | ||
156 | |||
157 | AMD nomenclature for CMT systems: | ||
158 | |||
159 | [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0 | ||
160 | -> [Compute Unit Core 1] -> Linux CPU 1 | ||
161 | -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2 | ||
162 | -> [Compute Unit Core 1] -> Linux CPU 3 | ||
163 | |||
164 | 3) Dual Package, Dual Core | ||
165 | |||
166 | a) One thread per core | ||
167 | |||
168 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
169 | -> [core 1] -> [thread 0] -> Linux CPU 1 | ||
170 | |||
171 | [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2 | ||
172 | -> [core 1] -> [thread 0] -> Linux CPU 3 | ||
173 | |||
174 | b) Two threads per core | ||
175 | |||
176 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
177 | -> [thread 1] -> Linux CPU 1 | ||
178 | -> [core 1] -> [thread 0] -> Linux CPU 2 | ||
179 | -> [thread 1] -> Linux CPU 3 | ||
180 | |||
181 | [package 1] -> [core 0] -> [thread 0] -> Linux CPU 4 | ||
182 | -> [thread 1] -> Linux CPU 5 | ||
183 | -> [core 1] -> [thread 0] -> Linux CPU 6 | ||
184 | -> [thread 1] -> Linux CPU 7 | ||
185 | |||
186 | Alternative enumeration: | ||
187 | |||
188 | [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0 | ||
189 | -> [thread 1] -> Linux CPU 4 | ||
190 | -> [core 1] -> [thread 0] -> Linux CPU 1 | ||
191 | -> [thread 1] -> Linux CPU 5 | ||
192 | |||
193 | [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2 | ||
194 | -> [thread 1] -> Linux CPU 6 | ||
195 | -> [core 1] -> [thread 0] -> Linux CPU 3 | ||
196 | -> [thread 1] -> Linux CPU 7 | ||
197 | |||
198 | AMD nomenclature for CMT systems: | ||
199 | |||
200 | [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0 | ||
201 | -> [Compute Unit Core 1] -> Linux CPU 1 | ||
202 | -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2 | ||
203 | -> [Compute Unit Core 1] -> Linux CPU 3 | ||
204 | |||
205 | [node 1] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 4 | ||
206 | -> [Compute Unit Core 1] -> Linux CPU 5 | ||
207 | -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 6 | ||
208 | -> [Compute Unit Core 1] -> Linux CPU 7 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 03e00c7c88eb..1c32f8a3d6c4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -5042,6 +5042,7 @@ F: include/linux/hw_random.h | |||
5042 | HARDWARE SPINLOCK CORE | 5042 | HARDWARE SPINLOCK CORE |
5043 | M: Ohad Ben-Cohen <ohad@wizery.com> | 5043 | M: Ohad Ben-Cohen <ohad@wizery.com> |
5044 | M: Bjorn Andersson <bjorn.andersson@linaro.org> | 5044 | M: Bjorn Andersson <bjorn.andersson@linaro.org> |
5045 | L: linux-remoteproc@vger.kernel.org | ||
5045 | S: Maintained | 5046 | S: Maintained |
5046 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git | 5047 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git |
5047 | F: Documentation/hwspinlock.txt | 5048 | F: Documentation/hwspinlock.txt |
@@ -6402,7 +6403,7 @@ KPROBES | |||
6402 | M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> | 6403 | M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> |
6403 | M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 6404 | M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
6404 | M: "David S. Miller" <davem@davemloft.net> | 6405 | M: "David S. Miller" <davem@davemloft.net> |
6405 | M: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> | 6406 | M: Masami Hiramatsu <mhiramat@kernel.org> |
6406 | S: Maintained | 6407 | S: Maintained |
6407 | F: Documentation/kprobes.txt | 6408 | F: Documentation/kprobes.txt |
6408 | F: include/linux/kprobes.h | 6409 | F: include/linux/kprobes.h |
@@ -8253,7 +8254,7 @@ F: Documentation/filesystems/overlayfs.txt | |||
8253 | 8254 | ||
8254 | ORANGEFS FILESYSTEM | 8255 | ORANGEFS FILESYSTEM |
8255 | M: Mike Marshall <hubcap@omnibond.com> | 8256 | M: Mike Marshall <hubcap@omnibond.com> |
8256 | L: pvfs2-developers@beowulf-underground.org | 8257 | L: pvfs2-developers@beowulf-underground.org (subscribers-only) |
8257 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git | 8258 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git |
8258 | S: Supported | 8259 | S: Supported |
8259 | F: fs/orangefs/ | 8260 | F: fs/orangefs/ |
@@ -9314,6 +9315,7 @@ F: include/linux/regmap.h | |||
9314 | REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM | 9315 | REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM |
9315 | M: Ohad Ben-Cohen <ohad@wizery.com> | 9316 | M: Ohad Ben-Cohen <ohad@wizery.com> |
9316 | M: Bjorn Andersson <bjorn.andersson@linaro.org> | 9317 | M: Bjorn Andersson <bjorn.andersson@linaro.org> |
9318 | L: linux-remoteproc@vger.kernel.org | ||
9317 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git | 9319 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git |
9318 | S: Maintained | 9320 | S: Maintained |
9319 | F: drivers/remoteproc/ | 9321 | F: drivers/remoteproc/ |
@@ -9323,6 +9325,7 @@ F: include/linux/remoteproc.h | |||
9323 | REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM | 9325 | REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM |
9324 | M: Ohad Ben-Cohen <ohad@wizery.com> | 9326 | M: Ohad Ben-Cohen <ohad@wizery.com> |
9325 | M: Bjorn Andersson <bjorn.andersson@linaro.org> | 9327 | M: Bjorn Andersson <bjorn.andersson@linaro.org> |
9328 | L: linux-remoteproc@vger.kernel.org | ||
9326 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git | 9329 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git |
9327 | S: Maintained | 9330 | S: Maintained |
9328 | F: drivers/rpmsg/ | 9331 | F: drivers/rpmsg/ |
@@ -11137,8 +11140,8 @@ F: include/uapi/linux/tipc*.h | |||
11137 | F: net/tipc/ | 11140 | F: net/tipc/ |
11138 | 11141 | ||
11139 | TILE ARCHITECTURE | 11142 | TILE ARCHITECTURE |
11140 | M: Chris Metcalf <cmetcalf@ezchip.com> | 11143 | M: Chris Metcalf <cmetcalf@mellanox.com> |
11141 | W: http://www.ezchip.com/scm/ | 11144 | W: http://www.mellanox.com/repository/solutions/tile-scm/ |
11142 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git | 11145 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git |
11143 | S: Supported | 11146 | S: Supported |
11144 | F: arch/tile/ | 11147 | F: arch/tile/ |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc2 |
5 | NAME = Blurry Fish Butt | 5 | NAME = Blurry Fish Butt |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f70505186820..a44ef995d8ae 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -68,11 +68,13 @@ CONFIG_KSM=y | |||
68 | CONFIG_TRANSPARENT_HUGEPAGE=y | 68 | CONFIG_TRANSPARENT_HUGEPAGE=y |
69 | CONFIG_CMA=y | 69 | CONFIG_CMA=y |
70 | CONFIG_XEN=y | 70 | CONFIG_XEN=y |
71 | CONFIG_CMDLINE="console=ttyAMA0" | ||
72 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 71 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
73 | CONFIG_COMPAT=y | 72 | CONFIG_COMPAT=y |
74 | CONFIG_CPU_IDLE=y | 73 | CONFIG_CPU_IDLE=y |
75 | CONFIG_ARM_CPUIDLE=y | 74 | CONFIG_ARM_CPUIDLE=y |
75 | CONFIG_CPU_FREQ=y | ||
76 | CONFIG_ARM_BIG_LITTLE_CPUFREQ=y | ||
77 | CONFIG_ARM_SCPI_CPUFREQ=y | ||
76 | CONFIG_NET=y | 78 | CONFIG_NET=y |
77 | CONFIG_PACKET=y | 79 | CONFIG_PACKET=y |
78 | CONFIG_UNIX=y | 80 | CONFIG_UNIX=y |
@@ -80,7 +82,6 @@ CONFIG_INET=y | |||
80 | CONFIG_IP_PNP=y | 82 | CONFIG_IP_PNP=y |
81 | CONFIG_IP_PNP_DHCP=y | 83 | CONFIG_IP_PNP_DHCP=y |
82 | CONFIG_IP_PNP_BOOTP=y | 84 | CONFIG_IP_PNP_BOOTP=y |
83 | # CONFIG_INET_LRO is not set | ||
84 | # CONFIG_IPV6 is not set | 85 | # CONFIG_IPV6 is not set |
85 | CONFIG_BPF_JIT=y | 86 | CONFIG_BPF_JIT=y |
86 | # CONFIG_WIRELESS is not set | 87 | # CONFIG_WIRELESS is not set |
@@ -144,16 +145,18 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y | |||
144 | CONFIG_SERIAL_MVEBU_UART=y | 145 | CONFIG_SERIAL_MVEBU_UART=y |
145 | CONFIG_VIRTIO_CONSOLE=y | 146 | CONFIG_VIRTIO_CONSOLE=y |
146 | # CONFIG_HW_RANDOM is not set | 147 | # CONFIG_HW_RANDOM is not set |
147 | CONFIG_I2C=y | ||
148 | CONFIG_I2C_CHARDEV=y | 148 | CONFIG_I2C_CHARDEV=y |
149 | CONFIG_I2C_DESIGNWARE_PLATFORM=y | ||
149 | CONFIG_I2C_MV64XXX=y | 150 | CONFIG_I2C_MV64XXX=y |
150 | CONFIG_I2C_QUP=y | 151 | CONFIG_I2C_QUP=y |
152 | CONFIG_I2C_TEGRA=y | ||
151 | CONFIG_I2C_UNIPHIER_F=y | 153 | CONFIG_I2C_UNIPHIER_F=y |
152 | CONFIG_I2C_RCAR=y | 154 | CONFIG_I2C_RCAR=y |
153 | CONFIG_SPI=y | 155 | CONFIG_SPI=y |
154 | CONFIG_SPI_PL022=y | 156 | CONFIG_SPI_PL022=y |
155 | CONFIG_SPI_QUP=y | 157 | CONFIG_SPI_QUP=y |
156 | CONFIG_SPMI=y | 158 | CONFIG_SPMI=y |
159 | CONFIG_PINCTRL_SINGLE=y | ||
157 | CONFIG_PINCTRL_MSM8916=y | 160 | CONFIG_PINCTRL_MSM8916=y |
158 | CONFIG_PINCTRL_QCOM_SPMI_PMIC=y | 161 | CONFIG_PINCTRL_QCOM_SPMI_PMIC=y |
159 | CONFIG_GPIO_SYSFS=y | 162 | CONFIG_GPIO_SYSFS=y |
@@ -196,6 +199,7 @@ CONFIG_USB_EHCI_HCD_PLATFORM=y | |||
196 | CONFIG_USB_OHCI_HCD=y | 199 | CONFIG_USB_OHCI_HCD=y |
197 | CONFIG_USB_OHCI_HCD_PLATFORM=y | 200 | CONFIG_USB_OHCI_HCD_PLATFORM=y |
198 | CONFIG_USB_STORAGE=y | 201 | CONFIG_USB_STORAGE=y |
202 | CONFIG_USB_DWC2=y | ||
199 | CONFIG_USB_CHIPIDEA=y | 203 | CONFIG_USB_CHIPIDEA=y |
200 | CONFIG_USB_CHIPIDEA_UDC=y | 204 | CONFIG_USB_CHIPIDEA_UDC=y |
201 | CONFIG_USB_CHIPIDEA_HOST=y | 205 | CONFIG_USB_CHIPIDEA_HOST=y |
@@ -205,19 +209,20 @@ CONFIG_USB_MSM_OTG=y | |||
205 | CONFIG_USB_ULPI=y | 209 | CONFIG_USB_ULPI=y |
206 | CONFIG_USB_GADGET=y | 210 | CONFIG_USB_GADGET=y |
207 | CONFIG_MMC=y | 211 | CONFIG_MMC=y |
208 | CONFIG_MMC_BLOCK_MINORS=16 | 212 | CONFIG_MMC_BLOCK_MINORS=32 |
209 | CONFIG_MMC_ARMMMCI=y | 213 | CONFIG_MMC_ARMMMCI=y |
210 | CONFIG_MMC_SDHCI=y | 214 | CONFIG_MMC_SDHCI=y |
211 | CONFIG_MMC_SDHCI_PLTFM=y | 215 | CONFIG_MMC_SDHCI_PLTFM=y |
212 | CONFIG_MMC_SDHCI_TEGRA=y | 216 | CONFIG_MMC_SDHCI_TEGRA=y |
213 | CONFIG_MMC_SDHCI_MSM=y | 217 | CONFIG_MMC_SDHCI_MSM=y |
214 | CONFIG_MMC_SPI=y | 218 | CONFIG_MMC_SPI=y |
215 | CONFIG_MMC_SUNXI=y | ||
216 | CONFIG_MMC_DW=y | 219 | CONFIG_MMC_DW=y |
217 | CONFIG_MMC_DW_EXYNOS=y | 220 | CONFIG_MMC_DW_EXYNOS=y |
218 | CONFIG_MMC_BLOCK_MINORS=16 | 221 | CONFIG_MMC_DW_K3=y |
222 | CONFIG_MMC_SUNXI=y | ||
219 | CONFIG_NEW_LEDS=y | 223 | CONFIG_NEW_LEDS=y |
220 | CONFIG_LEDS_CLASS=y | 224 | CONFIG_LEDS_CLASS=y |
225 | CONFIG_LEDS_GPIO=y | ||
221 | CONFIG_LEDS_SYSCON=y | 226 | CONFIG_LEDS_SYSCON=y |
222 | CONFIG_LEDS_TRIGGERS=y | 227 | CONFIG_LEDS_TRIGGERS=y |
223 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 228 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
@@ -229,8 +234,8 @@ CONFIG_RTC_DRV_PL031=y | |||
229 | CONFIG_RTC_DRV_SUN6I=y | 234 | CONFIG_RTC_DRV_SUN6I=y |
230 | CONFIG_RTC_DRV_XGENE=y | 235 | CONFIG_RTC_DRV_XGENE=y |
231 | CONFIG_DMADEVICES=y | 236 | CONFIG_DMADEVICES=y |
232 | CONFIG_QCOM_BAM_DMA=y | ||
233 | CONFIG_TEGRA20_APB_DMA=y | 237 | CONFIG_TEGRA20_APB_DMA=y |
238 | CONFIG_QCOM_BAM_DMA=y | ||
234 | CONFIG_RCAR_DMAC=y | 239 | CONFIG_RCAR_DMAC=y |
235 | CONFIG_VFIO=y | 240 | CONFIG_VFIO=y |
236 | CONFIG_VFIO_PCI=y | 241 | CONFIG_VFIO_PCI=y |
@@ -239,20 +244,26 @@ CONFIG_VIRTIO_BALLOON=y | |||
239 | CONFIG_VIRTIO_MMIO=y | 244 | CONFIG_VIRTIO_MMIO=y |
240 | CONFIG_XEN_GNTDEV=y | 245 | CONFIG_XEN_GNTDEV=y |
241 | CONFIG_XEN_GRANT_DEV_ALLOC=y | 246 | CONFIG_XEN_GRANT_DEV_ALLOC=y |
247 | CONFIG_COMMON_CLK_SCPI=y | ||
242 | CONFIG_COMMON_CLK_CS2000_CP=y | 248 | CONFIG_COMMON_CLK_CS2000_CP=y |
243 | CONFIG_COMMON_CLK_QCOM=y | 249 | CONFIG_COMMON_CLK_QCOM=y |
244 | CONFIG_MSM_GCC_8916=y | 250 | CONFIG_MSM_GCC_8916=y |
245 | CONFIG_HWSPINLOCK_QCOM=y | 251 | CONFIG_HWSPINLOCK_QCOM=y |
252 | CONFIG_MAILBOX=y | ||
253 | CONFIG_ARM_MHU=y | ||
254 | CONFIG_HI6220_MBOX=y | ||
246 | CONFIG_ARM_SMMU=y | 255 | CONFIG_ARM_SMMU=y |
247 | CONFIG_QCOM_SMEM=y | 256 | CONFIG_QCOM_SMEM=y |
248 | CONFIG_QCOM_SMD=y | 257 | CONFIG_QCOM_SMD=y |
249 | CONFIG_QCOM_SMD_RPM=y | 258 | CONFIG_QCOM_SMD_RPM=y |
250 | CONFIG_ARCH_TEGRA_132_SOC=y | 259 | CONFIG_ARCH_TEGRA_132_SOC=y |
251 | CONFIG_ARCH_TEGRA_210_SOC=y | 260 | CONFIG_ARCH_TEGRA_210_SOC=y |
252 | CONFIG_HISILICON_IRQ_MBIGEN=y | ||
253 | CONFIG_EXTCON_USB_GPIO=y | 261 | CONFIG_EXTCON_USB_GPIO=y |
262 | CONFIG_COMMON_RESET_HI6220=y | ||
254 | CONFIG_PHY_RCAR_GEN3_USB2=y | 263 | CONFIG_PHY_RCAR_GEN3_USB2=y |
264 | CONFIG_PHY_HI6220_USB=y | ||
255 | CONFIG_PHY_XGENE=y | 265 | CONFIG_PHY_XGENE=y |
266 | CONFIG_ARM_SCPI_PROTOCOL=y | ||
256 | CONFIG_EXT2_FS=y | 267 | CONFIG_EXT2_FS=y |
257 | CONFIG_EXT3_FS=y | 268 | CONFIG_EXT3_FS=y |
258 | CONFIG_FANOTIFY=y | 269 | CONFIG_FANOTIFY=y |
@@ -264,6 +275,7 @@ CONFIG_CUSE=y | |||
264 | CONFIG_VFAT_FS=y | 275 | CONFIG_VFAT_FS=y |
265 | CONFIG_TMPFS=y | 276 | CONFIG_TMPFS=y |
266 | CONFIG_HUGETLBFS=y | 277 | CONFIG_HUGETLBFS=y |
278 | CONFIG_CONFIGFS_FS=y | ||
267 | CONFIG_EFIVAR_FS=y | 279 | CONFIG_EFIVAR_FS=y |
268 | CONFIG_SQUASHFS=y | 280 | CONFIG_SQUASHFS=y |
269 | CONFIG_NFS_FS=y | 281 | CONFIG_NFS_FS=y |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 227ed475dbd3..b7e82a795ac9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/kvm.h> | 27 | #include <asm/kvm.h> |
28 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
29 | #include <asm/kvm_mmio.h> | 29 | #include <asm/kvm_mmio.h> |
30 | #include <asm/kvm_perf_event.h> | ||
31 | 30 | ||
32 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED | 31 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
33 | 32 | ||
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index a46b019ebcf5..44eaff70da6a 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <asm/kvm_mmu.h> | 23 | #include <asm/kvm_mmu.h> |
24 | #include <asm/kvm_perf_event.h> | ||
25 | #include <asm/sysreg.h> | 24 | #include <asm/sysreg.h> |
26 | 25 | ||
27 | #define __hyp_text __section(.hyp.text) notrace | 26 | #define __hyp_text __section(.hyp.text) notrace |
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h deleted file mode 100644 index c18fdebb8f66..000000000000 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | |||
17 | #ifndef __ASM_KVM_PERF_EVENT_H | ||
18 | #define __ASM_KVM_PERF_EVENT_H | ||
19 | |||
20 | #define ARMV8_PMU_MAX_COUNTERS 32 | ||
21 | #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) | ||
22 | |||
23 | /* | ||
24 | * Per-CPU PMCR: config reg | ||
25 | */ | ||
26 | #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ | ||
27 | #define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ | ||
28 | #define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ | ||
29 | #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
30 | #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ | ||
31 | #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ | ||
32 | /* Determines which bit of PMCCNTR_EL0 generates an overflow */ | ||
33 | #define ARMV8_PMU_PMCR_LC (1 << 6) | ||
34 | #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ | ||
35 | #define ARMV8_PMU_PMCR_N_MASK 0x1f | ||
36 | #define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ | ||
37 | |||
38 | /* | ||
39 | * PMOVSR: counters overflow flag status reg | ||
40 | */ | ||
41 | #define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ | ||
42 | #define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK | ||
43 | |||
44 | /* | ||
45 | * PMXEVTYPER: Event selection reg | ||
46 | */ | ||
47 | #define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ | ||
48 | #define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ | ||
49 | |||
50 | #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ | ||
51 | |||
52 | /* | ||
53 | * Event filters for PMUv3 | ||
54 | */ | ||
55 | #define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) | ||
56 | #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) | ||
57 | #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) | ||
58 | |||
59 | /* | ||
60 | * PMUSERENR: user enable reg | ||
61 | */ | ||
62 | #define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ | ||
63 | #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ | ||
64 | #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ | ||
65 | #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ | ||
66 | #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ | ||
67 | |||
68 | #endif | ||
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h index 4e603ea36ad3..123f45d92cd1 100644 --- a/arch/arm64/include/asm/opcodes.h +++ b/arch/arm64/include/asm/opcodes.h | |||
@@ -1 +1,5 @@ | |||
1 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
2 | #define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN | ||
3 | #endif | ||
4 | |||
1 | #include <../../arm/include/asm/opcodes.h> | 5 | #include <../../arm/include/asm/opcodes.h> |
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 7bd3cdb533ea..2065f46fa740 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h | |||
@@ -17,6 +17,53 @@ | |||
17 | #ifndef __ASM_PERF_EVENT_H | 17 | #ifndef __ASM_PERF_EVENT_H |
18 | #define __ASM_PERF_EVENT_H | 18 | #define __ASM_PERF_EVENT_H |
19 | 19 | ||
20 | #define ARMV8_PMU_MAX_COUNTERS 32 | ||
21 | #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) | ||
22 | |||
23 | /* | ||
24 | * Per-CPU PMCR: config reg | ||
25 | */ | ||
26 | #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ | ||
27 | #define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ | ||
28 | #define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ | ||
29 | #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
30 | #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ | ||
31 | #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ | ||
32 | #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ | ||
33 | #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ | ||
34 | #define ARMV8_PMU_PMCR_N_MASK 0x1f | ||
35 | #define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ | ||
36 | |||
37 | /* | ||
38 | * PMOVSR: counters overflow flag status reg | ||
39 | */ | ||
40 | #define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ | ||
41 | #define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK | ||
42 | |||
43 | /* | ||
44 | * PMXEVTYPER: Event selection reg | ||
45 | */ | ||
46 | #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ | ||
47 | #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ | ||
48 | |||
49 | #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ | ||
50 | |||
51 | /* | ||
52 | * Event filters for PMUv3 | ||
53 | */ | ||
54 | #define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) | ||
55 | #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) | ||
56 | #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) | ||
57 | |||
58 | /* | ||
59 | * PMUSERENR: user enable reg | ||
60 | */ | ||
61 | #define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ | ||
62 | #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ | ||
63 | #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ | ||
64 | #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ | ||
65 | #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ | ||
66 | |||
20 | #ifdef CONFIG_PERF_EVENTS | 67 | #ifdef CONFIG_PERF_EVENTS |
21 | struct pt_regs; | 68 | struct pt_regs; |
22 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | 69 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 767c4f6e1f5b..f419a7c075a4 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <asm/irq_regs.h> | 22 | #include <asm/irq_regs.h> |
23 | #include <asm/perf_event.h> | ||
23 | #include <asm/virt.h> | 24 | #include <asm/virt.h> |
24 | 25 | ||
25 | #include <linux/of.h> | 26 | #include <linux/of.h> |
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = { | |||
384 | #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ | 385 | #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ |
385 | (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | 386 | (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) |
386 | 387 | ||
387 | #define ARMV8_MAX_COUNTERS 32 | ||
388 | #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) | ||
389 | |||
390 | /* | 388 | /* |
391 | * ARMv8 low level PMU access | 389 | * ARMv8 low level PMU access |
392 | */ | 390 | */ |
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = { | |||
395 | * Perf Event to low level counters mapping | 393 | * Perf Event to low level counters mapping |
396 | */ | 394 | */ |
397 | #define ARMV8_IDX_TO_COUNTER(x) \ | 395 | #define ARMV8_IDX_TO_COUNTER(x) \ |
398 | (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) | 396 | (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) |
399 | |||
400 | /* | ||
401 | * Per-CPU PMCR: config reg | ||
402 | */ | ||
403 | #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ | ||
404 | #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ | ||
405 | #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ | ||
406 | #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
407 | #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ | ||
408 | #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ | ||
409 | #define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ | ||
410 | #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ | ||
411 | #define ARMV8_PMCR_N_MASK 0x1f | ||
412 | #define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */ | ||
413 | |||
414 | /* | ||
415 | * PMOVSR: counters overflow flag status reg | ||
416 | */ | ||
417 | #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ | ||
418 | #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK | ||
419 | |||
420 | /* | ||
421 | * PMXEVTYPER: Event selection reg | ||
422 | */ | ||
423 | #define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ | ||
424 | #define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ | ||
425 | |||
426 | /* | ||
427 | * Event filters for PMUv3 | ||
428 | */ | ||
429 | #define ARMV8_EXCLUDE_EL1 (1 << 31) | ||
430 | #define ARMV8_EXCLUDE_EL0 (1 << 30) | ||
431 | #define ARMV8_INCLUDE_EL2 (1 << 27) | ||
432 | 397 | ||
433 | static inline u32 armv8pmu_pmcr_read(void) | 398 | static inline u32 armv8pmu_pmcr_read(void) |
434 | { | 399 | { |
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void) | |||
439 | 404 | ||
440 | static inline void armv8pmu_pmcr_write(u32 val) | 405 | static inline void armv8pmu_pmcr_write(u32 val) |
441 | { | 406 | { |
442 | val &= ARMV8_PMCR_MASK; | 407 | val &= ARMV8_PMU_PMCR_MASK; |
443 | isb(); | 408 | isb(); |
444 | asm volatile("msr pmcr_el0, %0" :: "r" (val)); | 409 | asm volatile("msr pmcr_el0, %0" :: "r" (val)); |
445 | } | 410 | } |
446 | 411 | ||
447 | static inline int armv8pmu_has_overflowed(u32 pmovsr) | 412 | static inline int armv8pmu_has_overflowed(u32 pmovsr) |
448 | { | 413 | { |
449 | return pmovsr & ARMV8_OVERFLOWED_MASK; | 414 | return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; |
450 | } | 415 | } |
451 | 416 | ||
452 | static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) | 417 | static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) | |||
512 | static inline void armv8pmu_write_evtype(int idx, u32 val) | 477 | static inline void armv8pmu_write_evtype(int idx, u32 val) |
513 | { | 478 | { |
514 | if (armv8pmu_select_counter(idx) == idx) { | 479 | if (armv8pmu_select_counter(idx) == idx) { |
515 | val &= ARMV8_EVTYPE_MASK; | 480 | val &= ARMV8_PMU_EVTYPE_MASK; |
516 | asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); | 481 | asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); |
517 | } | 482 | } |
518 | } | 483 | } |
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void) | |||
558 | asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); | 523 | asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); |
559 | 524 | ||
560 | /* Write to clear flags */ | 525 | /* Write to clear flags */ |
561 | value &= ARMV8_OVSR_MASK; | 526 | value &= ARMV8_PMU_OVSR_MASK; |
562 | asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); | 527 | asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); |
563 | 528 | ||
564 | return value; | 529 | return value; |
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu) | |||
696 | 661 | ||
697 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 662 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
698 | /* Enable all counters */ | 663 | /* Enable all counters */ |
699 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); | 664 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); |
700 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 665 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
701 | } | 666 | } |
702 | 667 | ||
@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu) | |||
707 | 672 | ||
708 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 673 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
709 | /* Disable all counters */ | 674 | /* Disable all counters */ |
710 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); | 675 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); |
711 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 676 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
712 | } | 677 | } |
713 | 678 | ||
@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, | |||
717 | int idx; | 682 | int idx; |
718 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | 683 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
719 | struct hw_perf_event *hwc = &event->hw; | 684 | struct hw_perf_event *hwc = &event->hw; |
720 | unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT; | 685 | unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; |
721 | 686 | ||
722 | /* Always place a cycle counter into the cycle counter. */ | 687 | /* Always place a cycle counter into the cycle counter. */ |
723 | if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { | 688 | if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { |
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, | |||
754 | attr->exclude_kernel != attr->exclude_hv) | 719 | attr->exclude_kernel != attr->exclude_hv) |
755 | return -EINVAL; | 720 | return -EINVAL; |
756 | if (attr->exclude_user) | 721 | if (attr->exclude_user) |
757 | config_base |= ARMV8_EXCLUDE_EL0; | 722 | config_base |= ARMV8_PMU_EXCLUDE_EL0; |
758 | if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) | 723 | if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) |
759 | config_base |= ARMV8_EXCLUDE_EL1; | 724 | config_base |= ARMV8_PMU_EXCLUDE_EL1; |
760 | if (!attr->exclude_hv) | 725 | if (!attr->exclude_hv) |
761 | config_base |= ARMV8_INCLUDE_EL2; | 726 | config_base |= ARMV8_PMU_INCLUDE_EL2; |
762 | 727 | ||
763 | /* | 728 | /* |
764 | * Install the filter into config_base as this is used to | 729 | * Install the filter into config_base as this is used to |
@@ -784,35 +749,36 @@ static void armv8pmu_reset(void *info) | |||
784 | * Initialize & Reset PMNC. Request overflow interrupt for | 749 | * Initialize & Reset PMNC. Request overflow interrupt for |
785 | * 64 bit cycle counter but cheat in armv8pmu_write_counter(). | 750 | * 64 bit cycle counter but cheat in armv8pmu_write_counter(). |
786 | */ | 751 | */ |
787 | armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC); | 752 | armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | |
753 | ARMV8_PMU_PMCR_LC); | ||
788 | } | 754 | } |
789 | 755 | ||
790 | static int armv8_pmuv3_map_event(struct perf_event *event) | 756 | static int armv8_pmuv3_map_event(struct perf_event *event) |
791 | { | 757 | { |
792 | return armpmu_map_event(event, &armv8_pmuv3_perf_map, | 758 | return armpmu_map_event(event, &armv8_pmuv3_perf_map, |
793 | &armv8_pmuv3_perf_cache_map, | 759 | &armv8_pmuv3_perf_cache_map, |
794 | ARMV8_EVTYPE_EVENT); | 760 | ARMV8_PMU_EVTYPE_EVENT); |
795 | } | 761 | } |
796 | 762 | ||
797 | static int armv8_a53_map_event(struct perf_event *event) | 763 | static int armv8_a53_map_event(struct perf_event *event) |
798 | { | 764 | { |
799 | return armpmu_map_event(event, &armv8_a53_perf_map, | 765 | return armpmu_map_event(event, &armv8_a53_perf_map, |
800 | &armv8_a53_perf_cache_map, | 766 | &armv8_a53_perf_cache_map, |
801 | ARMV8_EVTYPE_EVENT); | 767 | ARMV8_PMU_EVTYPE_EVENT); |
802 | } | 768 | } |
803 | 769 | ||
804 | static int armv8_a57_map_event(struct perf_event *event) | 770 | static int armv8_a57_map_event(struct perf_event *event) |
805 | { | 771 | { |
806 | return armpmu_map_event(event, &armv8_a57_perf_map, | 772 | return armpmu_map_event(event, &armv8_a57_perf_map, |
807 | &armv8_a57_perf_cache_map, | 773 | &armv8_a57_perf_cache_map, |
808 | ARMV8_EVTYPE_EVENT); | 774 | ARMV8_PMU_EVTYPE_EVENT); |
809 | } | 775 | } |
810 | 776 | ||
811 | static int armv8_thunder_map_event(struct perf_event *event) | 777 | static int armv8_thunder_map_event(struct perf_event *event) |
812 | { | 778 | { |
813 | return armpmu_map_event(event, &armv8_thunder_perf_map, | 779 | return armpmu_map_event(event, &armv8_thunder_perf_map, |
814 | &armv8_thunder_perf_cache_map, | 780 | &armv8_thunder_perf_cache_map, |
815 | ARMV8_EVTYPE_EVENT); | 781 | ARMV8_PMU_EVTYPE_EVENT); |
816 | } | 782 | } |
817 | 783 | ||
818 | static void armv8pmu_read_num_pmnc_events(void *info) | 784 | static void armv8pmu_read_num_pmnc_events(void *info) |
@@ -820,7 +786,7 @@ static void armv8pmu_read_num_pmnc_events(void *info) | |||
820 | int *nb_cnt = info; | 786 | int *nb_cnt = info; |
821 | 787 | ||
822 | /* Read the nb of CNTx counters supported from PMNC */ | 788 | /* Read the nb of CNTx counters supported from PMNC */ |
823 | *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; | 789 | *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; |
824 | 790 | ||
825 | /* Add the CPU cycles counter */ | 791 | /* Add the CPU cycles counter */ |
826 | *nb_cnt += 1; | 792 | *nb_cnt += 1; |
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c index 718dd197909f..367c5426157b 100644 --- a/arch/nios2/kernel/prom.c +++ b/arch/nios2/kernel/prom.c | |||
@@ -97,8 +97,7 @@ static int __init early_init_dt_scan_serial(unsigned long node, | |||
97 | return 0; | 97 | return 0; |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | *addr64 = fdt_translate_address((const void *)initial_boot_params, | 100 | *addr64 = of_flat_dt_translate_address(node); |
101 | node); | ||
102 | 101 | ||
103 | return *addr64 == OF_BAD_ADDR ? 0 : 1; | 102 | return *addr64 == OF_BAD_ADDR ? 0 : 1; |
104 | } | 103 | } |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 989fa14147a9..bd3c873951a1 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -30,6 +30,7 @@ config PARISC | |||
30 | select TTY # Needed for pdc_cons.c | 30 | select TTY # Needed for pdc_cons.c |
31 | select HAVE_DEBUG_STACKOVERFLOW | 31 | select HAVE_DEBUG_STACKOVERFLOW |
32 | select HAVE_ARCH_AUDITSYSCALL | 32 | select HAVE_ARCH_AUDITSYSCALL |
33 | select HAVE_ARCH_SECCOMP_FILTER | ||
33 | select ARCH_NO_COHERENT_DMA_MMAP | 34 | select ARCH_NO_COHERENT_DMA_MMAP |
34 | 35 | ||
35 | help | 36 | help |
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 0448a2c8eafb..3387307cc33e 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h | |||
@@ -183,6 +183,13 @@ typedef struct compat_siginfo { | |||
183 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | 183 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ |
184 | int _fd; | 184 | int _fd; |
185 | } _sigpoll; | 185 | } _sigpoll; |
186 | |||
187 | /* SIGSYS */ | ||
188 | struct { | ||
189 | compat_uptr_t _call_addr; /* calling user insn */ | ||
190 | int _syscall; /* triggering system call number */ | ||
191 | compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */ | ||
192 | } _sigsys; | ||
186 | } _sifields; | 193 | } _sifields; |
187 | } compat_siginfo_t; | 194 | } compat_siginfo_t; |
188 | 195 | ||
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index a5eba95d87fe..637ce8d6f375 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h | |||
@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk, | |||
39 | } | 39 | } |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void syscall_set_return_value(struct task_struct *task, | ||
43 | struct pt_regs *regs, | ||
44 | int error, long val) | ||
45 | { | ||
46 | regs->gr[28] = error ? error : val; | ||
47 | } | ||
48 | |||
49 | static inline void syscall_rollback(struct task_struct *task, | ||
50 | struct pt_regs *regs) | ||
51 | { | ||
52 | /* do nothing */ | ||
53 | } | ||
54 | |||
42 | static inline int syscall_get_arch(void) | 55 | static inline int syscall_get_arch(void) |
43 | { | 56 | { |
44 | int arch = AUDIT_ARCH_PARISC; | 57 | int arch = AUDIT_ARCH_PARISC; |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index ce0b2b4075c7..8fb81a391599 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
270 | long do_syscall_trace_enter(struct pt_regs *regs) | 270 | long do_syscall_trace_enter(struct pt_regs *regs) |
271 | { | 271 | { |
272 | /* Do the secure computing check first. */ | 272 | /* Do the secure computing check first. */ |
273 | secure_computing_strict(regs->gr[20]); | 273 | if (secure_computing() == -1) |
274 | return -1; | ||
274 | 275 | ||
275 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 276 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
276 | tracehook_report_syscall_entry(regs)) { | 277 | tracehook_report_syscall_entry(regs)) { |
@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
296 | regs->gr[23] & 0xffffffff); | 297 | regs->gr[23] & 0xffffffff); |
297 | 298 | ||
298 | out: | 299 | out: |
299 | return regs->gr[20]; | 300 | /* |
301 | * Sign extend the syscall number to 64bit since it may have been | ||
302 | * modified by a compat ptrace call | ||
303 | */ | ||
304 | return (int) ((u32) regs->gr[20]); | ||
300 | } | 305 | } |
301 | 306 | ||
302 | void do_syscall_trace_exit(struct pt_regs *regs) | 307 | void do_syscall_trace_exit(struct pt_regs *regs) |
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index 984abbee71ca..c342b2e17492 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c | |||
@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from) | |||
371 | val = (compat_int_t)from->si_int; | 371 | val = (compat_int_t)from->si_int; |
372 | err |= __put_user(val, &to->si_int); | 372 | err |= __put_user(val, &to->si_int); |
373 | break; | 373 | break; |
374 | case __SI_SYS >> 16: | ||
375 | err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr); | ||
376 | err |= __put_user(from->si_syscall, &to->si_syscall); | ||
377 | err |= __put_user(from->si_arch, &to->si_arch); | ||
378 | break; | ||
374 | } | 379 | } |
375 | } | 380 | } |
376 | return err; | 381 | return err; |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index fbafa0d0e2bf..c976ebfe2269 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -329,6 +329,7 @@ tracesys_next: | |||
329 | 329 | ||
330 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 330 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
331 | LDREG TI_TASK(%r1), %r1 | 331 | LDREG TI_TASK(%r1), %r1 |
332 | LDREG TASK_PT_GR28(%r1), %r28 /* Restore return value */ | ||
332 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ | 333 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ |
333 | LDREG TASK_PT_GR25(%r1), %r25 | 334 | LDREG TASK_PT_GR25(%r1), %r25 |
334 | LDREG TASK_PT_GR24(%r1), %r24 | 335 | LDREG TASK_PT_GR24(%r1), %r24 |
@@ -342,6 +343,7 @@ tracesys_next: | |||
342 | stw %r21, -56(%r30) /* 6th argument */ | 343 | stw %r21, -56(%r30) /* 6th argument */ |
343 | #endif | 344 | #endif |
344 | 345 | ||
346 | cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ | ||
345 | comiclr,>>= __NR_Linux_syscalls, %r20, %r0 | 347 | comiclr,>>= __NR_Linux_syscalls, %r20, %r0 |
346 | b,n .Ltracesys_nosys | 348 | b,n .Ltracesys_nosys |
347 | 349 | ||
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8ab8a1a9610a..009fab130cd8 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -246,7 +246,7 @@ struct thread_struct { | |||
246 | #endif /* CONFIG_ALTIVEC */ | 246 | #endif /* CONFIG_ALTIVEC */ |
247 | #ifdef CONFIG_VSX | 247 | #ifdef CONFIG_VSX |
248 | /* VSR status */ | 248 | /* VSR status */ |
249 | int used_vsr; /* set if process has used altivec */ | 249 | int used_vsr; /* set if process has used VSX */ |
250 | #endif /* CONFIG_VSX */ | 250 | #endif /* CONFIG_VSX */ |
251 | #ifdef CONFIG_SPE | 251 | #ifdef CONFIG_SPE |
252 | unsigned long evr[32]; /* upper 32-bits of SPE regs */ | 252 | unsigned long evr[32]; /* upper 32-bits of SPE regs */ |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 612df305886b..b8500b4ac7fe 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -983,7 +983,7 @@ void restore_tm_state(struct pt_regs *regs) | |||
983 | static inline void save_sprs(struct thread_struct *t) | 983 | static inline void save_sprs(struct thread_struct *t) |
984 | { | 984 | { |
985 | #ifdef CONFIG_ALTIVEC | 985 | #ifdef CONFIG_ALTIVEC |
986 | if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC))) | 986 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
987 | t->vrsave = mfspr(SPRN_VRSAVE); | 987 | t->vrsave = mfspr(SPRN_VRSAVE); |
988 | #endif | 988 | #endif |
989 | #ifdef CONFIG_PPC_BOOK3S_64 | 989 | #ifdef CONFIG_PPC_BOOK3S_64 |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6dd272b6196f..d991b9e80dbb 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -413,13 +413,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) | |||
413 | { | 413 | { |
414 | struct hugepd_freelist **batchp; | 414 | struct hugepd_freelist **batchp; |
415 | 415 | ||
416 | batchp = this_cpu_ptr(&hugepd_freelist_cur); | 416 | batchp = &get_cpu_var(hugepd_freelist_cur); |
417 | 417 | ||
418 | if (atomic_read(&tlb->mm->mm_users) < 2 || | 418 | if (atomic_read(&tlb->mm->mm_users) < 2 || |
419 | cpumask_equal(mm_cpumask(tlb->mm), | 419 | cpumask_equal(mm_cpumask(tlb->mm), |
420 | cpumask_of(smp_processor_id()))) { | 420 | cpumask_of(smp_processor_id()))) { |
421 | kmem_cache_free(hugepte_cache, hugepte); | 421 | kmem_cache_free(hugepte_cache, hugepte); |
422 | put_cpu_var(hugepd_freelist_cur); | 422 | put_cpu_var(hugepd_freelist_cur); |
423 | return; | 423 | return; |
424 | } | 424 | } |
425 | 425 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b9df8d11d7a9..aad23e3dff2c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -59,6 +59,9 @@ config PCI_QUIRKS | |||
59 | config ARCH_SUPPORTS_UPROBES | 59 | config ARCH_SUPPORTS_UPROBES |
60 | def_bool y | 60 | def_bool y |
61 | 61 | ||
62 | config DEBUG_RODATA | ||
63 | def_bool y | ||
64 | |||
62 | config S390 | 65 | config S390 |
63 | def_bool y | 66 | def_bool y |
64 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 67 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index b8045b97f4fb..d750cc0dfe30 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c | |||
@@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = { | |||
669 | static struct miscdevice prng_sha512_dev = { | 669 | static struct miscdevice prng_sha512_dev = { |
670 | .name = "prandom", | 670 | .name = "prandom", |
671 | .minor = MISC_DYNAMIC_MINOR, | 671 | .minor = MISC_DYNAMIC_MINOR, |
672 | .mode = 0644, | ||
672 | .fops = &prng_sha512_fops, | 673 | .fops = &prng_sha512_fops, |
673 | }; | 674 | }; |
674 | static struct miscdevice prng_tdes_dev = { | 675 | static struct miscdevice prng_tdes_dev = { |
675 | .name = "prandom", | 676 | .name = "prandom", |
676 | .minor = MISC_DYNAMIC_MINOR, | 677 | .minor = MISC_DYNAMIC_MINOR, |
678 | .mode = 0644, | ||
677 | .fops = &prng_tdes_fops, | 679 | .fops = &prng_tdes_fops, |
678 | }; | 680 | }; |
679 | 681 | ||
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 4d7ccac5fd1d..22da3b34c655 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h | |||
@@ -15,4 +15,7 @@ | |||
15 | 15 | ||
16 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | 16 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
17 | 17 | ||
18 | /* Read-only memory is marked before mark_rodata_ro() is called. */ | ||
19 | #define __ro_after_init __read_mostly | ||
20 | |||
18 | #endif | 21 | #endif |
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index ab3aa6875a59..4384bc797a54 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h | |||
@@ -311,7 +311,9 @@ | |||
311 | #define __NR_shutdown 373 | 311 | #define __NR_shutdown 373 |
312 | #define __NR_mlock2 374 | 312 | #define __NR_mlock2 374 |
313 | #define __NR_copy_file_range 375 | 313 | #define __NR_copy_file_range 375 |
314 | #define NR_syscalls 376 | 314 | #define __NR_preadv2 376 |
315 | #define __NR_pwritev2 377 | ||
316 | #define NR_syscalls 378 | ||
315 | 317 | ||
316 | /* | 318 | /* |
317 | * There are some system calls that are not present on 64 bit, some | 319 | * There are some system calls that are not present on 64 bit, some |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 58bf4572d457..62f066b5259e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -670,6 +670,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action, | |||
670 | 670 | ||
671 | switch (action & ~CPU_TASKS_FROZEN) { | 671 | switch (action & ~CPU_TASKS_FROZEN) { |
672 | case CPU_ONLINE: | 672 | case CPU_ONLINE: |
673 | case CPU_DOWN_FAILED: | ||
673 | flags = PMC_INIT; | 674 | flags = PMC_INIT; |
674 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); | 675 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); |
675 | break; | 676 | break; |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 1a43474df541..eaab9a7cb3be 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -1521,7 +1521,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self, | |||
1521 | 1521 | ||
1522 | switch (action & ~CPU_TASKS_FROZEN) { | 1522 | switch (action & ~CPU_TASKS_FROZEN) { |
1523 | case CPU_ONLINE: | 1523 | case CPU_ONLINE: |
1524 | case CPU_ONLINE_FROZEN: | 1524 | case CPU_DOWN_FAILED: |
1525 | flags = PMC_INIT; | 1525 | flags = PMC_INIT; |
1526 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); | 1526 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); |
1527 | break; | 1527 | break; |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 293d8b98fd52..9b59e6212d8f 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -384,3 +384,5 @@ SYSCALL(sys_recvmsg,compat_sys_recvmsg) | |||
384 | SYSCALL(sys_shutdown,sys_shutdown) | 384 | SYSCALL(sys_shutdown,sys_shutdown) |
385 | SYSCALL(sys_mlock2,compat_sys_mlock2) | 385 | SYSCALL(sys_mlock2,compat_sys_mlock2) |
386 | SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ | 386 | SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ |
387 | SYSCALL(sys_preadv2,compat_sys_preadv2) | ||
388 | SYSCALL(sys_pwritev2,compat_sys_pwritev2) | ||
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 49a1c84ed266..a8a6765f1a51 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -20,9 +20,9 @@ | |||
20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | 20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, |
21 | unsigned long end, int write, struct page **pages, int *nr) | 21 | unsigned long end, int write, struct page **pages, int *nr) |
22 | { | 22 | { |
23 | struct page *head, *page; | ||
23 | unsigned long mask; | 24 | unsigned long mask; |
24 | pte_t *ptep, pte; | 25 | pte_t *ptep, pte; |
25 | struct page *page; | ||
26 | 26 | ||
27 | mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL; | 27 | mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL; |
28 | 28 | ||
@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
37 | return 0; | 37 | return 0; |
38 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 38 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
39 | page = pte_page(pte); | 39 | page = pte_page(pte); |
40 | if (!page_cache_get_speculative(page)) | 40 | head = compound_head(page); |
41 | if (!page_cache_get_speculative(head)) | ||
41 | return 0; | 42 | return 0; |
42 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | 43 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
43 | put_page(page); | 44 | put_page(head); |
44 | return 0; | 45 | return 0; |
45 | } | 46 | } |
47 | VM_BUG_ON_PAGE(compound_head(page) != head, page); | ||
46 | pages[*nr] = page; | 48 | pages[*nr] = page; |
47 | (*nr)++; | 49 | (*nr)++; |
48 | 50 | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 73e290337092..c7b0451397d6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -108,6 +108,13 @@ void __init paging_init(void) | |||
108 | free_area_init_nodes(max_zone_pfns); | 108 | free_area_init_nodes(max_zone_pfns); |
109 | } | 109 | } |
110 | 110 | ||
111 | void mark_rodata_ro(void) | ||
112 | { | ||
113 | /* Text and rodata are already protected. Nothing to do here. */ | ||
114 | pr_info("Write protecting the kernel read-only data: %luk\n", | ||
115 | ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10); | ||
116 | } | ||
117 | |||
111 | void __init mem_init(void) | 118 | void __init mem_init(void) |
112 | { | 119 | { |
113 | if (MACHINE_HAS_TLB_LC) | 120 | if (MACHINE_HAS_TLB_LC) |
@@ -126,9 +133,6 @@ void __init mem_init(void) | |||
126 | setup_zero_pages(); /* Setup zeroed pages. */ | 133 | setup_zero_pages(); /* Setup zeroed pages. */ |
127 | 134 | ||
128 | mem_init_print_info(NULL); | 135 | mem_init_print_info(NULL); |
129 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | ||
130 | (unsigned long)&_stext, | ||
131 | PFN_ALIGN((unsigned long)&_eshared) - 1); | ||
132 | } | 136 | } |
133 | 137 | ||
134 | void free_initmem(void) | 138 | void free_initmem(void) |
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 21591ddb4c1f..1a4512c8544a 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c | |||
@@ -176,8 +176,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh) | |||
176 | rc = clp_store_query_pci_fn(zdev, &rrb->response); | 176 | rc = clp_store_query_pci_fn(zdev, &rrb->response); |
177 | if (rc) | 177 | if (rc) |
178 | goto out; | 178 | goto out; |
179 | if (rrb->response.pfgid) | 179 | rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid); |
180 | rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid); | ||
181 | } else { | 180 | } else { |
182 | zpci_err("Q PCI FN:\n"); | 181 | zpci_err("Q PCI FN:\n"); |
183 | zpci_err_clp(rrb->response.hdr.rsp, rc); | 182 | zpci_err_clp(rrb->response.hdr.rsp, rc); |
diff --git a/arch/sparc/include/asm/compat_signal.h b/arch/sparc/include/asm/compat_signal.h index 9ed1f128b4d1..4b027b1044fa 100644 --- a/arch/sparc/include/asm/compat_signal.h +++ b/arch/sparc/include/asm/compat_signal.h | |||
@@ -6,17 +6,17 @@ | |||
6 | 6 | ||
7 | #ifdef CONFIG_COMPAT | 7 | #ifdef CONFIG_COMPAT |
8 | struct __new_sigaction32 { | 8 | struct __new_sigaction32 { |
9 | unsigned sa_handler; | 9 | unsigned int sa_handler; |
10 | unsigned int sa_flags; | 10 | unsigned int sa_flags; |
11 | unsigned sa_restorer; /* not used by Linux/SPARC yet */ | 11 | unsigned int sa_restorer; /* not used by Linux/SPARC yet */ |
12 | compat_sigset_t sa_mask; | 12 | compat_sigset_t sa_mask; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | struct __old_sigaction32 { | 15 | struct __old_sigaction32 { |
16 | unsigned sa_handler; | 16 | unsigned int sa_handler; |
17 | compat_old_sigset_t sa_mask; | 17 | compat_old_sigset_t sa_mask; |
18 | unsigned int sa_flags; | 18 | unsigned int sa_flags; |
19 | unsigned sa_restorer; /* not used by Linux/SPARC yet */ | 19 | unsigned int sa_restorer; /* not used by Linux/SPARC yet */ |
20 | }; | 20 | }; |
21 | #endif | 21 | #endif |
22 | 22 | ||
diff --git a/arch/sparc/include/asm/obio.h b/arch/sparc/include/asm/obio.h index 910c1d9af1f8..426ad75103fb 100644 --- a/arch/sparc/include/asm/obio.h +++ b/arch/sparc/include/asm/obio.h | |||
@@ -117,9 +117,9 @@ static inline void bw_clear_intr_mask(int sbus_level, int mask) | |||
117 | "i" (ASI_M_CTL)); | 117 | "i" (ASI_M_CTL)); |
118 | } | 118 | } |
119 | 119 | ||
120 | static inline unsigned bw_get_prof_limit(int cpu) | 120 | static inline unsigned int bw_get_prof_limit(int cpu) |
121 | { | 121 | { |
122 | unsigned limit; | 122 | unsigned int limit; |
123 | 123 | ||
124 | __asm__ __volatile__ ("lda [%1] %2, %0" : | 124 | __asm__ __volatile__ ("lda [%1] %2, %0" : |
125 | "=r" (limit) : | 125 | "=r" (limit) : |
@@ -128,7 +128,7 @@ static inline unsigned bw_get_prof_limit(int cpu) | |||
128 | return limit; | 128 | return limit; |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline void bw_set_prof_limit(int cpu, unsigned limit) | 131 | static inline void bw_set_prof_limit(int cpu, unsigned int limit) |
132 | { | 132 | { |
133 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 133 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
134 | "r" (limit), | 134 | "r" (limit), |
@@ -136,9 +136,9 @@ static inline void bw_set_prof_limit(int cpu, unsigned limit) | |||
136 | "i" (ASI_M_CTL)); | 136 | "i" (ASI_M_CTL)); |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline unsigned bw_get_ctrl(int cpu) | 139 | static inline unsigned int bw_get_ctrl(int cpu) |
140 | { | 140 | { |
141 | unsigned ctrl; | 141 | unsigned int ctrl; |
142 | 142 | ||
143 | __asm__ __volatile__ ("lda [%1] %2, %0" : | 143 | __asm__ __volatile__ ("lda [%1] %2, %0" : |
144 | "=r" (ctrl) : | 144 | "=r" (ctrl) : |
@@ -147,7 +147,7 @@ static inline unsigned bw_get_ctrl(int cpu) | |||
147 | return ctrl; | 147 | return ctrl; |
148 | } | 148 | } |
149 | 149 | ||
150 | static inline void bw_set_ctrl(int cpu, unsigned ctrl) | 150 | static inline void bw_set_ctrl(int cpu, unsigned int ctrl) |
151 | { | 151 | { |
152 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 152 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
153 | "r" (ctrl), | 153 | "r" (ctrl), |
@@ -155,9 +155,9 @@ static inline void bw_set_ctrl(int cpu, unsigned ctrl) | |||
155 | "i" (ASI_M_CTL)); | 155 | "i" (ASI_M_CTL)); |
156 | } | 156 | } |
157 | 157 | ||
158 | static inline unsigned cc_get_ipen(void) | 158 | static inline unsigned int cc_get_ipen(void) |
159 | { | 159 | { |
160 | unsigned pending; | 160 | unsigned int pending; |
161 | 161 | ||
162 | __asm__ __volatile__ ("lduha [%1] %2, %0" : | 162 | __asm__ __volatile__ ("lduha [%1] %2, %0" : |
163 | "=r" (pending) : | 163 | "=r" (pending) : |
@@ -166,7 +166,7 @@ static inline unsigned cc_get_ipen(void) | |||
166 | return pending; | 166 | return pending; |
167 | } | 167 | } |
168 | 168 | ||
169 | static inline void cc_set_iclr(unsigned clear) | 169 | static inline void cc_set_iclr(unsigned int clear) |
170 | { | 170 | { |
171 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 171 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
172 | "r" (clear), | 172 | "r" (clear), |
@@ -174,9 +174,9 @@ static inline void cc_set_iclr(unsigned clear) | |||
174 | "i" (ASI_M_MXCC)); | 174 | "i" (ASI_M_MXCC)); |
175 | } | 175 | } |
176 | 176 | ||
177 | static inline unsigned cc_get_imsk(void) | 177 | static inline unsigned int cc_get_imsk(void) |
178 | { | 178 | { |
179 | unsigned mask; | 179 | unsigned int mask; |
180 | 180 | ||
181 | __asm__ __volatile__ ("lduha [%1] %2, %0" : | 181 | __asm__ __volatile__ ("lduha [%1] %2, %0" : |
182 | "=r" (mask) : | 182 | "=r" (mask) : |
@@ -185,7 +185,7 @@ static inline unsigned cc_get_imsk(void) | |||
185 | return mask; | 185 | return mask; |
186 | } | 186 | } |
187 | 187 | ||
188 | static inline void cc_set_imsk(unsigned mask) | 188 | static inline void cc_set_imsk(unsigned int mask) |
189 | { | 189 | { |
190 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 190 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
191 | "r" (mask), | 191 | "r" (mask), |
@@ -193,9 +193,9 @@ static inline void cc_set_imsk(unsigned mask) | |||
193 | "i" (ASI_M_MXCC)); | 193 | "i" (ASI_M_MXCC)); |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline unsigned cc_get_imsk_other(int cpuid) | 196 | static inline unsigned int cc_get_imsk_other(int cpuid) |
197 | { | 197 | { |
198 | unsigned mask; | 198 | unsigned int mask; |
199 | 199 | ||
200 | __asm__ __volatile__ ("lduha [%1] %2, %0" : | 200 | __asm__ __volatile__ ("lduha [%1] %2, %0" : |
201 | "=r" (mask) : | 201 | "=r" (mask) : |
@@ -204,7 +204,7 @@ static inline unsigned cc_get_imsk_other(int cpuid) | |||
204 | return mask; | 204 | return mask; |
205 | } | 205 | } |
206 | 206 | ||
207 | static inline void cc_set_imsk_other(int cpuid, unsigned mask) | 207 | static inline void cc_set_imsk_other(int cpuid, unsigned int mask) |
208 | { | 208 | { |
209 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 209 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
210 | "r" (mask), | 210 | "r" (mask), |
@@ -212,7 +212,7 @@ static inline void cc_set_imsk_other(int cpuid, unsigned mask) | |||
212 | "i" (ASI_M_CTL)); | 212 | "i" (ASI_M_CTL)); |
213 | } | 213 | } |
214 | 214 | ||
215 | static inline void cc_set_igen(unsigned gen) | 215 | static inline void cc_set_igen(unsigned int gen) |
216 | { | 216 | { |
217 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 217 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
218 | "r" (gen), | 218 | "r" (gen), |
diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h index 47eaafad15ce..63374c4413a8 100644 --- a/arch/sparc/include/asm/openprom.h +++ b/arch/sparc/include/asm/openprom.h | |||
@@ -29,12 +29,12 @@ struct linux_dev_v0_funcs { | |||
29 | /* V2 and later prom device operations. */ | 29 | /* V2 and later prom device operations. */ |
30 | struct linux_dev_v2_funcs { | 30 | struct linux_dev_v2_funcs { |
31 | phandle (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */ | 31 | phandle (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */ |
32 | char * (*v2_dumb_mem_alloc)(char *va, unsigned sz); | 32 | char * (*v2_dumb_mem_alloc)(char *va, unsigned int sz); |
33 | void (*v2_dumb_mem_free)(char *va, unsigned sz); | 33 | void (*v2_dumb_mem_free)(char *va, unsigned int sz); |
34 | 34 | ||
35 | /* To map devices into virtual I/O space. */ | 35 | /* To map devices into virtual I/O space. */ |
36 | char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz); | 36 | char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned int paddr, unsigned int sz); |
37 | void (*v2_dumb_munmap)(char *virta, unsigned size); | 37 | void (*v2_dumb_munmap)(char *virta, unsigned int size); |
38 | 38 | ||
39 | int (*v2_dev_open)(char *devpath); | 39 | int (*v2_dev_open)(char *devpath); |
40 | void (*v2_dev_close)(int d); | 40 | void (*v2_dev_close)(int d); |
@@ -50,7 +50,7 @@ struct linux_dev_v2_funcs { | |||
50 | struct linux_mlist_v0 { | 50 | struct linux_mlist_v0 { |
51 | struct linux_mlist_v0 *theres_more; | 51 | struct linux_mlist_v0 *theres_more; |
52 | unsigned int start_adr; | 52 | unsigned int start_adr; |
53 | unsigned num_bytes; | 53 | unsigned int num_bytes; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct linux_mem_v0 { | 56 | struct linux_mem_v0 { |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7a38d6a576c5..f089cfa249f3 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -218,7 +218,7 @@ extern pgprot_t PAGE_KERNEL_LOCKED; | |||
218 | extern pgprot_t PAGE_COPY; | 218 | extern pgprot_t PAGE_COPY; |
219 | extern pgprot_t PAGE_SHARED; | 219 | extern pgprot_t PAGE_SHARED; |
220 | 220 | ||
221 | /* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ | 221 | /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */ |
222 | extern unsigned long _PAGE_IE; | 222 | extern unsigned long _PAGE_IE; |
223 | extern unsigned long _PAGE_E; | 223 | extern unsigned long _PAGE_E; |
224 | extern unsigned long _PAGE_CACHE; | 224 | extern unsigned long _PAGE_CACHE; |
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 6924bdefe148..ce2595c89471 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h | |||
@@ -201,7 +201,7 @@ unsigned long get_wchan(struct task_struct *task); | |||
201 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) | 201 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) |
202 | 202 | ||
203 | /* Please see the commentary in asm/backoff.h for a description of | 203 | /* Please see the commentary in asm/backoff.h for a description of |
204 | * what these instructions are doing and how they have been choosen. | 204 | * what these instructions are doing and how they have been chosen. |
205 | * To make a long story short, we are trying to yield the current cpu | 205 | * To make a long story short, we are trying to yield the current cpu |
206 | * strand during busy loops. | 206 | * strand during busy loops. |
207 | */ | 207 | */ |
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h index fc2df1e892cb..f4eb630a58ed 100644 --- a/arch/sparc/include/asm/sigcontext.h +++ b/arch/sparc/include/asm/sigcontext.h | |||
@@ -25,7 +25,7 @@ struct sigcontext32 { | |||
25 | int sigc_oswins; /* outstanding windows */ | 25 | int sigc_oswins; /* outstanding windows */ |
26 | 26 | ||
27 | /* stack ptrs for each regwin buf */ | 27 | /* stack ptrs for each regwin buf */ |
28 | unsigned sigc_spbuf[__SUNOS_MAXWIN]; | 28 | unsigned int sigc_spbuf[__SUNOS_MAXWIN]; |
29 | 29 | ||
30 | /* Windows to restore after signal */ | 30 | /* Windows to restore after signal */ |
31 | struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN]; | 31 | struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN]; |
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index ecb49cfa3be9..c6a155c3904e 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h | |||
@@ -149,7 +149,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | |||
149 | * page size in question. So for PMD mappings (which fall on | 149 | * page size in question. So for PMD mappings (which fall on |
150 | * bit 23, for 8MB per PMD) we must propagate bit 22 for a | 150 | * bit 23, for 8MB per PMD) we must propagate bit 22 for a |
151 | * 4MB huge page. For huge PUDs (which fall on bit 33, for | 151 | * 4MB huge page. For huge PUDs (which fall on bit 33, for |
152 | * 8GB per PUD), we have to accomodate 256MB and 2GB huge | 152 | * 8GB per PUD), we have to accommodate 256MB and 2GB huge |
153 | * pages. So for those we propagate bits 32 to 28. | 153 | * pages. So for those we propagate bits 32 to 28. |
154 | */ | 154 | */ |
155 | #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ | 155 | #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ |
diff --git a/arch/sparc/include/uapi/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h index a232e9e1f4e5..2f0583a2c689 100644 --- a/arch/sparc/include/uapi/asm/stat.h +++ b/arch/sparc/include/uapi/asm/stat.h | |||
@@ -6,13 +6,13 @@ | |||
6 | #if defined(__sparc__) && defined(__arch64__) | 6 | #if defined(__sparc__) && defined(__arch64__) |
7 | /* 64 bit sparc */ | 7 | /* 64 bit sparc */ |
8 | struct stat { | 8 | struct stat { |
9 | unsigned st_dev; | 9 | unsigned int st_dev; |
10 | ino_t st_ino; | 10 | ino_t st_ino; |
11 | mode_t st_mode; | 11 | mode_t st_mode; |
12 | short st_nlink; | 12 | short st_nlink; |
13 | uid_t st_uid; | 13 | uid_t st_uid; |
14 | gid_t st_gid; | 14 | gid_t st_gid; |
15 | unsigned st_rdev; | 15 | unsigned int st_rdev; |
16 | off_t st_size; | 16 | off_t st_size; |
17 | time_t st_atime; | 17 | time_t st_atime; |
18 | time_t st_mtime; | 18 | time_t st_mtime; |
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c index 24361b494a93..2585c1e14bcc 100644 --- a/arch/sparc/kernel/audit.c +++ b/arch/sparc/kernel/audit.c | |||
@@ -5,27 +5,27 @@ | |||
5 | 5 | ||
6 | #include "kernel.h" | 6 | #include "kernel.h" |
7 | 7 | ||
8 | static unsigned dir_class[] = { | 8 | static unsigned int dir_class[] = { |
9 | #include <asm-generic/audit_dir_write.h> | 9 | #include <asm-generic/audit_dir_write.h> |
10 | ~0U | 10 | ~0U |
11 | }; | 11 | }; |
12 | 12 | ||
13 | static unsigned read_class[] = { | 13 | static unsigned int read_class[] = { |
14 | #include <asm-generic/audit_read.h> | 14 | #include <asm-generic/audit_read.h> |
15 | ~0U | 15 | ~0U |
16 | }; | 16 | }; |
17 | 17 | ||
18 | static unsigned write_class[] = { | 18 | static unsigned int write_class[] = { |
19 | #include <asm-generic/audit_write.h> | 19 | #include <asm-generic/audit_write.h> |
20 | ~0U | 20 | ~0U |
21 | }; | 21 | }; |
22 | 22 | ||
23 | static unsigned chattr_class[] = { | 23 | static unsigned int chattr_class[] = { |
24 | #include <asm-generic/audit_change_attr.h> | 24 | #include <asm-generic/audit_change_attr.h> |
25 | ~0U | 25 | ~0U |
26 | }; | 26 | }; |
27 | 27 | ||
28 | static unsigned signal_class[] = { | 28 | static unsigned int signal_class[] = { |
29 | #include <asm-generic/audit_signal.h> | 29 | #include <asm-generic/audit_signal.h> |
30 | ~0U | 30 | ~0U |
31 | }; | 31 | }; |
@@ -39,7 +39,7 @@ int audit_classify_arch(int arch) | |||
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | int audit_classify_syscall(int abi, unsigned syscall) | 42 | int audit_classify_syscall(int abi, unsigned int syscall) |
43 | { | 43 | { |
44 | #ifdef CONFIG_COMPAT | 44 | #ifdef CONFIG_COMPAT |
45 | if (abi == AUDIT_ARCH_SPARC) | 45 | if (abi == AUDIT_ARCH_SPARC) |
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c index 7062263d09c1..e5611cd428f1 100644 --- a/arch/sparc/kernel/compat_audit.c +++ b/arch/sparc/kernel/compat_audit.c | |||
@@ -2,32 +2,32 @@ | |||
2 | #include <asm/unistd.h> | 2 | #include <asm/unistd.h> |
3 | #include "kernel.h" | 3 | #include "kernel.h" |
4 | 4 | ||
5 | unsigned sparc32_dir_class[] = { | 5 | unsigned int sparc32_dir_class[] = { |
6 | #include <asm-generic/audit_dir_write.h> | 6 | #include <asm-generic/audit_dir_write.h> |
7 | ~0U | 7 | ~0U |
8 | }; | 8 | }; |
9 | 9 | ||
10 | unsigned sparc32_chattr_class[] = { | 10 | unsigned int sparc32_chattr_class[] = { |
11 | #include <asm-generic/audit_change_attr.h> | 11 | #include <asm-generic/audit_change_attr.h> |
12 | ~0U | 12 | ~0U |
13 | }; | 13 | }; |
14 | 14 | ||
15 | unsigned sparc32_write_class[] = { | 15 | unsigned int sparc32_write_class[] = { |
16 | #include <asm-generic/audit_write.h> | 16 | #include <asm-generic/audit_write.h> |
17 | ~0U | 17 | ~0U |
18 | }; | 18 | }; |
19 | 19 | ||
20 | unsigned sparc32_read_class[] = { | 20 | unsigned int sparc32_read_class[] = { |
21 | #include <asm-generic/audit_read.h> | 21 | #include <asm-generic/audit_read.h> |
22 | ~0U | 22 | ~0U |
23 | }; | 23 | }; |
24 | 24 | ||
25 | unsigned sparc32_signal_class[] = { | 25 | unsigned int sparc32_signal_class[] = { |
26 | #include <asm-generic/audit_signal.h> | 26 | #include <asm-generic/audit_signal.h> |
27 | ~0U | 27 | ~0U |
28 | }; | 28 | }; |
29 | 29 | ||
30 | int sparc32_classify_syscall(unsigned syscall) | 30 | int sparc32_classify_syscall(unsigned int syscall) |
31 | { | 31 | { |
32 | switch(syscall) { | 32 | switch(syscall) { |
33 | case __NR_open: | 33 | case __NR_open: |
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index a83707c83be8..51aa6e86a5f8 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S | |||
@@ -1255,7 +1255,7 @@ flush_patch_exception: | |||
1255 | kuw_patch1_7win: sll %o3, 6, %o3 | 1255 | kuw_patch1_7win: sll %o3, 6, %o3 |
1256 | 1256 | ||
1257 | /* No matter how much overhead this routine has in the worst | 1257 | /* No matter how much overhead this routine has in the worst |
1258 | * case scenerio, it is several times better than taking the | 1258 | * case scenario, it is several times better than taking the |
1259 | * traps with the old method of just doing flush_user_windows(). | 1259 | * traps with the old method of just doing flush_user_windows(). |
1260 | */ | 1260 | */ |
1261 | kill_user_windows: | 1261 | kill_user_windows: |
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 28fed53b13a0..ffd5ff4678cf 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -131,7 +131,7 @@ void __iomem *ioremap(unsigned long offset, unsigned long size) | |||
131 | EXPORT_SYMBOL(ioremap); | 131 | EXPORT_SYMBOL(ioremap); |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * Comlimentary to ioremap(). | 134 | * Complementary to ioremap(). |
135 | */ | 135 | */ |
136 | void iounmap(volatile void __iomem *virtual) | 136 | void iounmap(volatile void __iomem *virtual) |
137 | { | 137 | { |
@@ -233,7 +233,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | /* | 235 | /* |
236 | * Comlimentary to _sparc_ioremap(). | 236 | * Complementary to _sparc_ioremap(). |
237 | */ | 237 | */ |
238 | static void _sparc_free_io(struct resource *res) | 238 | static void _sparc_free_io(struct resource *res) |
239 | { | 239 | { |
@@ -532,7 +532,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, | |||
532 | } | 532 | } |
533 | 533 | ||
534 | /* Map a set of buffers described by scatterlist in streaming | 534 | /* Map a set of buffers described by scatterlist in streaming |
535 | * mode for DMA. This is the scather-gather version of the | 535 | * mode for DMA. This is the scatter-gather version of the |
536 | * above pci_map_single interface. Here the scatter gather list | 536 | * above pci_map_single interface. Here the scatter gather list |
537 | * elements are each tagged with the appropriate dma address | 537 | * elements are each tagged with the appropriate dma address |
538 | * and length. They are obtained via sg_dma_{address,length}(SG). | 538 | * and length. They are obtained via sg_dma_{address,length}(SG). |
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index e7f652be9e61..5057ec2e4af6 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h | |||
@@ -54,12 +54,12 @@ void do_signal32(struct pt_regs * regs); | |||
54 | asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); | 54 | asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); |
55 | 55 | ||
56 | /* compat_audit.c */ | 56 | /* compat_audit.c */ |
57 | extern unsigned sparc32_dir_class[]; | 57 | extern unsigned int sparc32_dir_class[]; |
58 | extern unsigned sparc32_chattr_class[]; | 58 | extern unsigned int sparc32_chattr_class[]; |
59 | extern unsigned sparc32_write_class[]; | 59 | extern unsigned int sparc32_write_class[]; |
60 | extern unsigned sparc32_read_class[]; | 60 | extern unsigned int sparc32_read_class[]; |
61 | extern unsigned sparc32_signal_class[]; | 61 | extern unsigned int sparc32_signal_class[]; |
62 | int sparc32_classify_syscall(unsigned syscall); | 62 | int sparc32_classify_syscall(unsigned int syscall); |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | #ifdef CONFIG_SPARC32 | 65 | #ifdef CONFIG_SPARC32 |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 42efcf85f721..33cd171d933e 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c | |||
@@ -203,7 +203,7 @@ static struct irq_chip leon_irq = { | |||
203 | 203 | ||
204 | /* | 204 | /* |
205 | * Build a LEON IRQ for the edge triggered LEON IRQ controller: | 205 | * Build a LEON IRQ for the edge triggered LEON IRQ controller: |
206 | * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack | 206 | * Edge (normal) IRQ - handle_simple_irq, ack=DON'T-CARE, never ack |
207 | * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR | 207 | * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR |
208 | * Per-CPU Edge - handle_percpu_irq, ack=0 | 208 | * Per-CPU Edge - handle_percpu_irq, ack=0 |
209 | */ | 209 | */ |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 46a59643bb1c..c16ef1af1843 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -103,7 +103,7 @@ static void show_regwindow32(struct pt_regs *regs) | |||
103 | mm_segment_t old_fs; | 103 | mm_segment_t old_fs; |
104 | 104 | ||
105 | __asm__ __volatile__ ("flushw"); | 105 | __asm__ __volatile__ ("flushw"); |
106 | rw = compat_ptr((unsigned)regs->u_regs[14]); | 106 | rw = compat_ptr((unsigned int)regs->u_regs[14]); |
107 | old_fs = get_fs(); | 107 | old_fs = get_fs(); |
108 | set_fs (USER_DS); | 108 | set_fs (USER_DS); |
109 | if (copy_from_user (&r_w, rw, sizeof(r_w))) { | 109 | if (copy_from_user (&r_w, rw, sizeof(r_w))) { |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index baef495c06bd..69d75ff1c25c 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -109,7 +109,7 @@ unsigned long cmdline_memory_size __initdata = 0; | |||
109 | unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */ | 109 | unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */ |
110 | 110 | ||
111 | static void | 111 | static void |
112 | prom_console_write(struct console *con, const char *s, unsigned n) | 112 | prom_console_write(struct console *con, const char *s, unsigned int n) |
113 | { | 113 | { |
114 | prom_write(s, n); | 114 | prom_write(s, n); |
115 | } | 115 | } |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index f3185e2b028b..26db95b54ee9 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -77,7 +77,7 @@ struct screen_info screen_info = { | |||
77 | }; | 77 | }; |
78 | 78 | ||
79 | static void | 79 | static void |
80 | prom_console_write(struct console *con, const char *s, unsigned n) | 80 | prom_console_write(struct console *con, const char *s, unsigned int n) |
81 | { | 81 | { |
82 | prom_write(s, n); | 82 | prom_write(s, n); |
83 | } | 83 | } |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 4eed773a7735..3c25241fa5cb 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
@@ -144,7 +144,7 @@ void do_sigreturn32(struct pt_regs *regs) | |||
144 | compat_uptr_t fpu_save; | 144 | compat_uptr_t fpu_save; |
145 | compat_uptr_t rwin_save; | 145 | compat_uptr_t rwin_save; |
146 | unsigned int psr; | 146 | unsigned int psr; |
147 | unsigned pc, npc; | 147 | unsigned int pc, npc; |
148 | sigset_t set; | 148 | sigset_t set; |
149 | compat_sigset_t seta; | 149 | compat_sigset_t seta; |
150 | int err, i; | 150 | int err, i; |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index b489e9759518..fe8b8ee8e660 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -337,10 +337,10 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second | |||
337 | switch (call) { | 337 | switch (call) { |
338 | case SEMOP: | 338 | case SEMOP: |
339 | err = sys_semtimedop(first, ptr, | 339 | err = sys_semtimedop(first, ptr, |
340 | (unsigned)second, NULL); | 340 | (unsigned int)second, NULL); |
341 | goto out; | 341 | goto out; |
342 | case SEMTIMEDOP: | 342 | case SEMTIMEDOP: |
343 | err = sys_semtimedop(first, ptr, (unsigned)second, | 343 | err = sys_semtimedop(first, ptr, (unsigned int)second, |
344 | (const struct timespec __user *) | 344 | (const struct timespec __user *) |
345 | (unsigned long) fifth); | 345 | (unsigned long) fifth); |
346 | goto out; | 346 | goto out; |
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 7f41d40b7e6e..fa8e21abb5e0 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* sysfs.c: Toplogy sysfs support code for sparc64. | 1 | /* sysfs.c: Topology sysfs support code for sparc64. |
2 | * | 2 | * |
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | 3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> |
4 | */ | 4 | */ |
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index d89e97b374cf..9aacb9159262 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -209,8 +209,8 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, | |||
209 | if (size == 16) { | 209 | if (size == 16) { |
210 | size = 8; | 210 | size = 8; |
211 | zero = (((long)(reg_num ? | 211 | zero = (((long)(reg_num ? |
212 | (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | | 212 | (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) | |
213 | (unsigned)fetch_reg(reg_num + 1, regs); | 213 | (unsigned int)fetch_reg(reg_num + 1, regs); |
214 | } else if (reg_num) { | 214 | } else if (reg_num) { |
215 | src_val_p = fetch_reg_addr(reg_num, regs); | 215 | src_val_p = fetch_reg_addr(reg_num, regs); |
216 | } | 216 | } |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index c399e7b3b035..b6c559cbd64d 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -303,10 +303,10 @@ no_context: | |||
303 | fixup = search_extables_range(regs->pc, &g2); | 303 | fixup = search_extables_range(regs->pc, &g2); |
304 | /* Values below 10 are reserved for other things */ | 304 | /* Values below 10 are reserved for other things */ |
305 | if (fixup > 10) { | 305 | if (fixup > 10) { |
306 | extern const unsigned __memset_start[]; | 306 | extern const unsigned int __memset_start[]; |
307 | extern const unsigned __memset_end[]; | 307 | extern const unsigned int __memset_end[]; |
308 | extern const unsigned __csum_partial_copy_start[]; | 308 | extern const unsigned int __csum_partial_copy_start[]; |
309 | extern const unsigned __csum_partial_copy_end[]; | 309 | extern const unsigned int __csum_partial_copy_end[]; |
310 | 310 | ||
311 | #ifdef DEBUG_EXCEPTIONS | 311 | #ifdef DEBUG_EXCEPTIONS |
312 | printk("Exception: PC<%08lx> faddr<%08lx>\n", | 312 | printk("Exception: PC<%08lx> faddr<%08lx>\n", |
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 3e6e05a7c4c2..a6d9204a6a0b 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c | |||
@@ -351,7 +351,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \ | |||
351 | * | 351 | * |
352 | * Sometimes we need to emit a branch earlier in the code | 352 | * Sometimes we need to emit a branch earlier in the code |
353 | * sequence. And in these situations we adjust "destination" | 353 | * sequence. And in these situations we adjust "destination" |
354 | * to accomodate this difference. For example, if we needed | 354 | * to accommodate this difference. For example, if we needed |
355 | * to emit a branch (and it's delay slot) right before the | 355 | * to emit a branch (and it's delay slot) right before the |
356 | * final instruction emitted for a BPF opcode, we'd use | 356 | * final instruction emitted for a BPF opcode, we'd use |
357 | * "destination + 4" instead of just plain "destination" above. | 357 | * "destination + 4" instead of just plain "destination" above. |
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h index c97e416dd963..ff7f50f970a5 100644 --- a/arch/tile/include/hv/drv_mpipe_intf.h +++ b/arch/tile/include/hv/drv_mpipe_intf.h | |||
@@ -211,7 +211,7 @@ _gxio_mpipe_link_mac_t; | |||
211 | * request shared data permission on the same link. | 211 | * request shared data permission on the same link. |
212 | * | 212 | * |
213 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | 213 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, |
214 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | 214 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() |
215 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | 215 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. |
216 | */ | 216 | */ |
217 | #define GXIO_MPIPE_LINK_DATA 0x00000001UL | 217 | #define GXIO_MPIPE_LINK_DATA 0x00000001UL |
@@ -219,7 +219,7 @@ _gxio_mpipe_link_mac_t; | |||
219 | /** Do not request data permission on the specified link. | 219 | /** Do not request data permission on the specified link. |
220 | * | 220 | * |
221 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | 221 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, |
222 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | 222 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() |
223 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | 223 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. |
224 | */ | 224 | */ |
225 | #define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL | 225 | #define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL |
@@ -230,7 +230,7 @@ _gxio_mpipe_link_mac_t; | |||
230 | * data permission on it, this open will fail. | 230 | * data permission on it, this open will fail. |
231 | * | 231 | * |
232 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | 232 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, |
233 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | 233 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() |
234 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | 234 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. |
235 | */ | 235 | */ |
236 | #define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL | 236 | #define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL |
@@ -241,7 +241,7 @@ _gxio_mpipe_link_mac_t; | |||
241 | * permission on the same link. | 241 | * permission on the same link. |
242 | * | 242 | * |
243 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | 243 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, |
244 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | 244 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() |
245 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | 245 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. |
246 | */ | 246 | */ |
247 | #define GXIO_MPIPE_LINK_STATS 0x00000008UL | 247 | #define GXIO_MPIPE_LINK_STATS 0x00000008UL |
@@ -249,7 +249,7 @@ _gxio_mpipe_link_mac_t; | |||
249 | /** Do not request stats permission on the specified link. | 249 | /** Do not request stats permission on the specified link. |
250 | * | 250 | * |
251 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | 251 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, |
252 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | 252 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() |
253 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | 253 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. |
254 | */ | 254 | */ |
255 | #define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL | 255 | #define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL |
@@ -267,7 +267,7 @@ _gxio_mpipe_link_mac_t; | |||
267 | * reset by other statistics programs. | 267 | * reset by other statistics programs. |
268 | * | 268 | * |
269 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | 269 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, |
270 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | 270 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() |
271 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | 271 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. |
272 | */ | 272 | */ |
273 | #define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL | 273 | #define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL |
@@ -278,7 +278,7 @@ _gxio_mpipe_link_mac_t; | |||
278 | * permission on the same link. | 278 | * permission on the same link. |
279 | * | 279 | * |
280 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | 280 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, |
281 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | 281 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() |
282 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | 282 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. |
283 | */ | 283 | */ |
284 | #define GXIO_MPIPE_LINK_CTL 0x00000040UL | 284 | #define GXIO_MPIPE_LINK_CTL 0x00000040UL |
@@ -286,7 +286,7 @@ _gxio_mpipe_link_mac_t; | |||
286 | /** Do not request control permission on the specified link. | 286 | /** Do not request control permission on the specified link. |
287 | * | 287 | * |
288 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | 288 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, |
289 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | 289 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() |
290 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | 290 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. |
291 | */ | 291 | */ |
292 | #define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL | 292 | #define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL |
@@ -301,7 +301,7 @@ _gxio_mpipe_link_mac_t; | |||
301 | * it prevents programs like mpipe-link from configuring the link. | 301 | * it prevents programs like mpipe-link from configuring the link. |
302 | * | 302 | * |
303 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | 303 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, |
304 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | 304 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() |
305 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | 305 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. |
306 | */ | 306 | */ |
307 | #define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL | 307 | #define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL |
@@ -311,7 +311,7 @@ _gxio_mpipe_link_mac_t; | |||
311 | * change the desired state of the link when it is closed or the process | 311 | * change the desired state of the link when it is closed or the process |
312 | * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | 312 | * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, |
313 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | 313 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or |
314 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() | 314 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open() |
315 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | 315 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. |
316 | */ | 316 | */ |
317 | #define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL | 317 | #define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL |
@@ -322,7 +322,7 @@ _gxio_mpipe_link_mac_t; | |||
322 | * open, set the desired state of the link to down. No more than one of | 322 | * open, set the desired state of the link to down. No more than one of |
323 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | 323 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, |
324 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | 324 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be |
325 | * specifed in a gxio_mpipe_link_open() call. If none are specified, | 325 | * specified in a gxio_mpipe_link_open() call. If none are specified, |
326 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | 326 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. |
327 | */ | 327 | */ |
328 | #define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL | 328 | #define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL |
@@ -332,7 +332,7 @@ _gxio_mpipe_link_mac_t; | |||
332 | * process has the link open, set the desired state of the link to down. | 332 | * process has the link open, set the desired state of the link to down. |
333 | * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | 333 | * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, |
334 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | 334 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or |
335 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() | 335 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open() |
336 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | 336 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. |
337 | */ | 337 | */ |
338 | #define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL | 338 | #define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL |
@@ -342,7 +342,7 @@ _gxio_mpipe_link_mac_t; | |||
342 | * closed or the process exits. No more than one of | 342 | * closed or the process exits. No more than one of |
343 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | 343 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, |
344 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | 344 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be |
345 | * specifed in a gxio_mpipe_link_open() call. If none are specified, | 345 | * specified in a gxio_mpipe_link_open() call. If none are specified, |
346 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | 346 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. |
347 | */ | 347 | */ |
348 | #define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL | 348 | #define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL |
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c index a506c2c28943..9247d6b562f4 100644 --- a/arch/tile/kernel/kgdb.c +++ b/arch/tile/kernel/kgdb.c | |||
@@ -126,15 +126,15 @@ void | |||
126 | sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) | 126 | sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) |
127 | { | 127 | { |
128 | struct pt_regs *thread_regs; | 128 | struct pt_regs *thread_regs; |
129 | const int NGPRS = TREG_LAST_GPR + 1; | ||
129 | 130 | ||
130 | if (task == NULL) | 131 | if (task == NULL) |
131 | return; | 132 | return; |
132 | 133 | ||
133 | /* Initialize to zero. */ | ||
134 | memset(gdb_regs, 0, NUMREGBYTES); | ||
135 | |||
136 | thread_regs = task_pt_regs(task); | 134 | thread_regs = task_pt_regs(task); |
137 | memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long)); | 135 | memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long)); |
136 | memset(&gdb_regs[NGPRS], 0, | ||
137 | (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long)); | ||
138 | gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc; | 138 | gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc; |
139 | gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum; | 139 | gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum; |
140 | } | 140 | } |
@@ -433,9 +433,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
433 | struct kgdb_arch arch_kgdb_ops; | 433 | struct kgdb_arch arch_kgdb_ops; |
434 | 434 | ||
435 | /* | 435 | /* |
436 | * kgdb_arch_init - Perform any architecture specific initalization. | 436 | * kgdb_arch_init - Perform any architecture specific initialization. |
437 | * | 437 | * |
438 | * This function will handle the initalization of any architecture | 438 | * This function will handle the initialization of any architecture |
439 | * specific callbacks. | 439 | * specific callbacks. |
440 | */ | 440 | */ |
441 | int kgdb_arch_init(void) | 441 | int kgdb_arch_init(void) |
@@ -447,9 +447,9 @@ int kgdb_arch_init(void) | |||
447 | } | 447 | } |
448 | 448 | ||
449 | /* | 449 | /* |
450 | * kgdb_arch_exit - Perform any architecture specific uninitalization. | 450 | * kgdb_arch_exit - Perform any architecture specific uninitialization. |
451 | * | 451 | * |
452 | * This function will handle the uninitalization of any architecture | 452 | * This function will handle the uninitialization of any architecture |
453 | * specific callbacks, for dynamic registration and unregistration. | 453 | * specific callbacks, for dynamic registration and unregistration. |
454 | */ | 454 | */ |
455 | void kgdb_arch_exit(void) | 455 | void kgdb_arch_exit(void) |
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index 4c017d0d2de8..aa2b44cd8fd3 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c | |||
@@ -1326,7 +1326,7 @@ invalid_device: | |||
1326 | 1326 | ||
1327 | 1327 | ||
1328 | /* | 1328 | /* |
1329 | * See tile_cfg_read() for relevent comments. | 1329 | * See tile_cfg_read() for relevant comments. |
1330 | * Note that "val" is the value to write, not a pointer to that value. | 1330 | * Note that "val" is the value to write, not a pointer to that value. |
1331 | */ | 1331 | */ |
1332 | static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, | 1332 | static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, |
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 049ada8d4e9c..86a9bec18dab 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
@@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu) | |||
369 | 369 | ||
370 | WARN_ON_ONCE(cpuc->amd_nb); | 370 | WARN_ON_ONCE(cpuc->amd_nb); |
371 | 371 | ||
372 | if (boot_cpu_data.x86_max_cores < 2) | 372 | if (!x86_pmu.amd_nb_constraints) |
373 | return NOTIFY_OK; | 373 | return NOTIFY_OK; |
374 | 374 | ||
375 | cpuc->amd_nb = amd_alloc_nb(cpu); | 375 | cpuc->amd_nb = amd_alloc_nb(cpu); |
@@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu) | |||
388 | 388 | ||
389 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; | 389 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
390 | 390 | ||
391 | if (boot_cpu_data.x86_max_cores < 2) | 391 | if (!x86_pmu.amd_nb_constraints) |
392 | return; | 392 | return; |
393 | 393 | ||
394 | nb_id = amd_get_nb_id(cpu); | 394 | nb_id = amd_get_nb_id(cpu); |
@@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu) | |||
414 | { | 414 | { |
415 | struct cpu_hw_events *cpuhw; | 415 | struct cpu_hw_events *cpuhw; |
416 | 416 | ||
417 | if (boot_cpu_data.x86_max_cores < 2) | 417 | if (!x86_pmu.amd_nb_constraints) |
418 | return; | 418 | return; |
419 | 419 | ||
420 | cpuhw = &per_cpu(cpu_hw_events, cpu); | 420 | cpuhw = &per_cpu(cpu_hw_events, cpu); |
@@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
648 | .cpu_prepare = amd_pmu_cpu_prepare, | 648 | .cpu_prepare = amd_pmu_cpu_prepare, |
649 | .cpu_starting = amd_pmu_cpu_starting, | 649 | .cpu_starting = amd_pmu_cpu_starting, |
650 | .cpu_dead = amd_pmu_cpu_dead, | 650 | .cpu_dead = amd_pmu_cpu_dead, |
651 | |||
652 | .amd_nb_constraints = 1, | ||
651 | }; | 653 | }; |
652 | 654 | ||
653 | static int __init amd_core_pmu_init(void) | 655 | static int __init amd_core_pmu_init(void) |
@@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void) | |||
674 | x86_pmu.eventsel = MSR_F15H_PERF_CTL; | 676 | x86_pmu.eventsel = MSR_F15H_PERF_CTL; |
675 | x86_pmu.perfctr = MSR_F15H_PERF_CTR; | 677 | x86_pmu.perfctr = MSR_F15H_PERF_CTR; |
676 | x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; | 678 | x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; |
679 | /* | ||
680 | * AMD Core perfctr has separate MSRs for the NB events, see | ||
681 | * the amd/uncore.c driver. | ||
682 | */ | ||
683 | x86_pmu.amd_nb_constraints = 0; | ||
677 | 684 | ||
678 | pr_cont("core perfctr, "); | 685 | pr_cont("core perfctr, "); |
679 | return 0; | 686 | return 0; |
@@ -693,6 +700,14 @@ __init int amd_pmu_init(void) | |||
693 | if (ret) | 700 | if (ret) |
694 | return ret; | 701 | return ret; |
695 | 702 | ||
703 | if (num_possible_cpus() == 1) { | ||
704 | /* | ||
705 | * No point in allocating data structures to serialize | ||
706 | * against other CPUs, when there is only the one CPU. | ||
707 | */ | ||
708 | x86_pmu.amd_nb_constraints = 0; | ||
709 | } | ||
710 | |||
696 | /* Events are common for all AMDs */ | 711 | /* Events are common for all AMDs */ |
697 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 712 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
698 | sizeof(hw_cache_event_ids)); | 713 | sizeof(hw_cache_event_ids)); |
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 3ea25c3917c0..feb90f6730e8 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c | |||
@@ -28,10 +28,46 @@ static u32 ibs_caps; | |||
28 | #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT) | 28 | #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT) |
29 | #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT | 29 | #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT |
30 | 30 | ||
31 | |||
32 | /* | ||
33 | * IBS states: | ||
34 | * | ||
35 | * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken | ||
36 | * and any further add()s must fail. | ||
37 | * | ||
38 | * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are | ||
39 | * complicated by the fact that the IBS hardware can send late NMIs (ie. after | ||
40 | * we've cleared the EN bit). | ||
41 | * | ||
42 | * In order to consume these late NMIs we have the STOPPED state, any NMI that | ||
43 | * happens after we've cleared the EN state will clear this bit and report the | ||
44 | * NMI handled (this is fundamentally racy in the face or multiple NMI sources, | ||
45 | * someone else can consume our BIT and our NMI will go unhandled). | ||
46 | * | ||
47 | * And since we cannot set/clear this separate bit together with the EN bit, | ||
48 | * there are races; if we cleared STARTED early, an NMI could land in | ||
49 | * between clearing STARTED and clearing the EN bit (in fact multiple NMIs | ||
50 | * could happen if the period is small enough), and consume our STOPPED bit | ||
51 | * and trigger streams of unhandled NMIs. | ||
52 | * | ||
53 | * If, however, we clear STARTED late, an NMI can hit between clearing the | ||
54 | * EN bit and clearing STARTED, still see STARTED set and process the event. | ||
55 | * If this event will have the VALID bit clear, we bail properly, but this | ||
56 | * is not a given. With VALID set we can end up calling pmu::stop() again | ||
57 | * (the throttle logic) and trigger the WARNs in there. | ||
58 | * | ||
59 | * So what we do is set STOPPING before clearing EN to avoid the pmu::stop() | ||
60 | * nesting, and clear STARTED late, so that we have a well defined state over | ||
61 | * the clearing of the EN bit. | ||
62 | * | ||
63 | * XXX: we could probably be using !atomic bitops for all this. | ||
64 | */ | ||
65 | |||
31 | enum ibs_states { | 66 | enum ibs_states { |
32 | IBS_ENABLED = 0, | 67 | IBS_ENABLED = 0, |
33 | IBS_STARTED = 1, | 68 | IBS_STARTED = 1, |
34 | IBS_STOPPING = 2, | 69 | IBS_STOPPING = 2, |
70 | IBS_STOPPED = 3, | ||
35 | 71 | ||
36 | IBS_MAX_STATES, | 72 | IBS_MAX_STATES, |
37 | }; | 73 | }; |
@@ -377,11 +413,10 @@ static void perf_ibs_start(struct perf_event *event, int flags) | |||
377 | 413 | ||
378 | perf_ibs_set_period(perf_ibs, hwc, &period); | 414 | perf_ibs_set_period(perf_ibs, hwc, &period); |
379 | /* | 415 | /* |
380 | * Set STARTED before enabling the hardware, such that | 416 | * Set STARTED before enabling the hardware, such that a subsequent NMI |
381 | * a subsequent NMI must observe it. Then clear STOPPING | 417 | * must observe it. |
382 | * such that we don't consume NMIs by accident. | ||
383 | */ | 418 | */ |
384 | set_bit(IBS_STARTED, pcpu->state); | 419 | set_bit(IBS_STARTED, pcpu->state); |
385 | clear_bit(IBS_STOPPING, pcpu->state); | 420 | clear_bit(IBS_STOPPING, pcpu->state); |
386 | perf_ibs_enable_event(perf_ibs, hwc, period >> 4); | 421 | perf_ibs_enable_event(perf_ibs, hwc, period >> 4); |
387 | 422 | ||
@@ -396,6 +431,9 @@ static void perf_ibs_stop(struct perf_event *event, int flags) | |||
396 | u64 config; | 431 | u64 config; |
397 | int stopping; | 432 | int stopping; |
398 | 433 | ||
434 | if (test_and_set_bit(IBS_STOPPING, pcpu->state)) | ||
435 | return; | ||
436 | |||
399 | stopping = test_bit(IBS_STARTED, pcpu->state); | 437 | stopping = test_bit(IBS_STARTED, pcpu->state); |
400 | 438 | ||
401 | if (!stopping && (hwc->state & PERF_HES_UPTODATE)) | 439 | if (!stopping && (hwc->state & PERF_HES_UPTODATE)) |
@@ -405,12 +443,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags) | |||
405 | 443 | ||
406 | if (stopping) { | 444 | if (stopping) { |
407 | /* | 445 | /* |
408 | * Set STOPPING before disabling the hardware, such that it | 446 | * Set STOPPED before disabling the hardware, such that it |
409 | * must be visible to NMIs the moment we clear the EN bit, | 447 | * must be visible to NMIs the moment we clear the EN bit, |
410 | * at which point we can generate an !VALID sample which | 448 | * at which point we can generate an !VALID sample which |
411 | * we need to consume. | 449 | * we need to consume. |
412 | */ | 450 | */ |
413 | set_bit(IBS_STOPPING, pcpu->state); | 451 | set_bit(IBS_STOPPED, pcpu->state); |
414 | perf_ibs_disable_event(perf_ibs, hwc, config); | 452 | perf_ibs_disable_event(perf_ibs, hwc, config); |
415 | /* | 453 | /* |
416 | * Clear STARTED after disabling the hardware; if it were | 454 | * Clear STARTED after disabling the hardware; if it were |
@@ -556,7 +594,7 @@ fail: | |||
556 | * with samples that even have the valid bit cleared. | 594 | * with samples that even have the valid bit cleared. |
557 | * Mark all this NMIs as handled. | 595 | * Mark all this NMIs as handled. |
558 | */ | 596 | */ |
559 | if (test_and_clear_bit(IBS_STOPPING, pcpu->state)) | 597 | if (test_and_clear_bit(IBS_STOPPED, pcpu->state)) |
560 | return 1; | 598 | return 1; |
561 | 599 | ||
562 | return 0; | 600 | return 0; |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index ba6ef18528c9..ad4dc7ffffb5 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -608,6 +608,11 @@ struct x86_pmu { | |||
608 | atomic_t lbr_exclusive[x86_lbr_exclusive_max]; | 608 | atomic_t lbr_exclusive[x86_lbr_exclusive_max]; |
609 | 609 | ||
610 | /* | 610 | /* |
611 | * AMD bits | ||
612 | */ | ||
613 | unsigned int amd_nb_constraints : 1; | ||
614 | |||
615 | /* | ||
611 | * Extra registers for events | 616 | * Extra registers for events |
612 | */ | 617 | */ |
613 | struct extra_reg *extra_regs; | 618 | struct extra_reg *extra_regs; |
@@ -795,6 +800,9 @@ ssize_t intel_event_sysfs_show(char *page, u64 config); | |||
795 | 800 | ||
796 | struct attribute **merge_attr(struct attribute **a, struct attribute **b); | 801 | struct attribute **merge_attr(struct attribute **a, struct attribute **b); |
797 | 802 | ||
803 | ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, | ||
804 | char *page); | ||
805 | |||
798 | #ifdef CONFIG_CPU_SUP_AMD | 806 | #ifdef CONFIG_CPU_SUP_AMD |
799 | 807 | ||
800 | int amd_pmu_init(void); | 808 | int amd_pmu_init(void); |
@@ -925,9 +933,6 @@ int p6_pmu_init(void); | |||
925 | 933 | ||
926 | int knc_pmu_init(void); | 934 | int knc_pmu_init(void); |
927 | 935 | ||
928 | ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, | ||
929 | char *page); | ||
930 | |||
931 | static inline int is_ht_workaround_enabled(void) | 936 | static inline int is_ht_workaround_enabled(void) |
932 | { | 937 | { |
933 | return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); | 938 | return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 2da46ac16e37..426e946ed0c0 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -190,6 +190,7 @@ | |||
190 | #define MSR_PP1_ENERGY_STATUS 0x00000641 | 190 | #define MSR_PP1_ENERGY_STATUS 0x00000641 |
191 | #define MSR_PP1_POLICY 0x00000642 | 191 | #define MSR_PP1_POLICY 0x00000642 |
192 | 192 | ||
193 | /* Config TDP MSRs */ | ||
193 | #define MSR_CONFIG_TDP_NOMINAL 0x00000648 | 194 | #define MSR_CONFIG_TDP_NOMINAL 0x00000648 |
194 | #define MSR_CONFIG_TDP_LEVEL_1 0x00000649 | 195 | #define MSR_CONFIG_TDP_LEVEL_1 0x00000649 |
195 | #define MSR_CONFIG_TDP_LEVEL_2 0x0000064A | 196 | #define MSR_CONFIG_TDP_LEVEL_2 0x0000064A |
@@ -210,13 +211,6 @@ | |||
210 | #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 | 211 | #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 |
211 | #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 | 212 | #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 |
212 | 213 | ||
213 | /* Config TDP MSRs */ | ||
214 | #define MSR_CONFIG_TDP_NOMINAL 0x00000648 | ||
215 | #define MSR_CONFIG_TDP_LEVEL1 0x00000649 | ||
216 | #define MSR_CONFIG_TDP_LEVEL2 0x0000064A | ||
217 | #define MSR_CONFIG_TDP_CONTROL 0x0000064B | ||
218 | #define MSR_TURBO_ACTIVATION_RATIO 0x0000064C | ||
219 | |||
220 | /* Hardware P state interface */ | 214 | /* Hardware P state interface */ |
221 | #define MSR_PPERF 0x0000064e | 215 | #define MSR_PPERF 0x0000064e |
222 | #define MSR_PERF_LIMIT_REASONS 0x0000064f | 216 | #define MSR_PERF_LIMIT_REASONS 0x0000064f |
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h index bf8b35d2035a..fbc5e92e1ecc 100644 --- a/arch/x86/include/asm/pmem.h +++ b/arch/x86/include/asm/pmem.h | |||
@@ -47,6 +47,15 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | |||
47 | BUG(); | 47 | BUG(); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src, | ||
51 | size_t n) | ||
52 | { | ||
53 | if (static_cpu_has(X86_FEATURE_MCE_RECOVERY)) | ||
54 | return memcpy_mcsafe(dst, (void __force *) src, n); | ||
55 | memcpy(dst, (void __force *) src, n); | ||
56 | return 0; | ||
57 | } | ||
58 | |||
50 | /** | 59 | /** |
51 | * arch_wmb_pmem - synchronize writes to persistent memory | 60 | * arch_wmb_pmem - synchronize writes to persistent memory |
52 | * | 61 | * |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 983738ac014c..9264476f3d57 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -132,8 +132,6 @@ struct cpuinfo_x86 { | |||
132 | u16 logical_proc_id; | 132 | u16 logical_proc_id; |
133 | /* Core id: */ | 133 | /* Core id: */ |
134 | u16 cpu_core_id; | 134 | u16 cpu_core_id; |
135 | /* Compute unit id */ | ||
136 | u8 compute_unit_id; | ||
137 | /* Index into per_cpu list: */ | 135 | /* Index into per_cpu list: */ |
138 | u16 cpu_index; | 136 | u16 cpu_index; |
139 | u32 microcode; | 137 | u32 microcode; |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 20a3de5cb3b0..66b057306f40 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -155,6 +155,7 @@ static inline int wbinvd_on_all_cpus(void) | |||
155 | wbinvd(); | 155 | wbinvd(); |
156 | return 0; | 156 | return 0; |
157 | } | 157 | } |
158 | #define smp_num_siblings 1 | ||
158 | #endif /* CONFIG_SMP */ | 159 | #endif /* CONFIG_SMP */ |
159 | 160 | ||
160 | extern unsigned disabled_cpus; | 161 | extern unsigned disabled_cpus; |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 82866697fcf1..ffae84df8a93 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -276,11 +276,9 @@ static inline bool is_ia32_task(void) | |||
276 | */ | 276 | */ |
277 | #define force_iret() set_thread_flag(TIF_NOTIFY_RESUME) | 277 | #define force_iret() set_thread_flag(TIF_NOTIFY_RESUME) |
278 | 278 | ||
279 | #endif /* !__ASSEMBLY__ */ | ||
280 | |||
281 | #ifndef __ASSEMBLY__ | ||
282 | extern void arch_task_cache_init(void); | 279 | extern void arch_task_cache_init(void); |
283 | extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); | 280 | extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); |
284 | extern void arch_release_task_struct(struct task_struct *tsk); | 281 | extern void arch_release_task_struct(struct task_struct *tsk); |
285 | #endif | 282 | #endif /* !__ASSEMBLY__ */ |
283 | |||
286 | #endif /* _ASM_X86_THREAD_INFO_H */ | 284 | #endif /* _ASM_X86_THREAD_INFO_H */ |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index c24b4224d439..1fde8d580a5b 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void) | |||
319 | 319 | ||
320 | #endif /* SMP */ | 320 | #endif /* SMP */ |
321 | 321 | ||
322 | /* Not inlined due to inc_irq_stat not being defined yet */ | ||
323 | #define flush_tlb_local() { \ | ||
324 | inc_irq_stat(irq_tlb_count); \ | ||
325 | local_flush_tlb(); \ | ||
326 | } | ||
327 | |||
328 | #ifndef CONFIG_PARAVIRT | 322 | #ifndef CONFIG_PARAVIRT |
329 | #define flush_tlb_others(mask, mm, start, end) \ | 323 | #define flush_tlb_others(mask, mm, start, end) \ |
330 | native_flush_tlb_others(mask, mm, start, end) | 324 | native_flush_tlb_others(mask, mm, start, end) |
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 29fa475ec518..a147e676fc7b 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c | |||
@@ -170,15 +170,13 @@ int amd_get_subcaches(int cpu) | |||
170 | { | 170 | { |
171 | struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; | 171 | struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; |
172 | unsigned int mask; | 172 | unsigned int mask; |
173 | int cuid; | ||
174 | 173 | ||
175 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | 174 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) |
176 | return 0; | 175 | return 0; |
177 | 176 | ||
178 | pci_read_config_dword(link, 0x1d4, &mask); | 177 | pci_read_config_dword(link, 0x1d4, &mask); |
179 | 178 | ||
180 | cuid = cpu_data(cpu).compute_unit_id; | 179 | return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf; |
181 | return (mask >> (4 * cuid)) & 0xf; | ||
182 | } | 180 | } |
183 | 181 | ||
184 | int amd_set_subcaches(int cpu, unsigned long mask) | 182 | int amd_set_subcaches(int cpu, unsigned long mask) |
@@ -204,7 +202,7 @@ int amd_set_subcaches(int cpu, unsigned long mask) | |||
204 | pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); | 202 | pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); |
205 | } | 203 | } |
206 | 204 | ||
207 | cuid = cpu_data(cpu).compute_unit_id; | 205 | cuid = cpu_data(cpu).cpu_core_id; |
208 | mask <<= 4 * cuid; | 206 | mask <<= 4 * cuid; |
209 | mask |= (0xf ^ (1 << cuid)) << 26; | 207 | mask |= (0xf ^ (1 << cuid)) << 26; |
210 | 208 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 6e47e3a916f1..7b76eb67a9b3 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -300,7 +300,6 @@ static int nearby_node(int apicid) | |||
300 | #ifdef CONFIG_SMP | 300 | #ifdef CONFIG_SMP |
301 | static void amd_get_topology(struct cpuinfo_x86 *c) | 301 | static void amd_get_topology(struct cpuinfo_x86 *c) |
302 | { | 302 | { |
303 | u32 cores_per_cu = 1; | ||
304 | u8 node_id; | 303 | u8 node_id; |
305 | int cpu = smp_processor_id(); | 304 | int cpu = smp_processor_id(); |
306 | 305 | ||
@@ -313,8 +312,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c) | |||
313 | 312 | ||
314 | /* get compute unit information */ | 313 | /* get compute unit information */ |
315 | smp_num_siblings = ((ebx >> 8) & 3) + 1; | 314 | smp_num_siblings = ((ebx >> 8) & 3) + 1; |
316 | c->compute_unit_id = ebx & 0xff; | 315 | c->x86_max_cores /= smp_num_siblings; |
317 | cores_per_cu += ((ebx >> 8) & 3); | 316 | c->cpu_core_id = ebx & 0xff; |
318 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { | 317 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { |
319 | u64 value; | 318 | u64 value; |
320 | 319 | ||
@@ -325,19 +324,16 @@ static void amd_get_topology(struct cpuinfo_x86 *c) | |||
325 | 324 | ||
326 | /* fixup multi-node processor information */ | 325 | /* fixup multi-node processor information */ |
327 | if (nodes_per_socket > 1) { | 326 | if (nodes_per_socket > 1) { |
328 | u32 cores_per_node; | ||
329 | u32 cus_per_node; | 327 | u32 cus_per_node; |
330 | 328 | ||
331 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | 329 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); |
332 | cores_per_node = c->x86_max_cores / nodes_per_socket; | 330 | cus_per_node = c->x86_max_cores / nodes_per_socket; |
333 | cus_per_node = cores_per_node / cores_per_cu; | ||
334 | 331 | ||
335 | /* store NodeID, use llc_shared_map to store sibling info */ | 332 | /* store NodeID, use llc_shared_map to store sibling info */ |
336 | per_cpu(cpu_llc_id, cpu) = node_id; | 333 | per_cpu(cpu_llc_id, cpu) = node_id; |
337 | 334 | ||
338 | /* core id has to be in the [0 .. cores_per_node - 1] range */ | 335 | /* core id has to be in the [0 .. cores_per_node - 1] range */ |
339 | c->cpu_core_id %= cores_per_node; | 336 | c->cpu_core_id %= cus_per_node; |
340 | c->compute_unit_id %= cus_per_node; | ||
341 | } | 337 | } |
342 | } | 338 | } |
343 | #endif | 339 | #endif |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 0b445c2ff735..ac780cad3b86 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -384,6 +384,9 @@ static void intel_thermal_interrupt(void) | |||
384 | { | 384 | { |
385 | __u64 msr_val; | 385 | __u64 msr_val; |
386 | 386 | ||
387 | if (static_cpu_has(X86_FEATURE_HWP)) | ||
388 | wrmsrl_safe(MSR_HWP_STATUS, 0); | ||
389 | |||
387 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 390 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
388 | 391 | ||
389 | /* Check for violation of core thermal thresholds*/ | 392 | /* Check for violation of core thermal thresholds*/ |
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c index 31f0f335ed22..1dd8294fd730 100644 --- a/arch/x86/kernel/cpu/powerflags.c +++ b/arch/x86/kernel/cpu/powerflags.c | |||
@@ -18,4 +18,6 @@ const char *const x86_power_flags[32] = { | |||
18 | "", /* tsc invariant mapped to constant_tsc */ | 18 | "", /* tsc invariant mapped to constant_tsc */ |
19 | "cpb", /* core performance boost */ | 19 | "cpb", /* core performance boost */ |
20 | "eff_freq_ro", /* Readonly aperf/mperf */ | 20 | "eff_freq_ro", /* Readonly aperf/mperf */ |
21 | "proc_feedback", /* processor feedback interface */ | ||
22 | "acc_power", /* accumulated power mechanism */ | ||
21 | }; | 23 | }; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b2c99f811c3f..a2065d3b3b39 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -422,7 +422,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
422 | 422 | ||
423 | if (c->phys_proc_id == o->phys_proc_id && | 423 | if (c->phys_proc_id == o->phys_proc_id && |
424 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && | 424 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && |
425 | c->compute_unit_id == o->compute_unit_id) | 425 | c->cpu_core_id == o->cpu_core_id) |
426 | return topology_sane(c, o, "smt"); | 426 | return topology_sane(c, o, "smt"); |
427 | 427 | ||
428 | } else if (c->phys_proc_id == o->phys_proc_id && | 428 | } else if (c->phys_proc_id == o->phys_proc_id && |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 8f4cc3dfac32..fe9b9f776361 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info) | |||
104 | 104 | ||
105 | inc_irq_stat(irq_tlb_count); | 105 | inc_irq_stat(irq_tlb_count); |
106 | 106 | ||
107 | if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) | 107 | if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) |
108 | return; | 108 | return; |
109 | if (!f->flush_end) | ||
110 | f->flush_end = f->flush_start + PAGE_SIZE; | ||
111 | 109 | ||
112 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | 110 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
113 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { | 111 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { |
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
135 | unsigned long end) | 133 | unsigned long end) |
136 | { | 134 | { |
137 | struct flush_tlb_info info; | 135 | struct flush_tlb_info info; |
136 | |||
137 | if (end == 0) | ||
138 | end = start + PAGE_SIZE; | ||
138 | info.flush_mm = mm; | 139 | info.flush_mm = mm; |
139 | info.flush_start = start; | 140 | info.flush_start = start; |
140 | info.flush_end = end; | 141 | info.flush_end = end; |
141 | 142 | ||
142 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); | 143 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); |
143 | trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); | 144 | if (end == TLB_FLUSH_ALL) |
145 | trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL); | ||
146 | else | ||
147 | trace_tlb_flush(TLB_REMOTE_SEND_IPI, | ||
148 | (end - start) >> PAGE_SHIFT); | ||
149 | |||
144 | if (is_uv_system()) { | 150 | if (is_uv_system()) { |
145 | unsigned int cpu; | 151 | unsigned int cpu; |
146 | 152 | ||
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c index 55d38cfa46c2..9e02dcaef683 100644 --- a/arch/x86/ras/mce_amd_inj.c +++ b/arch/x86/ras/mce_amd_inj.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | 21 | ||
22 | #include <asm/mce.h> | 22 | #include <asm/mce.h> |
23 | #include <asm/smp.h> | ||
23 | #include <asm/amd_nb.h> | 24 | #include <asm/amd_nb.h> |
24 | #include <asm/irq_vectors.h> | 25 | #include <asm/irq_vectors.h> |
25 | 26 | ||
@@ -206,7 +207,7 @@ static u32 get_nbc_for_node(int node_id) | |||
206 | struct cpuinfo_x86 *c = &boot_cpu_data; | 207 | struct cpuinfo_x86 *c = &boot_cpu_data; |
207 | u32 cores_per_node; | 208 | u32 cores_per_node; |
208 | 209 | ||
209 | cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket(); | 210 | cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket(); |
210 | 211 | ||
211 | return cores_per_node * node_id; | 212 | return cores_per_node * node_id; |
212 | } | 213 | } |
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index 3bbdcc79a3d3..7d7a39b47c62 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c | |||
@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7, | |||
178 | int cached_ret = -ENOKEY; | 178 | int cached_ret = -ENOKEY; |
179 | int ret; | 179 | int ret; |
180 | 180 | ||
181 | *_trusted = false; | ||
182 | |||
181 | for (p = pkcs7->certs; p; p = p->next) | 183 | for (p = pkcs7->certs; p; p = p->next) |
182 | p->seen = false; | 184 | p->seen = false; |
183 | 185 | ||
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index b5e54f2da53d..0d92d0f915e9 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device) | |||
491 | } | 491 | } |
492 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 492 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
493 | 493 | ||
494 | #ifdef CONFIG_X86 | ||
495 | static bool acpi_hwp_native_thermal_lvt_set; | ||
496 | static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle, | ||
497 | u32 lvl, | ||
498 | void *context, | ||
499 | void **rv) | ||
500 | { | ||
501 | u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; | ||
502 | u32 capbuf[2]; | ||
503 | struct acpi_osc_context osc_context = { | ||
504 | .uuid_str = sb_uuid_str, | ||
505 | .rev = 1, | ||
506 | .cap.length = 8, | ||
507 | .cap.pointer = capbuf, | ||
508 | }; | ||
509 | |||
510 | if (acpi_hwp_native_thermal_lvt_set) | ||
511 | return AE_CTRL_TERMINATE; | ||
512 | |||
513 | capbuf[0] = 0x0000; | ||
514 | capbuf[1] = 0x1000; /* set bit 12 */ | ||
515 | |||
516 | if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) { | ||
517 | if (osc_context.ret.pointer && osc_context.ret.length > 1) { | ||
518 | u32 *capbuf_ret = osc_context.ret.pointer; | ||
519 | |||
520 | if (capbuf_ret[1] & 0x1000) { | ||
521 | acpi_handle_info(handle, | ||
522 | "_OSC native thermal LVT Acked\n"); | ||
523 | acpi_hwp_native_thermal_lvt_set = true; | ||
524 | } | ||
525 | } | ||
526 | kfree(osc_context.ret.pointer); | ||
527 | } | ||
528 | |||
529 | return AE_OK; | ||
530 | } | ||
531 | |||
532 | void __init acpi_early_processor_osc(void) | ||
533 | { | ||
534 | if (boot_cpu_has(X86_FEATURE_HWP)) { | ||
535 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | ||
536 | ACPI_UINT32_MAX, | ||
537 | acpi_hwp_native_thermal_lvt_osc, | ||
538 | NULL, NULL, NULL); | ||
539 | acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, | ||
540 | acpi_hwp_native_thermal_lvt_osc, | ||
541 | NULL, NULL); | ||
542 | } | ||
543 | } | ||
544 | #endif | ||
545 | |||
494 | /* | 546 | /* |
495 | * The following ACPI IDs are known to be suitable for representing as | 547 | * The following ACPI IDs are known to be suitable for representing as |
496 | * processor devices. | 548 | * processor devices. |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 0e8567846f1a..c068c829b453 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void) | |||
1019 | goto error1; | 1019 | goto error1; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | /* Set capability bits for _OSC under processor scope */ | ||
1023 | acpi_early_processor_osc(); | ||
1024 | |||
1022 | /* | 1025 | /* |
1023 | * _OSC method may exist in module level code, | 1026 | * _OSC method may exist in module level code, |
1024 | * so it must be run after ACPI_FULL_INITIALIZATION | 1027 | * so it must be run after ACPI_FULL_INITIALIZATION |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a37508ef66c1..7c188472d9c2 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void); | |||
145 | static inline void acpi_early_processor_set_pdc(void) {} | 145 | static inline void acpi_early_processor_set_pdc(void) {} |
146 | #endif | 146 | #endif |
147 | 147 | ||
148 | #ifdef CONFIG_X86 | ||
149 | void acpi_early_processor_osc(void); | ||
150 | #else | ||
151 | static inline void acpi_early_processor_osc(void) {} | ||
152 | #endif | ||
153 | |||
148 | /* -------------------------------------------------------------------------- | 154 | /* -------------------------------------------------------------------------- |
149 | Embedded Controller | 155 | Embedded Controller |
150 | -------------------------------------------------------------------------- */ | 156 | -------------------------------------------------------------------------- */ |
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c index 9e9fe4b19ac4..309049d41f1b 100644 --- a/drivers/clk/mediatek/reset.c +++ b/drivers/clk/mediatek/reset.c | |||
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev, | |||
57 | return mtk_reset_deassert(rcdev, id); | 57 | return mtk_reset_deassert(rcdev, id); |
58 | } | 58 | } |
59 | 59 | ||
60 | static struct reset_control_ops mtk_reset_ops = { | 60 | static const struct reset_control_ops mtk_reset_ops = { |
61 | .assert = mtk_reset_assert, | 61 | .assert = mtk_reset_assert, |
62 | .deassert = mtk_reset_deassert, | 62 | .deassert = mtk_reset_deassert, |
63 | .reset = mtk_reset, | 63 | .reset = mtk_reset, |
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c index b54da1fe73f0..b4e4d6aa2631 100644 --- a/drivers/clk/mmp/reset.c +++ b/drivers/clk/mmp/reset.c | |||
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev, | |||
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | static struct reset_control_ops mmp_clk_reset_ops = { | 77 | static const struct reset_control_ops mmp_clk_reset_ops = { |
78 | .assert = mmp_clk_reset_assert, | 78 | .assert = mmp_clk_reset_assert, |
79 | .deassert = mmp_clk_reset_deassert, | 79 | .deassert = mmp_clk_reset_deassert, |
80 | }; | 80 | }; |
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c index 5428efb9fbf5..3cd1af0af0d9 100644 --- a/drivers/clk/qcom/gcc-ipq4019.c +++ b/drivers/clk/qcom/gcc-ipq4019.c | |||
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = { | |||
129 | }; | 129 | }; |
130 | 130 | ||
131 | #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } | 131 | #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } |
132 | #define P_XO 0 | ||
133 | #define FE_PLL_200 1 | ||
134 | #define FE_PLL_500 2 | ||
135 | #define DDRC_PLL_666 3 | ||
136 | |||
137 | #define DDRC_PLL_666_SDCC 1 | ||
138 | #define FE_PLL_125_DLY 1 | ||
139 | |||
140 | #define FE_PLL_WCSS2G 1 | ||
141 | #define FE_PLL_WCSS5G 1 | ||
142 | 132 | ||
143 | static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = { | 133 | static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = { |
144 | F(48000000, P_XO, 1, 0, 0), | 134 | F(48000000, P_XO, 1, 0, 0), |
145 | F(200000000, FE_PLL_200, 1, 0, 0), | 135 | F(200000000, P_FEPLL200, 1, 0, 0), |
146 | { } | 136 | { } |
147 | }; | 137 | }; |
148 | 138 | ||
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { | |||
334 | }; | 324 | }; |
335 | 325 | ||
336 | static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = { | 326 | static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = { |
337 | F(1843200, FE_PLL_200, 1, 144, 15625), | 327 | F(1843200, P_FEPLL200, 1, 144, 15625), |
338 | F(3686400, FE_PLL_200, 1, 288, 15625), | 328 | F(3686400, P_FEPLL200, 1, 288, 15625), |
339 | F(7372800, FE_PLL_200, 1, 576, 15625), | 329 | F(7372800, P_FEPLL200, 1, 576, 15625), |
340 | F(14745600, FE_PLL_200, 1, 1152, 15625), | 330 | F(14745600, P_FEPLL200, 1, 1152, 15625), |
341 | F(16000000, FE_PLL_200, 1, 2, 25), | 331 | F(16000000, P_FEPLL200, 1, 2, 25), |
342 | F(24000000, P_XO, 1, 1, 2), | 332 | F(24000000, P_XO, 1, 1, 2), |
343 | F(32000000, FE_PLL_200, 1, 4, 25), | 333 | F(32000000, P_FEPLL200, 1, 4, 25), |
344 | F(40000000, FE_PLL_200, 1, 1, 5), | 334 | F(40000000, P_FEPLL200, 1, 1, 5), |
345 | F(46400000, FE_PLL_200, 1, 29, 125), | 335 | F(46400000, P_FEPLL200, 1, 29, 125), |
346 | F(48000000, P_XO, 1, 0, 0), | 336 | F(48000000, P_XO, 1, 0, 0), |
347 | { } | 337 | { } |
348 | }; | 338 | }; |
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = { | |||
410 | }; | 400 | }; |
411 | 401 | ||
412 | static const struct freq_tbl ftbl_gcc_gp_clk[] = { | 402 | static const struct freq_tbl ftbl_gcc_gp_clk[] = { |
413 | F(1250000, FE_PLL_200, 1, 16, 0), | 403 | F(1250000, P_FEPLL200, 1, 16, 0), |
414 | F(2500000, FE_PLL_200, 1, 8, 0), | 404 | F(2500000, P_FEPLL200, 1, 8, 0), |
415 | F(5000000, FE_PLL_200, 1, 4, 0), | 405 | F(5000000, P_FEPLL200, 1, 4, 0), |
416 | { } | 406 | { } |
417 | }; | 407 | }; |
418 | 408 | ||
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = { | |||
512 | static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { | 502 | static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { |
513 | F(144000, P_XO, 1, 3, 240), | 503 | F(144000, P_XO, 1, 3, 240), |
514 | F(400000, P_XO, 1, 1, 0), | 504 | F(400000, P_XO, 1, 1, 0), |
515 | F(20000000, FE_PLL_500, 1, 1, 25), | 505 | F(20000000, P_FEPLL500, 1, 1, 25), |
516 | F(25000000, FE_PLL_500, 1, 1, 20), | 506 | F(25000000, P_FEPLL500, 1, 1, 20), |
517 | F(50000000, FE_PLL_500, 1, 1, 10), | 507 | F(50000000, P_FEPLL500, 1, 1, 10), |
518 | F(100000000, FE_PLL_500, 1, 1, 5), | 508 | F(100000000, P_FEPLL500, 1, 1, 5), |
519 | F(193000000, DDRC_PLL_666_SDCC, 1, 0, 0), | 509 | F(193000000, P_DDRPLL, 1, 0, 0), |
520 | { } | 510 | { } |
521 | }; | 511 | }; |
522 | 512 | ||
@@ -536,9 +526,9 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { | |||
536 | 526 | ||
537 | static const struct freq_tbl ftbl_gcc_apps_clk[] = { | 527 | static const struct freq_tbl ftbl_gcc_apps_clk[] = { |
538 | F(48000000, P_XO, 1, 0, 0), | 528 | F(48000000, P_XO, 1, 0, 0), |
539 | F(200000000, FE_PLL_200, 1, 0, 0), | 529 | F(200000000, P_FEPLL200, 1, 0, 0), |
540 | F(500000000, FE_PLL_500, 1, 0, 0), | 530 | F(500000000, P_FEPLL500, 1, 0, 0), |
541 | F(626000000, DDRC_PLL_666, 1, 0, 0), | 531 | F(626000000, P_DDRPLLAPSS, 1, 0, 0), |
542 | { } | 532 | { } |
543 | }; | 533 | }; |
544 | 534 | ||
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = { | |||
557 | 547 | ||
558 | static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = { | 548 | static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = { |
559 | F(48000000, P_XO, 1, 0, 0), | 549 | F(48000000, P_XO, 1, 0, 0), |
560 | F(100000000, FE_PLL_200, 2, 0, 0), | 550 | F(100000000, P_FEPLL200, 2, 0, 0), |
561 | { } | 551 | { } |
562 | }; | 552 | }; |
563 | 553 | ||
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = { | |||
940 | }; | 930 | }; |
941 | 931 | ||
942 | static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = { | 932 | static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = { |
943 | F(2000000, FE_PLL_200, 10, 0, 0), | 933 | F(2000000, P_FEPLL200, 10, 0, 0), |
944 | { } | 934 | { } |
945 | }; | 935 | }; |
946 | 936 | ||
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = { | |||
1007 | }; | 997 | }; |
1008 | 998 | ||
1009 | static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = { | 999 | static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = { |
1010 | F(125000000, FE_PLL_125_DLY, 1, 0, 0), | 1000 | F(125000000, P_FEPLL125DLY, 1, 0, 0), |
1011 | { } | 1001 | { } |
1012 | }; | 1002 | }; |
1013 | 1003 | ||
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = { | |||
1027 | 1017 | ||
1028 | static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = { | 1018 | static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = { |
1029 | F(48000000, P_XO, 1, 0, 0), | 1019 | F(48000000, P_XO, 1, 0, 0), |
1030 | F(250000000, FE_PLL_WCSS2G, 1, 0, 0), | 1020 | F(250000000, P_FEPLLWCSS2G, 1, 0, 0), |
1031 | { } | 1021 | { } |
1032 | }; | 1022 | }; |
1033 | 1023 | ||
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = { | |||
1097 | 1087 | ||
1098 | static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = { | 1088 | static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = { |
1099 | F(48000000, P_XO, 1, 0, 0), | 1089 | F(48000000, P_XO, 1, 0, 0), |
1100 | F(250000000, FE_PLL_WCSS5G, 1, 0, 0), | 1090 | F(250000000, P_FEPLLWCSS5G, 1, 0, 0), |
1101 | { } | 1091 | { } |
1102 | }; | 1092 | }; |
1103 | 1093 | ||
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table); | |||
1325 | 1315 | ||
1326 | static int gcc_ipq4019_probe(struct platform_device *pdev) | 1316 | static int gcc_ipq4019_probe(struct platform_device *pdev) |
1327 | { | 1317 | { |
1318 | struct device *dev = &pdev->dev; | ||
1319 | |||
1320 | clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000); | ||
1321 | clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000); | ||
1322 | clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000); | ||
1323 | clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000); | ||
1324 | clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000); | ||
1325 | clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000); | ||
1326 | clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000); | ||
1327 | |||
1328 | return qcom_cc_probe(pdev, &gcc_ipq4019_desc); | 1328 | return qcom_cc_probe(pdev, &gcc_ipq4019_desc); |
1329 | } | 1329 | } |
1330 | 1330 | ||
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c index 6c977d3a8590..0324d8daab9b 100644 --- a/drivers/clk/qcom/reset.c +++ b/drivers/clk/qcom/reset.c | |||
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) | |||
55 | return regmap_update_bits(rst->regmap, map->reg, mask, 0); | 55 | return regmap_update_bits(rst->regmap, map->reg, mask, 0); |
56 | } | 56 | } |
57 | 57 | ||
58 | struct reset_control_ops qcom_reset_ops = { | 58 | const struct reset_control_ops qcom_reset_ops = { |
59 | .reset = qcom_reset, | 59 | .reset = qcom_reset, |
60 | .assert = qcom_reset_assert, | 60 | .assert = qcom_reset_assert, |
61 | .deassert = qcom_reset_deassert, | 61 | .deassert = qcom_reset_deassert, |
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h index 0e11e2130f97..cda877927d43 100644 --- a/drivers/clk/qcom/reset.h +++ b/drivers/clk/qcom/reset.h | |||
@@ -32,6 +32,6 @@ struct qcom_reset_controller { | |||
32 | #define to_qcom_reset_controller(r) \ | 32 | #define to_qcom_reset_controller(r) \ |
33 | container_of(r, struct qcom_reset_controller, rcdev); | 33 | container_of(r, struct qcom_reset_controller, rcdev); |
34 | 34 | ||
35 | extern struct reset_control_ops qcom_reset_ops; | 35 | extern const struct reset_control_ops qcom_reset_ops; |
36 | 36 | ||
37 | #endif | 37 | #endif |
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c index 552f7bb15bc5..21218987bbc3 100644 --- a/drivers/clk/rockchip/softrst.c +++ b/drivers/clk/rockchip/softrst.c | |||
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev, | |||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | 83 | ||
84 | static struct reset_control_ops rockchip_softrst_ops = { | 84 | static const struct reset_control_ops rockchip_softrst_ops = { |
85 | .assert = rockchip_softrst_assert, | 85 | .assert = rockchip_softrst_assert, |
86 | .deassert = rockchip_softrst_deassert, | 86 | .deassert = rockchip_softrst_deassert, |
87 | }; | 87 | }; |
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c index 957aae63e7cc..d0c6c9a2d06a 100644 --- a/drivers/clk/sirf/clk-atlas7.c +++ b/drivers/clk/sirf/clk-atlas7.c | |||
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev, | |||
1423 | return 0; | 1423 | return 0; |
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | static struct reset_control_ops atlas7_rst_ops = { | 1426 | static const struct reset_control_ops atlas7_rst_ops = { |
1427 | .reset = atlas7_reset_module, | 1427 | .reset = atlas7_reset_module, |
1428 | }; | 1428 | }; |
1429 | 1429 | ||
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c index 044c1717b762..d9ea22ec4e25 100644 --- a/drivers/clk/sunxi/clk-a10-ve.c +++ b/drivers/clk/sunxi/clk-a10-ve.c | |||
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev, | |||
85 | return 0; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
88 | static struct reset_control_ops sunxi_ve_reset_ops = { | 88 | static const struct reset_control_ops sunxi_ve_reset_ops = { |
89 | .assert = sunxi_ve_reset_assert, | 89 | .assert = sunxi_ve_reset_assert, |
90 | .deassert = sunxi_ve_reset_deassert, | 90 | .deassert = sunxi_ve_reset_deassert, |
91 | }; | 91 | }; |
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index a9b176139aca..028dd832a39f 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c | |||
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static struct reset_control_ops sun9i_mmc_reset_ops = { | 86 | static const struct reset_control_ops sun9i_mmc_reset_ops = { |
87 | .assert = sun9i_mmc_reset_assert, | 87 | .assert = sun9i_mmc_reset_assert, |
88 | .deassert = sun9i_mmc_reset_deassert, | 88 | .deassert = sun9i_mmc_reset_deassert, |
89 | }; | 89 | }; |
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c index 5432b1c198a4..fe0c3d169377 100644 --- a/drivers/clk/sunxi/clk-usb.c +++ b/drivers/clk/sunxi/clk-usb.c | |||
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev, | |||
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
79 | static struct reset_control_ops sunxi_usb_reset_ops = { | 79 | static const struct reset_control_ops sunxi_usb_reset_ops = { |
80 | .assert = sunxi_usb_reset_assert, | 80 | .assert = sunxi_usb_reset_assert, |
81 | .deassert = sunxi_usb_reset_deassert, | 81 | .deassert = sunxi_usb_reset_deassert, |
82 | }; | 82 | }; |
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index 2a3a4fe803d6..f60fe2e344ca 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c | |||
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl, | |||
271 | } | 271 | } |
272 | } | 272 | } |
273 | 273 | ||
274 | static struct reset_control_ops rst_ops = { | 274 | static const struct reset_control_ops rst_ops = { |
275 | .assert = tegra_clk_rst_assert, | 275 | .assert = tegra_clk_rst_assert, |
276 | .deassert = tegra_clk_rst_deassert, | 276 | .deassert = tegra_clk_rst_deassert, |
277 | }; | 277 | }; |
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c index a68e199d579d..c5c9599a3a71 100644 --- a/drivers/gpio/gpio-menz127.c +++ b/drivers/gpio/gpio-menz127.c | |||
@@ -37,7 +37,6 @@ struct men_z127_gpio { | |||
37 | void __iomem *reg_base; | 37 | void __iomem *reg_base; |
38 | struct mcb_device *mdev; | 38 | struct mcb_device *mdev; |
39 | struct resource *mem; | 39 | struct resource *mem; |
40 | spinlock_t lock; | ||
41 | }; | 40 | }; |
42 | 41 | ||
43 | static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, | 42 | static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, |
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, | |||
69 | debounce /= 50; | 68 | debounce /= 50; |
70 | } | 69 | } |
71 | 70 | ||
72 | spin_lock(&priv->lock); | 71 | spin_lock(&gc->bgpio_lock); |
73 | 72 | ||
74 | db_en = readl(priv->reg_base + MEN_Z127_DBER); | 73 | db_en = readl(priv->reg_base + MEN_Z127_DBER); |
75 | 74 | ||
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, | |||
84 | writel(db_en, priv->reg_base + MEN_Z127_DBER); | 83 | writel(db_en, priv->reg_base + MEN_Z127_DBER); |
85 | writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio)); | 84 | writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio)); |
86 | 85 | ||
87 | spin_unlock(&priv->lock); | 86 | spin_unlock(&gc->bgpio_lock); |
88 | 87 | ||
89 | return 0; | 88 | return 0; |
90 | } | 89 | } |
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin) | |||
97 | if (gpio_pin >= gc->ngpio) | 96 | if (gpio_pin >= gc->ngpio) |
98 | return -EINVAL; | 97 | return -EINVAL; |
99 | 98 | ||
100 | spin_lock(&priv->lock); | 99 | spin_lock(&gc->bgpio_lock); |
101 | od_en = readl(priv->reg_base + MEN_Z127_ODER); | 100 | od_en = readl(priv->reg_base + MEN_Z127_ODER); |
102 | 101 | ||
103 | if (gpiochip_line_is_open_drain(gc, gpio_pin)) | 102 | if (gpiochip_line_is_open_drain(gc, gpio_pin)) |
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin) | |||
106 | od_en &= ~BIT(gpio_pin); | 105 | od_en &= ~BIT(gpio_pin); |
107 | 106 | ||
108 | writel(od_en, priv->reg_base + MEN_Z127_ODER); | 107 | writel(od_en, priv->reg_base + MEN_Z127_ODER); |
109 | spin_unlock(&priv->lock); | 108 | spin_unlock(&gc->bgpio_lock); |
110 | 109 | ||
111 | return 0; | 110 | return 0; |
112 | } | 111 | } |
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c index c0aa387664bf..0dc916191689 100644 --- a/drivers/gpio/gpio-xgene.c +++ b/drivers/gpio/gpio-xgene.c | |||
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 175 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
176 | if (!res) { | ||
177 | err = -EINVAL; | ||
178 | goto err; | ||
179 | } | ||
180 | |||
176 | gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, | 181 | gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, |
177 | resource_size(res)); | 182 | resource_size(res)); |
178 | if (!gpio->base) { | 183 | if (!gpio->base) { |
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index 0f734ee05274..ca77ec10147c 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig | |||
@@ -1,10 +1,14 @@ | |||
1 | menu "ACP Configuration" | 1 | menu "ACP (Audio CoProcessor) Configuration" |
2 | 2 | ||
3 | config DRM_AMD_ACP | 3 | config DRM_AMD_ACP |
4 | bool "Enable ACP IP support" | 4 | bool "Enable AMD Audio CoProcessor IP support" |
5 | select MFD_CORE | 5 | select MFD_CORE |
6 | select PM_GENERIC_DOMAINS if PM | 6 | select PM_GENERIC_DOMAINS if PM |
7 | help | 7 | help |
8 | Choose this option to enable ACP IP support for AMD SOCs. | 8 | Choose this option to enable ACP IP support for AMD SOCs. |
9 | This adds the ACP (Audio CoProcessor) IP driver and wires | ||
10 | it up into the amdgpu driver. The ACP block provides the DMA | ||
11 | engine for the i2s-based ALSA driver. It is required for audio | ||
12 | on APUs which utilize an i2s codec. | ||
9 | 13 | ||
10 | endmenu | 14 | endmenu |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 151a2d42c639..56d1458393cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -608,6 +608,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
608 | if ((offset + size) <= adev->mc.visible_vram_size) | 608 | if ((offset + size) <= adev->mc.visible_vram_size) |
609 | return 0; | 609 | return 0; |
610 | 610 | ||
611 | /* Can't move a pinned BO to visible VRAM */ | ||
612 | if (abo->pin_count > 0) | ||
613 | return -EINVAL; | ||
614 | |||
611 | /* hurrah the memory is not visible ! */ | 615 | /* hurrah the memory is not visible ! */ |
612 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); | 616 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); |
613 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | 617 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ab34190859a8..f1a55d1888cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, | |||
384 | struct ttm_mem_reg *new_mem) | 384 | struct ttm_mem_reg *new_mem) |
385 | { | 385 | { |
386 | struct amdgpu_device *adev; | 386 | struct amdgpu_device *adev; |
387 | struct amdgpu_bo *abo; | ||
387 | struct ttm_mem_reg *old_mem = &bo->mem; | 388 | struct ttm_mem_reg *old_mem = &bo->mem; |
388 | int r; | 389 | int r; |
389 | 390 | ||
391 | /* Can't move a pinned BO */ | ||
392 | abo = container_of(bo, struct amdgpu_bo, tbo); | ||
393 | if (WARN_ON_ONCE(abo->pin_count > 0)) | ||
394 | return -EINVAL; | ||
395 | |||
390 | adev = amdgpu_get_adev(bo->bdev); | 396 | adev = amdgpu_get_adev(bo->bdev); |
391 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | 397 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
392 | amdgpu_move_null(bo, new_mem); | 398 | amdgpu_move_null(bo, new_mem); |
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 7d58f594cffe..df64ed1c0139 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, | |||
179 | { | 179 | { |
180 | struct drm_dp_aux_msg msg; | 180 | struct drm_dp_aux_msg msg; |
181 | unsigned int retry; | 181 | unsigned int retry; |
182 | int err; | 182 | int err = 0; |
183 | 183 | ||
184 | memset(&msg, 0, sizeof(msg)); | 184 | memset(&msg, 0, sizeof(msg)); |
185 | msg.address = offset; | 185 | msg.address = offset; |
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, | |||
187 | msg.buffer = buffer; | 187 | msg.buffer = buffer; |
188 | msg.size = size; | 188 | msg.size = size; |
189 | 189 | ||
190 | mutex_lock(&aux->hw_mutex); | ||
191 | |||
190 | /* | 192 | /* |
191 | * The specification doesn't give any recommendation on how often to | 193 | * The specification doesn't give any recommendation on how often to |
192 | * retry native transactions. We used to retry 7 times like for | 194 | * retry native transactions. We used to retry 7 times like for |
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, | |||
195 | */ | 197 | */ |
196 | for (retry = 0; retry < 32; retry++) { | 198 | for (retry = 0; retry < 32; retry++) { |
197 | 199 | ||
198 | mutex_lock(&aux->hw_mutex); | ||
199 | err = aux->transfer(aux, &msg); | 200 | err = aux->transfer(aux, &msg); |
200 | mutex_unlock(&aux->hw_mutex); | ||
201 | if (err < 0) { | 201 | if (err < 0) { |
202 | if (err == -EBUSY) | 202 | if (err == -EBUSY) |
203 | continue; | 203 | continue; |
204 | 204 | ||
205 | return err; | 205 | goto unlock; |
206 | } | 206 | } |
207 | 207 | ||
208 | 208 | ||
209 | switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { | 209 | switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { |
210 | case DP_AUX_NATIVE_REPLY_ACK: | 210 | case DP_AUX_NATIVE_REPLY_ACK: |
211 | if (err < size) | 211 | if (err < size) |
212 | return -EPROTO; | 212 | err = -EPROTO; |
213 | return err; | 213 | goto unlock; |
214 | 214 | ||
215 | case DP_AUX_NATIVE_REPLY_NACK: | 215 | case DP_AUX_NATIVE_REPLY_NACK: |
216 | return -EIO; | 216 | err = -EIO; |
217 | goto unlock; | ||
217 | 218 | ||
218 | case DP_AUX_NATIVE_REPLY_DEFER: | 219 | case DP_AUX_NATIVE_REPLY_DEFER: |
219 | usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); | 220 | usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); |
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, | |||
222 | } | 223 | } |
223 | 224 | ||
224 | DRM_DEBUG_KMS("too many retries, giving up\n"); | 225 | DRM_DEBUG_KMS("too many retries, giving up\n"); |
225 | return -EIO; | 226 | err = -EIO; |
227 | |||
228 | unlock: | ||
229 | mutex_unlock(&aux->hw_mutex); | ||
230 | return err; | ||
226 | } | 231 | } |
227 | 232 | ||
228 | /** | 233 | /** |
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | |||
544 | int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); | 549 | int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); |
545 | 550 | ||
546 | for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { | 551 | for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { |
547 | mutex_lock(&aux->hw_mutex); | ||
548 | ret = aux->transfer(aux, msg); | 552 | ret = aux->transfer(aux, msg); |
549 | mutex_unlock(&aux->hw_mutex); | ||
550 | if (ret < 0) { | 553 | if (ret < 0) { |
551 | if (ret == -EBUSY) | 554 | if (ret == -EBUSY) |
552 | continue; | 555 | continue; |
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, | |||
685 | 688 | ||
686 | memset(&msg, 0, sizeof(msg)); | 689 | memset(&msg, 0, sizeof(msg)); |
687 | 690 | ||
691 | mutex_lock(&aux->hw_mutex); | ||
692 | |||
688 | for (i = 0; i < num; i++) { | 693 | for (i = 0; i < num; i++) { |
689 | msg.address = msgs[i].addr; | 694 | msg.address = msgs[i].addr; |
690 | drm_dp_i2c_msg_set_request(&msg, &msgs[i]); | 695 | drm_dp_i2c_msg_set_request(&msg, &msgs[i]); |
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, | |||
739 | msg.size = 0; | 744 | msg.size = 0; |
740 | (void)drm_dp_i2c_do_msg(aux, &msg); | 745 | (void)drm_dp_i2c_do_msg(aux, &msg); |
741 | 746 | ||
747 | mutex_unlock(&aux->hw_mutex); | ||
748 | |||
742 | return err; | 749 | return err; |
743 | } | 750 | } |
744 | 751 | ||
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index b04a64664673..65428cf233ce 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h | |||
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void); | |||
196 | int msm_hdmi_pll_8960_init(struct platform_device *pdev); | 196 | int msm_hdmi_pll_8960_init(struct platform_device *pdev); |
197 | int msm_hdmi_pll_8996_init(struct platform_device *pdev); | 197 | int msm_hdmi_pll_8996_init(struct platform_device *pdev); |
198 | #else | 198 | #else |
199 | static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev); | 199 | static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev) |
200 | { | 200 | { |
201 | return -ENODEV; | 201 | return -ENODEV; |
202 | } | 202 | } |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d52910e2c26c..c03b96709179 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file) | |||
467 | struct msm_file_private *ctx = file->driver_priv; | 467 | struct msm_file_private *ctx = file->driver_priv; |
468 | struct msm_kms *kms = priv->kms; | 468 | struct msm_kms *kms = priv->kms; |
469 | 469 | ||
470 | if (kms) | ||
471 | kms->funcs->preclose(kms, file); | ||
472 | |||
473 | mutex_lock(&dev->struct_mutex); | 470 | mutex_lock(&dev->struct_mutex); |
474 | if (ctx == priv->lastctx) | 471 | if (ctx == priv->lastctx) |
475 | priv->lastctx = NULL; | 472 | priv->lastctx = NULL; |
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 9bcabaada179..e32222c3d44f 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h | |||
@@ -55,7 +55,6 @@ struct msm_kms_funcs { | |||
55 | struct drm_encoder *slave_encoder, | 55 | struct drm_encoder *slave_encoder, |
56 | bool is_cmd_mode); | 56 | bool is_cmd_mode); |
57 | /* cleanup: */ | 57 | /* cleanup: */ |
58 | void (*preclose)(struct msm_kms *kms, struct drm_file *file); | ||
59 | void (*destroy)(struct msm_kms *kms); | 58 | void (*destroy)(struct msm_kms *kms); |
60 | }; | 59 | }; |
61 | 60 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index dd46c38676db..2d901bf28a94 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
799 | if ((offset + size) <= rdev->mc.visible_vram_size) | 799 | if ((offset + size) <= rdev->mc.visible_vram_size) |
800 | return 0; | 800 | return 0; |
801 | 801 | ||
802 | /* Can't move a pinned BO to visible VRAM */ | ||
803 | if (rbo->pin_count > 0) | ||
804 | return -EINVAL; | ||
805 | |||
802 | /* hurrah the memory is not visible ! */ | 806 | /* hurrah the memory is not visible ! */ |
803 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); | 807 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); |
804 | lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; | 808 | lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 6d8c32377c6f..c008312e1bcd 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
397 | struct ttm_mem_reg *new_mem) | 397 | struct ttm_mem_reg *new_mem) |
398 | { | 398 | { |
399 | struct radeon_device *rdev; | 399 | struct radeon_device *rdev; |
400 | struct radeon_bo *rbo; | ||
400 | struct ttm_mem_reg *old_mem = &bo->mem; | 401 | struct ttm_mem_reg *old_mem = &bo->mem; |
401 | int r; | 402 | int r; |
402 | 403 | ||
404 | /* Can't move a pinned BO */ | ||
405 | rbo = container_of(bo, struct radeon_bo, tbo); | ||
406 | if (WARN_ON_ONCE(rbo->pin_count > 0)) | ||
407 | return -EINVAL; | ||
408 | |||
403 | rdev = radeon_get_rdev(bo->bdev); | 409 | rdev = radeon_get_rdev(bo->bdev); |
404 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | 410 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
405 | radeon_move_null(bo, new_mem); | 411 | radeon_move_null(bo, new_mem); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index cb75ab72098a..af4df81c4e0c 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { | |||
2926 | /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ | 2926 | /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ |
2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, |
2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, | 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, |
2929 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 }, | ||
2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, | 2930 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, |
2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, | 2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, |
2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, | 2932 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, |
2933 | { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, | ||
2932 | { 0, 0, 0, 0 }, | 2934 | { 0, 0, 0, 0 }, |
2933 | }; | 2935 | }; |
2934 | 2936 | ||
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
3008 | } | 3010 | } |
3009 | ++p; | 3011 | ++p; |
3010 | } | 3012 | } |
3013 | /* limit mclk on all R7 370 parts for stability */ | ||
3014 | if (rdev->pdev->device == 0x6811 && | ||
3015 | rdev->pdev->revision == 0x81) | ||
3016 | max_mclk = 120000; | ||
3011 | 3017 | ||
3012 | if (rps->vce_active) { | 3018 | if (rps->vce_active) { |
3013 | rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; | 3019 | rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; |
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 3d3cf2f8891e..d5cfef75fc80 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | |||
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, | |||
271 | if (!iores) | 271 | if (!iores) |
272 | return -ENXIO; | 272 | return -ENXIO; |
273 | 273 | ||
274 | platform_set_drvdata(pdev, hdmi); | ||
275 | |||
276 | encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); | 274 | encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); |
277 | /* | 275 | /* |
278 | * If we failed to find the CRTC(s) which this encoder is | 276 | * If we failed to find the CRTC(s) which this encoder is |
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, | |||
293 | drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, | 291 | drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, |
294 | DRM_MODE_ENCODER_TMDS, NULL); | 292 | DRM_MODE_ENCODER_TMDS, NULL); |
295 | 293 | ||
296 | return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); | 294 | ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); |
295 | |||
296 | /* | ||
297 | * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), | ||
298 | * which would have called the encoder cleanup. Do it manually. | ||
299 | */ | ||
300 | if (ret) | ||
301 | drm_encoder_cleanup(encoder); | ||
302 | |||
303 | return ret; | ||
297 | } | 304 | } |
298 | 305 | ||
299 | static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, | 306 | static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 896da09e49ee..f556a8f4fde6 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev) | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, | ||
255 | struct drm_file *file_priv) | ||
256 | { | ||
257 | struct rockchip_drm_private *priv = crtc->dev->dev_private; | ||
258 | int pipe = drm_crtc_index(crtc); | ||
259 | |||
260 | if (pipe < ROCKCHIP_MAX_CRTC && | ||
261 | priv->crtc_funcs[pipe] && | ||
262 | priv->crtc_funcs[pipe]->cancel_pending_vblank) | ||
263 | priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); | ||
264 | } | ||
265 | |||
266 | static void rockchip_drm_preclose(struct drm_device *dev, | ||
267 | struct drm_file *file_priv) | ||
268 | { | ||
269 | struct drm_crtc *crtc; | ||
270 | |||
271 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
272 | rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); | ||
273 | } | ||
274 | |||
254 | void rockchip_drm_lastclose(struct drm_device *dev) | 275 | void rockchip_drm_lastclose(struct drm_device *dev) |
255 | { | 276 | { |
256 | struct rockchip_drm_private *priv = dev->dev_private; | 277 | struct rockchip_drm_private *priv = dev->dev_private; |
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = { | |||
281 | DRIVER_PRIME | DRIVER_ATOMIC, | 302 | DRIVER_PRIME | DRIVER_ATOMIC, |
282 | .load = rockchip_drm_load, | 303 | .load = rockchip_drm_load, |
283 | .unload = rockchip_drm_unload, | 304 | .unload = rockchip_drm_unload, |
305 | .preclose = rockchip_drm_preclose, | ||
284 | .lastclose = rockchip_drm_lastclose, | 306 | .lastclose = rockchip_drm_lastclose, |
285 | .get_vblank_counter = drm_vblank_no_hw_counter, | 307 | .get_vblank_counter = drm_vblank_no_hw_counter, |
286 | .enable_vblank = rockchip_drm_crtc_enable_vblank, | 308 | .enable_vblank = rockchip_drm_crtc_enable_vblank, |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 3529f692edb8..00d17d71aa4c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h | |||
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs { | |||
40 | int (*enable_vblank)(struct drm_crtc *crtc); | 40 | int (*enable_vblank)(struct drm_crtc *crtc); |
41 | void (*disable_vblank)(struct drm_crtc *crtc); | 41 | void (*disable_vblank)(struct drm_crtc *crtc); |
42 | void (*wait_for_update)(struct drm_crtc *crtc); | 42 | void (*wait_for_update)(struct drm_crtc *crtc); |
43 | void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv); | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | struct rockchip_atomic_commit { | 46 | struct rockchip_atomic_commit { |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index fd370548d7d7..a619f120f801 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
@@ -499,10 +499,25 @@ err_disable_hclk: | |||
499 | static void vop_crtc_disable(struct drm_crtc *crtc) | 499 | static void vop_crtc_disable(struct drm_crtc *crtc) |
500 | { | 500 | { |
501 | struct vop *vop = to_vop(crtc); | 501 | struct vop *vop = to_vop(crtc); |
502 | int i; | ||
502 | 503 | ||
503 | if (!vop->is_enabled) | 504 | if (!vop->is_enabled) |
504 | return; | 505 | return; |
505 | 506 | ||
507 | /* | ||
508 | * We need to make sure that all windows are disabled before we | ||
509 | * disable that crtc. Otherwise we might try to scan from a destroyed | ||
510 | * buffer later. | ||
511 | */ | ||
512 | for (i = 0; i < vop->data->win_size; i++) { | ||
513 | struct vop_win *vop_win = &vop->win[i]; | ||
514 | const struct vop_win_data *win = vop_win->data; | ||
515 | |||
516 | spin_lock(&vop->reg_lock); | ||
517 | VOP_WIN_SET(vop, win, enable, 0); | ||
518 | spin_unlock(&vop->reg_lock); | ||
519 | } | ||
520 | |||
506 | drm_crtc_vblank_off(crtc); | 521 | drm_crtc_vblank_off(crtc); |
507 | 522 | ||
508 | /* | 523 | /* |
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, | |||
549 | struct drm_plane_state *state) | 564 | struct drm_plane_state *state) |
550 | { | 565 | { |
551 | struct drm_crtc *crtc = state->crtc; | 566 | struct drm_crtc *crtc = state->crtc; |
567 | struct drm_crtc_state *crtc_state; | ||
552 | struct drm_framebuffer *fb = state->fb; | 568 | struct drm_framebuffer *fb = state->fb; |
553 | struct vop_win *vop_win = to_vop_win(plane); | 569 | struct vop_win *vop_win = to_vop_win(plane); |
554 | struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); | 570 | struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); |
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane, | |||
563 | int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : | 579 | int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : |
564 | DRM_PLANE_HELPER_NO_SCALING; | 580 | DRM_PLANE_HELPER_NO_SCALING; |
565 | 581 | ||
566 | crtc = crtc ? crtc : plane->state->crtc; | ||
567 | /* | ||
568 | * Both crtc or plane->state->crtc can be null. | ||
569 | */ | ||
570 | if (!crtc || !fb) | 582 | if (!crtc || !fb) |
571 | goto out_disable; | 583 | goto out_disable; |
584 | |||
585 | crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); | ||
586 | if (WARN_ON(!crtc_state)) | ||
587 | return -EINVAL; | ||
588 | |||
572 | src->x1 = state->src_x; | 589 | src->x1 = state->src_x; |
573 | src->y1 = state->src_y; | 590 | src->y1 = state->src_y; |
574 | src->x2 = state->src_x + state->src_w; | 591 | src->x2 = state->src_x + state->src_w; |
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane, | |||
580 | 597 | ||
581 | clip.x1 = 0; | 598 | clip.x1 = 0; |
582 | clip.y1 = 0; | 599 | clip.y1 = 0; |
583 | clip.x2 = crtc->mode.hdisplay; | 600 | clip.x2 = crtc_state->adjusted_mode.hdisplay; |
584 | clip.y2 = crtc->mode.vdisplay; | 601 | clip.y2 = crtc_state->adjusted_mode.vdisplay; |
585 | 602 | ||
586 | ret = drm_plane_helper_check_update(plane, crtc, state->fb, | 603 | ret = drm_plane_helper_check_update(plane, crtc, state->fb, |
587 | src, dest, &clip, | 604 | src, dest, &clip, |
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc) | |||
873 | WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); | 890 | WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); |
874 | } | 891 | } |
875 | 892 | ||
893 | static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc, | ||
894 | struct drm_file *file_priv) | ||
895 | { | ||
896 | struct drm_device *drm = crtc->dev; | ||
897 | struct vop *vop = to_vop(crtc); | ||
898 | struct drm_pending_vblank_event *e; | ||
899 | unsigned long flags; | ||
900 | |||
901 | spin_lock_irqsave(&drm->event_lock, flags); | ||
902 | e = vop->event; | ||
903 | if (e && e->base.file_priv == file_priv) { | ||
904 | vop->event = NULL; | ||
905 | |||
906 | e->base.destroy(&e->base); | ||
907 | file_priv->event_space += sizeof(e->event); | ||
908 | } | ||
909 | spin_unlock_irqrestore(&drm->event_lock, flags); | ||
910 | } | ||
911 | |||
876 | static const struct rockchip_crtc_funcs private_crtc_funcs = { | 912 | static const struct rockchip_crtc_funcs private_crtc_funcs = { |
877 | .enable_vblank = vop_crtc_enable_vblank, | 913 | .enable_vblank = vop_crtc_enable_vblank, |
878 | .disable_vblank = vop_crtc_disable_vblank, | 914 | .disable_vblank = vop_crtc_disable_vblank, |
879 | .wait_for_update = vop_crtc_wait_for_update, | 915 | .wait_for_update = vop_crtc_wait_for_update, |
916 | .cancel_pending_vblank = vop_crtc_cancel_pending_vblank, | ||
880 | }; | 917 | }; |
881 | 918 | ||
882 | static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, | 919 | static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, |
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, | |||
885 | { | 922 | { |
886 | struct vop *vop = to_vop(crtc); | 923 | struct vop *vop = to_vop(crtc); |
887 | 924 | ||
888 | if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0) | ||
889 | return false; | ||
890 | |||
891 | adjusted_mode->clock = | 925 | adjusted_mode->clock = |
892 | clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; | 926 | clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; |
893 | 927 | ||
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop) | |||
1108 | const struct vop_data *vop_data = vop->data; | 1142 | const struct vop_data *vop_data = vop->data; |
1109 | struct device *dev = vop->dev; | 1143 | struct device *dev = vop->dev; |
1110 | struct drm_device *drm_dev = vop->drm_dev; | 1144 | struct drm_device *drm_dev = vop->drm_dev; |
1111 | struct drm_plane *primary = NULL, *cursor = NULL, *plane; | 1145 | struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp; |
1112 | struct drm_crtc *crtc = &vop->crtc; | 1146 | struct drm_crtc *crtc = &vop->crtc; |
1113 | struct device_node *port; | 1147 | struct device_node *port; |
1114 | int ret; | 1148 | int ret; |
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop) | |||
1148 | ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, | 1182 | ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, |
1149 | &vop_crtc_funcs, NULL); | 1183 | &vop_crtc_funcs, NULL); |
1150 | if (ret) | 1184 | if (ret) |
1151 | return ret; | 1185 | goto err_cleanup_planes; |
1152 | 1186 | ||
1153 | drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); | 1187 | drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); |
1154 | 1188 | ||
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop) | |||
1181 | if (!port) { | 1215 | if (!port) { |
1182 | DRM_ERROR("no port node found in %s\n", | 1216 | DRM_ERROR("no port node found in %s\n", |
1183 | dev->of_node->full_name); | 1217 | dev->of_node->full_name); |
1218 | ret = -ENOENT; | ||
1184 | goto err_cleanup_crtc; | 1219 | goto err_cleanup_crtc; |
1185 | } | 1220 | } |
1186 | 1221 | ||
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop) | |||
1194 | err_cleanup_crtc: | 1229 | err_cleanup_crtc: |
1195 | drm_crtc_cleanup(crtc); | 1230 | drm_crtc_cleanup(crtc); |
1196 | err_cleanup_planes: | 1231 | err_cleanup_planes: |
1197 | list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) | 1232 | list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, |
1233 | head) | ||
1198 | drm_plane_cleanup(plane); | 1234 | drm_plane_cleanup(plane); |
1199 | return ret; | 1235 | return ret; |
1200 | } | 1236 | } |
@@ -1202,9 +1238,28 @@ err_cleanup_planes: | |||
1202 | static void vop_destroy_crtc(struct vop *vop) | 1238 | static void vop_destroy_crtc(struct vop *vop) |
1203 | { | 1239 | { |
1204 | struct drm_crtc *crtc = &vop->crtc; | 1240 | struct drm_crtc *crtc = &vop->crtc; |
1241 | struct drm_device *drm_dev = vop->drm_dev; | ||
1242 | struct drm_plane *plane, *tmp; | ||
1205 | 1243 | ||
1206 | rockchip_unregister_crtc_funcs(crtc); | 1244 | rockchip_unregister_crtc_funcs(crtc); |
1207 | of_node_put(crtc->port); | 1245 | of_node_put(crtc->port); |
1246 | |||
1247 | /* | ||
1248 | * We need to cleanup the planes now. Why? | ||
1249 | * | ||
1250 | * The planes are "&vop->win[i].base". That means the memory is | ||
1251 | * all part of the big "struct vop" chunk of memory. That memory | ||
1252 | * was devm allocated and associated with this component. We need to | ||
1253 | * free it ourselves before vop_unbind() finishes. | ||
1254 | */ | ||
1255 | list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, | ||
1256 | head) | ||
1257 | vop_plane_destroy(plane); | ||
1258 | |||
1259 | /* | ||
1260 | * Destroy CRTC after vop_plane_destroy() since vop_disable_plane() | ||
1261 | * references the CRTC. | ||
1262 | */ | ||
1208 | drm_crtc_cleanup(crtc); | 1263 | drm_crtc_cleanup(crtc); |
1209 | } | 1264 | } |
1210 | 1265 | ||
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 33239a2b264a..fd1eb9d03f0b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper, | |||
536 | out_destroy_fbi: | 536 | out_destroy_fbi: |
537 | drm_fb_helper_release_fbi(helper); | 537 | drm_fb_helper_release_fbi(helper); |
538 | out_gfree: | 538 | out_gfree: |
539 | drm_gem_object_unreference(&ufbdev->ufb.obj->base); | 539 | drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); |
540 | out: | 540 | out: |
541 | return ret; | 541 | return ret; |
542 | } | 542 | } |
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 2a0a784ab6ee..d7528e0d8442 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file, | |||
52 | return ret; | 52 | return ret; |
53 | } | 53 | } |
54 | 54 | ||
55 | drm_gem_object_unreference(&obj->base); | 55 | drm_gem_object_unreference_unlocked(&obj->base); |
56 | *handle_p = handle; | 56 | *handle_p = handle; |
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index 36544c4f653c..303d0c9df907 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c | |||
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111; | |||
85 | 85 | ||
86 | int max1111_read_channel(int channel) | 86 | int max1111_read_channel(int channel) |
87 | { | 87 | { |
88 | if (!the_max1111 || !the_max1111->spi) | ||
89 | return -ENODEV; | ||
90 | |||
88 | return max1111_read(&the_max1111->spi->dev, channel); | 91 | return max1111_read(&the_max1111->spi->dev, channel); |
89 | } | 92 | } |
90 | EXPORT_SYMBOL(max1111_read_channel); | 93 | EXPORT_SYMBOL(max1111_read_channel); |
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi) | |||
258 | { | 261 | { |
259 | struct max1111_data *data = spi_get_drvdata(spi); | 262 | struct max1111_data *data = spi_get_drvdata(spi); |
260 | 263 | ||
264 | #ifdef CONFIG_SHARPSL_PM | ||
265 | the_max1111 = NULL; | ||
266 | #endif | ||
261 | hwmon_device_unregister(data->hwmon_dev); | 267 | hwmon_device_unregister(data->hwmon_dev); |
262 | sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); | 268 | sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); |
263 | sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); | 269 | sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); |
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 9f0a48e39b8a..80e933b296f6 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c | |||
@@ -451,7 +451,7 @@ err_free: | |||
451 | return ret; | 451 | return ret; |
452 | } | 452 | } |
453 | 453 | ||
454 | static const struct ide_port_info icside_v6_port_info __initconst = { | 454 | static const struct ide_port_info icside_v6_port_info = { |
455 | .init_dma = icside_dma_off_init, | 455 | .init_dma = icside_dma_off_init, |
456 | .port_ops = &icside_v6_no_dma_port_ops, | 456 | .port_ops = &icside_v6_no_dma_port_ops, |
457 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, | 457 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, |
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 8012e43bf8f6..46427ea01753 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c | |||
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) | |||
325 | 325 | ||
326 | clk_enable(clk); | 326 | clk_enable(clk); |
327 | rate = clk_get_rate(clk); | 327 | rate = clk_get_rate(clk); |
328 | if (!rate) | ||
329 | return -EINVAL; | ||
328 | 330 | ||
329 | /* NOTE: round *down* to meet minimum timings; we count in clocks */ | 331 | /* NOTE: round *down* to meet minimum timings; we count in clocks */ |
330 | ideclk_period = 1000000000UL / rate; | 332 | ideclk_period = 1000000000UL / rate; |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 60b30d338a81..411e4464ca23 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn); | |||
63 | struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); | 63 | struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); |
64 | 64 | ||
65 | static void isert_release_work(struct work_struct *work); | 65 | static void isert_release_work(struct work_struct *work); |
66 | static void isert_wait4flush(struct isert_conn *isert_conn); | ||
67 | static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); | 66 | static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); |
68 | static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); | 67 | static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); |
69 | static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); | 68 | static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); |
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn, | |||
141 | attr.qp_context = isert_conn; | 140 | attr.qp_context = isert_conn; |
142 | attr.send_cq = comp->cq; | 141 | attr.send_cq = comp->cq; |
143 | attr.recv_cq = comp->cq; | 142 | attr.recv_cq = comp->cq; |
144 | attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; | 143 | attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; |
145 | attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; | 144 | attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; |
146 | attr.cap.max_send_sge = device->ib_device->attrs.max_sge; | 145 | attr.cap.max_send_sge = device->ib_device->attrs.max_sge; |
147 | isert_conn->max_sge = min(device->ib_device->attrs.max_sge, | 146 | isert_conn->max_sge = min(device->ib_device->attrs.max_sge, |
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, | |||
887 | break; | 886 | break; |
888 | case ISER_CONN_UP: | 887 | case ISER_CONN_UP: |
889 | isert_conn_terminate(isert_conn); | 888 | isert_conn_terminate(isert_conn); |
890 | isert_wait4flush(isert_conn); | 889 | ib_drain_qp(isert_conn->qp); |
891 | isert_handle_unbound_conn(isert_conn); | 890 | isert_handle_unbound_conn(isert_conn); |
892 | break; | 891 | break; |
893 | case ISER_CONN_BOUND: | 892 | case ISER_CONN_BOUND: |
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn) | |||
3213 | } | 3212 | } |
3214 | } | 3213 | } |
3215 | 3214 | ||
3216 | static void | ||
3217 | isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc) | ||
3218 | { | ||
3219 | struct isert_conn *isert_conn = wc->qp->qp_context; | ||
3220 | |||
3221 | isert_print_wc(wc, "beacon"); | ||
3222 | |||
3223 | isert_info("conn %p completing wait_comp_err\n", isert_conn); | ||
3224 | complete(&isert_conn->wait_comp_err); | ||
3225 | } | ||
3226 | |||
3227 | static void | ||
3228 | isert_wait4flush(struct isert_conn *isert_conn) | ||
3229 | { | ||
3230 | struct ib_recv_wr *bad_wr; | ||
3231 | static struct ib_cqe cqe = { .done = isert_beacon_done }; | ||
3232 | |||
3233 | isert_info("conn %p\n", isert_conn); | ||
3234 | |||
3235 | init_completion(&isert_conn->wait_comp_err); | ||
3236 | isert_conn->beacon.wr_cqe = &cqe; | ||
3237 | /* post an indication that all flush errors were consumed */ | ||
3238 | if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { | ||
3239 | isert_err("conn %p failed to post beacon", isert_conn); | ||
3240 | return; | ||
3241 | } | ||
3242 | |||
3243 | wait_for_completion(&isert_conn->wait_comp_err); | ||
3244 | } | ||
3245 | |||
3246 | /** | 3215 | /** |
3247 | * isert_put_unsol_pending_cmds() - Drop commands waiting for | 3216 | * isert_put_unsol_pending_cmds() - Drop commands waiting for |
3248 | * unsolicitate dataout | 3217 | * unsolicitate dataout |
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) | |||
3288 | isert_conn_terminate(isert_conn); | 3257 | isert_conn_terminate(isert_conn); |
3289 | mutex_unlock(&isert_conn->mutex); | 3258 | mutex_unlock(&isert_conn->mutex); |
3290 | 3259 | ||
3291 | isert_wait4flush(isert_conn); | 3260 | ib_drain_qp(isert_conn->qp); |
3292 | isert_put_unsol_pending_cmds(conn); | 3261 | isert_put_unsol_pending_cmds(conn); |
3293 | isert_wait4cmds(conn); | 3262 | isert_wait4cmds(conn); |
3294 | isert_wait4logout(isert_conn); | 3263 | isert_wait4logout(isert_conn); |
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn) | |||
3300 | { | 3269 | { |
3301 | struct isert_conn *isert_conn = conn->context; | 3270 | struct isert_conn *isert_conn = conn->context; |
3302 | 3271 | ||
3303 | isert_wait4flush(isert_conn); | 3272 | ib_drain_qp(isert_conn->qp); |
3304 | isert_put_conn(isert_conn); | 3273 | isert_put_conn(isert_conn); |
3305 | } | 3274 | } |
3306 | 3275 | ||
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 192788a4820c..147900cbb578 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -209,14 +209,12 @@ struct isert_conn { | |||
209 | struct ib_qp *qp; | 209 | struct ib_qp *qp; |
210 | struct isert_device *device; | 210 | struct isert_device *device; |
211 | struct mutex mutex; | 211 | struct mutex mutex; |
212 | struct completion wait_comp_err; | ||
213 | struct kref kref; | 212 | struct kref kref; |
214 | struct list_head fr_pool; | 213 | struct list_head fr_pool; |
215 | int fr_pool_size; | 214 | int fr_pool_size; |
216 | /* lock to protect fastreg pool */ | 215 | /* lock to protect fastreg pool */ |
217 | spinlock_t pool_lock; | 216 | spinlock_t pool_lock; |
218 | struct work_struct release_work; | 217 | struct work_struct release_work; |
219 | struct ib_recv_wr beacon; | ||
220 | bool logout_posted; | 218 | bool logout_posted; |
221 | bool snd_w_inv; | 219 | bool snd_w_inv; |
222 | }; | 220 | }; |
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c index 7fdf78f46433..df7e05ca8f9c 100644 --- a/drivers/isdn/hisax/isac.c +++ b/drivers/isdn/hisax/isac.c | |||
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val) | |||
215 | if (count == 0) | 215 | if (count == 0) |
216 | count = 32; | 216 | count = 32; |
217 | isac_empty_fifo(cs, count); | 217 | isac_empty_fifo(cs, count); |
218 | if ((count = cs->rcvidx) > 0) { | 218 | count = cs->rcvidx; |
219 | if (count > 0) { | ||
219 | cs->rcvidx = 0; | 220 | cs->rcvidx = 0; |
220 | if (!(skb = alloc_skb(count, GFP_ATOMIC))) | 221 | skb = alloc_skb(count, GFP_ATOMIC); |
222 | if (!skb) | ||
221 | printk(KERN_WARNING "HiSax: D receive out of memory\n"); | 223 | printk(KERN_WARNING "HiSax: D receive out of memory\n"); |
222 | else { | 224 | else { |
223 | memcpy(skb_put(skb, count), cs->rcvbuf, count); | 225 | memcpy(skb_put(skb, count), cs->rcvbuf, count); |
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val) | |||
251 | cs->tx_skb = NULL; | 253 | cs->tx_skb = NULL; |
252 | } | 254 | } |
253 | } | 255 | } |
254 | if ((cs->tx_skb = skb_dequeue(&cs->sq))) { | 256 | cs->tx_skb = skb_dequeue(&cs->sq); |
257 | if (cs->tx_skb) { | ||
255 | cs->tx_cnt = 0; | 258 | cs->tx_cnt = 0; |
256 | isac_fill_fifo(cs); | 259 | isac_fill_fifo(cs); |
257 | } else | 260 | } else |
@@ -313,7 +316,8 @@ afterXPR: | |||
313 | #if ARCOFI_USE | 316 | #if ARCOFI_USE |
314 | if (v1 & 0x08) { | 317 | if (v1 & 0x08) { |
315 | if (!cs->dc.isac.mon_rx) { | 318 | if (!cs->dc.isac.mon_rx) { |
316 | if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { | 319 | cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC); |
320 | if (!cs->dc.isac.mon_rx) { | ||
317 | if (cs->debug & L1_DEB_WARN) | 321 | if (cs->debug & L1_DEB_WARN) |
318 | debugl1(cs, "ISAC MON RX out of memory!"); | 322 | debugl1(cs, "ISAC MON RX out of memory!"); |
319 | cs->dc.isac.mocr &= 0xf0; | 323 | cs->dc.isac.mocr &= 0xf0; |
@@ -343,7 +347,8 @@ afterXPR: | |||
343 | afterMONR0: | 347 | afterMONR0: |
344 | if (v1 & 0x80) { | 348 | if (v1 & 0x80) { |
345 | if (!cs->dc.isac.mon_rx) { | 349 | if (!cs->dc.isac.mon_rx) { |
346 | if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { | 350 | cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC); |
351 | if (!cs->dc.isac.mon_rx) { | ||
347 | if (cs->debug & L1_DEB_WARN) | 352 | if (cs->debug & L1_DEB_WARN) |
348 | debugl1(cs, "ISAC MON RX out of memory!"); | 353 | debugl1(cs, "ISAC MON RX out of memory!"); |
349 | cs->dc.isac.mocr &= 0x0f; | 354 | cs->dc.isac.mocr &= 0x0f; |
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index 2a7b79bc90fd..2228cd3a846e 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c | |||
@@ -34,7 +34,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev) | |||
34 | { | 34 | { |
35 | struct media_entity *entity; | 35 | struct media_entity *entity; |
36 | struct media_entity *if_vid = NULL, *if_aud = NULL; | 36 | struct media_entity *if_vid = NULL, *if_aud = NULL; |
37 | struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL; | 37 | struct media_entity *tuner = NULL, *decoder = NULL; |
38 | struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; | 38 | struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; |
39 | bool is_webcam = false; | 39 | bool is_webcam = false; |
40 | u32 flags; | 40 | u32 flags; |
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index fa086e09d6b7..50454be86570 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -2264,6 +2264,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) | |||
2264 | mutex_unlock(&ps->smi_mutex); | 2264 | mutex_unlock(&ps->smi_mutex); |
2265 | } | 2265 | } |
2266 | 2266 | ||
2267 | static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, | ||
2268 | int reg, int val) | ||
2269 | { | ||
2270 | int ret; | ||
2271 | |||
2272 | ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); | ||
2273 | if (ret < 0) | ||
2274 | goto restore_page_0; | ||
2275 | |||
2276 | ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val); | ||
2277 | restore_page_0: | ||
2278 | _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); | ||
2279 | |||
2280 | return ret; | ||
2281 | } | ||
2282 | |||
2283 | static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, | ||
2284 | int reg) | ||
2285 | { | ||
2286 | int ret; | ||
2287 | |||
2288 | ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); | ||
2289 | if (ret < 0) | ||
2290 | goto restore_page_0; | ||
2291 | |||
2292 | ret = _mv88e6xxx_phy_read_indirect(ds, port, reg); | ||
2293 | restore_page_0: | ||
2294 | _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); | ||
2295 | |||
2296 | return ret; | ||
2297 | } | ||
2298 | |||
2299 | static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds) | ||
2300 | { | ||
2301 | int ret; | ||
2302 | |||
2303 | ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES, | ||
2304 | MII_BMCR); | ||
2305 | if (ret < 0) | ||
2306 | return ret; | ||
2307 | |||
2308 | if (ret & BMCR_PDOWN) { | ||
2309 | ret &= ~BMCR_PDOWN; | ||
2310 | ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES, | ||
2311 | PAGE_FIBER_SERDES, MII_BMCR, | ||
2312 | ret); | ||
2313 | } | ||
2314 | |||
2315 | return ret; | ||
2316 | } | ||
2317 | |||
2267 | static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) | 2318 | static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) |
2268 | { | 2319 | { |
2269 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | 2320 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); |
@@ -2367,6 +2418,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) | |||
2367 | goto abort; | 2418 | goto abort; |
2368 | } | 2419 | } |
2369 | 2420 | ||
2421 | /* If this port is connected to a SerDes, make sure the SerDes is not | ||
2422 | * powered down. | ||
2423 | */ | ||
2424 | if (mv88e6xxx_6352_family(ds)) { | ||
2425 | ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS); | ||
2426 | if (ret < 0) | ||
2427 | goto abort; | ||
2428 | ret &= PORT_STATUS_CMODE_MASK; | ||
2429 | if ((ret == PORT_STATUS_CMODE_100BASE_X) || | ||
2430 | (ret == PORT_STATUS_CMODE_1000BASE_X) || | ||
2431 | (ret == PORT_STATUS_CMODE_SGMII)) { | ||
2432 | ret = mv88e6xxx_power_on_serdes(ds); | ||
2433 | if (ret < 0) | ||
2434 | goto abort; | ||
2435 | } | ||
2436 | } | ||
2437 | |||
2370 | /* Port Control 2: don't force a good FCS, set the maximum frame size to | 2438 | /* Port Control 2: don't force a good FCS, set the maximum frame size to |
2371 | * 10240 bytes, disable 802.1q tags checking, don't discard tagged or | 2439 | * 10240 bytes, disable 802.1q tags checking, don't discard tagged or |
2372 | * untagged frames on this port, do a destination address lookup on all | 2440 | * untagged frames on this port, do a destination address lookup on all |
@@ -2714,13 +2782,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg) | |||
2714 | int ret; | 2782 | int ret; |
2715 | 2783 | ||
2716 | mutex_lock(&ps->smi_mutex); | 2784 | mutex_lock(&ps->smi_mutex); |
2717 | ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); | 2785 | ret = _mv88e6xxx_phy_page_read(ds, port, page, reg); |
2718 | if (ret < 0) | ||
2719 | goto error; | ||
2720 | ret = _mv88e6xxx_phy_read_indirect(ds, port, reg); | ||
2721 | error: | ||
2722 | _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); | ||
2723 | mutex_unlock(&ps->smi_mutex); | 2786 | mutex_unlock(&ps->smi_mutex); |
2787 | |||
2724 | return ret; | 2788 | return ret; |
2725 | } | 2789 | } |
2726 | 2790 | ||
@@ -2731,14 +2795,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, | |||
2731 | int ret; | 2795 | int ret; |
2732 | 2796 | ||
2733 | mutex_lock(&ps->smi_mutex); | 2797 | mutex_lock(&ps->smi_mutex); |
2734 | ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); | 2798 | ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val); |
2735 | if (ret < 0) | ||
2736 | goto error; | ||
2737 | |||
2738 | ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val); | ||
2739 | error: | ||
2740 | _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); | ||
2741 | mutex_unlock(&ps->smi_mutex); | 2799 | mutex_unlock(&ps->smi_mutex); |
2800 | |||
2742 | return ret; | 2801 | return ret; |
2743 | } | 2802 | } |
2744 | 2803 | ||
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 9a038aba48fb..26a424acd10f 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h | |||
@@ -28,6 +28,10 @@ | |||
28 | #define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) | 28 | #define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) |
29 | #define SMI_DATA 0x01 | 29 | #define SMI_DATA 0x01 |
30 | 30 | ||
31 | /* Fiber/SERDES Registers are located at SMI address F, page 1 */ | ||
32 | #define REG_FIBER_SERDES 0x0f | ||
33 | #define PAGE_FIBER_SERDES 0x01 | ||
34 | |||
31 | #define REG_PORT(p) (0x10 + (p)) | 35 | #define REG_PORT(p) (0x10 + (p)) |
32 | #define PORT_STATUS 0x00 | 36 | #define PORT_STATUS 0x00 |
33 | #define PORT_STATUS_PAUSE_EN BIT(15) | 37 | #define PORT_STATUS_PAUSE_EN BIT(15) |
@@ -45,6 +49,10 @@ | |||
45 | #define PORT_STATUS_MGMII BIT(6) /* 6185 */ | 49 | #define PORT_STATUS_MGMII BIT(6) /* 6185 */ |
46 | #define PORT_STATUS_TX_PAUSED BIT(5) | 50 | #define PORT_STATUS_TX_PAUSED BIT(5) |
47 | #define PORT_STATUS_FLOW_CTRL BIT(4) | 51 | #define PORT_STATUS_FLOW_CTRL BIT(4) |
52 | #define PORT_STATUS_CMODE_MASK 0x0f | ||
53 | #define PORT_STATUS_CMODE_100BASE_X 0x8 | ||
54 | #define PORT_STATUS_CMODE_1000BASE_X 0x9 | ||
55 | #define PORT_STATUS_CMODE_SGMII 0xa | ||
48 | #define PORT_PCS_CTRL 0x01 | 56 | #define PORT_PCS_CTRL 0x01 |
49 | #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) | 57 | #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) |
50 | #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) | 58 | #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aabbd51db981..12a009d720cd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -2653,7 +2653,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
2653 | /* Write request msg to hwrm channel */ | 2653 | /* Write request msg to hwrm channel */ |
2654 | __iowrite32_copy(bp->bar0, data, msg_len / 4); | 2654 | __iowrite32_copy(bp->bar0, data, msg_len / 4); |
2655 | 2655 | ||
2656 | for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) | 2656 | for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4) |
2657 | writel(0, bp->bar0 + i); | 2657 | writel(0, bp->bar0 + i); |
2658 | 2658 | ||
2659 | /* currently supports only one outstanding message */ | 2659 | /* currently supports only one outstanding message */ |
@@ -3391,11 +3391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) | |||
3391 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; | 3391 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
3392 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; | 3392 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
3393 | 3393 | ||
3394 | cpr->cp_doorbell = bp->bar1 + i * 0x80; | ||
3394 | rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, | 3395 | rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, |
3395 | INVALID_STATS_CTX_ID); | 3396 | INVALID_STATS_CTX_ID); |
3396 | if (rc) | 3397 | if (rc) |
3397 | goto err_out; | 3398 | goto err_out; |
3398 | cpr->cp_doorbell = bp->bar1 + i * 0x80; | ||
3399 | BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); | 3399 | BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); |
3400 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; | 3400 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
3401 | } | 3401 | } |
@@ -3830,6 +3830,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) | |||
3830 | struct hwrm_ver_get_input req = {0}; | 3830 | struct hwrm_ver_get_input req = {0}; |
3831 | struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; | 3831 | struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; |
3832 | 3832 | ||
3833 | bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; | ||
3833 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); | 3834 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); |
3834 | req.hwrm_intf_maj = HWRM_VERSION_MAJOR; | 3835 | req.hwrm_intf_maj = HWRM_VERSION_MAJOR; |
3835 | req.hwrm_intf_min = HWRM_VERSION_MINOR; | 3836 | req.hwrm_intf_min = HWRM_VERSION_MINOR; |
@@ -3855,6 +3856,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) | |||
3855 | if (!bp->hwrm_cmd_timeout) | 3856 | if (!bp->hwrm_cmd_timeout) |
3856 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; | 3857 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
3857 | 3858 | ||
3859 | if (resp->hwrm_intf_maj >= 1) | ||
3860 | bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); | ||
3861 | |||
3858 | hwrm_ver_get_exit: | 3862 | hwrm_ver_get_exit: |
3859 | mutex_unlock(&bp->hwrm_cmd_lock); | 3863 | mutex_unlock(&bp->hwrm_cmd_lock); |
3860 | return rc; | 3864 | return rc; |
@@ -4555,7 +4559,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) | |||
4555 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) | 4559 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) |
4556 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; | 4560 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; |
4557 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) | 4561 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) |
4558 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; | 4562 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; |
4559 | req->enables |= | 4563 | req->enables |= |
4560 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); | 4564 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); |
4561 | } else { | 4565 | } else { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index ec04c47172b7..709b95b8fcba 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -477,6 +477,7 @@ struct rx_tpa_end_cmp_ext { | |||
477 | #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) | 477 | #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) |
478 | #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) | 478 | #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) |
479 | 479 | ||
480 | #define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) | ||
480 | #define DFLT_HWRM_CMD_TIMEOUT 500 | 481 | #define DFLT_HWRM_CMD_TIMEOUT 500 |
481 | #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) | 482 | #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) |
482 | #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) | 483 | #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) |
@@ -953,6 +954,7 @@ struct bnxt { | |||
953 | dma_addr_t hw_tx_port_stats_map; | 954 | dma_addr_t hw_tx_port_stats_map; |
954 | int hw_port_stats_size; | 955 | int hw_port_stats_size; |
955 | 956 | ||
957 | u16 hwrm_max_req_len; | ||
956 | int hwrm_cmd_timeout; | 958 | int hwrm_cmd_timeout; |
957 | struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ | 959 | struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ |
958 | struct hwrm_ver_get_output ver_resp; | 960 | struct hwrm_ver_get_output ver_resp; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 9ada1662b651..2e472f6dbf2d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | |||
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev, | |||
855 | if (BNXT_VF(bp)) | 855 | if (BNXT_VF(bp)) |
856 | return; | 856 | return; |
857 | epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); | 857 | epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); |
858 | epause->rx_pause = | 858 | epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); |
859 | ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); | 859 | epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); |
860 | epause->tx_pause = | ||
861 | ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0); | ||
862 | } | 860 | } |
863 | 861 | ||
864 | static int bnxt_set_pauseparam(struct net_device *dev, | 862 | static int bnxt_set_pauseparam(struct net_device *dev, |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6746fd03cb3a..cf6445d148ca 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, | |||
1171 | struct enet_cb *tx_cb_ptr; | 1171 | struct enet_cb *tx_cb_ptr; |
1172 | struct netdev_queue *txq; | 1172 | struct netdev_queue *txq; |
1173 | unsigned int pkts_compl = 0; | 1173 | unsigned int pkts_compl = 0; |
1174 | unsigned int bytes_compl = 0; | ||
1174 | unsigned int c_index; | 1175 | unsigned int c_index; |
1175 | unsigned int txbds_ready; | 1176 | unsigned int txbds_ready; |
1176 | unsigned int txbds_processed = 0; | 1177 | unsigned int txbds_processed = 0; |
@@ -1193,16 +1194,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, | |||
1193 | tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; | 1194 | tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; |
1194 | if (tx_cb_ptr->skb) { | 1195 | if (tx_cb_ptr->skb) { |
1195 | pkts_compl++; | 1196 | pkts_compl++; |
1196 | dev->stats.tx_packets++; | 1197 | bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; |
1197 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | ||
1198 | dma_unmap_single(&dev->dev, | 1198 | dma_unmap_single(&dev->dev, |
1199 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 1199 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
1200 | dma_unmap_len(tx_cb_ptr, dma_len), | 1200 | dma_unmap_len(tx_cb_ptr, dma_len), |
1201 | DMA_TO_DEVICE); | 1201 | DMA_TO_DEVICE); |
1202 | bcmgenet_free_cb(tx_cb_ptr); | 1202 | bcmgenet_free_cb(tx_cb_ptr); |
1203 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { | 1203 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { |
1204 | dev->stats.tx_bytes += | ||
1205 | dma_unmap_len(tx_cb_ptr, dma_len); | ||
1206 | dma_unmap_page(&dev->dev, | 1204 | dma_unmap_page(&dev->dev, |
1207 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 1205 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
1208 | dma_unmap_len(tx_cb_ptr, dma_len), | 1206 | dma_unmap_len(tx_cb_ptr, dma_len), |
@@ -1220,6 +1218,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, | |||
1220 | ring->free_bds += txbds_processed; | 1218 | ring->free_bds += txbds_processed; |
1221 | ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; | 1219 | ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; |
1222 | 1220 | ||
1221 | dev->stats.tx_packets += pkts_compl; | ||
1222 | dev->stats.tx_bytes += bytes_compl; | ||
1223 | |||
1223 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { | 1224 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { |
1224 | txq = netdev_get_tx_queue(dev, ring->queue); | 1225 | txq = netdev_get_tx_queue(dev, ring->queue); |
1225 | if (netif_tx_queue_stopped(txq)) | 1226 | if (netif_tx_queue_stopped(txq)) |
@@ -1296,7 +1297,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, | |||
1296 | 1297 | ||
1297 | tx_cb_ptr->skb = skb; | 1298 | tx_cb_ptr->skb = skb; |
1298 | 1299 | ||
1299 | skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); | 1300 | skb_len = skb_headlen(skb); |
1300 | 1301 | ||
1301 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); | 1302 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); |
1302 | ret = dma_mapping_error(kdev, mapping); | 1303 | ret = dma_mapping_error(kdev, mapping); |
@@ -1464,6 +1465,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1464 | goto out; | 1465 | goto out; |
1465 | } | 1466 | } |
1466 | 1467 | ||
1468 | /* Retain how many bytes will be sent on the wire, without TSB inserted | ||
1469 | * by transmit checksum offload | ||
1470 | */ | ||
1471 | GENET_CB(skb)->bytes_sent = skb->len; | ||
1472 | |||
1467 | /* set the SKB transmit checksum */ | 1473 | /* set the SKB transmit checksum */ |
1468 | if (priv->desc_64b_en) { | 1474 | if (priv->desc_64b_en) { |
1469 | skb = bcmgenet_put_tx_csum(dev, skb); | 1475 | skb = bcmgenet_put_tx_csum(dev, skb); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 967367557309..1e2dc34d331a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params { | |||
531 | u32 flags; | 531 | u32 flags; |
532 | }; | 532 | }; |
533 | 533 | ||
534 | struct bcmgenet_skb_cb { | ||
535 | unsigned int bytes_sent; /* bytes on the wire (no TSB) */ | ||
536 | }; | ||
537 | |||
538 | #define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) | ||
539 | |||
534 | struct bcmgenet_tx_ring { | 540 | struct bcmgenet_tx_ring { |
535 | spinlock_t lock; /* ring lock */ | 541 | spinlock_t lock; /* ring lock */ |
536 | struct napi_struct napi; /* NAPI per tx queue */ | 542 | struct napi_struct napi; /* NAPI per tx queue */ |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 6619178ed77b..48a7d7dee846 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -917,7 +917,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
917 | unsigned int frag_len = bp->rx_buffer_size; | 917 | unsigned int frag_len = bp->rx_buffer_size; |
918 | 918 | ||
919 | if (offset + frag_len > len) { | 919 | if (offset + frag_len > len) { |
920 | BUG_ON(frag != last_frag); | 920 | if (unlikely(frag != last_frag)) { |
921 | dev_kfree_skb_any(skb); | ||
922 | return -1; | ||
923 | } | ||
921 | frag_len = len - offset; | 924 | frag_len = len - offset; |
922 | } | 925 | } |
923 | skb_copy_to_linear_data_offset(skb, offset, | 926 | skb_copy_to_linear_data_offset(skb, offset, |
@@ -945,8 +948,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
945 | return 0; | 948 | return 0; |
946 | } | 949 | } |
947 | 950 | ||
951 | static inline void macb_init_rx_ring(struct macb *bp) | ||
952 | { | ||
953 | dma_addr_t addr; | ||
954 | int i; | ||
955 | |||
956 | addr = bp->rx_buffers_dma; | ||
957 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
958 | bp->rx_ring[i].addr = addr; | ||
959 | bp->rx_ring[i].ctrl = 0; | ||
960 | addr += bp->rx_buffer_size; | ||
961 | } | ||
962 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); | ||
963 | } | ||
964 | |||
948 | static int macb_rx(struct macb *bp, int budget) | 965 | static int macb_rx(struct macb *bp, int budget) |
949 | { | 966 | { |
967 | bool reset_rx_queue = false; | ||
950 | int received = 0; | 968 | int received = 0; |
951 | unsigned int tail; | 969 | unsigned int tail; |
952 | int first_frag = -1; | 970 | int first_frag = -1; |
@@ -972,10 +990,18 @@ static int macb_rx(struct macb *bp, int budget) | |||
972 | 990 | ||
973 | if (ctrl & MACB_BIT(RX_EOF)) { | 991 | if (ctrl & MACB_BIT(RX_EOF)) { |
974 | int dropped; | 992 | int dropped; |
975 | BUG_ON(first_frag == -1); | 993 | |
994 | if (unlikely(first_frag == -1)) { | ||
995 | reset_rx_queue = true; | ||
996 | continue; | ||
997 | } | ||
976 | 998 | ||
977 | dropped = macb_rx_frame(bp, first_frag, tail); | 999 | dropped = macb_rx_frame(bp, first_frag, tail); |
978 | first_frag = -1; | 1000 | first_frag = -1; |
1001 | if (unlikely(dropped < 0)) { | ||
1002 | reset_rx_queue = true; | ||
1003 | continue; | ||
1004 | } | ||
979 | if (!dropped) { | 1005 | if (!dropped) { |
980 | received++; | 1006 | received++; |
981 | budget--; | 1007 | budget--; |
@@ -983,6 +1009,26 @@ static int macb_rx(struct macb *bp, int budget) | |||
983 | } | 1009 | } |
984 | } | 1010 | } |
985 | 1011 | ||
1012 | if (unlikely(reset_rx_queue)) { | ||
1013 | unsigned long flags; | ||
1014 | u32 ctrl; | ||
1015 | |||
1016 | netdev_err(bp->dev, "RX queue corruption: reset it\n"); | ||
1017 | |||
1018 | spin_lock_irqsave(&bp->lock, flags); | ||
1019 | |||
1020 | ctrl = macb_readl(bp, NCR); | ||
1021 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | ||
1022 | |||
1023 | macb_init_rx_ring(bp); | ||
1024 | macb_writel(bp, RBQP, bp->rx_ring_dma); | ||
1025 | |||
1026 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); | ||
1027 | |||
1028 | spin_unlock_irqrestore(&bp->lock, flags); | ||
1029 | return received; | ||
1030 | } | ||
1031 | |||
986 | if (first_frag != -1) | 1032 | if (first_frag != -1) |
987 | bp->rx_tail = first_frag; | 1033 | bp->rx_tail = first_frag; |
988 | else | 1034 | else |
@@ -1100,7 +1146,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
1100 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); | 1146 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); |
1101 | 1147 | ||
1102 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | 1148 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
1103 | macb_writel(bp, ISR, MACB_BIT(RXUBR)); | 1149 | queue_writel(queue, ISR, MACB_BIT(RXUBR)); |
1104 | } | 1150 | } |
1105 | 1151 | ||
1106 | if (status & MACB_BIT(ISR_ROVR)) { | 1152 | if (status & MACB_BIT(ISR_ROVR)) { |
@@ -1523,15 +1569,8 @@ static void gem_init_rings(struct macb *bp) | |||
1523 | static void macb_init_rings(struct macb *bp) | 1569 | static void macb_init_rings(struct macb *bp) |
1524 | { | 1570 | { |
1525 | int i; | 1571 | int i; |
1526 | dma_addr_t addr; | ||
1527 | 1572 | ||
1528 | addr = bp->rx_buffers_dma; | 1573 | macb_init_rx_ring(bp); |
1529 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1530 | bp->rx_ring[i].addr = addr; | ||
1531 | bp->rx_ring[i].ctrl = 0; | ||
1532 | addr += bp->rx_buffer_size; | ||
1533 | } | ||
1534 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); | ||
1535 | 1574 | ||
1536 | for (i = 0; i < TX_RING_SIZE; i++) { | 1575 | for (i = 0; i < TX_RING_SIZE; i++) { |
1537 | bp->queues[0].tx_ring[i].addr = 0; | 1576 | bp->queues[0].tx_ring[i].addr = 0; |
@@ -2957,9 +2996,10 @@ static int macb_probe(struct platform_device *pdev) | |||
2957 | phy_node = of_get_next_available_child(np, NULL); | 2996 | phy_node = of_get_next_available_child(np, NULL); |
2958 | if (phy_node) { | 2997 | if (phy_node) { |
2959 | int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); | 2998 | int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); |
2960 | if (gpio_is_valid(gpio)) | 2999 | if (gpio_is_valid(gpio)) { |
2961 | bp->reset_gpio = gpio_to_desc(gpio); | 3000 | bp->reset_gpio = gpio_to_desc(gpio); |
2962 | gpiod_direction_output(bp->reset_gpio, 1); | 3001 | gpiod_direction_output(bp->reset_gpio, 1); |
3002 | } | ||
2963 | } | 3003 | } |
2964 | of_node_put(phy_node); | 3004 | of_node_put(phy_node); |
2965 | 3005 | ||
@@ -3029,7 +3069,8 @@ static int macb_remove(struct platform_device *pdev) | |||
3029 | mdiobus_free(bp->mii_bus); | 3069 | mdiobus_free(bp->mii_bus); |
3030 | 3070 | ||
3031 | /* Shutdown the PHY if there is a GPIO reset */ | 3071 | /* Shutdown the PHY if there is a GPIO reset */ |
3032 | gpiod_set_value(bp->reset_gpio, 0); | 3072 | if (bp->reset_gpio) |
3073 | gpiod_set_value(bp->reset_gpio, 0); | ||
3033 | 3074 | ||
3034 | unregister_netdev(dev); | 3075 | unregister_netdev(dev); |
3035 | clk_disable_unprepare(bp->tx_clk); | 3076 | clk_disable_unprepare(bp->tx_clk); |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 37c081583084..08243c2ff4b4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev) | |||
943 | else | 943 | else |
944 | val &= ~FEC_RACC_OPTIONS; | 944 | val &= ~FEC_RACC_OPTIONS; |
945 | writel(val, fep->hwp + FEC_RACC); | 945 | writel(val, fep->hwp + FEC_RACC); |
946 | writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); | ||
946 | } | 947 | } |
947 | writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); | ||
948 | #endif | 948 | #endif |
949 | 949 | ||
950 | /* | 950 | /* |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 37d0cce392be..e8d36aaea223 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
@@ -469,7 +469,7 @@ struct hnae_ae_ops { | |||
469 | u32 *tx_usecs, u32 *rx_usecs); | 469 | u32 *tx_usecs, u32 *rx_usecs); |
470 | void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, | 470 | void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, |
471 | u32 *tx_frames, u32 *rx_frames); | 471 | u32 *tx_frames, u32 *rx_frames); |
472 | void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); | 472 | int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); |
473 | int (*set_coalesce_frames)(struct hnae_handle *handle, | 473 | int (*set_coalesce_frames)(struct hnae_handle *handle, |
474 | u32 coalesce_frames); | 474 | u32 coalesce_frames); |
475 | void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); | 475 | void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 285c893ab135..a1cb461ac45f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, | |||
159 | ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; | 159 | ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; |
160 | 160 | ||
161 | ring_pair_cb->used_by_vf = 1; | 161 | ring_pair_cb->used_by_vf = 1; |
162 | if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) | ||
163 | ring_pair_cb->port_id_in_dsa = port_idx; | ||
164 | else | ||
165 | ring_pair_cb->port_id_in_dsa = 0; | ||
166 | |||
167 | ring_pair_cb++; | 162 | ring_pair_cb++; |
168 | } | 163 | } |
169 | 164 | ||
@@ -453,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle, | |||
453 | static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, | 448 | static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, |
454 | u32 *tx_usecs, u32 *rx_usecs) | 449 | u32 *tx_usecs, u32 *rx_usecs) |
455 | { | 450 | { |
456 | int port; | 451 | struct ring_pair_cb *ring_pair = |
457 | 452 | container_of(handle->qs[0], struct ring_pair_cb, q); | |
458 | port = hns_ae_map_eport_to_dport(handle->eport_id); | ||
459 | 453 | ||
460 | *tx_usecs = hns_rcb_get_coalesce_usecs( | 454 | *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common, |
461 | hns_ae_get_dsaf_dev(handle->dev), | 455 | ring_pair->port_id_in_comm); |
462 | hns_dsaf_get_comm_idx_by_port(port)); | 456 | *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common, |
463 | *rx_usecs = hns_rcb_get_coalesce_usecs( | 457 | ring_pair->port_id_in_comm); |
464 | hns_ae_get_dsaf_dev(handle->dev), | ||
465 | hns_dsaf_get_comm_idx_by_port(port)); | ||
466 | } | 458 | } |
467 | 459 | ||
468 | static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, | 460 | static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, |
469 | u32 *tx_frames, u32 *rx_frames) | 461 | u32 *tx_frames, u32 *rx_frames) |
470 | { | 462 | { |
471 | int port; | 463 | struct ring_pair_cb *ring_pair = |
464 | container_of(handle->qs[0], struct ring_pair_cb, q); | ||
472 | 465 | ||
473 | assert(handle); | 466 | *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, |
474 | 467 | ring_pair->port_id_in_comm); | |
475 | port = hns_ae_map_eport_to_dport(handle->eport_id); | 468 | *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, |
476 | 469 | ring_pair->port_id_in_comm); | |
477 | *tx_frames = hns_rcb_get_coalesced_frames( | ||
478 | hns_ae_get_dsaf_dev(handle->dev), port); | ||
479 | *rx_frames = hns_rcb_get_coalesced_frames( | ||
480 | hns_ae_get_dsaf_dev(handle->dev), port); | ||
481 | } | 470 | } |
482 | 471 | ||
483 | static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle, | 472 | static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle, |
484 | u32 timeout) | 473 | u32 timeout) |
485 | { | 474 | { |
486 | int port; | 475 | struct ring_pair_cb *ring_pair = |
476 | container_of(handle->qs[0], struct ring_pair_cb, q); | ||
487 | 477 | ||
488 | assert(handle); | 478 | return hns_rcb_set_coalesce_usecs( |
489 | 479 | ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout); | |
490 | port = hns_ae_map_eport_to_dport(handle->eport_id); | ||
491 | |||
492 | hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev), | ||
493 | port, timeout); | ||
494 | } | 480 | } |
495 | 481 | ||
496 | static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, | 482 | static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, |
497 | u32 coalesce_frames) | 483 | u32 coalesce_frames) |
498 | { | 484 | { |
499 | int port; | 485 | struct ring_pair_cb *ring_pair = |
500 | int ret; | 486 | container_of(handle->qs[0], struct ring_pair_cb, q); |
501 | 487 | ||
502 | assert(handle); | 488 | return hns_rcb_set_coalesced_frames( |
503 | 489 | ring_pair->rcb_common, | |
504 | port = hns_ae_map_eport_to_dport(handle->eport_id); | 490 | ring_pair->port_id_in_comm, coalesce_frames); |
505 | |||
506 | ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev), | ||
507 | port, coalesce_frames); | ||
508 | return ret; | ||
509 | } | 491 | } |
510 | 492 | ||
511 | void hns_ae_update_stats(struct hnae_handle *handle, | 493 | void hns_ae_update_stats(struct hnae_handle *handle, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 6e2b76ede075..44abb08de155 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -664,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) | |||
664 | return; | 664 | return; |
665 | 665 | ||
666 | for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { | 666 | for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { |
667 | snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc); | 667 | snprintf(buff, ETH_GSTRING_LEN, "%s", |
668 | g_gmac_stats_string[i].desc); | ||
668 | buff = buff + ETH_GSTRING_LEN; | 669 | buff = buff + ETH_GSTRING_LEN; |
669 | } | 670 | } |
670 | } | 671 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 5c1ac9ba1bf2..5978a5c8ef35 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -2219,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) | |||
2219 | /* dsaf onode registers */ | 2219 | /* dsaf onode registers */ |
2220 | for (i = 0; i < DSAF_XOD_NUM; i++) { | 2220 | for (i = 0; i < DSAF_XOD_NUM; i++) { |
2221 | p[311 + i] = dsaf_read_dev(ddev, | 2221 | p[311 + i] = dsaf_read_dev(ddev, |
2222 | DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90); | 2222 | DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90); |
2223 | p[319 + i] = dsaf_read_dev(ddev, | 2223 | p[319 + i] = dsaf_read_dev(ddev, |
2224 | DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90); | 2224 | DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90); |
2225 | p[327 + i] = dsaf_read_dev(ddev, | 2225 | p[327 + i] = dsaf_read_dev(ddev, |
2226 | DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90); | 2226 | DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90); |
2227 | p[335 + i] = dsaf_read_dev(ddev, | 2227 | p[335 + i] = dsaf_read_dev(ddev, |
2228 | DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90); | 2228 | DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90); |
2229 | p[343 + i] = dsaf_read_dev(ddev, | 2229 | p[343 + i] = dsaf_read_dev(ddev, |
2230 | DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90); | 2230 | DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90); |
2231 | p[351 + i] = dsaf_read_dev(ddev, | 2231 | p[351 + i] = dsaf_read_dev(ddev, |
2232 | DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90); | 2232 | DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90); |
2233 | } | 2233 | } |
2234 | 2234 | ||
2235 | p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); | 2235 | p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 607c3be42241..e69b02287c44 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | |||
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) | |||
244 | */ | 244 | */ |
245 | phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) | 245 | phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) |
246 | { | 246 | { |
247 | u32 hilink3_mode; | 247 | u32 mode; |
248 | u32 hilink4_mode; | 248 | u32 reg; |
249 | u32 shift; | ||
250 | bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); | ||
249 | void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; | 251 | void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; |
250 | int dev_id = mac_cb->mac_id; | 252 | int mac_id = mac_cb->mac_id; |
251 | phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; | 253 | phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; |
252 | 254 | ||
253 | hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG); | 255 | if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) { |
254 | hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG); | 256 | phy_if = PHY_INTERFACE_MODE_SGMII; |
255 | if (dev_id >= 0 && dev_id <= 3) { | 257 | } else if (mac_id >= 0 && mac_id <= 3) { |
256 | if (hilink4_mode == 0) | 258 | reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG; |
257 | phy_if = PHY_INTERFACE_MODE_SGMII; | 259 | mode = dsaf_read_reg(sys_ctl_vaddr, reg); |
258 | else | 260 | /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */ |
261 | shift = is_ver1 ? 0 : mac_id; | ||
262 | if (dsaf_get_bit(mode, shift)) | ||
259 | phy_if = PHY_INTERFACE_MODE_XGMII; | 263 | phy_if = PHY_INTERFACE_MODE_XGMII; |
260 | } else if (dev_id >= 4 && dev_id <= 5) { | ||
261 | if (hilink3_mode == 0) | ||
262 | phy_if = PHY_INTERFACE_MODE_SGMII; | ||
263 | else | 264 | else |
265 | phy_if = PHY_INTERFACE_MODE_SGMII; | ||
266 | } else if (mac_id >= 4 && mac_id <= 7) { | ||
267 | reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG; | ||
268 | mode = dsaf_read_reg(sys_ctl_vaddr, reg); | ||
269 | /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */ | ||
270 | shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6; | ||
271 | if (dsaf_get_bit(mode, shift)) | ||
264 | phy_if = PHY_INTERFACE_MODE_XGMII; | 272 | phy_if = PHY_INTERFACE_MODE_XGMII; |
265 | } else { | 273 | else |
266 | phy_if = PHY_INTERFACE_MODE_SGMII; | 274 | phy_if = PHY_INTERFACE_MODE_SGMII; |
267 | } | 275 | } |
268 | |||
269 | dev_dbg(mac_cb->dev, | ||
270 | "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n", | ||
271 | hilink3_mode, hilink4_mode, dev_id, phy_if); | ||
272 | return phy_if; | 276 | return phy_if; |
273 | } | 277 | } |
274 | 278 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 12188807468c..28ee26e5c478 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) | |||
215 | dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, | 215 | dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, |
216 | bd_size_type); | 216 | bd_size_type); |
217 | dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, | 217 | dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, |
218 | ring_pair->port_id_in_dsa); | 218 | ring_pair->port_id_in_comm); |
219 | dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, | 219 | dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, |
220 | ring_pair->port_id_in_dsa); | 220 | ring_pair->port_id_in_comm); |
221 | } else { | 221 | } else { |
222 | dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG, | 222 | dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG, |
223 | (u32)dma); | 223 | (u32)dma); |
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) | |||
227 | dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, | 227 | dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, |
228 | bd_size_type); | 228 | bd_size_type); |
229 | dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, | 229 | dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, |
230 | ring_pair->port_id_in_dsa); | 230 | ring_pair->port_id_in_comm); |
231 | dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, | 231 | dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, |
232 | ring_pair->port_id_in_dsa); | 232 | ring_pair->port_id_in_comm); |
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, | |||
256 | desc_cnt); | 256 | desc_cnt); |
257 | } | 257 | } |
258 | 258 | ||
259 | /** | 259 | static void hns_rcb_set_port_timeout( |
260 | *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames | 260 | struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout) |
261 | *@rcb_common: rcb_common device | ||
262 | *@port_idx:port index | ||
263 | *@coalesced_frames:BD num for coalesced frames | ||
264 | */ | ||
265 | static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, | ||
266 | u32 port_idx, | ||
267 | u32 coalesced_frames) | ||
268 | { | ||
269 | if (coalesced_frames >= rcb_common->desc_num || | ||
270 | coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) | ||
271 | return -EINVAL; | ||
272 | |||
273 | dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4, | ||
274 | coalesced_frames); | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames | ||
280 | *@rcb_common: rcb_common device | ||
281 | *@port_idx:port index | ||
282 | * return coaleseced frames value | ||
283 | */ | ||
284 | static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common, | ||
285 | u32 port_idx) | ||
286 | { | 261 | { |
287 | if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) | 262 | if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) |
288 | port_idx = 0; | 263 | dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, |
289 | 264 | timeout * HNS_RCB_CLK_FREQ_MHZ); | |
290 | return dsaf_read_dev(rcb_common, | 265 | else |
291 | RCB_CFG_PKTLINE_REG + port_idx * 4); | 266 | dsaf_write_dev(rcb_common, |
292 | } | 267 | RCB_PORT_CFG_OVERTIME_REG + port_idx * 4, |
293 | 268 | timeout); | |
294 | /** | ||
295 | *hns_rcb_set_timeout - set rcb port coalesced time_out | ||
296 | *@rcb_common: rcb_common device | ||
297 | *@time_out:time for coalesced time_out | ||
298 | */ | ||
299 | static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common, | ||
300 | u32 timeout) | ||
301 | { | ||
302 | dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout); | ||
303 | } | 269 | } |
304 | 270 | ||
305 | static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) | 271 | static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) |
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) | |||
361 | 327 | ||
362 | for (i = 0; i < port_num; i++) { | 328 | for (i = 0; i < port_num; i++) { |
363 | hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); | 329 | hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); |
364 | (void)hns_rcb_set_port_coalesced_frames( | 330 | (void)hns_rcb_set_coalesced_frames( |
365 | rcb_common, i, rcb_common->coalesced_frames); | 331 | rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES); |
332 | hns_rcb_set_port_timeout( | ||
333 | rcb_common, i, HNS_RCB_DEF_COALESCED_USECS); | ||
366 | } | 334 | } |
367 | hns_rcb_set_timeout(rcb_common, rcb_common->timeout); | ||
368 | 335 | ||
369 | dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, | 336 | dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, |
370 | HNS_RCB_COMMON_ENDIAN); | 337 | HNS_RCB_COMMON_ENDIAN); |
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) | |||
460 | hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); | 427 | hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); |
461 | } | 428 | } |
462 | 429 | ||
463 | static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) | 430 | static int hns_rcb_get_port_in_comm( |
431 | struct rcb_common_cb *rcb_common, int ring_idx) | ||
464 | { | 432 | { |
465 | int comm_index = rcb_common->comm_index; | 433 | int comm_index = rcb_common->comm_index; |
466 | int port; | 434 | int port; |
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) | |||
470 | q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; | 438 | q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; |
471 | port = ring_idx / q_num; | 439 | port = ring_idx / q_num; |
472 | } else { | 440 | } else { |
473 | port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; | 441 | port = 0; /* config debug-ports port_id_in_comm to 0*/ |
474 | } | 442 | } |
475 | 443 | ||
476 | return port; | 444 | return port; |
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) | |||
518 | ring_pair_cb->index = i; | 486 | ring_pair_cb->index = i; |
519 | ring_pair_cb->q.io_base = | 487 | ring_pair_cb->q.io_base = |
520 | RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); | 488 | RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); |
521 | ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); | 489 | ring_pair_cb->port_id_in_comm = |
490 | hns_rcb_get_port_in_comm(rcb_common, i); | ||
522 | ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = | 491 | ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = |
523 | is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : | 492 | is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : |
524 | platform_get_irq(pdev, base_irq_idx + i * 3 + 1); | 493 | platform_get_irq(pdev, base_irq_idx + i * 3 + 1); |
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) | |||
534 | /** | 503 | /** |
535 | *hns_rcb_get_coalesced_frames - get rcb port coalesced frames | 504 | *hns_rcb_get_coalesced_frames - get rcb port coalesced frames |
536 | *@rcb_common: rcb_common device | 505 | *@rcb_common: rcb_common device |
537 | *@comm_index:port index | 506 | *@port_idx:port id in comm |
538 | *return coalesced_frames | 507 | * |
508 | *Returns: coalesced_frames | ||
539 | */ | 509 | */ |
540 | u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port) | 510 | u32 hns_rcb_get_coalesced_frames( |
511 | struct rcb_common_cb *rcb_common, u32 port_idx) | ||
541 | { | 512 | { |
542 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | 513 | return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4); |
543 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | ||
544 | |||
545 | return hns_rcb_get_port_coalesced_frames(rcb_comm, port); | ||
546 | } | 514 | } |
547 | 515 | ||
548 | /** | 516 | /** |
549 | *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out | 517 | *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out |
550 | *@rcb_common: rcb_common device | 518 | *@rcb_common: rcb_common device |
551 | *@comm_index:port index | 519 | *@port_idx:port id in comm |
552 | *return time_out | 520 | * |
521 | *Returns: time_out | ||
553 | */ | 522 | */ |
554 | u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index) | 523 | u32 hns_rcb_get_coalesce_usecs( |
524 | struct rcb_common_cb *rcb_common, u32 port_idx) | ||
555 | { | 525 | { |
556 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | 526 | if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) |
557 | 527 | return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) / | |
558 | return rcb_comm->timeout; | 528 | HNS_RCB_CLK_FREQ_MHZ; |
529 | else | ||
530 | return dsaf_read_dev(rcb_common, | ||
531 | RCB_PORT_CFG_OVERTIME_REG + port_idx * 4); | ||
559 | } | 532 | } |
560 | 533 | ||
561 | /** | 534 | /** |
562 | *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out | 535 | *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out |
563 | *@rcb_common: rcb_common device | 536 | *@rcb_common: rcb_common device |
564 | *@comm_index: comm :index | 537 | *@port_idx:port id in comm |
565 | *@etx_usecs:tx time for coalesced time_out | 538 | *@timeout:tx/rx time for coalesced time_out |
566 | *@rx_usecs:rx time for coalesced time_out | 539 | * |
540 | * Returns: | ||
541 | * Zero for success, or an error code in case of failure | ||
567 | */ | 542 | */ |
568 | void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, | 543 | int hns_rcb_set_coalesce_usecs( |
569 | int port, u32 timeout) | 544 | struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout) |
570 | { | 545 | { |
571 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | 546 | u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx); |
572 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | ||
573 | 547 | ||
574 | if (rcb_comm->timeout == timeout) | 548 | if (timeout == old_timeout) |
575 | return; | 549 | return 0; |
576 | 550 | ||
577 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | 551 | if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { |
578 | dev_err(dsaf_dev->dev, | 552 | if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { |
579 | "error: not support coalesce_usecs setting!\n"); | 553 | dev_err(rcb_common->dsaf_dev->dev, |
580 | return; | 554 | "error: not support coalesce_usecs setting!\n"); |
555 | return -EINVAL; | ||
556 | } | ||
581 | } | 557 | } |
582 | rcb_comm->timeout = timeout; | 558 | if (timeout > HNS_RCB_MAX_COALESCED_USECS) { |
583 | hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout); | 559 | dev_err(rcb_common->dsaf_dev->dev, |
560 | "error: not support coalesce %dus!\n", timeout); | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); | ||
564 | return 0; | ||
584 | } | 565 | } |
585 | 566 | ||
586 | /** | 567 | /** |
587 | *hns_rcb_set_coalesced_frames - set rcb coalesced frames | 568 | *hns_rcb_set_coalesced_frames - set rcb coalesced frames |
588 | *@rcb_common: rcb_common device | 569 | *@rcb_common: rcb_common device |
589 | *@tx_frames:tx BD num for coalesced frames | 570 | *@port_idx:port id in comm |
590 | *@rx_frames:rx BD num for coalesced frames | 571 | *@coalesced_frames:tx/rx BD num for coalesced frames |
591 | *Return 0 on success, negative on failure | 572 | * |
573 | * Returns: | ||
574 | * Zero for success, or an error code in case of failure | ||
592 | */ | 575 | */ |
593 | int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, | 576 | int hns_rcb_set_coalesced_frames( |
594 | int port, u32 coalesced_frames) | 577 | struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames) |
595 | { | 578 | { |
596 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | 579 | u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx); |
597 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | ||
598 | u32 coalesced_reg_val; | ||
599 | int ret; | ||
600 | 580 | ||
601 | coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); | 581 | if (coalesced_frames == old_waterline) |
602 | |||
603 | if (coalesced_reg_val == coalesced_frames) | ||
604 | return 0; | 582 | return 0; |
605 | 583 | ||
606 | if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { | 584 | if (coalesced_frames >= rcb_common->desc_num || |
607 | ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, | 585 | coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES || |
608 | coalesced_frames); | 586 | coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) { |
609 | return ret; | 587 | dev_err(rcb_common->dsaf_dev->dev, |
610 | } else { | 588 | "error: not support coalesce_frames setting!\n"); |
611 | return -EINVAL; | 589 | return -EINVAL; |
612 | } | 590 | } |
591 | |||
592 | dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4, | ||
593 | coalesced_frames); | ||
594 | return 0; | ||
613 | } | 595 | } |
614 | 596 | ||
615 | /** | 597 | /** |
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, | |||
749 | rcb_common->dsaf_dev = dsaf_dev; | 731 | rcb_common->dsaf_dev = dsaf_dev; |
750 | 732 | ||
751 | rcb_common->desc_num = dsaf_dev->desc_num; | 733 | rcb_common->desc_num = dsaf_dev->desc_num; |
752 | rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES; | ||
753 | rcb_common->timeout = HNS_RCB_MAX_TIME_OUT; | ||
754 | 734 | ||
755 | hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); | 735 | hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); |
756 | rcb_common->max_vfn = max_vfn; | 736 | rcb_common->max_vfn = max_vfn; |
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index) | |||
951 | void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) | 931 | void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) |
952 | { | 932 | { |
953 | u32 *regs = data; | 933 | u32 *regs = data; |
934 | bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver); | ||
935 | bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX); | ||
936 | u32 reg_tmp; | ||
937 | u32 reg_num_tmp; | ||
954 | u32 i = 0; | 938 | u32 i = 0; |
955 | 939 | ||
956 | /*rcb common registers */ | 940 | /*rcb common registers */ |
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) | |||
1004 | = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); | 988 | = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); |
1005 | } | 989 | } |
1006 | 990 | ||
1007 | regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG); | 991 | reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG; |
1008 | regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); | 992 | reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6; |
1009 | regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); | 993 | for (i = 0; i < reg_num_tmp; i++) |
994 | regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp); | ||
995 | |||
996 | regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); | ||
997 | regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); | ||
1010 | 998 | ||
1011 | /* mark end of rcb common regs */ | 999 | /* mark end of rcb common regs */ |
1012 | for (i = 73; i < 80; i++) | 1000 | for (i = 78; i < 80; i++) |
1013 | regs[i] = 0xcccccccc; | 1001 | regs[i] = 0xcccccccc; |
1014 | } | 1002 | } |
1015 | 1003 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 81fe9f849973..eb61014ad615 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | |||
@@ -38,7 +38,9 @@ struct rcb_common_cb; | |||
38 | #define HNS_RCB_MAX_COALESCED_FRAMES 1023 | 38 | #define HNS_RCB_MAX_COALESCED_FRAMES 1023 |
39 | #define HNS_RCB_MIN_COALESCED_FRAMES 1 | 39 | #define HNS_RCB_MIN_COALESCED_FRAMES 1 |
40 | #define HNS_RCB_DEF_COALESCED_FRAMES 50 | 40 | #define HNS_RCB_DEF_COALESCED_FRAMES 50 |
41 | #define HNS_RCB_MAX_TIME_OUT 0x500 | 41 | #define HNS_RCB_CLK_FREQ_MHZ 350 |
42 | #define HNS_RCB_MAX_COALESCED_USECS 0x3ff | ||
43 | #define HNS_RCB_DEF_COALESCED_USECS 3 | ||
42 | 44 | ||
43 | #define HNS_RCB_COMMON_ENDIAN 1 | 45 | #define HNS_RCB_COMMON_ENDIAN 1 |
44 | 46 | ||
@@ -82,7 +84,7 @@ struct ring_pair_cb { | |||
82 | 84 | ||
83 | int virq[HNS_RCB_IRQ_NUM_PER_QUEUE]; | 85 | int virq[HNS_RCB_IRQ_NUM_PER_QUEUE]; |
84 | 86 | ||
85 | u8 port_id_in_dsa; | 87 | u8 port_id_in_comm; |
86 | u8 used_by_vf; | 88 | u8 used_by_vf; |
87 | 89 | ||
88 | struct hns_ring_hw_stats hw_stats; | 90 | struct hns_ring_hw_stats hw_stats; |
@@ -97,8 +99,6 @@ struct rcb_common_cb { | |||
97 | 99 | ||
98 | u8 comm_index; | 100 | u8 comm_index; |
99 | u32 ring_num; | 101 | u32 ring_num; |
100 | u32 coalesced_frames; /* frames threshold of rx interrupt */ | ||
101 | u32 timeout; /* time threshold of rx interrupt */ | ||
102 | u32 desc_num; /* desc num per queue*/ | 102 | u32 desc_num; /* desc num per queue*/ |
103 | 103 | ||
104 | struct ring_pair_cb ring_pair_cb[0]; | 104 | struct ring_pair_cb ring_pair_cb[0]; |
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); | |||
125 | void hns_rcb_init_hw(struct ring_pair_cb *ring); | 125 | void hns_rcb_init_hw(struct ring_pair_cb *ring); |
126 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); | 126 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); |
127 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); | 127 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); |
128 | 128 | u32 hns_rcb_get_coalesced_frames( | |
129 | u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index); | 129 | struct rcb_common_cb *rcb_common, u32 port_idx); |
130 | u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index); | 130 | u32 hns_rcb_get_coalesce_usecs( |
131 | void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, | 131 | struct rcb_common_cb *rcb_common, u32 port_idx); |
132 | int comm_index, u32 timeout); | 132 | int hns_rcb_set_coalesce_usecs( |
133 | int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, | 133 | struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout); |
134 | int comm_index, u32 coalesce_frames); | 134 | int hns_rcb_set_coalesced_frames( |
135 | struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames); | ||
135 | void hns_rcb_update_stats(struct hnae_queue *queue); | 136 | void hns_rcb_update_stats(struct hnae_queue *queue); |
136 | 137 | ||
137 | void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data); | 138 | void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index bf62687e5ea7..7d7204f45e78 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -103,6 +103,8 @@ | |||
103 | /*serdes offset**/ | 103 | /*serdes offset**/ |
104 | #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG | 104 | #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG |
105 | #define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG | 105 | #define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG |
106 | #define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG | ||
107 | #define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG | ||
106 | #define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL | 108 | #define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL |
107 | #define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL | 109 | #define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL |
108 | #define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL | 110 | #define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL |
@@ -404,6 +406,7 @@ | |||
404 | #define RCB_CFG_OVERTIME_REG 0x9300 | 406 | #define RCB_CFG_OVERTIME_REG 0x9300 |
405 | #define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 | 407 | #define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 |
406 | #define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 | 408 | #define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 |
409 | #define RCB_PORT_CFG_OVERTIME_REG 0x9430 | ||
407 | 410 | ||
408 | #define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 | 411 | #define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 |
409 | #define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004 | 412 | #define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004 |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 71aa37b4b338..687204b780b0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -913,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, | |||
913 | static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) | 913 | static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) |
914 | { | 914 | { |
915 | struct hnae_ring *ring = ring_data->ring; | 915 | struct hnae_ring *ring = ring_data->ring; |
916 | int head = ring->next_to_clean; | 916 | int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); |
917 | |||
918 | /* for hardware bug fixed */ | ||
919 | head = readl_relaxed(ring->io_base + RCB_REG_HEAD); | ||
920 | 917 | ||
921 | if (head != ring->next_to_clean) { | 918 | if (head != ring->next_to_clean) { |
922 | ring_data->ring->q->handle->dev->ops->toggle_ring_irq( | 919 | ring_data->ring->q->handle->dev->ops->toggle_ring_irq( |
@@ -959,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) | |||
959 | napi_complete(napi); | 956 | napi_complete(napi); |
960 | ring_data->ring->q->handle->dev->ops->toggle_ring_irq( | 957 | ring_data->ring->q->handle->dev->ops->toggle_ring_irq( |
961 | ring_data->ring, 0); | 958 | ring_data->ring, 0); |
962 | 959 | if (ring_data->fini_process) | |
963 | ring_data->fini_process(ring_data); | 960 | ring_data->fini_process(ring_data); |
964 | return 0; | 961 | return 0; |
965 | } | 962 | } |
966 | 963 | ||
@@ -1723,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
1723 | { | 1720 | { |
1724 | struct hnae_handle *h = priv->ae_handle; | 1721 | struct hnae_handle *h = priv->ae_handle; |
1725 | struct hns_nic_ring_data *rd; | 1722 | struct hns_nic_ring_data *rd; |
1723 | bool is_ver1 = AE_IS_VER1(priv->enet_ver); | ||
1726 | int i; | 1724 | int i; |
1727 | 1725 | ||
1728 | if (h->q_num > NIC_MAX_Q_PER_VF) { | 1726 | if (h->q_num > NIC_MAX_Q_PER_VF) { |
@@ -1740,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
1740 | rd->queue_index = i; | 1738 | rd->queue_index = i; |
1741 | rd->ring = &h->qs[i]->tx_ring; | 1739 | rd->ring = &h->qs[i]->tx_ring; |
1742 | rd->poll_one = hns_nic_tx_poll_one; | 1740 | rd->poll_one = hns_nic_tx_poll_one; |
1743 | rd->fini_process = hns_nic_tx_fini_pro; | 1741 | rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL; |
1744 | 1742 | ||
1745 | netif_napi_add(priv->netdev, &rd->napi, | 1743 | netif_napi_add(priv->netdev, &rd->napi, |
1746 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); | 1744 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); |
@@ -1752,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
1752 | rd->ring = &h->qs[i - h->q_num]->rx_ring; | 1750 | rd->ring = &h->qs[i - h->q_num]->rx_ring; |
1753 | rd->poll_one = hns_nic_rx_poll_one; | 1751 | rd->poll_one = hns_nic_rx_poll_one; |
1754 | rd->ex_process = hns_nic_rx_up_pro; | 1752 | rd->ex_process = hns_nic_rx_up_pro; |
1755 | rd->fini_process = hns_nic_rx_fini_pro; | 1753 | rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL; |
1756 | 1754 | ||
1757 | netif_napi_add(priv->netdev, &rd->napi, | 1755 | netif_napi_add(priv->netdev, &rd->napi, |
1758 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); | 1756 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); |
@@ -1816,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) | |||
1816 | h = hnae_get_handle(&priv->netdev->dev, | 1814 | h = hnae_get_handle(&priv->netdev->dev, |
1817 | priv->ae_node, priv->port_id, NULL); | 1815 | priv->ae_node, priv->port_id, NULL); |
1818 | if (IS_ERR_OR_NULL(h)) { | 1816 | if (IS_ERR_OR_NULL(h)) { |
1819 | ret = PTR_ERR(h); | 1817 | ret = -ENODEV; |
1820 | dev_dbg(priv->dev, "has not handle, register notifier!\n"); | 1818 | dev_dbg(priv->dev, "has not handle, register notifier!\n"); |
1821 | goto out; | 1819 | goto out; |
1822 | } | 1820 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 9c3ba65988e1..3d746c887873 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev, | |||
794 | (!ops->set_coalesce_frames)) | 794 | (!ops->set_coalesce_frames)) |
795 | return -ESRCH; | 795 | return -ESRCH; |
796 | 796 | ||
797 | ops->set_coalesce_usecs(priv->ae_handle, | 797 | ret = ops->set_coalesce_usecs(priv->ae_handle, |
798 | ec->rx_coalesce_usecs); | 798 | ec->rx_coalesce_usecs); |
799 | if (ret) | ||
800 | return ret; | ||
799 | 801 | ||
800 | ret = ops->set_coalesce_frames( | 802 | ret = ops->set_coalesce_frames( |
801 | priv->ae_handle, | 803 | priv->ae_handle, |
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value) | |||
1013 | struct phy_device *phy_dev = priv->phy; | 1015 | struct phy_device *phy_dev = priv->phy; |
1014 | 1016 | ||
1015 | retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); | 1017 | retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); |
1016 | retval = phy_write(phy_dev, HNS_LED_FC_REG, value); | 1018 | retval |= phy_write(phy_dev, HNS_LED_FC_REG, value); |
1017 | retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); | 1019 | retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); |
1018 | if (retval) { | 1020 | if (retval) { |
1019 | netdev_err(netdev, "mdiobus_write fail !\n"); | 1021 | netdev_err(netdev, "mdiobus_write fail !\n"); |
1020 | return retval; | 1022 | return retval; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 84fa28ceb200..e4949af7dd6b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h | |||
@@ -661,9 +661,7 @@ struct ixgbe_adapter { | |||
661 | #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) | 661 | #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) |
662 | #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) | 662 | #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) |
663 | #define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) | 663 | #define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) |
664 | #ifdef CONFIG_IXGBE_VXLAN | ||
665 | #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) | 664 | #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) |
666 | #endif | ||
667 | #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) | 665 | #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) |
668 | 666 | ||
669 | /* Tx fast path data */ | 667 | /* Tx fast path data */ |
@@ -675,6 +673,9 @@ struct ixgbe_adapter { | |||
675 | int num_rx_queues; | 673 | int num_rx_queues; |
676 | u16 rx_itr_setting; | 674 | u16 rx_itr_setting; |
677 | 675 | ||
676 | /* Port number used to identify VXLAN traffic */ | ||
677 | __be16 vxlan_port; | ||
678 | |||
678 | /* TX */ | 679 | /* TX */ |
679 | struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; | 680 | struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; |
680 | 681 | ||
@@ -782,9 +783,6 @@ struct ixgbe_adapter { | |||
782 | u32 timer_event_accumulator; | 783 | u32 timer_event_accumulator; |
783 | u32 vferr_refcount; | 784 | u32 vferr_refcount; |
784 | struct ixgbe_mac_addr *mac_table; | 785 | struct ixgbe_mac_addr *mac_table; |
785 | #ifdef CONFIG_IXGBE_VXLAN | ||
786 | u16 vxlan_port; | ||
787 | #endif | ||
788 | struct kobject *info_kobj; | 786 | struct kobject *info_kobj; |
789 | #ifdef CONFIG_IXGBE_HWMON | 787 | #ifdef CONFIG_IXGBE_HWMON |
790 | struct hwmon_buff *ixgbe_hwmon_buff; | 788 | struct hwmon_buff *ixgbe_hwmon_buff; |
@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[]; | |||
879 | extern char ixgbe_default_device_descr[]; | 877 | extern char ixgbe_default_device_descr[]; |
880 | #endif /* IXGBE_FCOE */ | 878 | #endif /* IXGBE_FCOE */ |
881 | 879 | ||
880 | int ixgbe_open(struct net_device *netdev); | ||
881 | int ixgbe_close(struct net_device *netdev); | ||
882 | void ixgbe_up(struct ixgbe_adapter *adapter); | 882 | void ixgbe_up(struct ixgbe_adapter *adapter); |
883 | void ixgbe_down(struct ixgbe_adapter *adapter); | 883 | void ixgbe_down(struct ixgbe_adapter *adapter); |
884 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); | 884 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 726e0eeee63b..b3530e1e3ce1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |||
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev, | |||
2053 | 2053 | ||
2054 | if (if_running) | 2054 | if (if_running) |
2055 | /* indicate we're in test mode */ | 2055 | /* indicate we're in test mode */ |
2056 | dev_close(netdev); | 2056 | ixgbe_close(netdev); |
2057 | else | 2057 | else |
2058 | ixgbe_reset(adapter); | 2058 | ixgbe_reset(adapter); |
2059 | 2059 | ||
@@ -2091,7 +2091,7 @@ skip_loopback: | |||
2091 | /* clear testing bit and return adapter to previous state */ | 2091 | /* clear testing bit and return adapter to previous state */ |
2092 | clear_bit(__IXGBE_TESTING, &adapter->state); | 2092 | clear_bit(__IXGBE_TESTING, &adapter->state); |
2093 | if (if_running) | 2093 | if (if_running) |
2094 | dev_open(netdev); | 2094 | ixgbe_open(netdev); |
2095 | else if (hw->mac.ops.disable_tx_laser) | 2095 | else if (hw->mac.ops.disable_tx_laser) |
2096 | hw->mac.ops.disable_tx_laser(hw); | 2096 | hw->mac.ops.disable_tx_laser(hw); |
2097 | } else { | 2097 | } else { |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 569cb0757c93..7df3fe29b210 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) | |||
4531 | case ixgbe_mac_X550: | 4531 | case ixgbe_mac_X550: |
4532 | case ixgbe_mac_X550EM_x: | 4532 | case ixgbe_mac_X550EM_x: |
4533 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); | 4533 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); |
4534 | #ifdef CONFIG_IXGBE_VXLAN | ||
4535 | adapter->vxlan_port = 0; | 4534 | adapter->vxlan_port = 0; |
4536 | #endif | ||
4537 | break; | 4535 | break; |
4538 | default: | 4536 | default: |
4539 | break; | 4537 | break; |
@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
5994 | * handler is registered with the OS, the watchdog timer is started, | 5992 | * handler is registered with the OS, the watchdog timer is started, |
5995 | * and the stack is notified that the interface is ready. | 5993 | * and the stack is notified that the interface is ready. |
5996 | **/ | 5994 | **/ |
5997 | static int ixgbe_open(struct net_device *netdev) | 5995 | int ixgbe_open(struct net_device *netdev) |
5998 | { | 5996 | { |
5999 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5997 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6000 | struct ixgbe_hw *hw = &adapter->hw; | 5998 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) | |||
6096 | * needs to be disabled. A global MAC reset is issued to stop the | 6094 | * needs to be disabled. A global MAC reset is issued to stop the |
6097 | * hardware, and all transmit and receive resources are freed. | 6095 | * hardware, and all transmit and receive resources are freed. |
6098 | **/ | 6096 | **/ |
6099 | static int ixgbe_close(struct net_device *netdev) | 6097 | int ixgbe_close(struct net_device *netdev) |
6100 | { | 6098 | { |
6101 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6099 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6102 | 6100 | ||
@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, | |||
7560 | struct ipv6hdr *ipv6; | 7558 | struct ipv6hdr *ipv6; |
7561 | } hdr; | 7559 | } hdr; |
7562 | struct tcphdr *th; | 7560 | struct tcphdr *th; |
7561 | unsigned int hlen; | ||
7563 | struct sk_buff *skb; | 7562 | struct sk_buff *skb; |
7564 | #ifdef CONFIG_IXGBE_VXLAN | ||
7565 | u8 encap = false; | ||
7566 | #endif /* CONFIG_IXGBE_VXLAN */ | ||
7567 | __be16 vlan_id; | 7563 | __be16 vlan_id; |
7564 | int l4_proto; | ||
7568 | 7565 | ||
7569 | /* if ring doesn't have a interrupt vector, cannot perform ATR */ | 7566 | /* if ring doesn't have a interrupt vector, cannot perform ATR */ |
7570 | if (!q_vector) | 7567 | if (!q_vector) |
@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring, | |||
7576 | 7573 | ||
7577 | ring->atr_count++; | 7574 | ring->atr_count++; |
7578 | 7575 | ||
7576 | /* currently only IPv4/IPv6 with TCP is supported */ | ||
7577 | if ((first->protocol != htons(ETH_P_IP)) && | ||
7578 | (first->protocol != htons(ETH_P_IPV6))) | ||
7579 | return; | ||
7580 | |||
7579 | /* snag network header to get L4 type and address */ | 7581 | /* snag network header to get L4 type and address */ |
7580 | skb = first->skb; | 7582 | skb = first->skb; |
7581 | hdr.network = skb_network_header(skb); | 7583 | hdr.network = skb_network_header(skb); |
7582 | if (!skb->encapsulation) { | ||
7583 | th = tcp_hdr(skb); | ||
7584 | } else { | ||
7585 | #ifdef CONFIG_IXGBE_VXLAN | 7584 | #ifdef CONFIG_IXGBE_VXLAN |
7585 | if (skb->encapsulation && | ||
7586 | first->protocol == htons(ETH_P_IP) && | ||
7587 | hdr.ipv4->protocol != IPPROTO_UDP) { | ||
7586 | struct ixgbe_adapter *adapter = q_vector->adapter; | 7588 | struct ixgbe_adapter *adapter = q_vector->adapter; |
7587 | 7589 | ||
7588 | if (!adapter->vxlan_port) | 7590 | /* verify the port is recognized as VXLAN */ |
7589 | return; | 7591 | if (adapter->vxlan_port && |
7590 | if (first->protocol != htons(ETH_P_IP) || | 7592 | udp_hdr(skb)->dest == adapter->vxlan_port) |
7591 | hdr.ipv4->version != IPVERSION || | 7593 | hdr.network = skb_inner_network_header(skb); |
7592 | hdr.ipv4->protocol != IPPROTO_UDP) { | ||
7593 | return; | ||
7594 | } | ||
7595 | if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port) | ||
7596 | return; | ||
7597 | encap = true; | ||
7598 | hdr.network = skb_inner_network_header(skb); | ||
7599 | th = inner_tcp_hdr(skb); | ||
7600 | #else | ||
7601 | return; | ||
7602 | #endif /* CONFIG_IXGBE_VXLAN */ | ||
7603 | } | 7594 | } |
7595 | #endif /* CONFIG_IXGBE_VXLAN */ | ||
7604 | 7596 | ||
7605 | /* Currently only IPv4/IPv6 with TCP is supported */ | 7597 | /* Currently only IPv4/IPv6 with TCP is supported */ |
7606 | switch (hdr.ipv4->version) { | 7598 | switch (hdr.ipv4->version) { |
7607 | case IPVERSION: | 7599 | case IPVERSION: |
7608 | if (hdr.ipv4->protocol != IPPROTO_TCP) | 7600 | /* access ihl as u8 to avoid unaligned access on ia64 */ |
7609 | return; | 7601 | hlen = (hdr.network[0] & 0x0F) << 2; |
7602 | l4_proto = hdr.ipv4->protocol; | ||
7610 | break; | 7603 | break; |
7611 | case 6: | 7604 | case 6: |
7612 | if (likely((unsigned char *)th - hdr.network == | 7605 | hlen = hdr.network - skb->data; |
7613 | sizeof(struct ipv6hdr))) { | 7606 | l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); |
7614 | if (hdr.ipv6->nexthdr != IPPROTO_TCP) | 7607 | hlen -= hdr.network - skb->data; |
7615 | return; | ||
7616 | } else { | ||
7617 | __be16 frag_off; | ||
7618 | u8 l4_hdr; | ||
7619 | |||
7620 | ipv6_skip_exthdr(skb, hdr.network - skb->data + | ||
7621 | sizeof(struct ipv6hdr), | ||
7622 | &l4_hdr, &frag_off); | ||
7623 | if (unlikely(frag_off)) | ||
7624 | return; | ||
7625 | if (l4_hdr != IPPROTO_TCP) | ||
7626 | return; | ||
7627 | } | ||
7628 | break; | 7608 | break; |
7629 | default: | 7609 | default: |
7630 | return; | 7610 | return; |
7631 | } | 7611 | } |
7632 | 7612 | ||
7633 | /* skip this packet since it is invalid or the socket is closing */ | 7613 | if (l4_proto != IPPROTO_TCP) |
7634 | if (!th || th->fin) | 7614 | return; |
7615 | |||
7616 | th = (struct tcphdr *)(hdr.network + hlen); | ||
7617 | |||
7618 | /* skip this packet since the socket is closing */ | ||
7619 | if (th->fin) | ||
7635 | return; | 7620 | return; |
7636 | 7621 | ||
7637 | /* sample on all syn packets or once every atr sample count */ | 7622 | /* sample on all syn packets or once every atr sample count */ |
@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, | |||
7682 | break; | 7667 | break; |
7683 | } | 7668 | } |
7684 | 7669 | ||
7685 | #ifdef CONFIG_IXGBE_VXLAN | 7670 | if (hdr.network != skb_network_header(skb)) |
7686 | if (encap) | ||
7687 | input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; | 7671 | input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; |
7688 | #endif /* CONFIG_IXGBE_VXLAN */ | ||
7689 | 7672 | ||
7690 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | 7673 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ |
7691 | ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, | 7674 | ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, |
@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
8209 | static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, | 8192 | static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, |
8210 | struct tc_cls_u32_offload *cls) | 8193 | struct tc_cls_u32_offload *cls) |
8211 | { | 8194 | { |
8195 | u32 uhtid = TC_U32_USERHTID(cls->knode.handle); | ||
8196 | u32 loc; | ||
8212 | int err; | 8197 | int err; |
8213 | 8198 | ||
8199 | if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) | ||
8200 | return -EINVAL; | ||
8201 | |||
8202 | loc = cls->knode.handle & 0xfffff; | ||
8203 | |||
8214 | spin_lock(&adapter->fdir_perfect_lock); | 8204 | spin_lock(&adapter->fdir_perfect_lock); |
8215 | err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); | 8205 | err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); |
8216 | spin_unlock(&adapter->fdir_perfect_lock); | 8206 | spin_unlock(&adapter->fdir_perfect_lock); |
8217 | return err; | 8207 | return err; |
8218 | } | 8208 | } |
@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, | |||
8221 | __be16 protocol, | 8211 | __be16 protocol, |
8222 | struct tc_cls_u32_offload *cls) | 8212 | struct tc_cls_u32_offload *cls) |
8223 | { | 8213 | { |
8214 | u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); | ||
8215 | |||
8216 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) | ||
8217 | return -EINVAL; | ||
8218 | |||
8224 | /* This ixgbe devices do not support hash tables at the moment | 8219 | /* This ixgbe devices do not support hash tables at the moment |
8225 | * so abort when given hash tables. | 8220 | * so abort when given hash tables. |
8226 | */ | 8221 | */ |
8227 | if (cls->hnode.divisor > 0) | 8222 | if (cls->hnode.divisor > 0) |
8228 | return -EINVAL; | 8223 | return -EINVAL; |
8229 | 8224 | ||
8230 | set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); | 8225 | set_bit(uhtid - 1, &adapter->tables); |
8231 | return 0; | 8226 | return 0; |
8232 | } | 8227 | } |
8233 | 8228 | ||
8234 | static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, | 8229 | static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, |
8235 | struct tc_cls_u32_offload *cls) | 8230 | struct tc_cls_u32_offload *cls) |
8236 | { | 8231 | { |
8237 | clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); | 8232 | u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); |
8233 | |||
8234 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) | ||
8235 | return -EINVAL; | ||
8236 | |||
8237 | clear_bit(uhtid - 1, &adapter->tables); | ||
8238 | return 0; | 8238 | return 0; |
8239 | } | 8239 | } |
8240 | 8240 | ||
@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, | |||
8252 | #endif | 8252 | #endif |
8253 | int i, err = 0; | 8253 | int i, err = 0; |
8254 | u8 queue; | 8254 | u8 queue; |
8255 | u32 handle; | 8255 | u32 uhtid, link_uhtid; |
8256 | 8256 | ||
8257 | memset(&mask, 0, sizeof(union ixgbe_atr_input)); | 8257 | memset(&mask, 0, sizeof(union ixgbe_atr_input)); |
8258 | handle = cls->knode.handle; | 8258 | uhtid = TC_U32_USERHTID(cls->knode.handle); |
8259 | link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); | ||
8259 | 8260 | ||
8260 | /* At the moment cls_u32 jumps to transport layer and skips past | 8261 | /* At the moment cls_u32 jumps to network layer and skips past |
8261 | * L2 headers. The canonical method to match L2 frames is to use | 8262 | * L2 headers. The canonical method to match L2 frames is to use |
8262 | * negative values. However this is error prone at best but really | 8263 | * negative values. However this is error prone at best but really |
8263 | * just broken because there is no way to "know" what sort of hdr | 8264 | * just broken because there is no way to "know" what sort of hdr |
8264 | * is in front of the transport layer. Fix cls_u32 to support L2 | 8265 | * is in front of the network layer. Fix cls_u32 to support L2 |
8265 | * headers when needed. | 8266 | * headers when needed. |
8266 | */ | 8267 | */ |
8267 | if (protocol != htons(ETH_P_IP)) | 8268 | if (protocol != htons(ETH_P_IP)) |
8268 | return -EINVAL; | 8269 | return -EINVAL; |
8269 | 8270 | ||
8270 | if (cls->knode.link_handle || | 8271 | if (link_uhtid) { |
8271 | cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) { | ||
8272 | struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; | 8272 | struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; |
8273 | u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle); | ||
8274 | 8273 | ||
8275 | if (!test_bit(uhtid, &adapter->tables)) | 8274 | if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) |
8275 | return -EINVAL; | ||
8276 | |||
8277 | if (!test_bit(link_uhtid - 1, &adapter->tables)) | ||
8276 | return -EINVAL; | 8278 | return -EINVAL; |
8277 | 8279 | ||
8278 | for (i = 0; nexthdr[i].jump; i++) { | 8280 | for (i = 0; nexthdr[i].jump; i++) { |
@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, | |||
8288 | nexthdr->mask != cls->knode.sel->keys[0].mask) | 8290 | nexthdr->mask != cls->knode.sel->keys[0].mask) |
8289 | return -EINVAL; | 8291 | return -EINVAL; |
8290 | 8292 | ||
8291 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) | 8293 | adapter->jump_tables[link_uhtid] = nexthdr->jump; |
8292 | return -EINVAL; | ||
8293 | |||
8294 | adapter->jump_tables[uhtid] = nexthdr->jump; | ||
8295 | } | 8294 | } |
8296 | return 0; | 8295 | return 0; |
8297 | } | 8296 | } |
@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, | |||
8308 | * To add support for new nodes update ixgbe_model.h parse structures | 8307 | * To add support for new nodes update ixgbe_model.h parse structures |
8309 | * this function _should_ be generic try not to hardcode values here. | 8308 | * this function _should_ be generic try not to hardcode values here. |
8310 | */ | 8309 | */ |
8311 | if (TC_U32_USERHTID(handle) == 0x800) { | 8310 | if (uhtid == 0x800) { |
8312 | field_ptr = adapter->jump_tables[0]; | 8311 | field_ptr = adapter->jump_tables[0]; |
8313 | } else { | 8312 | } else { |
8314 | if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) | 8313 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) |
8315 | return -EINVAL; | 8314 | return -EINVAL; |
8316 | 8315 | ||
8317 | field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; | 8316 | field_ptr = adapter->jump_tables[uhtid]; |
8318 | } | 8317 | } |
8319 | 8318 | ||
8320 | if (!field_ptr) | 8319 | if (!field_ptr) |
@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, | |||
8332 | int j; | 8331 | int j; |
8333 | 8332 | ||
8334 | for (j = 0; field_ptr[j].val; j++) { | 8333 | for (j = 0; field_ptr[j].val; j++) { |
8335 | if (field_ptr[j].off == off && | 8334 | if (field_ptr[j].off == off) { |
8336 | field_ptr[j].mask == m) { | ||
8337 | field_ptr[j].val(input, &mask, val, m); | 8335 | field_ptr[j].val(input, &mask, val, m); |
8338 | input->filter.formatted.flow_type |= | 8336 | input->filter.formatted.flow_type |= |
8339 | field_ptr[j].type; | 8337 | field_ptr[j].type; |
@@ -8393,8 +8391,8 @@ err_out: | |||
8393 | return -EINVAL; | 8391 | return -EINVAL; |
8394 | } | 8392 | } |
8395 | 8393 | ||
8396 | int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | 8394 | static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, |
8397 | struct tc_to_netdev *tc) | 8395 | struct tc_to_netdev *tc) |
8398 | { | 8396 | { |
8399 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 8397 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
8400 | 8398 | ||
@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, | |||
8554 | { | 8552 | { |
8555 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 8553 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
8556 | struct ixgbe_hw *hw = &adapter->hw; | 8554 | struct ixgbe_hw *hw = &adapter->hw; |
8557 | u16 new_port = ntohs(port); | ||
8558 | 8555 | ||
8559 | if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) | 8556 | if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) |
8560 | return; | 8557 | return; |
@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, | |||
8562 | if (sa_family == AF_INET6) | 8559 | if (sa_family == AF_INET6) |
8563 | return; | 8560 | return; |
8564 | 8561 | ||
8565 | if (adapter->vxlan_port == new_port) | 8562 | if (adapter->vxlan_port == port) |
8566 | return; | 8563 | return; |
8567 | 8564 | ||
8568 | if (adapter->vxlan_port) { | 8565 | if (adapter->vxlan_port) { |
8569 | netdev_info(dev, | 8566 | netdev_info(dev, |
8570 | "Hit Max num of VXLAN ports, not adding port %d\n", | 8567 | "Hit Max num of VXLAN ports, not adding port %d\n", |
8571 | new_port); | 8568 | ntohs(port)); |
8572 | return; | 8569 | return; |
8573 | } | 8570 | } |
8574 | 8571 | ||
8575 | adapter->vxlan_port = new_port; | 8572 | adapter->vxlan_port = port; |
8576 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); | 8573 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); |
8577 | } | 8574 | } |
8578 | 8575 | ||
8579 | /** | 8576 | /** |
@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, | |||
8586 | __be16 port) | 8583 | __be16 port) |
8587 | { | 8584 | { |
8588 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 8585 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
8589 | u16 new_port = ntohs(port); | ||
8590 | 8586 | ||
8591 | if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) | 8587 | if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) |
8592 | return; | 8588 | return; |
@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, | |||
8594 | if (sa_family == AF_INET6) | 8590 | if (sa_family == AF_INET6) |
8595 | return; | 8591 | return; |
8596 | 8592 | ||
8597 | if (adapter->vxlan_port != new_port) { | 8593 | if (adapter->vxlan_port != port) { |
8598 | netdev_info(dev, "Port %d was not found, not deleting\n", | 8594 | netdev_info(dev, "Port %d was not found, not deleting\n", |
8599 | new_port); | 8595 | ntohs(port)); |
8600 | return; | 8596 | return; |
8601 | } | 8597 | } |
8602 | 8598 | ||
@@ -9265,17 +9261,6 @@ skip_sriov: | |||
9265 | netdev->priv_flags |= IFF_UNICAST_FLT; | 9261 | netdev->priv_flags |= IFF_UNICAST_FLT; |
9266 | netdev->priv_flags |= IFF_SUPP_NOFCS; | 9262 | netdev->priv_flags |= IFF_SUPP_NOFCS; |
9267 | 9263 | ||
9268 | #ifdef CONFIG_IXGBE_VXLAN | ||
9269 | switch (adapter->hw.mac.type) { | ||
9270 | case ixgbe_mac_X550: | ||
9271 | case ixgbe_mac_X550EM_x: | ||
9272 | netdev->hw_enc_features |= NETIF_F_RXCSUM; | ||
9273 | break; | ||
9274 | default: | ||
9275 | break; | ||
9276 | } | ||
9277 | #endif /* CONFIG_IXGBE_VXLAN */ | ||
9278 | |||
9279 | #ifdef CONFIG_IXGBE_DCB | 9264 | #ifdef CONFIG_IXGBE_DCB |
9280 | netdev->dcbnl_ops = &dcbnl_ops; | 9265 | netdev->dcbnl_ops = &dcbnl_ops; |
9281 | #endif | 9266 | #endif |
@@ -9329,6 +9314,8 @@ skip_sriov: | |||
9329 | goto err_sw_init; | 9314 | goto err_sw_init; |
9330 | } | 9315 | } |
9331 | 9316 | ||
9317 | /* Set hw->mac.addr to permanent MAC address */ | ||
9318 | ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); | ||
9332 | ixgbe_mac_set_default_filter(adapter); | 9319 | ixgbe_mac_set_default_filter(adapter); |
9333 | 9320 | ||
9334 | setup_timer(&adapter->service_timer, &ixgbe_service_timer, | 9321 | setup_timer(&adapter->service_timer, &ixgbe_service_timer, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index ce48872d4782..74c53ad9d268 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | struct ixgbe_mat_field { | 33 | struct ixgbe_mat_field { |
34 | unsigned int off; | 34 | unsigned int off; |
35 | unsigned int mask; | ||
36 | int (*val)(struct ixgbe_fdir_filter *input, | 35 | int (*val)(struct ixgbe_fdir_filter *input, |
37 | union ixgbe_atr_input *mask, | 36 | union ixgbe_atr_input *mask, |
38 | u32 val, u32 m); | 37 | u32 val, u32 m); |
@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, | |||
58 | } | 57 | } |
59 | 58 | ||
60 | static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { | 59 | static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { |
61 | { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, | 60 | { .off = 12, .val = ixgbe_mat_prgm_sip, |
62 | .type = IXGBE_ATR_FLOW_TYPE_IPV4}, | 61 | .type = IXGBE_ATR_FLOW_TYPE_IPV4}, |
63 | { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, | 62 | { .off = 16, .val = ixgbe_mat_prgm_dip, |
64 | .type = IXGBE_ATR_FLOW_TYPE_IPV4}, | 63 | .type = IXGBE_ATR_FLOW_TYPE_IPV4}, |
65 | { .val = NULL } /* terminal node */ | 64 | { .val = NULL } /* terminal node */ |
66 | }; | 65 | }; |
67 | 66 | ||
68 | static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, | 67 | static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, |
69 | union ixgbe_atr_input *mask, | 68 | union ixgbe_atr_input *mask, |
70 | u32 val, u32 m) | 69 | u32 val, u32 m) |
71 | { | 70 | { |
72 | input->filter.formatted.src_port = val & 0xffff; | 71 | input->filter.formatted.src_port = val & 0xffff; |
73 | mask->formatted.src_port = m & 0xffff; | 72 | mask->formatted.src_port = m & 0xffff; |
74 | return 0; | 73 | input->filter.formatted.dst_port = val >> 16; |
75 | }; | 74 | mask->formatted.dst_port = m >> 16; |
76 | 75 | ||
77 | static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input, | ||
78 | union ixgbe_atr_input *mask, | ||
79 | u32 val, u32 m) | ||
80 | { | ||
81 | input->filter.formatted.dst_port = val & 0xffff; | ||
82 | mask->formatted.dst_port = m & 0xffff; | ||
83 | return 0; | 76 | return 0; |
84 | }; | 77 | }; |
85 | 78 | ||
86 | static struct ixgbe_mat_field ixgbe_tcp_fields[] = { | 79 | static struct ixgbe_mat_field ixgbe_tcp_fields[] = { |
87 | {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, | 80 | {.off = 0, .val = ixgbe_mat_prgm_ports, |
88 | .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, | ||
89 | {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport, | ||
90 | .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, | 81 | .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, |
91 | { .val = NULL } /* terminal node */ | 82 | { .val = NULL } /* terminal node */ |
92 | }; | 83 | }; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 87aca3f7c3de..68a9c646498e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | |||
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) | |||
355 | command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); | 355 | command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); |
356 | if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) | 356 | if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) |
357 | break; | 357 | break; |
358 | usleep_range(10, 20); | 358 | udelay(10); |
359 | } | 359 | } |
360 | if (ctrl) | 360 | if (ctrl) |
361 | *ctrl = command; | 361 | *ctrl = command; |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index c48aef613b0a..d7aa4b203f40 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c | |||
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev, | |||
680 | 680 | ||
681 | if (if_running) | 681 | if (if_running) |
682 | /* indicate we're in test mode */ | 682 | /* indicate we're in test mode */ |
683 | dev_close(netdev); | 683 | ixgbevf_close(netdev); |
684 | else | 684 | else |
685 | ixgbevf_reset(adapter); | 685 | ixgbevf_reset(adapter); |
686 | 686 | ||
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev, | |||
692 | 692 | ||
693 | clear_bit(__IXGBEVF_TESTING, &adapter->state); | 693 | clear_bit(__IXGBEVF_TESTING, &adapter->state); |
694 | if (if_running) | 694 | if (if_running) |
695 | dev_open(netdev); | 695 | ixgbevf_open(netdev); |
696 | } else { | 696 | } else { |
697 | hw_dbg(&adapter->hw, "online testing starting\n"); | 697 | hw_dbg(&adapter->hw, "online testing starting\n"); |
698 | /* Online tests */ | 698 | /* Online tests */ |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 68ec7daa04fd..991eeae81473 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | |||
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; | |||
486 | extern const char ixgbevf_driver_name[]; | 486 | extern const char ixgbevf_driver_name[]; |
487 | extern const char ixgbevf_driver_version[]; | 487 | extern const char ixgbevf_driver_version[]; |
488 | 488 | ||
489 | int ixgbevf_open(struct net_device *netdev); | ||
490 | int ixgbevf_close(struct net_device *netdev); | ||
489 | void ixgbevf_up(struct ixgbevf_adapter *adapter); | 491 | void ixgbevf_up(struct ixgbevf_adapter *adapter); |
490 | void ixgbevf_down(struct ixgbevf_adapter *adapter); | 492 | void ixgbevf_down(struct ixgbevf_adapter *adapter); |
491 | void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); | 493 | void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 0ea14c0a2e74..b0edae94d73d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) | |||
3122 | * handler is registered with the OS, the watchdog timer is started, | 3122 | * handler is registered with the OS, the watchdog timer is started, |
3123 | * and the stack is notified that the interface is ready. | 3123 | * and the stack is notified that the interface is ready. |
3124 | **/ | 3124 | **/ |
3125 | static int ixgbevf_open(struct net_device *netdev) | 3125 | int ixgbevf_open(struct net_device *netdev) |
3126 | { | 3126 | { |
3127 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 3127 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
3128 | struct ixgbe_hw *hw = &adapter->hw; | 3128 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -3205,7 +3205,7 @@ err_setup_reset: | |||
3205 | * needs to be disabled. A global MAC reset is issued to stop the | 3205 | * needs to be disabled. A global MAC reset is issued to stop the |
3206 | * hardware, and all transmit and receive resources are freed. | 3206 | * hardware, and all transmit and receive resources are freed. |
3207 | **/ | 3207 | **/ |
3208 | static int ixgbevf_close(struct net_device *netdev) | 3208 | int ixgbevf_close(struct net_device *netdev) |
3209 | { | 3209 | { |
3210 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 3210 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
3211 | 3211 | ||
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) | |||
3692 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 3692 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
3693 | struct ixgbe_hw *hw = &adapter->hw; | 3693 | struct ixgbe_hw *hw = &adapter->hw; |
3694 | struct sockaddr *addr = p; | 3694 | struct sockaddr *addr = p; |
3695 | int err; | ||
3695 | 3696 | ||
3696 | if (!is_valid_ether_addr(addr->sa_data)) | 3697 | if (!is_valid_ether_addr(addr->sa_data)) |
3697 | return -EADDRNOTAVAIL; | 3698 | return -EADDRNOTAVAIL; |
3698 | 3699 | ||
3699 | ether_addr_copy(netdev->dev_addr, addr->sa_data); | ||
3700 | ether_addr_copy(hw->mac.addr, addr->sa_data); | ||
3701 | |||
3702 | spin_lock_bh(&adapter->mbx_lock); | 3700 | spin_lock_bh(&adapter->mbx_lock); |
3703 | 3701 | ||
3704 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); | 3702 | err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); |
3705 | 3703 | ||
3706 | spin_unlock_bh(&adapter->mbx_lock); | 3704 | spin_unlock_bh(&adapter->mbx_lock); |
3707 | 3705 | ||
3706 | if (err) | ||
3707 | return -EPERM; | ||
3708 | |||
3709 | ether_addr_copy(hw->mac.addr, addr->sa_data); | ||
3710 | ether_addr_copy(netdev->dev_addr, addr->sa_data); | ||
3711 | |||
3708 | return 0; | 3712 | return 0; |
3709 | } | 3713 | } |
3710 | 3714 | ||
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 61a98f4c5746..4d613a4f2a7f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c | |||
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, | |||
408 | 408 | ||
409 | /* if nacked the address was rejected, use "perm_addr" */ | 409 | /* if nacked the address was rejected, use "perm_addr" */ |
410 | if (!ret_val && | 410 | if (!ret_val && |
411 | (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) | 411 | (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { |
412 | ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); | 412 | ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); |
413 | return IXGBE_ERR_MBX; | ||
414 | } | ||
413 | 415 | ||
414 | return ret_val; | 416 | return ret_val; |
415 | } | 417 | } |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 577f7ca7deba..7fc490225da5 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -260,7 +260,6 @@ | |||
260 | 260 | ||
261 | #define MVNETA_VLAN_TAG_LEN 4 | 261 | #define MVNETA_VLAN_TAG_LEN 4 |
262 | 262 | ||
263 | #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 | ||
264 | #define MVNETA_TX_CSUM_DEF_SIZE 1600 | 263 | #define MVNETA_TX_CSUM_DEF_SIZE 1600 |
265 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 | 264 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 |
266 | #define MVNETA_ACC_MODE_EXT1 1 | 265 | #define MVNETA_ACC_MODE_EXT1 1 |
@@ -300,7 +299,7 @@ | |||
300 | #define MVNETA_RX_PKT_SIZE(mtu) \ | 299 | #define MVNETA_RX_PKT_SIZE(mtu) \ |
301 | ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ | 300 | ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ |
302 | ETH_HLEN + ETH_FCS_LEN, \ | 301 | ETH_HLEN + ETH_FCS_LEN, \ |
303 | MVNETA_CPU_D_CACHE_LINE_SIZE) | 302 | cache_line_size()) |
304 | 303 | ||
305 | #define IS_TSO_HEADER(txq, addr) \ | 304 | #define IS_TSO_HEADER(txq, addr) \ |
306 | ((addr >= txq->tso_hdrs_phys) && \ | 305 | ((addr >= txq->tso_hdrs_phys) && \ |
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp, | |||
2764 | if (rxq->descs == NULL) | 2763 | if (rxq->descs == NULL) |
2765 | return -ENOMEM; | 2764 | return -ENOMEM; |
2766 | 2765 | ||
2767 | BUG_ON(rxq->descs != | ||
2768 | PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); | ||
2769 | |||
2770 | rxq->last_desc = rxq->size - 1; | 2766 | rxq->last_desc = rxq->size - 1; |
2771 | 2767 | ||
2772 | /* Set Rx descriptors queue starting address */ | 2768 | /* Set Rx descriptors queue starting address */ |
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, | |||
2837 | if (txq->descs == NULL) | 2833 | if (txq->descs == NULL) |
2838 | return -ENOMEM; | 2834 | return -ENOMEM; |
2839 | 2835 | ||
2840 | /* Make sure descriptor address is cache line size aligned */ | ||
2841 | BUG_ON(txq->descs != | ||
2842 | PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); | ||
2843 | |||
2844 | txq->last_desc = txq->size - 1; | 2836 | txq->last_desc = txq->size - 1; |
2845 | 2837 | ||
2846 | /* Set maximum bandwidth for enabled TXQs */ | 2838 | /* Set maximum bandwidth for enabled TXQs */ |
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) | |||
3050 | return mtu; | 3042 | return mtu; |
3051 | } | 3043 | } |
3052 | 3044 | ||
3045 | static void mvneta_percpu_enable(void *arg) | ||
3046 | { | ||
3047 | struct mvneta_port *pp = arg; | ||
3048 | |||
3049 | enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); | ||
3050 | } | ||
3051 | |||
3052 | static void mvneta_percpu_disable(void *arg) | ||
3053 | { | ||
3054 | struct mvneta_port *pp = arg; | ||
3055 | |||
3056 | disable_percpu_irq(pp->dev->irq); | ||
3057 | } | ||
3058 | |||
3053 | /* Change the device mtu */ | 3059 | /* Change the device mtu */ |
3054 | static int mvneta_change_mtu(struct net_device *dev, int mtu) | 3060 | static int mvneta_change_mtu(struct net_device *dev, int mtu) |
3055 | { | 3061 | { |
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) | |||
3074 | * reallocation of the queues | 3080 | * reallocation of the queues |
3075 | */ | 3081 | */ |
3076 | mvneta_stop_dev(pp); | 3082 | mvneta_stop_dev(pp); |
3083 | on_each_cpu(mvneta_percpu_disable, pp, true); | ||
3077 | 3084 | ||
3078 | mvneta_cleanup_txqs(pp); | 3085 | mvneta_cleanup_txqs(pp); |
3079 | mvneta_cleanup_rxqs(pp); | 3086 | mvneta_cleanup_rxqs(pp); |
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) | |||
3097 | return ret; | 3104 | return ret; |
3098 | } | 3105 | } |
3099 | 3106 | ||
3107 | on_each_cpu(mvneta_percpu_enable, pp, true); | ||
3100 | mvneta_start_dev(pp); | 3108 | mvneta_start_dev(pp); |
3101 | mvneta_port_up(pp); | 3109 | mvneta_port_up(pp); |
3102 | 3110 | ||
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp) | |||
3250 | pp->phy_dev = NULL; | 3258 | pp->phy_dev = NULL; |
3251 | } | 3259 | } |
3252 | 3260 | ||
3253 | static void mvneta_percpu_enable(void *arg) | ||
3254 | { | ||
3255 | struct mvneta_port *pp = arg; | ||
3256 | |||
3257 | enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); | ||
3258 | } | ||
3259 | |||
3260 | static void mvneta_percpu_disable(void *arg) | ||
3261 | { | ||
3262 | struct mvneta_port *pp = arg; | ||
3263 | |||
3264 | disable_percpu_irq(pp->dev->irq); | ||
3265 | } | ||
3266 | |||
3267 | /* Electing a CPU must be done in an atomic way: it should be done | 3261 | /* Electing a CPU must be done in an atomic way: it should be done |
3268 | * after or before the removal/insertion of a CPU and this function is | 3262 | * after or before the removal/insertion of a CPU and this function is |
3269 | * not reentrant. | 3263 | * not reentrant. |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index c797971aefab..868a957f24bb 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -321,7 +321,6 @@ | |||
321 | /* Lbtd 802.3 type */ | 321 | /* Lbtd 802.3 type */ |
322 | #define MVPP2_IP_LBDT_TYPE 0xfffa | 322 | #define MVPP2_IP_LBDT_TYPE 0xfffa |
323 | 323 | ||
324 | #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 | ||
325 | #define MVPP2_TX_CSUM_MAX_SIZE 9800 | 324 | #define MVPP2_TX_CSUM_MAX_SIZE 9800 |
326 | 325 | ||
327 | /* Timeout constants */ | 326 | /* Timeout constants */ |
@@ -377,7 +376,7 @@ | |||
377 | 376 | ||
378 | #define MVPP2_RX_PKT_SIZE(mtu) \ | 377 | #define MVPP2_RX_PKT_SIZE(mtu) \ |
379 | ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ | 378 | ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ |
380 | ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) | 379 | ETH_HLEN + ETH_FCS_LEN, cache_line_size()) |
381 | 380 | ||
382 | #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) | 381 | #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) |
383 | #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) | 382 | #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) |
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, | |||
4493 | if (!aggr_txq->descs) | 4492 | if (!aggr_txq->descs) |
4494 | return -ENOMEM; | 4493 | return -ENOMEM; |
4495 | 4494 | ||
4496 | /* Make sure descriptor address is cache line size aligned */ | ||
4497 | BUG_ON(aggr_txq->descs != | ||
4498 | PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); | ||
4499 | |||
4500 | aggr_txq->last_desc = aggr_txq->size - 1; | 4495 | aggr_txq->last_desc = aggr_txq->size - 1; |
4501 | 4496 | ||
4502 | /* Aggr TXQ no reset WA */ | 4497 | /* Aggr TXQ no reset WA */ |
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, | |||
4526 | if (!rxq->descs) | 4521 | if (!rxq->descs) |
4527 | return -ENOMEM; | 4522 | return -ENOMEM; |
4528 | 4523 | ||
4529 | BUG_ON(rxq->descs != | ||
4530 | PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); | ||
4531 | |||
4532 | rxq->last_desc = rxq->size - 1; | 4524 | rxq->last_desc = rxq->size - 1; |
4533 | 4525 | ||
4534 | /* Zero occupied and non-occupied counters - direct access */ | 4526 | /* Zero occupied and non-occupied counters - direct access */ |
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
4616 | if (!txq->descs) | 4608 | if (!txq->descs) |
4617 | return -ENOMEM; | 4609 | return -ENOMEM; |
4618 | 4610 | ||
4619 | /* Make sure descriptor address is cache line size aligned */ | ||
4620 | BUG_ON(txq->descs != | ||
4621 | PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); | ||
4622 | |||
4623 | txq->last_desc = txq->size - 1; | 4611 | txq->last_desc = txq->size - 1; |
4624 | 4612 | ||
4625 | /* Set Tx descriptors queue starting address - indirect access */ | 4613 | /* Set Tx descriptors queue starting address - indirect access */ |
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) | |||
6059 | 6047 | ||
6060 | /* Map physical Rx queue to port's logical Rx queue */ | 6048 | /* Map physical Rx queue to port's logical Rx queue */ |
6061 | rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); | 6049 | rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); |
6062 | if (!rxq) | 6050 | if (!rxq) { |
6051 | err = -ENOMEM; | ||
6063 | goto err_free_percpu; | 6052 | goto err_free_percpu; |
6053 | } | ||
6064 | /* Map this Rx queue to a physical queue */ | 6054 | /* Map this Rx queue to a physical queue */ |
6065 | rxq->id = port->first_rxq + queue; | 6055 | rxq->id = port->first_rxq + queue; |
6066 | rxq->port = port->id; | 6056 | rxq->port = port->id; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index ffd0accc2ec9..2017b0121f5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, | |||
2750 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 2750 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
2751 | enum qed_int_mode int_mode) | 2751 | enum qed_int_mode int_mode) |
2752 | { | 2752 | { |
2753 | int rc; | 2753 | int rc = 0; |
2754 | 2754 | ||
2755 | /* Configure AEU signal change to produce attentions */ | 2755 | /* Configure AEU signal change to produce attentions */ |
2756 | qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); | 2756 | qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index ef332708e5f2..6d31f92ef2b6 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "1.00.00.34" | 21 | #define DRV_VERSION "1.00.00.35" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4e1a7dba7c4a..087e14a3fba7 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -1377,11 +1377,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1377 | 1377 | ||
1378 | /* TAG and timestamp required flag */ | 1378 | /* TAG and timestamp required flag */ |
1379 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 1379 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1380 | skb_tx_timestamp(skb); | ||
1381 | desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; | 1380 | desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; |
1382 | desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); | 1381 | desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); |
1383 | } | 1382 | } |
1384 | 1383 | ||
1384 | skb_tx_timestamp(skb); | ||
1385 | /* Descriptor type must be set after all the above writes */ | 1385 | /* Descriptor type must be set after all the above writes */ |
1386 | dma_wmb(); | 1386 | dma_wmb(); |
1387 | desc->die_dt = DT_FEND; | 1387 | desc->die_dt = DT_FEND; |
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index b02eed12bfc5..73427e29df2a 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | |||
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev) | |||
155 | return 0; | 155 | return 0; |
156 | 156 | ||
157 | err_rx_irq_unmap: | 157 | err_rx_irq_unmap: |
158 | while (--i) | 158 | while (i--) |
159 | irq_dispose_mapping(priv->rxq[i]->irq_no); | 159 | irq_dispose_mapping(priv->rxq[i]->irq_no); |
160 | i = SXGBE_TX_QUEUES; | 160 | i = SXGBE_TX_QUEUES; |
161 | err_tx_irq_unmap: | 161 | err_tx_irq_unmap: |
162 | while (--i) | 162 | while (i--) |
163 | irq_dispose_mapping(priv->txq[i]->irq_no); | 163 | irq_dispose_mapping(priv->txq[i]->irq_no); |
164 | irq_dispose_mapping(priv->irq); | 164 | irq_dispose_mapping(priv->irq); |
165 | err_drv_remove: | 165 | err_drv_remove: |
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index e13228f115f0..011386f6f24d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c | |||
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, | |||
199 | { | 199 | { |
200 | unsigned int tdes1 = p->des1; | 200 | unsigned int tdes1 = p->des1; |
201 | 201 | ||
202 | if (mode == STMMAC_CHAIN_MODE) | ||
203 | norm_set_tx_desc_len_on_chain(p, len); | ||
204 | else | ||
205 | norm_set_tx_desc_len_on_ring(p, len); | ||
206 | |||
207 | if (is_fs) | 202 | if (is_fs) |
208 | tdes1 |= TDES1_FIRST_SEGMENT; | 203 | tdes1 |= TDES1_FIRST_SEGMENT; |
209 | else | 204 | else |
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, | |||
217 | if (ls) | 212 | if (ls) |
218 | tdes1 |= TDES1_LAST_SEGMENT; | 213 | tdes1 |= TDES1_LAST_SEGMENT; |
219 | 214 | ||
220 | if (tx_own) | ||
221 | tdes1 |= TDES0_OWN; | ||
222 | |||
223 | p->des1 = tdes1; | 215 | p->des1 = tdes1; |
216 | |||
217 | if (mode == STMMAC_CHAIN_MODE) | ||
218 | norm_set_tx_desc_len_on_chain(p, len); | ||
219 | else | ||
220 | norm_set_tx_desc_len_on_ring(p, len); | ||
221 | |||
222 | if (tx_own) | ||
223 | p->des0 |= TDES0_OWN; | ||
224 | } | 224 | } |
225 | 225 | ||
226 | static void ndesc_set_tx_ic(struct dma_desc *p) | 226 | static void ndesc_set_tx_ic(struct dma_desc *p) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4c5ce9848ca9..78464fa7fe1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -278,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) | |||
278 | */ | 278 | */ |
279 | bool stmmac_eee_init(struct stmmac_priv *priv) | 279 | bool stmmac_eee_init(struct stmmac_priv *priv) |
280 | { | 280 | { |
281 | char *phy_bus_name = priv->plat->phy_bus_name; | ||
282 | unsigned long flags; | 281 | unsigned long flags; |
283 | bool ret = false; | 282 | bool ret = false; |
284 | 283 | ||
@@ -290,7 +289,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
290 | goto out; | 289 | goto out; |
291 | 290 | ||
292 | /* Never init EEE in case of a switch is attached */ | 291 | /* Never init EEE in case of a switch is attached */ |
293 | if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) | 292 | if (priv->phydev->is_pseudo_fixed_link) |
294 | goto out; | 293 | goto out; |
295 | 294 | ||
296 | /* MAC core supports the EEE feature. */ | 295 | /* MAC core supports the EEE feature. */ |
@@ -827,12 +826,8 @@ static int stmmac_init_phy(struct net_device *dev) | |||
827 | phydev = of_phy_connect(dev, priv->plat->phy_node, | 826 | phydev = of_phy_connect(dev, priv->plat->phy_node, |
828 | &stmmac_adjust_link, 0, interface); | 827 | &stmmac_adjust_link, 0, interface); |
829 | } else { | 828 | } else { |
830 | if (priv->plat->phy_bus_name) | 829 | snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", |
831 | snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", | 830 | priv->plat->bus_id); |
832 | priv->plat->phy_bus_name, priv->plat->bus_id); | ||
833 | else | ||
834 | snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", | ||
835 | priv->plat->bus_id); | ||
836 | 831 | ||
837 | snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, | 832 | snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, |
838 | priv->plat->phy_addr); | 833 | priv->plat->phy_addr); |
@@ -871,9 +866,8 @@ static int stmmac_init_phy(struct net_device *dev) | |||
871 | } | 866 | } |
872 | 867 | ||
873 | /* If attached to a switch, there is no reason to poll phy handler */ | 868 | /* If attached to a switch, there is no reason to poll phy handler */ |
874 | if (priv->plat->phy_bus_name) | 869 | if (phydev->is_pseudo_fixed_link) |
875 | if (!strcmp(priv->plat->phy_bus_name, "fixed")) | 870 | phydev->irq = PHY_IGNORE_INTERRUPT; |
876 | phydev->irq = PHY_IGNORE_INTERRUPT; | ||
877 | 871 | ||
878 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" | 872 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" |
879 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); | 873 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ea76129dafc2..06704ca6f9ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
198 | struct mii_bus *new_bus; | 198 | struct mii_bus *new_bus; |
199 | struct stmmac_priv *priv = netdev_priv(ndev); | 199 | struct stmmac_priv *priv = netdev_priv(ndev); |
200 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; | 200 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; |
201 | int addr, found; | ||
202 | struct device_node *mdio_node = priv->plat->mdio_node; | 201 | struct device_node *mdio_node = priv->plat->mdio_node; |
202 | int addr, found; | ||
203 | 203 | ||
204 | if (!mdio_bus_data) | 204 | if (!mdio_bus_data) |
205 | return 0; | 205 | return 0; |
206 | 206 | ||
207 | if (IS_ENABLED(CONFIG_OF)) { | ||
208 | if (mdio_node) { | ||
209 | netdev_dbg(ndev, "FOUND MDIO subnode\n"); | ||
210 | } else { | ||
211 | netdev_warn(ndev, "No MDIO subnode found\n"); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | new_bus = mdiobus_alloc(); | 207 | new_bus = mdiobus_alloc(); |
216 | if (new_bus == NULL) | 208 | if (new_bus == NULL) |
217 | return -ENOMEM; | 209 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index dcbd2a1601e8..cf37ea558ecc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -132,6 +132,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources | ||
136 | * @plat: driver data platform structure | ||
137 | * @np: device tree node | ||
138 | * @dev: device pointer | ||
139 | * Description: | ||
140 | * The mdio bus will be allocated in case of a phy transceiver is on board; | ||
141 | * it will be NULL if the fixed-link is configured. | ||
142 | * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated | ||
143 | * in any case (for DSA, mdio must be registered even if fixed-link). | ||
144 | * The table below sums the supported configurations: | ||
145 | * ------------------------------- | ||
146 | * snps,phy-addr | Y | ||
147 | * ------------------------------- | ||
148 | * phy-handle | Y | ||
149 | * ------------------------------- | ||
150 | * fixed-link | N | ||
151 | * ------------------------------- | ||
152 | * snps,dwmac-mdio | | ||
153 | * even if | Y | ||
154 | * fixed-link | | ||
155 | * ------------------------------- | ||
156 | * | ||
157 | * It returns 0 in case of success otherwise -ENODEV. | ||
158 | */ | ||
159 | static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, | ||
160 | struct device_node *np, struct device *dev) | ||
161 | { | ||
162 | bool mdio = true; | ||
163 | |||
164 | /* If phy-handle property is passed from DT, use it as the PHY */ | ||
165 | plat->phy_node = of_parse_phandle(np, "phy-handle", 0); | ||
166 | if (plat->phy_node) | ||
167 | dev_dbg(dev, "Found phy-handle subnode\n"); | ||
168 | |||
169 | /* If phy-handle is not specified, check if we have a fixed-phy */ | ||
170 | if (!plat->phy_node && of_phy_is_fixed_link(np)) { | ||
171 | if ((of_phy_register_fixed_link(np) < 0)) | ||
172 | return -ENODEV; | ||
173 | |||
174 | dev_dbg(dev, "Found fixed-link subnode\n"); | ||
175 | plat->phy_node = of_node_get(np); | ||
176 | mdio = false; | ||
177 | } | ||
178 | |||
179 | /* If snps,dwmac-mdio is passed from DT, always register the MDIO */ | ||
180 | for_each_child_of_node(np, plat->mdio_node) { | ||
181 | if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) | ||
182 | break; | ||
183 | } | ||
184 | |||
185 | if (plat->mdio_node) { | ||
186 | dev_dbg(dev, "Found MDIO subnode\n"); | ||
187 | mdio = true; | ||
188 | } | ||
189 | |||
190 | if (mdio) | ||
191 | plat->mdio_bus_data = | ||
192 | devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data), | ||
193 | GFP_KERNEL); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | /** | ||
135 | * stmmac_probe_config_dt - parse device-tree driver parameters | 198 | * stmmac_probe_config_dt - parse device-tree driver parameters |
136 | * @pdev: platform_device structure | 199 | * @pdev: platform_device structure |
137 | * @plat: driver data platform structure | 200 | * @plat: driver data platform structure |
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) | |||
146 | struct device_node *np = pdev->dev.of_node; | 209 | struct device_node *np = pdev->dev.of_node; |
147 | struct plat_stmmacenet_data *plat; | 210 | struct plat_stmmacenet_data *plat; |
148 | struct stmmac_dma_cfg *dma_cfg; | 211 | struct stmmac_dma_cfg *dma_cfg; |
149 | struct device_node *child_node = NULL; | ||
150 | 212 | ||
151 | plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); | 213 | plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); |
152 | if (!plat) | 214 | if (!plat) |
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) | |||
166 | /* Default to phy auto-detection */ | 228 | /* Default to phy auto-detection */ |
167 | plat->phy_addr = -1; | 229 | plat->phy_addr = -1; |
168 | 230 | ||
169 | /* If we find a phy-handle property, use it as the PHY */ | ||
170 | plat->phy_node = of_parse_phandle(np, "phy-handle", 0); | ||
171 | |||
172 | /* If phy-handle is not specified, check if we have a fixed-phy */ | ||
173 | if (!plat->phy_node && of_phy_is_fixed_link(np)) { | ||
174 | if ((of_phy_register_fixed_link(np) < 0)) | ||
175 | return ERR_PTR(-ENODEV); | ||
176 | |||
177 | plat->phy_node = of_node_get(np); | ||
178 | } | ||
179 | |||
180 | for_each_child_of_node(np, child_node) | ||
181 | if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) { | ||
182 | plat->mdio_node = child_node; | ||
183 | break; | ||
184 | } | ||
185 | |||
186 | /* "snps,phy-addr" is not a standard property. Mark it as deprecated | 231 | /* "snps,phy-addr" is not a standard property. Mark it as deprecated |
187 | * and warn of its use. Remove this when phy node support is added. | 232 | * and warn of its use. Remove this when phy node support is added. |
188 | */ | 233 | */ |
189 | if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) | 234 | if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) |
190 | dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); | 235 | dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); |
191 | 236 | ||
192 | if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) | 237 | /* To Configure PHY by using all device-tree supported properties */ |
193 | plat->mdio_bus_data = NULL; | 238 | if (stmmac_dt_phy(plat, np, &pdev->dev)) |
194 | else | 239 | return ERR_PTR(-ENODEV); |
195 | plat->mdio_bus_data = | ||
196 | devm_kzalloc(&pdev->dev, | ||
197 | sizeof(struct stmmac_mdio_bus_data), | ||
198 | GFP_KERNEL); | ||
199 | 240 | ||
200 | of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); | 241 | of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); |
201 | 242 | ||
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b881a7b1e4f6..9636da0b6efc 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c | |||
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
339 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), | 339 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), |
340 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), | 340 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), |
341 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), | 341 | BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), |
342 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"), | ||
343 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"), | ||
342 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), | 344 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), |
343 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), | 345 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), |
344 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), | 346 | BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), |
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { | |||
348 | { PHY_ID_BCM7250, 0xfffffff0, }, | 350 | { PHY_ID_BCM7250, 0xfffffff0, }, |
349 | { PHY_ID_BCM7364, 0xfffffff0, }, | 351 | { PHY_ID_BCM7364, 0xfffffff0, }, |
350 | { PHY_ID_BCM7366, 0xfffffff0, }, | 352 | { PHY_ID_BCM7366, 0xfffffff0, }, |
353 | { PHY_ID_BCM7346, 0xfffffff0, }, | ||
354 | { PHY_ID_BCM7362, 0xfffffff0, }, | ||
351 | { PHY_ID_BCM7425, 0xfffffff0, }, | 355 | { PHY_ID_BCM7425, 0xfffffff0, }, |
352 | { PHY_ID_BCM7429, 0xfffffff0, }, | 356 | { PHY_ID_BCM7429, 0xfffffff0, }, |
353 | { PHY_ID_BCM7439, 0xfffffff0, }, | 357 | { PHY_ID_BCM7439, 0xfffffff0, }, |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 26c64d2782fa..a0f64cba86ba 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev) | |||
1198 | goto err_dev_open; | 1198 | goto err_dev_open; |
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | dev_uc_sync_multiple(port_dev, dev); | ||
1202 | dev_mc_sync_multiple(port_dev, dev); | ||
1203 | |||
1201 | err = vlan_vids_add_by_dev(port_dev, dev); | 1204 | err = vlan_vids_add_by_dev(port_dev, dev); |
1202 | if (err) { | 1205 | if (err) { |
1203 | netdev_err(dev, "Failed to add vlan ids to device %s\n", | 1206 | netdev_err(dev, "Failed to add vlan ids to device %s\n", |
@@ -1261,6 +1264,8 @@ err_enable_netpoll: | |||
1261 | vlan_vids_del_by_dev(port_dev, dev); | 1264 | vlan_vids_del_by_dev(port_dev, dev); |
1262 | 1265 | ||
1263 | err_vids_add: | 1266 | err_vids_add: |
1267 | dev_uc_unsync(port_dev, dev); | ||
1268 | dev_mc_unsync(port_dev, dev); | ||
1264 | dev_close(port_dev); | 1269 | dev_close(port_dev); |
1265 | 1270 | ||
1266 | err_dev_open: | 1271 | err_dev_open: |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index afdf950617c3..510e90a6bb26 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -622,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte | |||
622 | 622 | ||
623 | /* Re-attach the filter to persist device */ | 623 | /* Re-attach the filter to persist device */ |
624 | if (!skip_filter && (tun->filter_attached == true)) { | 624 | if (!skip_filter && (tun->filter_attached == true)) { |
625 | err = sk_attach_filter(&tun->fprog, tfile->socket.sk); | 625 | err = __sk_attach_filter(&tun->fprog, tfile->socket.sk, |
626 | lockdep_rtnl_is_held()); | ||
626 | if (!err) | 627 | if (!err) |
627 | goto out; | 628 | goto out; |
628 | } | 629 | } |
@@ -1822,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n) | |||
1822 | 1823 | ||
1823 | for (i = 0; i < n; i++) { | 1824 | for (i = 0; i < n; i++) { |
1824 | tfile = rtnl_dereference(tun->tfiles[i]); | 1825 | tfile = rtnl_dereference(tun->tfiles[i]); |
1825 | sk_detach_filter(tfile->socket.sk); | 1826 | __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held()); |
1826 | } | 1827 | } |
1827 | 1828 | ||
1828 | tun->filter_attached = false; | 1829 | tun->filter_attached = false; |
@@ -1835,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun) | |||
1835 | 1836 | ||
1836 | for (i = 0; i < tun->numqueues; i++) { | 1837 | for (i = 0; i < tun->numqueues; i++) { |
1837 | tfile = rtnl_dereference(tun->tfiles[i]); | 1838 | tfile = rtnl_dereference(tun->tfiles[i]); |
1838 | ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); | 1839 | ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk, |
1840 | lockdep_rtnl_is_held()); | ||
1839 | if (ret) { | 1841 | if (ret) { |
1840 | tun_detach_filter(tun, i); | 1842 | tun_detach_filter(tun, i); |
1841 | return ret; | 1843 | return ret; |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 86ba30ba35e8..2fb31edab125 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = { | |||
1626 | .driver_info = (unsigned long) &wwan_info, | 1626 | .driver_info = (unsigned long) &wwan_info, |
1627 | }, | 1627 | }, |
1628 | 1628 | ||
1629 | /* Telit LE910 V2 */ | ||
1630 | { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036, | ||
1631 | USB_CLASS_COMM, | ||
1632 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), | ||
1633 | .driver_info = (unsigned long)&wwan_noarp_info, | ||
1634 | }, | ||
1635 | |||
1629 | /* DW5812 LTE Verizon Mobile Broadband Card | 1636 | /* DW5812 LTE Verizon Mobile Broadband Card |
1630 | * Unlike DW5550 this device requires FLAG_NOARP | 1637 | * Unlike DW5550 this device requires FLAG_NOARP |
1631 | */ | 1638 | */ |
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 1bfe0fcaccf5..22e1a9a99a7d 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c | |||
@@ -38,7 +38,7 @@ | |||
38 | * HEADS UP: this handshaking isn't all that robust. This driver | 38 | * HEADS UP: this handshaking isn't all that robust. This driver |
39 | * gets confused easily if you unplug one end of the cable then | 39 | * gets confused easily if you unplug one end of the cable then |
40 | * try to connect it again; you'll need to restart both ends. The | 40 | * try to connect it again; you'll need to restart both ends. The |
41 | * "naplink" software (used by some PlayStation/2 deveopers) does | 41 | * "naplink" software (used by some PlayStation/2 developers) does |
42 | * the handshaking much better! Also, sometimes this hardware | 42 | * the handshaking much better! Also, sometimes this hardware |
43 | * seems to get wedged under load. Prolific docs are weak, and | 43 | * seems to get wedged under load. Prolific docs are weak, and |
44 | * don't identify differences between PL2301 and PL2302, much less | 44 | * don't identify differences between PL2301 and PL2302, much less |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 7d717c66bcb0..9d1fce8a6e84 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = { | |||
844 | {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ | 844 | {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ |
845 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ | 845 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ |
846 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 846 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
847 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | ||
847 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 848 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
848 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 849 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
849 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 850 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index ca5721c306bb..cc31c6f1f88e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -99,7 +99,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
99 | if (unlikely(bad_pmem)) | 99 | if (unlikely(bad_pmem)) |
100 | rc = -EIO; | 100 | rc = -EIO; |
101 | else { | 101 | else { |
102 | memcpy_from_pmem(mem + off, pmem_addr, len); | 102 | rc = memcpy_from_pmem(mem + off, pmem_addr, len); |
103 | flush_dcache_page(page); | 103 | flush_dcache_page(page); |
104 | } | 104 | } |
105 | } else { | 105 | } else { |
@@ -295,7 +295,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns, | |||
295 | 295 | ||
296 | if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align))) | 296 | if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align))) |
297 | return -EIO; | 297 | return -EIO; |
298 | memcpy_from_pmem(buf, pmem->virt_addr + offset, size); | 298 | return memcpy_from_pmem(buf, pmem->virt_addr + offset, size); |
299 | } else { | 299 | } else { |
300 | memcpy_to_pmem(pmem->virt_addr + offset, buf, size); | 300 | memcpy_to_pmem(pmem->virt_addr + offset, buf, size); |
301 | wmb_pmem(); | 301 | wmb_pmem(); |
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 9973cebb4d6f..07462d79d040 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c | |||
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
309 | * much memory to the process. | 309 | * much memory to the process. |
310 | */ | 310 | */ |
311 | down_read(¤t->mm->mmap_sem); | 311 | down_read(¤t->mm->mmap_sem); |
312 | ret = get_user_pages(current, current->mm, address, 1, | 312 | ret = get_user_pages(address, 1, !is_write, 0, &page, NULL); |
313 | !is_write, 0, &page, NULL); | ||
314 | up_read(¤t->mm->mmap_sem); | 313 | up_read(¤t->mm->mmap_sem); |
315 | if (ret < 0) | 314 | if (ret < 0) |
316 | break; | 315 | break; |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9607bc826460..5d4d91846357 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
@@ -886,7 +886,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode, | |||
886 | } | 886 | } |
887 | 887 | ||
888 | down_read(¤t->mm->mmap_sem); | 888 | down_read(¤t->mm->mmap_sem); |
889 | pinned = get_user_pages(current, current->mm, | 889 | pinned = get_user_pages( |
890 | (unsigned long)xfer->loc_addr & PAGE_MASK, | 890 | (unsigned long)xfer->loc_addr & PAGE_MASK, |
891 | nr_pages, dir == DMA_FROM_DEVICE, 0, | 891 | nr_pages, dir == DMA_FROM_DEVICE, 0, |
892 | page_list, NULL); | 892 | page_list, NULL); |
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index 6bb04d453247..6f056caa8a56 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c | |||
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev) | |||
189 | } | 189 | } |
190 | 190 | ||
191 | ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); | 191 | ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); |
192 | if (!ddata->boot_base) { | 192 | if (IS_ERR(ddata->boot_base)) { |
193 | dev_err(dev, "Boot base not found\n"); | 193 | dev_err(dev, "Boot base not found\n"); |
194 | return -EINVAL; | 194 | return PTR_ERR(ddata->boot_base); |
195 | } | 195 | } |
196 | 196 | ||
197 | err = of_property_read_u32_index(np, "st,syscfg", 1, | 197 | err = of_property_read_u32_index(np, "st,syscfg", 1, |
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 17ad5749e91d..1e560188dd13 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c | |||
@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu, | |||
317 | struct alias_pav_group *group; | 317 | struct alias_pav_group *group; |
318 | struct dasd_uid uid; | 318 | struct dasd_uid uid; |
319 | 319 | ||
320 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
320 | private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; | 321 | private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; |
321 | private->uid.base_unit_addr = | 322 | private->uid.base_unit_addr = |
322 | lcu->uac->unit[private->uid.real_unit_addr].base_ua; | 323 | lcu->uac->unit[private->uid.real_unit_addr].base_ua; |
323 | uid = private->uid; | 324 | uid = private->uid; |
324 | 325 | spin_unlock(get_ccwdev_lock(device->cdev)); | |
325 | /* if we have no PAV anyway, we don't need to bother with PAV groups */ | 326 | /* if we have no PAV anyway, we don't need to bother with PAV groups */ |
326 | if (lcu->pav == NO_PAV) { | 327 | if (lcu->pav == NO_PAV) { |
327 | list_move(&device->alias_list, &lcu->active_devices); | 328 | list_move(&device->alias_list, &lcu->active_devices); |
328 | return 0; | 329 | return 0; |
329 | } | 330 | } |
330 | |||
331 | group = _find_group(lcu, &uid); | 331 | group = _find_group(lcu, &uid); |
332 | if (!group) { | 332 | if (!group) { |
333 | group = kzalloc(sizeof(*group), GFP_ATOMIC); | 333 | group = kzalloc(sizeof(*group), GFP_ATOMIC); |
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr) | |||
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
399 | 399 | ||
400 | /* | ||
401 | * This function tries to lock all devices on an lcu via trylock | ||
402 | * return NULL on success otherwise return first failed device | ||
403 | */ | ||
404 | static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu, | ||
405 | struct dasd_device *pos) | ||
406 | |||
407 | { | ||
408 | struct alias_pav_group *pavgroup; | ||
409 | struct dasd_device *device; | ||
410 | |||
411 | list_for_each_entry(device, &lcu->active_devices, alias_list) { | ||
412 | if (device == pos) | ||
413 | continue; | ||
414 | if (!spin_trylock(get_ccwdev_lock(device->cdev))) | ||
415 | return device; | ||
416 | } | ||
417 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
418 | if (device == pos) | ||
419 | continue; | ||
420 | if (!spin_trylock(get_ccwdev_lock(device->cdev))) | ||
421 | return device; | ||
422 | } | ||
423 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
424 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { | ||
425 | if (device == pos) | ||
426 | continue; | ||
427 | if (!spin_trylock(get_ccwdev_lock(device->cdev))) | ||
428 | return device; | ||
429 | } | ||
430 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { | ||
431 | if (device == pos) | ||
432 | continue; | ||
433 | if (!spin_trylock(get_ccwdev_lock(device->cdev))) | ||
434 | return device; | ||
435 | } | ||
436 | } | ||
437 | return NULL; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * unlock all devices except the one that is specified as pos | ||
442 | * stop if enddev is specified and reached | ||
443 | */ | ||
444 | static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu, | ||
445 | struct dasd_device *pos, | ||
446 | struct dasd_device *enddev) | ||
447 | |||
448 | { | ||
449 | struct alias_pav_group *pavgroup; | ||
450 | struct dasd_device *device; | ||
451 | |||
452 | list_for_each_entry(device, &lcu->active_devices, alias_list) { | ||
453 | if (device == pos) | ||
454 | continue; | ||
455 | if (device == enddev) | ||
456 | return; | ||
457 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
458 | } | ||
459 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
460 | if (device == pos) | ||
461 | continue; | ||
462 | if (device == enddev) | ||
463 | return; | ||
464 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
465 | } | ||
466 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
467 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { | ||
468 | if (device == pos) | ||
469 | continue; | ||
470 | if (device == enddev) | ||
471 | return; | ||
472 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
473 | } | ||
474 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { | ||
475 | if (device == pos) | ||
476 | continue; | ||
477 | if (device == enddev) | ||
478 | return; | ||
479 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
480 | } | ||
481 | } | ||
482 | } | ||
483 | |||
484 | /* | ||
485 | * this function is needed because the locking order | ||
486 | * device lock -> lcu lock | ||
487 | * needs to be assured when iterating over devices in an LCU | ||
488 | * | ||
489 | * if a device is specified in pos then the device lock is already hold | ||
490 | */ | ||
491 | static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu, | ||
492 | struct dasd_device *pos, | ||
493 | unsigned long *flags) | ||
494 | { | ||
495 | struct dasd_device *failed; | ||
496 | |||
497 | do { | ||
498 | spin_lock_irqsave(&lcu->lock, *flags); | ||
499 | failed = _trylock_all_devices_on_lcu(lcu, pos); | ||
500 | if (failed) { | ||
501 | _unlock_all_devices_on_lcu(lcu, pos, failed); | ||
502 | spin_unlock_irqrestore(&lcu->lock, *flags); | ||
503 | cpu_relax(); | ||
504 | } | ||
505 | } while (failed); | ||
506 | } | ||
507 | |||
508 | static void _trylock_and_lock_lcu(struct alias_lcu *lcu, | ||
509 | struct dasd_device *pos) | ||
510 | { | ||
511 | struct dasd_device *failed; | ||
512 | |||
513 | do { | ||
514 | spin_lock(&lcu->lock); | ||
515 | failed = _trylock_all_devices_on_lcu(lcu, pos); | ||
516 | if (failed) { | ||
517 | _unlock_all_devices_on_lcu(lcu, pos, failed); | ||
518 | spin_unlock(&lcu->lock); | ||
519 | cpu_relax(); | ||
520 | } | ||
521 | } while (failed); | ||
522 | } | ||
523 | |||
524 | static int read_unit_address_configuration(struct dasd_device *device, | 400 | static int read_unit_address_configuration(struct dasd_device *device, |
525 | struct alias_lcu *lcu) | 401 | struct alias_lcu *lcu) |
526 | { | 402 | { |
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) | |||
615 | if (rc) | 491 | if (rc) |
616 | return rc; | 492 | return rc; |
617 | 493 | ||
618 | _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); | 494 | spin_lock_irqsave(&lcu->lock, flags); |
619 | lcu->pav = NO_PAV; | 495 | lcu->pav = NO_PAV; |
620 | for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { | 496 | for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { |
621 | switch (lcu->uac->unit[i].ua_type) { | 497 | switch (lcu->uac->unit[i].ua_type) { |
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) | |||
634 | alias_list) { | 510 | alias_list) { |
635 | _add_device_to_lcu(lcu, device, refdev); | 511 | _add_device_to_lcu(lcu, device, refdev); |
636 | } | 512 | } |
637 | _unlock_all_devices_on_lcu(lcu, NULL, NULL); | ||
638 | spin_unlock_irqrestore(&lcu->lock, flags); | 513 | spin_unlock_irqrestore(&lcu->lock, flags); |
639 | return 0; | 514 | return 0; |
640 | } | 515 | } |
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device) | |||
722 | 597 | ||
723 | lcu = private->lcu; | 598 | lcu = private->lcu; |
724 | rc = 0; | 599 | rc = 0; |
725 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 600 | spin_lock_irqsave(&lcu->lock, flags); |
726 | spin_lock(&lcu->lock); | ||
727 | if (!(lcu->flags & UPDATE_PENDING)) { | 601 | if (!(lcu->flags & UPDATE_PENDING)) { |
728 | rc = _add_device_to_lcu(lcu, device, device); | 602 | rc = _add_device_to_lcu(lcu, device, device); |
729 | if (rc) | 603 | if (rc) |
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device) | |||
733 | list_move(&device->alias_list, &lcu->active_devices); | 607 | list_move(&device->alias_list, &lcu->active_devices); |
734 | _schedule_lcu_update(lcu, device); | 608 | _schedule_lcu_update(lcu, device); |
735 | } | 609 | } |
736 | spin_unlock(&lcu->lock); | 610 | spin_unlock_irqrestore(&lcu->lock, flags); |
737 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
738 | return rc; | 611 | return rc; |
739 | } | 612 | } |
740 | 613 | ||
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu) | |||
933 | struct alias_pav_group *pavgroup; | 806 | struct alias_pav_group *pavgroup; |
934 | struct dasd_device *device; | 807 | struct dasd_device *device; |
935 | 808 | ||
936 | list_for_each_entry(device, &lcu->active_devices, alias_list) | 809 | list_for_each_entry(device, &lcu->active_devices, alias_list) { |
810 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
937 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); | 811 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); |
938 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) | 812 | spin_unlock(get_ccwdev_lock(device->cdev)); |
813 | } | ||
814 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
815 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
939 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); | 816 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); |
817 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
818 | } | ||
940 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | 819 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { |
941 | list_for_each_entry(device, &pavgroup->baselist, alias_list) | 820 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { |
821 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
942 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); | 822 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); |
943 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) | 823 | spin_unlock(get_ccwdev_lock(device->cdev)); |
824 | } | ||
825 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { | ||
826 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
944 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); | 827 | dasd_device_set_stop_bits(device, DASD_STOPPED_SU); |
828 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
829 | } | ||
945 | } | 830 | } |
946 | } | 831 | } |
947 | 832 | ||
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu) | |||
950 | struct alias_pav_group *pavgroup; | 835 | struct alias_pav_group *pavgroup; |
951 | struct dasd_device *device; | 836 | struct dasd_device *device; |
952 | 837 | ||
953 | list_for_each_entry(device, &lcu->active_devices, alias_list) | 838 | list_for_each_entry(device, &lcu->active_devices, alias_list) { |
839 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
954 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); | 840 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); |
955 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) | 841 | spin_unlock(get_ccwdev_lock(device->cdev)); |
842 | } | ||
843 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
844 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
956 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); | 845 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); |
846 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
847 | } | ||
957 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | 848 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { |
958 | list_for_each_entry(device, &pavgroup->baselist, alias_list) | 849 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { |
850 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
959 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); | 851 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); |
960 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) | 852 | spin_unlock(get_ccwdev_lock(device->cdev)); |
853 | } | ||
854 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { | ||
855 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
961 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); | 856 | dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); |
857 | spin_unlock(get_ccwdev_lock(device->cdev)); | ||
858 | } | ||
962 | } | 859 | } |
963 | } | 860 | } |
964 | 861 | ||
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work) | |||
984 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 881 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
985 | reset_summary_unit_check(lcu, device, suc_data->reason); | 882 | reset_summary_unit_check(lcu, device, suc_data->reason); |
986 | 883 | ||
987 | _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); | 884 | spin_lock_irqsave(&lcu->lock, flags); |
988 | _unstop_all_devices_on_lcu(lcu); | 885 | _unstop_all_devices_on_lcu(lcu); |
989 | _restart_all_base_devices_on_lcu(lcu); | 886 | _restart_all_base_devices_on_lcu(lcu); |
990 | /* 3. read new alias configuration */ | 887 | /* 3. read new alias configuration */ |
991 | _schedule_lcu_update(lcu, device); | 888 | _schedule_lcu_update(lcu, device); |
992 | lcu->suc_data.device = NULL; | 889 | lcu->suc_data.device = NULL; |
993 | dasd_put_device(device); | 890 | dasd_put_device(device); |
994 | _unlock_all_devices_on_lcu(lcu, NULL, NULL); | ||
995 | spin_unlock_irqrestore(&lcu->lock, flags); | 891 | spin_unlock_irqrestore(&lcu->lock, flags); |
996 | } | 892 | } |
997 | 893 | ||
998 | /* | 894 | void dasd_alias_handle_summary_unit_check(struct work_struct *work) |
999 | * note: this will be called from int handler context (cdev locked) | ||
1000 | */ | ||
1001 | void dasd_alias_handle_summary_unit_check(struct dasd_device *device, | ||
1002 | struct irb *irb) | ||
1003 | { | 895 | { |
896 | struct dasd_device *device = container_of(work, struct dasd_device, | ||
897 | suc_work); | ||
1004 | struct dasd_eckd_private *private = device->private; | 898 | struct dasd_eckd_private *private = device->private; |
1005 | struct alias_lcu *lcu; | 899 | struct alias_lcu *lcu; |
1006 | char reason; | 900 | unsigned long flags; |
1007 | char *sense; | ||
1008 | |||
1009 | sense = dasd_get_sense(irb); | ||
1010 | if (sense) { | ||
1011 | reason = sense[8]; | ||
1012 | DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", | ||
1013 | "eckd handle summary unit check: reason", reason); | ||
1014 | } else { | ||
1015 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | ||
1016 | "eckd handle summary unit check:" | ||
1017 | " no reason code available"); | ||
1018 | return; | ||
1019 | } | ||
1020 | 901 | ||
1021 | lcu = private->lcu; | 902 | lcu = private->lcu; |
1022 | if (!lcu) { | 903 | if (!lcu) { |
1023 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 904 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
1024 | "device not ready to handle summary" | 905 | "device not ready to handle summary" |
1025 | " unit check (no lcu structure)"); | 906 | " unit check (no lcu structure)"); |
1026 | return; | 907 | goto out; |
1027 | } | 908 | } |
1028 | _trylock_and_lock_lcu(lcu, device); | 909 | spin_lock_irqsave(&lcu->lock, flags); |
1029 | /* If this device is about to be removed just return and wait for | 910 | /* If this device is about to be removed just return and wait for |
1030 | * the next interrupt on a different device | 911 | * the next interrupt on a different device |
1031 | */ | 912 | */ |
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, | |||
1033 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 914 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
1034 | "device is in offline processing," | 915 | "device is in offline processing," |
1035 | " don't do summary unit check handling"); | 916 | " don't do summary unit check handling"); |
1036 | _unlock_all_devices_on_lcu(lcu, device, NULL); | 917 | goto out_unlock; |
1037 | spin_unlock(&lcu->lock); | ||
1038 | return; | ||
1039 | } | 918 | } |
1040 | if (lcu->suc_data.device) { | 919 | if (lcu->suc_data.device) { |
1041 | /* already scheduled or running */ | 920 | /* already scheduled or running */ |
1042 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 921 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
1043 | "previous instance of summary unit check worker" | 922 | "previous instance of summary unit check worker" |
1044 | " still pending"); | 923 | " still pending"); |
1045 | _unlock_all_devices_on_lcu(lcu, device, NULL); | 924 | goto out_unlock; |
1046 | spin_unlock(&lcu->lock); | ||
1047 | return ; | ||
1048 | } | 925 | } |
1049 | _stop_all_devices_on_lcu(lcu); | 926 | _stop_all_devices_on_lcu(lcu); |
1050 | /* prepare for lcu_update */ | 927 | /* prepare for lcu_update */ |
1051 | private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; | 928 | lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; |
1052 | lcu->suc_data.reason = reason; | 929 | lcu->suc_data.reason = private->suc_reason; |
1053 | lcu->suc_data.device = device; | 930 | lcu->suc_data.device = device; |
1054 | dasd_get_device(device); | 931 | dasd_get_device(device); |
1055 | _unlock_all_devices_on_lcu(lcu, device, NULL); | ||
1056 | spin_unlock(&lcu->lock); | ||
1057 | if (!schedule_work(&lcu->suc_data.worker)) | 932 | if (!schedule_work(&lcu->suc_data.worker)) |
1058 | dasd_put_device(device); | 933 | dasd_put_device(device); |
934 | out_unlock: | ||
935 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
936 | out: | ||
937 | clear_bit(DASD_FLAG_SUC, &device->flags); | ||
938 | dasd_put_device(device); | ||
1059 | }; | 939 | }; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 75c032dcf173..c1b4ae55e129 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
1682 | 1682 | ||
1683 | /* setup work queue for validate server*/ | 1683 | /* setup work queue for validate server*/ |
1684 | INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); | 1684 | INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); |
1685 | /* setup work queue for summary unit check */ | ||
1686 | INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check); | ||
1685 | 1687 | ||
1686 | if (!ccw_device_is_pathgroup(device->cdev)) { | 1688 | if (!ccw_device_is_pathgroup(device->cdev)) { |
1687 | dev_warn(&device->cdev->dev, | 1689 | dev_warn(&device->cdev->dev, |
@@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, | |||
2549 | device->state == DASD_STATE_ONLINE && | 2551 | device->state == DASD_STATE_ONLINE && |
2550 | !test_bit(DASD_FLAG_OFFLINE, &device->flags) && | 2552 | !test_bit(DASD_FLAG_OFFLINE, &device->flags) && |
2551 | !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { | 2553 | !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { |
2552 | /* | ||
2553 | * the state change could be caused by an alias | ||
2554 | * reassignment remove device from alias handling | ||
2555 | * to prevent new requests from being scheduled on | ||
2556 | * the wrong alias device | ||
2557 | */ | ||
2558 | dasd_alias_remove_device(device); | ||
2559 | |||
2560 | /* schedule worker to reload device */ | 2554 | /* schedule worker to reload device */ |
2561 | dasd_reload_device(device); | 2555 | dasd_reload_device(device); |
2562 | } | 2556 | } |
@@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, | |||
2571 | /* summary unit check */ | 2565 | /* summary unit check */ |
2572 | if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && | 2566 | if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && |
2573 | (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { | 2567 | (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { |
2574 | dasd_alias_handle_summary_unit_check(device, irb); | 2568 | if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { |
2569 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | ||
2570 | "eckd suc: device already notified"); | ||
2571 | return; | ||
2572 | } | ||
2573 | sense = dasd_get_sense(irb); | ||
2574 | if (!sense) { | ||
2575 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | ||
2576 | "eckd suc: no reason code available"); | ||
2577 | clear_bit(DASD_FLAG_SUC, &device->flags); | ||
2578 | return; | ||
2579 | |||
2580 | } | ||
2581 | private->suc_reason = sense[8]; | ||
2582 | DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", | ||
2583 | "eckd handle summary unit check: reason", | ||
2584 | private->suc_reason); | ||
2585 | dasd_get_device(device); | ||
2586 | if (!schedule_work(&device->suc_work)) | ||
2587 | dasd_put_device(device); | ||
2588 | |||
2575 | return; | 2589 | return; |
2576 | } | 2590 | } |
2577 | 2591 | ||
@@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device) | |||
4495 | struct dasd_uid uid; | 4509 | struct dasd_uid uid; |
4496 | unsigned long flags; | 4510 | unsigned long flags; |
4497 | 4511 | ||
4512 | /* | ||
4513 | * remove device from alias handling to prevent new requests | ||
4514 | * from being scheduled on the wrong alias device | ||
4515 | */ | ||
4516 | dasd_alias_remove_device(device); | ||
4517 | |||
4498 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 4518 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
4499 | old_base = private->uid.base_unit_addr; | 4519 | old_base = private->uid.base_unit_addr; |
4500 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 4520 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index f8f91ee652d3..6d9a6d3517cd 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h | |||
@@ -525,6 +525,7 @@ struct dasd_eckd_private { | |||
525 | int count; | 525 | int count; |
526 | 526 | ||
527 | u32 fcx_max_data; | 527 | u32 fcx_max_data; |
528 | char suc_reason; | ||
528 | }; | 529 | }; |
529 | 530 | ||
530 | 531 | ||
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *); | |||
534 | int dasd_alias_add_device(struct dasd_device *); | 535 | int dasd_alias_add_device(struct dasd_device *); |
535 | int dasd_alias_remove_device(struct dasd_device *); | 536 | int dasd_alias_remove_device(struct dasd_device *); |
536 | struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); | 537 | struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); |
537 | void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); | 538 | void dasd_alias_handle_summary_unit_check(struct work_struct *); |
538 | void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); | 539 | void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); |
539 | void dasd_alias_lcu_setup_complete(struct dasd_device *); | 540 | void dasd_alias_lcu_setup_complete(struct dasd_device *); |
540 | void dasd_alias_wait_for_lcu_setup(struct dasd_device *); | 541 | void dasd_alias_wait_for_lcu_setup(struct dasd_device *); |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 8de29be32a56..0f0add932e7a 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -470,6 +470,7 @@ struct dasd_device { | |||
470 | struct work_struct restore_device; | 470 | struct work_struct restore_device; |
471 | struct work_struct reload_device; | 471 | struct work_struct reload_device; |
472 | struct work_struct kick_validate; | 472 | struct work_struct kick_validate; |
473 | struct work_struct suc_work; | ||
473 | struct timer_list timer; | 474 | struct timer_list timer; |
474 | 475 | ||
475 | debug_info_t *debug_area; | 476 | debug_info_t *debug_area; |
@@ -542,6 +543,7 @@ struct dasd_attention_data { | |||
542 | #define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ | 543 | #define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ |
543 | #define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ | 544 | #define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ |
544 | #define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */ | 545 | #define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */ |
546 | #define DASD_FLAG_SUC 14 /* unhandled summary unit check */ | ||
545 | 547 | ||
546 | #define DASD_SLEEPON_START_TAG ((void *) 1) | 548 | #define DASD_SLEEPON_START_TAG ((void *) 1) |
547 | #define DASD_SLEEPON_END_TAG ((void *) 2) | 549 | #define DASD_SLEEPON_END_TAG ((void *) 2) |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index a24443ba59ea..97e5b69e0668 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl, | |||
779 | return 0; | 779 | return 0; |
780 | } | 780 | } |
781 | 781 | ||
782 | static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl) | ||
783 | { | ||
784 | struct iscsi_node_acl *acl = container_of(se_nacl, | ||
785 | struct iscsi_node_acl, se_node_acl); | ||
786 | |||
787 | configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group); | ||
788 | } | ||
789 | |||
790 | /* End items for lio_target_acl_cit */ | 782 | /* End items for lio_target_acl_cit */ |
791 | 783 | ||
792 | /* Start items for lio_target_tpg_attrib_cit */ | 784 | /* Start items for lio_target_tpg_attrib_cit */ |
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn( | |||
1247 | if (IS_ERR(tiqn)) | 1239 | if (IS_ERR(tiqn)) |
1248 | return ERR_CAST(tiqn); | 1240 | return ERR_CAST(tiqn); |
1249 | 1241 | ||
1242 | pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); | ||
1243 | pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:" | ||
1244 | " %s\n", name); | ||
1245 | return &tiqn->tiqn_wwn; | ||
1246 | } | ||
1247 | |||
1248 | static void lio_target_add_wwn_groups(struct se_wwn *wwn) | ||
1249 | { | ||
1250 | struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); | ||
1251 | |||
1250 | config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, | 1252 | config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, |
1251 | "iscsi_instance", &iscsi_stat_instance_cit); | 1253 | "iscsi_instance", &iscsi_stat_instance_cit); |
1252 | configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group, | 1254 | configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group, |
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn( | |||
1271 | "iscsi_logout_stats", &iscsi_stat_logout_cit); | 1273 | "iscsi_logout_stats", &iscsi_stat_logout_cit); |
1272 | configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, | 1274 | configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, |
1273 | &tiqn->tiqn_wwn.fabric_stat_group); | 1275 | &tiqn->tiqn_wwn.fabric_stat_group); |
1274 | |||
1275 | |||
1276 | pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); | ||
1277 | pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:" | ||
1278 | " %s\n", name); | ||
1279 | return &tiqn->tiqn_wwn; | ||
1280 | } | 1276 | } |
1281 | 1277 | ||
1282 | static void lio_target_call_coredeltiqn( | 1278 | static void lio_target_call_coredeltiqn( |
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn( | |||
1284 | { | 1280 | { |
1285 | struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); | 1281 | struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); |
1286 | 1282 | ||
1287 | configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group); | ||
1288 | |||
1289 | pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", | 1283 | pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", |
1290 | tiqn->tiqn); | 1284 | tiqn->tiqn); |
1291 | iscsit_del_tiqn(tiqn); | 1285 | iscsit_del_tiqn(tiqn); |
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = { | |||
1660 | .aborted_task = lio_aborted_task, | 1654 | .aborted_task = lio_aborted_task, |
1661 | .fabric_make_wwn = lio_target_call_coreaddtiqn, | 1655 | .fabric_make_wwn = lio_target_call_coreaddtiqn, |
1662 | .fabric_drop_wwn = lio_target_call_coredeltiqn, | 1656 | .fabric_drop_wwn = lio_target_call_coredeltiqn, |
1657 | .add_wwn_groups = lio_target_add_wwn_groups, | ||
1663 | .fabric_make_tpg = lio_target_tiqn_addtpg, | 1658 | .fabric_make_tpg = lio_target_tiqn_addtpg, |
1664 | .fabric_drop_tpg = lio_target_tiqn_deltpg, | 1659 | .fabric_drop_tpg = lio_target_tiqn_deltpg, |
1665 | .fabric_make_np = lio_target_call_addnptotpg, | 1660 | .fabric_make_np = lio_target_call_addnptotpg, |
1666 | .fabric_drop_np = lio_target_call_delnpfromtpg, | 1661 | .fabric_drop_np = lio_target_call_delnpfromtpg, |
1667 | .fabric_init_nodeacl = lio_target_init_nodeacl, | 1662 | .fabric_init_nodeacl = lio_target_init_nodeacl, |
1668 | .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl, | ||
1669 | 1663 | ||
1670 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, | 1664 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, |
1671 | .tfc_wwn_attrs = lio_target_wwn_attrs, | 1665 | .tfc_wwn_attrs = lio_target_wwn_attrs, |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 1bd5c72b663e..31a096aa16ab 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item) | |||
338 | { | 338 | { |
339 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | 339 | struct se_node_acl *se_nacl = container_of(to_config_group(item), |
340 | struct se_node_acl, acl_group); | 340 | struct se_node_acl, acl_group); |
341 | struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf; | ||
342 | 341 | ||
343 | if (tf->tf_ops->fabric_cleanup_nodeacl) | 342 | configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group); |
344 | tf->tf_ops->fabric_cleanup_nodeacl(se_nacl); | ||
345 | core_tpg_del_initiator_node_acl(se_nacl); | 343 | core_tpg_del_initiator_node_acl(se_nacl); |
346 | } | 344 | } |
347 | 345 | ||
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl( | |||
383 | if (IS_ERR(se_nacl)) | 381 | if (IS_ERR(se_nacl)) |
384 | return ERR_CAST(se_nacl); | 382 | return ERR_CAST(se_nacl); |
385 | 383 | ||
386 | if (tf->tf_ops->fabric_init_nodeacl) { | ||
387 | int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name); | ||
388 | if (ret) { | ||
389 | core_tpg_del_initiator_node_acl(se_nacl); | ||
390 | return ERR_PTR(ret); | ||
391 | } | ||
392 | } | ||
393 | |||
394 | config_group_init_type_name(&se_nacl->acl_group, name, | 384 | config_group_init_type_name(&se_nacl->acl_group, name, |
395 | &tf->tf_tpg_nacl_base_cit); | 385 | &tf->tf_tpg_nacl_base_cit); |
396 | 386 | ||
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl( | |||
414 | configfs_add_default_group(&se_nacl->acl_fabric_stat_group, | 404 | configfs_add_default_group(&se_nacl->acl_fabric_stat_group, |
415 | &se_nacl->acl_group); | 405 | &se_nacl->acl_group); |
416 | 406 | ||
407 | if (tf->tf_ops->fabric_init_nodeacl) { | ||
408 | int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name); | ||
409 | if (ret) { | ||
410 | configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group); | ||
411 | core_tpg_del_initiator_node_acl(se_nacl); | ||
412 | return ERR_PTR(ret); | ||
413 | } | ||
414 | } | ||
415 | |||
417 | return &se_nacl->acl_group; | 416 | return &se_nacl->acl_group; |
418 | } | 417 | } |
419 | 418 | ||
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item) | |||
892 | struct se_wwn, wwn_group); | 891 | struct se_wwn, wwn_group); |
893 | struct target_fabric_configfs *tf = wwn->wwn_tf; | 892 | struct target_fabric_configfs *tf = wwn->wwn_tf; |
894 | 893 | ||
894 | configfs_remove_default_groups(&wwn->fabric_stat_group); | ||
895 | tf->tf_ops->fabric_drop_wwn(wwn); | 895 | tf->tf_ops->fabric_drop_wwn(wwn); |
896 | } | 896 | } |
897 | 897 | ||
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn( | |||
945 | &tf->tf_wwn_fabric_stats_cit); | 945 | &tf->tf_wwn_fabric_stats_cit); |
946 | configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); | 946 | configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); |
947 | 947 | ||
948 | if (tf->tf_ops->add_wwn_groups) | ||
949 | tf->tf_ops->add_wwn_groups(wwn); | ||
948 | return &wwn->wwn_group; | 950 | return &wwn->wwn_group; |
949 | } | 951 | } |
950 | 952 | ||
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4b02591b0301..d01f89d130e0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/buffer_head.h> | 25 | #include <linux/buffer_head.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/kthread.h> | 27 | #include <linux/kthread.h> |
28 | #include <linux/freezer.h> | ||
29 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
30 | #include <linux/migrate.h> | 29 | #include <linux/migrate.h> |
31 | #include <linux/ratelimit.h> | 30 | #include <linux/ratelimit.h> |
@@ -303,7 +302,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, | |||
303 | err = map_private_extent_buffer(buf, offset, 32, | 302 | err = map_private_extent_buffer(buf, offset, 32, |
304 | &kaddr, &map_start, &map_len); | 303 | &kaddr, &map_start, &map_len); |
305 | if (err) | 304 | if (err) |
306 | return 1; | 305 | return err; |
307 | cur_len = min(len, map_len - (offset - map_start)); | 306 | cur_len = min(len, map_len - (offset - map_start)); |
308 | crc = btrfs_csum_data(kaddr + offset - map_start, | 307 | crc = btrfs_csum_data(kaddr + offset - map_start, |
309 | crc, cur_len); | 308 | crc, cur_len); |
@@ -313,7 +312,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, | |||
313 | if (csum_size > sizeof(inline_result)) { | 312 | if (csum_size > sizeof(inline_result)) { |
314 | result = kzalloc(csum_size, GFP_NOFS); | 313 | result = kzalloc(csum_size, GFP_NOFS); |
315 | if (!result) | 314 | if (!result) |
316 | return 1; | 315 | return -ENOMEM; |
317 | } else { | 316 | } else { |
318 | result = (char *)&inline_result; | 317 | result = (char *)&inline_result; |
319 | } | 318 | } |
@@ -334,7 +333,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, | |||
334 | val, found, btrfs_header_level(buf)); | 333 | val, found, btrfs_header_level(buf)); |
335 | if (result != (char *)&inline_result) | 334 | if (result != (char *)&inline_result) |
336 | kfree(result); | 335 | kfree(result); |
337 | return 1; | 336 | return -EUCLEAN; |
338 | } | 337 | } |
339 | } else { | 338 | } else { |
340 | write_extent_buffer(buf, result, 0, csum_size); | 339 | write_extent_buffer(buf, result, 0, csum_size); |
@@ -513,11 +512,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) | |||
513 | eb = (struct extent_buffer *)page->private; | 512 | eb = (struct extent_buffer *)page->private; |
514 | if (page != eb->pages[0]) | 513 | if (page != eb->pages[0]) |
515 | return 0; | 514 | return 0; |
515 | |||
516 | found_start = btrfs_header_bytenr(eb); | 516 | found_start = btrfs_header_bytenr(eb); |
517 | if (WARN_ON(found_start != start || !PageUptodate(page))) | 517 | /* |
518 | return 0; | 518 | * Please do not consolidate these warnings into a single if. |
519 | csum_tree_block(fs_info, eb, 0); | 519 | * It is useful to know what went wrong. |
520 | return 0; | 520 | */ |
521 | if (WARN_ON(found_start != start)) | ||
522 | return -EUCLEAN; | ||
523 | if (WARN_ON(!PageUptodate(page))) | ||
524 | return -EUCLEAN; | ||
525 | |||
526 | ASSERT(memcmp_extent_buffer(eb, fs_info->fsid, | ||
527 | btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0); | ||
528 | |||
529 | return csum_tree_block(fs_info, eb, 0); | ||
521 | } | 530 | } |
522 | 531 | ||
523 | static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, | 532 | static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, |
@@ -661,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, | |||
661 | eb, found_level); | 670 | eb, found_level); |
662 | 671 | ||
663 | ret = csum_tree_block(fs_info, eb, 1); | 672 | ret = csum_tree_block(fs_info, eb, 1); |
664 | if (ret) { | 673 | if (ret) |
665 | ret = -EIO; | ||
666 | goto err; | 674 | goto err; |
667 | } | ||
668 | 675 | ||
669 | /* | 676 | /* |
670 | * If this is a leaf block and it is corrupt, set the corrupt bit so | 677 | * If this is a leaf block and it is corrupt, set the corrupt bit so |
@@ -1831,7 +1838,7 @@ static int cleaner_kthread(void *arg) | |||
1831 | */ | 1838 | */ |
1832 | btrfs_delete_unused_bgs(root->fs_info); | 1839 | btrfs_delete_unused_bgs(root->fs_info); |
1833 | sleep: | 1840 | sleep: |
1834 | if (!try_to_freeze() && !again) { | 1841 | if (!again) { |
1835 | set_current_state(TASK_INTERRUPTIBLE); | 1842 | set_current_state(TASK_INTERRUPTIBLE); |
1836 | if (!kthread_should_stop()) | 1843 | if (!kthread_should_stop()) |
1837 | schedule(); | 1844 | schedule(); |
@@ -1921,14 +1928,12 @@ sleep: | |||
1921 | if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, | 1928 | if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, |
1922 | &root->fs_info->fs_state))) | 1929 | &root->fs_info->fs_state))) |
1923 | btrfs_cleanup_transaction(root); | 1930 | btrfs_cleanup_transaction(root); |
1924 | if (!try_to_freeze()) { | 1931 | set_current_state(TASK_INTERRUPTIBLE); |
1925 | set_current_state(TASK_INTERRUPTIBLE); | 1932 | if (!kthread_should_stop() && |
1926 | if (!kthread_should_stop() && | 1933 | (!btrfs_transaction_blocked(root->fs_info) || |
1927 | (!btrfs_transaction_blocked(root->fs_info) || | 1934 | cannot_commit)) |
1928 | cannot_commit)) | 1935 | schedule_timeout(delay); |
1929 | schedule_timeout(delay); | 1936 | __set_current_state(TASK_RUNNING); |
1930 | __set_current_state(TASK_RUNNING); | ||
1931 | } | ||
1932 | } while (!kthread_should_stop()); | 1937 | } while (!kthread_should_stop()); |
1933 | return 0; | 1938 | return 0; |
1934 | } | 1939 | } |
diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 519112168a9e..1669f6291c95 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c | |||
@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g, | |||
343 | struct dlm_cluster *cl = NULL; | 343 | struct dlm_cluster *cl = NULL; |
344 | struct dlm_spaces *sps = NULL; | 344 | struct dlm_spaces *sps = NULL; |
345 | struct dlm_comms *cms = NULL; | 345 | struct dlm_comms *cms = NULL; |
346 | void *gps = NULL; | ||
347 | 346 | ||
348 | cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); | 347 | cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); |
349 | sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); | 348 | sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); |
350 | cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); | 349 | cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); |
351 | 350 | ||
352 | if (!cl || !gps || !sps || !cms) | 351 | if (!cl || !sps || !cms) |
353 | goto fail; | 352 | goto fail; |
354 | 353 | ||
355 | config_group_init_type_name(&cl->group, name, &cluster_type); | 354 | config_group_init_type_name(&cl->group, name, &cluster_type); |
diff --git a/fs/namei.c b/fs/namei.c index 794f81dce766..1d9ca2d5dff6 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1740,15 +1740,17 @@ static int walk_component(struct nameidata *nd, int flags) | |||
1740 | nd->flags); | 1740 | nd->flags); |
1741 | if (IS_ERR(path.dentry)) | 1741 | if (IS_ERR(path.dentry)) |
1742 | return PTR_ERR(path.dentry); | 1742 | return PTR_ERR(path.dentry); |
1743 | if (unlikely(d_is_negative(path.dentry))) { | 1743 | |
1744 | dput(path.dentry); | ||
1745 | return -ENOENT; | ||
1746 | } | ||
1747 | path.mnt = nd->path.mnt; | 1744 | path.mnt = nd->path.mnt; |
1748 | err = follow_managed(&path, nd); | 1745 | err = follow_managed(&path, nd); |
1749 | if (unlikely(err < 0)) | 1746 | if (unlikely(err < 0)) |
1750 | return err; | 1747 | return err; |
1751 | 1748 | ||
1749 | if (unlikely(d_is_negative(path.dentry))) { | ||
1750 | path_to_nameidata(&path, nd); | ||
1751 | return -ENOENT; | ||
1752 | } | ||
1753 | |||
1752 | seq = 0; /* we are already out of RCU mode */ | 1754 | seq = 0; /* we are already out of RCU mode */ |
1753 | inode = d_backing_inode(path.dentry); | 1755 | inode = d_backing_inode(path.dentry); |
1754 | } | 1756 | } |
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c index f30b6ecacdd1..ba7dec40771e 100644 --- a/fs/orangefs/dir.c +++ b/fs/orangefs/dir.c | |||
@@ -235,7 +235,7 @@ get_new_buffer_index: | |||
235 | if (ret == -EIO && op_state_purged(new_op)) { | 235 | if (ret == -EIO && op_state_purged(new_op)) { |
236 | gossip_err("%s: Client is down. Aborting readdir call.\n", | 236 | gossip_err("%s: Client is down. Aborting readdir call.\n", |
237 | __func__); | 237 | __func__); |
238 | goto out_slot; | 238 | goto out_free_op; |
239 | } | 239 | } |
240 | 240 | ||
241 | if (ret < 0 || new_op->downcall.status != 0) { | 241 | if (ret < 0 || new_op->downcall.status != 0) { |
@@ -244,14 +244,14 @@ get_new_buffer_index: | |||
244 | new_op->downcall.status); | 244 | new_op->downcall.status); |
245 | if (ret >= 0) | 245 | if (ret >= 0) |
246 | ret = new_op->downcall.status; | 246 | ret = new_op->downcall.status; |
247 | goto out_slot; | 247 | goto out_free_op; |
248 | } | 248 | } |
249 | 249 | ||
250 | dents_buf = new_op->downcall.trailer_buf; | 250 | dents_buf = new_op->downcall.trailer_buf; |
251 | if (dents_buf == NULL) { | 251 | if (dents_buf == NULL) { |
252 | gossip_err("Invalid NULL buffer in readdir response\n"); | 252 | gossip_err("Invalid NULL buffer in readdir response\n"); |
253 | ret = -ENOMEM; | 253 | ret = -ENOMEM; |
254 | goto out_slot; | 254 | goto out_free_op; |
255 | } | 255 | } |
256 | 256 | ||
257 | bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size, | 257 | bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size, |
@@ -363,8 +363,6 @@ out_destroy_handle: | |||
363 | out_vfree: | 363 | out_vfree: |
364 | gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf); | 364 | gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf); |
365 | vfree(dents_buf); | 365 | vfree(dents_buf); |
366 | out_slot: | ||
367 | orangefs_readdir_index_put(buffer_index); | ||
368 | out_free_op: | 366 | out_free_op: |
369 | op_release(new_op); | 367 | op_release(new_op); |
370 | gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret); | 368 | gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret); |
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h index 45ce4ff4cbc7..50578a28bd9e 100644 --- a/fs/orangefs/protocol.h +++ b/fs/orangefs/protocol.h | |||
@@ -407,7 +407,7 @@ enum { | |||
407 | * space. Zero signifies the upstream version of the kernel module. | 407 | * space. Zero signifies the upstream version of the kernel module. |
408 | */ | 408 | */ |
409 | #define ORANGEFS_KERNEL_PROTO_VERSION 0 | 409 | #define ORANGEFS_KERNEL_PROTO_VERSION 0 |
410 | #define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904 | 410 | #define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903 |
411 | 411 | ||
412 | /* | 412 | /* |
413 | * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl. | 413 | * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl. |
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index df4f369254c0..506c3531832e 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
@@ -559,25 +559,25 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
559 | #endif | 559 | #endif |
560 | 560 | ||
561 | /** | 561 | /** |
562 | * fetch_or - perform *ptr |= mask and return old value of *ptr | 562 | * atomic_fetch_or - perform *p |= mask and return old value of *p |
563 | * @ptr: pointer to value | 563 | * @p: pointer to atomic_t |
564 | * @mask: mask to OR on the value | 564 | * @mask: mask to OR on the atomic_t |
565 | * | ||
566 | * cmpxchg based fetch_or, macro so it works for different integer types | ||
567 | */ | 565 | */ |
568 | #ifndef fetch_or | 566 | #ifndef atomic_fetch_or |
569 | #define fetch_or(ptr, mask) \ | 567 | static inline int atomic_fetch_or(atomic_t *p, int mask) |
570 | ({ typeof(*(ptr)) __old, __val = *(ptr); \ | 568 | { |
571 | for (;;) { \ | 569 | int old, val = atomic_read(p); |
572 | __old = cmpxchg((ptr), __val, __val | (mask)); \ | 570 | |
573 | if (__old == __val) \ | 571 | for (;;) { |
574 | break; \ | 572 | old = atomic_cmpxchg(p, val, val | mask); |
575 | __val = __old; \ | 573 | if (old == val) |
576 | } \ | 574 | break; |
577 | __old; \ | 575 | val = old; |
578 | }) | 576 | } |
579 | #endif | ||
580 | 577 | ||
578 | return old; | ||
579 | } | ||
580 | #endif | ||
581 | 581 | ||
582 | #ifdef CONFIG_GENERIC_ATOMIC64 | 582 | #ifdef CONFIG_GENERIC_ATOMIC64 |
583 | #include <asm-generic/atomic64.h> | 583 | #include <asm-generic/atomic64.h> |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index f0ba9c2ec639..e3354b74286c 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #define PHY_ID_BCM7250 0xae025280 | 24 | #define PHY_ID_BCM7250 0xae025280 |
25 | #define PHY_ID_BCM7364 0xae025260 | 25 | #define PHY_ID_BCM7364 0xae025260 |
26 | #define PHY_ID_BCM7366 0x600d8490 | 26 | #define PHY_ID_BCM7366 0x600d8490 |
27 | #define PHY_ID_BCM7346 0x600d8650 | ||
28 | #define PHY_ID_BCM7362 0x600d84b0 | ||
27 | #define PHY_ID_BCM7425 0x600d86b0 | 29 | #define PHY_ID_BCM7425 0x600d86b0 |
28 | #define PHY_ID_BCM7429 0x600d8730 | 30 | #define PHY_ID_BCM7429 0x600d8730 |
29 | #define PHY_ID_BCM7435 0x600d8750 | 31 | #define PHY_ID_BCM7435 0x600d8750 |
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 485fe5519448..d9d6a9d77489 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \ | |||
188 | } | 188 | } |
189 | 189 | ||
190 | #define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ | 190 | #define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ |
191 | static struct configfs_attribute _pfx##attr_##_name = { \ | 191 | static struct configfs_bin_attribute _pfx##attr_##_name = { \ |
192 | .cb_attr = { \ | 192 | .cb_attr = { \ |
193 | .ca_name = __stringify(_name), \ | 193 | .ca_name = __stringify(_name), \ |
194 | .ca_mode = S_IRUGO, \ | 194 | .ca_mode = S_IRUGO, \ |
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = { \ | |||
200 | } | 200 | } |
201 | 201 | ||
202 | #define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ | 202 | #define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ |
203 | static struct configfs_attribute _pfx##attr_##_name = { \ | 203 | static struct configfs_bin_attribute _pfx##attr_##_name = { \ |
204 | .cb_attr = { \ | 204 | .cb_attr = { \ |
205 | .ca_name = __stringify(_name), \ | 205 | .ca_name = __stringify(_name), \ |
206 | .ca_mode = S_IWUSR, \ | 206 | .ca_mode = S_IWUSR, \ |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 43aa1f8855c7..a51a5361695f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, | |||
465 | void bpf_prog_destroy(struct bpf_prog *fp); | 465 | void bpf_prog_destroy(struct bpf_prog *fp); |
466 | 466 | ||
467 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 467 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
468 | int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, | ||
469 | bool locked); | ||
468 | int sk_attach_bpf(u32 ufd, struct sock *sk); | 470 | int sk_attach_bpf(u32 ufd, struct sock *sk); |
469 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 471 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
470 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); | 472 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); |
471 | int sk_detach_filter(struct sock *sk); | 473 | int sk_detach_filter(struct sock *sk); |
474 | int __sk_detach_filter(struct sock *sk, bool locked); | ||
475 | |||
472 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, | 476 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, |
473 | unsigned int len); | 477 | unsigned int len); |
474 | 478 | ||
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 79b0ef6aaa14..7008623e24b1 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | |||
127 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) | 127 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) |
128 | return __pmd_trans_huge_lock(pmd, vma); | 128 | return __pmd_trans_huge_lock(pmd, vma); |
129 | else | 129 | else |
130 | return false; | 130 | return NULL; |
131 | } | 131 | } |
132 | static inline int hpage_nr_pages(struct page *page) | 132 | static inline int hpage_nr_pages(struct page *page) |
133 | { | 133 | { |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 0e1f433cc4b7..f48b8a664b0f 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
@@ -234,6 +234,10 @@ struct ip_set { | |||
234 | spinlock_t lock; | 234 | spinlock_t lock; |
235 | /* References to the set */ | 235 | /* References to the set */ |
236 | u32 ref; | 236 | u32 ref; |
237 | /* References to the set for netlink events like dump, | ||
238 | * ref can be swapped out by ip_set_swap | ||
239 | */ | ||
240 | u32 ref_netlink; | ||
237 | /* The core set type */ | 241 | /* The core set type */ |
238 | struct ip_set_type *type; | 242 | struct ip_set_type *type; |
239 | /* The type variant doing the real job */ | 243 | /* The type variant doing the real job */ |
diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 3ec5309e29f3..ac6d872ce067 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h | |||
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | |||
42 | BUG(); | 42 | BUG(); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src, | ||
46 | size_t n) | ||
47 | { | ||
48 | BUG(); | ||
49 | return -EFAULT; | ||
50 | } | ||
51 | |||
45 | static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, | 52 | static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, |
46 | struct iov_iter *i) | 53 | struct iov_iter *i) |
47 | { | 54 | { |
@@ -66,14 +73,17 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size) | |||
66 | #endif | 73 | #endif |
67 | 74 | ||
68 | /* | 75 | /* |
69 | * Architectures that define ARCH_HAS_PMEM_API must provide | 76 | * memcpy_from_pmem - read from persistent memory with error handling |
70 | * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), | 77 | * @dst: destination buffer |
71 | * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem() | 78 | * @src: source buffer |
72 | * and arch_has_wmb_pmem(). | 79 | * @size: transfer length |
80 | * | ||
81 | * Returns 0 on success negative error code on failure. | ||
73 | */ | 82 | */ |
74 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) | 83 | static inline int memcpy_from_pmem(void *dst, void __pmem const *src, |
84 | size_t size) | ||
75 | { | 85 | { |
76 | memcpy(dst, (void __force const *) src, size); | 86 | return arch_memcpy_from_pmem(dst, src, size); |
77 | } | 87 | } |
78 | 88 | ||
79 | static inline bool arch_has_pmem_api(void) | 89 | static inline bool arch_has_pmem_api(void) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 60bba7e032dc..52c4847b05e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -720,7 +720,7 @@ struct signal_struct { | |||
720 | struct task_cputime cputime_expires; | 720 | struct task_cputime cputime_expires; |
721 | 721 | ||
722 | #ifdef CONFIG_NO_HZ_FULL | 722 | #ifdef CONFIG_NO_HZ_FULL |
723 | unsigned long tick_dep_mask; | 723 | atomic_t tick_dep_mask; |
724 | #endif | 724 | #endif |
725 | 725 | ||
726 | struct list_head cpu_timers[3]; | 726 | struct list_head cpu_timers[3]; |
@@ -1549,7 +1549,7 @@ struct task_struct { | |||
1549 | #endif | 1549 | #endif |
1550 | 1550 | ||
1551 | #ifdef CONFIG_NO_HZ_FULL | 1551 | #ifdef CONFIG_NO_HZ_FULL |
1552 | unsigned long tick_dep_mask; | 1552 | atomic_t tick_dep_mask; |
1553 | #endif | 1553 | #endif |
1554 | unsigned long nvcsw, nivcsw; /* context switch counts */ | 1554 | unsigned long nvcsw, nivcsw; /* context switch counts */ |
1555 | u64 start_time; /* monotonic time in nsec */ | 1555 | u64 start_time; /* monotonic time in nsec */ |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 4bcf5a61aada..e6bc30a42a74 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -108,7 +108,6 @@ struct stmmac_axi { | |||
108 | }; | 108 | }; |
109 | 109 | ||
110 | struct plat_stmmacenet_data { | 110 | struct plat_stmmacenet_data { |
111 | char *phy_bus_name; | ||
112 | int bus_id; | 111 | int bus_id; |
113 | int phy_addr; | 112 | int phy_addr; |
114 | int interface; | 113 | int interface; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 685a51aa98cc..8ff6d40a294f 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -76,6 +76,7 @@ struct target_core_fabric_ops { | |||
76 | struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, | 76 | struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, |
77 | struct config_group *, const char *); | 77 | struct config_group *, const char *); |
78 | void (*fabric_drop_wwn)(struct se_wwn *); | 78 | void (*fabric_drop_wwn)(struct se_wwn *); |
79 | void (*add_wwn_groups)(struct se_wwn *); | ||
79 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, | 80 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, |
80 | struct config_group *, const char *); | 81 | struct config_group *, const char *); |
81 | void (*fabric_drop_tpg)(struct se_portal_group *); | 82 | void (*fabric_drop_tpg)(struct se_portal_group *); |
@@ -87,7 +88,6 @@ struct target_core_fabric_ops { | |||
87 | struct config_group *, const char *); | 88 | struct config_group *, const char *); |
88 | void (*fabric_drop_np)(struct se_tpg_np *); | 89 | void (*fabric_drop_np)(struct se_tpg_np *); |
89 | int (*fabric_init_nodeacl)(struct se_node_acl *, const char *); | 90 | int (*fabric_init_nodeacl)(struct se_node_acl *, const char *); |
90 | void (*fabric_cleanup_nodeacl)(struct se_node_acl *); | ||
91 | 91 | ||
92 | struct configfs_attribute **tfc_discovery_attrs; | 92 | struct configfs_attribute **tfc_discovery_attrs; |
93 | struct configfs_attribute **tfc_wwn_attrs; | 93 | struct configfs_attribute **tfc_wwn_attrs; |
diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h index 6fb644029c80..8738a78e6bf4 100644 --- a/include/trace/events/page_isolation.h +++ b/include/trace/events/page_isolation.h | |||
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated, | |||
29 | 29 | ||
30 | TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s", | 30 | TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s", |
31 | __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn, | 31 | __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn, |
32 | __entry->end_pfn == __entry->fin_pfn ? "success" : "fail") | 32 | __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail") |
33 | ); | 33 | ); |
34 | 34 | ||
35 | #endif /* _TRACE_PAGE_ISOLATION_H */ | 35 | #endif /* _TRACE_PAGE_ISOLATION_H */ |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 924f537183fd..23917bb47bf3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -375,6 +375,7 @@ struct bpf_tunnel_key { | |||
375 | }; | 375 | }; |
376 | __u8 tunnel_tos; | 376 | __u8 tunnel_tos; |
377 | __u8 tunnel_ttl; | 377 | __u8 tunnel_ttl; |
378 | __u16 tunnel_ext; | ||
378 | __u32 tunnel_label; | 379 | __u32 tunnel_label; |
379 | }; | 380 | }; |
380 | 381 | ||
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index aa9f10428743..621fa8ac4425 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h | |||
@@ -1 +1,5 @@ | |||
1 | #include <linux/compiler.h> | 1 | #include <linux/compiler.h> |
2 | |||
3 | #ifndef __always_inline | ||
4 | #define __always_inline inline | ||
5 | #endif | ||
diff --git a/init/Kconfig b/init/Kconfig index e0d26162432e..0dfd09d54c65 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH | |||
272 | See the man page for more details. | 272 | See the man page for more details. |
273 | 273 | ||
274 | config FHANDLE | 274 | config FHANDLE |
275 | bool "open by fhandle syscalls" | 275 | bool "open by fhandle syscalls" if EXPERT |
276 | select EXPORTFS | 276 | select EXPORTFS |
277 | default y | ||
277 | help | 278 | help |
278 | If you say Y here, a user level program will be able to map | 279 | If you say Y here, a user level program will be able to map |
279 | file names to handle and then later use the handle for | 280 | file names to handle and then later use the handle for |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2a2efe1bc76c..adc5e4bd74f8 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -137,11 +137,13 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) | |||
137 | "map_type:\t%u\n" | 137 | "map_type:\t%u\n" |
138 | "key_size:\t%u\n" | 138 | "key_size:\t%u\n" |
139 | "value_size:\t%u\n" | 139 | "value_size:\t%u\n" |
140 | "max_entries:\t%u\n", | 140 | "max_entries:\t%u\n" |
141 | "map_flags:\t%#x\n", | ||
141 | map->map_type, | 142 | map->map_type, |
142 | map->key_size, | 143 | map->key_size, |
143 | map->value_size, | 144 | map->value_size, |
144 | map->max_entries); | 145 | map->max_entries, |
146 | map->map_flags); | ||
145 | } | 147 | } |
146 | #endif | 148 | #endif |
147 | 149 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index de24fbce5277..52bedc5a5aaa 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -2417,14 +2417,24 @@ static void ctx_sched_out(struct perf_event_context *ctx, | |||
2417 | cpuctx->task_ctx = NULL; | 2417 | cpuctx->task_ctx = NULL; |
2418 | } | 2418 | } |
2419 | 2419 | ||
2420 | is_active ^= ctx->is_active; /* changed bits */ | 2420 | /* |
2421 | 2421 | * Always update time if it was set; not only when it changes. | |
2422 | * Otherwise we can 'forget' to update time for any but the last | ||
2423 | * context we sched out. For example: | ||
2424 | * | ||
2425 | * ctx_sched_out(.event_type = EVENT_FLEXIBLE) | ||
2426 | * ctx_sched_out(.event_type = EVENT_PINNED) | ||
2427 | * | ||
2428 | * would only update time for the pinned events. | ||
2429 | */ | ||
2422 | if (is_active & EVENT_TIME) { | 2430 | if (is_active & EVENT_TIME) { |
2423 | /* update (and stop) ctx time */ | 2431 | /* update (and stop) ctx time */ |
2424 | update_context_time(ctx); | 2432 | update_context_time(ctx); |
2425 | update_cgrp_time_from_cpuctx(cpuctx); | 2433 | update_cgrp_time_from_cpuctx(cpuctx); |
2426 | } | 2434 | } |
2427 | 2435 | ||
2436 | is_active ^= ctx->is_active; /* changed bits */ | ||
2437 | |||
2428 | if (!ctx->nr_active || !(is_active & EVENT_ALL)) | 2438 | if (!ctx->nr_active || !(is_active & EVENT_ALL)) |
2429 | return; | 2439 | return; |
2430 | 2440 | ||
@@ -8532,6 +8542,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8532 | f_flags); | 8542 | f_flags); |
8533 | if (IS_ERR(event_file)) { | 8543 | if (IS_ERR(event_file)) { |
8534 | err = PTR_ERR(event_file); | 8544 | err = PTR_ERR(event_file); |
8545 | event_file = NULL; | ||
8535 | goto err_context; | 8546 | goto err_context; |
8536 | } | 8547 | } |
8537 | 8548 | ||
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 53ab2f85d77e..2324ba5310db 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -2000,6 +2000,77 @@ static inline int get_first_held_lock(struct task_struct *curr, | |||
2000 | } | 2000 | } |
2001 | 2001 | ||
2002 | /* | 2002 | /* |
2003 | * Returns the next chain_key iteration | ||
2004 | */ | ||
2005 | static u64 print_chain_key_iteration(int class_idx, u64 chain_key) | ||
2006 | { | ||
2007 | u64 new_chain_key = iterate_chain_key(chain_key, class_idx); | ||
2008 | |||
2009 | printk(" class_idx:%d -> chain_key:%016Lx", | ||
2010 | class_idx, | ||
2011 | (unsigned long long)new_chain_key); | ||
2012 | return new_chain_key; | ||
2013 | } | ||
2014 | |||
2015 | static void | ||
2016 | print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) | ||
2017 | { | ||
2018 | struct held_lock *hlock; | ||
2019 | u64 chain_key = 0; | ||
2020 | int depth = curr->lockdep_depth; | ||
2021 | int i; | ||
2022 | |||
2023 | printk("depth: %u\n", depth + 1); | ||
2024 | for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) { | ||
2025 | hlock = curr->held_locks + i; | ||
2026 | chain_key = print_chain_key_iteration(hlock->class_idx, chain_key); | ||
2027 | |||
2028 | print_lock(hlock); | ||
2029 | } | ||
2030 | |||
2031 | print_chain_key_iteration(hlock_next->class_idx, chain_key); | ||
2032 | print_lock(hlock_next); | ||
2033 | } | ||
2034 | |||
2035 | static void print_chain_keys_chain(struct lock_chain *chain) | ||
2036 | { | ||
2037 | int i; | ||
2038 | u64 chain_key = 0; | ||
2039 | int class_id; | ||
2040 | |||
2041 | printk("depth: %u\n", chain->depth); | ||
2042 | for (i = 0; i < chain->depth; i++) { | ||
2043 | class_id = chain_hlocks[chain->base + i]; | ||
2044 | chain_key = print_chain_key_iteration(class_id + 1, chain_key); | ||
2045 | |||
2046 | print_lock_name(lock_classes + class_id); | ||
2047 | printk("\n"); | ||
2048 | } | ||
2049 | } | ||
2050 | |||
2051 | static void print_collision(struct task_struct *curr, | ||
2052 | struct held_lock *hlock_next, | ||
2053 | struct lock_chain *chain) | ||
2054 | { | ||
2055 | printk("\n"); | ||
2056 | printk("======================\n"); | ||
2057 | printk("[chain_key collision ]\n"); | ||
2058 | print_kernel_ident(); | ||
2059 | printk("----------------------\n"); | ||
2060 | printk("%s/%d: ", current->comm, task_pid_nr(current)); | ||
2061 | printk("Hash chain already cached but the contents don't match!\n"); | ||
2062 | |||
2063 | printk("Held locks:"); | ||
2064 | print_chain_keys_held_locks(curr, hlock_next); | ||
2065 | |||
2066 | printk("Locks in cached chain:"); | ||
2067 | print_chain_keys_chain(chain); | ||
2068 | |||
2069 | printk("\nstack backtrace:\n"); | ||
2070 | dump_stack(); | ||
2071 | } | ||
2072 | |||
2073 | /* | ||
2003 | * Checks whether the chain and the current held locks are consistent | 2074 | * Checks whether the chain and the current held locks are consistent |
2004 | * in depth and also in content. If they are not it most likely means | 2075 | * in depth and also in content. If they are not it most likely means |
2005 | * that there was a collision during the calculation of the chain_key. | 2076 | * that there was a collision during the calculation of the chain_key. |
@@ -2014,14 +2085,18 @@ static int check_no_collision(struct task_struct *curr, | |||
2014 | 2085 | ||
2015 | i = get_first_held_lock(curr, hlock); | 2086 | i = get_first_held_lock(curr, hlock); |
2016 | 2087 | ||
2017 | if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) | 2088 | if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { |
2089 | print_collision(curr, hlock, chain); | ||
2018 | return 0; | 2090 | return 0; |
2091 | } | ||
2019 | 2092 | ||
2020 | for (j = 0; j < chain->depth - 1; j++, i++) { | 2093 | for (j = 0; j < chain->depth - 1; j++, i++) { |
2021 | id = curr->held_locks[i].class_idx - 1; | 2094 | id = curr->held_locks[i].class_idx - 1; |
2022 | 2095 | ||
2023 | if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) | 2096 | if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { |
2097 | print_collision(curr, hlock, chain); | ||
2024 | return 0; | 2098 | return 0; |
2099 | } | ||
2025 | } | 2100 | } |
2026 | #endif | 2101 | #endif |
2027 | return 1; | 2102 | return 1; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d8465eeab8b3..8b489fcac37b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -321,6 +321,24 @@ static inline void init_hrtick(void) | |||
321 | } | 321 | } |
322 | #endif /* CONFIG_SCHED_HRTICK */ | 322 | #endif /* CONFIG_SCHED_HRTICK */ |
323 | 323 | ||
324 | /* | ||
325 | * cmpxchg based fetch_or, macro so it works for different integer types | ||
326 | */ | ||
327 | #define fetch_or(ptr, mask) \ | ||
328 | ({ \ | ||
329 | typeof(ptr) _ptr = (ptr); \ | ||
330 | typeof(mask) _mask = (mask); \ | ||
331 | typeof(*_ptr) _old, _val = *_ptr; \ | ||
332 | \ | ||
333 | for (;;) { \ | ||
334 | _old = cmpxchg(_ptr, _val, _val | _mask); \ | ||
335 | if (_old == _val) \ | ||
336 | break; \ | ||
337 | _val = _old; \ | ||
338 | } \ | ||
339 | _old; \ | ||
340 | }) | ||
341 | |||
324 | #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) | 342 | #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) |
325 | /* | 343 | /* |
326 | * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, | 344 | * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 084b79f5917e..58e3310c9b21 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) | |||
157 | cpumask_var_t tick_nohz_full_mask; | 157 | cpumask_var_t tick_nohz_full_mask; |
158 | cpumask_var_t housekeeping_mask; | 158 | cpumask_var_t housekeeping_mask; |
159 | bool tick_nohz_full_running; | 159 | bool tick_nohz_full_running; |
160 | static unsigned long tick_dep_mask; | 160 | static atomic_t tick_dep_mask; |
161 | 161 | ||
162 | static void trace_tick_dependency(unsigned long dep) | 162 | static bool check_tick_dependency(atomic_t *dep) |
163 | { | 163 | { |
164 | if (dep & TICK_DEP_MASK_POSIX_TIMER) { | 164 | int val = atomic_read(dep); |
165 | |||
166 | if (val & TICK_DEP_MASK_POSIX_TIMER) { | ||
165 | trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); | 167 | trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); |
166 | return; | 168 | return true; |
167 | } | 169 | } |
168 | 170 | ||
169 | if (dep & TICK_DEP_MASK_PERF_EVENTS) { | 171 | if (val & TICK_DEP_MASK_PERF_EVENTS) { |
170 | trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); | 172 | trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); |
171 | return; | 173 | return true; |
172 | } | 174 | } |
173 | 175 | ||
174 | if (dep & TICK_DEP_MASK_SCHED) { | 176 | if (val & TICK_DEP_MASK_SCHED) { |
175 | trace_tick_stop(0, TICK_DEP_MASK_SCHED); | 177 | trace_tick_stop(0, TICK_DEP_MASK_SCHED); |
176 | return; | 178 | return true; |
177 | } | 179 | } |
178 | 180 | ||
179 | if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE) | 181 | if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) { |
180 | trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); | 182 | trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); |
183 | return true; | ||
184 | } | ||
185 | |||
186 | return false; | ||
181 | } | 187 | } |
182 | 188 | ||
183 | static bool can_stop_full_tick(struct tick_sched *ts) | 189 | static bool can_stop_full_tick(struct tick_sched *ts) |
184 | { | 190 | { |
185 | WARN_ON_ONCE(!irqs_disabled()); | 191 | WARN_ON_ONCE(!irqs_disabled()); |
186 | 192 | ||
187 | if (tick_dep_mask) { | 193 | if (check_tick_dependency(&tick_dep_mask)) |
188 | trace_tick_dependency(tick_dep_mask); | ||
189 | return false; | 194 | return false; |
190 | } | ||
191 | 195 | ||
192 | if (ts->tick_dep_mask) { | 196 | if (check_tick_dependency(&ts->tick_dep_mask)) |
193 | trace_tick_dependency(ts->tick_dep_mask); | ||
194 | return false; | 197 | return false; |
195 | } | ||
196 | 198 | ||
197 | if (current->tick_dep_mask) { | 199 | if (check_tick_dependency(¤t->tick_dep_mask)) |
198 | trace_tick_dependency(current->tick_dep_mask); | ||
199 | return false; | 200 | return false; |
200 | } | ||
201 | 201 | ||
202 | if (current->signal->tick_dep_mask) { | 202 | if (check_tick_dependency(¤t->signal->tick_dep_mask)) |
203 | trace_tick_dependency(current->signal->tick_dep_mask); | ||
204 | return false; | 203 | return false; |
205 | } | ||
206 | 204 | ||
207 | return true; | 205 | return true; |
208 | } | 206 | } |
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void) | |||
259 | preempt_enable(); | 257 | preempt_enable(); |
260 | } | 258 | } |
261 | 259 | ||
262 | static void tick_nohz_dep_set_all(unsigned long *dep, | 260 | static void tick_nohz_dep_set_all(atomic_t *dep, |
263 | enum tick_dep_bits bit) | 261 | enum tick_dep_bits bit) |
264 | { | 262 | { |
265 | unsigned long prev; | 263 | int prev; |
266 | 264 | ||
267 | prev = fetch_or(dep, BIT_MASK(bit)); | 265 | prev = atomic_fetch_or(dep, BIT(bit)); |
268 | if (!prev) | 266 | if (!prev) |
269 | tick_nohz_full_kick_all(); | 267 | tick_nohz_full_kick_all(); |
270 | } | 268 | } |
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit) | |||
280 | 278 | ||
281 | void tick_nohz_dep_clear(enum tick_dep_bits bit) | 279 | void tick_nohz_dep_clear(enum tick_dep_bits bit) |
282 | { | 280 | { |
283 | clear_bit(bit, &tick_dep_mask); | 281 | atomic_andnot(BIT(bit), &tick_dep_mask); |
284 | } | 282 | } |
285 | 283 | ||
286 | /* | 284 | /* |
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit) | |||
289 | */ | 287 | */ |
290 | void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) | 288 | void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) |
291 | { | 289 | { |
292 | unsigned long prev; | 290 | int prev; |
293 | struct tick_sched *ts; | 291 | struct tick_sched *ts; |
294 | 292 | ||
295 | ts = per_cpu_ptr(&tick_cpu_sched, cpu); | 293 | ts = per_cpu_ptr(&tick_cpu_sched, cpu); |
296 | 294 | ||
297 | prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit)); | 295 | prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit)); |
298 | if (!prev) { | 296 | if (!prev) { |
299 | preempt_disable(); | 297 | preempt_disable(); |
300 | /* Perf needs local kick that is NMI safe */ | 298 | /* Perf needs local kick that is NMI safe */ |
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) | |||
313 | { | 311 | { |
314 | struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); | 312 | struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); |
315 | 313 | ||
316 | clear_bit(bit, &ts->tick_dep_mask); | 314 | atomic_andnot(BIT(bit), &ts->tick_dep_mask); |
317 | } | 315 | } |
318 | 316 | ||
319 | /* | 317 | /* |
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) | |||
331 | 329 | ||
332 | void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) | 330 | void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) |
333 | { | 331 | { |
334 | clear_bit(bit, &tsk->tick_dep_mask); | 332 | atomic_andnot(BIT(bit), &tsk->tick_dep_mask); |
335 | } | 333 | } |
336 | 334 | ||
337 | /* | 335 | /* |
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit) | |||
345 | 343 | ||
346 | void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) | 344 | void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) |
347 | { | 345 | { |
348 | clear_bit(bit, &sig->tick_dep_mask); | 346 | atomic_andnot(BIT(bit), &sig->tick_dep_mask); |
349 | } | 347 | } |
350 | 348 | ||
351 | /* | 349 | /* |
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void) | |||
366 | ts = this_cpu_ptr(&tick_cpu_sched); | 364 | ts = this_cpu_ptr(&tick_cpu_sched); |
367 | 365 | ||
368 | if (ts->tick_stopped) { | 366 | if (ts->tick_stopped) { |
369 | if (current->tick_dep_mask || current->signal->tick_dep_mask) | 367 | if (atomic_read(¤t->tick_dep_mask) || |
368 | atomic_read(¤t->signal->tick_dep_mask)) | ||
370 | tick_nohz_full_kick(); | 369 | tick_nohz_full_kick(); |
371 | } | 370 | } |
372 | out: | 371 | out: |
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index eb4e32566a83..bf38226e5c17 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h | |||
@@ -60,7 +60,7 @@ struct tick_sched { | |||
60 | u64 next_timer; | 60 | u64 next_timer; |
61 | ktime_t idle_expires; | 61 | ktime_t idle_expires; |
62 | int do_timer_last; | 62 | int do_timer_last; |
63 | unsigned long tick_dep_mask; | 63 | atomic_t tick_dep_mask; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | extern struct tick_sched *tick_get_tick_sched(int cpu); | 66 | extern struct tick_sched *tick_get_tick_sched(int cpu); |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index acb3b6c4dd89..38f1dd79acdb 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object) | |||
498 | struct kasan_alloc_meta *alloc_info = | 498 | struct kasan_alloc_meta *alloc_info = |
499 | get_alloc_info(cache, object); | 499 | get_alloc_info(cache, object); |
500 | alloc_info->state = KASAN_STATE_FREE; | 500 | alloc_info->state = KASAN_STATE_FREE; |
501 | set_track(&free_info->track); | 501 | set_track(&free_info->track, GFP_NOWAIT); |
502 | } | 502 | } |
503 | #endif | 503 | #endif |
504 | 504 | ||
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b34d279a7ee6..86349586eacb 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused) | |||
547 | 547 | ||
548 | static void wake_oom_reaper(struct task_struct *tsk) | 548 | static void wake_oom_reaper(struct task_struct *tsk) |
549 | { | 549 | { |
550 | if (!oom_reaper_th || tsk->oom_reaper_list) | 550 | if (!oom_reaper_th) |
551 | return; | ||
552 | |||
553 | /* tsk is already queued? */ | ||
554 | if (tsk == oom_reaper_list || tsk->oom_reaper_list) | ||
551 | return; | 555 | return; |
552 | 556 | ||
553 | get_task_struct(tsk); | 557 | get_task_struct(tsk); |
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 92c4c36501e7..c4f568206544 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
215 | * all pages in [start_pfn...end_pfn) must be in the same zone. | 215 | * all pages in [start_pfn...end_pfn) must be in the same zone. |
216 | * zone->lock must be held before call this. | 216 | * zone->lock must be held before call this. |
217 | * | 217 | * |
218 | * Returns 1 if all pages in the range are isolated. | 218 | * Returns the last tested pfn. |
219 | */ | 219 | */ |
220 | static unsigned long | 220 | static unsigned long |
221 | __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, | 221 | __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, |
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private, | |||
289 | * now as a simple work-around, we use the next node for destination. | 289 | * now as a simple work-around, we use the next node for destination. |
290 | */ | 290 | */ |
291 | if (PageHuge(page)) { | 291 | if (PageHuge(page)) { |
292 | nodemask_t src = nodemask_of_node(page_to_nid(page)); | 292 | int node = next_online_node(page_to_nid(page)); |
293 | nodemask_t dst; | 293 | if (node == MAX_NUMNODES) |
294 | nodes_complement(dst, src); | 294 | node = first_online_node; |
295 | return alloc_huge_page_node(page_hstate(compound_head(page)), | 295 | return alloc_huge_page_node(page_hstate(compound_head(page)), |
296 | next_node(page_to_nid(page), dst)); | 296 | node); |
297 | } | 297 | } |
298 | 298 | ||
299 | if (PageHighMem(page)) | 299 | if (PageHighMem(page)) |
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma) | |||
569 | } | 569 | } |
570 | 570 | ||
571 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH | 571 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
572 | static void percpu_flush_tlb_batch_pages(void *data) | ||
573 | { | ||
574 | /* | ||
575 | * All TLB entries are flushed on the assumption that it is | ||
576 | * cheaper to flush all TLBs and let them be refilled than | ||
577 | * flushing individual PFNs. Note that we do not track mm's | ||
578 | * to flush as that might simply be multiple full TLB flushes | ||
579 | * for no gain. | ||
580 | */ | ||
581 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | ||
582 | flush_tlb_local(); | ||
583 | } | ||
584 | |||
585 | /* | 572 | /* |
586 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is | 573 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is |
587 | * important if a PTE was dirty when it was unmapped that it's flushed | 574 | * important if a PTE was dirty when it was unmapped that it's flushed |
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void) | |||
598 | 585 | ||
599 | cpu = get_cpu(); | 586 | cpu = get_cpu(); |
600 | 587 | ||
601 | trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL); | 588 | if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) { |
602 | 589 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | |
603 | if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) | 590 | local_flush_tlb(); |
604 | percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask); | 591 | trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); |
605 | |||
606 | if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) { | ||
607 | smp_call_function_many(&tlb_ubc->cpumask, | ||
608 | percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true); | ||
609 | } | 592 | } |
593 | |||
594 | if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) | ||
595 | flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL); | ||
610 | cpumask_clear(&tlb_ubc->cpumask); | 596 | cpumask_clear(&tlb_ubc->cpumask); |
611 | tlb_ubc->flush_required = false; | 597 | tlb_ubc->flush_required = false; |
612 | tlb_ubc->writable = false; | 598 | tlb_ubc->writable = false; |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index e23449094188..9cb7044d0801 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -582,7 +582,7 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time) | |||
582 | int err; | 582 | int err; |
583 | 583 | ||
584 | err = switchdev_port_attr_set(br->dev, &attr); | 584 | err = switchdev_port_attr_set(br->dev, &attr); |
585 | if (err) | 585 | if (err && err != -EOPNOTSUPP) |
586 | return err; | 586 | return err; |
587 | 587 | ||
588 | br->ageing_time = t; | 588 | br->ageing_time = t; |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 67b2e27999aa..8570bc7744c2 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1521,6 +1521,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
1521 | if (copy_from_user(&tmp, user, sizeof(tmp))) | 1521 | if (copy_from_user(&tmp, user, sizeof(tmp))) |
1522 | return -EFAULT; | 1522 | return -EFAULT; |
1523 | 1523 | ||
1524 | tmp.name[sizeof(tmp.name) - 1] = '\0'; | ||
1525 | |||
1524 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); | 1526 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); |
1525 | if (!t) | 1527 | if (!t) |
1526 | return ret; | 1528 | return ret; |
@@ -2332,6 +2334,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, | |||
2332 | if (copy_from_user(&tmp, user, sizeof(tmp))) | 2334 | if (copy_from_user(&tmp, user, sizeof(tmp))) |
2333 | return -EFAULT; | 2335 | return -EFAULT; |
2334 | 2336 | ||
2337 | tmp.name[sizeof(tmp.name) - 1] = '\0'; | ||
2338 | |||
2335 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); | 2339 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); |
2336 | if (!t) | 2340 | if (!t) |
2337 | return ret; | 2341 | return ret; |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index adc8d7221dbb..77f7e7a9ebe1 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c | |||
@@ -40,7 +40,8 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, | |||
40 | /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) | 40 | /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) |
41 | * or the bridge port (NF_BRIDGE PREROUTING). | 41 | * or the bridge port (NF_BRIDGE PREROUTING). |
42 | */ | 42 | */ |
43 | static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | 43 | static void nft_reject_br_send_v4_tcp_reset(struct net *net, |
44 | struct sk_buff *oldskb, | ||
44 | const struct net_device *dev, | 45 | const struct net_device *dev, |
45 | int hook) | 46 | int hook) |
46 | { | 47 | { |
@@ -48,7 +49,6 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | |||
48 | struct iphdr *niph; | 49 | struct iphdr *niph; |
49 | const struct tcphdr *oth; | 50 | const struct tcphdr *oth; |
50 | struct tcphdr _oth; | 51 | struct tcphdr _oth; |
51 | struct net *net = sock_net(oldskb->sk); | ||
52 | 52 | ||
53 | if (!nft_bridge_iphdr_validate(oldskb)) | 53 | if (!nft_bridge_iphdr_validate(oldskb)) |
54 | return; | 54 | return; |
@@ -75,7 +75,8 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | |||
75 | br_deliver(br_port_get_rcu(dev), nskb); | 75 | br_deliver(br_port_get_rcu(dev), nskb); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, | 78 | static void nft_reject_br_send_v4_unreach(struct net *net, |
79 | struct sk_buff *oldskb, | ||
79 | const struct net_device *dev, | 80 | const struct net_device *dev, |
80 | int hook, u8 code) | 81 | int hook, u8 code) |
81 | { | 82 | { |
@@ -86,7 +87,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, | |||
86 | void *payload; | 87 | void *payload; |
87 | __wsum csum; | 88 | __wsum csum; |
88 | u8 proto; | 89 | u8 proto; |
89 | struct net *net = sock_net(oldskb->sk); | ||
90 | 90 | ||
91 | if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) | 91 | if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) |
92 | return; | 92 | return; |
@@ -273,17 +273,17 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, | |||
273 | case htons(ETH_P_IP): | 273 | case htons(ETH_P_IP): |
274 | switch (priv->type) { | 274 | switch (priv->type) { |
275 | case NFT_REJECT_ICMP_UNREACH: | 275 | case NFT_REJECT_ICMP_UNREACH: |
276 | nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, | 276 | nft_reject_br_send_v4_unreach(pkt->net, pkt->skb, |
277 | pkt->hook, | 277 | pkt->in, pkt->hook, |
278 | priv->icmp_code); | 278 | priv->icmp_code); |
279 | break; | 279 | break; |
280 | case NFT_REJECT_TCP_RST: | 280 | case NFT_REJECT_TCP_RST: |
281 | nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in, | 281 | nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb, |
282 | pkt->hook); | 282 | pkt->in, pkt->hook); |
283 | break; | 283 | break; |
284 | case NFT_REJECT_ICMPX_UNREACH: | 284 | case NFT_REJECT_ICMPX_UNREACH: |
285 | nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, | 285 | nft_reject_br_send_v4_unreach(pkt->net, pkt->skb, |
286 | pkt->hook, | 286 | pkt->in, pkt->hook, |
287 | nft_reject_icmp_code(priv->icmp_code)); | 287 | nft_reject_icmp_code(priv->icmp_code)); |
288 | break; | 288 | break; |
289 | } | 289 | } |
diff --git a/net/core/filter.c b/net/core/filter.c index b7177d01ecb0..ca7f832b2980 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1149,7 +1149,8 @@ void bpf_prog_destroy(struct bpf_prog *fp) | |||
1149 | } | 1149 | } |
1150 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); | 1150 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); |
1151 | 1151 | ||
1152 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) | 1152 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk, |
1153 | bool locked) | ||
1153 | { | 1154 | { |
1154 | struct sk_filter *fp, *old_fp; | 1155 | struct sk_filter *fp, *old_fp; |
1155 | 1156 | ||
@@ -1165,10 +1166,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) | |||
1165 | return -ENOMEM; | 1166 | return -ENOMEM; |
1166 | } | 1167 | } |
1167 | 1168 | ||
1168 | old_fp = rcu_dereference_protected(sk->sk_filter, | 1169 | old_fp = rcu_dereference_protected(sk->sk_filter, locked); |
1169 | sock_owned_by_user(sk)); | ||
1170 | rcu_assign_pointer(sk->sk_filter, fp); | 1170 | rcu_assign_pointer(sk->sk_filter, fp); |
1171 | |||
1172 | if (old_fp) | 1171 | if (old_fp) |
1173 | sk_filter_uncharge(sk, old_fp); | 1172 | sk_filter_uncharge(sk, old_fp); |
1174 | 1173 | ||
@@ -1247,7 +1246,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1247 | * occurs or there is insufficient memory for the filter a negative | 1246 | * occurs or there is insufficient memory for the filter a negative |
1248 | * errno code is returned. On success the return is zero. | 1247 | * errno code is returned. On success the return is zero. |
1249 | */ | 1248 | */ |
1250 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | 1249 | int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, |
1250 | bool locked) | ||
1251 | { | 1251 | { |
1252 | struct bpf_prog *prog = __get_filter(fprog, sk); | 1252 | struct bpf_prog *prog = __get_filter(fprog, sk); |
1253 | int err; | 1253 | int err; |
@@ -1255,7 +1255,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1255 | if (IS_ERR(prog)) | 1255 | if (IS_ERR(prog)) |
1256 | return PTR_ERR(prog); | 1256 | return PTR_ERR(prog); |
1257 | 1257 | ||
1258 | err = __sk_attach_prog(prog, sk); | 1258 | err = __sk_attach_prog(prog, sk, locked); |
1259 | if (err < 0) { | 1259 | if (err < 0) { |
1260 | __bpf_prog_release(prog); | 1260 | __bpf_prog_release(prog); |
1261 | return err; | 1261 | return err; |
@@ -1263,7 +1263,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1263 | 1263 | ||
1264 | return 0; | 1264 | return 0; |
1265 | } | 1265 | } |
1266 | EXPORT_SYMBOL_GPL(sk_attach_filter); | 1266 | EXPORT_SYMBOL_GPL(__sk_attach_filter); |
1267 | |||
1268 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | ||
1269 | { | ||
1270 | return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk)); | ||
1271 | } | ||
1267 | 1272 | ||
1268 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) | 1273 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
1269 | { | 1274 | { |
@@ -1309,7 +1314,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) | |||
1309 | if (IS_ERR(prog)) | 1314 | if (IS_ERR(prog)) |
1310 | return PTR_ERR(prog); | 1315 | return PTR_ERR(prog); |
1311 | 1316 | ||
1312 | err = __sk_attach_prog(prog, sk); | 1317 | err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk)); |
1313 | if (err < 0) { | 1318 | if (err < 0) { |
1314 | bpf_prog_put(prog); | 1319 | bpf_prog_put(prog); |
1315 | return err; | 1320 | return err; |
@@ -1764,6 +1769,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1764 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { | 1769 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
1765 | switch (size) { | 1770 | switch (size) { |
1766 | case offsetof(struct bpf_tunnel_key, tunnel_label): | 1771 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
1772 | case offsetof(struct bpf_tunnel_key, tunnel_ext): | ||
1767 | goto set_compat; | 1773 | goto set_compat; |
1768 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): | 1774 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
1769 | /* Fixup deprecated structure layouts here, so we have | 1775 | /* Fixup deprecated structure layouts here, so we have |
@@ -1849,6 +1855,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1849 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { | 1855 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
1850 | switch (size) { | 1856 | switch (size) { |
1851 | case offsetof(struct bpf_tunnel_key, tunnel_label): | 1857 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
1858 | case offsetof(struct bpf_tunnel_key, tunnel_ext): | ||
1852 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): | 1859 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
1853 | /* Fixup deprecated structure layouts here, so we have | 1860 | /* Fixup deprecated structure layouts here, so we have |
1854 | * a common path later on. | 1861 | * a common path later on. |
@@ -1861,7 +1868,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1861 | return -EINVAL; | 1868 | return -EINVAL; |
1862 | } | 1869 | } |
1863 | } | 1870 | } |
1864 | if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label)) | 1871 | if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || |
1872 | from->tunnel_ext)) | ||
1865 | return -EINVAL; | 1873 | return -EINVAL; |
1866 | 1874 | ||
1867 | skb_dst_drop(skb); | 1875 | skb_dst_drop(skb); |
@@ -2247,7 +2255,7 @@ static int __init register_sk_filter_ops(void) | |||
2247 | } | 2255 | } |
2248 | late_initcall(register_sk_filter_ops); | 2256 | late_initcall(register_sk_filter_ops); |
2249 | 2257 | ||
2250 | int sk_detach_filter(struct sock *sk) | 2258 | int __sk_detach_filter(struct sock *sk, bool locked) |
2251 | { | 2259 | { |
2252 | int ret = -ENOENT; | 2260 | int ret = -ENOENT; |
2253 | struct sk_filter *filter; | 2261 | struct sk_filter *filter; |
@@ -2255,8 +2263,7 @@ int sk_detach_filter(struct sock *sk) | |||
2255 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) | 2263 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
2256 | return -EPERM; | 2264 | return -EPERM; |
2257 | 2265 | ||
2258 | filter = rcu_dereference_protected(sk->sk_filter, | 2266 | filter = rcu_dereference_protected(sk->sk_filter, locked); |
2259 | sock_owned_by_user(sk)); | ||
2260 | if (filter) { | 2267 | if (filter) { |
2261 | RCU_INIT_POINTER(sk->sk_filter, NULL); | 2268 | RCU_INIT_POINTER(sk->sk_filter, NULL); |
2262 | sk_filter_uncharge(sk, filter); | 2269 | sk_filter_uncharge(sk, filter); |
@@ -2265,7 +2272,12 @@ int sk_detach_filter(struct sock *sk) | |||
2265 | 2272 | ||
2266 | return ret; | 2273 | return ret; |
2267 | } | 2274 | } |
2268 | EXPORT_SYMBOL_GPL(sk_detach_filter); | 2275 | EXPORT_SYMBOL_GPL(__sk_detach_filter); |
2276 | |||
2277 | int sk_detach_filter(struct sock *sk) | ||
2278 | { | ||
2279 | return __sk_detach_filter(sk, sock_owned_by_user(sk)); | ||
2280 | } | ||
2269 | 2281 | ||
2270 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, | 2282 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, |
2271 | unsigned int len) | 2283 | unsigned int len) |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 94acfc89ad97..a57bd17805b4 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -603,7 +603,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
603 | const struct net_device_ops *ops; | 603 | const struct net_device_ops *ops; |
604 | int err; | 604 | int err; |
605 | 605 | ||
606 | np->dev = ndev; | ||
607 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); | 606 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); |
608 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); | 607 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); |
609 | 608 | ||
@@ -670,6 +669,7 @@ int netpoll_setup(struct netpoll *np) | |||
670 | goto unlock; | 669 | goto unlock; |
671 | } | 670 | } |
672 | dev_hold(ndev); | 671 | dev_hold(ndev); |
672 | np->dev = ndev; | ||
673 | 673 | ||
674 | if (netdev_master_upper_dev_get(ndev)) { | 674 | if (netdev_master_upper_dev_get(ndev)) { |
675 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); | 675 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); |
@@ -770,6 +770,7 @@ int netpoll_setup(struct netpoll *np) | |||
770 | return 0; | 770 | return 0; |
771 | 771 | ||
772 | put: | 772 | put: |
773 | np->dev = NULL; | ||
773 | dev_put(ndev); | 774 | dev_put(ndev); |
774 | unlock: | 775 | unlock: |
775 | rtnl_unlock(); | 776 | rtnl_unlock(); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f2066772d0f3..a75f7e94b445 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -909,6 +909,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, | |||
909 | + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ | 909 | + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ |
910 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ | 910 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ |
911 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ | 911 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ |
912 | + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ | ||
912 | + nla_total_size(1); /* IFLA_PROTO_DOWN */ | 913 | + nla_total_size(1); /* IFLA_PROTO_DOWN */ |
913 | 914 | ||
914 | } | 915 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index a0586b4a197d..5a94aea280d3 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -195,6 +195,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, | |||
195 | u8 proto = NAPI_GRO_CB(skb)->proto; | 195 | u8 proto = NAPI_GRO_CB(skb)->proto; |
196 | const struct net_offload **offloads; | 196 | const struct net_offload **offloads; |
197 | 197 | ||
198 | /* We can clear the encap_mark for FOU as we are essentially doing | ||
199 | * one of two possible things. We are either adding an L4 tunnel | ||
200 | * header to the outer L3 tunnel header, or we are are simply | ||
201 | * treating the GRE tunnel header as though it is a UDP protocol | ||
202 | * specific header such as VXLAN or GENEVE. | ||
203 | */ | ||
204 | NAPI_GRO_CB(skb)->encap_mark = 0; | ||
205 | |||
198 | rcu_read_lock(); | 206 | rcu_read_lock(); |
199 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; | 207 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; |
200 | ops = rcu_dereference(offloads[proto]); | 208 | ops = rcu_dereference(offloads[proto]); |
@@ -352,6 +360,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, | |||
352 | } | 360 | } |
353 | } | 361 | } |
354 | 362 | ||
363 | /* We can clear the encap_mark for GUE as we are essentially doing | ||
364 | * one of two possible things. We are either adding an L4 tunnel | ||
365 | * header to the outer L3 tunnel header, or we are are simply | ||
366 | * treating the GRE tunnel header as though it is a UDP protocol | ||
367 | * specific header such as VXLAN or GENEVE. | ||
368 | */ | ||
369 | NAPI_GRO_CB(skb)->encap_mark = 0; | ||
370 | |||
355 | rcu_read_lock(); | 371 | rcu_read_lock(); |
356 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; | 372 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; |
357 | ops = rcu_dereference(offloads[guehdr->proto_ctype]); | 373 | ops = rcu_dereference(offloads[guehdr->proto_ctype]); |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 02dd990af542..6165f30c4d72 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -372,8 +372,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, | |||
372 | if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || | 372 | if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || |
373 | nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || | 373 | nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || |
374 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || | 374 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || |
375 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || | 375 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) || |
376 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || | 376 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) || |
377 | nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) | 377 | nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) |
378 | return -ENOMEM; | 378 | return -ENOMEM; |
379 | 379 | ||
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index bf081927e06b..4133b0f513af 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
359 | } | 359 | } |
360 | 360 | ||
361 | /* All zeroes == unconditional rule. */ | 361 | /* All zeroes == unconditional rule. */ |
362 | static inline bool unconditional(const struct arpt_arp *arp) | 362 | static inline bool unconditional(const struct arpt_entry *e) |
363 | { | 363 | { |
364 | static const struct arpt_arp uncond; | 364 | static const struct arpt_arp uncond; |
365 | 365 | ||
366 | return memcmp(arp, &uncond, sizeof(uncond)) == 0; | 366 | return e->target_offset == sizeof(struct arpt_entry) && |
367 | memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; | ||
367 | } | 368 | } |
368 | 369 | ||
369 | /* Figures out from what hook each rule can be called: returns 0 if | 370 | /* Figures out from what hook each rule can be called: returns 0 if |
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo, | |||
402 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); | 403 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); |
403 | 404 | ||
404 | /* Unconditional return/END. */ | 405 | /* Unconditional return/END. */ |
405 | if ((e->target_offset == sizeof(struct arpt_entry) && | 406 | if ((unconditional(e) && |
406 | (strcmp(t->target.u.user.name, | 407 | (strcmp(t->target.u.user.name, |
407 | XT_STANDARD_TARGET) == 0) && | 408 | XT_STANDARD_TARGET) == 0) && |
408 | t->verdict < 0 && unconditional(&e->arp)) || | 409 | t->verdict < 0) || visited) { |
409 | visited) { | ||
410 | unsigned int oldpos, size; | 410 | unsigned int oldpos, size; |
411 | 411 | ||
412 | if ((strcmp(t->target.u.user.name, | 412 | if ((strcmp(t->target.u.user.name, |
@@ -474,14 +474,12 @@ next: | |||
474 | return 1; | 474 | return 1; |
475 | } | 475 | } |
476 | 476 | ||
477 | static inline int check_entry(const struct arpt_entry *e, const char *name) | 477 | static inline int check_entry(const struct arpt_entry *e) |
478 | { | 478 | { |
479 | const struct xt_entry_target *t; | 479 | const struct xt_entry_target *t; |
480 | 480 | ||
481 | if (!arp_checkentry(&e->arp)) { | 481 | if (!arp_checkentry(&e->arp)) |
482 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); | ||
483 | return -EINVAL; | 482 | return -EINVAL; |
484 | } | ||
485 | 483 | ||
486 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) | 484 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) |
487 | return -EINVAL; | 485 | return -EINVAL; |
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | |||
522 | struct xt_target *target; | 520 | struct xt_target *target; |
523 | int ret; | 521 | int ret; |
524 | 522 | ||
525 | ret = check_entry(e, name); | ||
526 | if (ret) | ||
527 | return ret; | ||
528 | |||
529 | e->counters.pcnt = xt_percpu_counter_alloc(); | 523 | e->counters.pcnt = xt_percpu_counter_alloc(); |
530 | if (IS_ERR_VALUE(e->counters.pcnt)) | 524 | if (IS_ERR_VALUE(e->counters.pcnt)) |
531 | return -ENOMEM; | 525 | return -ENOMEM; |
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e) | |||
557 | const struct xt_entry_target *t; | 551 | const struct xt_entry_target *t; |
558 | unsigned int verdict; | 552 | unsigned int verdict; |
559 | 553 | ||
560 | if (!unconditional(&e->arp)) | 554 | if (!unconditional(e)) |
561 | return false; | 555 | return false; |
562 | t = arpt_get_target_c(e); | 556 | t = arpt_get_target_c(e); |
563 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 557 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
576 | unsigned int valid_hooks) | 570 | unsigned int valid_hooks) |
577 | { | 571 | { |
578 | unsigned int h; | 572 | unsigned int h; |
573 | int err; | ||
579 | 574 | ||
580 | if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || | 575 | if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || |
581 | (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { | 576 | (unsigned char *)e + sizeof(struct arpt_entry) >= limit || |
577 | (unsigned char *)e + e->next_offset > limit) { | ||
582 | duprintf("Bad offset %p\n", e); | 578 | duprintf("Bad offset %p\n", e); |
583 | return -EINVAL; | 579 | return -EINVAL; |
584 | } | 580 | } |
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
590 | return -EINVAL; | 586 | return -EINVAL; |
591 | } | 587 | } |
592 | 588 | ||
589 | err = check_entry(e); | ||
590 | if (err) | ||
591 | return err; | ||
592 | |||
593 | /* Check hooks & underflows */ | 593 | /* Check hooks & underflows */ |
594 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 594 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
595 | if (!(valid_hooks & (1 << h))) | 595 | if (!(valid_hooks & (1 << h))) |
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
598 | newinfo->hook_entry[h] = hook_entries[h]; | 598 | newinfo->hook_entry[h] = hook_entries[h]; |
599 | if ((unsigned char *)e - base == underflows[h]) { | 599 | if ((unsigned char *)e - base == underflows[h]) { |
600 | if (!check_underflow(e)) { | 600 | if (!check_underflow(e)) { |
601 | pr_err("Underflows must be unconditional and " | 601 | pr_debug("Underflows must be unconditional and " |
602 | "use the STANDARD target with " | 602 | "use the STANDARD target with " |
603 | "ACCEPT/DROP\n"); | 603 | "ACCEPT/DROP\n"); |
604 | return -EINVAL; | 604 | return -EINVAL; |
605 | } | 605 | } |
606 | newinfo->underflow[h] = underflows[h]; | 606 | newinfo->underflow[h] = underflows[h]; |
@@ -969,6 +969,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, | |||
969 | sizeof(struct arpt_get_entries) + get.size); | 969 | sizeof(struct arpt_get_entries) + get.size); |
970 | return -EINVAL; | 970 | return -EINVAL; |
971 | } | 971 | } |
972 | get.name[sizeof(get.name) - 1] = '\0'; | ||
972 | 973 | ||
973 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 974 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
974 | if (!IS_ERR_OR_NULL(t)) { | 975 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1233,7 +1234,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | |||
1233 | 1234 | ||
1234 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1235 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1235 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || | 1236 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || |
1236 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { | 1237 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || |
1238 | (unsigned char *)e + e->next_offset > limit) { | ||
1237 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1239 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1238 | return -EINVAL; | 1240 | return -EINVAL; |
1239 | } | 1241 | } |
@@ -1246,7 +1248,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | |||
1246 | } | 1248 | } |
1247 | 1249 | ||
1248 | /* For purposes of check_entry casting the compat entry is fine */ | 1250 | /* For purposes of check_entry casting the compat entry is fine */ |
1249 | ret = check_entry((struct arpt_entry *)e, name); | 1251 | ret = check_entry((struct arpt_entry *)e); |
1250 | if (ret) | 1252 | if (ret) |
1251 | return ret; | 1253 | return ret; |
1252 | 1254 | ||
@@ -1662,6 +1664,7 @@ static int compat_get_entries(struct net *net, | |||
1662 | *len, sizeof(get) + get.size); | 1664 | *len, sizeof(get) + get.size); |
1663 | return -EINVAL; | 1665 | return -EINVAL; |
1664 | } | 1666 | } |
1667 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1665 | 1668 | ||
1666 | xt_compat_lock(NFPROTO_ARP); | 1669 | xt_compat_lock(NFPROTO_ARP); |
1667 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 1670 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index e53f8d6f326d..631c100a1338 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset) | |||
168 | 168 | ||
169 | /* All zeroes == unconditional rule. */ | 169 | /* All zeroes == unconditional rule. */ |
170 | /* Mildly perf critical (only if packet tracing is on) */ | 170 | /* Mildly perf critical (only if packet tracing is on) */ |
171 | static inline bool unconditional(const struct ipt_ip *ip) | 171 | static inline bool unconditional(const struct ipt_entry *e) |
172 | { | 172 | { |
173 | static const struct ipt_ip uncond; | 173 | static const struct ipt_ip uncond; |
174 | 174 | ||
175 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; | 175 | return e->target_offset == sizeof(struct ipt_entry) && |
176 | memcmp(&e->ip, &uncond, sizeof(uncond)) == 0; | ||
176 | #undef FWINV | 177 | #undef FWINV |
177 | } | 178 | } |
178 | 179 | ||
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, | |||
229 | } else if (s == e) { | 230 | } else if (s == e) { |
230 | (*rulenum)++; | 231 | (*rulenum)++; |
231 | 232 | ||
232 | if (s->target_offset == sizeof(struct ipt_entry) && | 233 | if (unconditional(s) && |
233 | strcmp(t->target.u.kernel.target->name, | 234 | strcmp(t->target.u.kernel.target->name, |
234 | XT_STANDARD_TARGET) == 0 && | 235 | XT_STANDARD_TARGET) == 0 && |
235 | t->verdict < 0 && | 236 | t->verdict < 0) { |
236 | unconditional(&s->ip)) { | ||
237 | /* Tail of chains: STANDARD target (return/policy) */ | 237 | /* Tail of chains: STANDARD target (return/policy) */ |
238 | *comment = *chainname == hookname | 238 | *comment = *chainname == hookname |
239 | ? comments[NF_IP_TRACE_COMMENT_POLICY] | 239 | ? comments[NF_IP_TRACE_COMMENT_POLICY] |
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
476 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); | 476 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); |
477 | 477 | ||
478 | /* Unconditional return/END. */ | 478 | /* Unconditional return/END. */ |
479 | if ((e->target_offset == sizeof(struct ipt_entry) && | 479 | if ((unconditional(e) && |
480 | (strcmp(t->target.u.user.name, | 480 | (strcmp(t->target.u.user.name, |
481 | XT_STANDARD_TARGET) == 0) && | 481 | XT_STANDARD_TARGET) == 0) && |
482 | t->verdict < 0 && unconditional(&e->ip)) || | 482 | t->verdict < 0) || visited) { |
483 | visited) { | ||
484 | unsigned int oldpos, size; | 483 | unsigned int oldpos, size; |
485 | 484 | ||
486 | if ((strcmp(t->target.u.user.name, | 485 | if ((strcmp(t->target.u.user.name, |
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) | |||
569 | } | 568 | } |
570 | 569 | ||
571 | static int | 570 | static int |
572 | check_entry(const struct ipt_entry *e, const char *name) | 571 | check_entry(const struct ipt_entry *e) |
573 | { | 572 | { |
574 | const struct xt_entry_target *t; | 573 | const struct xt_entry_target *t; |
575 | 574 | ||
576 | if (!ip_checkentry(&e->ip)) { | 575 | if (!ip_checkentry(&e->ip)) |
577 | duprintf("ip check failed %p %s.\n", e, name); | ||
578 | return -EINVAL; | 576 | return -EINVAL; |
579 | } | ||
580 | 577 | ||
581 | if (e->target_offset + sizeof(struct xt_entry_target) > | 578 | if (e->target_offset + sizeof(struct xt_entry_target) > |
582 | e->next_offset) | 579 | e->next_offset) |
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, | |||
666 | struct xt_mtchk_param mtpar; | 663 | struct xt_mtchk_param mtpar; |
667 | struct xt_entry_match *ematch; | 664 | struct xt_entry_match *ematch; |
668 | 665 | ||
669 | ret = check_entry(e, name); | ||
670 | if (ret) | ||
671 | return ret; | ||
672 | |||
673 | e->counters.pcnt = xt_percpu_counter_alloc(); | 666 | e->counters.pcnt = xt_percpu_counter_alloc(); |
674 | if (IS_ERR_VALUE(e->counters.pcnt)) | 667 | if (IS_ERR_VALUE(e->counters.pcnt)) |
675 | return -ENOMEM; | 668 | return -ENOMEM; |
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e) | |||
721 | const struct xt_entry_target *t; | 714 | const struct xt_entry_target *t; |
722 | unsigned int verdict; | 715 | unsigned int verdict; |
723 | 716 | ||
724 | if (!unconditional(&e->ip)) | 717 | if (!unconditional(e)) |
725 | return false; | 718 | return false; |
726 | t = ipt_get_target_c(e); | 719 | t = ipt_get_target_c(e); |
727 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 720 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
741 | unsigned int valid_hooks) | 734 | unsigned int valid_hooks) |
742 | { | 735 | { |
743 | unsigned int h; | 736 | unsigned int h; |
737 | int err; | ||
744 | 738 | ||
745 | if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || | 739 | if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || |
746 | (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { | 740 | (unsigned char *)e + sizeof(struct ipt_entry) >= limit || |
741 | (unsigned char *)e + e->next_offset > limit) { | ||
747 | duprintf("Bad offset %p\n", e); | 742 | duprintf("Bad offset %p\n", e); |
748 | return -EINVAL; | 743 | return -EINVAL; |
749 | } | 744 | } |
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
755 | return -EINVAL; | 750 | return -EINVAL; |
756 | } | 751 | } |
757 | 752 | ||
753 | err = check_entry(e); | ||
754 | if (err) | ||
755 | return err; | ||
756 | |||
758 | /* Check hooks & underflows */ | 757 | /* Check hooks & underflows */ |
759 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 758 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
760 | if (!(valid_hooks & (1 << h))) | 759 | if (!(valid_hooks & (1 << h))) |
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
763 | newinfo->hook_entry[h] = hook_entries[h]; | 762 | newinfo->hook_entry[h] = hook_entries[h]; |
764 | if ((unsigned char *)e - base == underflows[h]) { | 763 | if ((unsigned char *)e - base == underflows[h]) { |
765 | if (!check_underflow(e)) { | 764 | if (!check_underflow(e)) { |
766 | pr_err("Underflows must be unconditional and " | 765 | pr_debug("Underflows must be unconditional and " |
767 | "use the STANDARD target with " | 766 | "use the STANDARD target with " |
768 | "ACCEPT/DROP\n"); | 767 | "ACCEPT/DROP\n"); |
769 | return -EINVAL; | 768 | return -EINVAL; |
770 | } | 769 | } |
771 | newinfo->underflow[h] = underflows[h]; | 770 | newinfo->underflow[h] = underflows[h]; |
@@ -1157,6 +1156,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, | |||
1157 | *len, sizeof(get) + get.size); | 1156 | *len, sizeof(get) + get.size); |
1158 | return -EINVAL; | 1157 | return -EINVAL; |
1159 | } | 1158 | } |
1159 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1160 | 1160 | ||
1161 | t = xt_find_table_lock(net, AF_INET, get.name); | 1161 | t = xt_find_table_lock(net, AF_INET, get.name); |
1162 | if (!IS_ERR_OR_NULL(t)) { | 1162 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1493,7 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | |||
1493 | 1493 | ||
1494 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1494 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1495 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || | 1495 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || |
1496 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { | 1496 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || |
1497 | (unsigned char *)e + e->next_offset > limit) { | ||
1497 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1498 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1498 | return -EINVAL; | 1499 | return -EINVAL; |
1499 | } | 1500 | } |
@@ -1506,7 +1507,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | |||
1506 | } | 1507 | } |
1507 | 1508 | ||
1508 | /* For purposes of check_entry casting the compat entry is fine */ | 1509 | /* For purposes of check_entry casting the compat entry is fine */ |
1509 | ret = check_entry((struct ipt_entry *)e, name); | 1510 | ret = check_entry((struct ipt_entry *)e); |
1510 | if (ret) | 1511 | if (ret) |
1511 | return ret; | 1512 | return ret; |
1512 | 1513 | ||
@@ -1935,6 +1936,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, | |||
1935 | *len, sizeof(get) + get.size); | 1936 | *len, sizeof(get) + get.size); |
1936 | return -EINVAL; | 1937 | return -EINVAL; |
1937 | } | 1938 | } |
1939 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1938 | 1940 | ||
1939 | xt_compat_lock(AF_INET); | 1941 | xt_compat_lock(AF_INET); |
1940 | t = xt_find_table_lock(net, AF_INET, get.name); | 1942 | t = xt_find_table_lock(net, AF_INET, get.name); |
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 7b8fbb352877..db5b87509446 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -18,10 +18,10 @@ | |||
18 | #include <net/netfilter/nf_conntrack_synproxy.h> | 18 | #include <net/netfilter/nf_conntrack_synproxy.h> |
19 | 19 | ||
20 | static struct iphdr * | 20 | static struct iphdr * |
21 | synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr) | 21 | synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr, |
22 | __be32 daddr) | ||
22 | { | 23 | { |
23 | struct iphdr *iph; | 24 | struct iphdr *iph; |
24 | struct net *net = sock_net(skb->sk); | ||
25 | 25 | ||
26 | skb_reset_network_header(skb); | 26 | skb_reset_network_header(skb); |
27 | iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); | 27 | iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); |
@@ -40,14 +40,12 @@ synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr) | |||
40 | } | 40 | } |
41 | 41 | ||
42 | static void | 42 | static void |
43 | synproxy_send_tcp(const struct synproxy_net *snet, | 43 | synproxy_send_tcp(struct net *net, |
44 | const struct sk_buff *skb, struct sk_buff *nskb, | 44 | const struct sk_buff *skb, struct sk_buff *nskb, |
45 | struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, | 45 | struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, |
46 | struct iphdr *niph, struct tcphdr *nth, | 46 | struct iphdr *niph, struct tcphdr *nth, |
47 | unsigned int tcp_hdr_size) | 47 | unsigned int tcp_hdr_size) |
48 | { | 48 | { |
49 | struct net *net = nf_ct_net(snet->tmpl); | ||
50 | |||
51 | nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); | 49 | nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); |
52 | nskb->ip_summed = CHECKSUM_PARTIAL; | 50 | nskb->ip_summed = CHECKSUM_PARTIAL; |
53 | nskb->csum_start = (unsigned char *)nth - nskb->head; | 51 | nskb->csum_start = (unsigned char *)nth - nskb->head; |
@@ -72,7 +70,7 @@ free_nskb: | |||
72 | } | 70 | } |
73 | 71 | ||
74 | static void | 72 | static void |
75 | synproxy_send_client_synack(const struct synproxy_net *snet, | 73 | synproxy_send_client_synack(struct net *net, |
76 | const struct sk_buff *skb, const struct tcphdr *th, | 74 | const struct sk_buff *skb, const struct tcphdr *th, |
77 | const struct synproxy_options *opts) | 75 | const struct synproxy_options *opts) |
78 | { | 76 | { |
@@ -91,7 +89,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet, | |||
91 | return; | 89 | return; |
92 | skb_reserve(nskb, MAX_TCP_HEADER); | 90 | skb_reserve(nskb, MAX_TCP_HEADER); |
93 | 91 | ||
94 | niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); | 92 | niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr); |
95 | 93 | ||
96 | skb_reset_transport_header(nskb); | 94 | skb_reset_transport_header(nskb); |
97 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 95 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -109,15 +107,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet, | |||
109 | 107 | ||
110 | synproxy_build_options(nth, opts); | 108 | synproxy_build_options(nth, opts); |
111 | 109 | ||
112 | synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, | 110 | synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, |
113 | niph, nth, tcp_hdr_size); | 111 | niph, nth, tcp_hdr_size); |
114 | } | 112 | } |
115 | 113 | ||
116 | static void | 114 | static void |
117 | synproxy_send_server_syn(const struct synproxy_net *snet, | 115 | synproxy_send_server_syn(struct net *net, |
118 | const struct sk_buff *skb, const struct tcphdr *th, | 116 | const struct sk_buff *skb, const struct tcphdr *th, |
119 | const struct synproxy_options *opts, u32 recv_seq) | 117 | const struct synproxy_options *opts, u32 recv_seq) |
120 | { | 118 | { |
119 | struct synproxy_net *snet = synproxy_pernet(net); | ||
121 | struct sk_buff *nskb; | 120 | struct sk_buff *nskb; |
122 | struct iphdr *iph, *niph; | 121 | struct iphdr *iph, *niph; |
123 | struct tcphdr *nth; | 122 | struct tcphdr *nth; |
@@ -132,7 +131,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, | |||
132 | return; | 131 | return; |
133 | skb_reserve(nskb, MAX_TCP_HEADER); | 132 | skb_reserve(nskb, MAX_TCP_HEADER); |
134 | 133 | ||
135 | niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); | 134 | niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr); |
136 | 135 | ||
137 | skb_reset_transport_header(nskb); | 136 | skb_reset_transport_header(nskb); |
138 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 137 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -153,12 +152,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet, | |||
153 | 152 | ||
154 | synproxy_build_options(nth, opts); | 153 | synproxy_build_options(nth, opts); |
155 | 154 | ||
156 | synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, | 155 | synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, |
157 | niph, nth, tcp_hdr_size); | 156 | niph, nth, tcp_hdr_size); |
158 | } | 157 | } |
159 | 158 | ||
160 | static void | 159 | static void |
161 | synproxy_send_server_ack(const struct synproxy_net *snet, | 160 | synproxy_send_server_ack(struct net *net, |
162 | const struct ip_ct_tcp *state, | 161 | const struct ip_ct_tcp *state, |
163 | const struct sk_buff *skb, const struct tcphdr *th, | 162 | const struct sk_buff *skb, const struct tcphdr *th, |
164 | const struct synproxy_options *opts) | 163 | const struct synproxy_options *opts) |
@@ -177,7 +176,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, | |||
177 | return; | 176 | return; |
178 | skb_reserve(nskb, MAX_TCP_HEADER); | 177 | skb_reserve(nskb, MAX_TCP_HEADER); |
179 | 178 | ||
180 | niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); | 179 | niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr); |
181 | 180 | ||
182 | skb_reset_transport_header(nskb); | 181 | skb_reset_transport_header(nskb); |
183 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 182 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -193,11 +192,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet, | |||
193 | 192 | ||
194 | synproxy_build_options(nth, opts); | 193 | synproxy_build_options(nth, opts); |
195 | 194 | ||
196 | synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); | 195 | synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); |
197 | } | 196 | } |
198 | 197 | ||
199 | static void | 198 | static void |
200 | synproxy_send_client_ack(const struct synproxy_net *snet, | 199 | synproxy_send_client_ack(struct net *net, |
201 | const struct sk_buff *skb, const struct tcphdr *th, | 200 | const struct sk_buff *skb, const struct tcphdr *th, |
202 | const struct synproxy_options *opts) | 201 | const struct synproxy_options *opts) |
203 | { | 202 | { |
@@ -215,7 +214,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet, | |||
215 | return; | 214 | return; |
216 | skb_reserve(nskb, MAX_TCP_HEADER); | 215 | skb_reserve(nskb, MAX_TCP_HEADER); |
217 | 216 | ||
218 | niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); | 217 | niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr); |
219 | 218 | ||
220 | skb_reset_transport_header(nskb); | 219 | skb_reset_transport_header(nskb); |
221 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 220 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -231,15 +230,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet, | |||
231 | 230 | ||
232 | synproxy_build_options(nth, opts); | 231 | synproxy_build_options(nth, opts); |
233 | 232 | ||
234 | synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, | 233 | synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, |
235 | niph, nth, tcp_hdr_size); | 234 | niph, nth, tcp_hdr_size); |
236 | } | 235 | } |
237 | 236 | ||
238 | static bool | 237 | static bool |
239 | synproxy_recv_client_ack(const struct synproxy_net *snet, | 238 | synproxy_recv_client_ack(struct net *net, |
240 | const struct sk_buff *skb, const struct tcphdr *th, | 239 | const struct sk_buff *skb, const struct tcphdr *th, |
241 | struct synproxy_options *opts, u32 recv_seq) | 240 | struct synproxy_options *opts, u32 recv_seq) |
242 | { | 241 | { |
242 | struct synproxy_net *snet = synproxy_pernet(net); | ||
243 | int mss; | 243 | int mss; |
244 | 244 | ||
245 | mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); | 245 | mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); |
@@ -255,7 +255,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet, | |||
255 | if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) | 255 | if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) |
256 | synproxy_check_timestamp_cookie(opts); | 256 | synproxy_check_timestamp_cookie(opts); |
257 | 257 | ||
258 | synproxy_send_server_syn(snet, skb, th, opts, recv_seq); | 258 | synproxy_send_server_syn(net, skb, th, opts, recv_seq); |
259 | return true; | 259 | return true; |
260 | } | 260 | } |
261 | 261 | ||
@@ -263,7 +263,8 @@ static unsigned int | |||
263 | synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | 263 | synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) |
264 | { | 264 | { |
265 | const struct xt_synproxy_info *info = par->targinfo; | 265 | const struct xt_synproxy_info *info = par->targinfo; |
266 | struct synproxy_net *snet = synproxy_pernet(par->net); | 266 | struct net *net = par->net; |
267 | struct synproxy_net *snet = synproxy_pernet(net); | ||
267 | struct synproxy_options opts = {}; | 268 | struct synproxy_options opts = {}; |
268 | struct tcphdr *th, _th; | 269 | struct tcphdr *th, _th; |
269 | 270 | ||
@@ -292,12 +293,12 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | |||
292 | XT_SYNPROXY_OPT_SACK_PERM | | 293 | XT_SYNPROXY_OPT_SACK_PERM | |
293 | XT_SYNPROXY_OPT_ECN); | 294 | XT_SYNPROXY_OPT_ECN); |
294 | 295 | ||
295 | synproxy_send_client_synack(snet, skb, th, &opts); | 296 | synproxy_send_client_synack(net, skb, th, &opts); |
296 | return NF_DROP; | 297 | return NF_DROP; |
297 | 298 | ||
298 | } else if (th->ack && !(th->fin || th->rst || th->syn)) { | 299 | } else if (th->ack && !(th->fin || th->rst || th->syn)) { |
299 | /* ACK from client */ | 300 | /* ACK from client */ |
300 | synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); | 301 | synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); |
301 | return NF_DROP; | 302 | return NF_DROP; |
302 | } | 303 | } |
303 | 304 | ||
@@ -308,7 +309,8 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
308 | struct sk_buff *skb, | 309 | struct sk_buff *skb, |
309 | const struct nf_hook_state *nhs) | 310 | const struct nf_hook_state *nhs) |
310 | { | 311 | { |
311 | struct synproxy_net *snet = synproxy_pernet(nhs->net); | 312 | struct net *net = nhs->net; |
313 | struct synproxy_net *snet = synproxy_pernet(net); | ||
312 | enum ip_conntrack_info ctinfo; | 314 | enum ip_conntrack_info ctinfo; |
313 | struct nf_conn *ct; | 315 | struct nf_conn *ct; |
314 | struct nf_conn_synproxy *synproxy; | 316 | struct nf_conn_synproxy *synproxy; |
@@ -365,7 +367,7 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
365 | * therefore we need to add 1 to make the SYN sequence | 367 | * therefore we need to add 1 to make the SYN sequence |
366 | * number match the one of first SYN. | 368 | * number match the one of first SYN. |
367 | */ | 369 | */ |
368 | if (synproxy_recv_client_ack(snet, skb, th, &opts, | 370 | if (synproxy_recv_client_ack(net, skb, th, &opts, |
369 | ntohl(th->seq) + 1)) | 371 | ntohl(th->seq) + 1)) |
370 | this_cpu_inc(snet->stats->cookie_retrans); | 372 | this_cpu_inc(snet->stats->cookie_retrans); |
371 | 373 | ||
@@ -391,12 +393,12 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
391 | XT_SYNPROXY_OPT_SACK_PERM); | 393 | XT_SYNPROXY_OPT_SACK_PERM); |
392 | 394 | ||
393 | swap(opts.tsval, opts.tsecr); | 395 | swap(opts.tsval, opts.tsecr); |
394 | synproxy_send_server_ack(snet, state, skb, th, &opts); | 396 | synproxy_send_server_ack(net, state, skb, th, &opts); |
395 | 397 | ||
396 | nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); | 398 | nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); |
397 | 399 | ||
398 | swap(opts.tsval, opts.tsecr); | 400 | swap(opts.tsval, opts.tsecr); |
399 | synproxy_send_client_ack(snet, skb, th, &opts); | 401 | synproxy_send_client_ack(net, skb, th, &opts); |
400 | 402 | ||
401 | consume_skb(skb); | 403 | consume_skb(skb); |
402 | return NF_STOLEN; | 404 | return NF_STOLEN; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 84f9baf7aee8..86b67b70b626 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset) | |||
198 | 198 | ||
199 | /* All zeroes == unconditional rule. */ | 199 | /* All zeroes == unconditional rule. */ |
200 | /* Mildly perf critical (only if packet tracing is on) */ | 200 | /* Mildly perf critical (only if packet tracing is on) */ |
201 | static inline bool unconditional(const struct ip6t_ip6 *ipv6) | 201 | static inline bool unconditional(const struct ip6t_entry *e) |
202 | { | 202 | { |
203 | static const struct ip6t_ip6 uncond; | 203 | static const struct ip6t_ip6 uncond; |
204 | 204 | ||
205 | return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; | 205 | return e->target_offset == sizeof(struct ip6t_entry) && |
206 | memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0; | ||
206 | } | 207 | } |
207 | 208 | ||
208 | static inline const struct xt_entry_target * | 209 | static inline const struct xt_entry_target * |
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, | |||
258 | } else if (s == e) { | 259 | } else if (s == e) { |
259 | (*rulenum)++; | 260 | (*rulenum)++; |
260 | 261 | ||
261 | if (s->target_offset == sizeof(struct ip6t_entry) && | 262 | if (unconditional(s) && |
262 | strcmp(t->target.u.kernel.target->name, | 263 | strcmp(t->target.u.kernel.target->name, |
263 | XT_STANDARD_TARGET) == 0 && | 264 | XT_STANDARD_TARGET) == 0 && |
264 | t->verdict < 0 && | 265 | t->verdict < 0) { |
265 | unconditional(&s->ipv6)) { | ||
266 | /* Tail of chains: STANDARD target (return/policy) */ | 266 | /* Tail of chains: STANDARD target (return/policy) */ |
267 | *comment = *chainname == hookname | 267 | *comment = *chainname == hookname |
268 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] | 268 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] |
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
488 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); | 488 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); |
489 | 489 | ||
490 | /* Unconditional return/END. */ | 490 | /* Unconditional return/END. */ |
491 | if ((e->target_offset == sizeof(struct ip6t_entry) && | 491 | if ((unconditional(e) && |
492 | (strcmp(t->target.u.user.name, | 492 | (strcmp(t->target.u.user.name, |
493 | XT_STANDARD_TARGET) == 0) && | 493 | XT_STANDARD_TARGET) == 0) && |
494 | t->verdict < 0 && | 494 | t->verdict < 0) || visited) { |
495 | unconditional(&e->ipv6)) || visited) { | ||
496 | unsigned int oldpos, size; | 495 | unsigned int oldpos, size; |
497 | 496 | ||
498 | if ((strcmp(t->target.u.user.name, | 497 | if ((strcmp(t->target.u.user.name, |
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) | |||
581 | } | 580 | } |
582 | 581 | ||
583 | static int | 582 | static int |
584 | check_entry(const struct ip6t_entry *e, const char *name) | 583 | check_entry(const struct ip6t_entry *e) |
585 | { | 584 | { |
586 | const struct xt_entry_target *t; | 585 | const struct xt_entry_target *t; |
587 | 586 | ||
588 | if (!ip6_checkentry(&e->ipv6)) { | 587 | if (!ip6_checkentry(&e->ipv6)) |
589 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | ||
590 | return -EINVAL; | 588 | return -EINVAL; |
591 | } | ||
592 | 589 | ||
593 | if (e->target_offset + sizeof(struct xt_entry_target) > | 590 | if (e->target_offset + sizeof(struct xt_entry_target) > |
594 | e->next_offset) | 591 | e->next_offset) |
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | |||
679 | struct xt_mtchk_param mtpar; | 676 | struct xt_mtchk_param mtpar; |
680 | struct xt_entry_match *ematch; | 677 | struct xt_entry_match *ematch; |
681 | 678 | ||
682 | ret = check_entry(e, name); | ||
683 | if (ret) | ||
684 | return ret; | ||
685 | |||
686 | e->counters.pcnt = xt_percpu_counter_alloc(); | 679 | e->counters.pcnt = xt_percpu_counter_alloc(); |
687 | if (IS_ERR_VALUE(e->counters.pcnt)) | 680 | if (IS_ERR_VALUE(e->counters.pcnt)) |
688 | return -ENOMEM; | 681 | return -ENOMEM; |
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e) | |||
733 | const struct xt_entry_target *t; | 726 | const struct xt_entry_target *t; |
734 | unsigned int verdict; | 727 | unsigned int verdict; |
735 | 728 | ||
736 | if (!unconditional(&e->ipv6)) | 729 | if (!unconditional(e)) |
737 | return false; | 730 | return false; |
738 | t = ip6t_get_target_c(e); | 731 | t = ip6t_get_target_c(e); |
739 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 732 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
753 | unsigned int valid_hooks) | 746 | unsigned int valid_hooks) |
754 | { | 747 | { |
755 | unsigned int h; | 748 | unsigned int h; |
749 | int err; | ||
756 | 750 | ||
757 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || | 751 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || |
758 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { | 752 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || |
753 | (unsigned char *)e + e->next_offset > limit) { | ||
759 | duprintf("Bad offset %p\n", e); | 754 | duprintf("Bad offset %p\n", e); |
760 | return -EINVAL; | 755 | return -EINVAL; |
761 | } | 756 | } |
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
767 | return -EINVAL; | 762 | return -EINVAL; |
768 | } | 763 | } |
769 | 764 | ||
765 | err = check_entry(e); | ||
766 | if (err) | ||
767 | return err; | ||
768 | |||
770 | /* Check hooks & underflows */ | 769 | /* Check hooks & underflows */ |
771 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 770 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
772 | if (!(valid_hooks & (1 << h))) | 771 | if (!(valid_hooks & (1 << h))) |
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
775 | newinfo->hook_entry[h] = hook_entries[h]; | 774 | newinfo->hook_entry[h] = hook_entries[h]; |
776 | if ((unsigned char *)e - base == underflows[h]) { | 775 | if ((unsigned char *)e - base == underflows[h]) { |
777 | if (!check_underflow(e)) { | 776 | if (!check_underflow(e)) { |
778 | pr_err("Underflows must be unconditional and " | 777 | pr_debug("Underflows must be unconditional and " |
779 | "use the STANDARD target with " | 778 | "use the STANDARD target with " |
780 | "ACCEPT/DROP\n"); | 779 | "ACCEPT/DROP\n"); |
781 | return -EINVAL; | 780 | return -EINVAL; |
782 | } | 781 | } |
783 | newinfo->underflow[h] = underflows[h]; | 782 | newinfo->underflow[h] = underflows[h]; |
@@ -1169,6 +1168,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, | |||
1169 | *len, sizeof(get) + get.size); | 1168 | *len, sizeof(get) + get.size); |
1170 | return -EINVAL; | 1169 | return -EINVAL; |
1171 | } | 1170 | } |
1171 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1172 | 1172 | ||
1173 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1173 | t = xt_find_table_lock(net, AF_INET6, get.name); |
1174 | if (!IS_ERR_OR_NULL(t)) { | 1174 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1505,7 +1505,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, | |||
1505 | 1505 | ||
1506 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1506 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1507 | if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || | 1507 | if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || |
1508 | (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { | 1508 | (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || |
1509 | (unsigned char *)e + e->next_offset > limit) { | ||
1509 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1510 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1510 | return -EINVAL; | 1511 | return -EINVAL; |
1511 | } | 1512 | } |
@@ -1518,7 +1519,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, | |||
1518 | } | 1519 | } |
1519 | 1520 | ||
1520 | /* For purposes of check_entry casting the compat entry is fine */ | 1521 | /* For purposes of check_entry casting the compat entry is fine */ |
1521 | ret = check_entry((struct ip6t_entry *)e, name); | 1522 | ret = check_entry((struct ip6t_entry *)e); |
1522 | if (ret) | 1523 | if (ret) |
1523 | return ret; | 1524 | return ret; |
1524 | 1525 | ||
@@ -1944,6 +1945,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, | |||
1944 | *len, sizeof(get) + get.size); | 1945 | *len, sizeof(get) + get.size); |
1945 | return -EINVAL; | 1946 | return -EINVAL; |
1946 | } | 1947 | } |
1948 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1947 | 1949 | ||
1948 | xt_compat_lock(AF_INET6); | 1950 | xt_compat_lock(AF_INET6); |
1949 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1951 | t = xt_find_table_lock(net, AF_INET6, get.name); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fd25e447a5fa..8125931106be 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -843,8 +843,8 @@ start_lookup: | |||
843 | flush_stack(stack, count, skb, count - 1); | 843 | flush_stack(stack, count, skb, count - 1); |
844 | } else { | 844 | } else { |
845 | if (!inner_flushed) | 845 | if (!inner_flushed) |
846 | UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, | 846 | UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, |
847 | proto == IPPROTO_UDPLITE); | 847 | proto == IPPROTO_UDPLITE); |
848 | consume_skb(skb); | 848 | consume_skb(skb); |
849 | } | 849 | } |
850 | return 0; | 850 | return 0; |
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index b0bc475f641e..2e8e7e5fb4a6 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h | |||
@@ -95,7 +95,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) | |||
95 | if (!nested) | 95 | if (!nested) |
96 | goto nla_put_failure; | 96 | goto nla_put_failure; |
97 | if (mtype_do_head(skb, map) || | 97 | if (mtype_do_head(skb, map) || |
98 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 98 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
99 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) | 99 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) |
100 | goto nla_put_failure; | 100 | goto nla_put_failure; |
101 | if (unlikely(ip_set_put_flags(skb, set))) | 101 | if (unlikely(ip_set_put_flags(skb, set))) |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 7e6568cad494..a748b0c2c981 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -497,6 +497,26 @@ __ip_set_put(struct ip_set *set) | |||
497 | write_unlock_bh(&ip_set_ref_lock); | 497 | write_unlock_bh(&ip_set_ref_lock); |
498 | } | 498 | } |
499 | 499 | ||
500 | /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need | ||
501 | * a separate reference counter | ||
502 | */ | ||
503 | static inline void | ||
504 | __ip_set_get_netlink(struct ip_set *set) | ||
505 | { | ||
506 | write_lock_bh(&ip_set_ref_lock); | ||
507 | set->ref_netlink++; | ||
508 | write_unlock_bh(&ip_set_ref_lock); | ||
509 | } | ||
510 | |||
511 | static inline void | ||
512 | __ip_set_put_netlink(struct ip_set *set) | ||
513 | { | ||
514 | write_lock_bh(&ip_set_ref_lock); | ||
515 | BUG_ON(set->ref_netlink == 0); | ||
516 | set->ref_netlink--; | ||
517 | write_unlock_bh(&ip_set_ref_lock); | ||
518 | } | ||
519 | |||
500 | /* Add, del and test set entries from kernel. | 520 | /* Add, del and test set entries from kernel. |
501 | * | 521 | * |
502 | * The set behind the index must exist and must be referenced | 522 | * The set behind the index must exist and must be referenced |
@@ -1002,7 +1022,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl, | |||
1002 | if (!attr[IPSET_ATTR_SETNAME]) { | 1022 | if (!attr[IPSET_ATTR_SETNAME]) { |
1003 | for (i = 0; i < inst->ip_set_max; i++) { | 1023 | for (i = 0; i < inst->ip_set_max; i++) { |
1004 | s = ip_set(inst, i); | 1024 | s = ip_set(inst, i); |
1005 | if (s && s->ref) { | 1025 | if (s && (s->ref || s->ref_netlink)) { |
1006 | ret = -IPSET_ERR_BUSY; | 1026 | ret = -IPSET_ERR_BUSY; |
1007 | goto out; | 1027 | goto out; |
1008 | } | 1028 | } |
@@ -1024,7 +1044,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl, | |||
1024 | if (!s) { | 1044 | if (!s) { |
1025 | ret = -ENOENT; | 1045 | ret = -ENOENT; |
1026 | goto out; | 1046 | goto out; |
1027 | } else if (s->ref) { | 1047 | } else if (s->ref || s->ref_netlink) { |
1028 | ret = -IPSET_ERR_BUSY; | 1048 | ret = -IPSET_ERR_BUSY; |
1029 | goto out; | 1049 | goto out; |
1030 | } | 1050 | } |
@@ -1171,6 +1191,9 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb, | |||
1171 | from->family == to->family)) | 1191 | from->family == to->family)) |
1172 | return -IPSET_ERR_TYPE_MISMATCH; | 1192 | return -IPSET_ERR_TYPE_MISMATCH; |
1173 | 1193 | ||
1194 | if (from->ref_netlink || to->ref_netlink) | ||
1195 | return -EBUSY; | ||
1196 | |||
1174 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 1197 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
1175 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 1198 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
1176 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 1199 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
@@ -1206,7 +1229,7 @@ ip_set_dump_done(struct netlink_callback *cb) | |||
1206 | if (set->variant->uref) | 1229 | if (set->variant->uref) |
1207 | set->variant->uref(set, cb, false); | 1230 | set->variant->uref(set, cb, false); |
1208 | pr_debug("release set %s\n", set->name); | 1231 | pr_debug("release set %s\n", set->name); |
1209 | __ip_set_put_byindex(inst, index); | 1232 | __ip_set_put_netlink(set); |
1210 | } | 1233 | } |
1211 | return 0; | 1234 | return 0; |
1212 | } | 1235 | } |
@@ -1328,7 +1351,7 @@ dump_last: | |||
1328 | if (!cb->args[IPSET_CB_ARG0]) { | 1351 | if (!cb->args[IPSET_CB_ARG0]) { |
1329 | /* Start listing: make sure set won't be destroyed */ | 1352 | /* Start listing: make sure set won't be destroyed */ |
1330 | pr_debug("reference set\n"); | 1353 | pr_debug("reference set\n"); |
1331 | set->ref++; | 1354 | set->ref_netlink++; |
1332 | } | 1355 | } |
1333 | write_unlock_bh(&ip_set_ref_lock); | 1356 | write_unlock_bh(&ip_set_ref_lock); |
1334 | nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, | 1357 | nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, |
@@ -1396,7 +1419,7 @@ release_refcount: | |||
1396 | if (set->variant->uref) | 1419 | if (set->variant->uref) |
1397 | set->variant->uref(set, cb, false); | 1420 | set->variant->uref(set, cb, false); |
1398 | pr_debug("release set %s\n", set->name); | 1421 | pr_debug("release set %s\n", set->name); |
1399 | __ip_set_put_byindex(inst, index); | 1422 | __ip_set_put_netlink(set); |
1400 | cb->args[IPSET_CB_ARG0] = 0; | 1423 | cb->args[IPSET_CB_ARG0] = 0; |
1401 | } | 1424 | } |
1402 | out: | 1425 | out: |
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index e5336ab36d67..d32fd6b036bf 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
@@ -1082,7 +1082,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) | |||
1082 | if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) | 1082 | if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) |
1083 | goto nla_put_failure; | 1083 | goto nla_put_failure; |
1084 | #endif | 1084 | #endif |
1085 | if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 1085 | if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
1086 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) | 1086 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) |
1087 | goto nla_put_failure; | 1087 | goto nla_put_failure; |
1088 | if (unlikely(ip_set_put_flags(skb, set))) | 1088 | if (unlikely(ip_set_put_flags(skb, set))) |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 24c6c1962aea..a2a89e4e0a14 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
@@ -458,7 +458,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) | |||
458 | if (!nested) | 458 | if (!nested) |
459 | goto nla_put_failure; | 459 | goto nla_put_failure; |
460 | if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || | 460 | if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || |
461 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 461 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
462 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, | 462 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, |
463 | htonl(sizeof(*map) + n * set->dsize))) | 463 | htonl(sizeof(*map) + n * set->dsize))) |
464 | goto nla_put_failure; | 464 | goto nla_put_failure; |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 75429997ed41..cb5b630a645b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -582,7 +582,12 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, | |||
582 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 582 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
583 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); | 583 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); |
584 | if (err < 0) { | 584 | if (err < 0) { |
585 | queue->queue_user_dropped++; | 585 | if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { |
586 | failopen = 1; | ||
587 | err = 0; | ||
588 | } else { | ||
589 | queue->queue_user_dropped++; | ||
590 | } | ||
586 | goto err_out_unlock; | 591 | goto err_out_unlock; |
587 | } | 592 | } |
588 | 593 | ||
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 234a73344c6e..ce947292ae77 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig | |||
@@ -7,7 +7,9 @@ config OPENVSWITCH | |||
7 | depends on INET | 7 | depends on INET |
8 | depends on !NF_CONNTRACK || \ | 8 | depends on !NF_CONNTRACK || \ |
9 | (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ | 9 | (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ |
10 | (!NF_NAT || NF_NAT))) | 10 | (!NF_NAT || NF_NAT) && \ |
11 | (!NF_NAT_IPV4 || NF_NAT_IPV4) && \ | ||
12 | (!NF_NAT_IPV6 || NF_NAT_IPV6))) | ||
11 | select LIBCRC32C | 13 | select LIBCRC32C |
12 | select MPLS | 14 | select MPLS |
13 | select NET_MPLS_GSO | 15 | select NET_MPLS_GSO |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index dc5eb29fe7d6..1b9d286756be 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -535,14 +535,15 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, | |||
535 | switch (ctinfo) { | 535 | switch (ctinfo) { |
536 | case IP_CT_RELATED: | 536 | case IP_CT_RELATED: |
537 | case IP_CT_RELATED_REPLY: | 537 | case IP_CT_RELATED_REPLY: |
538 | if (skb->protocol == htons(ETH_P_IP) && | 538 | if (IS_ENABLED(CONFIG_NF_NAT_IPV4) && |
539 | skb->protocol == htons(ETH_P_IP) && | ||
539 | ip_hdr(skb)->protocol == IPPROTO_ICMP) { | 540 | ip_hdr(skb)->protocol == IPPROTO_ICMP) { |
540 | if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, | 541 | if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, |
541 | hooknum)) | 542 | hooknum)) |
542 | err = NF_DROP; | 543 | err = NF_DROP; |
543 | goto push; | 544 | goto push; |
544 | #if IS_ENABLED(CONFIG_NF_NAT_IPV6) | 545 | } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) && |
545 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 546 | skb->protocol == htons(ETH_P_IPV6)) { |
546 | __be16 frag_off; | 547 | __be16 frag_off; |
547 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 548 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
548 | int hdrlen = ipv6_skip_exthdr(skb, | 549 | int hdrlen = ipv6_skip_exthdr(skb, |
@@ -557,7 +558,6 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, | |||
557 | err = NF_DROP; | 558 | err = NF_DROP; |
558 | goto push; | 559 | goto push; |
559 | } | 560 | } |
560 | #endif | ||
561 | } | 561 | } |
562 | /* Non-ICMP, fall thru to initialize if needed. */ | 562 | /* Non-ICMP, fall thru to initialize if needed. */ |
563 | case IP_CT_NEW: | 563 | case IP_CT_NEW: |
@@ -664,11 +664,12 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, | |||
664 | 664 | ||
665 | /* Determine NAT type. | 665 | /* Determine NAT type. |
666 | * Check if the NAT type can be deduced from the tracked connection. | 666 | * Check if the NAT type can be deduced from the tracked connection. |
667 | * Make sure expected traffic is NATted only when committing. | 667 | * Make sure new expected connections (IP_CT_RELATED) are NATted only |
668 | * when committing. | ||
668 | */ | 669 | */ |
669 | if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW && | 670 | if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW && |
670 | ct->status & IPS_NAT_MASK && | 671 | ct->status & IPS_NAT_MASK && |
671 | (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) { | 672 | (ctinfo != IP_CT_RELATED || info->commit)) { |
672 | /* NAT an established or related connection like before. */ | 673 | /* NAT an established or related connection like before. */ |
673 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) | 674 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) |
674 | /* This is the REPLY direction for a connection | 675 | /* This is the REPLY direction for a connection |
@@ -968,7 +969,8 @@ static int parse_nat(const struct nlattr *attr, | |||
968 | break; | 969 | break; |
969 | 970 | ||
970 | case OVS_NAT_ATTR_IP_MIN: | 971 | case OVS_NAT_ATTR_IP_MIN: |
971 | nla_memcpy(&info->range.min_addr, a, nla_len(a)); | 972 | nla_memcpy(&info->range.min_addr, a, |
973 | sizeof(info->range.min_addr)); | ||
972 | info->range.flags |= NF_NAT_RANGE_MAP_IPS; | 974 | info->range.flags |= NF_NAT_RANGE_MAP_IPS; |
973 | break; | 975 | break; |
974 | 976 | ||
@@ -1238,7 +1240,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1238 | } | 1240 | } |
1239 | 1241 | ||
1240 | if (info->range.flags & NF_NAT_RANGE_MAP_IPS) { | 1242 | if (info->range.flags & NF_NAT_RANGE_MAP_IPS) { |
1241 | if (info->family == NFPROTO_IPV4) { | 1243 | if (IS_ENABLED(CONFIG_NF_NAT_IPV4) && |
1244 | info->family == NFPROTO_IPV4) { | ||
1242 | if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN, | 1245 | if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN, |
1243 | info->range.min_addr.ip) || | 1246 | info->range.min_addr.ip) || |
1244 | (info->range.max_addr.ip | 1247 | (info->range.max_addr.ip |
@@ -1246,8 +1249,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1246 | (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX, | 1249 | (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX, |
1247 | info->range.max_addr.ip)))) | 1250 | info->range.max_addr.ip)))) |
1248 | return false; | 1251 | return false; |
1249 | #if IS_ENABLED(CONFIG_NF_NAT_IPV6) | 1252 | } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) && |
1250 | } else if (info->family == NFPROTO_IPV6) { | 1253 | info->family == NFPROTO_IPV6) { |
1251 | if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN, | 1254 | if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN, |
1252 | &info->range.min_addr.in6) || | 1255 | &info->range.min_addr.in6) || |
1253 | (memcmp(&info->range.max_addr.in6, | 1256 | (memcmp(&info->range.max_addr.in6, |
@@ -1256,7 +1259,6 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1256 | (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX, | 1259 | (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX, |
1257 | &info->range.max_addr.in6)))) | 1260 | &info->range.max_addr.in6)))) |
1258 | return false; | 1261 | return false; |
1259 | #endif | ||
1260 | } else { | 1262 | } else { |
1261 | return false; | 1263 | return false; |
1262 | } | 1264 | } |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 736c004abfbc..97745351d58c 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -401,7 +401,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
401 | sk = chunk->skb->sk; | 401 | sk = chunk->skb->sk; |
402 | 402 | ||
403 | /* Allocate the new skb. */ | 403 | /* Allocate the new skb. */ |
404 | nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); | 404 | nskb = alloc_skb(packet->size + MAX_HEADER, gfp); |
405 | if (!nskb) | 405 | if (!nskb) |
406 | goto nomem; | 406 | goto nomem; |
407 | 407 | ||
@@ -523,8 +523,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
523 | */ | 523 | */ |
524 | if (auth) | 524 | if (auth) |
525 | sctp_auth_calculate_hmac(asoc, nskb, | 525 | sctp_auth_calculate_hmac(asoc, nskb, |
526 | (struct sctp_auth_chunk *)auth, | 526 | (struct sctp_auth_chunk *)auth, |
527 | GFP_ATOMIC); | 527 | gfp); |
528 | 528 | ||
529 | /* 2) Calculate the Adler-32 checksum of the whole packet, | 529 | /* 2) Calculate the Adler-32 checksum of the whole packet, |
530 | * including the SCTP common header and all the | 530 | * including the SCTP common header and all the |
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 8b5833c1ff2e..2b9b98f1c2ff 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
@@ -1079,7 +1079,7 @@ nla_put_failure: | |||
1079 | * @filter_dev: filter device | 1079 | * @filter_dev: filter device |
1080 | * @idx: | 1080 | * @idx: |
1081 | * | 1081 | * |
1082 | * Delete FDB entry from switch device. | 1082 | * Dump FDB entries from switch device. |
1083 | */ | 1083 | */ |
1084 | int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, | 1084 | int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, |
1085 | struct net_device *dev, | 1085 | struct net_device *dev, |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index ad7f5b3f9b61..1c4ad477ce93 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
292 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; | 292 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; |
293 | 293 | ||
294 | skb_dst_force(skb); | 294 | skb_dst_force(skb); |
295 | dev_hold(skb->dev); | ||
295 | 296 | ||
296 | nexthdr = x->type->input(x, skb); | 297 | nexthdr = x->type->input(x, skb); |
297 | 298 | ||
298 | if (nexthdr == -EINPROGRESS) | 299 | if (nexthdr == -EINPROGRESS) |
299 | return 0; | 300 | return 0; |
300 | resume: | 301 | resume: |
302 | dev_put(skb->dev); | ||
303 | |||
301 | spin_lock(&x->lock); | 304 | spin_lock(&x->lock); |
302 | if (nexthdr <= 0) { | 305 | if (nexthdr <= 0) { |
303 | if (nexthdr == -EBADMSG) { | 306 | if (nexthdr == -EBADMSG) { |
diff --git a/sound/core/timer.c b/sound/core/timer.c index ea4d999113ef..6469bedda2f3 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c | |||
@@ -1019,8 +1019,8 @@ static int snd_timer_s_start(struct snd_timer * timer) | |||
1019 | njiff += timer->sticks - priv->correction; | 1019 | njiff += timer->sticks - priv->correction; |
1020 | priv->correction = 0; | 1020 | priv->correction = 0; |
1021 | } | 1021 | } |
1022 | priv->last_expires = priv->tlist.expires = njiff; | 1022 | priv->last_expires = njiff; |
1023 | add_timer(&priv->tlist); | 1023 | mod_timer(&priv->tlist, njiff); |
1024 | return 0; | 1024 | return 0; |
1025 | } | 1025 | } |
1026 | 1026 | ||
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index d0da2508823e..6800e0c5a38f 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c | |||
@@ -287,6 +287,18 @@ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops | |||
287 | } | 287 | } |
288 | EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier); | 288 | EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier); |
289 | 289 | ||
290 | /* check whether intel graphics is present */ | ||
291 | static bool i915_gfx_present(void) | ||
292 | { | ||
293 | static struct pci_device_id ids[] = { | ||
294 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID), | ||
295 | .class = PCI_BASE_CLASS_DISPLAY << 16, | ||
296 | .class_mask = 0xff << 16 }, | ||
297 | {} | ||
298 | }; | ||
299 | return pci_dev_present(ids); | ||
300 | } | ||
301 | |||
290 | /** | 302 | /** |
291 | * snd_hdac_i915_init - Initialize i915 audio component | 303 | * snd_hdac_i915_init - Initialize i915 audio component |
292 | * @bus: HDA core bus | 304 | * @bus: HDA core bus |
@@ -309,6 +321,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus) | |||
309 | if (WARN_ON(hdac_acomp)) | 321 | if (WARN_ON(hdac_acomp)) |
310 | return -EBUSY; | 322 | return -EBUSY; |
311 | 323 | ||
324 | if (!i915_gfx_present()) | ||
325 | return -ENODEV; | ||
326 | |||
312 | acomp = kzalloc(sizeof(*acomp), GFP_KERNEL); | 327 | acomp = kzalloc(sizeof(*acomp), GFP_KERNEL); |
313 | if (!acomp) | 328 | if (!acomp) |
314 | return -ENOMEM; | 329 | return -ENOMEM; |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2624cfe98884..b680be0e937d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2361,6 +2361,10 @@ static const struct pci_device_id azx_ids[] = { | |||
2361 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2361 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2362 | { PCI_DEVICE(0x1002, 0xaae8), | 2362 | { PCI_DEVICE(0x1002, 0xaae8), |
2363 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2363 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2364 | { PCI_DEVICE(0x1002, 0xaae0), | ||
2365 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2366 | { PCI_DEVICE(0x1002, 0xaaf0), | ||
2367 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2364 | /* VIA VT8251/VT8237A */ | 2368 | /* VIA VT8251/VT8237A */ |
2365 | { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, | 2369 | { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, |
2366 | /* VIA GFX VT7122/VX900 */ | 2370 | /* VIA GFX VT7122/VX900 */ |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 73978c79981f..fefe83f2beab 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4759,6 +4759,7 @@ enum { | |||
4759 | ALC255_FIXUP_DELL_SPK_NOISE, | 4759 | ALC255_FIXUP_DELL_SPK_NOISE, |
4760 | ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, | 4760 | ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, |
4761 | ALC280_FIXUP_HP_HEADSET_MIC, | 4761 | ALC280_FIXUP_HP_HEADSET_MIC, |
4762 | ALC221_FIXUP_HP_FRONT_MIC, | ||
4762 | }; | 4763 | }; |
4763 | 4764 | ||
4764 | static const struct hda_fixup alc269_fixups[] = { | 4765 | static const struct hda_fixup alc269_fixups[] = { |
@@ -5401,6 +5402,13 @@ static const struct hda_fixup alc269_fixups[] = { | |||
5401 | .chained = true, | 5402 | .chained = true, |
5402 | .chain_id = ALC269_FIXUP_HEADSET_MIC, | 5403 | .chain_id = ALC269_FIXUP_HEADSET_MIC, |
5403 | }, | 5404 | }, |
5405 | [ALC221_FIXUP_HP_FRONT_MIC] = { | ||
5406 | .type = HDA_FIXUP_PINS, | ||
5407 | .v.pins = (const struct hda_pintbl[]) { | ||
5408 | { 0x19, 0x02a19020 }, /* Front Mic */ | ||
5409 | { } | ||
5410 | }, | ||
5411 | }, | ||
5404 | }; | 5412 | }; |
5405 | 5413 | ||
5406 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 5414 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
@@ -5506,6 +5514,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5506 | SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5514 | SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5507 | SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5515 | SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5508 | SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), | 5516 | SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), |
5517 | SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), | ||
5509 | SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), | 5518 | SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), |
5510 | SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5519 | SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5511 | SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5520 | SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index fb62bce2435c..24c7c2311b47 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip, | |||
150 | usb_audio_err(chip, "cannot memdup\n"); | 150 | usb_audio_err(chip, "cannot memdup\n"); |
151 | return -ENOMEM; | 151 | return -ENOMEM; |
152 | } | 152 | } |
153 | INIT_LIST_HEAD(&fp->list); | ||
153 | if (fp->nr_rates > MAX_NR_RATES) { | 154 | if (fp->nr_rates > MAX_NR_RATES) { |
154 | kfree(fp); | 155 | kfree(fp); |
155 | return -EINVAL; | 156 | return -EINVAL; |
@@ -193,6 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip, | |||
193 | return 0; | 194 | return 0; |
194 | 195 | ||
195 | error: | 196 | error: |
197 | list_del(&fp->list); /* unlink for avoiding double-free */ | ||
196 | kfree(fp); | 198 | kfree(fp); |
197 | kfree(rate_table); | 199 | kfree(rate_table); |
198 | return err; | 200 | return err; |
@@ -469,6 +471,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip, | |||
469 | fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; | 471 | fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; |
470 | fp->datainterval = 0; | 472 | fp->datainterval = 0; |
471 | fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); | 473 | fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); |
474 | INIT_LIST_HEAD(&fp->list); | ||
472 | 475 | ||
473 | switch (fp->maxpacksize) { | 476 | switch (fp->maxpacksize) { |
474 | case 0x120: | 477 | case 0x120: |
@@ -492,6 +495,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip, | |||
492 | ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; | 495 | ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; |
493 | err = snd_usb_add_audio_stream(chip, stream, fp); | 496 | err = snd_usb_add_audio_stream(chip, stream, fp); |
494 | if (err < 0) { | 497 | if (err < 0) { |
498 | list_del(&fp->list); /* unlink for avoiding double-free */ | ||
495 | kfree(fp); | 499 | kfree(fp); |
496 | return err; | 500 | return err; |
497 | } | 501 | } |
@@ -1133,6 +1137,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
1133 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ | 1137 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ |
1134 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1138 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
1135 | case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ | 1139 | case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ |
1140 | case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ | ||
1136 | case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ | 1141 | case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ |
1137 | return true; | 1142 | return true; |
1138 | } | 1143 | } |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 51258a15f653..6fe7f210bd4e 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
@@ -316,7 +316,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits, | |||
316 | /* | 316 | /* |
317 | * add this endpoint to the chip instance. | 317 | * add this endpoint to the chip instance. |
318 | * if a stream with the same endpoint already exists, append to it. | 318 | * if a stream with the same endpoint already exists, append to it. |
319 | * if not, create a new pcm stream. | 319 | * if not, create a new pcm stream. note, fp is added to the substream |
320 | * fmt_list and will be freed on the chip instance release. do not free | ||
321 | * fp or do remove it from the substream fmt_list to avoid double-free. | ||
320 | */ | 322 | */ |
321 | int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | 323 | int snd_usb_add_audio_stream(struct snd_usb_audio *chip, |
322 | int stream, | 324 | int stream, |
@@ -677,6 +679,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no) | |||
677 | * (fp->maxpacksize & 0x7ff); | 679 | * (fp->maxpacksize & 0x7ff); |
678 | fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no); | 680 | fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no); |
679 | fp->clock = clock; | 681 | fp->clock = clock; |
682 | INIT_LIST_HEAD(&fp->list); | ||
680 | 683 | ||
681 | /* some quirks for attributes here */ | 684 | /* some quirks for attributes here */ |
682 | 685 | ||
@@ -725,6 +728,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no) | |||
725 | dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint); | 728 | dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint); |
726 | err = snd_usb_add_audio_stream(chip, stream, fp); | 729 | err = snd_usb_add_audio_stream(chip, stream, fp); |
727 | if (err < 0) { | 730 | if (err < 0) { |
731 | list_del(&fp->list); /* unlink for avoiding double-free */ | ||
728 | kfree(fp->rate_table); | 732 | kfree(fp->rate_table); |
729 | kfree(fp->chmap); | 733 | kfree(fp->chmap); |
730 | kfree(fp); | 734 | kfree(fp); |
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh index 5334ad9d39b7..1069d96248c1 100755 --- a/tools/lib/lockdep/run_tests.sh +++ b/tools/lib/lockdep/run_tests.sh | |||
@@ -3,7 +3,7 @@ | |||
3 | make &> /dev/null | 3 | make &> /dev/null |
4 | 4 | ||
5 | for i in `ls tests/*.c`; do | 5 | for i in `ls tests/*.c`; do |
6 | testname=$(basename -s .c "$i") | 6 | testname=$(basename "$i" .c) |
7 | gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null | 7 | gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null |
8 | echo -ne "$testname... " | 8 | echo -ne "$testname... " |
9 | if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then | 9 | if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then |
@@ -11,11 +11,13 @@ for i in `ls tests/*.c`; do | |||
11 | else | 11 | else |
12 | echo "FAILED!" | 12 | echo "FAILED!" |
13 | fi | 13 | fi |
14 | rm tests/$testname | 14 | if [ -f "tests/$testname" ]; then |
15 | rm tests/$testname | ||
16 | fi | ||
15 | done | 17 | done |
16 | 18 | ||
17 | for i in `ls tests/*.c`; do | 19 | for i in `ls tests/*.c`; do |
18 | testname=$(basename -s .c "$i") | 20 | testname=$(basename "$i" .c) |
19 | gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null | 21 | gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null |
20 | echo -ne "(PRELOAD) $testname... " | 22 | echo -ne "(PRELOAD) $testname... " |
21 | if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then | 23 | if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then |
@@ -23,5 +25,7 @@ for i in `ls tests/*.c`; do | |||
23 | else | 25 | else |
24 | echo "FAILED!" | 26 | echo "FAILED!" |
25 | fi | 27 | fi |
26 | rm tests/$testname | 28 | if [ -f "tests/$testname" ]; then |
29 | rm tests/$testname | ||
30 | fi | ||
27 | done | 31 | done |
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 2e1fa2357528..8c8c6b9ce915 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
@@ -74,6 +74,7 @@ arch/*/include/uapi/asm/unistd*.h | |||
74 | arch/*/include/uapi/asm/perf_regs.h | 74 | arch/*/include/uapi/asm/perf_regs.h |
75 | arch/*/lib/memcpy*.S | 75 | arch/*/lib/memcpy*.S |
76 | arch/*/lib/memset*.S | 76 | arch/*/lib/memset*.S |
77 | arch/*/include/asm/*features.h | ||
77 | include/linux/poison.h | 78 | include/linux/poison.h |
78 | include/linux/hw_breakpoint.h | 79 | include/linux/hw_breakpoint.h |
79 | include/uapi/linux/perf_event.h | 80 | include/uapi/linux/perf_event.h |
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c index 6138bdef6e63..f8ccee132867 100644 --- a/tools/perf/arch/powerpc/util/header.c +++ b/tools/perf/arch/powerpc/util/header.c | |||
@@ -4,6 +4,8 @@ | |||
4 | #include <stdlib.h> | 4 | #include <stdlib.h> |
5 | #include <string.h> | 5 | #include <string.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include "header.h" | ||
8 | #include "util.h" | ||
7 | 9 | ||
8 | #define mfspr(rn) ({unsigned long rval; \ | 10 | #define mfspr(rn) ({unsigned long rval; \ |
9 | asm volatile("mfspr %0," __stringify(rn) \ | 11 | asm volatile("mfspr %0," __stringify(rn) \ |
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg index 238aa3927c71..f2d9c5fe58e0 100755 --- a/tools/perf/tests/perf-targz-src-pkg +++ b/tools/perf/tests/perf-targz-src-pkg | |||
@@ -15,7 +15,7 @@ TMP_DEST=$(mktemp -d) | |||
15 | tar xf ${TARBALL} -C $TMP_DEST | 15 | tar xf ${TARBALL} -C $TMP_DEST |
16 | rm -f ${TARBALL} | 16 | rm -f ${TARBALL} |
17 | cd - > /dev/null | 17 | cd - > /dev/null |
18 | make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1 | 18 | make -C $TMP_DEST/perf*/tools/perf > /dev/null |
19 | RC=$? | 19 | RC=$? |
20 | rm -rf ${TMP_DEST} | 20 | rm -rf ${TMP_DEST} |
21 | exit $RC | 21 | exit $RC |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 4b9816555946..2a83414159a6 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -337,7 +337,7 @@ static void callchain_node__init_have_children(struct callchain_node *node, | |||
337 | chain = list_entry(node->val.next, struct callchain_list, list); | 337 | chain = list_entry(node->val.next, struct callchain_list, list); |
338 | chain->has_children = has_sibling; | 338 | chain->has_children = has_sibling; |
339 | 339 | ||
340 | if (node->val.next != node->val.prev) { | 340 | if (!list_empty(&node->val)) { |
341 | chain = list_entry(node->val.prev, struct callchain_list, list); | 341 | chain = list_entry(node->val.prev, struct callchain_list, list); |
342 | chain->has_children = !RB_EMPTY_ROOT(&node->rb_root); | 342 | chain->has_children = !RB_EMPTY_ROOT(&node->rb_root); |
343 | } | 343 | } |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 52cf479bc593..dad55d04ffdd 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -56,13 +56,22 @@ const char *perf_event__name(unsigned int id) | |||
56 | return perf_event__names[id]; | 56 | return perf_event__names[id]; |
57 | } | 57 | } |
58 | 58 | ||
59 | static struct perf_sample synth_sample = { | 59 | static int perf_tool__process_synth_event(struct perf_tool *tool, |
60 | union perf_event *event, | ||
61 | struct machine *machine, | ||
62 | perf_event__handler_t process) | ||
63 | { | ||
64 | struct perf_sample synth_sample = { | ||
60 | .pid = -1, | 65 | .pid = -1, |
61 | .tid = -1, | 66 | .tid = -1, |
62 | .time = -1, | 67 | .time = -1, |
63 | .stream_id = -1, | 68 | .stream_id = -1, |
64 | .cpu = -1, | 69 | .cpu = -1, |
65 | .period = 1, | 70 | .period = 1, |
71 | .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, | ||
72 | }; | ||
73 | |||
74 | return process(tool, event, &synth_sample, machine); | ||
66 | }; | 75 | }; |
67 | 76 | ||
68 | /* | 77 | /* |
@@ -186,7 +195,7 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
186 | if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) | 195 | if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) |
187 | return -1; | 196 | return -1; |
188 | 197 | ||
189 | if (process(tool, event, &synth_sample, machine) != 0) | 198 | if (perf_tool__process_synth_event(tool, event, machine, process) != 0) |
190 | return -1; | 199 | return -1; |
191 | 200 | ||
192 | return tgid; | 201 | return tgid; |
@@ -218,7 +227,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool, | |||
218 | 227 | ||
219 | event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); | 228 | event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); |
220 | 229 | ||
221 | if (process(tool, event, &synth_sample, machine) != 0) | 230 | if (perf_tool__process_synth_event(tool, event, machine, process) != 0) |
222 | return -1; | 231 | return -1; |
223 | 232 | ||
224 | return 0; | 233 | return 0; |
@@ -344,7 +353,7 @@ out: | |||
344 | event->mmap2.pid = tgid; | 353 | event->mmap2.pid = tgid; |
345 | event->mmap2.tid = pid; | 354 | event->mmap2.tid = pid; |
346 | 355 | ||
347 | if (process(tool, event, &synth_sample, machine) != 0) { | 356 | if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { |
348 | rc = -1; | 357 | rc = -1; |
349 | break; | 358 | break; |
350 | } | 359 | } |
@@ -402,7 +411,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, | |||
402 | 411 | ||
403 | memcpy(event->mmap.filename, pos->dso->long_name, | 412 | memcpy(event->mmap.filename, pos->dso->long_name, |
404 | pos->dso->long_name_len + 1); | 413 | pos->dso->long_name_len + 1); |
405 | if (process(tool, event, &synth_sample, machine) != 0) { | 414 | if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { |
406 | rc = -1; | 415 | rc = -1; |
407 | break; | 416 | break; |
408 | } | 417 | } |
@@ -472,7 +481,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, | |||
472 | /* | 481 | /* |
473 | * Send the prepared comm event | 482 | * Send the prepared comm event |
474 | */ | 483 | */ |
475 | if (process(tool, comm_event, &synth_sample, machine) != 0) | 484 | if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) |
476 | break; | 485 | break; |
477 | 486 | ||
478 | rc = 0; | 487 | rc = 0; |
@@ -701,7 +710,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, | |||
701 | event->mmap.len = map->end - event->mmap.start; | 710 | event->mmap.len = map->end - event->mmap.start; |
702 | event->mmap.pid = machine->pid; | 711 | event->mmap.pid = machine->pid; |
703 | 712 | ||
704 | err = process(tool, event, &synth_sample, machine); | 713 | err = perf_tool__process_synth_event(tool, event, machine, process); |
705 | free(event); | 714 | free(event); |
706 | 715 | ||
707 | return err; | 716 | return err; |
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h index cd67e64a0494..2fbeb59c4bdd 100644 --- a/tools/perf/util/genelf.h +++ b/tools/perf/util/genelf.h | |||
@@ -9,36 +9,32 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent | |||
9 | 9 | ||
10 | #if defined(__arm__) | 10 | #if defined(__arm__) |
11 | #define GEN_ELF_ARCH EM_ARM | 11 | #define GEN_ELF_ARCH EM_ARM |
12 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
13 | #define GEN_ELF_CLASS ELFCLASS32 | 12 | #define GEN_ELF_CLASS ELFCLASS32 |
14 | #elif defined(__aarch64__) | 13 | #elif defined(__aarch64__) |
15 | #define GEN_ELF_ARCH EM_AARCH64 | 14 | #define GEN_ELF_ARCH EM_AARCH64 |
16 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
17 | #define GEN_ELF_CLASS ELFCLASS64 | 15 | #define GEN_ELF_CLASS ELFCLASS64 |
18 | #elif defined(__x86_64__) | 16 | #elif defined(__x86_64__) |
19 | #define GEN_ELF_ARCH EM_X86_64 | 17 | #define GEN_ELF_ARCH EM_X86_64 |
20 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
21 | #define GEN_ELF_CLASS ELFCLASS64 | 18 | #define GEN_ELF_CLASS ELFCLASS64 |
22 | #elif defined(__i386__) | 19 | #elif defined(__i386__) |
23 | #define GEN_ELF_ARCH EM_386 | 20 | #define GEN_ELF_ARCH EM_386 |
24 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
25 | #define GEN_ELF_CLASS ELFCLASS32 | 21 | #define GEN_ELF_CLASS ELFCLASS32 |
26 | #elif defined(__ppcle__) | 22 | #elif defined(__powerpc64__) |
27 | #define GEN_ELF_ARCH EM_PPC | ||
28 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
29 | #define GEN_ELF_CLASS ELFCLASS64 | ||
30 | #elif defined(__powerpc__) | ||
31 | #define GEN_ELF_ARCH EM_PPC64 | ||
32 | #define GEN_ELF_ENDIAN ELFDATA2MSB | ||
33 | #define GEN_ELF_CLASS ELFCLASS64 | ||
34 | #elif defined(__powerpcle__) | ||
35 | #define GEN_ELF_ARCH EM_PPC64 | 23 | #define GEN_ELF_ARCH EM_PPC64 |
36 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
37 | #define GEN_ELF_CLASS ELFCLASS64 | 24 | #define GEN_ELF_CLASS ELFCLASS64 |
25 | #elif defined(__powerpc__) | ||
26 | #define GEN_ELF_ARCH EM_PPC | ||
27 | #define GEN_ELF_CLASS ELFCLASS32 | ||
38 | #else | 28 | #else |
39 | #error "unsupported architecture" | 29 | #error "unsupported architecture" |
40 | #endif | 30 | #endif |
41 | 31 | ||
32 | #if __BYTE_ORDER == __BIG_ENDIAN | ||
33 | #define GEN_ELF_ENDIAN ELFDATA2MSB | ||
34 | #else | ||
35 | #define GEN_ELF_ENDIAN ELFDATA2LSB | ||
36 | #endif | ||
37 | |||
42 | #if GEN_ELF_CLASS == ELFCLASS64 | 38 | #if GEN_ELF_CLASS == ELFCLASS64 |
43 | #define elf_newehdr elf64_newehdr | 39 | #define elf_newehdr elf64_newehdr |
44 | #define elf_getshdr elf64_getshdr | 40 | #define elf_getshdr elf64_getshdr |
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index 6bc3ecd2e7ca..abf1366e2a24 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c | |||
@@ -279,6 +279,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, | |||
279 | event.sample.header.misc = PERF_RECORD_MISC_USER; | 279 | event.sample.header.misc = PERF_RECORD_MISC_USER; |
280 | event.sample.header.size = sizeof(struct perf_event_header); | 280 | event.sample.header.size = sizeof(struct perf_event_header); |
281 | 281 | ||
282 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
282 | sample.ip = le64_to_cpu(branch->from); | 283 | sample.ip = le64_to_cpu(branch->from); |
283 | sample.pid = btsq->pid; | 284 | sample.pid = btsq->pid; |
284 | sample.tid = btsq->tid; | 285 | sample.tid = btsq->tid; |
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 05d815851be1..407f11b97c8d 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c | |||
@@ -979,6 +979,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) | |||
979 | if (!pt->timeless_decoding) | 979 | if (!pt->timeless_decoding) |
980 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); | 980 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); |
981 | 981 | ||
982 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
982 | sample.ip = ptq->state->from_ip; | 983 | sample.ip = ptq->state->from_ip; |
983 | sample.pid = ptq->pid; | 984 | sample.pid = ptq->pid; |
984 | sample.tid = ptq->tid; | 985 | sample.tid = ptq->tid; |
@@ -1035,6 +1036,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) | |||
1035 | if (!pt->timeless_decoding) | 1036 | if (!pt->timeless_decoding) |
1036 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); | 1037 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); |
1037 | 1038 | ||
1039 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
1038 | sample.ip = ptq->state->from_ip; | 1040 | sample.ip = ptq->state->from_ip; |
1039 | sample.pid = ptq->pid; | 1041 | sample.pid = ptq->pid; |
1040 | sample.tid = ptq->tid; | 1042 | sample.tid = ptq->tid; |
@@ -1092,6 +1094,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) | |||
1092 | if (!pt->timeless_decoding) | 1094 | if (!pt->timeless_decoding) |
1093 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); | 1095 | sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); |
1094 | 1096 | ||
1097 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
1095 | sample.ip = ptq->state->from_ip; | 1098 | sample.ip = ptq->state->from_ip; |
1096 | sample.pid = ptq->pid; | 1099 | sample.pid = ptq->pid; |
1097 | sample.tid = ptq->tid; | 1100 | sample.tid = ptq->tid; |
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c index cd272cc21e05..ad0c0bb1fbc7 100644 --- a/tools/perf/util/jitdump.c +++ b/tools/perf/util/jitdump.c | |||
@@ -417,6 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr) | |||
417 | * use first address as sample address | 417 | * use first address as sample address |
418 | */ | 418 | */ |
419 | memset(&sample, 0, sizeof(sample)); | 419 | memset(&sample, 0, sizeof(sample)); |
420 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
420 | sample.pid = pid; | 421 | sample.pid = pid; |
421 | sample.tid = tid; | 422 | sample.tid = tid; |
422 | sample.time = id->time; | 423 | sample.time = id->time; |
@@ -505,6 +506,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr) | |||
505 | * use first address as sample address | 506 | * use first address as sample address |
506 | */ | 507 | */ |
507 | memset(&sample, 0, sizeof(sample)); | 508 | memset(&sample, 0, sizeof(sample)); |
509 | sample.cpumode = PERF_RECORD_MISC_USER; | ||
508 | sample.pid = pid; | 510 | sample.pid = pid; |
509 | sample.tid = tid; | 511 | sample.tid = tid; |
510 | sample.time = id->time; | 512 | sample.time = id->time; |