author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-11 20:34:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-11 20:34:10 -0400
commit		a379f71a30dddbd2e7393624e455ce53c87965d1 (patch)
tree		c9c71b3eb19ff7e8618ff29e9d5ac99882b823e1
parent		de34f4da7f62ff59ac6e1ef320b0fcfa3296fce3 (diff)
parent		9c5d760b8d229b94c5030863a5edaee5f1a9d7b7 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- a few block updates that fell in my lap
- lib/ updates
- checkpatch
- autofs
- ipc
- a ton of misc other things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
mm: split gfp_mask and mapping flags into separate fields
fs: use mapping_set_error instead of opencoded set_bit
treewide: remove redundant #include <linux/kconfig.h>
hung_task: allow hung_task_panic when hung_task_warnings is 0
kthread: add kerneldoc for kthread_create()
kthread: better support freezable kthread workers
kthread: allow to modify delayed kthread work
kthread: allow to cancel kthread work
kthread: initial support for delayed kthread work
kthread: detect when a kthread work is used by more workers
kthread: add kthread_destroy_worker()
kthread: add kthread_create_worker*()
kthread: allow to call __kthread_create_on_node() with va_list args
kthread/smpboot: do not park in kthread_create_on_cpu()
kthread: kthread worker API cleanup
kthread: rename probe_kthread_data() to kthread_probe_data()
scripts/tags.sh: enable code completion in VIM
mm: kmemleak: avoid using __va() on addresses that don't have a lowmem mapping
kdump, vmcoreinfo: report memory sections virtual addresses
ipc/sem.c: add cond_resched in exit_sme
...
200 files changed, 2300 insertions, 1061 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 2d455a5cf671..98bf7ac29aad 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -126,3 +126,20 @@ means that we won't try quite as hard to get them.
 
 NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
 though ARM64 patches will likely be posted soon.
+
+DMA_ATTR_NO_WARN
+----------------
+
+This tells the DMA-mapping subsystem to suppress allocation failure reports
+(similarly to __GFP_NOWARN).
+
+On some architectures allocation failures are reported with error messages
+to the system logs. Although this can help to identify and debug problems,
+drivers which handle failures (eg, retry later) have no problems with them,
+and can actually flood the system logs with error messages that aren't any
+problem at all, depending on the implementation of the retry mechanism.
+
+So, this provides a way for drivers to avoid those error messages on calls
+where allocation failures are not a problem, and shouldn't bother the logs.
+
+NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
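For illustration only (this snippet is not part of the patch): a driver that handles mapping failures itself, for example by retrying later from its own work queue, could pass the new attribute so a transient failure never reaches the system log. The device, scatterlist and helper names are made up, and the call assumes the 4.9-era dma_map_sg_attrs() that takes an unsigned long attrs bitmask.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical driver helper: map a scatterlist quietly and let the
	 * caller retry later instead of logging the failure. */
	static int my_drv_map_sg_quiet(struct device *dev,
				       struct scatterlist *sgl, int nents)
	{
		int mapped;

		mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
					  DMA_ATTR_NO_WARN);
		if (mapped == 0)
			return -EAGAIN;	/* caller's retry path takes over */

		return mapped;
	}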
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index bf9061142827..238e9f61352f 100644
--- a/Documentation/RCU/lockdep-splat.txt
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -57,7 +57,7 @@ Call Trace:
 [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
 [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
 [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
-[<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+[<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
 [<ffffffff817db150>] ? gs_change+0xb/0xb
 
 Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 1788722d5495..b2391b829169 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -162,6 +162,15 @@ See the include/linux/kmemleak.h header for the functions prototype.
 - ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
 - ``kmemleak_free_recursive`` - as kmemleak_free but checks the recursiveness
 
+The following functions take a physical address as the object pointer
+and only perform the corresponding action if the address has a lowmem
+mapping:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_not_leak_phys``
+- ``kmemleak_ignore_phys``
+
 Dealing with false positives/negatives
 --------------------------------------
 
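As a rough sketch (not from the patch), boot-time code that reserves memory by physical address could register it with one of these helpers; the call is a no-op when the address has no lowmem mapping. The wrapper name is hypothetical, and the min_count/gfp arguments reflect the kmemleak_alloc_phys() prototype assumed here.

	#include <linux/kmemleak.h>

	/* Hypothetical early-boot helper: make kmemleak track a table that
	 * was carved out of physical memory. */
	static void note_boot_table(phys_addr_t phys, size_t size)
	{
		/* Only takes effect if 'phys' has a lowmem mapping. */
		kmemleak_alloc_phys(phys, size, 1, GFP_KERNEL);
	}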
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
index aff22113a986..50a3e01a36f8 100644
--- a/Documentation/filesystems/autofs4-mount-control.txt
+++ b/Documentation/filesystems/autofs4-mount-control.txt
@@ -179,8 +179,19 @@ struct autofs_dev_ioctl {
 				 * including this struct */
 	__s32 ioctlfd;		/* automount command fd */
 
-	__u32 arg1;		/* Command parameters */
-	__u32 arg2;
+	union {
+		struct args_protover		protover;
+		struct args_protosubver		protosubver;
+		struct args_openmount		openmount;
+		struct args_ready		ready;
+		struct args_fail		fail;
+		struct args_setpipefd		setpipefd;
+		struct args_timeout		timeout;
+		struct args_requester		requester;
+		struct args_expire		expire;
+		struct args_askumount		askumount;
+		struct args_ismountpoint	ismountpoint;
+	};
 
 	char path[0];
 };
@@ -192,8 +203,8 @@ optionally be used to check a specific mount corresponding to a given
 mount point file descriptor, and when requesting the uid and gid of the
 last successful mount on a directory within the autofs file system.
 
-The fields arg1 and arg2 are used to communicate parameters and results of
-calls made as described below.
+The union is used to communicate parameters and results of calls made
+as described below.
 
 The path field is used to pass a path where it is needed and the size field
 is used account for the increased structure length when translating the
@@ -245,9 +256,9 @@ AUTOFS_DEV_IOCTL_PROTOVER_CMD and AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD
 Get the major and minor version of the autofs4 protocol version understood
 by loaded module. This call requires an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to a valid autofs mount point descriptor
-and sets the requested version number in structure field arg1. These
-commands return 0 on success or one of the negative error codes if
-validation fails.
+and sets the requested version number in version field of struct args_protover
+or sub_version field of struct args_protosubver. These commands return
+0 on success or one of the negative error codes if validation fails.
 
 
 AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
@@ -256,9 +267,9 @@ AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
 Obtain and release a file descriptor for an autofs managed mount point
 path. The open call requires an initialized struct autofs_dev_ioctl with
 the path field set and the size field adjusted appropriately as well
-as the arg1 field set to the device number of the autofs mount. The
-device number can be obtained from the mount options shown in
-/proc/mounts. The close call requires an initialized struct
+as the devid field of struct args_openmount set to the device number of
+the autofs mount. The device number can be obtained from the mount options
+shown in /proc/mounts. The close call requires an initialized struct
 autofs_dev_ioct with the ioctlfd field set to the descriptor obtained
 from the open call. The release of the file descriptor can also be done
 with close(2) so any open descriptors will also be closed at process exit.
@@ -272,10 +283,10 @@ AUTOFS_DEV_IOCTL_READY_CMD and AUTOFS_DEV_IOCTL_FAIL_CMD
 Return mount and expire result status from user space to the kernel.
 Both of these calls require an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to the descriptor obtained from the open
-call and the arg1 field set to the wait queue token number, received
-by user space in the foregoing mount or expire request. The arg2 field
-is set to the status to be returned. For the ready call this is always
-0 and for the fail call it is set to the errno of the operation.
+call and the token field of struct args_ready or struct args_fail set
+to the wait queue token number, received by user space in the foregoing
+mount or expire request. The status field of struct args_fail is set to
+the errno of the operation. It is set to 0 on success.
 
 
 AUTOFS_DEV_IOCTL_SETPIPEFD_CMD
@@ -290,9 +301,10 @@ mount be catatonic (see next call).
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-the arg1 field set to descriptor of the pipe. On success the call
-also sets the process group id used to identify the controlling process
-(eg. the owning automount(8) daemon) to the process group of the caller.
+the pipefd field of struct args_setpipefd set to descriptor of the pipe.
+On success the call also sets the process group id used to identify the
+controlling process (eg. the owning automount(8) daemon) to the process
+group of the caller.
 
 
 AUTOFS_DEV_IOCTL_CATATONIC_CMD
@@ -323,9 +335,8 @@ mount on the given path dentry.
 
 The call requires an initialized struct autofs_dev_ioctl with the path
 field set to the mount point in question and the size field adjusted
-appropriately as well as the arg1 field set to the device number of the
-containing autofs mount. Upon return the struct field arg1 contains the
-uid and arg2 the gid.
+appropriately. Upon return the uid field of struct args_requester contains
+the uid and gid field the gid.
 
 When reconstructing an autofs mount tree with active mounts we need to
 re-connect to mounts that may have used the original process uid and
@@ -343,8 +354,9 @@ this ioctl is called until no further expire candidates are found.
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call. In
 addition an immediate expire, independent of the mount timeout, can be
-requested by setting the arg1 field to 1. If no expire candidates can
-be found the ioctl returns -1 with errno set to EAGAIN.
+requested by setting the how field of struct args_expire to 1. If no
+expire candidates can be found the ioctl returns -1 with errno set to
+EAGAIN.
 
 This call causes the kernel module to check the mount corresponding
 to the given ioctlfd for mounts that can be expired, issues an expire
@@ -357,7 +369,8 @@ Checks if an autofs mount point is in use.
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-it returns the result in the arg1 field, 1 for busy and 0 otherwise.
+it returns the result in the may_umount field of struct args_askumount,
+1 for busy and 0 otherwise.
 
 
 AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD
@@ -369,12 +382,12 @@ The call requires an initialized struct autofs_dev_ioctl. There are two
 possible variations. Both use the path field set to the path of the mount
 point to check and the size field adjusted appropriately. One uses the
 ioctlfd field to identify a specific mount point to check while the other
-variation uses the path and optionally arg1 set to an autofs mount type.
-The call returns 1 if this is a mount point and sets arg1 to the device
-number of the mount and field arg2 to the relevant super block magic
-number (described below) or 0 if it isn't a mountpoint. In both cases
-the the device number (as returned by new_encode_dev()) is returned
-in field arg1.
+variation uses the path and optionally in.type field of struct args_ismountpoint
+set to an autofs mount type. The call returns 1 if this is a mount point
+and sets out.devid field to the device number of the mount and out.magic
+field to the relevant super block magic number (described below) or 0 if
+it isn't a mountpoint. In both cases the the device number (as returned
+by new_encode_dev()) is returned in out.devid field.
 
 If supplied with a file descriptor we're looking for a specific mount,
 not necessarily at the top of the mounted stack. In this case the path
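To make the union concrete, here is a rough userspace sketch of the AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD call described above, reading the result from the named askumount member rather than the old arg1. The header location, the AUTOFS_DEV_IOCTL_ASKUMOUNT request macro and the field spellings other than may_umount are assumptions; devfd is an open descriptor on the autofs control device.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/auto_dev-ioctl.h>	/* header location has varied */

	static int ask_umount(int devfd, int ioctlfd)
	{
		struct autofs_dev_ioctl param;

		memset(&param, 0, sizeof(param));
		param.ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
		param.ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
		param.size = sizeof(param);	/* no path appended */
		param.ioctlfd = ioctlfd;

		if (ioctl(devfd, AUTOFS_DEV_IOCTL_ASKUMOUNT, &param) == -1)
			return -1;

		/* Result lives in the named union member, not in arg1. */
		return param.askumount.may_umount;	/* 1 busy, 0 otherwise */
	}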
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt
index 39d02e19fb62..8fac3fe7b8c9 100644
--- a/Documentation/filesystems/autofs4.txt
+++ b/Documentation/filesystems/autofs4.txt
@@ -203,9 +203,9 @@ initiated or is being considered, otherwise it returns 0.
 Mountpoint expiry
 -----------------
 
-The VFS has a mechansim for automatically expiring unused mounts,
+The VFS has a mechanism for automatically expiring unused mounts,
 much as it can expire any unused dentry information from the dcache.
 This is guided by the MNT_SHRINKABLE flag. This only applies to
 mounts that were created by `d_automount()` returning a filesystem to be
 mounted. As autofs doesn't return such a filesystem but leaves the
 mounting to the automount daemon, it must involve the automount daemon
@@ -298,7 +298,7 @@ remove directories and symlinks using normal filesystem operations.
 autofs knows whether a process requesting some operation is the daemon
 or not based on its process-group id number (see getpgid(1)).
 
-When an autofs filesystem it mounted the pgid of the mounting
+When an autofs filesystem is mounted the pgid of the mounting
 processes is recorded unless the "pgrp=" option is given, in which
 case that number is recorded instead. Any request arriving from a
 process in that process group is considered to come from the daemon.
@@ -450,7 +450,7 @@ Commands are:
     numbers for existing filesystems can be found in
     `/proc/self/mountinfo`.
 - **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`.
 - **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
     catatonic mode, this can provide the write end of a new pipe
     in `arg1` to re-establish communication with a daemon. The
     process group of the calling process is used to identify the
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 705fb915cbf7..a1489e14f8ee 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -33,6 +33,37 @@ can also be entered as
 Double-quotes can be used to protect spaces in values, e.g.:
 	param="spaces in here"
 
+cpu lists:
+----------
+
+Some kernel parameters take a list of CPUs as a value, e.g. isolcpus,
+nohz_full, irqaffinity, rcu_nocbs. The format of this list is:
+
+	<cpu number>,...,<cpu number>
+
+or
+
+	<cpu number>-<cpu number>
+	(must be a positive range in ascending order)
+
+or a mixture
+
+<cpu number>,...,<cpu number>-<cpu number>
+
+Note that for the special case of a range one can split the range into equal
+sized groups and for each group use some amount from the beginning of that
+group:
+
+	<cpu number>-cpu number>:<used size>/<group size>
+
+For example one can add to the command line following parameter:
+
+	isolcpus=1,2,10-20,100-2000:2/25
+
+where the final item represents CPUs 100,101,125,126,150,151,...
+
+
+
 This document may not be entirely up to date and comprehensive. The command
 "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
 module. Loadable modules, after being loaded into the running kernel, also
@@ -1789,13 +1820,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
 	irqaffinity=	[SMP] Set the default irq affinity mask
-			Format:
-			<cpu number>,...,<cpu number>
-			or
-			<cpu number>-<cpu number>
-			(must be a positive range in ascending order)
-			or a mixture
-			<cpu number>,...,<cpu number>-<cpu number>
+			The argument is a cpu list, as described above.
 
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
@@ -1812,13 +1837,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
 	isolcpus=	[KNL,SMP] Isolate CPUs from the general scheduler.
-			Format:
-			<cpu number>,...,<cpu number>
-			or
-			<cpu number>-<cpu number>
-			(must be a positive range in ascending order)
-			or a mixture
-			<cpu number>,...,<cpu number>-<cpu number>
+			The argument is a cpu list, as described above.
 
 			This option can be used to specify one or more CPUs
 			to isolate from the general SMP balancing and scheduling
@@ -2680,6 +2699,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Default: on
 
 	nohz_full=	[KNL,BOOT]
+			The argument is a cpu list, as described above.
 			In kernels built with CONFIG_NO_HZ_FULL=y, set
 			the specified list of CPUs whose tick will be stopped
 			whenever possible. The boot CPU will be forced outside
@@ -3285,6 +3305,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			See Documentation/blockdev/ramdisk.txt.
 
 	rcu_nocbs=	[KNL]
+			The argument is a cpu list, as described above.
+
 			In kernels built with CONFIG_RCU_NOCB_CPU=y, set
 			the specified list of CPUs to be no-callback CPUs.
 			Invocation of these CPUs' RCU callbacks will
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
index 624e1d436c6c..00748350cf72 100644
--- a/arch/arm/include/asm/trusted_foundations.h
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -26,7 +26,6 @@
 #ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
 #define __ASM_ARM_TRUSTED_FOUNDATIONS_H
 
-#include <linux/kconfig.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <linux/of.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 612eb530f33f..91d2d5b01414 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -318,8 +318,7 @@ unsigned long get_wchan(struct task_struct *p)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 55101bd86b98..39feb85a6931 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -7,7 +7,6 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a4f5f766af08..27b2f1387df4 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -372,12 +372,8 @@ unsigned long arch_align_stack(unsigned long sp)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk;
-
 	if (is_compat_task())
-		range_end += 0x02000000;
+		return randomize_page(mm->brk, 0x02000000);
 	else
-		range_end += 0x40000000;
-
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+		return randomize_page(mm->brk, 0x40000000);
 }
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cb16fcc5f8f0..5537f95b28c9 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -267,6 +267,17 @@ static void octeon_crash_shutdown(struct pt_regs *regs)
 	default_machine_crash_shutdown(regs);
 }
 
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+	int cpu;
+
+	/* disable watchdogs */
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_CAVIUM_RESERVE32
@@ -911,6 +922,9 @@ void __init prom_init(void)
 	_machine_kexec_shutdown = octeon_shutdown;
 	_machine_crash_shutdown = octeon_crash_shutdown;
 	_machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+	_crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
 #endif
 
 	octeon_user_io_init();
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index ee25ebbf2a28..493a3cc7c39a 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -45,6 +45,7 @@ extern const unsigned char kexec_smp_wait[];
 extern unsigned long secondary_kexec_args[4];
 extern void (*relocated_kexec_smp_wait) (void *);
 extern atomic_t kexec_ready_to_reboot;
+extern void (*_crash_smp_send_stop)(void);
 #endif
 #endif
 
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index d1ff774ac4b6..c68c0cc879c6 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -14,7 +14,6 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <linux/kconfig.h>
 #include <boot_param.h>
 
 /* loongson internal northbridge initialization */
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 610f0f3bdb34..1723b1762297 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -47,9 +47,14 @@ static void crash_shutdown_secondary(void *passed_regs)
 
 static void crash_kexec_prepare_cpus(void)
 {
+	static int cpus_stopped;
 	unsigned int msecs;
+	unsigned int ncpus;
 
-	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+	if (cpus_stopped)
+		return;
+
+	ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	dump_send_ipi(crash_shutdown_secondary);
 	smp_wmb();
@@ -64,6 +69,17 @@ static void crash_kexec_prepare_cpus(void)
 		cpu_relax();
 		mdelay(1);
 	}
+
+	cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+	if (_crash_smp_send_stop)
+		_crash_smp_send_stop();
+
+	crash_kexec_prepare_cpus();
 }
 
 #else /* !defined(CONFIG_SMP) */
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 50980bf3983e..59725204105c 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -25,6 +25,7 @@ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
 #ifdef CONFIG_SMP
 void (*relocated_kexec_smp_wait) (void *);
 atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
 int
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 36775d20b0e7..f8b7bf836437 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -35,7 +35,6 @@
  */
 #include <linux/sched.h>
 #include <linux/debugfs.h>
-#include <linux/kconfig.h>
 #include <linux/percpu-defs.h>
 #include <linux/perf_event.h>
 
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 39e7b472f0d8..49a2e2226fee 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -14,7 +14,6 @@
 #include <linux/errno.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
-#include <linux/kconfig.h>
 #include <linux/moduleloader.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 37d6e741be82..5f202a566ec5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -479,7 +479,8 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
-			if (printk_ratelimit())
+			if (!(attrs & DMA_ATTR_NO_WARN) &&
+			    printk_ratelimit())
 				dev_info(dev, "iommu_alloc failed, tbl %p "
 					 "vaddr %lx npages %lu\n", tbl, vaddr,
 					 npages);
@@ -776,7 +777,8 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 					 mask >> tbl->it_page_shift, align,
 					 attrs);
 	if (dma_handle == DMA_ERROR_CODE) {
-		if (printk_ratelimit()) {
+		if (!(attrs & DMA_ATTR_NO_WARN) &&
+		    printk_ratelimit()) {
 			dev_info(dev, "iommu_alloc failed, tbl %p "
 				 "vaddr %p npages %d\n", tbl, vaddr,
 				 npages);
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c
index 851a94e6ae58..ef61c597898b 100644
--- a/arch/tile/mm/mmap.c
+++ b/arch/tile/mm/mmap.c
@@ -88,6 +88,5 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 00299c927852..d7c6b676b3a5 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -295,8 +295,7 @@ unsigned long get_wchan(struct task_struct *p)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index d2434c1cad05..282630e4c6ea 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -210,6 +210,7 @@ struct kexec_entry64_regs {
 
 typedef void crash_vmclear_fn(void);
 extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+extern void kdump_nmi_shootdown_cpus(void);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19980b36f394..026ea82ecc60 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -47,6 +47,7 @@ struct smp_ops {
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*stop_other_cpus)(int wait);
+	void (*crash_stop_other_cpus)(void);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9616cf76940c..650830e39e3a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -133,15 +133,31 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 	disable_local_APIC();
 }
 
-static void kdump_nmi_shootdown_cpus(void)
+void kdump_nmi_shootdown_cpus(void)
 {
 	nmi_shootdown_cpus(kdump_nmi_callback);
 
 	disable_local_APIC();
 }
 
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+
+	if (cpus_stopped)
+		return;
+
+	if (smp_ops.crash_stop_other_cpus)
+		smp_ops.crash_stop_other_cpus();
+	else
+		smp_send_stop();
+
+	cpus_stopped = 1;
+}
+
 #else
-static void kdump_nmi_shootdown_cpus(void)
+void crash_smp_send_stop(void)
 {
 	/* There are no cpus to shootdown */
 }
@@ -160,7 +176,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 	/* The kernel is broken so disable interrupts */
 	local_irq_disable();
 
-	kdump_nmi_shootdown_cpus();
+	crash_smp_send_stop();
 
 	/*
 	 * VMCLEAR VMCSs loaded on this cpu if needed.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 5a294e48b185..8c1f218926d7 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -337,6 +337,9 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
 			      kaslr_offset());
+	VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
+	VMCOREINFO_VMALLOC_START(VMALLOC_START);
+	VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28cea7802ecb..0888a879120f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -509,8 +509,7 @@ unsigned long arch_align_stack(unsigned long sp)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 658777cf3851..68f8cc222f25 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -32,6 +32,8 @@
 #include <asm/nmi.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
+#include <asm/kexec.h>
+
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -342,6 +344,9 @@ struct smp_ops smp_ops = {
 	.smp_cpus_done		= native_smp_cpus_done,
 
 	.stop_other_cpus	= native_stop_other_cpus,
+#if defined(CONFIG_KEXEC_CORE)
+	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
+#endif
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272d789a..a55ed63b9f91 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,6 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
 	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
-		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that. This means we need to move the
@@ -112,9 +111,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 		*begin = 0x40000000;
 		*end = 0x80000000;
 		if (current->flags & PF_RANDOMIZE) {
-			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
-			if (new_begin)
-				*begin = new_begin;
+			*begin = randomize_page(*begin, 0x02000000);
 		}
 	} else {
 		*begin = current->mm->mmap_legacy_base;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5fb6c620180e..16a7134eedac 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 	 */
 	smp_mb();
 	if (atomic_dec_if_positive(&ps->pending) > 0)
-		queue_kthread_work(&pit->worker, &pit->expired);
+		kthread_queue_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
 	hrtimer_cancel(&pit->pit_state.timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 }
 
 static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
 
-	queue_kthread_work(&pt->worker, &pt->expired);
+	kthread_queue_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&ps->timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 	ps->period = interval;
 	ps->is_periodic = is_period;
 
@@ -667,13 +667,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	init_kthread_worker(&pit->worker);
+	kthread_init_worker(&pit->worker);
 	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
 				       "kvm-pit/%d", pid_nr);
 	if (IS_ERR(pit->worker_task))
 		goto fail_kthread;
 
-	init_kthread_work(&pit->expired, pit_do_work);
+	kthread_init_work(&pit->expired, pit_do_work);
 
 	pit->kvm = kvm;
 
@@ -730,7 +730,7 @@ void kvm_free_pit(struct kvm *kvm)
 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
 	kvm_pit_set_reinject(pit, false);
 	hrtimer_cancel(&pit->pit_state.timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 	kthread_stop(pit->worker_task);
 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
 	kfree(pit);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 083e56f72308..46fe9248410d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -31,6 +31,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	unsigned int granularity;
 	enum req_op op;
 	int alignment;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
@@ -50,6 +51,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		op = REQ_OP_DISCARD;
 	}
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
@@ -150,10 +155,15 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	unsigned int max_write_same_sectors;
 	struct bio *bio = NULL;
 	int ret = 0;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
@@ -202,6 +212,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	int ret;
 	struct bio *bio = NULL;
 	unsigned int sz;
+	sector_t bs_mask;
+
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
 
 	while (nr_sects != 0) {
 		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
diff --git a/block/ioctl.c b/block/ioctl.c
index ed2397f8de9d..755119c3c1b9 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -225,7 +225,8 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
 		unsigned long arg)
 {
 	uint64_t range[2];
-	uint64_t start, len;
+	struct address_space *mapping;
+	uint64_t start, end, len;
 
 	if (!(mode & FMODE_WRITE))
 		return -EBADF;
@@ -235,18 +236,23 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
 
 	start = range[0];
 	len = range[1];
+	end = start + len - 1;
 
 	if (start & 511)
 		return -EINVAL;
 	if (len & 511)
 		return -EINVAL;
-	start >>= 9;
-	len >>= 9;
-
-	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+	if (end >= (uint64_t)i_size_read(bdev->bd_inode))
+		return -EINVAL;
+	if (end < start)
 		return -EINVAL;
 
-	return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
+	/* Invalidate the page cache, including dirty pages */
+	mapping = bdev->bd_inode->i_mapping;
+	truncate_inode_pages_range(mapping, start, end);
+
+	return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+			false);
 }
 
 static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index bfb92ace2c91..6989ba0046df 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c | |||
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
47 | 47 | ||
48 | /* If another context is idling then defer */ | 48 | /* If another context is idling then defer */ |
49 | if (engine->idling) { | 49 | if (engine->idling) { |
50 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 50 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
51 | goto out; | 51 | goto out; |
52 | } | 52 | } |
53 | 53 | ||
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
58 | 58 | ||
59 | /* Only do teardown in the thread */ | 59 | /* Only do teardown in the thread */ |
60 | if (!in_kthread) { | 60 | if (!in_kthread) { |
61 | queue_kthread_work(&engine->kworker, | 61 | kthread_queue_work(&engine->kworker, |
62 | &engine->pump_requests); | 62 | &engine->pump_requests); |
63 | goto out; | 63 | goto out; |
64 | } | 64 | } |
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine, | |||
189 | ret = ablkcipher_enqueue_request(&engine->queue, req); | 189 | ret = ablkcipher_enqueue_request(&engine->queue, req); |
190 | 190 | ||
191 | if (!engine->busy && need_pump) | 191 | if (!engine->busy && need_pump) |
192 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 192 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
193 | 193 | ||
194 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 194 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
195 | return ret; | 195 | return ret; |
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine, | |||
231 | ret = ahash_enqueue_request(&engine->queue, req); | 231 | ret = ahash_enqueue_request(&engine->queue, req); |
232 | 232 | ||
233 | if (!engine->busy && need_pump) | 233 | if (!engine->busy && need_pump) |
234 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 234 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
235 | 235 | ||
236 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 236 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
237 | return ret; | 237 | return ret; |
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine, | |||
284 | 284 | ||
285 | req->base.complete(&req->base, err); | 285 | req->base.complete(&req->base, err); |
286 | 286 | ||
287 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 287 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
288 | } | 288 | } |
289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); | 289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); |
290 | 290 | ||
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, | |||
321 | 321 | ||
322 | req->base.complete(&req->base, err); | 322 | req->base.complete(&req->base, err); |
323 | 323 | ||
324 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 324 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
325 | } | 325 | } |
326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); | 326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); |
327 | 327 | ||
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine) | |||
345 | engine->running = true; | 345 | engine->running = true; |
346 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 346 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
347 | 347 | ||
348 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | 348 | kthread_queue_work(&engine->kworker, &engine->pump_requests); |
349 | 349 | ||
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
@@ -422,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); | 422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); |
423 | spin_lock_init(&engine->queue_lock); | 423 | spin_lock_init(&engine->queue_lock); |
424 | 424 | ||
425 | init_kthread_worker(&engine->kworker); | 425 | kthread_init_worker(&engine->kworker); |
426 | engine->kworker_task = kthread_run(kthread_worker_fn, | 426 | engine->kworker_task = kthread_run(kthread_worker_fn, |
427 | &engine->kworker, "%s", | 427 | &engine->kworker, "%s", |
428 | engine->name); | 428 | engine->name); |
@@ -430,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
430 | dev_err(dev, "failed to create crypto request pump task\n"); | 430 | dev_err(dev, "failed to create crypto request pump task\n"); |
431 | return NULL; | 431 | return NULL; |
432 | } | 432 | } |
433 | init_kthread_work(&engine->pump_requests, crypto_pump_work); | 433 | kthread_init_work(&engine->pump_requests, crypto_pump_work); |
434 | 434 | ||
435 | if (engine->rt) { | 435 | if (engine->rt) { |
436 | dev_info(dev, "will run requests pump with realtime priority\n"); | 436 | dev_info(dev, "will run requests pump with realtime priority\n"); |
@@ -455,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine) | |||
455 | if (ret) | 455 | if (ret) |
456 | return ret; | 456 | return ret; |
457 | 457 | ||
458 | flush_kthread_worker(&engine->kworker); | 458 | kthread_flush_worker(&engine->kworker); |
459 | kthread_stop(engine->kworker_task); | 459 | kthread_stop(engine->kworker_task); |
460 | 460 | ||
461 | return 0; | 461 | return 0; |
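
The crypto_engine hunks above (and the loop, rdmavt, dm, ivtv and encx24j600 hunks below) are a mechanical rename of the kthread worker API: init_kthread_worker(), init_kthread_work(), queue_kthread_work() and flush_kthread_worker() become kthread_init_worker(), kthread_init_work(), kthread_queue_work() and kthread_flush_worker(); the call sequences themselves are unchanged. A minimal sketch of the pattern under the new names — my_ctx, my_work_fn and the other identifiers are placeholders, not symbols from this merge:

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/kthread.h>

	struct my_ctx {
		struct kthread_worker worker;
		struct task_struct *worker_task;
		struct kthread_work work;
	};

	static void my_work_fn(struct kthread_work *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		/* deferred processing runs here, in the worker's kthread */
		(void)ctx;
	}

	static int my_ctx_start(struct my_ctx *ctx)
	{
		kthread_init_worker(&ctx->worker);		/* was init_kthread_worker() */
		ctx->worker_task = kthread_run(kthread_worker_fn, &ctx->worker,
					       "my-worker");
		if (IS_ERR(ctx->worker_task))
			return PTR_ERR(ctx->worker_task);
		kthread_init_work(&ctx->work, my_work_fn);	/* was init_kthread_work() */
		return 0;
	}

	/* may be called from any context, e.g. an interrupt handler or request path */
	static void my_ctx_kick(struct my_ctx *ctx)
	{
		kthread_queue_work(&ctx->worker, &ctx->work);	/* was queue_kthread_work() */
	}

	static void my_ctx_stop(struct my_ctx *ctx)
	{
		kthread_flush_worker(&ctx->worker);		/* was flush_kthread_worker() */
		kthread_stop(ctx->worker_task);
	}

The kthread_create_worker*() helpers added elsewhere in this series later fold the kthread_run(kthread_worker_fn, ...) boilerplate into the worker API itself.
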
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cbdb3b162718..fa1b7a90ba11 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo) | |||
840 | 840 | ||
841 | static void loop_unprepare_queue(struct loop_device *lo) | 841 | static void loop_unprepare_queue(struct loop_device *lo) |
842 | { | 842 | { |
843 | flush_kthread_worker(&lo->worker); | 843 | kthread_flush_worker(&lo->worker); |
844 | kthread_stop(lo->worker_task); | 844 | kthread_stop(lo->worker_task); |
845 | } | 845 | } |
846 | 846 | ||
847 | static int loop_prepare_queue(struct loop_device *lo) | 847 | static int loop_prepare_queue(struct loop_device *lo) |
848 | { | 848 | { |
849 | init_kthread_worker(&lo->worker); | 849 | kthread_init_worker(&lo->worker); |
850 | lo->worker_task = kthread_run(kthread_worker_fn, | 850 | lo->worker_task = kthread_run(kthread_worker_fn, |
851 | &lo->worker, "loop%d", lo->lo_number); | 851 | &lo->worker, "loop%d", lo->lo_number); |
852 | if (IS_ERR(lo->worker_task)) | 852 | if (IS_ERR(lo->worker_task)) |
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1658 | break; | 1658 | break; |
1659 | } | 1659 | } |
1660 | 1660 | ||
1661 | queue_kthread_work(&lo->worker, &cmd->work); | 1661 | kthread_queue_work(&lo->worker, &cmd->work); |
1662 | 1662 | ||
1663 | return BLK_MQ_RQ_QUEUE_OK; | 1663 | return BLK_MQ_RQ_QUEUE_OK; |
1664 | } | 1664 | } |
@@ -1696,7 +1696,7 @@ static int loop_init_request(void *data, struct request *rq, | |||
1696 | struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); | 1696 | struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); |
1697 | 1697 | ||
1698 | cmd->rq = rq; | 1698 | cmd->rq = rq; |
1699 | init_kthread_work(&cmd->work, loop_queue_work); | 1699 | kthread_init_work(&cmd->work, loop_queue_work); |
1700 | 1700 | ||
1701 | return 0; | 1701 | return 0; |
1702 | } | 1702 | } |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 3efb3bf0ab83..d131e152c8ce 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -2100,23 +2100,37 @@ unsigned long get_random_long(void) | |||
2100 | } | 2100 | } |
2101 | EXPORT_SYMBOL(get_random_long); | 2101 | EXPORT_SYMBOL(get_random_long); |
2102 | 2102 | ||
2103 | /* | 2103 | /** |
2104 | * randomize_range() returns a start address such that | 2104 | * randomize_page - Generate a random, page aligned address |
2105 | * @start: The smallest acceptable address the caller will take. | ||
2106 | * @range: The size of the area, starting at @start, within which the | ||
2107 | * random address must fall. | ||
2108 | * | ||
2109 | * If @start + @range would overflow, @range is capped. | ||
2105 | * | 2110 | * |
2106 | * [...... <range> .....] | 2111 | * NOTE: Historical use of randomize_range, which this replaces, presumed that |
2107 | * start end | 2112 | * @start was already page aligned. We now align it regardless. |
2108 | * | 2113 | * |
2109 | * a <range> with size "len" starting at the return value is inside in the | 2114 | * Return: A page aligned address within [start, start + range). On error, |
2110 | * area defined by [start, end], but is otherwise randomized. | 2115 | * @start is returned. |
2111 | */ | 2116 | */ |
2112 | unsigned long | 2117 | unsigned long |
2113 | randomize_range(unsigned long start, unsigned long end, unsigned long len) | 2118 | randomize_page(unsigned long start, unsigned long range) |
2114 | { | 2119 | { |
2115 | unsigned long range = end - len - start; | 2120 | if (!PAGE_ALIGNED(start)) { |
2121 | range -= PAGE_ALIGN(start) - start; | ||
2122 | start = PAGE_ALIGN(start); | ||
2123 | } | ||
2116 | 2124 | ||
2117 | if (end <= start + len) | 2125 | if (start > ULONG_MAX - range) |
2118 | return 0; | 2126 | range = ULONG_MAX - start; |
2119 | return PAGE_ALIGN(get_random_int() % range + start); | 2127 | |
2128 | range >>= PAGE_SHIFT; | ||
2129 | |||
2130 | if (range == 0) | ||
2131 | return start; | ||
2132 | |||
2133 | return start + (get_random_long() % range << PAGE_SHIFT); | ||
2120 | } | 2134 | } |
2121 | 2135 | ||
2122 | /* Interface for in-kernel drivers of true hardware RNGs. | 2136 | /* Interface for in-kernel drivers of true hardware RNGs. |
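
randomize_page() replaces randomize_range() with tighter semantics: @start is page-aligned up front (with @range shrunk by the same amount), @range is capped so that start + range cannot wrap, and the offset is drawn in whole pages via get_random_long(), falling back to @start when no full page fits. A hedged caller sketch — pick_random_base() and the 32 MiB window are illustrative, not taken from the patch:

	#include <linux/random.h>
	#include <linux/sizes.h>

	/*
	 * Return a page-aligned address in [base, base + SZ_32M).
	 * If the window collapses to zero pages after alignment,
	 * randomize_page() simply returns base.
	 */
	static unsigned long pick_random_base(unsigned long base)
	{
		return randomize_page(base, SZ_32M);
	}
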
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 8114744bf30c..d433b1db1fdd 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | #include <linux/kconfig.h> | ||
42 | #include "../tty/hvc/hvc_console.h" | 41 | #include "../tty/hvc/hvc_console.h" |
43 | 42 | ||
44 | #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) | 43 | #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) |
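
This hunk and the long run of header hunks below drop explicit #include <linux/kconfig.h> lines treewide. The include is redundant because kbuild force-includes kconfig.h into every compilation unit (via -include $(srctree)/include/linux/kconfig.h in the top-level Makefile), so IS_ENABLED(), IS_REACHABLE() and plain CONFIG_* tests keep working unchanged. A minimal sketch mirroring the virtio_console context line above:

	/*
	 * No #include <linux/kconfig.h> needed: kbuild already passes
	 * "-include $(srctree)/include/linux/kconfig.h" to every compile,
	 * so these macros are always in scope.
	 */
	#define is_rproc_enabled	IS_ENABLED(CONFIG_REMOTEPROC)

	#if IS_REACHABLE(CONFIG_DVB_ZL10039)
	/* attach helpers stay guarded exactly as before */
	#endif
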
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index f2f229efbe64..6d9904a4a0ab 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c | |||
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) | |||
129 | if (likely(worker)) { | 129 | if (likely(worker)) { |
130 | cq->notify = RVT_CQ_NONE; | 130 | cq->notify = RVT_CQ_NONE; |
131 | cq->triggered++; | 131 | cq->triggered++; |
132 | queue_kthread_work(worker, &cq->comptask); | 132 | kthread_queue_work(worker, &cq->comptask); |
133 | } | 133 | } |
134 | } | 134 | } |
135 | 135 | ||
@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, | |||
265 | cq->ibcq.cqe = entries; | 265 | cq->ibcq.cqe = entries; |
266 | cq->notify = RVT_CQ_NONE; | 266 | cq->notify = RVT_CQ_NONE; |
267 | spin_lock_init(&cq->lock); | 267 | spin_lock_init(&cq->lock); |
268 | init_kthread_work(&cq->comptask, send_complete); | 268 | kthread_init_work(&cq->comptask, send_complete); |
269 | cq->queue = wc; | 269 | cq->queue = wc; |
270 | 270 | ||
271 | ret = &cq->ibcq; | 271 | ret = &cq->ibcq; |
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq) | |||
295 | struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); | 295 | struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); |
296 | struct rvt_dev_info *rdi = cq->rdi; | 296 | struct rvt_dev_info *rdi = cq->rdi; |
297 | 297 | ||
298 | flush_kthread_work(&cq->comptask); | 298 | kthread_flush_work(&cq->comptask); |
299 | spin_lock(&rdi->n_cqs_lock); | 299 | spin_lock(&rdi->n_cqs_lock); |
300 | rdi->n_cqs_allocated--; | 300 | rdi->n_cqs_allocated--; |
301 | spin_unlock(&rdi->n_cqs_lock); | 301 | spin_unlock(&rdi->n_cqs_lock); |
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) | |||
514 | rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL); | 514 | rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL); |
515 | if (!rdi->worker) | 515 | if (!rdi->worker) |
516 | return -ENOMEM; | 516 | return -ENOMEM; |
517 | init_kthread_worker(rdi->worker); | 517 | kthread_init_worker(rdi->worker); |
518 | task = kthread_create_on_node( | 518 | task = kthread_create_on_node( |
519 | kthread_worker_fn, | 519 | kthread_worker_fn, |
520 | rdi->worker, | 520 | rdi->worker, |
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi) | |||
547 | /* blocks future queuing from send_complete() */ | 547 | /* blocks future queuing from send_complete() */ |
548 | rdi->worker = NULL; | 548 | rdi->worker = NULL; |
549 | smp_wmb(); /* See rdi_cq_enter */ | 549 | smp_wmb(); /* See rdi_cq_enter */ |
550 | flush_kthread_worker(worker); | 550 | kthread_flush_worker(worker); |
551 | kthread_stop(worker->task); | 551 | kthread_stop(worker->task); |
552 | kfree(worker); | 552 | kfree(worker); |
553 | } | 553 | } |
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index 09c769c6e91f..ef8c747c35e7 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
12 | #include <linux/kconfig.h> | ||
13 | #include <linux/list.h> | 12 | #include <linux/list.h> |
14 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
15 | #include <linux/rmi.h> | 14 | #include <linux/rmi.h> |
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index c83bce89028b..4a88312fbd25 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/bitmap.h> | 17 | #include <linux/bitmap.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/fs.h> | 19 | #include <linux/fs.h> |
20 | #include <linux/kconfig.h> | ||
21 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
22 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
23 | #include <linux/of.h> | 22 | #include <linux/of.h> |
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index fac81fc9bcf6..b5d2dfc23bad 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c | |||
@@ -8,7 +8,6 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/kconfig.h> | ||
12 | #include <linux/rmi.h> | 11 | #include <linux/rmi.h> |
13 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
14 | #include <linux/uaccess.h> | 13 | #include <linux/uaccess.h> |
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index 20c7134b3d3b..f798f427a46f 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/input.h> | 13 | #include <linux/input.h> |
14 | #include <linux/input/mt.h> | 14 | #include <linux/input/mt.h> |
15 | #include <linux/kconfig.h> | ||
16 | #include <linux/rmi.h> | 15 | #include <linux/rmi.h> |
17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
18 | #include <linux/of.h> | 17 | #include <linux/of.h> |
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index b844c89a9506..daa4ae89e466 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c | |||
@@ -52,7 +52,6 @@ | |||
52 | 52 | ||
53 | #include <linux/bitops.h> | 53 | #include <linux/bitops.h> |
54 | #include <linux/cpumask.h> | 54 | #include <linux/cpumask.h> |
55 | #include <linux/kconfig.h> | ||
56 | #include <linux/kernel.h> | 55 | #include <linux/kernel.h> |
57 | #include <linux/init.h> | 56 | #include <linux/init.h> |
58 | #include <linux/interrupt.h> | 57 | #include <linux/interrupt.h> |
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 0fea985ef1dc..353c54986211 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
15 | #include <linux/kconfig.h> | ||
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 0ec92631e23c..64c2692070ef 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kconfig.h> | ||
17 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
18 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
19 | #include <linux/of.h> | 18 | #include <linux/of.h> |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 1d4a5b46d9ae..bddf169c4b37 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kconfig.h> | ||
22 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
23 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
24 | #include <linux/of.h> | 23 | #include <linux/of.h> |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 5eacce1ef88b..dc75bea0d541 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -581,7 +581,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, | |||
581 | if (!md->init_tio_pdu) | 581 | if (!md->init_tio_pdu) |
582 | memset(&tio->info, 0, sizeof(tio->info)); | 582 | memset(&tio->info, 0, sizeof(tio->info)); |
583 | if (md->kworker_task) | 583 | if (md->kworker_task) |
584 | init_kthread_work(&tio->work, map_tio_request); | 584 | kthread_init_work(&tio->work, map_tio_request); |
585 | } | 585 | } |
586 | 586 | ||
587 | static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, | 587 | static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, |
@@ -831,7 +831,7 @@ static void dm_old_request_fn(struct request_queue *q) | |||
831 | tio = tio_from_request(rq); | 831 | tio = tio_from_request(rq); |
832 | /* Establish tio->ti before queuing work (map_tio_request) */ | 832 | /* Establish tio->ti before queuing work (map_tio_request) */ |
833 | tio->ti = ti; | 833 | tio->ti = ti; |
834 | queue_kthread_work(&md->kworker, &tio->work); | 834 | kthread_queue_work(&md->kworker, &tio->work); |
835 | BUG_ON(!irqs_disabled()); | 835 | BUG_ON(!irqs_disabled()); |
836 | } | 836 | } |
837 | } | 837 | } |
@@ -853,7 +853,7 @@ int dm_old_init_request_queue(struct mapped_device *md) | |||
853 | blk_queue_prep_rq(md->queue, dm_old_prep_fn); | 853 | blk_queue_prep_rq(md->queue, dm_old_prep_fn); |
854 | 854 | ||
855 | /* Initialize the request-based DM worker thread */ | 855 | /* Initialize the request-based DM worker thread */ |
856 | init_kthread_worker(&md->kworker); | 856 | kthread_init_worker(&md->kworker); |
857 | md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, | 857 | md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, |
858 | "kdmwork-%s", dm_device_name(md)); | 858 | "kdmwork-%s", dm_device_name(md)); |
859 | if (IS_ERR(md->kworker_task)) | 859 | if (IS_ERR(md->kworker_task)) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index be35258324c1..147af9536d0c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1891,7 +1891,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
1891 | spin_unlock_irq(q->queue_lock); | 1891 | spin_unlock_irq(q->queue_lock); |
1892 | 1892 | ||
1893 | if (dm_request_based(md) && md->kworker_task) | 1893 | if (dm_request_based(md) && md->kworker_task) |
1894 | flush_kthread_worker(&md->kworker); | 1894 | kthread_flush_worker(&md->kworker); |
1895 | 1895 | ||
1896 | /* | 1896 | /* |
1897 | * Take suspend_lock so that presuspend and postsuspend methods | 1897 | * Take suspend_lock so that presuspend and postsuspend methods |
@@ -2147,7 +2147,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, | |||
2147 | if (dm_request_based(md)) { | 2147 | if (dm_request_based(md)) { |
2148 | dm_stop_queue(md->queue); | 2148 | dm_stop_queue(md->queue); |
2149 | if (md->kworker_task) | 2149 | if (md->kworker_task) |
2150 | flush_kthread_worker(&md->kworker); | 2150 | kthread_flush_worker(&md->kworker); |
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | flush_workqueue(md->wq); | 2153 | flush_workqueue(md->wq); |
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h index 1dcc936e1661..dcdd163ace85 100644 --- a/drivers/media/dvb-frontends/af9013.h +++ b/drivers/media/dvb-frontends/af9013.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifndef AF9013_H | 25 | #ifndef AF9013_H |
26 | #define AF9013_H | 26 | #define AF9013_H |
27 | 27 | ||
28 | #include <linux/kconfig.h> | ||
29 | #include <linux/dvb/frontend.h> | 28 | #include <linux/dvb/frontend.h> |
30 | 29 | ||
31 | /* AF9013/5 GPIOs (mostly guessed) | 30 | /* AF9013/5 GPIOs (mostly guessed) |
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h index 6ad22b69a636..5b83e4f96297 100644 --- a/drivers/media/dvb-frontends/af9033.h +++ b/drivers/media/dvb-frontends/af9033.h | |||
@@ -22,8 +22,6 @@ | |||
22 | #ifndef AF9033_H | 22 | #ifndef AF9033_H |
23 | #define AF9033_H | 23 | #define AF9033_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | |||
27 | /* | 25 | /* |
28 | * I2C address (TODO: are these in 8-bit format?) | 26 | * I2C address (TODO: are these in 8-bit format?) |
29 | * 0x38, 0x3a, 0x3c, 0x3e | 27 | * 0x38, 0x3a, 0x3c, 0x3e |
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h index 6da4ae6d6cc3..dc61bf7d1b09 100644 --- a/drivers/media/dvb-frontends/ascot2e.h +++ b/drivers/media/dvb-frontends/ascot2e.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __DVB_ASCOT2E_H__ | 22 | #ifndef __DVB_ASCOT2E_H__ |
23 | #define __DVB_ASCOT2E_H__ | 23 | #define __DVB_ASCOT2E_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h index 5446d13fdfe8..bb862387080f 100644 --- a/drivers/media/dvb-frontends/atbm8830.h +++ b/drivers/media/dvb-frontends/atbm8830.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __ATBM8830_H__ | 22 | #ifndef __ATBM8830_H__ |
23 | #define __ATBM8830_H__ | 23 | #define __ATBM8830_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h index 78bf3f73e58d..21c51a4c519a 100644 --- a/drivers/media/dvb-frontends/au8522.h +++ b/drivers/media/dvb-frontends/au8522.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __AU8522_H__ | 22 | #ifndef __AU8522_H__ |
23 | #define __AU8522_H__ | 23 | #define __AU8522_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | enum au8522_if_freq { | 27 | enum au8522_if_freq { |
diff --git a/drivers/media/dvb-frontends/cx22702.h b/drivers/media/dvb-frontends/cx22702.h index 68b69a7660d2..a1956a9ba406 100644 --- a/drivers/media/dvb-frontends/cx22702.h +++ b/drivers/media/dvb-frontends/cx22702.h | |||
@@ -28,7 +28,6 @@ | |||
28 | #ifndef CX22702_H | 28 | #ifndef CX22702_H |
29 | #define CX22702_H | 29 | #define CX22702_H |
30 | 30 | ||
31 | #include <linux/kconfig.h> | ||
32 | #include <linux/dvb/frontend.h> | 31 | #include <linux/dvb/frontend.h> |
33 | 32 | ||
34 | struct cx22702_config { | 33 | struct cx22702_config { |
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h index 962919b9b6e6..194c703611b4 100644 --- a/drivers/media/dvb-frontends/cx24113.h +++ b/drivers/media/dvb-frontends/cx24113.h | |||
@@ -22,8 +22,6 @@ | |||
22 | #ifndef CX24113_H | 22 | #ifndef CX24113_H |
23 | #define CX24113_H | 23 | #define CX24113_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | |||
27 | struct dvb_frontend; | 25 | struct dvb_frontend; |
28 | 26 | ||
29 | struct cx24113_config { | 27 | struct cx24113_config { |
diff --git a/drivers/media/dvb-frontends/cx24116.h b/drivers/media/dvb-frontends/cx24116.h index f6dbabc1d62b..9ff8df8d44b8 100644 --- a/drivers/media/dvb-frontends/cx24116.h +++ b/drivers/media/dvb-frontends/cx24116.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef CX24116_H | 21 | #ifndef CX24116_H |
22 | #define CX24116_H | 22 | #define CX24116_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include <linux/dvb/frontend.h> | 24 | #include <linux/dvb/frontend.h> |
26 | 25 | ||
27 | struct cx24116_config { | 26 | struct cx24116_config { |
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h index 1648ab432168..445f13faf63a 100644 --- a/drivers/media/dvb-frontends/cx24117.h +++ b/drivers/media/dvb-frontends/cx24117.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef CX24117_H | 22 | #ifndef CX24117_H |
23 | #define CX24117_H | 23 | #define CX24117_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | struct cx24117_config { | 27 | struct cx24117_config { |
diff --git a/drivers/media/dvb-frontends/cx24120.h b/drivers/media/dvb-frontends/cx24120.h index f0970423e16f..de4ca9aa0923 100644 --- a/drivers/media/dvb-frontends/cx24120.h +++ b/drivers/media/dvb-frontends/cx24120.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #ifndef CX24120_H | 20 | #ifndef CX24120_H |
21 | #define CX24120_H | 21 | #define CX24120_H |
22 | 22 | ||
23 | #include <linux/kconfig.h> | ||
24 | #include <linux/dvb/frontend.h> | 23 | #include <linux/dvb/frontend.h> |
25 | #include <linux/firmware.h> | 24 | #include <linux/firmware.h> |
26 | 25 | ||
diff --git a/drivers/media/dvb-frontends/cx24123.h b/drivers/media/dvb-frontends/cx24123.h index 975f3c926fe8..aac23444aa9a 100644 --- a/drivers/media/dvb-frontends/cx24123.h +++ b/drivers/media/dvb-frontends/cx24123.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef CX24123_H | 21 | #ifndef CX24123_H |
22 | #define CX24123_H | 22 | #define CX24123_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include <linux/dvb/frontend.h> | 24 | #include <linux/dvb/frontend.h> |
26 | 25 | ||
27 | struct cx24123_config { | 26 | struct cx24123_config { |
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h index d77afe0b8a9e..f3ff8f6eb3bb 100644 --- a/drivers/media/dvb-frontends/cxd2820r.h +++ b/drivers/media/dvb-frontends/cxd2820r.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef CXD2820R_H | 22 | #ifndef CXD2820R_H |
23 | #define CXD2820R_H | 23 | #define CXD2820R_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | #define CXD2820R_GPIO_D (0 << 0) /* disable */ | 27 | #define CXD2820R_GPIO_D (0 << 0) /* disable */ |
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h index 62ad5f07390b..7f1acfb8f4f5 100644 --- a/drivers/media/dvb-frontends/cxd2841er.h +++ b/drivers/media/dvb-frontends/cxd2841er.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef CXD2841ER_H | 22 | #ifndef CXD2841ER_H |
23 | #define CXD2841ER_H | 23 | #define CXD2841ER_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | enum cxd2841er_xtal { | 27 | enum cxd2841er_xtal { |
diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h index b37e69e6a58c..67a6d50865fb 100644 --- a/drivers/media/dvb-frontends/dib3000mc.h +++ b/drivers/media/dvb-frontends/dib3000mc.h | |||
@@ -13,8 +13,6 @@ | |||
13 | #ifndef DIB3000MC_H | 13 | #ifndef DIB3000MC_H |
14 | #define DIB3000MC_H | 14 | #define DIB3000MC_H |
15 | 15 | ||
16 | #include <linux/kconfig.h> | ||
17 | |||
18 | #include "dibx000_common.h" | 16 | #include "dibx000_common.h" |
19 | 17 | ||
20 | struct dib3000mc_config { | 18 | struct dib3000mc_config { |
diff --git a/drivers/media/dvb-frontends/dib7000m.h b/drivers/media/dvb-frontends/dib7000m.h index 6468c278cc4d..8f84dfa9bb58 100644 --- a/drivers/media/dvb-frontends/dib7000m.h +++ b/drivers/media/dvb-frontends/dib7000m.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef DIB7000M_H | 1 | #ifndef DIB7000M_H |
2 | #define DIB7000M_H | 2 | #define DIB7000M_H |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | |||
6 | #include "dibx000_common.h" | 4 | #include "dibx000_common.h" |
7 | 5 | ||
8 | struct dib7000m_config { | 6 | struct dib7000m_config { |
diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h index baa278928cf3..205fbbff632b 100644 --- a/drivers/media/dvb-frontends/dib7000p.h +++ b/drivers/media/dvb-frontends/dib7000p.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef DIB7000P_H | 1 | #ifndef DIB7000P_H |
2 | #define DIB7000P_H | 2 | #define DIB7000P_H |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | |||
6 | #include "dibx000_common.h" | 4 | #include "dibx000_common.h" |
7 | 5 | ||
8 | struct dib7000p_config { | 6 | struct dib7000p_config { |
diff --git a/drivers/media/dvb-frontends/drxd.h b/drivers/media/dvb-frontends/drxd.h index a47c22d6667e..f0507cdbb503 100644 --- a/drivers/media/dvb-frontends/drxd.h +++ b/drivers/media/dvb-frontends/drxd.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #ifndef _DRXD_H_ | 24 | #ifndef _DRXD_H_ |
25 | #define _DRXD_H_ | 25 | #define _DRXD_H_ |
26 | 26 | ||
27 | #include <linux/kconfig.h> | ||
28 | #include <linux/types.h> | 27 | #include <linux/types.h> |
29 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
30 | 29 | ||
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h index 8f0b9eec528f..a629897eb905 100644 --- a/drivers/media/dvb-frontends/drxk.h +++ b/drivers/media/dvb-frontends/drxk.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _DRXK_H_ | 1 | #ifndef _DRXK_H_ |
2 | #define _DRXK_H_ | 2 | #define _DRXK_H_ |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | #include <linux/types.h> | 4 | #include <linux/types.h> |
6 | #include <linux/i2c.h> | 5 | #include <linux/i2c.h> |
7 | 6 | ||
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h index 153169da9017..82e8c2531f26 100644 --- a/drivers/media/dvb-frontends/ds3000.h +++ b/drivers/media/dvb-frontends/ds3000.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef DS3000_H | 22 | #ifndef DS3000_H |
23 | #define DS3000_H | 23 | #define DS3000_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | struct ds3000_config { | 27 | struct ds3000_config { |
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h index 15e4ceab869a..50f1af512b62 100644 --- a/drivers/media/dvb-frontends/dvb_dummy_fe.h +++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef DVB_DUMMY_FE_H | 22 | #ifndef DVB_DUMMY_FE_H |
23 | #define DVB_DUMMY_FE_H | 23 | #define DVB_DUMMY_FE_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | #include "dvb_frontend.h" | 26 | #include "dvb_frontend.h" |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/ec100.h b/drivers/media/dvb-frontends/ec100.h index 9544bab5cd1d..e894bdcf35a3 100644 --- a/drivers/media/dvb-frontends/ec100.h +++ b/drivers/media/dvb-frontends/ec100.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef EC100_H | 22 | #ifndef EC100_H |
23 | #define EC100_H | 23 | #define EC100_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | struct ec100_config { | 27 | struct ec100_config { |
diff --git a/drivers/media/dvb-frontends/hd29l2.h b/drivers/media/dvb-frontends/hd29l2.h index 48e9ab74c883..a14d6f36dbf6 100644 --- a/drivers/media/dvb-frontends/hd29l2.h +++ b/drivers/media/dvb-frontends/hd29l2.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #ifndef HD29L2_H | 23 | #ifndef HD29L2_H |
24 | #define HD29L2_H | 24 | #define HD29L2_H |
25 | 25 | ||
26 | #include <linux/kconfig.h> | ||
27 | #include <linux/dvb/frontend.h> | 26 | #include <linux/dvb/frontend.h> |
28 | 27 | ||
29 | struct hd29l2_config { | 28 | struct hd29l2_config { |
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h index e1b9224cfc55..333615491d9e 100644 --- a/drivers/media/dvb-frontends/helene.h +++ b/drivers/media/dvb-frontends/helene.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef __DVB_HELENE_H__ | 21 | #ifndef __DVB_HELENE_H__ |
22 | #define __DVB_HELENE_H__ | 22 | #define __DVB_HELENE_H__ |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include <linux/dvb/frontend.h> | 24 | #include <linux/dvb/frontend.h> |
26 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
27 | 26 | ||
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h index c1e2d1834b78..672a556df71a 100644 --- a/drivers/media/dvb-frontends/horus3a.h +++ b/drivers/media/dvb-frontends/horus3a.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __DVB_HORUS3A_H__ | 22 | #ifndef __DVB_HORUS3A_H__ |
23 | #define __DVB_HORUS3A_H__ | 23 | #define __DVB_HORUS3A_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h index af107a2dd357..5eab39744b23 100644 --- a/drivers/media/dvb-frontends/ix2505v.h +++ b/drivers/media/dvb-frontends/ix2505v.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #ifndef DVB_IX2505V_H | 20 | #ifndef DVB_IX2505V_H |
21 | #define DVB_IX2505V_H | 21 | #define DVB_IX2505V_H |
22 | 22 | ||
23 | #include <linux/kconfig.h> | ||
24 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | 25 | ||
diff --git a/drivers/media/dvb-frontends/lg2160.h b/drivers/media/dvb-frontends/lg2160.h index d20bd909de39..8c74ddc6b88a 100644 --- a/drivers/media/dvb-frontends/lg2160.h +++ b/drivers/media/dvb-frontends/lg2160.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef _LG2160_H_ | 22 | #ifndef _LG2160_H_ |
23 | #define _LG2160_H_ | 23 | #define _LG2160_H_ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
27 | #include "dvb_frontend.h" | 26 | #include "dvb_frontend.h" |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h index f91a1b49ce2f..e7dceb60e572 100644 --- a/drivers/media/dvb-frontends/lgdt3305.h +++ b/drivers/media/dvb-frontends/lgdt3305.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef _LGDT3305_H_ | 22 | #ifndef _LGDT3305_H_ |
23 | #define _LGDT3305_H_ | 23 | #define _LGDT3305_H_ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
27 | #include "dvb_frontend.h" | 26 | #include "dvb_frontend.h" |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/lgs8gl5.h b/drivers/media/dvb-frontends/lgs8gl5.h index a5b3faf121f0..f36a7fd0b102 100644 --- a/drivers/media/dvb-frontends/lgs8gl5.h +++ b/drivers/media/dvb-frontends/lgs8gl5.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #ifndef LGS8GL5_H | 23 | #ifndef LGS8GL5_H |
24 | #define LGS8GL5_H | 24 | #define LGS8GL5_H |
25 | 25 | ||
26 | #include <linux/kconfig.h> | ||
27 | #include <linux/dvb/frontend.h> | 26 | #include <linux/dvb/frontend.h> |
28 | 27 | ||
29 | struct lgs8gl5_config { | 28 | struct lgs8gl5_config { |
diff --git a/drivers/media/dvb-frontends/lgs8gxx.h b/drivers/media/dvb-frontends/lgs8gxx.h index 368c9928ef7f..7519c0210399 100644 --- a/drivers/media/dvb-frontends/lgs8gxx.h +++ b/drivers/media/dvb-frontends/lgs8gxx.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #ifndef __LGS8GXX_H__ | 26 | #ifndef __LGS8GXX_H__ |
27 | #define __LGS8GXX_H__ | 27 | #define __LGS8GXX_H__ |
28 | 28 | ||
29 | #include <linux/kconfig.h> | ||
30 | #include <linux/dvb/frontend.h> | 29 | #include <linux/dvb/frontend.h> |
31 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
32 | 31 | ||
diff --git a/drivers/media/dvb-frontends/lnbh24.h b/drivers/media/dvb-frontends/lnbh24.h index a088b8ec1e53..24431dfdce1f 100644 --- a/drivers/media/dvb-frontends/lnbh24.h +++ b/drivers/media/dvb-frontends/lnbh24.h | |||
@@ -23,8 +23,6 @@ | |||
23 | #ifndef _LNBH24_H | 23 | #ifndef _LNBH24_H |
24 | #define _LNBH24_H | 24 | #define _LNBH24_H |
25 | 25 | ||
26 | #include <linux/kconfig.h> | ||
27 | |||
28 | /* system register bits */ | 26 | /* system register bits */ |
29 | #define LNBH24_OLF 0x01 | 27 | #define LNBH24_OLF 0x01 |
30 | #define LNBH24_OTF 0x02 | 28 | #define LNBH24_OTF 0x02 |
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h index 1f329ef05acc..f13fd0308b3e 100644 --- a/drivers/media/dvb-frontends/lnbh25.h +++ b/drivers/media/dvb-frontends/lnbh25.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #define LNBH25_H | 22 | #define LNBH25_H |
23 | 23 | ||
24 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | /* 22 kHz tone enabled. Tone output controlled by DSQIN pin */ | 27 | /* 22 kHz tone enabled. Tone output controlled by DSQIN pin */ |
diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h index cd9101f6e579..4bb6439068ec 100644 --- a/drivers/media/dvb-frontends/lnbp21.h +++ b/drivers/media/dvb-frontends/lnbp21.h | |||
@@ -27,8 +27,6 @@ | |||
27 | #ifndef _LNBP21_H | 27 | #ifndef _LNBP21_H |
28 | #define _LNBP21_H | 28 | #define _LNBP21_H |
29 | 29 | ||
30 | #include <linux/kconfig.h> | ||
31 | |||
32 | /* system register bits */ | 30 | /* system register bits */ |
33 | /* [RO] 0=OK; 1=over current limit flag */ | 31 | /* [RO] 0=OK; 1=over current limit flag */ |
34 | #define LNBP21_OLF 0x01 | 32 | #define LNBP21_OLF 0x01 |
diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h index 5d01d92814c2..0cb72126c498 100644 --- a/drivers/media/dvb-frontends/lnbp22.h +++ b/drivers/media/dvb-frontends/lnbp22.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #ifndef _LNBP22_H | 28 | #ifndef _LNBP22_H |
29 | #define _LNBP22_H | 29 | #define _LNBP22_H |
30 | 30 | ||
31 | #include <linux/kconfig.h> | ||
32 | |||
33 | /* Enable */ | 31 | /* Enable */ |
34 | #define LNBP22_EN 0x10 | 32 | #define LNBP22_EN 0x10 |
35 | /* Voltage selection */ | 33 | /* Voltage selection */ |
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h index de7430178e9e..1a313b0f5875 100644 --- a/drivers/media/dvb-frontends/m88rs2000.h +++ b/drivers/media/dvb-frontends/m88rs2000.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #ifndef M88RS2000_H | 20 | #ifndef M88RS2000_H |
21 | #define M88RS2000_H | 21 | #define M88RS2000_H |
22 | 22 | ||
23 | #include <linux/kconfig.h> | ||
24 | #include <linux/dvb/frontend.h> | 23 | #include <linux/dvb/frontend.h> |
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | 25 | ||
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h index a113282d6956..dfb02db2126c 100644 --- a/drivers/media/dvb-frontends/mb86a20s.h +++ b/drivers/media/dvb-frontends/mb86a20s.h | |||
@@ -16,7 +16,6 @@ | |||
16 | #ifndef MB86A20S_H | 16 | #ifndef MB86A20S_H |
17 | #define MB86A20S_H | 17 | #define MB86A20S_H |
18 | 18 | ||
19 | #include <linux/kconfig.h> | ||
20 | #include <linux/dvb/frontend.h> | 19 | #include <linux/dvb/frontend.h> |
21 | 20 | ||
22 | /** | 21 | /** |
diff --git a/drivers/media/dvb-frontends/s5h1409.h b/drivers/media/dvb-frontends/s5h1409.h index f58b9ca5557a..b38557c451b9 100644 --- a/drivers/media/dvb-frontends/s5h1409.h +++ b/drivers/media/dvb-frontends/s5h1409.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __S5H1409_H__ | 22 | #ifndef __S5H1409_H__ |
23 | #define __S5H1409_H__ | 23 | #define __S5H1409_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | struct s5h1409_config { | 27 | struct s5h1409_config { |
diff --git a/drivers/media/dvb-frontends/s5h1411.h b/drivers/media/dvb-frontends/s5h1411.h index f3a87f7ec360..791bab0e16e9 100644 --- a/drivers/media/dvb-frontends/s5h1411.h +++ b/drivers/media/dvb-frontends/s5h1411.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __S5H1411_H__ | 22 | #ifndef __S5H1411_H__ |
23 | #define __S5H1411_H__ | 23 | #define __S5H1411_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | #define S5H1411_I2C_TOP_ADDR (0x32 >> 1) | 27 | #define S5H1411_I2C_TOP_ADDR (0x32 >> 1) |
diff --git a/drivers/media/dvb-frontends/s5h1432.h b/drivers/media/dvb-frontends/s5h1432.h index f490c5ee5801..b81c9bd4e422 100644 --- a/drivers/media/dvb-frontends/s5h1432.h +++ b/drivers/media/dvb-frontends/s5h1432.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __S5H1432_H__ | 22 | #ifndef __S5H1432_H__ |
23 | #define __S5H1432_H__ | 23 | #define __S5H1432_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | #define S5H1432_I2C_TOP_ADDR (0x02 >> 1) | 27 | #define S5H1432_I2C_TOP_ADDR (0x02 >> 1) |
diff --git a/drivers/media/dvb-frontends/s921.h b/drivers/media/dvb-frontends/s921.h index f5b722d8081b..a47ed894d4ae 100644 --- a/drivers/media/dvb-frontends/s921.h +++ b/drivers/media/dvb-frontends/s921.h | |||
@@ -17,7 +17,6 @@ | |||
17 | #ifndef S921_H | 17 | #ifndef S921_H |
18 | #define S921_H | 18 | #define S921_H |
19 | 19 | ||
20 | #include <linux/kconfig.h> | ||
21 | #include <linux/dvb/frontend.h> | 20 | #include <linux/dvb/frontend.h> |
22 | 21 | ||
23 | struct s921_config { | 22 | struct s921_config { |
diff --git a/drivers/media/dvb-frontends/si21xx.h b/drivers/media/dvb-frontends/si21xx.h index ef5f351ca68e..b1be62f1983a 100644 --- a/drivers/media/dvb-frontends/si21xx.h +++ b/drivers/media/dvb-frontends/si21xx.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef SI21XX_H | 1 | #ifndef SI21XX_H |
2 | #define SI21XX_H | 2 | #define SI21XX_H |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | #include <linux/dvb/frontend.h> | 4 | #include <linux/dvb/frontend.h> |
6 | #include "dvb_frontend.h" | 5 | #include "dvb_frontend.h" |
7 | 6 | ||
diff --git a/drivers/media/dvb-frontends/sp2.h b/drivers/media/dvb-frontends/sp2.h index 6cceea022d49..3901cd74b3f7 100644 --- a/drivers/media/dvb-frontends/sp2.h +++ b/drivers/media/dvb-frontends/sp2.h | |||
@@ -17,7 +17,6 @@ | |||
17 | #ifndef SP2_H | 17 | #ifndef SP2_H |
18 | #define SP2_H | 18 | #define SP2_H |
19 | 19 | ||
20 | #include <linux/kconfig.h> | ||
21 | #include "dvb_ca_en50221.h" | 20 | #include "dvb_ca_en50221.h" |
22 | 21 | ||
23 | /* | 22 | /* |
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h index da581b652cb9..78e75dfc317f 100644 --- a/drivers/media/dvb-frontends/stb6000.h +++ b/drivers/media/dvb-frontends/stb6000.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #ifndef __DVB_STB6000_H__ | 23 | #ifndef __DVB_STB6000_H__ |
24 | #define __DVB_STB6000_H__ | 24 | #define __DVB_STB6000_H__ |
25 | 25 | ||
26 | #include <linux/kconfig.h> | ||
27 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
28 | #include "dvb_frontend.h" | 27 | #include "dvb_frontend.h" |
29 | 28 | ||
diff --git a/drivers/media/dvb-frontends/stv0288.h b/drivers/media/dvb-frontends/stv0288.h index b58603c00c80..803acb917282 100644 --- a/drivers/media/dvb-frontends/stv0288.h +++ b/drivers/media/dvb-frontends/stv0288.h | |||
@@ -27,7 +27,6 @@ | |||
27 | #ifndef STV0288_H | 27 | #ifndef STV0288_H |
28 | #define STV0288_H | 28 | #define STV0288_H |
29 | 29 | ||
30 | #include <linux/kconfig.h> | ||
31 | #include <linux/dvb/frontend.h> | 30 | #include <linux/dvb/frontend.h> |
32 | #include "dvb_frontend.h" | 31 | #include "dvb_frontend.h" |
33 | 32 | ||
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h index 92b3e85fb818..b88166a9716f 100644 --- a/drivers/media/dvb-frontends/stv0367.h +++ b/drivers/media/dvb-frontends/stv0367.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #ifndef STV0367_H | 26 | #ifndef STV0367_H |
27 | #define STV0367_H | 27 | #define STV0367_H |
28 | 28 | ||
29 | #include <linux/kconfig.h> | ||
30 | #include <linux/dvb/frontend.h> | 29 | #include <linux/dvb/frontend.h> |
31 | #include "dvb_frontend.h" | 30 | #include "dvb_frontend.h" |
32 | 31 | ||
diff --git a/drivers/media/dvb-frontends/stv0900.h b/drivers/media/dvb-frontends/stv0900.h index c90bf00ea9ce..9ca2da90c7d7 100644 --- a/drivers/media/dvb-frontends/stv0900.h +++ b/drivers/media/dvb-frontends/stv0900.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #ifndef STV0900_H | 26 | #ifndef STV0900_H |
27 | #define STV0900_H | 27 | #define STV0900_H |
28 | 28 | ||
29 | #include <linux/kconfig.h> | ||
30 | #include <linux/dvb/frontend.h> | 29 | #include <linux/dvb/frontend.h> |
31 | #include "dvb_frontend.h" | 30 | #include "dvb_frontend.h" |
32 | 31 | ||
diff --git a/drivers/media/dvb-frontends/stv6110.h b/drivers/media/dvb-frontends/stv6110.h index f3c8a5c6b77d..4604f793d954 100644 --- a/drivers/media/dvb-frontends/stv6110.h +++ b/drivers/media/dvb-frontends/stv6110.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifndef __DVB_STV6110_H__ | 25 | #ifndef __DVB_STV6110_H__ |
26 | #define __DVB_STV6110_H__ | 26 | #define __DVB_STV6110_H__ |
27 | 27 | ||
28 | #include <linux/kconfig.h> | ||
29 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
30 | #include "dvb_frontend.h" | 29 | #include "dvb_frontend.h" |
31 | 30 | ||
diff --git a/drivers/media/dvb-frontends/tda10048.h b/drivers/media/dvb-frontends/tda10048.h index bc77a7311de1..a2cebb0cceba 100644 --- a/drivers/media/dvb-frontends/tda10048.h +++ b/drivers/media/dvb-frontends/tda10048.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef TDA10048_H | 22 | #ifndef TDA10048_H |
23 | #define TDA10048_H | 23 | #define TDA10048_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | #include <linux/firmware.h> | 26 | #include <linux/firmware.h> |
28 | 27 | ||
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.h b/drivers/media/dvb-frontends/tda18271c2dd.h index 7ebd8eaff4eb..e6ccf240f54c 100644 --- a/drivers/media/dvb-frontends/tda18271c2dd.h +++ b/drivers/media/dvb-frontends/tda18271c2dd.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _TDA18271C2DD_H_ | 1 | #ifndef _TDA18271C2DD_H_ |
2 | #define _TDA18271C2DD_H_ | 2 | #define _TDA18271C2DD_H_ |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | |||
6 | #if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD) | 4 | #if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD) |
7 | struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe, | 5 | struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe, |
8 | struct i2c_adapter *i2c, u8 adr); | 6 | struct i2c_adapter *i2c, u8 adr); |
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h index 9220e5cf0d21..facc54f0a6af 100644 --- a/drivers/media/dvb-frontends/ts2020.h +++ b/drivers/media/dvb-frontends/ts2020.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef TS2020_H | 22 | #ifndef TS2020_H |
23 | #define TS2020_H | 23 | #define TS2020_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/dvb/frontend.h> | 25 | #include <linux/dvb/frontend.h> |
27 | 26 | ||
28 | struct ts2020_config { | 27 | struct ts2020_config { |
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h index 670e76a654ee..c568d8d59de3 100644 --- a/drivers/media/dvb-frontends/zl10036.h +++ b/drivers/media/dvb-frontends/zl10036.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef DVB_ZL10036_H | 21 | #ifndef DVB_ZL10036_H |
22 | #define DVB_ZL10036_H | 22 | #define DVB_ZL10036_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
26 | #include "dvb_frontend.h" | 25 | #include "dvb_frontend.h" |
27 | 26 | ||
diff --git a/drivers/media/dvb-frontends/zl10039.h b/drivers/media/dvb-frontends/zl10039.h index 070929444e71..66e708569375 100644 --- a/drivers/media/dvb-frontends/zl10039.h +++ b/drivers/media/dvb-frontends/zl10039.h | |||
@@ -22,8 +22,6 @@ | |||
22 | #ifndef ZL10039_H | 22 | #ifndef ZL10039_H |
23 | #define ZL10039_H | 23 | #define ZL10039_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | |||
27 | #if IS_REACHABLE(CONFIG_DVB_ZL10039) | 25 | #if IS_REACHABLE(CONFIG_DVB_ZL10039) |
28 | struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe, | 26 | struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe, |
29 | u8 i2c_addr, | 27 | u8 i2c_addr, |
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h index 6c511723fd1b..57a40c84b46e 100644 --- a/drivers/media/pci/cx23885/altera-ci.h +++ b/drivers/media/pci/cx23885/altera-ci.h | |||
@@ -20,8 +20,6 @@ | |||
20 | #ifndef __ALTERA_CI_H | 20 | #ifndef __ALTERA_CI_H |
21 | #define __ALTERA_CI_H | 21 | #define __ALTERA_CI_H |
22 | 22 | ||
23 | #include <linux/kconfig.h> | ||
24 | |||
25 | #define ALT_DATA 0x000000ff | 23 | #define ALT_DATA 0x000000ff |
26 | #define ALT_TDI 0x00008000 | 24 | #define ALT_TDI 0x00008000 |
27 | #define ALT_TDO 0x00004000 | 25 | #define ALT_TDO 0x00004000 |
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 374033a5bdaf..ee48c3e09de4 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c | |||
@@ -750,7 +750,7 @@ static int ivtv_init_struct1(struct ivtv *itv) | |||
750 | spin_lock_init(&itv->lock); | 750 | spin_lock_init(&itv->lock); |
751 | spin_lock_init(&itv->dma_reg_lock); | 751 | spin_lock_init(&itv->dma_reg_lock); |
752 | 752 | ||
753 | init_kthread_worker(&itv->irq_worker); | 753 | kthread_init_worker(&itv->irq_worker); |
754 | itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, | 754 | itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, |
755 | "%s", itv->v4l2_dev.name); | 755 | "%s", itv->v4l2_dev.name); |
756 | if (IS_ERR(itv->irq_worker_task)) { | 756 | if (IS_ERR(itv->irq_worker_task)) { |
@@ -760,7 +760,7 @@ static int ivtv_init_struct1(struct ivtv *itv) | |||
760 | /* must use the FIFO scheduler as it is realtime sensitive */ | 760 | /* must use the FIFO scheduler as it is realtime sensitive */ |
761 | sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param); | 761 | sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param); |
762 | 762 | ||
763 | init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); | 763 | kthread_init_work(&itv->irq_work, ivtv_irq_work_handler); |
764 | 764 | ||
765 | /* Initial settings */ | 765 | /* Initial settings */ |
766 | itv->cxhdl.port = CX2341X_PORT_MEMORY; | 766 | itv->cxhdl.port = CX2341X_PORT_MEMORY; |
@@ -1441,7 +1441,7 @@ static void ivtv_remove(struct pci_dev *pdev) | |||
1441 | del_timer_sync(&itv->dma_timer); | 1441 | del_timer_sync(&itv->dma_timer); |
1442 | 1442 | ||
1443 | /* Kill irq worker */ | 1443 | /* Kill irq worker */ |
1444 | flush_kthread_worker(&itv->irq_worker); | 1444 | kthread_flush_worker(&itv->irq_worker); |
1445 | kthread_stop(itv->irq_worker_task); | 1445 | kthread_stop(itv->irq_worker_task); |
1446 | 1446 | ||
1447 | ivtv_streams_cleanup(itv); | 1447 | ivtv_streams_cleanup(itv); |
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c index 36ca2d67c812..6efe1f71262c 100644 --- a/drivers/media/pci/ivtv/ivtv-irq.c +++ b/drivers/media/pci/ivtv/ivtv-irq.c | |||
@@ -1062,7 +1062,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id) | |||
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { | 1064 | if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { |
1065 | queue_kthread_work(&itv->irq_worker, &itv->irq_work); | 1065 | kthread_queue_work(&itv->irq_worker, &itv->irq_work); |
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | spin_unlock(&itv->dma_reg_lock); | 1068 | spin_unlock(&itv->dma_reg_lock); |
diff --git a/drivers/media/tuners/fc0011.h b/drivers/media/tuners/fc0011.h index 81bb568d6943..438cf897acd1 100644 --- a/drivers/media/tuners/fc0011.h +++ b/drivers/media/tuners/fc0011.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef LINUX_FC0011_H_ | 1 | #ifndef LINUX_FC0011_H_ |
2 | #define LINUX_FC0011_H_ | 2 | #define LINUX_FC0011_H_ |
3 | 3 | ||
4 | #include <linux/kconfig.h> | ||
5 | #include "dvb_frontend.h" | 4 | #include "dvb_frontend.h" |
6 | 5 | ||
7 | 6 | ||
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h index 9ad32859bab0..4a23e418daf0 100644 --- a/drivers/media/tuners/fc0012.h +++ b/drivers/media/tuners/fc0012.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef _FC0012_H_ | 21 | #ifndef _FC0012_H_ |
22 | #define _FC0012_H_ | 22 | #define _FC0012_H_ |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | #include "fc001x-common.h" | 25 | #include "fc001x-common.h" |
27 | 26 | ||
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h index e130bd7a3230..8c34105c9383 100644 --- a/drivers/media/tuners/fc0013.h +++ b/drivers/media/tuners/fc0013.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef _FC0013_H_ | 22 | #ifndef _FC0013_H_ |
23 | #define _FC0013_H_ | 23 | #define _FC0013_H_ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include "dvb_frontend.h" | 25 | #include "dvb_frontend.h" |
27 | #include "fc001x-common.h" | 26 | #include "fc001x-common.h" |
28 | 27 | ||
diff --git a/drivers/media/tuners/max2165.h b/drivers/media/tuners/max2165.h index 5054f01a78fb..aadd9fea59e4 100644 --- a/drivers/media/tuners/max2165.h +++ b/drivers/media/tuners/max2165.h | |||
@@ -22,8 +22,6 @@ | |||
22 | #ifndef __MAX2165_H__ | 22 | #ifndef __MAX2165_H__ |
23 | #define __MAX2165_H__ | 23 | #define __MAX2165_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | |||
27 | struct dvb_frontend; | 25 | struct dvb_frontend; |
28 | struct i2c_adapter; | 26 | struct i2c_adapter; |
29 | 27 | ||
diff --git a/drivers/media/tuners/mc44s803.h b/drivers/media/tuners/mc44s803.h index b3e614be657d..6b40df339284 100644 --- a/drivers/media/tuners/mc44s803.h +++ b/drivers/media/tuners/mc44s803.h | |||
@@ -22,8 +22,6 @@ | |||
22 | #ifndef MC44S803_H | 22 | #ifndef MC44S803_H |
23 | #define MC44S803_H | 23 | #define MC44S803_H |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | |||
27 | struct dvb_frontend; | 25 | struct dvb_frontend; |
28 | struct i2c_adapter; | 26 | struct i2c_adapter; |
29 | 27 | ||
diff --git a/drivers/media/tuners/mxl5005s.h b/drivers/media/tuners/mxl5005s.h index 5764b12c5c7c..d842734f2dcd 100644 --- a/drivers/media/tuners/mxl5005s.h +++ b/drivers/media/tuners/mxl5005s.h | |||
@@ -23,8 +23,6 @@ | |||
23 | #ifndef __MXL5005S_H | 23 | #ifndef __MXL5005S_H |
24 | #define __MXL5005S_H | 24 | #define __MXL5005S_H |
25 | 25 | ||
26 | #include <linux/kconfig.h> | ||
27 | |||
28 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
29 | #include "dvb_frontend.h" | 27 | #include "dvb_frontend.h" |
30 | 28 | ||
diff --git a/drivers/media/tuners/r820t.h b/drivers/media/tuners/r820t.h index b1e5661af1c7..fdcab91405de 100644 --- a/drivers/media/tuners/r820t.h +++ b/drivers/media/tuners/r820t.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef R820T_H | 21 | #ifndef R820T_H |
22 | #define R820T_H | 22 | #define R820T_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | 25 | ||
27 | enum r820t_chip { | 26 | enum r820t_chip { |
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h index 5f1a60bf7ced..76807f5b3cf8 100644 --- a/drivers/media/tuners/si2157.h +++ b/drivers/media/tuners/si2157.h | |||
@@ -17,7 +17,6 @@ | |||
17 | #ifndef SI2157_H | 17 | #ifndef SI2157_H |
18 | #define SI2157_H | 18 | #define SI2157_H |
19 | 19 | ||
20 | #include <linux/kconfig.h> | ||
21 | #include <media/media-device.h> | 20 | #include <media/media-device.h> |
22 | #include "dvb_frontend.h" | 21 | #include "dvb_frontend.h" |
23 | 22 | ||
diff --git a/drivers/media/tuners/tda18212.h b/drivers/media/tuners/tda18212.h index e58c9096d79c..6391dafd0c9d 100644 --- a/drivers/media/tuners/tda18212.h +++ b/drivers/media/tuners/tda18212.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef TDA18212_H | 21 | #ifndef TDA18212_H |
22 | #define TDA18212_H | 22 | #define TDA18212_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | 25 | ||
27 | struct tda18212_config { | 26 | struct tda18212_config { |
diff --git a/drivers/media/tuners/tda18218.h b/drivers/media/tuners/tda18218.h index 1eacb4f84e93..076b5f2e888d 100644 --- a/drivers/media/tuners/tda18218.h +++ b/drivers/media/tuners/tda18218.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef TDA18218_H | 21 | #ifndef TDA18218_H |
22 | #define TDA18218_H | 22 | #define TDA18218_H |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | 25 | ||
27 | struct tda18218_config { | 26 | struct tda18218_config { |
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h index 00ba29e21fb9..336bd49eb09b 100644 --- a/drivers/media/tuners/xc5000.h +++ b/drivers/media/tuners/xc5000.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #ifndef __XC5000_H__ | 22 | #ifndef __XC5000_H__ |
23 | #define __XC5000_H__ | 23 | #define __XC5000_H__ |
24 | 24 | ||
25 | #include <linux/kconfig.h> | ||
26 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
27 | 26 | ||
28 | struct dvb_frontend; | 27 | struct dvb_frontend; |
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h index 7065aca81252..e6eae9d88e9f 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef __MXL111SF_DEMOD_H__ | 21 | #ifndef __MXL111SF_DEMOD_H__ |
22 | #define __MXL111SF_DEMOD_H__ | 22 | #define __MXL111SF_DEMOD_H__ |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | #include "mxl111sf.h" | 25 | #include "mxl111sf.h" |
27 | 26 | ||
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h index 509b55071218..e96d9a444ed1 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #ifndef __MXL111SF_TUNER_H__ | 21 | #ifndef __MXL111SF_TUNER_H__ |
22 | #define __MXL111SF_TUNER_H__ | 22 | #define __MXL111SF_TUNER_H__ |
23 | 23 | ||
24 | #include <linux/kconfig.h> | ||
25 | #include "dvb_frontend.h" | 24 | #include "dvb_frontend.h" |
26 | #include "mxl111sf.h" | 25 | #include "mxl111sf.h" |
27 | 26 | ||
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 4b08c2a47ae2..18ed3bfbb5e2 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c | |||
@@ -9,7 +9,6 @@ | |||
9 | * see Documentation/dvb/README.dvb-usb for more information | 9 | * see Documentation/dvb/README.dvb-usb for more information |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kconfig.h> | ||
13 | #include "dibusb.h" | 12 | #include "dibusb.h" |
14 | 13 | ||
15 | /* Max transfer size done by I2C transfer functions */ | 14 | /* Max transfer size done by I2C transfer functions */ |
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 6d43d75493ea..474c11e1d495 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/kconfig.h> | ||
14 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
15 | #include <linux/init.h> | 14 | #include <linux/init.h> |
16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 4f270482cfd0..d46e4adf6d2b 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/gfp.h> | 39 | #include <linux/gfp.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/reboot.h> | 41 | #include <linux/reboot.h> |
42 | #include <linux/kconfig.h> | ||
43 | #include <linux/leds.h> | 42 | #include <linux/leds.h> |
44 | 43 | ||
45 | #include <linux/mtd/mtd.h> | 44 | #include <linux/mtd/mtd.h> |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index c1f34f04e338..fccdd49bb964 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/mtd/mtd.h> | 30 | #include <linux/mtd/mtd.h> |
31 | #include <linux/mtd/partitions.h> | 31 | #include <linux/mtd/partitions.h> |
32 | #include <linux/err.h> | 32 | #include <linux/err.h> |
33 | #include <linux/kconfig.h> | ||
34 | 33 | ||
35 | #include "mtdcore.h" | 34 | #include "mtdcore.h" |
36 | 35 | ||
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index cc9e6bd83e0e..76fb8552c9d9 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c | |||
@@ -17,7 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/kconfig.h> | ||
21 | #include <linux/module.h> | 20 | #include <linux/module.h> |
22 | #include <linux/io.h> | 21 | #include <linux/io.h> |
23 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
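
These #include <linux/kconfig.h> removals (here and in the other files touched by this series) are mechanical: the build system already force-includes kconfig.h for every C file, so the config-test helpers keep working without a local include. A minimal sketch, assuming arbitrary CONFIG symbols:

#include <linux/kernel.h>

/* Sketch: the config-test helpers still work with no explicit
 * #include <linux/kconfig.h>, because kbuild injects that header
 * on the compiler command line for every translation unit. */
static void demo_report_config(void)
{
	if (IS_ENABLED(CONFIG_OF))		/* true for =y or =m */
		pr_info("CONFIG_OF is enabled\n");

	if (IS_BUILTIN(CONFIG_I2C))		/* true only for =y */
		pr_info("CONFIG_I2C is built in\n");
}
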
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 42e34076d2de..b14f0305aa31 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c | |||
@@ -821,7 +821,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev) | |||
821 | } | 821 | } |
822 | 822 | ||
823 | if (oldfilter != priv->rxfilter) | 823 | if (oldfilter != priv->rxfilter) |
824 | queue_kthread_work(&priv->kworker, &priv->setrx_work); | 824 | kthread_queue_work(&priv->kworker, &priv->setrx_work); |
825 | } | 825 | } |
826 | 826 | ||
827 | static void encx24j600_hw_tx(struct encx24j600_priv *priv) | 827 | static void encx24j600_hw_tx(struct encx24j600_priv *priv) |
@@ -879,7 +879,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev) | |||
879 | /* Remember the skb for deferred processing */ | 879 | /* Remember the skb for deferred processing */ |
880 | priv->tx_skb = skb; | 880 | priv->tx_skb = skb; |
881 | 881 | ||
882 | queue_kthread_work(&priv->kworker, &priv->tx_work); | 882 | kthread_queue_work(&priv->kworker, &priv->tx_work); |
883 | 883 | ||
884 | return NETDEV_TX_OK; | 884 | return NETDEV_TX_OK; |
885 | } | 885 | } |
@@ -1037,9 +1037,9 @@ static int encx24j600_spi_probe(struct spi_device *spi) | |||
1037 | goto out_free; | 1037 | goto out_free; |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | init_kthread_worker(&priv->kworker); | 1040 | kthread_init_worker(&priv->kworker); |
1041 | init_kthread_work(&priv->tx_work, encx24j600_tx_proc); | 1041 | kthread_init_work(&priv->tx_work, encx24j600_tx_proc); |
1042 | init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc); | 1042 | kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc); |
1043 | 1043 | ||
1044 | priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker, | 1044 | priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker, |
1045 | "encx24j600"); | 1045 | "encx24j600"); |
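
The encx24j600 hunks above are part of the kthread worker API rename (queue_kthread_work() becomes kthread_queue_work(), and so on); the calling pattern itself is unchanged. A minimal sketch of the renamed API, using hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/err.h>

struct demo_ctx {
	struct kthread_worker	kworker;
	struct task_struct	*kworker_task;
	struct kthread_work	tx_work;
};

static void demo_tx_proc(struct kthread_work *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, tx_work);

	pr_debug("processing tx work for %p\n", ctx);
}

static int demo_start(struct demo_ctx *ctx)
{
	kthread_init_worker(&ctx->kworker);
	kthread_init_work(&ctx->tx_work, demo_tx_proc);

	ctx->kworker_task = kthread_run(kthread_worker_fn, &ctx->kworker, "demo");
	if (IS_ERR(ctx->kworker_task))
		return PTR_ERR(ctx->kworker_task);

	kthread_queue_work(&ctx->kworker, &ctx->tx_work);
	return 0;
}

static void demo_stop(struct demo_ctx *ctx)
{
	kthread_flush_worker(&ctx->kworker);	/* wait for pending work */
	kthread_stop(ctx->kworker_task);
}
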
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index e15bf84fc6b2..0ac449acaf5b 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/highmem.h> | 11 | #include <linux/highmem.h> |
12 | #include <linux/if_vlan.h> | 12 | #include <linux/if_vlan.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/kconfig.h> | ||
15 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
17 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 37ab46cdbec4..d2349a1bc6ba 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/kconfig.h> | ||
13 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
14 | #include <linux/etherdevice.h> | 13 | #include <linux/etherdevice.h> |
15 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 0b37ce9f28f1..ca31a57dbc86 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/kconfig.h> | ||
14 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
15 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
16 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 68ef1875e8a8..0fc99f0f2571 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -515,7 +515,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req, | |||
515 | goto out; | 515 | goto out; |
516 | 516 | ||
517 | ret = BLK_MQ_RQ_QUEUE_BUSY; | 517 | ret = BLK_MQ_RQ_QUEUE_BUSY; |
518 | if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir)) | 518 | if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, |
519 | DMA_ATTR_NO_WARN)) | ||
519 | goto out; | 520 | goto out; |
520 | 521 | ||
521 | if (!nvme_setup_prps(dev, req, size)) | 522 | if (!nvme_setup_prps(dev, req, size)) |
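
With the new attribute, a driver that already handles mapping failures (as NVMe does here by returning BLK_MQ_RQ_QUEUE_BUSY and retrying) can ask the DMA layer not to log them. An illustrative sketch, with a hypothetical demo_map() wrapper:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Sketch: suppress allocation-failure log spam for a mapping the
 * caller will simply retry later. */
static int demo_map(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int mapped;

	mapped = dma_map_sg_attrs(dev, sg, nents, dir, DMA_ATTR_NO_WARN);
	if (!mapped)
		return -EBUSY;	/* caller requeues and tries again later */

	return mapped;
}
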
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig index 7512e98e9311..564a51abeece 100644 --- a/drivers/pps/Kconfig +++ b/drivers/pps/Kconfig | |||
@@ -31,7 +31,7 @@ config PPS_DEBUG | |||
31 | 31 | ||
32 | config NTP_PPS | 32 | config NTP_PPS |
33 | bool "PPS kernel consumer support" | 33 | bool "PPS kernel consumer support" |
34 | depends on !NO_HZ | 34 | depends on !NO_HZ_COMMON |
35 | help | 35 | help |
36 | This option adds support for direct in-kernel time | 36 | This option adds support for direct in-kernel time |
37 | synchronization using an external PPS signal. | 37 | synchronization using an external PPS signal. |
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index cebc296463ad..bad0e0ea4f30 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c | |||
@@ -1841,24 +1841,19 @@ static int cm_chan_msg_send(void __user *arg) | |||
1841 | { | 1841 | { |
1842 | struct rio_cm_msg msg; | 1842 | struct rio_cm_msg msg; |
1843 | void *buf; | 1843 | void *buf; |
1844 | int ret = 0; | 1844 | int ret; |
1845 | 1845 | ||
1846 | if (copy_from_user(&msg, arg, sizeof(msg))) | 1846 | if (copy_from_user(&msg, arg, sizeof(msg))) |
1847 | return -EFAULT; | 1847 | return -EFAULT; |
1848 | if (msg.size > RIO_MAX_MSG_SIZE) | 1848 | if (msg.size > RIO_MAX_MSG_SIZE) |
1849 | return -EINVAL; | 1849 | return -EINVAL; |
1850 | 1850 | ||
1851 | buf = kmalloc(msg.size, GFP_KERNEL); | 1851 | buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size); |
1852 | if (!buf) | 1852 | if (IS_ERR(buf)) |
1853 | return -ENOMEM; | 1853 | return PTR_ERR(buf); |
1854 | |||
1855 | if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) { | ||
1856 | ret = -EFAULT; | ||
1857 | goto out; | ||
1858 | } | ||
1859 | 1854 | ||
1860 | ret = riocm_ch_send(msg.ch_num, buf, msg.size); | 1855 | ret = riocm_ch_send(msg.ch_num, buf, msg.size); |
1861 | out: | 1856 | |
1862 | kfree(buf); | 1857 | kfree(buf); |
1863 | return ret; | 1858 | return ret; |
1864 | } | 1859 | } |
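
The rio_cm change above is the standard memdup_user() conversion: one call replaces the kmalloc()/copy_from_user() pair and returns an ERR_PTR on failure. A minimal sketch of the idiom, with a hypothetical demo helper:

#include <linux/string.h>	/* memdup_user() */
#include <linux/slab.h>		/* kfree() */
#include <linux/err.h>
#include <linux/uaccess.h>

static int demo_handle_msg(void __user *umsg, size_t size)
{
	void *buf;
	int ret;

	buf = memdup_user(umsg, size);	/* allocate + copy + error handling */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = 0;			/* ... consume buf ... */

	kfree(buf);
	return ret;
}
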
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8146ccd35a1a..5787b723b593 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1112,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread) | |||
1112 | 1112 | ||
1113 | /* If another context is idling the device then defer */ | 1113 | /* If another context is idling the device then defer */ |
1114 | if (master->idling) { | 1114 | if (master->idling) { |
1115 | queue_kthread_work(&master->kworker, &master->pump_messages); | 1115 | kthread_queue_work(&master->kworker, &master->pump_messages); |
1116 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1116 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1117 | return; | 1117 | return; |
1118 | } | 1118 | } |
@@ -1126,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread) | |||
1126 | 1126 | ||
1127 | /* Only do teardown in the thread */ | 1127 | /* Only do teardown in the thread */ |
1128 | if (!in_kthread) { | 1128 | if (!in_kthread) { |
1129 | queue_kthread_work(&master->kworker, | 1129 | kthread_queue_work(&master->kworker, |
1130 | &master->pump_messages); | 1130 | &master->pump_messages); |
1131 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1131 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1132 | return; | 1132 | return; |
@@ -1250,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master) | |||
1250 | master->running = false; | 1250 | master->running = false; |
1251 | master->busy = false; | 1251 | master->busy = false; |
1252 | 1252 | ||
1253 | init_kthread_worker(&master->kworker); | 1253 | kthread_init_worker(&master->kworker); |
1254 | master->kworker_task = kthread_run(kthread_worker_fn, | 1254 | master->kworker_task = kthread_run(kthread_worker_fn, |
1255 | &master->kworker, "%s", | 1255 | &master->kworker, "%s", |
1256 | dev_name(&master->dev)); | 1256 | dev_name(&master->dev)); |
@@ -1258,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master) | |||
1258 | dev_err(&master->dev, "failed to create message pump task\n"); | 1258 | dev_err(&master->dev, "failed to create message pump task\n"); |
1259 | return PTR_ERR(master->kworker_task); | 1259 | return PTR_ERR(master->kworker_task); |
1260 | } | 1260 | } |
1261 | init_kthread_work(&master->pump_messages, spi_pump_messages); | 1261 | kthread_init_work(&master->pump_messages, spi_pump_messages); |
1262 | 1262 | ||
1263 | /* | 1263 | /* |
1264 | * Master config will indicate if this controller should run the | 1264 | * Master config will indicate if this controller should run the |
@@ -1331,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master) | |||
1331 | spin_lock_irqsave(&master->queue_lock, flags); | 1331 | spin_lock_irqsave(&master->queue_lock, flags); |
1332 | master->cur_msg = NULL; | 1332 | master->cur_msg = NULL; |
1333 | master->cur_msg_prepared = false; | 1333 | master->cur_msg_prepared = false; |
1334 | queue_kthread_work(&master->kworker, &master->pump_messages); | 1334 | kthread_queue_work(&master->kworker, &master->pump_messages); |
1335 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1335 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1336 | 1336 | ||
1337 | trace_spi_message_done(mesg); | 1337 | trace_spi_message_done(mesg); |
@@ -1357,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master) | |||
1357 | master->cur_msg = NULL; | 1357 | master->cur_msg = NULL; |
1358 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1358 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1359 | 1359 | ||
1360 | queue_kthread_work(&master->kworker, &master->pump_messages); | 1360 | kthread_queue_work(&master->kworker, &master->pump_messages); |
1361 | 1361 | ||
1362 | return 0; | 1362 | return 0; |
1363 | } | 1363 | } |
@@ -1404,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master) | |||
1404 | ret = spi_stop_queue(master); | 1404 | ret = spi_stop_queue(master); |
1405 | 1405 | ||
1406 | /* | 1406 | /* |
1407 | * flush_kthread_worker will block until all work is done. | 1407 | * kthread_flush_worker will block until all work is done. |
1408 | * If the reason that stop_queue timed out is that the work will never | 1408 | * If the reason that stop_queue timed out is that the work will never |
1409 | * finish, then it does no good to call flush/stop thread, so | 1409 | * finish, then it does no good to call flush/stop thread, so |
1410 | * return anyway. | 1410 | * return anyway. |
@@ -1414,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master) | |||
1414 | return ret; | 1414 | return ret; |
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | flush_kthread_worker(&master->kworker); | 1417 | kthread_flush_worker(&master->kworker); |
1418 | kthread_stop(master->kworker_task); | 1418 | kthread_stop(master->kworker_task); |
1419 | 1419 | ||
1420 | return 0; | 1420 | return 0; |
@@ -1438,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi, | |||
1438 | 1438 | ||
1439 | list_add_tail(&msg->queue, &master->queue); | 1439 | list_add_tail(&msg->queue, &master->queue); |
1440 | if (!master->busy && need_pump) | 1440 | if (!master->busy && need_pump) |
1441 | queue_kthread_work(&master->kworker, &master->pump_messages); | 1441 | kthread_queue_work(&master->kworker, &master->pump_messages); |
1442 | 1442 | ||
1443 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1443 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1444 | return 0; | 1444 | return 0; |
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index 5d79efc1aafe..046e84d7a158 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c | |||
@@ -241,10 +241,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret | |||
241 | obj->vob_discard_page_warned = 0; | 241 | obj->vob_discard_page_warned = 0; |
242 | } else { | 242 | } else { |
243 | SetPageError(vmpage); | 243 | SetPageError(vmpage); |
244 | if (ioret == -ENOSPC) | 244 | mapping_set_error(inode->i_mapping, ioret); |
245 | set_bit(AS_ENOSPC, &inode->i_mapping->flags); | ||
246 | else | ||
247 | set_bit(AS_EIO, &inode->i_mapping->flags); | ||
248 | 245 | ||
249 | if ((ioret == -ESHUTDOWN || ioret == -EINTR) && | 246 | if ((ioret == -ESHUTDOWN || ioret == -EINTR) && |
250 | obj->vob_discard_page_warned == 0) { | 247 | obj->vob_discard_page_warned == 0) { |
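
This and the following mapping_set_error() conversions keep the AS_ENOSPC/AS_EIO distinction the call sites were open-coding. For reference, the helper behaves roughly as sketched below (the real definition lives in <linux/pagemap.h>):

#include <linux/pagemap.h>	/* AS_EIO, AS_ENOSPC, struct address_space */
#include <linux/errno.h>

/* Roughly what mapping_set_error() does; demo_ prefix only to avoid
 * clashing with the real helper pulled in by pagemap.h. */
static inline void demo_mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
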
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index a9d94f7cf683..2675792a8f59 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c | |||
@@ -708,7 +708,7 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) | |||
708 | { | 708 | { |
709 | struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id; | 709 | struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id; |
710 | 710 | ||
711 | queue_kthread_work(&s->kworker, &s->irq_work); | 711 | kthread_queue_work(&s->kworker, &s->irq_work); |
712 | 712 | ||
713 | return IRQ_HANDLED; | 713 | return IRQ_HANDLED; |
714 | } | 714 | } |
@@ -784,7 +784,7 @@ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit) | |||
784 | 784 | ||
785 | one->config.flags |= SC16IS7XX_RECONF_IER; | 785 | one->config.flags |= SC16IS7XX_RECONF_IER; |
786 | one->config.ier_clear |= bit; | 786 | one->config.ier_clear |= bit; |
787 | queue_kthread_work(&s->kworker, &one->reg_work); | 787 | kthread_queue_work(&s->kworker, &one->reg_work); |
788 | } | 788 | } |
789 | 789 | ||
790 | static void sc16is7xx_stop_tx(struct uart_port *port) | 790 | static void sc16is7xx_stop_tx(struct uart_port *port) |
@@ -802,7 +802,7 @@ static void sc16is7xx_start_tx(struct uart_port *port) | |||
802 | struct sc16is7xx_port *s = dev_get_drvdata(port->dev); | 802 | struct sc16is7xx_port *s = dev_get_drvdata(port->dev); |
803 | struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); | 803 | struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); |
804 | 804 | ||
805 | queue_kthread_work(&s->kworker, &one->tx_work); | 805 | kthread_queue_work(&s->kworker, &one->tx_work); |
806 | } | 806 | } |
807 | 807 | ||
808 | static unsigned int sc16is7xx_tx_empty(struct uart_port *port) | 808 | static unsigned int sc16is7xx_tx_empty(struct uart_port *port) |
@@ -828,7 +828,7 @@ static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
828 | struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); | 828 | struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); |
829 | 829 | ||
830 | one->config.flags |= SC16IS7XX_RECONF_MD; | 830 | one->config.flags |= SC16IS7XX_RECONF_MD; |
831 | queue_kthread_work(&s->kworker, &one->reg_work); | 831 | kthread_queue_work(&s->kworker, &one->reg_work); |
832 | } | 832 | } |
833 | 833 | ||
834 | static void sc16is7xx_break_ctl(struct uart_port *port, int break_state) | 834 | static void sc16is7xx_break_ctl(struct uart_port *port, int break_state) |
@@ -957,7 +957,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port, | |||
957 | 957 | ||
958 | port->rs485 = *rs485; | 958 | port->rs485 = *rs485; |
959 | one->config.flags |= SC16IS7XX_RECONF_RS485; | 959 | one->config.flags |= SC16IS7XX_RECONF_RS485; |
960 | queue_kthread_work(&s->kworker, &one->reg_work); | 960 | kthread_queue_work(&s->kworker, &one->reg_work); |
961 | 961 | ||
962 | return 0; | 962 | return 0; |
963 | } | 963 | } |
@@ -1030,7 +1030,7 @@ static void sc16is7xx_shutdown(struct uart_port *port) | |||
1030 | 1030 | ||
1031 | sc16is7xx_power(port, 0); | 1031 | sc16is7xx_power(port, 0); |
1032 | 1032 | ||
1033 | flush_kthread_worker(&s->kworker); | 1033 | kthread_flush_worker(&s->kworker); |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | static const char *sc16is7xx_type(struct uart_port *port) | 1036 | static const char *sc16is7xx_type(struct uart_port *port) |
@@ -1176,8 +1176,8 @@ static int sc16is7xx_probe(struct device *dev, | |||
1176 | s->devtype = devtype; | 1176 | s->devtype = devtype; |
1177 | dev_set_drvdata(dev, s); | 1177 | dev_set_drvdata(dev, s); |
1178 | 1178 | ||
1179 | init_kthread_worker(&s->kworker); | 1179 | kthread_init_worker(&s->kworker); |
1180 | init_kthread_work(&s->irq_work, sc16is7xx_ist); | 1180 | kthread_init_work(&s->irq_work, sc16is7xx_ist); |
1181 | s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker, | 1181 | s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker, |
1182 | "sc16is7xx"); | 1182 | "sc16is7xx"); |
1183 | if (IS_ERR(s->kworker_task)) { | 1183 | if (IS_ERR(s->kworker_task)) { |
@@ -1234,8 +1234,8 @@ static int sc16is7xx_probe(struct device *dev, | |||
1234 | SC16IS7XX_EFCR_RXDISABLE_BIT | | 1234 | SC16IS7XX_EFCR_RXDISABLE_BIT | |
1235 | SC16IS7XX_EFCR_TXDISABLE_BIT); | 1235 | SC16IS7XX_EFCR_TXDISABLE_BIT); |
1236 | /* Initialize kthread work structs */ | 1236 | /* Initialize kthread work structs */ |
1237 | init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc); | 1237 | kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc); |
1238 | init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc); | 1238 | kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc); |
1239 | /* Register port */ | 1239 | /* Register port */ |
1240 | uart_add_one_port(&sc16is7xx_uart, &s->p[i].port); | 1240 | uart_add_one_port(&sc16is7xx_uart, &s->p[i].port); |
1241 | 1241 | ||
@@ -1301,7 +1301,7 @@ static int sc16is7xx_remove(struct device *dev) | |||
1301 | sc16is7xx_power(&s->p[i].port, 0); | 1301 | sc16is7xx_power(&s->p[i].port, 0); |
1302 | } | 1302 | } |
1303 | 1303 | ||
1304 | flush_kthread_worker(&s->kworker); | 1304 | kthread_flush_worker(&s->kworker); |
1305 | kthread_stop(s->kworker_task); | 1305 | kthread_stop(s->kworker_task); |
1306 | 1306 | ||
1307 | if (!IS_ERR(s->clk)) | 1307 | if (!IS_ERR(s->clk)) |
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 12731e67d2c7..ea73afb026d8 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/usb/ehci_def.h> | 20 | #include <linux/usb/ehci_def.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
23 | #include <linux/kconfig.h> | ||
24 | #include <linux/kgdb.h> | 23 | #include <linux/kgdb.h> |
25 | #include <linux/kthread.h> | 24 | #include <linux/kthread.h> |
26 | #include <asm/io.h> | 25 | #include <asm/io.h> |
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c index f5fccb3e4152..f78503203f42 100644 --- a/drivers/usb/gadget/udc/bcm63xx_udc.c +++ b/drivers/usb/gadget/udc/bcm63xx_udc.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/kconfig.h> | ||
25 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
26 | #include <linux/list.h> | 25 | #include <linux/list.h> |
27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 35af36253440..d793f548dfe2 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/kconfig.h> | ||
13 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
14 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
15 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
diff --git a/fs/afs/write.c b/fs/afs/write.c index 14d506efd1aa..f865c3f05bea 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -398,8 +398,7 @@ no_more: | |||
398 | switch (ret) { | 398 | switch (ret) { |
399 | case -EDQUOT: | 399 | case -EDQUOT: |
400 | case -ENOSPC: | 400 | case -ENOSPC: |
401 | set_bit(AS_ENOSPC, | 401 | mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC); |
402 | &wb->vnode->vfs_inode.i_mapping->flags); | ||
403 | break; | 402 | break; |
404 | case -EROFS: | 403 | case -EROFS: |
405 | case -EIO: | 404 | case -EIO: |
@@ -409,7 +408,7 @@ no_more: | |||
409 | case -ENOMEDIUM: | 408 | case -ENOMEDIUM: |
410 | case -ENXIO: | 409 | case -ENXIO: |
411 | afs_kill_pages(wb->vnode, true, first, last); | 410 | afs_kill_pages(wb->vnode, true, first, last); |
412 | set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags); | 411 | mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO); |
413 | break; | 412 | break; |
414 | case -EACCES: | 413 | case -EACCES: |
415 | case -EPERM: | 414 | case -EPERM: |
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index a439548de785..a1fba4285277 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h | |||
@@ -20,7 +20,8 @@ | |||
20 | #define AUTOFS_IOC_COUNT 32 | 20 | #define AUTOFS_IOC_COUNT 32 |
21 | 21 | ||
22 | #define AUTOFS_DEV_IOCTL_IOC_FIRST (AUTOFS_DEV_IOCTL_VERSION) | 22 | #define AUTOFS_DEV_IOCTL_IOC_FIRST (AUTOFS_DEV_IOCTL_VERSION) |
23 | #define AUTOFS_DEV_IOCTL_IOC_COUNT (AUTOFS_IOC_COUNT - 11) | 23 | #define AUTOFS_DEV_IOCTL_IOC_COUNT \ |
24 | (AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD - AUTOFS_DEV_IOCTL_VERSION_CMD) | ||
24 | 25 | ||
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
@@ -33,8 +34,6 @@ | |||
33 | #include <asm/current.h> | 34 | #include <asm/current.h> |
34 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
35 | 36 | ||
36 | /* #define DEBUG */ | ||
37 | |||
38 | #ifdef pr_fmt | 37 | #ifdef pr_fmt |
39 | #undef pr_fmt | 38 | #undef pr_fmt |
40 | #endif | 39 | #endif |
@@ -111,8 +110,6 @@ struct autofs_sb_info { | |||
111 | int max_proto; | 110 | int max_proto; |
112 | unsigned long exp_timeout; | 111 | unsigned long exp_timeout; |
113 | unsigned int type; | 112 | unsigned int type; |
114 | int reghost_enabled; | ||
115 | int needs_reghost; | ||
116 | struct super_block *sb; | 113 | struct super_block *sb; |
117 | struct mutex wq_mutex; | 114 | struct mutex wq_mutex; |
118 | struct mutex pipe_mutex; | 115 | struct mutex pipe_mutex; |
@@ -271,4 +268,4 @@ static inline void autofs4_del_expiring(struct dentry *dentry) | |||
271 | } | 268 | } |
272 | } | 269 | } |
273 | 270 | ||
274 | extern void autofs4_kill_sb(struct super_block *); | 271 | void autofs4_kill_sb(struct super_block *); |
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index c7fcc7438843..fc09eb77ddf3 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c | |||
@@ -75,7 +75,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) | |||
75 | if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) || | 75 | if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) || |
76 | (param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) { | 76 | (param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) { |
77 | pr_warn("ioctl control interface version mismatch: " | 77 | pr_warn("ioctl control interface version mismatch: " |
78 | "kernel(%u.%u), user(%u.%u), cmd(%d)\n", | 78 | "kernel(%u.%u), user(%u.%u), cmd(0x%08x)\n", |
79 | AUTOFS_DEV_IOCTL_VERSION_MAJOR, | 79 | AUTOFS_DEV_IOCTL_VERSION_MAJOR, |
80 | AUTOFS_DEV_IOCTL_VERSION_MINOR, | 80 | AUTOFS_DEV_IOCTL_VERSION_MINOR, |
81 | param->ver_major, param->ver_minor, cmd); | 81 | param->ver_major, param->ver_minor, cmd); |
@@ -172,6 +172,17 @@ static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f) | |||
172 | return sbi; | 172 | return sbi; |
173 | } | 173 | } |
174 | 174 | ||
175 | /* Return autofs dev ioctl version */ | ||
176 | static int autofs_dev_ioctl_version(struct file *fp, | ||
177 | struct autofs_sb_info *sbi, | ||
178 | struct autofs_dev_ioctl *param) | ||
179 | { | ||
180 | /* This should have already been set. */ | ||
181 | param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; | ||
182 | param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; | ||
183 | return 0; | ||
184 | } | ||
185 | |||
175 | /* Return autofs module protocol version */ | 186 | /* Return autofs module protocol version */ |
176 | static int autofs_dev_ioctl_protover(struct file *fp, | 187 | static int autofs_dev_ioctl_protover(struct file *fp, |
177 | struct autofs_sb_info *sbi, | 188 | struct autofs_sb_info *sbi, |
@@ -586,41 +597,25 @@ out: | |||
586 | 597 | ||
587 | static ioctl_fn lookup_dev_ioctl(unsigned int cmd) | 598 | static ioctl_fn lookup_dev_ioctl(unsigned int cmd) |
588 | { | 599 | { |
589 | static struct { | 600 | static ioctl_fn _ioctls[] = { |
590 | int cmd; | 601 | autofs_dev_ioctl_version, |
591 | ioctl_fn fn; | 602 | autofs_dev_ioctl_protover, |
592 | } _ioctls[] = { | 603 | autofs_dev_ioctl_protosubver, |
593 | {cmd_idx(AUTOFS_DEV_IOCTL_VERSION_CMD), NULL}, | 604 | autofs_dev_ioctl_openmount, |
594 | {cmd_idx(AUTOFS_DEV_IOCTL_PROTOVER_CMD), | 605 | autofs_dev_ioctl_closemount, |
595 | autofs_dev_ioctl_protover}, | 606 | autofs_dev_ioctl_ready, |
596 | {cmd_idx(AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD), | 607 | autofs_dev_ioctl_fail, |
597 | autofs_dev_ioctl_protosubver}, | 608 | autofs_dev_ioctl_setpipefd, |
598 | {cmd_idx(AUTOFS_DEV_IOCTL_OPENMOUNT_CMD), | 609 | autofs_dev_ioctl_catatonic, |
599 | autofs_dev_ioctl_openmount}, | 610 | autofs_dev_ioctl_timeout, |
600 | {cmd_idx(AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD), | 611 | autofs_dev_ioctl_requester, |
601 | autofs_dev_ioctl_closemount}, | 612 | autofs_dev_ioctl_expire, |
602 | {cmd_idx(AUTOFS_DEV_IOCTL_READY_CMD), | 613 | autofs_dev_ioctl_askumount, |
603 | autofs_dev_ioctl_ready}, | 614 | autofs_dev_ioctl_ismountpoint, |
604 | {cmd_idx(AUTOFS_DEV_IOCTL_FAIL_CMD), | ||
605 | autofs_dev_ioctl_fail}, | ||
606 | {cmd_idx(AUTOFS_DEV_IOCTL_SETPIPEFD_CMD), | ||
607 | autofs_dev_ioctl_setpipefd}, | ||
608 | {cmd_idx(AUTOFS_DEV_IOCTL_CATATONIC_CMD), | ||
609 | autofs_dev_ioctl_catatonic}, | ||
610 | {cmd_idx(AUTOFS_DEV_IOCTL_TIMEOUT_CMD), | ||
611 | autofs_dev_ioctl_timeout}, | ||
612 | {cmd_idx(AUTOFS_DEV_IOCTL_REQUESTER_CMD), | ||
613 | autofs_dev_ioctl_requester}, | ||
614 | {cmd_idx(AUTOFS_DEV_IOCTL_EXPIRE_CMD), | ||
615 | autofs_dev_ioctl_expire}, | ||
616 | {cmd_idx(AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD), | ||
617 | autofs_dev_ioctl_askumount}, | ||
618 | {cmd_idx(AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD), | ||
619 | autofs_dev_ioctl_ismountpoint} | ||
620 | }; | 615 | }; |
621 | unsigned int idx = cmd_idx(cmd); | 616 | unsigned int idx = cmd_idx(cmd); |
622 | 617 | ||
623 | return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx].fn; | 618 | return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx]; |
624 | } | 619 | } |
625 | 620 | ||
626 | /* ioctl dispatcher */ | 621 | /* ioctl dispatcher */ |
@@ -642,7 +637,7 @@ static int _autofs_dev_ioctl(unsigned int command, | |||
642 | cmd = _IOC_NR(command); | 637 | cmd = _IOC_NR(command); |
643 | 638 | ||
644 | if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) || | 639 | if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) || |
645 | cmd - cmd_first >= AUTOFS_DEV_IOCTL_IOC_COUNT) { | 640 | cmd - cmd_first > AUTOFS_DEV_IOCTL_IOC_COUNT) { |
646 | return -ENOTTY; | 641 | return -ENOTTY; |
647 | } | 642 | } |
648 | 643 | ||
@@ -655,14 +650,11 @@ static int _autofs_dev_ioctl(unsigned int command, | |||
655 | if (err) | 650 | if (err) |
656 | goto out; | 651 | goto out; |
657 | 652 | ||
658 | /* The validate routine above always sets the version */ | ||
659 | if (cmd == AUTOFS_DEV_IOCTL_VERSION_CMD) | ||
660 | goto done; | ||
661 | |||
662 | fn = lookup_dev_ioctl(cmd); | 653 | fn = lookup_dev_ioctl(cmd); |
663 | if (!fn) { | 654 | if (!fn) { |
664 | pr_warn("unknown command 0x%08x\n", command); | 655 | pr_warn("unknown command 0x%08x\n", command); |
665 | return -ENOTTY; | 656 | err = -ENOTTY; |
657 | goto out; | ||
666 | } | 658 | } |
667 | 659 | ||
668 | fp = NULL; | 660 | fp = NULL; |
@@ -671,9 +663,11 @@ static int _autofs_dev_ioctl(unsigned int command, | |||
671 | /* | 663 | /* |
672 | * For obvious reasons the openmount can't have a file | 664 | * For obvious reasons the openmount can't have a file |
673 | * descriptor yet. We don't take a reference to the | 665 | * descriptor yet. We don't take a reference to the |
674 | * file during close to allow for immediate release. | 666 | * file during close to allow for immediate release, |
667 | * and the same for retrieving ioctl version. | ||
675 | */ | 668 | */ |
676 | if (cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD && | 669 | if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD && |
670 | cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD && | ||
677 | cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) { | 671 | cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) { |
678 | fp = fget(param->ioctlfd); | 672 | fp = fget(param->ioctlfd); |
679 | if (!fp) { | 673 | if (!fp) { |
@@ -706,7 +700,6 @@ cont: | |||
706 | 700 | ||
707 | if (fp) | 701 | if (fp) |
708 | fput(fp); | 702 | fput(fp); |
709 | done: | ||
710 | if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE)) | 703 | if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE)) |
711 | err = -EFAULT; | 704 | err = -EFAULT; |
712 | out: | 705 | out: |
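
The ioctl lookup above now indexes a flat array of handlers by command number instead of scanning cmd/function pairs, which is why the version command gained a real handler of its own. A minimal sketch of the pattern, with hypothetical demo_* names:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

typedef int (*demo_ioctl_fn)(unsigned long arg);

static int demo_version(unsigned long arg)  { return 0; }
static int demo_protover(unsigned long arg) { return 0; }

static demo_ioctl_fn demo_lookup(unsigned int idx)
{
	/* One entry per command, in command-number order starting at the
	 * first command; out-of-range indexes return NULL (-ENOTTY later). */
	static const demo_ioctl_fn demo_table[] = {
		demo_version,		/* idx 0 */
		demo_protover,		/* idx 1 */
	};

	return (idx >= ARRAY_SIZE(demo_table)) ? NULL : demo_table[idx];
}
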
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index ca9cbd6362e0..438b5bf675b6 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c | |||
@@ -274,6 +274,23 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
274 | goto fail_dput; | 274 | goto fail_dput; |
275 | } | 275 | } |
276 | 276 | ||
277 | /* Test versions first */ | ||
278 | if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION || | ||
279 | sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) { | ||
280 | pr_err("kernel does not match daemon version " | ||
281 | "daemon (%d, %d) kernel (%d, %d)\n", | ||
282 | sbi->min_proto, sbi->max_proto, | ||
283 | AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION); | ||
284 | goto fail_dput; | ||
285 | } | ||
286 | |||
287 | /* Establish highest kernel protocol version */ | ||
288 | if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION) | ||
289 | sbi->version = AUTOFS_MAX_PROTO_VERSION; | ||
290 | else | ||
291 | sbi->version = sbi->max_proto; | ||
292 | sbi->sub_version = AUTOFS_PROTO_SUBVERSION; | ||
293 | |||
277 | if (pgrp_set) { | 294 | if (pgrp_set) { |
278 | sbi->oz_pgrp = find_get_pid(pgrp); | 295 | sbi->oz_pgrp = find_get_pid(pgrp); |
279 | if (!sbi->oz_pgrp) { | 296 | if (!sbi->oz_pgrp) { |
@@ -291,29 +308,12 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
291 | root_inode->i_fop = &autofs4_root_operations; | 308 | root_inode->i_fop = &autofs4_root_operations; |
292 | root_inode->i_op = &autofs4_dir_inode_operations; | 309 | root_inode->i_op = &autofs4_dir_inode_operations; |
293 | 310 | ||
294 | /* Couldn't this be tested earlier? */ | ||
295 | if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION || | ||
296 | sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) { | ||
297 | pr_err("kernel does not match daemon version " | ||
298 | "daemon (%d, %d) kernel (%d, %d)\n", | ||
299 | sbi->min_proto, sbi->max_proto, | ||
300 | AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION); | ||
301 | goto fail_dput; | ||
302 | } | ||
303 | |||
304 | /* Establish highest kernel protocol version */ | ||
305 | if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION) | ||
306 | sbi->version = AUTOFS_MAX_PROTO_VERSION; | ||
307 | else | ||
308 | sbi->version = sbi->max_proto; | ||
309 | sbi->sub_version = AUTOFS_PROTO_SUBVERSION; | ||
310 | |||
311 | pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp)); | 311 | pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp)); |
312 | pipe = fget(pipefd); | 312 | pipe = fget(pipefd); |
313 | 313 | ||
314 | if (!pipe) { | 314 | if (!pipe) { |
315 | pr_err("could not open pipe file descriptor\n"); | 315 | pr_err("could not open pipe file descriptor\n"); |
316 | goto fail_dput; | 316 | goto fail_put_pid; |
317 | } | 317 | } |
318 | ret = autofs_prepare_pipe(pipe); | 318 | ret = autofs_prepare_pipe(pipe); |
319 | if (ret < 0) | 319 | if (ret < 0) |
@@ -334,14 +334,14 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
334 | fail_fput: | 334 | fail_fput: |
335 | pr_err("pipe file descriptor does not contain proper ops\n"); | 335 | pr_err("pipe file descriptor does not contain proper ops\n"); |
336 | fput(pipe); | 336 | fput(pipe); |
337 | /* fall through */ | 337 | fail_put_pid: |
338 | put_pid(sbi->oz_pgrp); | ||
338 | fail_dput: | 339 | fail_dput: |
339 | dput(root); | 340 | dput(root); |
340 | goto fail_free; | 341 | goto fail_free; |
341 | fail_ino: | 342 | fail_ino: |
342 | kfree(ino); | 343 | autofs4_free_ino(ino); |
343 | fail_free: | 344 | fail_free: |
344 | put_pid(sbi->oz_pgrp); | ||
345 | kfree(sbi); | 345 | kfree(sbi); |
346 | s->s_fs_info = NULL; | 346 | s->s_fs_info = NULL; |
347 | return ret; | 347 | return ret; |
@@ -368,7 +368,8 @@ struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode) | |||
368 | inode->i_fop = &autofs4_dir_operations; | 368 | inode->i_fop = &autofs4_dir_operations; |
369 | } else if (S_ISLNK(mode)) { | 369 | } else if (S_ISLNK(mode)) { |
370 | inode->i_op = &autofs4_symlink_inode_operations; | 370 | inode->i_op = &autofs4_symlink_inode_operations; |
371 | } | 371 | } else |
372 | WARN_ON(1); | ||
372 | 373 | ||
373 | return inode; | 374 | return inode; |
374 | } | 375 | } |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 623510e84c96..a11f73174877 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -577,8 +577,6 @@ static int autofs4_dir_symlink(struct inode *dir, | |||
577 | inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555); | 577 | inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555); |
578 | if (!inode) { | 578 | if (!inode) { |
579 | kfree(cp); | 579 | kfree(cp); |
580 | if (!dentry->d_fsdata) | ||
581 | kfree(ino); | ||
582 | return -ENOMEM; | 580 | return -ENOMEM; |
583 | } | 581 | } |
584 | inode->i_private = cp; | 582 | inode->i_private = cp; |
@@ -842,7 +840,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) | |||
842 | if (may_umount(mnt)) | 840 | if (may_umount(mnt)) |
843 | status = 1; | 841 | status = 1; |
844 | 842 | ||
845 | pr_debug("returning %d\n", status); | 843 | pr_debug("may umount %d\n", status); |
846 | 844 | ||
847 | status = put_user(status, p); | 845 | status = put_user(status, p); |
848 | 846 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 376e4e426324..05b553368bb4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/cleancache.h> | 30 | #include <linux/cleancache.h> |
31 | #include <linux/dax.h> | 31 | #include <linux/dax.h> |
32 | #include <linux/badblocks.h> | 32 | #include <linux/badblocks.h> |
33 | #include <linux/falloc.h> | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include "internal.h" | 35 | #include "internal.h" |
35 | 36 | ||
@@ -1775,6 +1776,81 @@ static const struct address_space_operations def_blk_aops = { | |||
1775 | .is_dirty_writeback = buffer_check_dirty_writeback, | 1776 | .is_dirty_writeback = buffer_check_dirty_writeback, |
1776 | }; | 1777 | }; |
1777 | 1778 | ||
1779 | #define BLKDEV_FALLOC_FL_SUPPORTED \ | ||
1780 | (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ | ||
1781 | FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE) | ||
1782 | |||
1783 | static long blkdev_fallocate(struct file *file, int mode, loff_t start, | ||
1784 | loff_t len) | ||
1785 | { | ||
1786 | struct block_device *bdev = I_BDEV(bdev_file_inode(file)); | ||
1787 | struct request_queue *q = bdev_get_queue(bdev); | ||
1788 | struct address_space *mapping; | ||
1789 | loff_t end = start + len - 1; | ||
1790 | loff_t isize; | ||
1791 | int error; | ||
1792 | |||
1793 | /* Fail if we don't recognize the flags. */ | ||
1794 | if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED) | ||
1795 | return -EOPNOTSUPP; | ||
1796 | |||
1797 | /* Don't go off the end of the device. */ | ||
1798 | isize = i_size_read(bdev->bd_inode); | ||
1799 | if (start >= isize) | ||
1800 | return -EINVAL; | ||
1801 | if (end >= isize) { | ||
1802 | if (mode & FALLOC_FL_KEEP_SIZE) { | ||
1803 | len = isize - start; | ||
1804 | end = start + len - 1; | ||
1805 | } else | ||
1806 | return -EINVAL; | ||
1807 | } | ||
1808 | |||
1809 | /* | ||
1810 | * Don't allow IO that isn't aligned to logical block size. | ||
1811 | */ | ||
1812 | if ((start | len) & (bdev_logical_block_size(bdev) - 1)) | ||
1813 | return -EINVAL; | ||
1814 | |||
1815 | /* Invalidate the page cache, including dirty pages. */ | ||
1816 | mapping = bdev->bd_inode->i_mapping; | ||
1817 | truncate_inode_pages_range(mapping, start, end); | ||
1818 | |||
1819 | switch (mode) { | ||
1820 | case FALLOC_FL_ZERO_RANGE: | ||
1821 | case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: | ||
1822 | error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, | ||
1823 | GFP_KERNEL, false); | ||
1824 | break; | ||
1825 | case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: | ||
1826 | /* Only punch if the device can do zeroing discard. */ | ||
1827 | if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data) | ||
1828 | return -EOPNOTSUPP; | ||
1829 | error = blkdev_issue_discard(bdev, start >> 9, len >> 9, | ||
1830 | GFP_KERNEL, 0); | ||
1831 | break; | ||
1832 | case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: | ||
1833 | if (!blk_queue_discard(q)) | ||
1834 | return -EOPNOTSUPP; | ||
1835 | error = blkdev_issue_discard(bdev, start >> 9, len >> 9, | ||
1836 | GFP_KERNEL, 0); | ||
1837 | break; | ||
1838 | default: | ||
1839 | return -EOPNOTSUPP; | ||
1840 | } | ||
1841 | if (error) | ||
1842 | return error; | ||
1843 | |||
1844 | /* | ||
1845 | * Invalidate again; if someone wandered in and dirtied a page, | ||
1846 | * the caller will be given -EBUSY. The third argument is | ||
1847 | * inclusive, so the rounding here is safe. | ||
1848 | */ | ||
1849 | return invalidate_inode_pages2_range(mapping, | ||
1850 | start >> PAGE_SHIFT, | ||
1851 | end >> PAGE_SHIFT); | ||
1852 | } | ||
1853 | |||
1778 | const struct file_operations def_blk_fops = { | 1854 | const struct file_operations def_blk_fops = { |
1779 | .open = blkdev_open, | 1855 | .open = blkdev_open, |
1780 | .release = blkdev_close, | 1856 | .release = blkdev_close, |
@@ -1789,6 +1865,7 @@ const struct file_operations def_blk_fops = { | |||
1789 | #endif | 1865 | #endif |
1790 | .splice_read = generic_file_splice_read, | 1866 | .splice_read = generic_file_splice_read, |
1791 | .splice_write = iter_file_splice_write, | 1867 | .splice_write = iter_file_splice_write, |
1868 | .fallocate = blkdev_fallocate, | ||
1792 | }; | 1869 | }; |
1793 | 1870 | ||
1794 | int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) | 1871 | int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) |
diff --git a/fs/buffer.c b/fs/buffer.c index 7dad8713fac8..b205a629001d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -351,7 +351,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) | |||
351 | set_buffer_uptodate(bh); | 351 | set_buffer_uptodate(bh); |
352 | } else { | 352 | } else { |
353 | buffer_io_error(bh, ", lost async page write"); | 353 | buffer_io_error(bh, ", lost async page write"); |
354 | set_bit(AS_EIO, &page->mapping->flags); | 354 | mapping_set_error(page->mapping, -EIO); |
355 | set_buffer_write_io_error(bh); | 355 | set_buffer_write_io_error(bh); |
356 | clear_buffer_uptodate(bh); | 356 | clear_buffer_uptodate(bh); |
357 | SetPageError(page); | 357 | SetPageError(page); |
@@ -3249,7 +3249,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) | |||
3249 | bh = head; | 3249 | bh = head; |
3250 | do { | 3250 | do { |
3251 | if (buffer_write_io_error(bh) && page->mapping) | 3251 | if (buffer_write_io_error(bh) && page->mapping) |
3252 | set_bit(AS_EIO, &page->mapping->flags); | 3252 | mapping_set_error(page->mapping, -EIO); |
3253 | if (buffer_busy(bh)) | 3253 | if (buffer_busy(bh)) |
3254 | goto failed; | 3254 | goto failed; |
3255 | bh = bh->b_this_page; | 3255 | bh = bh->b_this_page; |
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index d42ff527ab21..d8072bc074a4 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
@@ -778,7 +778,7 @@ try_again: | |||
778 | fail: | 778 | fail: |
779 | EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n", | 779 | EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n", |
780 | inode->i_ino, page->index, ret); | 780 | inode->i_ino, page->index, ret); |
781 | set_bit(AS_EIO, &page->mapping->flags); | 781 | mapping_set_error(page->mapping, -EIO); |
782 | unlock_page(page); | 782 | unlock_page(page); |
783 | return ret; | 783 | return ret; |
784 | } | 784 | } |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b4cbee936cf8..0094923e5ebf 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -88,7 +88,7 @@ static void ext4_finish_bio(struct bio *bio) | |||
88 | 88 | ||
89 | if (bio->bi_error) { | 89 | if (bio->bi_error) { |
90 | SetPageError(page); | 90 | SetPageError(page); |
91 | set_bit(AS_EIO, &page->mapping->flags); | 91 | mapping_set_error(page->mapping, -EIO); |
92 | } | 92 | } |
93 | bh = head = page_buffers(page); | 93 | bh = head = page_buffers(page); |
94 | /* | 94 | /* |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 0d0177c9149c..9ae194fd2fdb 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -75,7 +75,7 @@ static void f2fs_write_end_io(struct bio *bio) | |||
75 | fscrypt_pullback_bio_page(&page, true); | 75 | fscrypt_pullback_bio_page(&page, true); |
76 | 76 | ||
77 | if (unlikely(bio->bi_error)) { | 77 | if (unlikely(bio->bi_error)) { |
78 | set_bit(AS_EIO, &page->mapping->flags); | 78 | mapping_set_error(page->mapping, -EIO); |
79 | f2fs_stop_checkpoint(sbi, true); | 79 | f2fs_stop_checkpoint(sbi, true); |
80 | } | 80 | } |
81 | end_page_writeback(page); | 81 | end_page_writeback(page); |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 5bb565f9989c..31f8ca046639 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
@@ -269,8 +269,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal, | |||
269 | * filemap_fdatawait_range(), set it again so | 269 | * filemap_fdatawait_range(), set it again so |
270 | * that user process can get -EIO from fsync(). | 270 | * that user process can get -EIO from fsync(). |
271 | */ | 271 | */ |
272 | set_bit(AS_EIO, | 272 | mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO); |
273 | &jinode->i_vfs_inode->i_mapping->flags); | ||
274 | 273 | ||
275 | if (!ret) | 274 | if (!ret) |
276 | ret = err; | 275 | ret = err; |
diff --git a/fs/lockd/procfs.h b/fs/lockd/procfs.h index 2257a1311027..184a15edd18d 100644 --- a/fs/lockd/procfs.h +++ b/fs/lockd/procfs.h | |||
@@ -6,8 +6,6 @@ | |||
6 | #ifndef _LOCKD_PROCFS_H | 6 | #ifndef _LOCKD_PROCFS_H |
7 | #define _LOCKD_PROCFS_H | 7 | #define _LOCKD_PROCFS_H |
8 | 8 | ||
9 | #include <linux/kconfig.h> | ||
10 | |||
11 | #if IS_ENABLED(CONFIG_PROC_FS) | 9 | #if IS_ENABLED(CONFIG_PROC_FS) |
12 | int lockd_create_procfs(void); | 10 | int lockd_create_procfs(void); |
13 | void lockd_remove_procfs(void); | 11 | void lockd_remove_procfs(void); |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 6ea06f8a7d29..3f828a187049 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -3188,6 +3188,9 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, | |||
3188 | migrate->new_master, | 3188 | migrate->new_master, |
3189 | migrate->master); | 3189 | migrate->master); |
3190 | 3190 | ||
3191 | if (ret < 0) | ||
3192 | kmem_cache_free(dlm_mle_cache, mle); | ||
3193 | |||
3191 | spin_unlock(&dlm->master_lock); | 3194 | spin_unlock(&dlm->master_lock); |
3192 | unlock: | 3195 | unlock: |
3193 | spin_unlock(&dlm->spinlock); | 3196 | spin_unlock(&dlm->spinlock); |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -300,7 +300,8 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
300 | * Let individual file system decide if it supports preallocation | 300 | * Let individual file system decide if it supports preallocation |
301 | * for directories or not. | 301 | * for directories or not. |
302 | */ | 302 | */ |
303 | if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) | 303 | if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && |
304 | !S_ISBLK(inode->i_mode)) | ||
304 | return -ENODEV; | 305 | return -ENODEV; |
305 | 306 | ||
306 | /* Check for wrap through zero too */ | 307 | /* Check for wrap through zero too */ |
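
Together with the new blkdev_fallocate() above, this vfs_fallocate() change lets userspace call fallocate() on block device nodes. An illustrative userspace sketch (the device path is just a hypothetical test target):

#define _GNU_SOURCE		/* fallocate(), FALLOC_FL_* from <fcntl.h> */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Punch a 1 MiB hole at offset 0.  Offset and length must be aligned to the
 * logical block size, and punching requires zeroing discard support, per the
 * kernel checks above. */
int main(void)
{
	int fd = open("/dev/loop0", O_WRONLY);	/* hypothetical test device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1024 * 1024) < 0)
		perror("fallocate");	/* EOPNOTSUPP on older kernels/devices */

	close(fd);
	return 0;
}
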
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
@@ -601,54 +601,63 @@ pipe_fasync(int fd, struct file *filp, int on) | |||
601 | return retval; | 601 | return retval; |
602 | } | 602 | } |
603 | 603 | ||
604 | static void account_pipe_buffers(struct pipe_inode_info *pipe, | 604 | static unsigned long account_pipe_buffers(struct user_struct *user, |
605 | unsigned long old, unsigned long new) | 605 | unsigned long old, unsigned long new) |
606 | { | 606 | { |
607 | atomic_long_add(new - old, &pipe->user->pipe_bufs); | 607 | return atomic_long_add_return(new - old, &user->pipe_bufs); |
608 | } | 608 | } |
609 | 609 | ||
610 | static bool too_many_pipe_buffers_soft(struct user_struct *user) | 610 | static bool too_many_pipe_buffers_soft(unsigned long user_bufs) |
611 | { | 611 | { |
612 | return pipe_user_pages_soft && | 612 | return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft; |
613 | atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft; | ||
614 | } | 613 | } |
615 | 614 | ||
616 | static bool too_many_pipe_buffers_hard(struct user_struct *user) | 615 | static bool too_many_pipe_buffers_hard(unsigned long user_bufs) |
617 | { | 616 | { |
618 | return pipe_user_pages_hard && | 617 | return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard; |
619 | atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard; | ||
620 | } | 618 | } |
621 | 619 | ||
622 | struct pipe_inode_info *alloc_pipe_info(void) | 620 | struct pipe_inode_info *alloc_pipe_info(void) |
623 | { | 621 | { |
624 | struct pipe_inode_info *pipe; | 622 | struct pipe_inode_info *pipe; |
623 | unsigned long pipe_bufs = PIPE_DEF_BUFFERS; | ||
624 | struct user_struct *user = get_current_user(); | ||
625 | unsigned long user_bufs; | ||
625 | 626 | ||
626 | pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); | 627 | pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); |
627 | if (pipe) { | 628 | if (pipe == NULL) |
628 | unsigned long pipe_bufs = PIPE_DEF_BUFFERS; | 629 | goto out_free_uid; |
629 | struct user_struct *user = get_current_user(); | ||
630 | |||
631 | if (!too_many_pipe_buffers_hard(user)) { | ||
632 | if (too_many_pipe_buffers_soft(user)) | ||
633 | pipe_bufs = 1; | ||
634 | pipe->bufs = kcalloc(pipe_bufs, | ||
635 | sizeof(struct pipe_buffer), | ||
636 | GFP_KERNEL_ACCOUNT); | ||
637 | } | ||
638 | 630 | ||
639 | if (pipe->bufs) { | 631 | if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE)) |
640 | init_waitqueue_head(&pipe->wait); | 632 | pipe_bufs = pipe_max_size >> PAGE_SHIFT; |
641 | pipe->r_counter = pipe->w_counter = 1; | 633 | |
642 | pipe->buffers = pipe_bufs; | 634 | user_bufs = account_pipe_buffers(user, 0, pipe_bufs); |
643 | pipe->user = user; | 635 | |
644 | account_pipe_buffers(pipe, 0, pipe_bufs); | 636 | if (too_many_pipe_buffers_soft(user_bufs)) { |
645 | mutex_init(&pipe->mutex); | 637 | user_bufs = account_pipe_buffers(user, pipe_bufs, 1); |
646 | return pipe; | 638 | pipe_bufs = 1; |
647 | } | 639 | } |
648 | free_uid(user); | 640 | |
649 | kfree(pipe); | 641 | if (too_many_pipe_buffers_hard(user_bufs)) |
642 | goto out_revert_acct; | ||
643 | |||
644 | pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), | ||
645 | GFP_KERNEL_ACCOUNT); | ||
646 | |||
647 | if (pipe->bufs) { | ||
648 | init_waitqueue_head(&pipe->wait); | ||
649 | pipe->r_counter = pipe->w_counter = 1; | ||
650 | pipe->buffers = pipe_bufs; | ||
651 | pipe->user = user; | ||
652 | mutex_init(&pipe->mutex); | ||
653 | return pipe; | ||
650 | } | 654 | } |
651 | 655 | ||
656 | out_revert_acct: | ||
657 | (void) account_pipe_buffers(user, pipe_bufs, 0); | ||
658 | kfree(pipe); | ||
659 | out_free_uid: | ||
660 | free_uid(user); | ||
652 | return NULL; | 661 | return NULL; |
653 | } | 662 | } |
654 | 663 | ||
@@ -656,7 +665,7 @@ void free_pipe_info(struct pipe_inode_info *pipe) | |||
656 | { | 665 | { |
657 | int i; | 666 | int i; |
658 | 667 | ||
659 | account_pipe_buffers(pipe, pipe->buffers, 0); | 668 | (void) account_pipe_buffers(pipe->user, pipe->buffers, 0); |
660 | free_uid(pipe->user); | 669 | free_uid(pipe->user); |
661 | for (i = 0; i < pipe->buffers; i++) { | 670 | for (i = 0; i < pipe->buffers; i++) { |
662 | struct pipe_buffer *buf = pipe->bufs + i; | 671 | struct pipe_buffer *buf = pipe->bufs + i; |
@@ -1008,12 +1017,54 @@ const struct file_operations pipefifo_fops = { | |||
1008 | }; | 1017 | }; |
1009 | 1018 | ||
1010 | /* | 1019 | /* |
1020 | * Currently we rely on the pipe array holding a power-of-2 number | ||
1021 | * of pages. | ||
1022 | */ | ||
1023 | static inline unsigned int round_pipe_size(unsigned int size) | ||
1024 | { | ||
1025 | unsigned long nr_pages; | ||
1026 | |||
1027 | nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
1028 | return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; | ||
1029 | } | ||
1030 | |||
1031 | /* | ||
1011 | * Allocate a new array of pipe buffers and copy the info over. Returns the | 1032 | * Allocate a new array of pipe buffers and copy the info over. Returns the |
1012 | * pipe size if successful, or return -ERROR on error. | 1033 | * pipe size if successful, or return -ERROR on error. |
1013 | */ | 1034 | */ |
1014 | static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | 1035 | static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) |
1015 | { | 1036 | { |
1016 | struct pipe_buffer *bufs; | 1037 | struct pipe_buffer *bufs; |
1038 | unsigned int size, nr_pages; | ||
1039 | unsigned long user_bufs; | ||
1040 | long ret = 0; | ||
1041 | |||
1042 | size = round_pipe_size(arg); | ||
1043 | nr_pages = size >> PAGE_SHIFT; | ||
1044 | |||
1045 | if (!nr_pages) | ||
1046 | return -EINVAL; | ||
1047 | |||
1048 | /* | ||
1049 | * If trying to increase the pipe capacity, check that an | ||
1050 | * unprivileged user is not trying to exceed various limits | ||
1051 | * (soft limit check here, hard limit check just below). | ||
1052 | * Decreasing the pipe capacity is always permitted, even | ||
1053 | * if the user is currently over a limit. | ||
1054 | */ | ||
1055 | if (nr_pages > pipe->buffers && | ||
1056 | size > pipe_max_size && !capable(CAP_SYS_RESOURCE)) | ||
1057 | return -EPERM; | ||
1058 | |||
1059 | user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages); | ||
1060 | |||
1061 | if (nr_pages > pipe->buffers && | ||
1062 | (too_many_pipe_buffers_hard(user_bufs) || | ||
1063 | too_many_pipe_buffers_soft(user_bufs)) && | ||
1064 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { | ||
1065 | ret = -EPERM; | ||
1066 | goto out_revert_acct; | ||
1067 | } | ||
1017 | 1068 | ||
1018 | /* | 1069 | /* |
1019 | * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't | 1070 | * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't |
@@ -1021,13 +1072,17 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | |||
1021 | * again like we would do for growing. If the pipe currently | 1072 | * again like we would do for growing. If the pipe currently |
1022 | * contains more buffers than arg, then return busy. | 1073 | * contains more buffers than arg, then return busy. |
1023 | */ | 1074 | */ |
1024 | if (nr_pages < pipe->nrbufs) | 1075 | if (nr_pages < pipe->nrbufs) { |
1025 | return -EBUSY; | 1076 | ret = -EBUSY; |
1077 | goto out_revert_acct; | ||
1078 | } | ||
1026 | 1079 | ||
1027 | bufs = kcalloc(nr_pages, sizeof(*bufs), | 1080 | bufs = kcalloc(nr_pages, sizeof(*bufs), |
1028 | GFP_KERNEL_ACCOUNT | __GFP_NOWARN); | 1081 | GFP_KERNEL_ACCOUNT | __GFP_NOWARN); |
1029 | if (unlikely(!bufs)) | 1082 | if (unlikely(!bufs)) { |
1030 | return -ENOMEM; | 1083 | ret = -ENOMEM; |
1084 | goto out_revert_acct; | ||
1085 | } | ||
1031 | 1086 | ||
1032 | /* | 1087 | /* |
1033 | * The pipe array wraps around, so just start the new one at zero | 1088 | * The pipe array wraps around, so just start the new one at zero |
@@ -1050,24 +1105,15 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | |||
1050 | memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); | 1105 | memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); |
1051 | } | 1106 | } |
1052 | 1107 | ||
1053 | account_pipe_buffers(pipe, pipe->buffers, nr_pages); | ||
1054 | pipe->curbuf = 0; | 1108 | pipe->curbuf = 0; |
1055 | kfree(pipe->bufs); | 1109 | kfree(pipe->bufs); |
1056 | pipe->bufs = bufs; | 1110 | pipe->bufs = bufs; |
1057 | pipe->buffers = nr_pages; | 1111 | pipe->buffers = nr_pages; |
1058 | return nr_pages * PAGE_SIZE; | 1112 | return nr_pages * PAGE_SIZE; |
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Currently we rely on the pipe array holding a power-of-2 number | ||
1063 | * of pages. | ||
1064 | */ | ||
1065 | static inline unsigned int round_pipe_size(unsigned int size) | ||
1066 | { | ||
1067 | unsigned long nr_pages; | ||
1068 | 1113 | ||
1069 | nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1114 | out_revert_acct: |
1070 | return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; | 1115 | (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers); |
1116 | return ret; | ||
1071 | } | 1117 | } |
1072 | 1118 | ||
1073 | /* | 1119 | /* |
@@ -1109,28 +1155,9 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1109 | __pipe_lock(pipe); | 1155 | __pipe_lock(pipe); |
1110 | 1156 | ||
1111 | switch (cmd) { | 1157 | switch (cmd) { |
1112 | case F_SETPIPE_SZ: { | 1158 | case F_SETPIPE_SZ: |
1113 | unsigned int size, nr_pages; | 1159 | ret = pipe_set_size(pipe, arg); |
1114 | |||
1115 | size = round_pipe_size(arg); | ||
1116 | nr_pages = size >> PAGE_SHIFT; | ||
1117 | |||
1118 | ret = -EINVAL; | ||
1119 | if (!nr_pages) | ||
1120 | goto out; | ||
1121 | |||
1122 | if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { | ||
1123 | ret = -EPERM; | ||
1124 | goto out; | ||
1125 | } else if ((too_many_pipe_buffers_hard(pipe->user) || | ||
1126 | too_many_pipe_buffers_soft(pipe->user)) && | ||
1127 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { | ||
1128 | ret = -EPERM; | ||
1129 | goto out; | ||
1130 | } | ||
1131 | ret = pipe_set_size(pipe, nr_pages); | ||
1132 | break; | 1160 | break; |
1133 | } | ||
1134 | case F_GETPIPE_SZ: | 1161 | case F_GETPIPE_SZ: |
1135 | ret = pipe->buffers * PAGE_SIZE; | 1162 | ret = pipe->buffers * PAGE_SIZE; |
1136 | break; | 1163 | break; |
@@ -1139,7 +1166,6 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1139 | break; | 1166 | break; |
1140 | } | 1167 | } |
1141 | 1168 | ||
1142 | out: | ||
1143 | __pipe_unlock(pipe); | 1169 | __pipe_unlock(pipe); |
1144 | return ret; | 1170 | return ret; |
1145 | } | 1171 | } |
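The fs/pipe.c hunks above fold the size rounding and the soft/hard pipe-buffer limit checks into pipe_set_size(), which now takes the raw fcntl argument and reverts the pipe-buffer accounting on every failure path. The userspace interface is unchanged; a minimal sketch of exercising it through fcntl(2) follows (a sketch only; the 128 KiB figure assumes 4 KiB pages).

/* Minimal userspace sketch: request a pipe capacity and read back what
 * the kernel granted. F_SETPIPE_SZ rounds the request up to a
 * power-of-two number of pages; unprivileged requests above
 * /proc/sys/fs/pipe-max-size fail with EPERM.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int cap;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	/* Ask for 100 KiB; with 4 KiB pages this is rounded up to 128 KiB. */
	cap = fcntl(fds[1], F_SETPIPE_SZ, 100 * 1024);
	if (cap < 0)
		perror("F_SETPIPE_SZ");
	else
		printf("granted capacity: %d bytes\n", cap);

	printf("current capacity: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));

	close(fds[0]);
	close(fds[1]);
	return 0;
}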
diff --git a/fs/select.c b/fs/select.c index 8ed9da50896a..3d4f85defeab 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/sched/rt.h> | 29 | #include <linux/sched/rt.h> |
30 | #include <linux/freezer.h> | 30 | #include <linux/freezer.h> |
31 | #include <net/busy_poll.h> | 31 | #include <net/busy_poll.h> |
32 | #include <linux/vmalloc.h> | ||
32 | 33 | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | 35 | ||
@@ -554,7 +555,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
554 | fd_set_bits fds; | 555 | fd_set_bits fds; |
555 | void *bits; | 556 | void *bits; |
556 | int ret, max_fds; | 557 | int ret, max_fds; |
557 | unsigned int size; | 558 | size_t size, alloc_size; |
558 | struct fdtable *fdt; | 559 | struct fdtable *fdt; |
559 | /* Allocate small arguments on the stack to save memory and be faster */ | 560 | /* Allocate small arguments on the stack to save memory and be faster */ |
560 | long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; | 561 | long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; |
@@ -581,7 +582,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
581 | if (size > sizeof(stack_fds) / 6) { | 582 | if (size > sizeof(stack_fds) / 6) { |
582 | /* Not enough space in on-stack array; must use kmalloc */ | 583 | /* Not enough space in on-stack array; must use kmalloc */ |
583 | ret = -ENOMEM; | 584 | ret = -ENOMEM; |
584 | bits = kmalloc(6 * size, GFP_KERNEL); | 585 | if (size > (SIZE_MAX / 6)) |
586 | goto out_nofds; | ||
587 | |||
588 | alloc_size = 6 * size; | ||
589 | bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN); | ||
590 | if (!bits && alloc_size > PAGE_SIZE) | ||
591 | bits = vmalloc(alloc_size); | ||
592 | |||
585 | if (!bits) | 593 | if (!bits) |
586 | goto out_nofds; | 594 | goto out_nofds; |
587 | } | 595 | } |
@@ -618,7 +626,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
618 | 626 | ||
619 | out: | 627 | out: |
620 | if (bits != stack_fds) | 628 | if (bits != stack_fds) |
621 | kfree(bits); | 629 | kvfree(bits); |
622 | out_nofds: | 630 | out_nofds: |
623 | return ret; | 631 | return ret; |
624 | } | 632 | } |
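The core_sys_select() change guards the 6 * size multiplication against overflow and, for oversized fd sets, falls back from kmalloc() to vmalloc(), freeing with kvfree(), which accepts memory from either allocator. A minimal kernel-style sketch of the fallback pattern on its own, detached from select():

/* Hedged kernel-style sketch of the allocation fallback used above:
 * try the slab allocator quietly, fall back to vmalloc() for large
 * requests, and free with kvfree(), which handles both kinds of memory.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_big_buffer(size_t size)
{
	void *p;

	/* __GFP_NOWARN: a kmalloc() failure here is expected and handled. */
	p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!p && size > PAGE_SIZE)
		p = vmalloc(size);
	return p;	/* NULL only if both allocators failed */
}

static void free_big_buffer(void *p)
{
	kvfree(p);	/* no need to remember which allocator was used */
}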
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h index 7caaf298f539..28c15050ebe6 100644 --- a/include/linux/auto_dev-ioctl.h +++ b/include/linux/auto_dev-ioctl.h | |||
@@ -10,214 +10,5 @@ | |||
10 | #ifndef _LINUX_AUTO_DEV_IOCTL_H | 10 | #ifndef _LINUX_AUTO_DEV_IOCTL_H |
11 | #define _LINUX_AUTO_DEV_IOCTL_H | 11 | #define _LINUX_AUTO_DEV_IOCTL_H |
12 | 12 | ||
13 | #include <linux/auto_fs.h> | 13 | #include <uapi/linux/auto_dev-ioctl.h> |
14 | #include <linux/string.h> | ||
15 | |||
16 | #define AUTOFS_DEVICE_NAME "autofs" | ||
17 | |||
18 | #define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1 | ||
19 | #define AUTOFS_DEV_IOCTL_VERSION_MINOR 0 | ||
20 | |||
21 | #define AUTOFS_DEVID_LEN 16 | ||
22 | |||
23 | #define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl) | ||
24 | |||
25 | /* | ||
26 | * An ioctl interface for autofs mount point control. | ||
27 | */ | ||
28 | |||
29 | struct args_protover { | ||
30 | __u32 version; | ||
31 | }; | ||
32 | |||
33 | struct args_protosubver { | ||
34 | __u32 sub_version; | ||
35 | }; | ||
36 | |||
37 | struct args_openmount { | ||
38 | __u32 devid; | ||
39 | }; | ||
40 | |||
41 | struct args_ready { | ||
42 | __u32 token; | ||
43 | }; | ||
44 | |||
45 | struct args_fail { | ||
46 | __u32 token; | ||
47 | __s32 status; | ||
48 | }; | ||
49 | |||
50 | struct args_setpipefd { | ||
51 | __s32 pipefd; | ||
52 | }; | ||
53 | |||
54 | struct args_timeout { | ||
55 | __u64 timeout; | ||
56 | }; | ||
57 | |||
58 | struct args_requester { | ||
59 | __u32 uid; | ||
60 | __u32 gid; | ||
61 | }; | ||
62 | |||
63 | struct args_expire { | ||
64 | __u32 how; | ||
65 | }; | ||
66 | |||
67 | struct args_askumount { | ||
68 | __u32 may_umount; | ||
69 | }; | ||
70 | |||
71 | struct args_ismountpoint { | ||
72 | union { | ||
73 | struct args_in { | ||
74 | __u32 type; | ||
75 | } in; | ||
76 | struct args_out { | ||
77 | __u32 devid; | ||
78 | __u32 magic; | ||
79 | } out; | ||
80 | }; | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * All the ioctls use this structure. | ||
85 | * When sending a path size must account for the total length | ||
86 | * of the chunk of memory otherwise it is the size of the | ||
87 | * structure. | ||
88 | */ | ||
89 | |||
90 | struct autofs_dev_ioctl { | ||
91 | __u32 ver_major; | ||
92 | __u32 ver_minor; | ||
93 | __u32 size; /* total size of data passed in | ||
94 | * including this struct */ | ||
95 | __s32 ioctlfd; /* automount command fd */ | ||
96 | |||
97 | /* Command parameters */ | ||
98 | |||
99 | union { | ||
100 | struct args_protover protover; | ||
101 | struct args_protosubver protosubver; | ||
102 | struct args_openmount openmount; | ||
103 | struct args_ready ready; | ||
104 | struct args_fail fail; | ||
105 | struct args_setpipefd setpipefd; | ||
106 | struct args_timeout timeout; | ||
107 | struct args_requester requester; | ||
108 | struct args_expire expire; | ||
109 | struct args_askumount askumount; | ||
110 | struct args_ismountpoint ismountpoint; | ||
111 | }; | ||
112 | |||
113 | char path[0]; | ||
114 | }; | ||
115 | |||
116 | static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) | ||
117 | { | ||
118 | memset(in, 0, sizeof(struct autofs_dev_ioctl)); | ||
119 | in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; | ||
120 | in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; | ||
121 | in->size = sizeof(struct autofs_dev_ioctl); | ||
122 | in->ioctlfd = -1; | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * If you change this make sure you make the corresponding change | ||
127 | * to autofs-dev-ioctl.c:lookup_ioctl() | ||
128 | */ | ||
129 | enum { | ||
130 | /* Get various version info */ | ||
131 | AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, | ||
132 | AUTOFS_DEV_IOCTL_PROTOVER_CMD, | ||
133 | AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, | ||
134 | |||
135 | /* Open mount ioctl fd */ | ||
136 | AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, | ||
137 | |||
138 | /* Close mount ioctl fd */ | ||
139 | AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, | ||
140 | |||
141 | /* Mount/expire status returns */ | ||
142 | AUTOFS_DEV_IOCTL_READY_CMD, | ||
143 | AUTOFS_DEV_IOCTL_FAIL_CMD, | ||
144 | |||
145 | /* Activate/deactivate autofs mount */ | ||
146 | AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, | ||
147 | AUTOFS_DEV_IOCTL_CATATONIC_CMD, | ||
148 | |||
149 | /* Expiry timeout */ | ||
150 | AUTOFS_DEV_IOCTL_TIMEOUT_CMD, | ||
151 | |||
152 | /* Get mount last requesting uid and gid */ | ||
153 | AUTOFS_DEV_IOCTL_REQUESTER_CMD, | ||
154 | |||
155 | /* Check for eligible expire candidates */ | ||
156 | AUTOFS_DEV_IOCTL_EXPIRE_CMD, | ||
157 | |||
158 | /* Request busy status */ | ||
159 | AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, | ||
160 | |||
161 | /* Check if path is a mountpoint */ | ||
162 | AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, | ||
163 | }; | ||
164 | |||
165 | #define AUTOFS_IOCTL 0x93 | ||
166 | |||
167 | #define AUTOFS_DEV_IOCTL_VERSION \ | ||
168 | _IOWR(AUTOFS_IOCTL, \ | ||
169 | AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) | ||
170 | |||
171 | #define AUTOFS_DEV_IOCTL_PROTOVER \ | ||
172 | _IOWR(AUTOFS_IOCTL, \ | ||
173 | AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl) | ||
174 | |||
175 | #define AUTOFS_DEV_IOCTL_PROTOSUBVER \ | ||
176 | _IOWR(AUTOFS_IOCTL, \ | ||
177 | AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl) | ||
178 | |||
179 | #define AUTOFS_DEV_IOCTL_OPENMOUNT \ | ||
180 | _IOWR(AUTOFS_IOCTL, \ | ||
181 | AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl) | ||
182 | |||
183 | #define AUTOFS_DEV_IOCTL_CLOSEMOUNT \ | ||
184 | _IOWR(AUTOFS_IOCTL, \ | ||
185 | AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl) | ||
186 | |||
187 | #define AUTOFS_DEV_IOCTL_READY \ | ||
188 | _IOWR(AUTOFS_IOCTL, \ | ||
189 | AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl) | ||
190 | |||
191 | #define AUTOFS_DEV_IOCTL_FAIL \ | ||
192 | _IOWR(AUTOFS_IOCTL, \ | ||
193 | AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl) | ||
194 | |||
195 | #define AUTOFS_DEV_IOCTL_SETPIPEFD \ | ||
196 | _IOWR(AUTOFS_IOCTL, \ | ||
197 | AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl) | ||
198 | |||
199 | #define AUTOFS_DEV_IOCTL_CATATONIC \ | ||
200 | _IOWR(AUTOFS_IOCTL, \ | ||
201 | AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl) | ||
202 | |||
203 | #define AUTOFS_DEV_IOCTL_TIMEOUT \ | ||
204 | _IOWR(AUTOFS_IOCTL, \ | ||
205 | AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl) | ||
206 | |||
207 | #define AUTOFS_DEV_IOCTL_REQUESTER \ | ||
208 | _IOWR(AUTOFS_IOCTL, \ | ||
209 | AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl) | ||
210 | |||
211 | #define AUTOFS_DEV_IOCTL_EXPIRE \ | ||
212 | _IOWR(AUTOFS_IOCTL, \ | ||
213 | AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl) | ||
214 | |||
215 | #define AUTOFS_DEV_IOCTL_ASKUMOUNT \ | ||
216 | _IOWR(AUTOFS_IOCTL, \ | ||
217 | AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl) | ||
218 | |||
219 | #define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \ | ||
220 | _IOWR(AUTOFS_IOCTL, \ | ||
221 | AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl) | ||
222 | |||
223 | #endif /* _LINUX_AUTO_DEV_IOCTL_H */ | 14 | #endif /* _LINUX_AUTO_DEV_IOCTL_H */ |
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index b4066bb89083..b8f814c95cf5 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #define _LINUX_AUTO_FS_H | 10 | #define _LINUX_AUTO_FS_H |
11 | 11 | ||
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | #include <linux/limits.h> | ||
14 | #include <linux/ioctl.h> | 13 | #include <linux/ioctl.h> |
15 | #include <uapi/linux/auto_fs.h> | 14 | #include <uapi/linux/auto_fs.h> |
16 | #endif /* _LINUX_AUTO_FS_H */ | 15 | #endif /* _LINUX_AUTO_FS_H */ |
diff --git a/include/linux/ctype.h b/include/linux/ctype.h index 653589e3e30e..f13e4ff6835a 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h | |||
@@ -22,7 +22,10 @@ extern const unsigned char _ctype[]; | |||
22 | #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) | 22 | #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) |
23 | #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) | 23 | #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) |
24 | #define iscntrl(c) ((__ismask(c)&(_C)) != 0) | 24 | #define iscntrl(c) ((__ismask(c)&(_C)) != 0) |
25 | #define isdigit(c) ((__ismask(c)&(_D)) != 0) | 25 | static inline int isdigit(int c) |
26 | { | ||
27 | return '0' <= c && c <= '9'; | ||
28 | } | ||
26 | #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) | 29 | #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) |
27 | #define islower(c) ((__ismask(c)&(_L)) != 0) | 30 | #define islower(c) ((__ismask(c)&(_L)) != 0) |
28 | #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) | 31 | #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) |
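Making isdigit() a static inline range check drops the dependency on the _ctype[] lookup table and lets the compiler fold constant arguments. A small userspace illustration of the equivalent check (my_isdigit() is a stand-in for the new inline):

/* Userspace illustration of the range-check form of isdigit(). */
#include <stdio.h>

static inline int my_isdigit(int c)
{
	return '0' <= c && c <= '9';
}

int main(void)
{
	const char *s = "pipe-max-size=1048576";

	/* Print only the digit characters: 1048576 */
	for (; *s; s++)
		if (my_isdigit((unsigned char)*s))
			putchar(*s);
	putchar('\n');
	return 0;
}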
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 0f90eb5e3c6b..08528afdf58b 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -56,6 +56,11 @@ | |||
56 | * that gives better TLB efficiency. | 56 | * that gives better TLB efficiency. |
57 | */ | 57 | */ |
58 | #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) | 58 | #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) |
59 | /* | ||
60 | * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress | ||
61 | * allocation failure reports (similarly to __GFP_NOWARN). | ||
62 | */ | ||
63 | #define DMA_ATTR_NO_WARN (1UL << 8) | ||
59 | 64 | ||
60 | /* | 65 | /* |
61 | * A dma_addr_t can hold any valid DMA or bus address for the platform. | 66 | * A dma_addr_t can hold any valid DMA or bus address for the platform. |
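DMA_ATTR_NO_WARN travels in the attrs argument of the dma_alloc_attrs() family. A hedged sketch of a driver that handles allocation failure itself (for instance by retrying later) and therefore suppresses the failure report; the device pointer and retry policy are illustrative only:

/* Hedged kernel-style sketch: suppress the allocation-failure report
 * for a DMA allocation whose failure the caller handles anyway.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *my_alloc_dma_buffer(struct device *dev, size_t size,
				 dma_addr_t *handle)
{
	/* The caller retries on failure, so keep the log quiet. */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NO_WARN);
}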
diff --git a/include/linux/export.h b/include/linux/export.h index c565f87f005e..d7df4922da1d 100644 --- a/include/linux/export.h +++ b/include/linux/export.h | |||
@@ -78,7 +78,6 @@ extern struct module __this_module; | |||
78 | 78 | ||
79 | #elif defined(CONFIG_TRIM_UNUSED_KSYMS) | 79 | #elif defined(CONFIG_TRIM_UNUSED_KSYMS) |
80 | 80 | ||
81 | #include <linux/kconfig.h> | ||
82 | #include <generated/autoksyms.h> | 81 | #include <generated/autoksyms.h> |
83 | 82 | ||
84 | #define __EXPORT_SYMBOL(sym, sec) \ | 83 | #define __EXPORT_SYMBOL(sym, sec) \ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index c145219286a8..bc65d5918140 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -440,8 +440,9 @@ struct address_space { | |||
440 | unsigned long nrexceptional; | 440 | unsigned long nrexceptional; |
441 | pgoff_t writeback_index;/* writeback starts here */ | 441 | pgoff_t writeback_index;/* writeback starts here */ |
442 | const struct address_space_operations *a_ops; /* methods */ | 442 | const struct address_space_operations *a_ops; /* methods */ |
443 | unsigned long flags; /* error bits/gfp mask */ | 443 | unsigned long flags; /* error bits */ |
444 | spinlock_t private_lock; /* for use by the address_space */ | 444 | spinlock_t private_lock; /* for use by the address_space */ |
445 | gfp_t gfp_mask; /* implicit gfp mask for allocations */ | ||
445 | struct list_head private_list; /* ditto */ | 446 | struct list_head private_list; /* ditto */ |
446 | void *private_data; /* ditto */ | 447 | void *private_data; /* ditto */ |
447 | } __attribute__((aligned(sizeof(long)))); | 448 | } __attribute__((aligned(sizeof(long)))); |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1f0be7213e6d..24e2cc56beb1 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/irqdomain.h> | 8 | #include <linux/irqdomain.h> |
9 | #include <linux/lockdep.h> | 9 | #include <linux/lockdep.h> |
10 | #include <linux/pinctrl/pinctrl.h> | 10 | #include <linux/pinctrl/pinctrl.h> |
11 | #include <linux/kconfig.h> | ||
12 | 11 | ||
13 | struct gpio_desc; | 12 | struct gpio_desc; |
14 | struct of_phandle_args; | 13 | struct of_phandle_args; |
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d7437777baaa..406c33dcae13 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -259,6 +259,12 @@ phys_addr_t paddr_vmcoreinfo_note(void); | |||
259 | vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) | 259 | vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) |
260 | #define VMCOREINFO_CONFIG(name) \ | 260 | #define VMCOREINFO_CONFIG(name) \ |
261 | vmcoreinfo_append_str("CONFIG_%s=y\n", #name) | 261 | vmcoreinfo_append_str("CONFIG_%s=y\n", #name) |
262 | #define VMCOREINFO_PAGE_OFFSET(value) \ | ||
263 | vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) | ||
264 | #define VMCOREINFO_VMALLOC_START(value) \ | ||
265 | vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) | ||
266 | #define VMCOREINFO_VMEMMAP_START(value) \ | ||
267 | vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) | ||
262 | 268 | ||
263 | extern struct kimage *kexec_image; | 269 | extern struct kimage *kexec_image; |
264 | extern struct kimage *kexec_crash_image; | 270 | extern struct kimage *kexec_crash_image; |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 4894c6888bc6..1c2a32829620 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
@@ -38,6 +38,11 @@ extern void kmemleak_not_leak(const void *ptr) __ref; | |||
38 | extern void kmemleak_ignore(const void *ptr) __ref; | 38 | extern void kmemleak_ignore(const void *ptr) __ref; |
39 | extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; | 39 | extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; |
40 | extern void kmemleak_no_scan(const void *ptr) __ref; | 40 | extern void kmemleak_no_scan(const void *ptr) __ref; |
41 | extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, | ||
42 | gfp_t gfp) __ref; | ||
43 | extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref; | ||
44 | extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref; | ||
45 | extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; | ||
41 | 46 | ||
42 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | 47 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, |
43 | int min_count, unsigned long flags, | 48 | int min_count, unsigned long flags, |
@@ -106,6 +111,19 @@ static inline void kmemleak_erase(void **ptr) | |||
106 | static inline void kmemleak_no_scan(const void *ptr) | 111 | static inline void kmemleak_no_scan(const void *ptr) |
107 | { | 112 | { |
108 | } | 113 | } |
114 | static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size, | ||
115 | int min_count, gfp_t gfp) | ||
116 | { | ||
117 | } | ||
118 | static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size) | ||
119 | { | ||
120 | } | ||
121 | static inline void kmemleak_not_leak_phys(phys_addr_t phys) | ||
122 | { | ||
123 | } | ||
124 | static inline void kmemleak_ignore_phys(phys_addr_t phys) | ||
125 | { | ||
126 | } | ||
109 | 127 | ||
110 | #endif /* CONFIG_DEBUG_KMEMLEAK */ | 128 | #endif /* CONFIG_DEBUG_KMEMLEAK */ |
111 | 129 | ||
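The new *_phys variants let code that only holds a physical address (for example a region that may lie outside the lowmem mapping) talk to kmemleak without converting through __va(). A hedged kernel-style sketch using only the helpers declared above; how the region was reserved is deliberately left out:

/* Hedged kernel-style sketch: inform kmemleak about a region known only
 * by physical address. The region itself is assumed to come from some
 * early reservation mechanism not shown here.
 */
#include <linux/kmemleak.h>
#include <linux/gfp.h>

static void track_region(phys_addr_t phys, size_t size)
{
	/* Register the object; report it if no reference is ever found. */
	kmemleak_alloc_phys(phys, size, 1, GFP_KERNEL);
}

static void region_is_intentionally_unreferenced(phys_addr_t phys)
{
	/* Nothing will point at it: exclude it from leak reports. */
	kmemleak_ignore_phys(phys);
}

static void region_head_released(phys_addr_t phys, size_t size)
{
	/* The first half was handed back; stop tracking that part. */
	kmemleak_free_part_phys(phys, size / 2);
}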
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index e691b6a23f72..a6e82a69c363 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
@@ -10,6 +10,17 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
10 | int node, | 10 | int node, |
11 | const char namefmt[], ...); | 11 | const char namefmt[], ...); |
12 | 12 | ||
13 | /** | ||
14 | * kthread_create - create a kthread on the current node | ||
15 | * @threadfn: the function to run in the thread | ||
16 | * @data: data pointer for @threadfn() | ||
17 | * @namefmt: printf-style format string for the thread name | ||
18 | * @...: arguments for @namefmt. | ||
19 | * | ||
20 | * This macro will create a kthread on the current node, leaving it in | ||
21 | * the stopped state. This is just a helper for kthread_create_on_node(); | ||
22 | * see the documentation there for more details. | ||
23 | */ | ||
13 | #define kthread_create(threadfn, data, namefmt, arg...) \ | 24 | #define kthread_create(threadfn, data, namefmt, arg...) \ |
14 | kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg) | 25 | kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg) |
15 | 26 | ||
@@ -44,7 +55,7 @@ bool kthread_should_stop(void); | |||
44 | bool kthread_should_park(void); | 55 | bool kthread_should_park(void); |
45 | bool kthread_freezable_should_stop(bool *was_frozen); | 56 | bool kthread_freezable_should_stop(bool *was_frozen); |
46 | void *kthread_data(struct task_struct *k); | 57 | void *kthread_data(struct task_struct *k); |
47 | void *probe_kthread_data(struct task_struct *k); | 58 | void *kthread_probe_data(struct task_struct *k); |
48 | int kthread_park(struct task_struct *k); | 59 | int kthread_park(struct task_struct *k); |
49 | void kthread_unpark(struct task_struct *k); | 60 | void kthread_unpark(struct task_struct *k); |
50 | void kthread_parkme(void); | 61 | void kthread_parkme(void); |
@@ -57,16 +68,23 @@ extern int tsk_fork_get_node(struct task_struct *tsk); | |||
57 | * Simple work processor based on kthread. | 68 | * Simple work processor based on kthread. |
58 | * | 69 | * |
59 | * This provides easier way to make use of kthreads. A kthread_work | 70 | * This provides easier way to make use of kthreads. A kthread_work |
60 | * can be queued and flushed using queue/flush_kthread_work() | 71 | * can be queued and flushed using queue/kthread_flush_work() |
61 | * respectively. Queued kthread_works are processed by a kthread | 72 | * respectively. Queued kthread_works are processed by a kthread |
62 | * running kthread_worker_fn(). | 73 | * running kthread_worker_fn(). |
63 | */ | 74 | */ |
64 | struct kthread_work; | 75 | struct kthread_work; |
65 | typedef void (*kthread_work_func_t)(struct kthread_work *work); | 76 | typedef void (*kthread_work_func_t)(struct kthread_work *work); |
77 | void kthread_delayed_work_timer_fn(unsigned long __data); | ||
78 | |||
79 | enum { | ||
80 | KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ | ||
81 | }; | ||
66 | 82 | ||
67 | struct kthread_worker { | 83 | struct kthread_worker { |
84 | unsigned int flags; | ||
68 | spinlock_t lock; | 85 | spinlock_t lock; |
69 | struct list_head work_list; | 86 | struct list_head work_list; |
87 | struct list_head delayed_work_list; | ||
70 | struct task_struct *task; | 88 | struct task_struct *task; |
71 | struct kthread_work *current_work; | 89 | struct kthread_work *current_work; |
72 | }; | 90 | }; |
@@ -75,11 +93,19 @@ struct kthread_work { | |||
75 | struct list_head node; | 93 | struct list_head node; |
76 | kthread_work_func_t func; | 94 | kthread_work_func_t func; |
77 | struct kthread_worker *worker; | 95 | struct kthread_worker *worker; |
96 | /* Number of canceling calls that are running at the moment. */ | ||
97 | int canceling; | ||
98 | }; | ||
99 | |||
100 | struct kthread_delayed_work { | ||
101 | struct kthread_work work; | ||
102 | struct timer_list timer; | ||
78 | }; | 103 | }; |
79 | 104 | ||
80 | #define KTHREAD_WORKER_INIT(worker) { \ | 105 | #define KTHREAD_WORKER_INIT(worker) { \ |
81 | .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ | 106 | .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ |
82 | .work_list = LIST_HEAD_INIT((worker).work_list), \ | 107 | .work_list = LIST_HEAD_INIT((worker).work_list), \ |
108 | .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ | ||
83 | } | 109 | } |
84 | 110 | ||
85 | #define KTHREAD_WORK_INIT(work, fn) { \ | 111 | #define KTHREAD_WORK_INIT(work, fn) { \ |
@@ -87,46 +113,88 @@ struct kthread_work { | |||
87 | .func = (fn), \ | 113 | .func = (fn), \ |
88 | } | 114 | } |
89 | 115 | ||
116 | #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ | ||
117 | .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ | ||
118 | .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \ | ||
119 | 0, (unsigned long)&(dwork), \ | ||
120 | TIMER_IRQSAFE), \ | ||
121 | } | ||
122 | |||
90 | #define DEFINE_KTHREAD_WORKER(worker) \ | 123 | #define DEFINE_KTHREAD_WORKER(worker) \ |
91 | struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) | 124 | struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) |
92 | 125 | ||
93 | #define DEFINE_KTHREAD_WORK(work, fn) \ | 126 | #define DEFINE_KTHREAD_WORK(work, fn) \ |
94 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) | 127 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) |
95 | 128 | ||
129 | #define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn) \ | ||
130 | struct kthread_delayed_work dwork = \ | ||
131 | KTHREAD_DELAYED_WORK_INIT(dwork, fn) | ||
132 | |||
96 | /* | 133 | /* |
97 | * kthread_worker.lock needs its own lockdep class key when defined on | 134 | * kthread_worker.lock needs its own lockdep class key when defined on |
98 | * stack with lockdep enabled. Use the following macros in such cases. | 135 | * stack with lockdep enabled. Use the following macros in such cases. |
99 | */ | 136 | */ |
100 | #ifdef CONFIG_LOCKDEP | 137 | #ifdef CONFIG_LOCKDEP |
101 | # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ | 138 | # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ |
102 | ({ init_kthread_worker(&worker); worker; }) | 139 | ({ kthread_init_worker(&worker); worker; }) |
103 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ | 140 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ |
104 | struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) | 141 | struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) |
105 | #else | 142 | #else |
106 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) | 143 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) |
107 | #endif | 144 | #endif |
108 | 145 | ||
109 | extern void __init_kthread_worker(struct kthread_worker *worker, | 146 | extern void __kthread_init_worker(struct kthread_worker *worker, |
110 | const char *name, struct lock_class_key *key); | 147 | const char *name, struct lock_class_key *key); |
111 | 148 | ||
112 | #define init_kthread_worker(worker) \ | 149 | #define kthread_init_worker(worker) \ |
113 | do { \ | 150 | do { \ |
114 | static struct lock_class_key __key; \ | 151 | static struct lock_class_key __key; \ |
115 | __init_kthread_worker((worker), "("#worker")->lock", &__key); \ | 152 | __kthread_init_worker((worker), "("#worker")->lock", &__key); \ |
116 | } while (0) | 153 | } while (0) |
117 | 154 | ||
118 | #define init_kthread_work(work, fn) \ | 155 | #define kthread_init_work(work, fn) \ |
119 | do { \ | 156 | do { \ |
120 | memset((work), 0, sizeof(struct kthread_work)); \ | 157 | memset((work), 0, sizeof(struct kthread_work)); \ |
121 | INIT_LIST_HEAD(&(work)->node); \ | 158 | INIT_LIST_HEAD(&(work)->node); \ |
122 | (work)->func = (fn); \ | 159 | (work)->func = (fn); \ |
123 | } while (0) | 160 | } while (0) |
124 | 161 | ||
162 | #define kthread_init_delayed_work(dwork, fn) \ | ||
163 | do { \ | ||
164 | kthread_init_work(&(dwork)->work, (fn)); \ | ||
165 | __setup_timer(&(dwork)->timer, \ | ||
166 | kthread_delayed_work_timer_fn, \ | ||
167 | (unsigned long)(dwork), \ | ||
168 | TIMER_IRQSAFE); \ | ||
169 | } while (0) | ||
170 | |||
125 | int kthread_worker_fn(void *worker_ptr); | 171 | int kthread_worker_fn(void *worker_ptr); |
126 | 172 | ||
127 | bool queue_kthread_work(struct kthread_worker *worker, | 173 | __printf(2, 3) |
174 | struct kthread_worker * | ||
175 | kthread_create_worker(unsigned int flags, const char namefmt[], ...); | ||
176 | |||
177 | struct kthread_worker * | ||
178 | kthread_create_worker_on_cpu(int cpu, unsigned int flags, | ||
179 | const char namefmt[], ...); | ||
180 | |||
181 | bool kthread_queue_work(struct kthread_worker *worker, | ||
128 | struct kthread_work *work); | 182 | struct kthread_work *work); |
129 | void flush_kthread_work(struct kthread_work *work); | 183 | |
130 | void flush_kthread_worker(struct kthread_worker *worker); | 184 | bool kthread_queue_delayed_work(struct kthread_worker *worker, |
185 | struct kthread_delayed_work *dwork, | ||
186 | unsigned long delay); | ||
187 | |||
188 | bool kthread_mod_delayed_work(struct kthread_worker *worker, | ||
189 | struct kthread_delayed_work *dwork, | ||
190 | unsigned long delay); | ||
191 | |||
192 | void kthread_flush_work(struct kthread_work *work); | ||
193 | void kthread_flush_worker(struct kthread_worker *worker); | ||
194 | |||
195 | bool kthread_cancel_work_sync(struct kthread_work *work); | ||
196 | bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); | ||
197 | |||
198 | void kthread_destroy_worker(struct kthread_worker *worker); | ||
131 | 199 | ||
132 | #endif /* _LINUX_KTHREAD_H */ | 200 | #endif /* _LINUX_KTHREAD_H */ |
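Taken together, the kthread changes give the worker API a workqueue-like shape: workers are created and destroyed explicitly, work can be delayed, re-armed, and cancelled, and every entry point carries the kthread_ prefix. A hedged kernel-style sketch of a driver using the declarations above (the handlers and the two-second delay are illustrative):

/* Hedged kernel-style sketch of the reworked kthread worker API. */
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static struct kthread_worker *worker;
static struct kthread_work poll_work;
static struct kthread_delayed_work resync_work;

static void my_poll_fn(struct kthread_work *work)
{
	/* poll the hardware ... */
}

static void my_resync_fn(struct kthread_work *work)
{
	/* periodic resync ... */
}

static int my_init(void)
{
	/* KTW_FREEZABLE: the backing kthread freezes across suspend. */
	worker = kthread_create_worker(KTW_FREEZABLE, "mydrv-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&poll_work, my_poll_fn);
	kthread_init_delayed_work(&resync_work, my_resync_fn);

	kthread_queue_work(worker, &poll_work);
	kthread_queue_delayed_work(worker, &resync_work, 2 * HZ);
	return 0;
}

static void my_exit(void)
{
	kthread_cancel_delayed_work_sync(&resync_work);
	kthread_flush_worker(worker);
	kthread_destroy_worker(worker);	/* stops the kthread, frees the worker */
}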
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 747f401cc312..dd15d39e1985 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -16,17 +16,16 @@ | |||
16 | #include <linux/hugetlb_inline.h> | 16 | #include <linux/hugetlb_inline.h> |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | 19 | * Bits in mapping->flags. |
20 | * allocation mode flags. | ||
21 | */ | 20 | */ |
22 | enum mapping_flags { | 21 | enum mapping_flags { |
23 | AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ | 22 | AS_EIO = 0, /* IO error on async write */ |
24 | AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ | 23 | AS_ENOSPC = 1, /* ENOSPC on async write */ |
25 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | 24 | AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ |
26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ | 25 | AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ |
27 | AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ | 26 | AS_EXITING = 4, /* final truncate in progress */ |
28 | /* writeback related tags are not used */ | 27 | /* writeback related tags are not used */ |
29 | AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5, | 28 | AS_NO_WRITEBACK_TAGS = 5, |
30 | }; | 29 | }; |
31 | 30 | ||
32 | static inline void mapping_set_error(struct address_space *mapping, int error) | 31 | static inline void mapping_set_error(struct address_space *mapping, int error) |
@@ -78,7 +77,7 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping) | |||
78 | 77 | ||
79 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) | 78 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
80 | { | 79 | { |
81 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; | 80 | return mapping->gfp_mask; |
82 | } | 81 | } |
83 | 82 | ||
84 | /* Restricts the given gfp_mask to what the mapping allows. */ | 83 | /* Restricts the given gfp_mask to what the mapping allows. */ |
@@ -94,8 +93,7 @@ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, | |||
94 | */ | 93 | */ |
95 | static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) | 94 | static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) |
96 | { | 95 | { |
97 | m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) | | 96 | m->gfp_mask = mask; |
98 | (__force unsigned long)mask; | ||
99 | } | 97 | } |
100 | 98 | ||
101 | void release_pages(struct page **pages, int nr, bool cold); | 99 | void release_pages(struct page **pages, int nr, bool cold); |
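With the gfp mask moved into its own address_space field, the accessors keep their signatures, so existing filesystem callers are untouched. A short hedged sketch (my_fs_* are hypothetical helpers):

/* Hedged kernel-style sketch: the accessors behave as before, only the
 * storage behind them changed.
 */
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/gfp.h>

static void my_fs_init_mapping(struct inode *inode)
{
	/* Page-cache allocations for this inode must not recurse into the FS. */
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}

static gfp_t my_fs_readpage_gfp(struct address_space *mapping)
{
	/* Constrain a caller-supplied mask to what the mapping allows. */
	return mapping_gfp_constraint(mapping, GFP_KERNEL);
}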
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52b97db93830..af3581b8a451 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -461,6 +461,14 @@ static inline struct radix_tree_node *entry_to_node(void *ptr) | |||
461 | * | 461 | * |
462 | * This function updates @iter->index in the case of a successful lookup. | 462 | * This function updates @iter->index in the case of a successful lookup. |
463 | * For tagged lookup it also eats @iter->tags. | 463 | * For tagged lookup it also eats @iter->tags. |
464 | * | ||
465 | * There are several cases where 'slot' can be passed in as NULL to this | ||
466 | * function. These cases result from the use of radix_tree_iter_next() or | ||
467 | * radix_tree_iter_retry(). In these cases we don't end up dereferencing | ||
468 | * 'slot' because either: | ||
469 | * a) we are doing tagged iteration and iter->tags has been set to 0, or | ||
470 | * b) we are doing non-tagged iteration, and iter->index and iter->next_index | ||
471 | * have been set up so that radix_tree_chunk_size() returns 1 or 0. | ||
464 | */ | 472 | */ |
465 | static __always_inline void ** | 473 | static __always_inline void ** |
466 | radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | 474 | radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) |
diff --git a/include/linux/random.h b/include/linux/random.h index 3d6e9815cd85..f7bb7a355cf7 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -34,7 +34,7 @@ extern const struct file_operations random_fops, urandom_fops; | |||
34 | 34 | ||
35 | unsigned int get_random_int(void); | 35 | unsigned int get_random_int(void); |
36 | unsigned long get_random_long(void); | 36 | unsigned long get_random_long(void); |
37 | unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); | 37 | unsigned long randomize_page(unsigned long start, unsigned long range); |
38 | 38 | ||
39 | u32 prandom_u32(void); | 39 | u32 prandom_u32(void); |
40 | void prandom_bytes(void *buf, size_t nbytes); | 40 | void prandom_bytes(void *buf, size_t nbytes); |
diff --git a/include/linux/relay.h b/include/linux/relay.h index ecbb34a382b8..68c1448e56bb 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/irq_work.h> | ||
18 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
19 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
20 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
@@ -38,7 +39,7 @@ struct rchan_buf | |||
38 | size_t subbufs_consumed; /* count of sub-buffers consumed */ | 39 | size_t subbufs_consumed; /* count of sub-buffers consumed */ |
39 | struct rchan *chan; /* associated channel */ | 40 | struct rchan *chan; /* associated channel */ |
40 | wait_queue_head_t read_wait; /* reader wait queue */ | 41 | wait_queue_head_t read_wait; /* reader wait queue */ |
41 | struct timer_list timer; /* reader wake-up timer */ | 42 | struct irq_work wakeup_work; /* reader wakeup */ |
42 | struct dentry *dentry; /* channel file dentry */ | 43 | struct dentry *dentry; /* channel file dentry */ |
43 | struct kref kref; /* channel buffer refcount */ | 44 | struct kref kref; /* channel buffer refcount */ |
44 | struct page **page_array; /* array of current buffer pages */ | 45 | struct page **page_array; /* array of current buffer pages */ |
diff --git a/include/linux/sem.h b/include/linux/sem.h index 976ce3a19f1b..d0efd6e6c20a 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h | |||
@@ -21,6 +21,7 @@ struct sem_array { | |||
21 | struct list_head list_id; /* undo requests on this array */ | 21 | struct list_head list_id; /* undo requests on this array */ |
22 | int sem_nsems; /* no. of semaphores in array */ | 22 | int sem_nsems; /* no. of semaphores in array */ |
23 | int complex_count; /* pending complex operations */ | 23 | int complex_count; /* pending complex operations */ |
24 | bool complex_mode; /* no parallel simple ops */ | ||
24 | }; | 25 | }; |
25 | 26 | ||
26 | #ifdef CONFIG_SYSVIPC | 27 | #ifdef CONFIG_SYSVIPC |
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h new file mode 100644 index 000000000000..021ed331dd71 --- /dev/null +++ b/include/uapi/linux/auto_dev-ioctl.h | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Red Hat, Inc. All rights reserved. | ||
3 | * Copyright 2008 Ian Kent <raven@themaw.net> | ||
4 | * | ||
5 | * This file is part of the Linux kernel and is made available under | ||
6 | * the terms of the GNU General Public License, version 2, or at your | ||
7 | * option, any later version, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef _UAPI_LINUX_AUTO_DEV_IOCTL_H | ||
11 | #define _UAPI_LINUX_AUTO_DEV_IOCTL_H | ||
12 | |||
13 | #include <linux/auto_fs.h> | ||
14 | #include <linux/string.h> | ||
15 | |||
16 | #define AUTOFS_DEVICE_NAME "autofs" | ||
17 | |||
18 | #define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1 | ||
19 | #define AUTOFS_DEV_IOCTL_VERSION_MINOR 0 | ||
20 | |||
21 | #define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl) | ||
22 | |||
23 | /* | ||
24 | * An ioctl interface for autofs mount point control. | ||
25 | */ | ||
26 | |||
27 | struct args_protover { | ||
28 | __u32 version; | ||
29 | }; | ||
30 | |||
31 | struct args_protosubver { | ||
32 | __u32 sub_version; | ||
33 | }; | ||
34 | |||
35 | struct args_openmount { | ||
36 | __u32 devid; | ||
37 | }; | ||
38 | |||
39 | struct args_ready { | ||
40 | __u32 token; | ||
41 | }; | ||
42 | |||
43 | struct args_fail { | ||
44 | __u32 token; | ||
45 | __s32 status; | ||
46 | }; | ||
47 | |||
48 | struct args_setpipefd { | ||
49 | __s32 pipefd; | ||
50 | }; | ||
51 | |||
52 | struct args_timeout { | ||
53 | __u64 timeout; | ||
54 | }; | ||
55 | |||
56 | struct args_requester { | ||
57 | __u32 uid; | ||
58 | __u32 gid; | ||
59 | }; | ||
60 | |||
61 | struct args_expire { | ||
62 | __u32 how; | ||
63 | }; | ||
64 | |||
65 | struct args_askumount { | ||
66 | __u32 may_umount; | ||
67 | }; | ||
68 | |||
69 | struct args_ismountpoint { | ||
70 | union { | ||
71 | struct args_in { | ||
72 | __u32 type; | ||
73 | } in; | ||
74 | struct args_out { | ||
75 | __u32 devid; | ||
76 | __u32 magic; | ||
77 | } out; | ||
78 | }; | ||
79 | }; | ||
80 | |||
81 | /* | ||
82 | * All the ioctls use this structure. | ||
83 | * When sending a path size must account for the total length | ||
84 | * of the chunk of memory otherwise it is the size of the | ||
85 | * structure. | ||
86 | */ | ||
87 | |||
88 | struct autofs_dev_ioctl { | ||
89 | __u32 ver_major; | ||
90 | __u32 ver_minor; | ||
91 | __u32 size; /* total size of data passed in | ||
92 | * including this struct */ | ||
93 | __s32 ioctlfd; /* automount command fd */ | ||
94 | |||
95 | /* Command parameters */ | ||
96 | |||
97 | union { | ||
98 | struct args_protover protover; | ||
99 | struct args_protosubver protosubver; | ||
100 | struct args_openmount openmount; | ||
101 | struct args_ready ready; | ||
102 | struct args_fail fail; | ||
103 | struct args_setpipefd setpipefd; | ||
104 | struct args_timeout timeout; | ||
105 | struct args_requester requester; | ||
106 | struct args_expire expire; | ||
107 | struct args_askumount askumount; | ||
108 | struct args_ismountpoint ismountpoint; | ||
109 | }; | ||
110 | |||
111 | char path[0]; | ||
112 | }; | ||
113 | |||
114 | static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) | ||
115 | { | ||
116 | memset(in, 0, sizeof(struct autofs_dev_ioctl)); | ||
117 | in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; | ||
118 | in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; | ||
119 | in->size = sizeof(struct autofs_dev_ioctl); | ||
120 | in->ioctlfd = -1; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * If you change this make sure you make the corresponding change | ||
125 | * to autofs-dev-ioctl.c:lookup_ioctl() | ||
126 | */ | ||
127 | enum { | ||
128 | /* Get various version info */ | ||
129 | AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, | ||
130 | AUTOFS_DEV_IOCTL_PROTOVER_CMD, | ||
131 | AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, | ||
132 | |||
133 | /* Open mount ioctl fd */ | ||
134 | AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, | ||
135 | |||
136 | /* Close mount ioctl fd */ | ||
137 | AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, | ||
138 | |||
139 | /* Mount/expire status returns */ | ||
140 | AUTOFS_DEV_IOCTL_READY_CMD, | ||
141 | AUTOFS_DEV_IOCTL_FAIL_CMD, | ||
142 | |||
143 | /* Activate/deactivate autofs mount */ | ||
144 | AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, | ||
145 | AUTOFS_DEV_IOCTL_CATATONIC_CMD, | ||
146 | |||
147 | /* Expiry timeout */ | ||
148 | AUTOFS_DEV_IOCTL_TIMEOUT_CMD, | ||
149 | |||
150 | /* Get mount last requesting uid and gid */ | ||
151 | AUTOFS_DEV_IOCTL_REQUESTER_CMD, | ||
152 | |||
153 | /* Check for eligible expire candidates */ | ||
154 | AUTOFS_DEV_IOCTL_EXPIRE_CMD, | ||
155 | |||
156 | /* Request busy status */ | ||
157 | AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, | ||
158 | |||
159 | /* Check if path is a mountpoint */ | ||
160 | AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, | ||
161 | }; | ||
162 | |||
163 | #define AUTOFS_IOCTL 0x93 | ||
164 | |||
165 | #define AUTOFS_DEV_IOCTL_VERSION \ | ||
166 | _IOWR(AUTOFS_IOCTL, \ | ||
167 | AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) | ||
168 | |||
169 | #define AUTOFS_DEV_IOCTL_PROTOVER \ | ||
170 | _IOWR(AUTOFS_IOCTL, \ | ||
171 | AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl) | ||
172 | |||
173 | #define AUTOFS_DEV_IOCTL_PROTOSUBVER \ | ||
174 | _IOWR(AUTOFS_IOCTL, \ | ||
175 | AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl) | ||
176 | |||
177 | #define AUTOFS_DEV_IOCTL_OPENMOUNT \ | ||
178 | _IOWR(AUTOFS_IOCTL, \ | ||
179 | AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl) | ||
180 | |||
181 | #define AUTOFS_DEV_IOCTL_CLOSEMOUNT \ | ||
182 | _IOWR(AUTOFS_IOCTL, \ | ||
183 | AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl) | ||
184 | |||
185 | #define AUTOFS_DEV_IOCTL_READY \ | ||
186 | _IOWR(AUTOFS_IOCTL, \ | ||
187 | AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl) | ||
188 | |||
189 | #define AUTOFS_DEV_IOCTL_FAIL \ | ||
190 | _IOWR(AUTOFS_IOCTL, \ | ||
191 | AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl) | ||
192 | |||
193 | #define AUTOFS_DEV_IOCTL_SETPIPEFD \ | ||
194 | _IOWR(AUTOFS_IOCTL, \ | ||
195 | AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl) | ||
196 | |||
197 | #define AUTOFS_DEV_IOCTL_CATATONIC \ | ||
198 | _IOWR(AUTOFS_IOCTL, \ | ||
199 | AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl) | ||
200 | |||
201 | #define AUTOFS_DEV_IOCTL_TIMEOUT \ | ||
202 | _IOWR(AUTOFS_IOCTL, \ | ||
203 | AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl) | ||
204 | |||
205 | #define AUTOFS_DEV_IOCTL_REQUESTER \ | ||
206 | _IOWR(AUTOFS_IOCTL, \ | ||
207 | AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl) | ||
208 | |||
209 | #define AUTOFS_DEV_IOCTL_EXPIRE \ | ||
210 | _IOWR(AUTOFS_IOCTL, \ | ||
211 | AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl) | ||
212 | |||
213 | #define AUTOFS_DEV_IOCTL_ASKUMOUNT \ | ||
214 | _IOWR(AUTOFS_IOCTL, \ | ||
215 | AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl) | ||
216 | |||
217 | #define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \ | ||
218 | _IOWR(AUTOFS_IOCTL, \ | ||
219 | AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl) | ||
220 | |||
221 | #endif /* _UAPI_LINUX_AUTO_DEV_IOCTL_H */ | ||
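Exporting the structure and command numbers under uapi/ lets an automount daemon include the header directly. A hedged userspace sketch that queries the interface version (requires the autofs module, which provides /dev/autofs, and sufficient privileges):

/* Hedged userspace sketch: query the autofs device ioctl version. */
#include <linux/auto_dev-ioctl.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct autofs_dev_ioctl param;
	int devfd = open("/dev/" AUTOFS_DEVICE_NAME, O_RDONLY);

	if (devfd < 0) {
		perror("open /dev/autofs");
		return 1;
	}

	/* Fill in the version fields and the structure size. */
	init_autofs_dev_ioctl(&param);

	if (ioctl(devfd, AUTOFS_DEV_IOCTL_VERSION, &param) < 0) {
		perror("AUTOFS_DEV_IOCTL_VERSION");
		close(devfd);
		return 1;
	}

	printf("autofs dev ioctl version %u.%u\n",
	       param.ver_major, param.ver_minor);
	close(devfd);
	return 0;
}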
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 9175a1b4dc69..1bfc3ed8b284 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define _UAPI_LINUX_AUTO_FS_H | 12 | #define _UAPI_LINUX_AUTO_FS_H |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/limits.h> | ||
15 | #ifndef __KERNEL__ | 16 | #ifndef __KERNEL__ |
16 | #include <sys/ioctl.h> | 17 | #include <sys/ioctl.h> |
17 | #endif /* __KERNEL__ */ | 18 | #endif /* __KERNEL__ */ |
diff --git a/init/Kconfig b/init/Kconfig index d7fc22639665..34407f15e6d3 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1288,6 +1288,7 @@ config SYSFS_DEPRECATED_V2 | |||
1288 | 1288 | ||
1289 | config RELAY | 1289 | config RELAY |
1290 | bool "Kernel->user space relay support (formerly relayfs)" | 1290 | bool "Kernel->user space relay support (formerly relayfs)" |
1291 | select IRQ_WORK | ||
1291 | help | 1292 | help |
1292 | This option enables support for relay interface support in | 1293 | This option enables support for relay interface support in |
1293 | certain file systems (such as debugfs). | 1294 | certain file systems (such as debugfs). |
@@ -51,19 +51,14 @@ struct msg_receiver { | |||
51 | long r_msgtype; | 51 | long r_msgtype; |
52 | long r_maxsize; | 52 | long r_maxsize; |
53 | 53 | ||
54 | /* | 54 | struct msg_msg *r_msg; |
55 | * Mark r_msg volatile so that the compiler | ||
56 | * does not try to get smart and optimize | ||
57 | * it. We rely on this for the lockless | ||
58 | * receive algorithm. | ||
59 | */ | ||
60 | struct msg_msg *volatile r_msg; | ||
61 | }; | 55 | }; |
62 | 56 | ||
63 | /* one msg_sender for each sleeping sender */ | 57 | /* one msg_sender for each sleeping sender */ |
64 | struct msg_sender { | 58 | struct msg_sender { |
65 | struct list_head list; | 59 | struct list_head list; |
66 | struct task_struct *tsk; | 60 | struct task_struct *tsk; |
61 | size_t msgsz; | ||
67 | }; | 62 | }; |
68 | 63 | ||
69 | #define SEARCH_ANY 1 | 64 | #define SEARCH_ANY 1 |
@@ -159,45 +154,72 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
159 | return msq->q_perm.id; | 154 | return msq->q_perm.id; |
160 | } | 155 | } |
161 | 156 | ||
162 | static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) | 157 | static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz) |
158 | { | ||
159 | return msgsz + msq->q_cbytes <= msq->q_qbytes && | ||
160 | 1 + msq->q_qnum <= msq->q_qbytes; | ||
161 | } | ||
162 | |||
163 | static inline void ss_add(struct msg_queue *msq, | ||
164 | struct msg_sender *mss, size_t msgsz) | ||
163 | { | 165 | { |
164 | mss->tsk = current; | 166 | mss->tsk = current; |
167 | mss->msgsz = msgsz; | ||
165 | __set_current_state(TASK_INTERRUPTIBLE); | 168 | __set_current_state(TASK_INTERRUPTIBLE); |
166 | list_add_tail(&mss->list, &msq->q_senders); | 169 | list_add_tail(&mss->list, &msq->q_senders); |
167 | } | 170 | } |
168 | 171 | ||
169 | static inline void ss_del(struct msg_sender *mss) | 172 | static inline void ss_del(struct msg_sender *mss) |
170 | { | 173 | { |
171 | if (mss->list.next != NULL) | 174 | if (mss->list.next) |
172 | list_del(&mss->list); | 175 | list_del(&mss->list); |
173 | } | 176 | } |
174 | 177 | ||
175 | static void ss_wakeup(struct list_head *h, int kill) | 178 | static void ss_wakeup(struct msg_queue *msq, |
179 | struct wake_q_head *wake_q, bool kill) | ||
176 | { | 180 | { |
177 | struct msg_sender *mss, *t; | 181 | struct msg_sender *mss, *t; |
182 | struct task_struct *stop_tsk = NULL; | ||
183 | struct list_head *h = &msq->q_senders; | ||
178 | 184 | ||
179 | list_for_each_entry_safe(mss, t, h, list) { | 185 | list_for_each_entry_safe(mss, t, h, list) { |
180 | if (kill) | 186 | if (kill) |
181 | mss->list.next = NULL; | 187 | mss->list.next = NULL; |
182 | wake_up_process(mss->tsk); | 188 | |
189 | /* | ||
190 | * Stop at the first task we don't wakeup, | ||
191 | * we've already iterated the original | ||
192 | * sender queue. | ||
193 | */ | ||
194 | else if (stop_tsk == mss->tsk) | ||
195 | break; | ||
196 | /* | ||
197 | * We are not in an EIDRM scenario here, therefore | ||
198 | * verify that we really need to wakeup the task. | ||
199 | * To maintain current semantics and wakeup order, | ||
200 | * move the sender to the tail on behalf of the | ||
201 | * blocked task. | ||
202 | */ | ||
203 | else if (!msg_fits_inqueue(msq, mss->msgsz)) { | ||
204 | if (!stop_tsk) | ||
205 | stop_tsk = mss->tsk; | ||
206 | |||
207 | list_move_tail(&mss->list, &msq->q_senders); | ||
208 | continue; | ||
209 | } | ||
210 | |||
211 | wake_q_add(wake_q, mss->tsk); | ||
183 | } | 212 | } |
184 | } | 213 | } |
185 | 214 | ||
186 | static void expunge_all(struct msg_queue *msq, int res) | 215 | static void expunge_all(struct msg_queue *msq, int res, |
216 | struct wake_q_head *wake_q) | ||
187 | { | 217 | { |
188 | struct msg_receiver *msr, *t; | 218 | struct msg_receiver *msr, *t; |
189 | 219 | ||
190 | list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { | 220 | list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { |
191 | msr->r_msg = NULL; /* initialize expunge ordering */ | 221 | wake_q_add(wake_q, msr->r_tsk); |
192 | wake_up_process(msr->r_tsk); | 222 | WRITE_ONCE(msr->r_msg, ERR_PTR(res)); |
193 | /* | ||
194 | * Ensure that the wakeup is visible before setting r_msg as | ||
195 | * the receiving end depends on it: either spinning on a nil, | ||
196 | * or dealing with -EAGAIN cases. See lockless receive part 1 | ||
197 | * and 2 in do_msgrcv(). | ||
198 | */ | ||
199 | smp_wmb(); /* barrier (B) */ | ||
200 | msr->r_msg = ERR_PTR(res); | ||
201 | } | 223 | } |
202 | } | 224 | } |
203 | 225 | ||
@@ -213,11 +235,13 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) | |||
213 | { | 235 | { |
214 | struct msg_msg *msg, *t; | 236 | struct msg_msg *msg, *t; |
215 | struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); | 237 | struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); |
238 | WAKE_Q(wake_q); | ||
216 | 239 | ||
217 | expunge_all(msq, -EIDRM); | 240 | expunge_all(msq, -EIDRM, &wake_q); |
218 | ss_wakeup(&msq->q_senders, 1); | 241 | ss_wakeup(msq, &wake_q, true); |
219 | msg_rmid(ns, msq); | 242 | msg_rmid(ns, msq); |
220 | ipc_unlock_object(&msq->q_perm); | 243 | ipc_unlock_object(&msq->q_perm); |
244 | wake_up_q(&wake_q); | ||
221 | rcu_read_unlock(); | 245 | rcu_read_unlock(); |
222 | 246 | ||
223 | list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { | 247 | list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { |
@@ -372,6 +396,9 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, | |||
372 | freeque(ns, ipcp); | 396 | freeque(ns, ipcp); |
373 | goto out_up; | 397 | goto out_up; |
374 | case IPC_SET: | 398 | case IPC_SET: |
399 | { | ||
400 | WAKE_Q(wake_q); | ||
401 | |||
375 | if (msqid64.msg_qbytes > ns->msg_ctlmnb && | 402 | if (msqid64.msg_qbytes > ns->msg_ctlmnb && |
376 | !capable(CAP_SYS_RESOURCE)) { | 403 | !capable(CAP_SYS_RESOURCE)) { |
377 | err = -EPERM; | 404 | err = -EPERM; |
@@ -386,15 +413,21 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, | |||
386 | msq->q_qbytes = msqid64.msg_qbytes; | 413 | msq->q_qbytes = msqid64.msg_qbytes; |
387 | 414 | ||
388 | msq->q_ctime = get_seconds(); | 415 | msq->q_ctime = get_seconds(); |
389 | /* sleeping receivers might be excluded by | 416 | /* |
417 | * Sleeping receivers might be excluded by | ||
390 | * stricter permissions. | 418 | * stricter permissions. |
391 | */ | 419 | */ |
392 | expunge_all(msq, -EAGAIN); | 420 | expunge_all(msq, -EAGAIN, &wake_q); |
393 | /* sleeping senders might be able to send | 421 | /* |
422 | * Sleeping senders might be able to send | ||
394 | * due to a larger queue size. | 423 | * due to a larger queue size. |
395 | */ | 424 | */ |
396 | ss_wakeup(&msq->q_senders, 0); | 425 | ss_wakeup(msq, &wake_q, false); |
397 | break; | 426 | ipc_unlock_object(&msq->q_perm); |
427 | wake_up_q(&wake_q); | ||
428 | |||
429 | goto out_unlock1; | ||
430 | } | ||
398 | default: | 431 | default: |
399 | err = -EINVAL; | 432 | err = -EINVAL; |
400 | goto out_unlock1; | 433 | goto out_unlock1; |
@@ -566,7 +599,8 @@ static int testmsg(struct msg_msg *msg, long type, int mode) | |||
566 | return 0; | 599 | return 0; |
567 | } | 600 | } |
568 | 601 | ||
569 | static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) | 602 | static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, |
603 | struct wake_q_head *wake_q) | ||
570 | { | 604 | { |
571 | struct msg_receiver *msr, *t; | 605 | struct msg_receiver *msr, *t; |
572 | 606 | ||
@@ -577,27 +611,14 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) | |||
577 | 611 | ||
578 | list_del(&msr->r_list); | 612 | list_del(&msr->r_list); |
579 | if (msr->r_maxsize < msg->m_ts) { | 613 | if (msr->r_maxsize < msg->m_ts) { |
580 | /* initialize pipelined send ordering */ | 614 | wake_q_add(wake_q, msr->r_tsk); |
581 | msr->r_msg = NULL; | 615 | WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG)); |
582 | wake_up_process(msr->r_tsk); | ||
583 | /* barrier (B) see barrier comment below */ | ||
584 | smp_wmb(); | ||
585 | msr->r_msg = ERR_PTR(-E2BIG); | ||
586 | } else { | 616 | } else { |
587 | msr->r_msg = NULL; | ||
588 | msq->q_lrpid = task_pid_vnr(msr->r_tsk); | 617 | msq->q_lrpid = task_pid_vnr(msr->r_tsk); |
589 | msq->q_rtime = get_seconds(); | 618 | msq->q_rtime = get_seconds(); |
590 | wake_up_process(msr->r_tsk); | ||
591 | /* | ||
592 | * Ensure that the wakeup is visible before | ||
593 | * setting r_msg, as the receiving can otherwise | ||
594 | * exit - once r_msg is set, the receiver can | ||
595 | * continue. See lockless receive part 1 and 2 | ||
596 | * in do_msgrcv(). Barrier (B). | ||
597 | */ | ||
598 | smp_wmb(); | ||
599 | msr->r_msg = msg; | ||
600 | 619 | ||
620 | wake_q_add(wake_q, msr->r_tsk); | ||
621 | WRITE_ONCE(msr->r_msg, msg); | ||
601 | return 1; | 622 | return 1; |
602 | } | 623 | } |
603 | } | 624 | } |
@@ -613,6 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
613 | struct msg_msg *msg; | 634 | struct msg_msg *msg; |
614 | int err; | 635 | int err; |
615 | struct ipc_namespace *ns; | 636 | struct ipc_namespace *ns; |
637 | WAKE_Q(wake_q); | ||
616 | 638 | ||
617 | ns = current->nsproxy->ipc_ns; | 639 | ns = current->nsproxy->ipc_ns; |
618 | 640 | ||
@@ -654,10 +676,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
654 | if (err) | 676 | if (err) |
655 | goto out_unlock0; | 677 | goto out_unlock0; |
656 | 678 | ||
657 | if (msgsz + msq->q_cbytes <= msq->q_qbytes && | 679 | if (msg_fits_inqueue(msq, msgsz)) |
658 | 1 + msq->q_qnum <= msq->q_qbytes) { | ||
659 | break; | 680 | break; |
660 | } | ||
661 | 681 | ||
662 | /* queue full, wait: */ | 682 | /* queue full, wait: */ |
663 | if (msgflg & IPC_NOWAIT) { | 683 | if (msgflg & IPC_NOWAIT) { |
@@ -666,7 +686,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
666 | } | 686 | } |
667 | 687 | ||
668 | /* enqueue the sender and prepare to block */ | 688 | /* enqueue the sender and prepare to block */ |
669 | ss_add(msq, &s); | 689 | ss_add(msq, &s, msgsz); |
670 | 690 | ||
671 | if (!ipc_rcu_getref(msq)) { | 691 | if (!ipc_rcu_getref(msq)) { |
672 | err = -EIDRM; | 692 | err = -EIDRM; |
@@ -686,7 +706,6 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
686 | err = -EIDRM; | 706 | err = -EIDRM; |
687 | goto out_unlock0; | 707 | goto out_unlock0; |
688 | } | 708 | } |
689 | |||
690 | ss_del(&s); | 709 | ss_del(&s); |
691 | 710 | ||
692 | if (signal_pending(current)) { | 711 | if (signal_pending(current)) { |
@@ -695,10 +714,11 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
695 | } | 714 | } |
696 | 715 | ||
697 | } | 716 | } |
717 | |||
698 | msq->q_lspid = task_tgid_vnr(current); | 718 | msq->q_lspid = task_tgid_vnr(current); |
699 | msq->q_stime = get_seconds(); | 719 | msq->q_stime = get_seconds(); |
700 | 720 | ||
701 | if (!pipelined_send(msq, msg)) { | 721 | if (!pipelined_send(msq, msg, &wake_q)) { |
702 | /* no one is waiting for this message, enqueue it */ | 722 | /* no one is waiting for this message, enqueue it */ |
703 | list_add_tail(&msg->m_list, &msq->q_messages); | 723 | list_add_tail(&msg->m_list, &msq->q_messages); |
704 | msq->q_cbytes += msgsz; | 724 | msq->q_cbytes += msgsz; |
@@ -712,6 +732,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
712 | 732 | ||
713 | out_unlock0: | 733 | out_unlock0: |
714 | ipc_unlock_object(&msq->q_perm); | 734 | ipc_unlock_object(&msq->q_perm); |
735 | wake_up_q(&wake_q); | ||
715 | out_unlock1: | 736 | out_unlock1: |
716 | rcu_read_unlock(); | 737 | rcu_read_unlock(); |
717 | if (msg != NULL) | 738 | if (msg != NULL) |
@@ -829,6 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
829 | struct msg_queue *msq; | 850 | struct msg_queue *msq; |
830 | struct ipc_namespace *ns; | 851 | struct ipc_namespace *ns; |
831 | struct msg_msg *msg, *copy = NULL; | 852 | struct msg_msg *msg, *copy = NULL; |
853 | WAKE_Q(wake_q); | ||
832 | 854 | ||
833 | ns = current->nsproxy->ipc_ns; | 855 | ns = current->nsproxy->ipc_ns; |
834 | 856 | ||
@@ -893,7 +915,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
893 | msq->q_cbytes -= msg->m_ts; | 915 | msq->q_cbytes -= msg->m_ts; |
894 | atomic_sub(msg->m_ts, &ns->msg_bytes); | 916 | atomic_sub(msg->m_ts, &ns->msg_bytes); |
895 | atomic_dec(&ns->msg_hdrs); | 917 | atomic_dec(&ns->msg_hdrs); |
896 | ss_wakeup(&msq->q_senders, 0); | 918 | ss_wakeup(msq, &wake_q, false); |
897 | 919 | ||
898 | goto out_unlock0; | 920 | goto out_unlock0; |
899 | } | 921 | } |
@@ -919,71 +941,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
919 | rcu_read_unlock(); | 941 | rcu_read_unlock(); |
920 | schedule(); | 942 | schedule(); |
921 | 943 | ||
922 | /* Lockless receive, part 1: | 944 | /* |
923 | * Disable preemption. We don't hold a reference to the queue | 945 | * Lockless receive, part 1: |
924 | * and getting a reference would defeat the idea of a lockless | 946 | * We don't hold a reference to the queue and getting a |
925 | * operation, thus the code relies on rcu to guarantee the | 947 | * reference would defeat the idea of a lockless operation, |
926 | * existence of msq: | 948 | * thus the code relies on rcu to guarantee the existence of |
949 | * msq: | ||
927 | * Prior to destruction, expunge_all(-EIDRM) changes r_msg. | 950 | * Prior to destruction, expunge_all(-EIDRM) changes r_msg. |
928 | * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed. | 951 | * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed. |
929 | * rcu_read_lock() prevents preemption between reading r_msg | ||
930 | * and acquiring the q_perm.lock in ipc_lock_object(). | ||
931 | */ | 952 | */ |
932 | rcu_read_lock(); | 953 | rcu_read_lock(); |
933 | 954 | ||
934 | /* Lockless receive, part 2: | 955 | /* |
935 | * Wait until pipelined_send or expunge_all are outside of | 956 | * Lockless receive, part 2: |
936 | * wake_up_process(). There is a race with exit(), see | 957 | * The work in pipelined_send() and expunge_all(): |
937 | * ipc/mqueue.c for the details. The correct serialization | 958 | * - Set pointer to message |
938 | * ensures that a receiver cannot continue without the wakeup | 959 | * - Queue the receiver task for later wakeup |
939 | * being visible _before_ setting r_msg: | 960 | * - Wake up the process after the lock is dropped. |
940 | * | ||
941 | * CPU 0 CPU 1 | ||
942 | * <loop receiver> | ||
943 | * smp_rmb(); (A) <-- pair -. <waker thread> | ||
944 | * <load ->r_msg> | msr->r_msg = NULL; | ||
945 | * | wake_up_process(); | ||
946 | * <continue> `------> smp_wmb(); (B) | ||
947 | * msr->r_msg = msg; | ||
948 | * | 961 | * |
949 | * Where (A) orders the message value read and where (B) orders | 962 | * Should the process wake up before this wakeup (due to a |
950 | * the write to the r_msg -- done in both pipelined_send and | 963 | * signal) it will either see the message and continue ... |
951 | * expunge_all. | ||
952 | */ | ||
953 | for (;;) { | ||
954 | /* | ||
955 | * Pairs with writer barrier in pipelined_send | ||
956 | * or expunge_all. | ||
957 | */ | ||
958 | smp_rmb(); /* barrier (A) */ | ||
959 | msg = (struct msg_msg *)msr_d.r_msg; | ||
960 | if (msg) | ||
961 | break; | ||
962 | |||
963 | /* | ||
964 | * The cpu_relax() call is a compiler barrier | ||
965 | * which forces everything in this loop to be | ||
966 | * re-loaded. | ||
967 | */ | ||
968 | cpu_relax(); | ||
969 | } | ||
970 | |||
971 | /* Lockless receive, part 3: | ||
972 | * If there is a message or an error then accept it without | ||
973 | * locking. | ||
974 | */ | 964 | */ |
965 | msg = READ_ONCE(msr_d.r_msg); | ||
975 | if (msg != ERR_PTR(-EAGAIN)) | 966 | if (msg != ERR_PTR(-EAGAIN)) |
976 | goto out_unlock1; | 967 | goto out_unlock1; |
977 | 968 | ||
978 | /* Lockless receive, part 3: | 969 | /* |
979 | * Acquire the queue spinlock. | 970 | * ... or see -EAGAIN, acquire the lock to check the message |
980 | */ | 971 | * again. |
972 | */ | ||
981 | ipc_lock_object(&msq->q_perm); | 973 | ipc_lock_object(&msq->q_perm); |
982 | 974 | ||
983 | /* Lockless receive, part 4: | 975 | msg = msr_d.r_msg; |
984 | * Repeat test after acquiring the spinlock. | ||
985 | */ | ||
986 | msg = (struct msg_msg *)msr_d.r_msg; | ||
987 | if (msg != ERR_PTR(-EAGAIN)) | 976 | if (msg != ERR_PTR(-EAGAIN)) |
988 | goto out_unlock0; | 977 | goto out_unlock0; |
989 | 978 | ||
@@ -998,6 +987,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
998 | 987 | ||
999 | out_unlock0: | 988 | out_unlock0: |
1000 | ipc_unlock_object(&msq->q_perm); | 989 | ipc_unlock_object(&msq->q_perm); |
990 | wake_up_q(&wake_q); | ||
1001 | out_unlock1: | 991 | out_unlock1: |
1002 | rcu_read_unlock(); | 992 | rcu_read_unlock(); |
1003 | if (IS_ERR(msg)) { | 993 | if (IS_ERR(msg)) { |
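The do_msgsnd()/do_msgrcv() hunks above all follow the same wake_q pattern: record the tasks to wake while the ipc lock is held, and issue the wakeups only after the lock has been dropped. A minimal sketch of that pattern, using a made-up waiter list rather than the real msg_receiver/msg_sender structures:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct waiter {
	struct list_head	list;
	struct task_struct	*task;
};

static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
{
	struct waiter *w, *tmp;
	WAKE_Q(wake_q);				/* on-stack wake queue, as in the hunks above */

	spin_lock(lock);
	list_for_each_entry_safe(w, tmp, waiters, list) {
		list_del_init(&w->list);
		wake_q_add(&wake_q, w->task);	/* defer the wakeup, keep the lock hold time short */
	}
	spin_unlock(lock);

	wake_up_q(&wake_q);			/* wake everyone only after the lock is dropped */
}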
@@ -162,14 +162,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); | |||
162 | 162 | ||
163 | /* | 163 | /* |
164 | * Locking: | 164 | * Locking: |
165 | * a) global sem_lock() for read/write | ||
165 | * sem_undo.id_next, | 166 | * sem_undo.id_next, |
166 | * sem_array.complex_count, | 167 | * sem_array.complex_count, |
167 | * sem_array.pending{_alter,_cont}, | 168 | * sem_array.complex_mode |
168 | * sem_array.sem_undo: global sem_lock() for read/write | 169 | * sem_array.pending{_alter,_const}, |
169 | * sem_undo.proc_next: only "current" is allowed to read/write that field. | 170 | * sem_array.sem_undo |
170 | * | 171 | * |
172 | * b) global or semaphore sem_lock() for read/write: | ||
171 | * sem_array.sem_base[i].pending_{const,alter}: | 173 | * sem_array.sem_base[i].pending_{const,alter}: |
172 | * global or semaphore sem_lock() for read/write | 174 | * sem_array.complex_mode (for read) |
175 | * | ||
176 | * c) special: | ||
177 | * sem_undo_list.list_proc: | ||
178 | * * undo_list->lock for write | ||
179 | * * rcu for read | ||
173 | */ | 180 | */ |
174 | 181 | ||
175 | #define sc_semmsl sem_ctls[0] | 182 | #define sc_semmsl sem_ctls[0] |
@@ -260,31 +267,62 @@ static void sem_rcu_free(struct rcu_head *head) | |||
260 | } | 267 | } |
261 | 268 | ||
262 | /* | 269 | /* |
263 | * Wait until all currently ongoing simple ops have completed. | 270 | * Enter the mode suitable for non-simple operations: |
264 | * Caller must own sem_perm.lock. | 271 | * Caller must own sem_perm.lock. |
265 | * New simple ops cannot start, because simple ops first check | ||
266 | * that sem_perm.lock is free. | ||
267 | * that a) sem_perm.lock is free and b) complex_count is 0. | ||
268 | */ | 272 | */ |
269 | static void sem_wait_array(struct sem_array *sma) | 273 | static void complexmode_enter(struct sem_array *sma) |
270 | { | 274 | { |
271 | int i; | 275 | int i; |
272 | struct sem *sem; | 276 | struct sem *sem; |
273 | 277 | ||
274 | if (sma->complex_count) { | 278 | if (sma->complex_mode) { |
275 | /* The thread that increased sma->complex_count waited on | 279 | /* We are already in complex_mode. Nothing to do */ |
276 | * all sem->lock locks. Thus we don't need to wait again. | ||
277 | */ | ||
278 | return; | 280 | return; |
279 | } | 281 | } |
280 | 282 | ||
283 | /* We need a full barrier after setting complex_mode: | ||
284 | * The write to complex_mode must be visible | ||
285 | * before we read the first sem->lock spinlock state. | ||
286 | */ | ||
287 | smp_store_mb(sma->complex_mode, true); | ||
288 | |||
281 | for (i = 0; i < sma->sem_nsems; i++) { | 289 | for (i = 0; i < sma->sem_nsems; i++) { |
282 | sem = sma->sem_base + i; | 290 | sem = sma->sem_base + i; |
283 | spin_unlock_wait(&sem->lock); | 291 | spin_unlock_wait(&sem->lock); |
284 | } | 292 | } |
293 | /* | ||
294 | * spin_unlock_wait() is not a memory barrier, it is only a | ||
295 | * control barrier. The code must pair with spin_unlock(&sem->lock), | ||
296 | * thus just the control barrier is insufficient. | ||
297 | * | ||
298 | * smp_rmb() is sufficient, as writes cannot pass the control barrier. | ||
299 | */ | ||
300 | smp_rmb(); | ||
285 | } | 301 | } |
286 | 302 | ||
287 | /* | 303 | /* |
304 | * Try to leave the mode that disallows simple operations: | ||
305 | * Caller must own sem_perm.lock. | ||
306 | */ | ||
307 | static void complexmode_tryleave(struct sem_array *sma) | ||
308 | { | ||
309 | if (sma->complex_count) { | ||
310 | /* Complex ops are sleeping. | ||
311 | * We must stay in complex mode | ||
312 | */ | ||
313 | return; | ||
314 | } | ||
315 | /* | ||
316 | * Immediately after setting complex_mode to false, | ||
317 | * a simple op can start. Thus: all memory writes | ||
318 | * performed by the current operation must be visible | ||
319 | * before we set complex_mode to false. | ||
320 | */ | ||
321 | smp_store_release(&sma->complex_mode, false); | ||
322 | } | ||
323 | |||
324 | #define SEM_GLOBAL_LOCK (-1) | ||
325 | /* | ||
288 | * If the request contains only one semaphore operation, and there are | 326 | * If the request contains only one semaphore operation, and there are |
289 | * no complex transactions pending, lock only the semaphore involved. | 327 | * no complex transactions pending, lock only the semaphore involved. |
290 | * Otherwise, lock the entire semaphore array, since we either have | 328 | * Otherwise, lock the entire semaphore array, since we either have |
@@ -300,56 +338,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, | |||
300 | /* Complex operation - acquire a full lock */ | 338 | /* Complex operation - acquire a full lock */ |
301 | ipc_lock_object(&sma->sem_perm); | 339 | ipc_lock_object(&sma->sem_perm); |
302 | 340 | ||
303 | /* And wait until all simple ops that are processed | 341 | /* Prevent parallel simple ops */ |
304 | * right now have dropped their locks. | 342 | complexmode_enter(sma); |
305 | */ | 343 | return SEM_GLOBAL_LOCK; |
306 | sem_wait_array(sma); | ||
307 | return -1; | ||
308 | } | 344 | } |
309 | 345 | ||
310 | /* | 346 | /* |
311 | * Only one semaphore affected - try to optimize locking. | 347 | * Only one semaphore affected - try to optimize locking. |
312 | * The rules are: | 348 | * Optimized locking is possible if no complex operation |
313 | * - optimized locking is possible if no complex operation | 349 | * is either enqueued or processed right now. |
314 | * is either enqueued or processed right now. | 350 | * |
315 | * - The test for enqueued complex ops is simple: | 351 | * Both facts are tracked by complex_mode. |
316 | * sma->complex_count != 0 | ||
317 | * - Testing for complex ops that are processed right now is | ||
318 | * a bit more difficult. Complex ops acquire the full lock | ||
319 | * and first wait that the running simple ops have completed. | ||
320 | * (see above) | ||
321 | * Thus: If we own a simple lock and the global lock is free | ||
322 | * and complex_count is now 0, then it will stay 0 and | ||
323 | * thus just locking sem->lock is sufficient. | ||
324 | */ | 352 | */ |
325 | sem = sma->sem_base + sops->sem_num; | 353 | sem = sma->sem_base + sops->sem_num; |
326 | 354 | ||
327 | if (sma->complex_count == 0) { | 355 | /* |
356 | * Initial check for complex_mode. Just an optimization, | ||
357 | * no locking, no memory barrier. | ||
358 | */ | ||
359 | if (!sma->complex_mode) { | ||
328 | /* | 360 | /* |
329 | * It appears that no complex operation is around. | 361 | * It appears that no complex operation is around. |
330 | * Acquire the per-semaphore lock. | 362 | * Acquire the per-semaphore lock. |
331 | */ | 363 | */ |
332 | spin_lock(&sem->lock); | 364 | spin_lock(&sem->lock); |
333 | 365 | ||
334 | /* Then check that the global lock is free */ | 366 | /* |
335 | if (!spin_is_locked(&sma->sem_perm.lock)) { | 367 | * See 51d7d5205d33 |
336 | /* | 368 | * ("powerpc: Add smp_mb() to arch_spin_is_locked()"): |
337 | * We need a memory barrier with acquire semantics, | 369 | * A full barrier is required: the write of sem->lock |
338 | * otherwise we can race with another thread that does: | 370 | * must be visible before the read is executed |
339 | * complex_count++; | 371 | */ |
340 | * spin_unlock(sem_perm.lock); | 372 | smp_mb(); |
341 | */ | ||
342 | smp_acquire__after_ctrl_dep(); | ||
343 | 373 | ||
344 | /* | 374 | if (!smp_load_acquire(&sma->complex_mode)) { |
345 | * Now repeat the test of complex_count: | 375 | /* fast path successful! */ |
346 | * It can't change anymore until we drop sem->lock. | 376 | return sops->sem_num; |
347 | * Thus: if is now 0, then it will stay 0. | ||
348 | */ | ||
349 | if (sma->complex_count == 0) { | ||
350 | /* fast path successful! */ | ||
351 | return sops->sem_num; | ||
352 | } | ||
353 | } | 377 | } |
354 | spin_unlock(&sem->lock); | 378 | spin_unlock(&sem->lock); |
355 | } | 379 | } |
@@ -369,15 +393,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, | |||
369 | /* Not a false alarm, thus complete the sequence for a | 393 | /* Not a false alarm, thus complete the sequence for a |
370 | * full lock. | 394 | * full lock. |
371 | */ | 395 | */ |
372 | sem_wait_array(sma); | 396 | complexmode_enter(sma); |
373 | return -1; | 397 | return SEM_GLOBAL_LOCK; |
374 | } | 398 | } |
375 | } | 399 | } |
376 | 400 | ||
377 | static inline void sem_unlock(struct sem_array *sma, int locknum) | 401 | static inline void sem_unlock(struct sem_array *sma, int locknum) |
378 | { | 402 | { |
379 | if (locknum == -1) { | 403 | if (locknum == SEM_GLOBAL_LOCK) { |
380 | unmerge_queues(sma); | 404 | unmerge_queues(sma); |
405 | complexmode_tryleave(sma); | ||
381 | ipc_unlock_object(&sma->sem_perm); | 406 | ipc_unlock_object(&sma->sem_perm); |
382 | } else { | 407 | } else { |
383 | struct sem *sem = sma->sem_base + locknum; | 408 | struct sem *sem = sma->sem_base + locknum; |
@@ -529,6 +554,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) | |||
529 | } | 554 | } |
530 | 555 | ||
531 | sma->complex_count = 0; | 556 | sma->complex_count = 0; |
557 | sma->complex_mode = true; /* dropped by sem_unlock below */ | ||
532 | INIT_LIST_HEAD(&sma->pending_alter); | 558 | INIT_LIST_HEAD(&sma->pending_alter); |
533 | INIT_LIST_HEAD(&sma->pending_const); | 559 | INIT_LIST_HEAD(&sma->pending_const); |
534 | INIT_LIST_HEAD(&sma->list_id); | 560 | INIT_LIST_HEAD(&sma->list_id); |
@@ -2079,6 +2105,8 @@ void exit_sem(struct task_struct *tsk) | |||
2079 | struct list_head tasks; | 2105 | struct list_head tasks; |
2080 | int semid, i; | 2106 | int semid, i; |
2081 | 2107 | ||
2108 | cond_resched(); | ||
2109 | |||
2082 | rcu_read_lock(); | 2110 | rcu_read_lock(); |
2083 | un = list_entry_rcu(ulp->list_proc.next, | 2111 | un = list_entry_rcu(ulp->list_proc.next, |
2084 | struct sem_undo, list_proc); | 2112 | struct sem_undo, list_proc); |
@@ -2184,10 +2212,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) | |||
2184 | /* | 2212 | /* |
2185 | * The proc interface isn't aware of sem_lock(), it calls | 2213 | * The proc interface isn't aware of sem_lock(), it calls |
2186 | * ipc_lock_object() directly (in sysvipc_find_ipc). | 2214 | * ipc_lock_object() directly (in sysvipc_find_ipc). |
2187 | * In order to stay compatible with sem_lock(), we must wait until | 2215 | * In order to stay compatible with sem_lock(), we must |
2188 | * all simple semop() calls have left their critical regions. | 2216 | * enter / leave complex_mode. |
2189 | */ | 2217 | */ |
2190 | sem_wait_array(sma); | 2218 | complexmode_enter(sma); |
2191 | 2219 | ||
2192 | sem_otime = get_semotime(sma); | 2220 | sem_otime = get_semotime(sma); |
2193 | 2221 | ||
@@ -2204,6 +2232,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) | |||
2204 | sem_otime, | 2232 | sem_otime, |
2205 | sma->sem_ctime); | 2233 | sma->sem_ctime); |
2206 | 2234 | ||
2235 | complexmode_tryleave(sma); | ||
2236 | |||
2207 | return 0; | 2237 | return 0; |
2208 | } | 2238 | } |
2209 | #endif | 2239 | #endif |
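Condensed restatement of the complex_mode barrier pairing introduced above. The function names are illustrative (the real ones are complexmode_enter() and the fast path inside sem_lock()), and the sketch relies on the ipc/sem.c internal struct sem_array/struct sem definitions; sem_perm locking, error paths and the slow path are omitted:

#include <linux/spinlock.h>

/* Writer side, as in complexmode_enter(): called with sem_perm.lock held. */
static void enter_complex_mode(struct sem_array *sma)
{
	int i;

	if (sma->complex_mode)
		return;				/* already in complex mode */

	smp_store_mb(sma->complex_mode, true);	/* store, then full barrier */
	for (i = 0; i < sma->sem_nsems; i++)
		spin_unlock_wait(&sma->sem_base[i].lock);
	smp_rmb();				/* upgrade the control dependency of spin_unlock_wait() */
}

/* Reader side, as in the simple-op fast path of sem_lock(). */
static bool try_simple_lock(struct sem_array *sma, int semnum)
{
	struct sem *sem = sma->sem_base + semnum;

	if (sma->complex_mode)			/* unlocked hint, no barrier */
		return false;

	spin_lock(&sem->lock);
	smp_mb();				/* make the lock store visible before re-reading complex_mode */
	if (!smp_load_acquire(&sma->complex_mode))
		return true;			/* per-semaphore lock is sufficient */

	spin_unlock(&sem->lock);
	return false;				/* fall back to the global lock */
}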
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config index 9f748ed7bea8..1a8f34f63601 100644 --- a/kernel/configs/android-base.config +++ b/kernel/configs/android-base.config | |||
@@ -11,7 +11,6 @@ CONFIG_ANDROID_LOW_MEMORY_KILLER=y | |||
11 | CONFIG_ARMV8_DEPRECATED=y | 11 | CONFIG_ARMV8_DEPRECATED=y |
12 | CONFIG_ASHMEM=y | 12 | CONFIG_ASHMEM=y |
13 | CONFIG_AUDIT=y | 13 | CONFIG_AUDIT=y |
14 | CONFIG_BLK_DEV_DM=y | ||
15 | CONFIG_BLK_DEV_INITRD=y | 14 | CONFIG_BLK_DEV_INITRD=y |
16 | CONFIG_CGROUPS=y | 15 | CONFIG_CGROUPS=y |
17 | CONFIG_CGROUP_CPUACCT=y | 16 | CONFIG_CGROUP_CPUACCT=y |
@@ -19,9 +18,7 @@ CONFIG_CGROUP_DEBUG=y | |||
19 | CONFIG_CGROUP_FREEZER=y | 18 | CONFIG_CGROUP_FREEZER=y |
20 | CONFIG_CGROUP_SCHED=y | 19 | CONFIG_CGROUP_SCHED=y |
21 | CONFIG_CP15_BARRIER_EMULATION=y | 20 | CONFIG_CP15_BARRIER_EMULATION=y |
22 | CONFIG_DM_CRYPT=y | 21 | CONFIG_DEFAULT_SECURITY_SELINUX=y |
23 | CONFIG_DM_VERITY=y | ||
24 | CONFIG_DM_VERITY_FEC=y | ||
25 | CONFIG_EMBEDDED=y | 22 | CONFIG_EMBEDDED=y |
26 | CONFIG_FB=y | 23 | CONFIG_FB=y |
27 | CONFIG_HIGH_RES_TIMERS=y | 24 | CONFIG_HIGH_RES_TIMERS=y |
@@ -41,7 +38,6 @@ CONFIG_IPV6=y | |||
41 | CONFIG_IPV6_MIP6=y | 38 | CONFIG_IPV6_MIP6=y |
42 | CONFIG_IPV6_MULTIPLE_TABLES=y | 39 | CONFIG_IPV6_MULTIPLE_TABLES=y |
43 | CONFIG_IPV6_OPTIMISTIC_DAD=y | 40 | CONFIG_IPV6_OPTIMISTIC_DAD=y |
44 | CONFIG_IPV6_PRIVACY=y | ||
45 | CONFIG_IPV6_ROUTER_PREF=y | 41 | CONFIG_IPV6_ROUTER_PREF=y |
46 | CONFIG_IPV6_ROUTE_INFO=y | 42 | CONFIG_IPV6_ROUTE_INFO=y |
47 | CONFIG_IP_ADVANCED_ROUTER=y | 43 | CONFIG_IP_ADVANCED_ROUTER=y |
@@ -135,6 +131,7 @@ CONFIG_PREEMPT=y | |||
135 | CONFIG_QUOTA=y | 131 | CONFIG_QUOTA=y |
136 | CONFIG_RTC_CLASS=y | 132 | CONFIG_RTC_CLASS=y |
137 | CONFIG_RT_GROUP_SCHED=y | 133 | CONFIG_RT_GROUP_SCHED=y |
134 | CONFIG_SECCOMP=y | ||
138 | CONFIG_SECURITY=y | 135 | CONFIG_SECURITY=y |
139 | CONFIG_SECURITY_NETWORK=y | 136 | CONFIG_SECURITY_NETWORK=y |
140 | CONFIG_SECURITY_SELINUX=y | 137 | CONFIG_SECURITY_SELINUX=y |
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config index e3b953e966d2..297756be369c 100644 --- a/kernel/configs/android-recommended.config +++ b/kernel/configs/android-recommended.config | |||
@@ -6,12 +6,16 @@ | |||
6 | # CONFIG_PM_WAKELOCKS_GC is not set | 6 | # CONFIG_PM_WAKELOCKS_GC is not set |
7 | # CONFIG_VT is not set | 7 | # CONFIG_VT is not set |
8 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 8 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
9 | CONFIG_BLK_DEV_DM=y | ||
9 | CONFIG_BLK_DEV_LOOP=y | 10 | CONFIG_BLK_DEV_LOOP=y |
10 | CONFIG_BLK_DEV_RAM=y | 11 | CONFIG_BLK_DEV_RAM=y |
11 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 12 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
12 | CONFIG_COMPACTION=y | 13 | CONFIG_COMPACTION=y |
13 | CONFIG_DEBUG_RODATA=y | 14 | CONFIG_DEBUG_RODATA=y |
15 | CONFIG_DM_CRYPT=y | ||
14 | CONFIG_DM_UEVENT=y | 16 | CONFIG_DM_UEVENT=y |
17 | CONFIG_DM_VERITY=y | ||
18 | CONFIG_DM_VERITY_FEC=y | ||
15 | CONFIG_DRAGONRISE_FF=y | 19 | CONFIG_DRAGONRISE_FF=y |
16 | CONFIG_ENABLE_DEFAULT_TRACERS=y | 20 | CONFIG_ENABLE_DEFAULT_TRACERS=y |
17 | CONFIG_EXT4_FS=y | 21 | CONFIG_EXT4_FS=y |
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 432c3d71d195..2b59c82cc3e1 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
@@ -98,26 +98,26 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) | |||
98 | 98 | ||
99 | trace_sched_process_hang(t); | 99 | trace_sched_process_hang(t); |
100 | 100 | ||
101 | if (!sysctl_hung_task_warnings) | 101 | if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic) |
102 | return; | 102 | return; |
103 | 103 | ||
104 | if (sysctl_hung_task_warnings > 0) | ||
105 | sysctl_hung_task_warnings--; | ||
106 | |||
107 | /* | 104 | /* |
108 | * Ok, the task did not get scheduled for more than 2 minutes, | 105 | * Ok, the task did not get scheduled for more than 2 minutes, |
109 | * complain: | 106 | * complain: |
110 | */ | 107 | */ |
111 | pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", | 108 | if (sysctl_hung_task_warnings) { |
112 | t->comm, t->pid, timeout); | 109 | sysctl_hung_task_warnings--; |
113 | pr_err(" %s %s %.*s\n", | 110 | pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", |
114 | print_tainted(), init_utsname()->release, | 111 | t->comm, t->pid, timeout); |
115 | (int)strcspn(init_utsname()->version, " "), | 112 | pr_err(" %s %s %.*s\n", |
116 | init_utsname()->version); | 113 | print_tainted(), init_utsname()->release, |
117 | pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" | 114 | (int)strcspn(init_utsname()->version, " "), |
118 | " disables this message.\n"); | 115 | init_utsname()->version); |
119 | sched_show_task(t); | 116 | pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" |
120 | debug_show_all_locks(); | 117 | " disables this message.\n"); |
118 | sched_show_task(t); | ||
119 | debug_show_all_locks(); | ||
120 | } | ||
121 | 121 | ||
122 | touch_nmi_watchdog(); | 122 | touch_nmi_watchdog(); |
123 | 123 | ||
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index d10ab6b9b5e0..d63095472ea9 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include <linux/cpu.h> | 49 | #include <linux/cpu.h> |
50 | #include <linux/jump_label.h> | 50 | #include <linux/jump_label.h> |
51 | 51 | ||
52 | #include <asm-generic/sections.h> | 52 | #include <asm/sections.h> |
53 | #include <asm/cacheflush.h> | 53 | #include <asm/cacheflush.h> |
54 | #include <asm/errno.h> | 54 | #include <asm/errno.h> |
55 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 4ab4c3766a80..be2cc1f9dd57 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -138,7 +138,7 @@ void *kthread_data(struct task_struct *task) | |||
138 | } | 138 | } |
139 | 139 | ||
140 | /** | 140 | /** |
141 | * probe_kthread_data - speculative version of kthread_data() | 141 | * kthread_probe_data - speculative version of kthread_data() |
142 | * @task: possible kthread task in question | 142 | * @task: possible kthread task in question |
143 | * | 143 | * |
144 | * @task could be a kthread task. Return the data value specified when it | 144 | * @task could be a kthread task. Return the data value specified when it |
@@ -146,7 +146,7 @@ void *kthread_data(struct task_struct *task) | |||
146 | * inaccessible for any reason, %NULL is returned. This function requires | 146 | * inaccessible for any reason, %NULL is returned. This function requires |
147 | * that @task itself is safe to dereference. | 147 | * that @task itself is safe to dereference. |
148 | */ | 148 | */ |
149 | void *probe_kthread_data(struct task_struct *task) | 149 | void *kthread_probe_data(struct task_struct *task) |
150 | { | 150 | { |
151 | struct kthread *kthread = to_kthread(task); | 151 | struct kthread *kthread = to_kthread(task); |
152 | void *data = NULL; | 152 | void *data = NULL; |
@@ -244,33 +244,10 @@ static void create_kthread(struct kthread_create_info *create) | |||
244 | } | 244 | } |
245 | } | 245 | } |
246 | 246 | ||
247 | /** | 247 | static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), |
248 | * kthread_create_on_node - create a kthread. | 248 | void *data, int node, |
249 | * @threadfn: the function to run until signal_pending(current). | 249 | const char namefmt[], |
250 | * @data: data ptr for @threadfn. | 250 | va_list args) |
251 | * @node: task and thread structures for the thread are allocated on this node | ||
252 | * @namefmt: printf-style name for the thread. | ||
253 | * | ||
254 | * Description: This helper function creates and names a kernel | ||
255 | * thread. The thread will be stopped: use wake_up_process() to start | ||
256 | * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and | ||
257 | * is affine to all CPUs. | ||
258 | * | ||
259 | * If thread is going to be bound on a particular cpu, give its node | ||
260 | * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. | ||
261 | * When woken, the thread will run @threadfn() with @data as its | ||
262 | * argument. @threadfn() can either call do_exit() directly if it is a | ||
263 | * standalone thread for which no one will call kthread_stop(), or | ||
264 | * return when 'kthread_should_stop()' is true (which means | ||
265 | * kthread_stop() has been called). The return value should be zero | ||
266 | * or a negative error number; it will be passed to kthread_stop(). | ||
267 | * | ||
268 | * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). | ||
269 | */ | ||
270 | struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | ||
271 | void *data, int node, | ||
272 | const char namefmt[], | ||
273 | ...) | ||
274 | { | 251 | { |
275 | DECLARE_COMPLETION_ONSTACK(done); | 252 | DECLARE_COMPLETION_ONSTACK(done); |
276 | struct task_struct *task; | 253 | struct task_struct *task; |
@@ -311,11 +288,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
311 | task = create->result; | 288 | task = create->result; |
312 | if (!IS_ERR(task)) { | 289 | if (!IS_ERR(task)) { |
313 | static const struct sched_param param = { .sched_priority = 0 }; | 290 | static const struct sched_param param = { .sched_priority = 0 }; |
314 | va_list args; | ||
315 | 291 | ||
316 | va_start(args, namefmt); | ||
317 | vsnprintf(task->comm, sizeof(task->comm), namefmt, args); | 292 | vsnprintf(task->comm, sizeof(task->comm), namefmt, args); |
318 | va_end(args); | ||
319 | /* | 293 | /* |
320 | * root may have changed our (kthreadd's) priority or CPU mask. | 294 | * root may have changed our (kthreadd's) priority or CPU mask. |
321 | * The kernel thread should not inherit these properties. | 295 | * The kernel thread should not inherit these properties. |
@@ -326,6 +300,44 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
326 | kfree(create); | 300 | kfree(create); |
327 | return task; | 301 | return task; |
328 | } | 302 | } |
303 | |||
304 | /** | ||
305 | * kthread_create_on_node - create a kthread. | ||
306 | * @threadfn: the function to run until signal_pending(current). | ||
307 | * @data: data ptr for @threadfn. | ||
308 | * @node: task and thread structures for the thread are allocated on this node | ||
309 | * @namefmt: printf-style name for the thread. | ||
310 | * | ||
311 | * Description: This helper function creates and names a kernel | ||
312 | * thread. The thread will be stopped: use wake_up_process() to start | ||
313 | * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and | ||
314 | * is affine to all CPUs. | ||
315 | * | ||
316 | * If thread is going to be bound on a particular cpu, give its node | ||
317 | * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. | ||
318 | * When woken, the thread will run @threadfn() with @data as its | ||
319 | * argument. @threadfn() can either call do_exit() directly if it is a | ||
320 | * standalone thread for which no one will call kthread_stop(), or | ||
321 | * return when 'kthread_should_stop()' is true (which means | ||
322 | * kthread_stop() has been called). The return value should be zero | ||
323 | * or a negative error number; it will be passed to kthread_stop(). | ||
324 | * | ||
325 | * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). | ||
326 | */ | ||
327 | struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | ||
328 | void *data, int node, | ||
329 | const char namefmt[], | ||
330 | ...) | ||
331 | { | ||
332 | struct task_struct *task; | ||
333 | va_list args; | ||
334 | |||
335 | va_start(args, namefmt); | ||
336 | task = __kthread_create_on_node(threadfn, data, node, namefmt, args); | ||
337 | va_end(args); | ||
338 | |||
339 | return task; | ||
340 | } | ||
329 | EXPORT_SYMBOL(kthread_create_on_node); | 341 | EXPORT_SYMBOL(kthread_create_on_node); |
330 | 342 | ||
331 | static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state) | 343 | static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state) |
@@ -390,10 +402,10 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | |||
390 | cpu); | 402 | cpu); |
391 | if (IS_ERR(p)) | 403 | if (IS_ERR(p)) |
392 | return p; | 404 | return p; |
405 | kthread_bind(p, cpu); | ||
406 | /* CPU hotplug need to bind once again when unparking the thread. */ | ||
393 | set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); | 407 | set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); |
394 | to_kthread(p)->cpu = cpu; | 408 | to_kthread(p)->cpu = cpu; |
395 | /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */ | ||
396 | kthread_park(p); | ||
397 | return p; | 409 | return p; |
398 | } | 410 | } |
399 | 411 | ||
@@ -407,6 +419,10 @@ static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) | |||
407 | * which might be about to be cleared. | 419 | * which might be about to be cleared. |
408 | */ | 420 | */ |
409 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | 421 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { |
422 | /* | ||
423 | * Newly created kthread was parked when the CPU was offline. | ||
424 | * The binding was lost and we need to set it again. | ||
425 | */ | ||
410 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | 426 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) |
411 | __kthread_bind(k, kthread->cpu, TASK_PARKED); | 427 | __kthread_bind(k, kthread->cpu, TASK_PARKED); |
412 | wake_up_state(k, TASK_PARKED); | 428 | wake_up_state(k, TASK_PARKED); |
@@ -540,39 +556,48 @@ int kthreadd(void *unused) | |||
540 | return 0; | 556 | return 0; |
541 | } | 557 | } |
542 | 558 | ||
543 | void __init_kthread_worker(struct kthread_worker *worker, | 559 | void __kthread_init_worker(struct kthread_worker *worker, |
544 | const char *name, | 560 | const char *name, |
545 | struct lock_class_key *key) | 561 | struct lock_class_key *key) |
546 | { | 562 | { |
563 | memset(worker, 0, sizeof(struct kthread_worker)); | ||
547 | spin_lock_init(&worker->lock); | 564 | spin_lock_init(&worker->lock); |
548 | lockdep_set_class_and_name(&worker->lock, key, name); | 565 | lockdep_set_class_and_name(&worker->lock, key, name); |
549 | INIT_LIST_HEAD(&worker->work_list); | 566 | INIT_LIST_HEAD(&worker->work_list); |
550 | worker->task = NULL; | 567 | INIT_LIST_HEAD(&worker->delayed_work_list); |
551 | } | 568 | } |
552 | EXPORT_SYMBOL_GPL(__init_kthread_worker); | 569 | EXPORT_SYMBOL_GPL(__kthread_init_worker); |
553 | 570 | ||
554 | /** | 571 | /** |
555 | * kthread_worker_fn - kthread function to process kthread_worker | 572 | * kthread_worker_fn - kthread function to process kthread_worker |
556 | * @worker_ptr: pointer to initialized kthread_worker | 573 | * @worker_ptr: pointer to initialized kthread_worker |
557 | * | 574 | * |
558 | * This function can be used as @threadfn to kthread_create() or | 575 | * This function implements the main cycle of a kthread worker. It processes |
559 | * kthread_run() with @worker_ptr argument pointing to an initialized | 576 | * work_list until it is stopped with kthread_stop(). It sleeps when the queue |
560 | * kthread_worker. The started kthread will process work_list until | 577 | * is empty. |
561 | * the it is stopped with kthread_stop(). A kthread can also call | 578 | * |
562 | * this function directly after extra initialization. | 579 | * The works must not hold any locks or leave preemption or interrupts disabled |
580 | * when they finish. A safe point for freezing is defined after one work | ||
581 | * finishes and before a new one is started. | ||
563 | * | 582 | * |
564 | * Different kthreads can be used for the same kthread_worker as long | 583 | * Also the works must not be handled by more than one worker at the same time, |
565 | * as there's only one kthread attached to it at any given time. A | 584 | * see also kthread_queue_work(). |
566 | * kthread_worker without an attached kthread simply collects queued | ||
567 | * kthread_works. | ||
568 | */ | 585 | */ |
569 | int kthread_worker_fn(void *worker_ptr) | 586 | int kthread_worker_fn(void *worker_ptr) |
570 | { | 587 | { |
571 | struct kthread_worker *worker = worker_ptr; | 588 | struct kthread_worker *worker = worker_ptr; |
572 | struct kthread_work *work; | 589 | struct kthread_work *work; |
573 | 590 | ||
574 | WARN_ON(worker->task); | 591 | /* |
592 | * FIXME: Update the check and remove the assignment when all kthread | ||
593 | * worker users are created using kthread_create_worker*() functions. | ||
594 | */ | ||
595 | WARN_ON(worker->task && worker->task != current); | ||
575 | worker->task = current; | 596 | worker->task = current; |
597 | |||
598 | if (worker->flags & KTW_FREEZABLE) | ||
599 | set_freezable(); | ||
600 | |||
576 | repeat: | 601 | repeat: |
577 | set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ | 602 | set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ |
578 | 603 | ||
@@ -605,12 +630,131 @@ repeat: | |||
605 | } | 630 | } |
606 | EXPORT_SYMBOL_GPL(kthread_worker_fn); | 631 | EXPORT_SYMBOL_GPL(kthread_worker_fn); |
607 | 632 | ||
608 | /* insert @work before @pos in @worker */ | 633 | static struct kthread_worker * |
609 | static void insert_kthread_work(struct kthread_worker *worker, | 634 | __kthread_create_worker(int cpu, unsigned int flags, |
610 | struct kthread_work *work, | 635 | const char namefmt[], va_list args) |
611 | struct list_head *pos) | 636 | { |
637 | struct kthread_worker *worker; | ||
638 | struct task_struct *task; | ||
639 | |||
640 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); | ||
641 | if (!worker) | ||
642 | return ERR_PTR(-ENOMEM); | ||
643 | |||
644 | kthread_init_worker(worker); | ||
645 | |||
646 | if (cpu >= 0) { | ||
647 | char name[TASK_COMM_LEN]; | ||
648 | |||
649 | /* | ||
650 | * kthread_create_worker_on_cpu() allows passing a generic | ||
651 | * namefmt, in contrast to kthread_create_on_cpu(). We need | ||
652 | * to format it here. | ||
653 | */ | ||
654 | vsnprintf(name, sizeof(name), namefmt, args); | ||
655 | task = kthread_create_on_cpu(kthread_worker_fn, worker, | ||
656 | cpu, name); | ||
657 | } else { | ||
658 | task = __kthread_create_on_node(kthread_worker_fn, worker, | ||
659 | -1, namefmt, args); | ||
660 | } | ||
661 | |||
662 | if (IS_ERR(task)) | ||
663 | goto fail_task; | ||
664 | |||
665 | worker->flags = flags; | ||
666 | worker->task = task; | ||
667 | wake_up_process(task); | ||
668 | return worker; | ||
669 | |||
670 | fail_task: | ||
671 | kfree(worker); | ||
672 | return ERR_CAST(task); | ||
673 | } | ||
674 | |||
675 | /** | ||
676 | * kthread_create_worker - create a kthread worker | ||
677 | * @flags: flags modifying the default behavior of the worker | ||
678 | * @namefmt: printf-style name for the kthread worker (task). | ||
679 | * | ||
680 | * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM) | ||
681 | * when the needed structures could not get allocated, and ERR_PTR(-EINTR) | ||
682 | * when the worker was SIGKILLed. | ||
683 | */ | ||
684 | struct kthread_worker * | ||
685 | kthread_create_worker(unsigned int flags, const char namefmt[], ...) | ||
686 | { | ||
687 | struct kthread_worker *worker; | ||
688 | va_list args; | ||
689 | |||
690 | va_start(args, namefmt); | ||
691 | worker = __kthread_create_worker(-1, flags, namefmt, args); | ||
692 | va_end(args); | ||
693 | |||
694 | return worker; | ||
695 | } | ||
696 | EXPORT_SYMBOL(kthread_create_worker); | ||
697 | |||
698 | /** | ||
699 | * kthread_create_worker_on_cpu - create a kthread worker and bind it | ||
700 | * to a given CPU and the associated NUMA node. | ||
701 | * @cpu: CPU number | ||
702 | * @flags: flags modifying the default behavior of the worker | ||
703 | * @namefmt: printf-style name for the kthread worker (task). | ||
704 | * | ||
705 | * Use a valid CPU number if you want to bind the kthread worker | ||
706 | * to the given CPU and the associated NUMA node. | ||
707 | * | ||
708 | * A good practice is to also include the cpu number in the worker name. | ||
709 | * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu). | ||
710 | * | ||
711 | * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM) | ||
712 | * when the needed structures could not get allocated, and ERR_PTR(-EINTR) | ||
713 | * when the worker was SIGKILLed. | ||
714 | */ | ||
715 | struct kthread_worker * | ||
716 | kthread_create_worker_on_cpu(int cpu, unsigned int flags, | ||
717 | const char namefmt[], ...) | ||
718 | { | ||
719 | struct kthread_worker *worker; | ||
720 | va_list args; | ||
721 | |||
722 | va_start(args, namefmt); | ||
723 | worker = __kthread_create_worker(cpu, flags, namefmt, args); | ||
724 | va_end(args); | ||
725 | |||
726 | return worker; | ||
727 | } | ||
728 | EXPORT_SYMBOL(kthread_create_worker_on_cpu); | ||
729 | |||
730 | /* | ||
731 | * Returns true when the work could not be queued at the moment. | ||
732 | * It happens when it is already pending in a worker list | ||
733 | * or when it is being cancelled. | ||
734 | */ | ||
735 | static inline bool queuing_blocked(struct kthread_worker *worker, | ||
736 | struct kthread_work *work) | ||
737 | { | ||
738 | lockdep_assert_held(&worker->lock); | ||
739 | |||
740 | return !list_empty(&work->node) || work->canceling; | ||
741 | } | ||
742 | |||
743 | static void kthread_insert_work_sanity_check(struct kthread_worker *worker, | ||
744 | struct kthread_work *work) | ||
612 | { | 745 | { |
613 | lockdep_assert_held(&worker->lock); | 746 | lockdep_assert_held(&worker->lock); |
747 | WARN_ON_ONCE(!list_empty(&work->node)); | ||
748 | /* Do not use a work with >1 worker, see kthread_queue_work() */ | ||
749 | WARN_ON_ONCE(work->worker && work->worker != worker); | ||
750 | } | ||
751 | |||
752 | /* insert @work before @pos in @worker */ | ||
753 | static void kthread_insert_work(struct kthread_worker *worker, | ||
754 | struct kthread_work *work, | ||
755 | struct list_head *pos) | ||
756 | { | ||
757 | kthread_insert_work_sanity_check(worker, work); | ||
614 | 758 | ||
615 | list_add_tail(&work->node, pos); | 759 | list_add_tail(&work->node, pos); |
616 | work->worker = worker; | 760 | work->worker = worker; |
@@ -619,29 +763,133 @@ static void insert_kthread_work(struct kthread_worker *worker, | |||
619 | } | 763 | } |
620 | 764 | ||
621 | /** | 765 | /** |
622 | * queue_kthread_work - queue a kthread_work | 766 | * kthread_queue_work - queue a kthread_work |
623 | * @worker: target kthread_worker | 767 | * @worker: target kthread_worker |
624 | * @work: kthread_work to queue | 768 | * @work: kthread_work to queue |
625 | * | 769 | * |
626 | * Queue @work to work processor @worker for async execution. @worker | 770 | * Queue @work to work processor @worker for async execution. @worker |
627 | * must have been created with kthread_create_worker(). Returns %true | 771 | * must have been created with kthread_create_worker(). Returns %true |
628 | * if @work was successfully queued, %false if it was already pending. | 772 | * if @work was successfully queued, %false if it was already pending. |
773 | * | ||
774 | * Reinitialize the work if it needs to be used by another worker. | ||
775 | * For example, when the worker was stopped and started again. | ||
629 | */ | 776 | */ |
630 | bool queue_kthread_work(struct kthread_worker *worker, | 777 | bool kthread_queue_work(struct kthread_worker *worker, |
631 | struct kthread_work *work) | 778 | struct kthread_work *work) |
632 | { | 779 | { |
633 | bool ret = false; | 780 | bool ret = false; |
634 | unsigned long flags; | 781 | unsigned long flags; |
635 | 782 | ||
636 | spin_lock_irqsave(&worker->lock, flags); | 783 | spin_lock_irqsave(&worker->lock, flags); |
637 | if (list_empty(&work->node)) { | 784 | if (!queuing_blocked(worker, work)) { |
638 | insert_kthread_work(worker, work, &worker->work_list); | 785 | kthread_insert_work(worker, work, &worker->work_list); |
786 | ret = true; | ||
787 | } | ||
788 | spin_unlock_irqrestore(&worker->lock, flags); | ||
789 | return ret; | ||
790 | } | ||
791 | EXPORT_SYMBOL_GPL(kthread_queue_work); | ||
792 | |||
793 | /** | ||
794 | * kthread_delayed_work_timer_fn - callback that queues the associated kthread | ||
795 | * delayed work when the timer expires. | ||
796 | * @__data: pointer to the data associated with the timer | ||
797 | * | ||
798 | * The format of the function is defined by struct timer_list. | ||
799 | * It must be called from an irqsafe timer with irqs already off. | ||
800 | */ | ||
801 | void kthread_delayed_work_timer_fn(unsigned long __data) | ||
802 | { | ||
803 | struct kthread_delayed_work *dwork = | ||
804 | (struct kthread_delayed_work *)__data; | ||
805 | struct kthread_work *work = &dwork->work; | ||
806 | struct kthread_worker *worker = work->worker; | ||
807 | |||
808 | /* | ||
809 | * This might happen when a pending work is reinitialized. | ||
810 | * It means that it is being used the wrong way. | ||
811 | */ | ||
812 | if (WARN_ON_ONCE(!worker)) | ||
813 | return; | ||
814 | |||
815 | spin_lock(&worker->lock); | ||
816 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ | ||
817 | WARN_ON_ONCE(work->worker != worker); | ||
818 | |||
819 | /* Move the work from worker->delayed_work_list. */ | ||
820 | WARN_ON_ONCE(list_empty(&work->node)); | ||
821 | list_del_init(&work->node); | ||
822 | kthread_insert_work(worker, work, &worker->work_list); | ||
823 | |||
824 | spin_unlock(&worker->lock); | ||
825 | } | ||
826 | EXPORT_SYMBOL(kthread_delayed_work_timer_fn); | ||
827 | |||
828 | void __kthread_queue_delayed_work(struct kthread_worker *worker, | ||
829 | struct kthread_delayed_work *dwork, | ||
830 | unsigned long delay) | ||
831 | { | ||
832 | struct timer_list *timer = &dwork->timer; | ||
833 | struct kthread_work *work = &dwork->work; | ||
834 | |||
835 | WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn || | ||
836 | timer->data != (unsigned long)dwork); | ||
837 | |||
838 | /* | ||
839 | * If @delay is 0, queue @dwork->work immediately. This is for | ||
840 | * both optimization and correctness. The earliest @timer can | ||
841 | * expire is on the closest next tick and delayed_work users depend | ||
842 | * on that there's no such delay when @delay is 0. | ||
843 | */ | ||
844 | if (!delay) { | ||
845 | kthread_insert_work(worker, work, &worker->work_list); | ||
846 | return; | ||
847 | } | ||
848 | |||
849 | /* Be paranoid and try to detect possible races already now. */ | ||
850 | kthread_insert_work_sanity_check(worker, work); | ||
851 | |||
852 | list_add(&work->node, &worker->delayed_work_list); | ||
853 | work->worker = worker; | ||
854 | timer_stats_timer_set_start_info(&dwork->timer); | ||
855 | timer->expires = jiffies + delay; | ||
856 | add_timer(timer); | ||
857 | } | ||
858 | |||
859 | /** | ||
860 | * kthread_queue_delayed_work - queue the associated kthread work | ||
861 | * after a delay. | ||
862 | * @worker: target kthread_worker | ||
863 | * @dwork: kthread_delayed_work to queue | ||
864 | * @delay: number of jiffies to wait before queuing | ||
865 | * | ||
866 | * If the work has not been pending it starts a timer that will queue | ||
867 | * the work after the given @delay. If @delay is zero, it queues the | ||
868 | * work immediately. | ||
869 | * | ||
870 | * Return: %false if the @work has already been pending. It means that | ||
871 | * either the timer was running or the work was queued. It returns %true | ||
872 | * otherwise. | ||
873 | */ | ||
874 | bool kthread_queue_delayed_work(struct kthread_worker *worker, | ||
875 | struct kthread_delayed_work *dwork, | ||
876 | unsigned long delay) | ||
877 | { | ||
878 | struct kthread_work *work = &dwork->work; | ||
879 | unsigned long flags; | ||
880 | bool ret = false; | ||
881 | |||
882 | spin_lock_irqsave(&worker->lock, flags); | ||
883 | |||
884 | if (!queuing_blocked(worker, work)) { | ||
885 | __kthread_queue_delayed_work(worker, dwork, delay); | ||
639 | ret = true; | 886 | ret = true; |
640 | } | 887 | } |
888 | |||
641 | spin_unlock_irqrestore(&worker->lock, flags); | 889 | spin_unlock_irqrestore(&worker->lock, flags); |
642 | return ret; | 890 | return ret; |
643 | } | 891 | } |
644 | EXPORT_SYMBOL_GPL(queue_kthread_work); | 892 | EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); |
645 | 893 | ||
646 | struct kthread_flush_work { | 894 | struct kthread_flush_work { |
647 | struct kthread_work work; | 895 | struct kthread_work work; |
@@ -656,12 +904,12 @@ static void kthread_flush_work_fn(struct kthread_work *work) | |||
656 | } | 904 | } |
657 | 905 | ||
658 | /** | 906 | /** |
659 | * flush_kthread_work - flush a kthread_work | 907 | * kthread_flush_work - flush a kthread_work |
660 | * @work: work to flush | 908 | * @work: work to flush |
661 | * | 909 | * |
662 | * If @work is queued or executing, wait for it to finish execution. | 910 | * If @work is queued or executing, wait for it to finish execution. |
663 | */ | 911 | */ |
664 | void flush_kthread_work(struct kthread_work *work) | 912 | void kthread_flush_work(struct kthread_work *work) |
665 | { | 913 | { |
666 | struct kthread_flush_work fwork = { | 914 | struct kthread_flush_work fwork = { |
667 | KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), | 915 | KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), |
@@ -670,21 +918,19 @@ void flush_kthread_work(struct kthread_work *work) | |||
670 | struct kthread_worker *worker; | 918 | struct kthread_worker *worker; |
671 | bool noop = false; | 919 | bool noop = false; |
672 | 920 | ||
673 | retry: | ||
674 | worker = work->worker; | 921 | worker = work->worker; |
675 | if (!worker) | 922 | if (!worker) |
676 | return; | 923 | return; |
677 | 924 | ||
678 | spin_lock_irq(&worker->lock); | 925 | spin_lock_irq(&worker->lock); |
679 | if (work->worker != worker) { | 926 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ |
680 | spin_unlock_irq(&worker->lock); | 927 | WARN_ON_ONCE(work->worker != worker); |
681 | goto retry; | ||
682 | } | ||
683 | 928 | ||
684 | if (!list_empty(&work->node)) | 929 | if (!list_empty(&work->node)) |
685 | insert_kthread_work(worker, &fwork.work, work->node.next); | 930 | kthread_insert_work(worker, &fwork.work, work->node.next); |
686 | else if (worker->current_work == work) | 931 | else if (worker->current_work == work) |
687 | insert_kthread_work(worker, &fwork.work, worker->work_list.next); | 932 | kthread_insert_work(worker, &fwork.work, |
933 | worker->work_list.next); | ||
688 | else | 934 | else |
689 | noop = true; | 935 | noop = true; |
690 | 936 | ||
@@ -693,23 +939,214 @@ retry: | |||
693 | if (!noop) | 939 | if (!noop) |
694 | wait_for_completion(&fwork.done); | 940 | wait_for_completion(&fwork.done); |
695 | } | 941 | } |
696 | EXPORT_SYMBOL_GPL(flush_kthread_work); | 942 | EXPORT_SYMBOL_GPL(kthread_flush_work); |
943 | |||
944 | /* | ||
945 | * This function removes the work from the worker queue. Also it makes sure | ||
946 | * that it won't get queued later via the delayed work's timer. | ||
947 | * | ||
948 | * The work might still be in use when this function finishes. See the | ||
949 | * current_work being processed by the worker. | ||
950 | * | ||
951 | * Return: %true if @work was pending and successfully canceled, | ||
952 | * %false if @work was not pending | ||
953 | */ | ||
954 | static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, | ||
955 | unsigned long *flags) | ||
956 | { | ||
957 | /* Try to cancel the timer if exists. */ | ||
958 | if (is_dwork) { | ||
959 | struct kthread_delayed_work *dwork = | ||
960 | container_of(work, struct kthread_delayed_work, work); | ||
961 | struct kthread_worker *worker = work->worker; | ||
962 | |||
963 | /* | ||
964 | * del_timer_sync() must be called to make sure that the timer | ||
965 | * callback is not running. The lock must be temporarily released | ||
966 | * to avoid a deadlock with the callback. In the meantime, | ||
967 | * any queuing is blocked by setting the canceling counter. | ||
968 | */ | ||
969 | work->canceling++; | ||
970 | spin_unlock_irqrestore(&worker->lock, *flags); | ||
971 | del_timer_sync(&dwork->timer); | ||
972 | spin_lock_irqsave(&worker->lock, *flags); | ||
973 | work->canceling--; | ||
974 | } | ||
975 | |||
976 | /* | ||
977 | * Try to remove the work from a worker list. It might either | ||
978 | * be from worker->work_list or from worker->delayed_work_list. | ||
979 | */ | ||
980 | if (!list_empty(&work->node)) { | ||
981 | list_del_init(&work->node); | ||
982 | return true; | ||
983 | } | ||
984 | |||
985 | return false; | ||
986 | } | ||
987 | |||
988 | /** | ||
989 | * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work | ||
990 | * @worker: kthread worker to use | ||
991 | * @dwork: kthread delayed work to queue | ||
992 | * @delay: number of jiffies to wait before queuing | ||
993 | * | ||
994 | * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise, | ||
995 | * modify @dwork's timer so that it expires after @delay. If @delay is zero, | ||
996 | * @work is guaranteed to be queued immediately. | ||
997 | * | ||
998 | * Return: %true if @dwork was pending and its timer was modified, | ||
999 | * %false otherwise. | ||
1000 | * | ||
1001 | * A special case is when the work is being canceled in parallel. | ||
1002 | * It might be caused either by the real kthread_cancel_delayed_work_sync() | ||
1003 | * or yet another kthread_mod_delayed_work() call. We let the other command | ||
1004 | * win and return %false here. The caller is supposed to synchronize these | ||
1005 | * operations in a reasonable way. | ||
1006 | * | ||
1007 | * This function is safe to call from any context including IRQ handler. | ||
1008 | * See __kthread_cancel_work() and kthread_delayed_work_timer_fn() | ||
1009 | * for details. | ||
1010 | */ | ||
1011 | bool kthread_mod_delayed_work(struct kthread_worker *worker, | ||
1012 | struct kthread_delayed_work *dwork, | ||
1013 | unsigned long delay) | ||
1014 | { | ||
1015 | struct kthread_work *work = &dwork->work; | ||
1016 | unsigned long flags; | ||
1017 | int ret = false; | ||
1018 | |||
1019 | spin_lock_irqsave(&worker->lock, flags); | ||
1020 | |||
1021 | /* Do not bother with canceling when never queued. */ | ||
1022 | if (!work->worker) | ||
1023 | goto fast_queue; | ||
1024 | |||
1025 | /* Work must not be used with >1 worker, see kthread_queue_work() */ | ||
1026 | WARN_ON_ONCE(work->worker != worker); | ||
1027 | |||
1028 | /* Do not fight with another command that is canceling this work. */ | ||
1029 | if (work->canceling) | ||
1030 | goto out; | ||
1031 | |||
1032 | ret = __kthread_cancel_work(work, true, &flags); | ||
1033 | fast_queue: | ||
1034 | __kthread_queue_delayed_work(worker, dwork, delay); | ||
1035 | out: | ||
1036 | spin_unlock_irqrestore(&worker->lock, flags); | ||
1037 | return ret; | ||
1038 | } | ||
1039 | EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); | ||
1040 | |||
1041 | static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) | ||
1042 | { | ||
1043 | struct kthread_worker *worker = work->worker; | ||
1044 | unsigned long flags; | ||
1045 | int ret = false; | ||
1046 | |||
1047 | if (!worker) | ||
1048 | goto out; | ||
1049 | |||
1050 | spin_lock_irqsave(&worker->lock, flags); | ||
1051 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ | ||
1052 | WARN_ON_ONCE(work->worker != worker); | ||
1053 | |||
1054 | ret = __kthread_cancel_work(work, is_dwork, &flags); | ||
1055 | |||
1056 | if (worker->current_work != work) | ||
1057 | goto out_fast; | ||
1058 | |||
1059 | /* | ||
1060 | * The work is in progress and we need to wait with the lock released. | ||
1061 | * In the meantime, block any queuing by setting the canceling counter. | ||
1062 | */ | ||
1063 | work->canceling++; | ||
1064 | spin_unlock_irqrestore(&worker->lock, flags); | ||
1065 | kthread_flush_work(work); | ||
1066 | spin_lock_irqsave(&worker->lock, flags); | ||
1067 | work->canceling--; | ||
1068 | |||
1069 | out_fast: | ||
1070 | spin_unlock_irqrestore(&worker->lock, flags); | ||
1071 | out: | ||
1072 | return ret; | ||
1073 | } | ||
1074 | |||
1075 | /** | ||
1076 | * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish | ||
1077 | * @work: the kthread work to cancel | ||
1078 | * | ||
1079 | * Cancel @work and wait for its execution to finish. This function | ||
1080 | * can be used even if the work re-queues itself. On return from this | ||
1081 | * function, @work is guaranteed to be not pending or executing on any CPU. | ||
1082 | * | ||
1083 | * kthread_cancel_work_sync(&delayed_work->work) must not be used for | ||
1084 | * delayed_work's. Use kthread_cancel_delayed_work_sync() instead. | ||
1085 | * | ||
1086 | * The caller must ensure that the worker on which @work was last | ||
1087 | * queued can't be destroyed before this function returns. | ||
1088 | * | ||
1089 | * Return: %true if @work was pending, %false otherwise. | ||
1090 | */ | ||
1091 | bool kthread_cancel_work_sync(struct kthread_work *work) | ||
1092 | { | ||
1093 | return __kthread_cancel_work_sync(work, false); | ||
1094 | } | ||
1095 | EXPORT_SYMBOL_GPL(kthread_cancel_work_sync); | ||
1096 | |||
1097 | /** | ||
1098 | * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and | ||
1099 | * wait for it to finish. | ||
1100 | * @dwork: the kthread delayed work to cancel | ||
1101 | * | ||
1102 | * This is kthread_cancel_work_sync() for delayed works. | ||
1103 | * | ||
1104 | * Return: %true if @dwork was pending, %false otherwise. | ||
1105 | */ | ||
1106 | bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork) | ||
1107 | { | ||
1108 | return __kthread_cancel_work_sync(&dwork->work, true); | ||
1109 | } | ||
1110 | EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync); | ||
697 | 1111 | ||
698 | /** | 1112 | /** |
699 | * flush_kthread_worker - flush all current works on a kthread_worker | 1113 | * kthread_flush_worker - flush all current works on a kthread_worker |
700 | * @worker: worker to flush | 1114 | * @worker: worker to flush |
701 | * | 1115 | * |
702 | * Wait until all currently executing or pending works on @worker are | 1116 | * Wait until all currently executing or pending works on @worker are |
703 | * finished. | 1117 | * finished. |
704 | */ | 1118 | */ |
705 | void flush_kthread_worker(struct kthread_worker *worker) | 1119 | void kthread_flush_worker(struct kthread_worker *worker) |
706 | { | 1120 | { |
707 | struct kthread_flush_work fwork = { | 1121 | struct kthread_flush_work fwork = { |
708 | KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), | 1122 | KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), |
709 | COMPLETION_INITIALIZER_ONSTACK(fwork.done), | 1123 | COMPLETION_INITIALIZER_ONSTACK(fwork.done), |
710 | }; | 1124 | }; |
711 | 1125 | ||
712 | queue_kthread_work(worker, &fwork.work); | 1126 | kthread_queue_work(worker, &fwork.work); |
713 | wait_for_completion(&fwork.done); | 1127 | wait_for_completion(&fwork.done); |
714 | } | 1128 | } |
715 | EXPORT_SYMBOL_GPL(flush_kthread_worker); | 1129 | EXPORT_SYMBOL_GPL(kthread_flush_worker); |
1130 | |||
1131 | /** | ||
1132 | * kthread_destroy_worker - destroy a kthread worker | ||
1133 | * @worker: worker to be destroyed | ||
1134 | * | ||
1135 | * Flush and destroy @worker. The simple flush is enough because the kthread | ||
1136 | * worker API is used only in trivial scenarios. There are no multi-step state | ||
1137 | * machines needed. | ||
1138 | */ | ||
1139 | void kthread_destroy_worker(struct kthread_worker *worker) | ||
1140 | { | ||
1141 | struct task_struct *task; | ||
1142 | |||
1143 | task = worker->task; | ||
1144 | if (WARN_ON(!task)) | ||
1145 | return; | ||
1146 | |||
1147 | kthread_flush_worker(worker); | ||
1148 | kthread_stop(task); | ||
1149 | WARN_ON(!list_empty(&worker->work_list)); | ||
1150 | kfree(worker); | ||
1151 | } | ||
1152 | EXPORT_SYMBOL(kthread_destroy_worker); | ||
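A hedged usage sketch of the renamed and extended kthread worker API added above. The driver structure, callbacks and the "mydrv" name are made-up placeholders, and kthread_init_work()/kthread_init_delayed_work() are assumed to be the renamed init helpers from the same cleanup series:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

struct my_dev {
	struct kthread_worker		*worker;
	struct kthread_work		io_work;
	struct kthread_delayed_work	timeout_work;
};

static void my_io_fn(struct kthread_work *work)
{
	/* process one I/O request */
}

static void my_timeout_fn(struct kthread_work *work)
{
	/* handle a request timeout */
}

static int my_dev_start(struct my_dev *dev)
{
	/* pass KTW_FREEZABLE instead of 0 for a freezable worker */
	dev->worker = kthread_create_worker(0, "mydrv");
	if (IS_ERR(dev->worker))
		return PTR_ERR(dev->worker);

	kthread_init_work(&dev->io_work, my_io_fn);
	kthread_init_delayed_work(&dev->timeout_work, my_timeout_fn);

	kthread_queue_work(dev->worker, &dev->io_work);
	kthread_queue_delayed_work(dev->worker, &dev->timeout_work, HZ);
	return 0;
}

static void my_dev_touch(struct my_dev *dev)
{
	/* watchdog style: each sign of progress pushes the timeout out again */
	kthread_mod_delayed_work(dev->worker, &dev->timeout_work, HZ);
}

static void my_dev_stop(struct my_dev *dev)
{
	kthread_cancel_delayed_work_sync(&dev->timeout_work);
	kthread_cancel_work_sync(&dev->io_work);
	kthread_destroy_worker(dev->worker);	/* flushes remaining works and stops the task */
}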
diff --git a/kernel/panic.c b/kernel/panic.c index ca8cea1ef673..e6480e20379e 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -71,6 +71,32 @@ void __weak nmi_panic_self_stop(struct pt_regs *regs) | |||
71 | panic_smp_self_stop(); | 71 | panic_smp_self_stop(); |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | ||
75 | * Stop other CPUs in a panic. Architecture-dependent code may override this | ||
76 | * with a more suitable version. For example, if the architecture supports | ||
77 | * crash dump, it should save registers of each stopped CPU and disable | ||
78 | * per-CPU features such as virtualization extensions. | ||
79 | */ | ||
80 | void __weak crash_smp_send_stop(void) | ||
81 | { | ||
82 | static int cpus_stopped; | ||
83 | |||
84 | /* | ||
85 | * This function can be called twice in the panic path, but obviously | ||
86 | * we execute this only once. | ||
87 | */ | ||
88 | if (cpus_stopped) | ||
89 | return; | ||
90 | |||
91 | /* | ||
92 | * Note smp_send_stop is the usual smp shutdown function, which | ||
93 | * unfortunately means it may not be hardened to work in a panic | ||
94 | * situation. | ||
95 | */ | ||
96 | smp_send_stop(); | ||
97 | cpus_stopped = 1; | ||
98 | } | ||
99 | |||
74 | atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); | 100 | atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); |
75 | 101 | ||
76 | /* | 102 | /* |
@@ -164,14 +190,21 @@ void panic(const char *fmt, ...) | |||
164 | if (!_crash_kexec_post_notifiers) { | 190 | if (!_crash_kexec_post_notifiers) { |
165 | printk_nmi_flush_on_panic(); | 191 | printk_nmi_flush_on_panic(); |
166 | __crash_kexec(NULL); | 192 | __crash_kexec(NULL); |
167 | } | ||
168 | 193 | ||
169 | /* | 194 | /* |
170 | * Note smp_send_stop is the usual smp shutdown function, which | 195 | * Note smp_send_stop is the usual smp shutdown function, which |
171 | * unfortunately means it may not be hardened to work in a panic | 196 | * unfortunately means it may not be hardened to work in a |
172 | * situation. | 197 | * panic situation. |
173 | */ | 198 | */ |
174 | smp_send_stop(); | 199 | smp_send_stop(); |
200 | } else { | ||
201 | /* | ||
202 | * If we want to do crash dump after notifier calls and | ||
203 | * kmsg_dump, we will need architecture dependent extra | ||
204 | * works in addition to stopping other CPUs. | ||
205 | */ | ||
206 | crash_smp_send_stop(); | ||
207 | } | ||
175 | 208 | ||
176 | /* | 209 | /* |
177 | * Run any panic handlers, including those that might need to | 210 | * Run any panic handlers, including those that might need to |
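A hedged sketch of how an architecture might override the new weak crash_smp_send_stop(). The helper arch_crash_stop_other_cpus() below is a hypothetical placeholder for whatever register-saving stop IPI the architecture provides; it is not an existing kernel function.

/* Hypothetical arch/<arch>/kernel/crash.c override of the weak helper
 * added above. */

/* placeholder: save each remote CPU's registers, disable features such
 * as virtualization extensions, then park the CPU (illustrative only) */
static void arch_crash_stop_other_cpus(void)
{
}

void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/* like the generic version, tolerate being called twice */
	if (cpus_stopped)
		return;

	arch_crash_stop_other_cpus();
	cpus_stopped = 1;
}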
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1d3b7665d0be..2a99027312a6 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -73,6 +73,8 @@ void __ptrace_unlink(struct task_struct *child) | |||
73 | { | 73 | { |
74 | BUG_ON(!child->ptrace); | 74 | BUG_ON(!child->ptrace); |
75 | 75 | ||
76 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
77 | |||
76 | child->parent = child->real_parent; | 78 | child->parent = child->real_parent; |
77 | list_del_init(&child->ptrace_entry); | 79 | list_del_init(&child->ptrace_entry); |
78 | 80 | ||
@@ -489,7 +491,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
489 | 491 | ||
490 | /* Architecture-specific hardware disable .. */ | 492 | /* Architecture-specific hardware disable .. */ |
491 | ptrace_disable(child); | 493 | ptrace_disable(child); |
492 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
493 | 494 | ||
494 | write_lock_irq(&tasklist_lock); | 495 | write_lock_irq(&tasklist_lock); |
495 | /* | 496 | /* |
diff --git a/kernel/relay.c b/kernel/relay.c index 9988f5cc2d46..da79a109dbeb 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -328,13 +328,15 @@ static struct rchan_callbacks default_channel_callbacks = { | |||
328 | 328 | ||
329 | /** | 329 | /** |
330 | * wakeup_readers - wake up readers waiting on a channel | 330 | * wakeup_readers - wake up readers waiting on a channel |
331 | * @data: contains the channel buffer | 331 | * @work: contains the channel buffer |
332 | * | 332 | * |
333 | * This is the timer function used to defer reader waking. | 333 | * This is the function used to defer reader waking |
334 | */ | 334 | */ |
335 | static void wakeup_readers(unsigned long data) | 335 | static void wakeup_readers(struct irq_work *work) |
336 | { | 336 | { |
337 | struct rchan_buf *buf = (struct rchan_buf *)data; | 337 | struct rchan_buf *buf; |
338 | |||
339 | buf = container_of(work, struct rchan_buf, wakeup_work); | ||
338 | wake_up_interruptible(&buf->read_wait); | 340 | wake_up_interruptible(&buf->read_wait); |
339 | } | 341 | } |
340 | 342 | ||
@@ -352,9 +354,10 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init) | |||
352 | if (init) { | 354 | if (init) { |
353 | init_waitqueue_head(&buf->read_wait); | 355 | init_waitqueue_head(&buf->read_wait); |
354 | kref_init(&buf->kref); | 356 | kref_init(&buf->kref); |
355 | setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); | 357 | init_irq_work(&buf->wakeup_work, wakeup_readers); |
356 | } else | 358 | } else { |
357 | del_timer_sync(&buf->timer); | 359 | irq_work_sync(&buf->wakeup_work); |
360 | } | ||
358 | 361 | ||
359 | buf->subbufs_produced = 0; | 362 | buf->subbufs_produced = 0; |
360 | buf->subbufs_consumed = 0; | 363 | buf->subbufs_consumed = 0; |
@@ -487,7 +490,7 @@ free_buf: | |||
487 | static void relay_close_buf(struct rchan_buf *buf) | 490 | static void relay_close_buf(struct rchan_buf *buf) |
488 | { | 491 | { |
489 | buf->finalized = 1; | 492 | buf->finalized = 1; |
490 | del_timer_sync(&buf->timer); | 493 | irq_work_sync(&buf->wakeup_work); |
491 | buf->chan->cb->remove_buf_file(buf->dentry); | 494 | buf->chan->cb->remove_buf_file(buf->dentry); |
492 | kref_put(&buf->kref, relay_remove_buf); | 495 | kref_put(&buf->kref, relay_remove_buf); |
493 | } | 496 | } |
@@ -754,14 +757,15 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) | |||
754 | buf->early_bytes += buf->chan->subbuf_size - | 757 | buf->early_bytes += buf->chan->subbuf_size - |
755 | buf->padding[old_subbuf]; | 758 | buf->padding[old_subbuf]; |
756 | smp_mb(); | 759 | smp_mb(); |
757 | if (waitqueue_active(&buf->read_wait)) | 760 | if (waitqueue_active(&buf->read_wait)) { |
758 | /* | 761 | /* |
759 | * Calling wake_up_interruptible() from here | 762 | * Calling wake_up_interruptible() from here |
760 | * will deadlock if we happen to be logging | 763 | * will deadlock if we happen to be logging |
761 | * from the scheduler (trying to re-grab | 764 | * from the scheduler (trying to re-grab |
762 | * rq->lock), so defer it. | 765 | * rq->lock), so defer it. |
763 | */ | 766 | */ |
764 | mod_timer(&buf->timer, jiffies + 1); | 767 | irq_work_queue(&buf->wakeup_work); |
768 | } | ||
765 | } | 769 | } |
766 | 770 | ||
767 | old = buf->data; | 771 | old = buf->data; |
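A minimal, self-contained sketch of the irq_work pattern relay now uses to defer wakeups out of contexts where calling wake_up_interruptible() directly could deadlock; the structure and function names are illustrative.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/wait.h>

struct example_buf {
	struct irq_work wakeup_work;
	wait_queue_head_t read_wait;
};

static void example_wakeup(struct irq_work *work)
{
	struct example_buf *buf = container_of(work, struct example_buf,
					       wakeup_work);

	wake_up_interruptible(&buf->read_wait);
}

static void example_init(struct example_buf *buf)
{
	init_waitqueue_head(&buf->read_wait);
	init_irq_work(&buf->wakeup_work, example_wakeup);
}

/* called where waking a task directly is unsafe, e.g. under rq->lock */
static void example_defer_wakeup(struct example_buf *buf)
{
	irq_work_queue(&buf->wakeup_work);
}

/* called on teardown, replacing the old del_timer_sync() */
static void example_teardown(struct example_buf *buf)
{
	irq_work_sync(&buf->wakeup_work);
}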
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 13bc43d1fb22..4a5c6e73ecd4 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -186,6 +186,11 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) | |||
186 | kfree(td); | 186 | kfree(td); |
187 | return PTR_ERR(tsk); | 187 | return PTR_ERR(tsk); |
188 | } | 188 | } |
189 | /* | ||
190 | * Park the thread so that it can start right on the CPU | ||
191 | * when it becomes available. | ||
192 | */ | ||
193 | kthread_park(tsk); | ||
189 | get_task_struct(tsk); | 194 | get_task_struct(tsk); |
190 | *per_cpu_ptr(ht->store, cpu) = tsk; | 195 | *per_cpu_ptr(ht->store, cpu) = tsk; |
191 | if (ht->create) { | 196 | if (ht->create) { |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bd81f0390277..479d840db286 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -4261,7 +4261,7 @@ void print_worker_info(const char *log_lvl, struct task_struct *task) | |||
4261 | * This function is called without any synchronization and @task | 4261 | * This function is called without any synchronization and @task |
4262 | * could be in any state. Be careful with dereferences. | 4262 | * could be in any state. Be careful with dereferences. |
4263 | */ | 4263 | */ |
4264 | worker = probe_kthread_data(task); | 4264 | worker = kthread_probe_data(task); |
4265 | 4265 | ||
4266 | /* | 4266 | /* |
4267 | * Carefully copy the associated workqueue's workfn and name. Keep | 4267 | * Carefully copy the associated workqueue's workfn and name. Keep |
diff --git a/lib/Makefile b/lib/Makefile index f3ca8c0ab634..50144a3aeebd 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -180,6 +180,7 @@ obj-$(CONFIG_IRQ_POLL) += irq_poll.o | |||
180 | 180 | ||
181 | obj-$(CONFIG_STACKDEPOT) += stackdepot.o | 181 | obj-$(CONFIG_STACKDEPOT) += stackdepot.o |
182 | KASAN_SANITIZE_stackdepot.o := n | 182 | KASAN_SANITIZE_stackdepot.o := n |
183 | KCOV_INSTRUMENT_stackdepot.o := n | ||
183 | 184 | ||
184 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ | 185 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ |
185 | fdt_empty_tree.o | 186 | fdt_empty_tree.o |
diff --git a/lib/bitmap.c b/lib/bitmap.c index eca88087fa8a..0b66f0e5eb6b 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -496,6 +496,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf); | |||
496 | * ranges. Consecutively set bits are shown as two hyphen-separated | 496 | * ranges. Consecutively set bits are shown as two hyphen-separated |
497 | * decimal numbers, the smallest and largest bit numbers set in | 497 | * decimal numbers, the smallest and largest bit numbers set in |
498 | * the range. | 498 | * the range. |
499 | * Optionally each range can be postfixed to denote that only parts of it | ||
500 | * should be set. The range will be divided into groups of a specific size, | ||
501 | * and from each group only the defined number of bits will be used. | ||
502 | * Syntax: range:used_size/group_size | ||
503 | * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769 | ||
499 | * | 504 | * |
500 | * Returns 0 on success, -errno on invalid input strings. | 505 | * Returns 0 on success, -errno on invalid input strings. |
501 | * Error values: | 506 | * Error values: |
@@ -507,16 +512,20 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, | |||
507 | int is_user, unsigned long *maskp, | 512 | int is_user, unsigned long *maskp, |
508 | int nmaskbits) | 513 | int nmaskbits) |
509 | { | 514 | { |
510 | unsigned a, b; | 515 | unsigned int a, b, old_a, old_b; |
516 | unsigned int group_size, used_size; | ||
511 | int c, old_c, totaldigits, ndigits; | 517 | int c, old_c, totaldigits, ndigits; |
512 | const char __user __force *ubuf = (const char __user __force *)buf; | 518 | const char __user __force *ubuf = (const char __user __force *)buf; |
513 | int at_start, in_range; | 519 | int at_start, in_range, in_partial_range; |
514 | 520 | ||
515 | totaldigits = c = 0; | 521 | totaldigits = c = 0; |
522 | old_a = old_b = 0; | ||
523 | group_size = used_size = 0; | ||
516 | bitmap_zero(maskp, nmaskbits); | 524 | bitmap_zero(maskp, nmaskbits); |
517 | do { | 525 | do { |
518 | at_start = 1; | 526 | at_start = 1; |
519 | in_range = 0; | 527 | in_range = 0; |
528 | in_partial_range = 0; | ||
520 | a = b = 0; | 529 | a = b = 0; |
521 | ndigits = totaldigits; | 530 | ndigits = totaldigits; |
522 | 531 | ||
@@ -547,6 +556,24 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, | |||
547 | if ((totaldigits != ndigits) && isspace(old_c)) | 556 | if ((totaldigits != ndigits) && isspace(old_c)) |
548 | return -EINVAL; | 557 | return -EINVAL; |
549 | 558 | ||
559 | if (c == '/') { | ||
560 | used_size = a; | ||
561 | at_start = 1; | ||
562 | in_range = 0; | ||
563 | a = b = 0; | ||
564 | continue; | ||
565 | } | ||
566 | |||
567 | if (c == ':') { | ||
568 | old_a = a; | ||
569 | old_b = b; | ||
570 | at_start = 1; | ||
571 | in_range = 0; | ||
572 | in_partial_range = 1; | ||
573 | a = b = 0; | ||
574 | continue; | ||
575 | } | ||
576 | |||
550 | if (c == '-') { | 577 | if (c == '-') { |
551 | if (at_start || in_range) | 578 | if (at_start || in_range) |
552 | return -EINVAL; | 579 | return -EINVAL; |
@@ -567,15 +594,30 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, | |||
567 | } | 594 | } |
568 | if (ndigits == totaldigits) | 595 | if (ndigits == totaldigits) |
569 | continue; | 596 | continue; |
597 | if (in_partial_range) { | ||
598 | group_size = a; | ||
599 | a = old_a; | ||
600 | b = old_b; | ||
601 | old_a = old_b = 0; | ||
602 | } | ||
570 | /* if no digit is after '-', it's wrong*/ | 603 | /* if no digit is after '-', it's wrong*/ |
571 | if (at_start && in_range) | 604 | if (at_start && in_range) |
572 | return -EINVAL; | 605 | return -EINVAL; |
573 | if (!(a <= b)) | 606 | if (!(a <= b) || !(used_size <= group_size)) |
574 | return -EINVAL; | 607 | return -EINVAL; |
575 | if (b >= nmaskbits) | 608 | if (b >= nmaskbits) |
576 | return -ERANGE; | 609 | return -ERANGE; |
577 | while (a <= b) { | 610 | while (a <= b) { |
578 | set_bit(a, maskp); | 611 | if (in_partial_range) { |
612 | static int pos_in_group = 1; | ||
613 | |||
614 | if (pos_in_group <= used_size) | ||
615 | set_bit(a, maskp); | ||
616 | |||
617 | if (a == b || ++pos_in_group > group_size) | ||
618 | pos_in_group = 1; | ||
619 | } else | ||
620 | set_bit(a, maskp); | ||
579 | a++; | 621 | a++; |
580 | } | 622 | } |
581 | } while (buflen && c == ','); | 623 | } while (buflen && c == ','); |
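A small sketch of the new partial-range syntax from the caller's side, assuming a 1024-bit destination bitmap; the buffer name is illustrative.

#include <linux/bitmap.h>

static DECLARE_BITMAP(example_mask, 1024);

/* "0-1023:2/256" takes the range 0-1023, splits it into groups of 256
 * bits and uses only the first 2 bits of each group, i.e. it sets bits
 * 0,1,256,257,512,513,768,769 as documented above. */
static int example_parse_partial_range(void)
{
	return bitmap_parselist("0-1023:2/256", example_mask, 1024);
}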
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index d8a5cf66c316..b8e2080c1a47 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
@@ -48,11 +48,9 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long | |||
48 | { | 48 | { |
49 | unsigned long long res; | 49 | unsigned long long res; |
50 | unsigned int rv; | 50 | unsigned int rv; |
51 | int overflow; | ||
52 | 51 | ||
53 | res = 0; | 52 | res = 0; |
54 | rv = 0; | 53 | rv = 0; |
55 | overflow = 0; | ||
56 | while (*s) { | 54 | while (*s) { |
57 | unsigned int val; | 55 | unsigned int val; |
58 | 56 | ||
@@ -71,15 +69,13 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long | |||
71 | */ | 69 | */ |
72 | if (unlikely(res & (~0ull << 60))) { | 70 | if (unlikely(res & (~0ull << 60))) { |
73 | if (res > div_u64(ULLONG_MAX - val, base)) | 71 | if (res > div_u64(ULLONG_MAX - val, base)) |
74 | overflow = 1; | 72 | rv |= KSTRTOX_OVERFLOW; |
75 | } | 73 | } |
76 | res = res * base + val; | 74 | res = res * base + val; |
77 | rv++; | 75 | rv++; |
78 | s++; | 76 | s++; |
79 | } | 77 | } |
80 | *p = res; | 78 | *p = res; |
81 | if (overflow) | ||
82 | rv |= KSTRTOX_OVERFLOW; | ||
83 | return rv; | 79 | return rv; |
84 | } | 80 | } |
85 | 81 | ||
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 9c5fe8110413..7e35fc450c5b 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/compiler.h> | 1 | #include <linux/compiler.h> |
2 | #include <linux/export.h> | 2 | #include <linux/export.h> |
3 | #include <linux/kasan-checks.h> | 3 | #include <linux/kasan-checks.h> |
4 | #include <linux/thread_info.h> | ||
4 | #include <linux/uaccess.h> | 5 | #include <linux/uaccess.h> |
5 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
6 | #include <linux/errno.h> | 7 | #include <linux/errno.h> |
@@ -111,6 +112,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) | |||
111 | long retval; | 112 | long retval; |
112 | 113 | ||
113 | kasan_check_write(dst, count); | 114 | kasan_check_write(dst, count); |
115 | check_object_size(dst, count, false); | ||
114 | user_access_begin(); | 116 | user_access_begin(); |
115 | retval = do_strncpy_from_user(dst, src, count, max); | 117 | retval = do_strncpy_from_user(dst, src, count, max); |
116 | user_access_end(); | 118 | user_access_end(); |
diff --git a/mm/bootmem.c b/mm/bootmem.c index a869f84f44d3..e8a55a3c9feb 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -155,7 +155,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size) | |||
155 | { | 155 | { |
156 | unsigned long cursor, end; | 156 | unsigned long cursor, end; |
157 | 157 | ||
158 | kmemleak_free_part(__va(physaddr), size); | 158 | kmemleak_free_part_phys(physaddr, size); |
159 | 159 | ||
160 | cursor = PFN_UP(physaddr); | 160 | cursor = PFN_UP(physaddr); |
161 | end = PFN_DOWN(physaddr + size); | 161 | end = PFN_DOWN(physaddr + size); |
@@ -399,7 +399,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
399 | { | 399 | { |
400 | unsigned long start, end; | 400 | unsigned long start, end; |
401 | 401 | ||
402 | kmemleak_free_part(__va(physaddr), size); | 402 | kmemleak_free_part_phys(physaddr, size); |
403 | 403 | ||
404 | start = PFN_UP(physaddr); | 404 | start = PFN_UP(physaddr); |
405 | end = PFN_DOWN(physaddr + size); | 405 | end = PFN_DOWN(physaddr + size); |
@@ -420,7 +420,7 @@ void __init free_bootmem(unsigned long physaddr, unsigned long size) | |||
420 | { | 420 | { |
421 | unsigned long start, end; | 421 | unsigned long start, end; |
422 | 422 | ||
423 | kmemleak_free_part(__va(physaddr), size); | 423 | kmemleak_free_part_phys(physaddr, size); |
424 | 424 | ||
425 | start = PFN_UP(physaddr); | 425 | start = PFN_UP(physaddr); |
426 | end = PFN_DOWN(physaddr + size); | 426 | end = PFN_DOWN(physaddr + size); |
@@ -336,7 +336,7 @@ int __init cma_declare_contiguous(phys_addr_t base, | |||
336 | * kmemleak scans/reads tracked objects for pointers to other | 336 | * kmemleak scans/reads tracked objects for pointers to other |
337 | * objects but this address isn't mapped and accessible | 337 | * objects but this address isn't mapped and accessible |
338 | */ | 338 | */ |
339 | kmemleak_ignore(phys_to_virt(addr)); | 339 | kmemleak_ignore_phys(addr); |
340 | base = addr; | 340 | base = addr; |
341 | } | 341 | } |
342 | 342 | ||
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 086292f7c59d..a5e453cf05c4 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -90,6 +90,8 @@ | |||
90 | #include <linux/cache.h> | 90 | #include <linux/cache.h> |
91 | #include <linux/percpu.h> | 91 | #include <linux/percpu.h> |
92 | #include <linux/hardirq.h> | 92 | #include <linux/hardirq.h> |
93 | #include <linux/bootmem.h> | ||
94 | #include <linux/pfn.h> | ||
93 | #include <linux/mmzone.h> | 95 | #include <linux/mmzone.h> |
94 | #include <linux/slab.h> | 96 | #include <linux/slab.h> |
95 | #include <linux/thread_info.h> | 97 | #include <linux/thread_info.h> |
@@ -1121,6 +1123,51 @@ void __ref kmemleak_no_scan(const void *ptr) | |||
1121 | } | 1123 | } |
1122 | EXPORT_SYMBOL(kmemleak_no_scan); | 1124 | EXPORT_SYMBOL(kmemleak_no_scan); |
1123 | 1125 | ||
1126 | /** | ||
1127 | * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical | ||
1128 | * address argument | ||
1129 | */ | ||
1130 | void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, | ||
1131 | gfp_t gfp) | ||
1132 | { | ||
1133 | if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) | ||
1134 | kmemleak_alloc(__va(phys), size, min_count, gfp); | ||
1135 | } | ||
1136 | EXPORT_SYMBOL(kmemleak_alloc_phys); | ||
1137 | |||
1138 | /** | ||
1139 | * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a | ||
1140 | * physical address argument | ||
1141 | */ | ||
1142 | void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) | ||
1143 | { | ||
1144 | if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) | ||
1145 | kmemleak_free_part(__va(phys), size); | ||
1146 | } | ||
1147 | EXPORT_SYMBOL(kmemleak_free_part_phys); | ||
1148 | |||
1149 | /** | ||
1150 | * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical | ||
1151 | * address argument | ||
1152 | */ | ||
1153 | void __ref kmemleak_not_leak_phys(phys_addr_t phys) | ||
1154 | { | ||
1155 | if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) | ||
1156 | kmemleak_not_leak(__va(phys)); | ||
1157 | } | ||
1158 | EXPORT_SYMBOL(kmemleak_not_leak_phys); | ||
1159 | |||
1160 | /** | ||
1161 | * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical | ||
1162 | * address argument | ||
1163 | */ | ||
1164 | void __ref kmemleak_ignore_phys(phys_addr_t phys) | ||
1165 | { | ||
1166 | if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) | ||
1167 | kmemleak_ignore(__va(phys)); | ||
1168 | } | ||
1169 | EXPORT_SYMBOL(kmemleak_ignore_phys); | ||
1170 | |||
1124 | /* | 1171 | /* |
1125 | * Update an object's checksum and return true if it was modified. | 1172 | * Update an object's checksum and return true if it was modified. |
1126 | */ | 1173 | */ |
diff --git a/mm/memblock.c b/mm/memblock.c index c8dfa430342b..7608bc305936 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -723,7 +723,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) | |||
723 | (unsigned long long)base + size - 1, | 723 | (unsigned long long)base + size - 1, |
724 | (void *)_RET_IP_); | 724 | (void *)_RET_IP_); |
725 | 725 | ||
726 | kmemleak_free_part(__va(base), size); | 726 | kmemleak_free_part_phys(base, size); |
727 | return memblock_remove_range(&memblock.reserved, base, size); | 727 | return memblock_remove_range(&memblock.reserved, base, size); |
728 | } | 728 | } |
729 | 729 | ||
@@ -1152,7 +1152,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, | |||
1152 | * The min_count is set to 0 so that memblock allocations are | 1152 | * The min_count is set to 0 so that memblock allocations are |
1153 | * never reported as leaks. | 1153 | * never reported as leaks. |
1154 | */ | 1154 | */ |
1155 | kmemleak_alloc(__va(found), size, 0, 0); | 1155 | kmemleak_alloc_phys(found, size, 0, 0); |
1156 | return found; | 1156 | return found; |
1157 | } | 1157 | } |
1158 | return 0; | 1158 | return 0; |
@@ -1399,7 +1399,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) | |||
1399 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", | 1399 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
1400 | __func__, (u64)base, (u64)base + size - 1, | 1400 | __func__, (u64)base, (u64)base + size - 1, |
1401 | (void *)_RET_IP_); | 1401 | (void *)_RET_IP_); |
1402 | kmemleak_free_part(__va(base), size); | 1402 | kmemleak_free_part_phys(base, size); |
1403 | memblock_remove_range(&memblock.reserved, base, size); | 1403 | memblock_remove_range(&memblock.reserved, base, size); |
1404 | } | 1404 | } |
1405 | 1405 | ||
@@ -1419,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) | |||
1419 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", | 1419 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
1420 | __func__, (u64)base, (u64)base + size - 1, | 1420 | __func__, (u64)base, (u64)base + size - 1, |
1421 | (void *)_RET_IP_); | 1421 | (void *)_RET_IP_); |
1422 | kmemleak_free_part(__va(base), size); | 1422 | kmemleak_free_part_phys(base, size); |
1423 | cursor = PFN_UP(base); | 1423 | cursor = PFN_UP(base); |
1424 | end = PFN_DOWN(base + size); | 1424 | end = PFN_DOWN(base + size); |
1425 | 1425 | ||
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index ba609b684d7a..487dad610731 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -84,7 +84,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size) | |||
84 | { | 84 | { |
85 | unsigned long cursor, end; | 85 | unsigned long cursor, end; |
86 | 86 | ||
87 | kmemleak_free_part(__va(addr), size); | 87 | kmemleak_free_part_phys(addr, size); |
88 | 88 | ||
89 | cursor = PFN_UP(addr); | 89 | cursor = PFN_UP(addr); |
90 | end = PFN_DOWN(addr + size); | 90 | end = PFN_DOWN(addr + size); |
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index c68ff3dcb926..e49121ee55f6 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h | |||
@@ -20,8 +20,6 @@ | |||
20 | 20 | ||
21 | #include "main.h" | 21 | #include "main.h" |
22 | 22 | ||
23 | #include <linux/kconfig.h> | ||
24 | |||
25 | struct net_device; | 23 | struct net_device; |
26 | 24 | ||
27 | #define BATADV_DEBUGFS_SUBDIR "batman_adv" | 25 | #define BATADV_DEBUGFS_SUBDIR "batman_adv" |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 206a6b346a8d..a8368d1c4348 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -54,6 +54,7 @@ my $min_conf_desc_length = 4; | |||
54 | my $spelling_file = "$D/spelling.txt"; | 54 | my $spelling_file = "$D/spelling.txt"; |
55 | my $codespell = 0; | 55 | my $codespell = 0; |
56 | my $codespellfile = "/usr/share/codespell/dictionary.txt"; | 56 | my $codespellfile = "/usr/share/codespell/dictionary.txt"; |
57 | my $conststructsfile = "$D/const_structs.checkpatch"; | ||
57 | my $color = 1; | 58 | my $color = 1; |
58 | my $allow_c99_comments = 1; | 59 | my $allow_c99_comments = 1; |
59 | 60 | ||
@@ -523,7 +524,11 @@ our @mode_permission_funcs = ( | |||
523 | ["module_param_array_named", 5], | 524 | ["module_param_array_named", 5], |
524 | ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2], | 525 | ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2], |
525 | ["proc_create(?:_data|)", 2], | 526 | ["proc_create(?:_data|)", 2], |
526 | ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2], | 527 | ["(?:CLASS|DEVICE|SENSOR|SENSOR_DEVICE|IIO_DEVICE)_ATTR", 2], |
528 | ["IIO_DEV_ATTR_[A-Z_]+", 1], | ||
529 | ["SENSOR_(?:DEVICE_|)ATTR_2", 2], | ||
530 | ["SENSOR_TEMPLATE(?:_2|)", 3], | ||
531 | ["__ATTR", 2], | ||
527 | ); | 532 | ); |
528 | 533 | ||
529 | #Create a search pattern for all these functions to speed up a loop below | 534 | #Create a search pattern for all these functions to speed up a loop below |
@@ -541,6 +546,32 @@ our $mode_perms_world_writable = qr{ | |||
541 | 0[0-7][0-7][2367] | 546 | 0[0-7][0-7][2367] |
542 | }x; | 547 | }x; |
543 | 548 | ||
549 | our %mode_permission_string_types = ( | ||
550 | "S_IRWXU" => 0700, | ||
551 | "S_IRUSR" => 0400, | ||
552 | "S_IWUSR" => 0200, | ||
553 | "S_IXUSR" => 0100, | ||
554 | "S_IRWXG" => 0070, | ||
555 | "S_IRGRP" => 0040, | ||
556 | "S_IWGRP" => 0020, | ||
557 | "S_IXGRP" => 0010, | ||
558 | "S_IRWXO" => 0007, | ||
559 | "S_IROTH" => 0004, | ||
560 | "S_IWOTH" => 0002, | ||
561 | "S_IXOTH" => 0001, | ||
562 | "S_IRWXUGO" => 0777, | ||
563 | "S_IRUGO" => 0444, | ||
564 | "S_IWUGO" => 0222, | ||
565 | "S_IXUGO" => 0111, | ||
566 | ); | ||
567 | |||
568 | #Create a search pattern for all these strings to speed up a loop below | ||
569 | our $mode_perms_string_search = ""; | ||
570 | foreach my $entry (keys %mode_permission_string_types) { | ||
571 | $mode_perms_string_search .= '|' if ($mode_perms_string_search ne ""); | ||
572 | $mode_perms_string_search .= $entry; | ||
573 | } | ||
574 | |||
544 | our $allowed_asm_includes = qr{(?x: | 575 | our $allowed_asm_includes = qr{(?x: |
545 | irq| | 576 | irq| |
546 | memory| | 577 | memory| |
@@ -598,6 +629,29 @@ if ($codespell) { | |||
598 | 629 | ||
599 | $misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix; | 630 | $misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix; |
600 | 631 | ||
632 | my $const_structs = ""; | ||
633 | if (open(my $conststructs, '<', $conststructsfile)) { | ||
634 | while (<$conststructs>) { | ||
635 | my $line = $_; | ||
636 | |||
637 | $line =~ s/\s*\n?$//g; | ||
638 | $line =~ s/^\s*//g; | ||
639 | |||
640 | next if ($line =~ m/^\s*#/); | ||
641 | next if ($line =~ m/^\s*$/); | ||
642 | if ($line =~ /\s/) { | ||
643 | print("$conststructsfile: '$line' invalid - ignored\n"); | ||
644 | next; | ||
645 | } | ||
646 | |||
647 | $const_structs .= '|' if ($const_structs ne ""); | ||
648 | $const_structs .= $line; | ||
649 | } | ||
650 | close($conststructsfile); | ||
651 | } else { | ||
652 | warn "No structs that should be const will be found - file '$conststructsfile': $!\n"; | ||
653 | } | ||
654 | |||
601 | sub build_types { | 655 | sub build_types { |
602 | my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)"; | 656 | my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)"; |
603 | my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)"; | 657 | my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)"; |
@@ -704,6 +758,16 @@ sub seed_camelcase_file { | |||
704 | } | 758 | } |
705 | } | 759 | } |
706 | 760 | ||
761 | sub is_maintained_obsolete { | ||
762 | my ($filename) = @_; | ||
763 | |||
764 | return 0 if (!(-e "$root/scripts/get_maintainer.pl")); | ||
765 | |||
766 | my $status = `perl $root/scripts/get_maintainer.pl --status --nom --nol --nogit --nogit-fallback -f $filename 2>&1`; | ||
767 | |||
768 | return $status =~ /obsolete/i; | ||
769 | } | ||
770 | |||
707 | my $camelcase_seeded = 0; | 771 | my $camelcase_seeded = 0; |
708 | sub seed_camelcase_includes { | 772 | sub seed_camelcase_includes { |
709 | return if ($camelcase_seeded); | 773 | return if ($camelcase_seeded); |
@@ -2289,6 +2353,10 @@ sub process { | |||
2289 | } | 2353 | } |
2290 | 2354 | ||
2291 | if ($found_file) { | 2355 | if ($found_file) { |
2356 | if (is_maintained_obsolete($realfile)) { | ||
2357 | WARN("OBSOLETE", | ||
2358 | "$realfile is marked as 'obsolete' in the MAINTAINERS hierarchy. No unnecessary modifications please.\n"); | ||
2359 | } | ||
2292 | if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) { | 2360 | if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) { |
2293 | $check = 1; | 2361 | $check = 1; |
2294 | } else { | 2362 | } else { |
@@ -2939,6 +3007,30 @@ sub process { | |||
2939 | "Block comments use a trailing */ on a separate line\n" . $herecurr); | 3007 | "Block comments use a trailing */ on a separate line\n" . $herecurr); |
2940 | } | 3008 | } |
2941 | 3009 | ||
3010 | # Block comment * alignment | ||
3011 | if ($prevline =~ /$;[ \t]*$/ && #ends in comment | ||
3012 | $line =~ /^\+[ \t]*$;/ && #leading comment | ||
3013 | $rawline =~ /^\+[ \t]*\*/ && #leading * | ||
3014 | (($prevrawline =~ /^\+.*?\/\*/ && #leading /* | ||
3015 | $prevrawline !~ /\*\/[ \t]*$/) || #no trailing */ | ||
3016 | $prevrawline =~ /^\+[ \t]*\*/)) { #leading * | ||
3017 | my $oldindent; | ||
3018 | $prevrawline =~ m@^\+([ \t]*/?)\*@; | ||
3019 | if (defined($1)) { | ||
3020 | $oldindent = expand_tabs($1); | ||
3021 | } else { | ||
3022 | $prevrawline =~ m@^\+(.*/?)\*@; | ||
3023 | $oldindent = expand_tabs($1); | ||
3024 | } | ||
3025 | $rawline =~ m@^\+([ \t]*)\*@; | ||
3026 | my $newindent = $1; | ||
3027 | $newindent = expand_tabs($newindent); | ||
3028 | if (length($oldindent) ne length($newindent)) { | ||
3029 | WARN("BLOCK_COMMENT_STYLE", | ||
3030 | "Block comments should align the * on each line\n" . $hereprev); | ||
3031 | } | ||
3032 | } | ||
3033 | |||
2942 | # check for missing blank lines after struct/union declarations | 3034 | # check for missing blank lines after struct/union declarations |
2943 | # with exceptions for various attributes and macros | 3035 | # with exceptions for various attributes and macros |
2944 | if ($prevline =~ /^[\+ ]};?\s*$/ && | 3036 | if ($prevline =~ /^[\+ ]};?\s*$/ && |
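For illustration, the kind of C block comment the new BLOCK_COMMENT_STYLE alignment check is after; a hedged sketch, not taken from the patch.

/*
 * Properly aligned: every continuation line's '*' sits directly under
 * the '*' of the opening line, so checkpatch stays quiet.
 */
int example_aligned;

/*
* Misaligned: the '*' on this line is indented one column less than the
 * opening line's, which now triggers "Block comments should align the *".
*/
int example_misaligned;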
@@ -4665,7 +4757,17 @@ sub process { | |||
4665 | $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/); | 4757 | $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/); |
4666 | $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/); | 4758 | $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/); |
4667 | 4759 | ||
4668 | $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//; | 4760 | $dstat =~ s/^.\s*\#\s*define\s+$Ident(\([^\)]*\))?\s*//; |
4761 | my $define_args = $1; | ||
4762 | my $define_stmt = $dstat; | ||
4763 | my @def_args = (); | ||
4764 | |||
4765 | if (defined $define_args && $define_args ne "") { | ||
4766 | $define_args = substr($define_args, 1, length($define_args) - 2); | ||
4767 | $define_args =~ s/\s*//g; | ||
4768 | @def_args = split(",", $define_args); | ||
4769 | } | ||
4770 | |||
4669 | $dstat =~ s/$;//g; | 4771 | $dstat =~ s/$;//g; |
4670 | $dstat =~ s/\\\n.//g; | 4772 | $dstat =~ s/\\\n.//g; |
4671 | $dstat =~ s/^\s*//s; | 4773 | $dstat =~ s/^\s*//s; |
@@ -4701,6 +4803,15 @@ sub process { | |||
4701 | ^\[ | 4803 | ^\[ |
4702 | }x; | 4804 | }x; |
4703 | #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n"; | 4805 | #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n"; |
4806 | |||
4807 | $ctx =~ s/\n*$//; | ||
4808 | my $herectx = $here . "\n"; | ||
4809 | my $stmt_cnt = statement_rawlines($ctx); | ||
4810 | |||
4811 | for (my $n = 0; $n < $stmt_cnt; $n++) { | ||
4812 | $herectx .= raw_line($linenr, $n) . "\n"; | ||
4813 | } | ||
4814 | |||
4704 | if ($dstat ne '' && | 4815 | if ($dstat ne '' && |
4705 | $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(), | 4816 | $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(), |
4706 | $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo(); | 4817 | $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo(); |
@@ -4716,13 +4827,6 @@ sub process { | |||
4716 | $dstat !~ /^\(\{/ && # ({... | 4827 | $dstat !~ /^\(\{/ && # ({... |
4717 | $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/) | 4828 | $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/) |
4718 | { | 4829 | { |
4719 | $ctx =~ s/\n*$//; | ||
4720 | my $herectx = $here . "\n"; | ||
4721 | my $cnt = statement_rawlines($ctx); | ||
4722 | |||
4723 | for (my $n = 0; $n < $cnt; $n++) { | ||
4724 | $herectx .= raw_line($linenr, $n) . "\n"; | ||
4725 | } | ||
4726 | 4830 | ||
4727 | if ($dstat =~ /;/) { | 4831 | if ($dstat =~ /;/) { |
4728 | ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE", | 4832 | ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE", |
@@ -4731,6 +4835,46 @@ sub process { | |||
4731 | ERROR("COMPLEX_MACRO", | 4835 | ERROR("COMPLEX_MACRO", |
4732 | "Macros with complex values should be enclosed in parentheses\n" . "$herectx"); | 4836 | "Macros with complex values should be enclosed in parentheses\n" . "$herectx"); |
4733 | } | 4837 | } |
4838 | |||
4839 | } | ||
4840 | |||
4841 | # Make $define_stmt single line, comment-free, etc | ||
4842 | my @stmt_array = split('\n', $define_stmt); | ||
4843 | my $first = 1; | ||
4844 | $define_stmt = ""; | ||
4845 | foreach my $l (@stmt_array) { | ||
4846 | $l =~ s/\\$//; | ||
4847 | if ($first) { | ||
4848 | $define_stmt = $l; | ||
4849 | $first = 0; | ||
4850 | } elsif ($l =~ /^[\+ ]/) { | ||
4851 | $define_stmt .= substr($l, 1); | ||
4852 | } | ||
4853 | } | ||
4854 | $define_stmt =~ s/$;//g; | ||
4855 | $define_stmt =~ s/\s+/ /g; | ||
4856 | $define_stmt = trim($define_stmt); | ||
4857 | |||
4858 | # check if any macro arguments are reused (ignore '...' and 'type') | ||
4859 | foreach my $arg (@def_args) { | ||
4860 | next if ($arg =~ /\.\.\./); | ||
4861 | next if ($arg =~ /^type$/i); | ||
4862 | my $tmp = $define_stmt; | ||
4863 | $tmp =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; | ||
4864 | $tmp =~ s/\#+\s*$arg\b//g; | ||
4865 | $tmp =~ s/\b$arg\s*\#\#//g; | ||
4866 | my $use_cnt = $tmp =~ s/\b$arg\b//g; | ||
4867 | if ($use_cnt > 1) { | ||
4868 | CHK("MACRO_ARG_REUSE", | ||
4869 | "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); | ||
4870 | } | ||
4871 | # check if any macro arguments may have other precedence issues | ||
4872 | if ($define_stmt =~ m/($Operators)?\s*\b$arg\b\s*($Operators)?/m && | ||
4873 | ((defined($1) && $1 ne ',') || | ||
4874 | (defined($2) && $2 ne ','))) { | ||
4875 | CHK("MACRO_ARG_PRECEDENCE", | ||
4876 | "Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx"); | ||
4877 | } | ||
4734 | } | 4878 | } |
4735 | 4879 | ||
4736 | # check for macros with flow control, but without ## concatenation | 4880 | # check for macros with flow control, but without ## concatenation |
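Two hedged C examples of macros that would trip the new MACRO_ARG_REUSE and MACRO_ARG_PRECEDENCE checks; the macro names are made up for illustration.

/* 'x' appears twice, so EXAMPLE_SQUARE(i++) would increment i twice:
 * checkpatch now notes this as MACRO_ARG_REUSE. */
#define EXAMPLE_SQUARE(x)	((x) * (x))

/* 'a' and 'b' are used next to '*' without parentheses, so
 * EXAMPLE_MUL(1 + 2, 3) expands to (1 + 2 * 3): noted as
 * MACRO_ARG_PRECEDENCE with a suggestion to write '(a)' and '(b)'. */
#define EXAMPLE_MUL(a, b)	(a * b)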
@@ -5495,46 +5639,46 @@ sub process { | |||
5495 | } | 5639 | } |
5496 | 5640 | ||
5497 | # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar) | 5641 | # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar) |
5498 | if ($^V && $^V ge 5.10.0 && | 5642 | # if ($^V && $^V ge 5.10.0 && |
5499 | defined $stat && | 5643 | # defined $stat && |
5500 | $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { | 5644 | # $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { |
5501 | if (WARN("PREFER_ETHER_ADDR_COPY", | 5645 | # if (WARN("PREFER_ETHER_ADDR_COPY", |
5502 | "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") && | 5646 | # "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") && |
5503 | $fix) { | 5647 | # $fix) { |
5504 | $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/; | 5648 | # $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/; |
5505 | } | 5649 | # } |
5506 | } | 5650 | # } |
5507 | 5651 | ||
5508 | # Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar) | 5652 | # Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar) |
5509 | if ($^V && $^V ge 5.10.0 && | 5653 | # if ($^V && $^V ge 5.10.0 && |
5510 | defined $stat && | 5654 | # defined $stat && |
5511 | $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { | 5655 | # $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { |
5512 | WARN("PREFER_ETHER_ADDR_EQUAL", | 5656 | # WARN("PREFER_ETHER_ADDR_EQUAL", |
5513 | "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n") | 5657 | # "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n") |
5514 | } | 5658 | # } |
5515 | 5659 | ||
5516 | # check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr | 5660 | # check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr |
5517 | # check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr | 5661 | # check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr |
5518 | if ($^V && $^V ge 5.10.0 && | 5662 | # if ($^V && $^V ge 5.10.0 && |
5519 | defined $stat && | 5663 | # defined $stat && |
5520 | $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { | 5664 | # $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) { |
5521 | 5665 | # | |
5522 | my $ms_val = $7; | 5666 | # my $ms_val = $7; |
5523 | 5667 | # | |
5524 | if ($ms_val =~ /^(?:0x|)0+$/i) { | 5668 | # if ($ms_val =~ /^(?:0x|)0+$/i) { |
5525 | if (WARN("PREFER_ETH_ZERO_ADDR", | 5669 | # if (WARN("PREFER_ETH_ZERO_ADDR", |
5526 | "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") && | 5670 | # "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") && |
5527 | $fix) { | 5671 | # $fix) { |
5528 | $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/; | 5672 | # $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/; |
5529 | } | 5673 | # } |
5530 | } elsif ($ms_val =~ /^(?:0xff|255)$/i) { | 5674 | # } elsif ($ms_val =~ /^(?:0xff|255)$/i) { |
5531 | if (WARN("PREFER_ETH_BROADCAST_ADDR", | 5675 | # if (WARN("PREFER_ETH_BROADCAST_ADDR", |
5532 | "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") && | 5676 | # "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") && |
5533 | $fix) { | 5677 | # $fix) { |
5534 | $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/; | 5678 | # $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/; |
5535 | } | 5679 | # } |
5536 | } | 5680 | # } |
5537 | } | 5681 | # } |
5538 | 5682 | ||
5539 | # typecasts on min/max could be min_t/max_t | 5683 | # typecasts on min/max could be min_t/max_t |
5540 | if ($^V && $^V ge 5.10.0 && | 5684 | if ($^V && $^V ge 5.10.0 && |
@@ -5654,6 +5798,19 @@ sub process { | |||
5654 | "externs should be avoided in .c files\n" . $herecurr); | 5798 | "externs should be avoided in .c files\n" . $herecurr); |
5655 | } | 5799 | } |
5656 | 5800 | ||
5801 | if ($realfile =~ /\.[ch]$/ && defined $stat && | ||
5802 | $stat =~ /^.\s*(?:extern\s+)?$Type\s*$Ident\s*\(\s*([^{]+)\s*\)\s*;/s && | ||
5803 | $1 ne "void") { | ||
5804 | my $args = trim($1); | ||
5805 | while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) { | ||
5806 | my $arg = trim($1); | ||
5807 | if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) { | ||
5808 | WARN("FUNCTION_ARGUMENTS", | ||
5809 | "function definition argument '$arg' should also have an identifier name\n" . $herecurr); | ||
5810 | } | ||
5811 | } | ||
5812 | } | ||
5813 | |||
5657 | # checks for new __setup's | 5814 | # checks for new __setup's |
5658 | if ($rawline =~ /\b__setup\("([^"]*)"/) { | 5815 | if ($rawline =~ /\b__setup\("([^"]*)"/) { |
5659 | my $name = $1; | 5816 | my $name = $1; |
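A hedged example of the prototypes the new FUNCTION_ARGUMENTS warning targets; the function names are illustrative.

/* Warned: the parameters have types but no identifier names. */
int example_copy(void *, const void *, unsigned long);

/* Preferred: each argument carries a descriptive name. */
int example_copy_named(void *dst, const void *src, unsigned long len);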
@@ -5853,46 +6010,6 @@ sub process { | |||
5853 | } | 6010 | } |
5854 | 6011 | ||
5855 | # check for various structs that are normally const (ops, kgdb, device_tree) | 6012 | # check for various structs that are normally const (ops, kgdb, device_tree) |
5856 | my $const_structs = qr{ | ||
5857 | acpi_dock_ops| | ||
5858 | address_space_operations| | ||
5859 | backlight_ops| | ||
5860 | block_device_operations| | ||
5861 | dentry_operations| | ||
5862 | dev_pm_ops| | ||
5863 | dma_map_ops| | ||
5864 | extent_io_ops| | ||
5865 | file_lock_operations| | ||
5866 | file_operations| | ||
5867 | hv_ops| | ||
5868 | ide_dma_ops| | ||
5869 | intel_dvo_dev_ops| | ||
5870 | item_operations| | ||
5871 | iwl_ops| | ||
5872 | kgdb_arch| | ||
5873 | kgdb_io| | ||
5874 | kset_uevent_ops| | ||
5875 | lock_manager_operations| | ||
5876 | microcode_ops| | ||
5877 | mtrr_ops| | ||
5878 | neigh_ops| | ||
5879 | nlmsvc_binding| | ||
5880 | of_device_id| | ||
5881 | pci_raw_ops| | ||
5882 | pipe_buf_operations| | ||
5883 | platform_hibernation_ops| | ||
5884 | platform_suspend_ops| | ||
5885 | proto_ops| | ||
5886 | rpc_pipe_ops| | ||
5887 | seq_operations| | ||
5888 | snd_ac97_build_ops| | ||
5889 | soc_pcmcia_socket_ops| | ||
5890 | stacktrace_ops| | ||
5891 | sysfs_ops| | ||
5892 | tty_operations| | ||
5893 | uart_ops| | ||
5894 | usb_mon_operations| | ||
5895 | wd_ops}x; | ||
5896 | if ($line !~ /\bconst\b/ && | 6013 | if ($line !~ /\bconst\b/ && |
5897 | $line =~ /\bstruct\s+($const_structs)\b/) { | 6014 | $line =~ /\bstruct\s+($const_structs)\b/) { |
5898 | WARN("CONST_STRUCT", | 6015 | WARN("CONST_STRUCT", |
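A small sketch of the kind of declaration the CONST_STRUCT warning covers, with the struct list now read from scripts/const_structs.checkpatch (added further below); the fops name and open handler are illustrative.

#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *file)
{
	return 0;
}

/* Without 'const' this declaration would draw the CONST_STRUCT warning,
 * since file_operations is listed in scripts/const_structs.checkpatch. */
static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.open  = example_open,
};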
@@ -5979,34 +6096,69 @@ sub process { | |||
5979 | # Mode permission misuses where it seems decimal should be octal | 6096 | # Mode permission misuses where it seems decimal should be octal |
5980 | # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop | 6097 | # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop |
5981 | if ($^V && $^V ge 5.10.0 && | 6098 | if ($^V && $^V ge 5.10.0 && |
6099 | defined $stat && | ||
5982 | $line =~ /$mode_perms_search/) { | 6100 | $line =~ /$mode_perms_search/) { |
5983 | foreach my $entry (@mode_permission_funcs) { | 6101 | foreach my $entry (@mode_permission_funcs) { |
5984 | my $func = $entry->[0]; | 6102 | my $func = $entry->[0]; |
5985 | my $arg_pos = $entry->[1]; | 6103 | my $arg_pos = $entry->[1]; |
5986 | 6104 | ||
6105 | my $lc = $stat =~ tr@\n@@; | ||
6106 | $lc = $lc + $linenr; | ||
6107 | my $stat_real = raw_line($linenr, 0); | ||
6108 | for (my $count = $linenr + 1; $count <= $lc; $count++) { | ||
6109 | $stat_real = $stat_real . "\n" . raw_line($count, 0); | ||
6110 | } | ||
6111 | |||
5987 | my $skip_args = ""; | 6112 | my $skip_args = ""; |
5988 | if ($arg_pos > 1) { | 6113 | if ($arg_pos > 1) { |
5989 | $arg_pos--; | 6114 | $arg_pos--; |
5990 | $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}"; | 6115 | $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}"; |
5991 | } | 6116 | } |
5992 | my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]"; | 6117 | my $test = "\\b$func\\s*\\(${skip_args}($FuncArg(?:\\|\\s*$FuncArg)*)\\s*[,\\)]"; |
5993 | if ($line =~ /$test/) { | 6118 | if ($stat =~ /$test/) { |
5994 | my $val = $1; | 6119 | my $val = $1; |
5995 | $val = $6 if ($skip_args ne ""); | 6120 | $val = $6 if ($skip_args ne ""); |
5996 | 6121 | if (($val =~ /^$Int$/ && $val !~ /^$Octal$/) || | |
5997 | if ($val !~ /^0$/ && | 6122 | ($val =~ /^$Octal$/ && length($val) ne 4)) { |
5998 | (($val =~ /^$Int$/ && $val !~ /^$Octal$/) || | ||
5999 | length($val) ne 4)) { | ||
6000 | ERROR("NON_OCTAL_PERMISSIONS", | 6123 | ERROR("NON_OCTAL_PERMISSIONS", |
6001 | "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr); | 6124 | "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real); |
6002 | } elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) { | 6125 | } |
6126 | if ($val =~ /^$Octal$/ && (oct($val) & 02)) { | ||
6003 | ERROR("EXPORTED_WORLD_WRITABLE", | 6127 | ERROR("EXPORTED_WORLD_WRITABLE", |
6004 | "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); | 6128 | "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . "$here\n" . $stat_real); |
6005 | } | 6129 | } |
6006 | } | 6130 | } |
6007 | } | 6131 | } |
6008 | } | 6132 | } |
6009 | 6133 | ||
6134 | # check for uses of S_<PERMS> that could be octal for readability | ||
6135 | if ($line =~ /\b$mode_perms_string_search\b/) { | ||
6136 | my $val = ""; | ||
6137 | my $oval = ""; | ||
6138 | my $to = 0; | ||
6139 | my $curpos = 0; | ||
6140 | my $lastpos = 0; | ||
6141 | while ($line =~ /\b(($mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) { | ||
6142 | $curpos = pos($line); | ||
6143 | my $match = $2; | ||
6144 | my $omatch = $1; | ||
6145 | last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos)); | ||
6146 | $lastpos = $curpos; | ||
6147 | $to |= $mode_permission_string_types{$match}; | ||
6148 | $val .= '\s*\|\s*' if ($val ne ""); | ||
6149 | $val .= $match; | ||
6150 | $oval .= $omatch; | ||
6151 | } | ||
6152 | $oval =~ s/^\s*\|\s*//; | ||
6153 | $oval =~ s/\s*\|\s*$//; | ||
6154 | my $octal = sprintf("%04o", $to); | ||
6155 | if (WARN("SYMBOLIC_PERMS", | ||
6156 | "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) && | ||
6157 | $fix) { | ||
6158 | $fixed[$fixlinenr] =~ s/$val/$octal/; | ||
6159 | } | ||
6160 | } | ||
6161 | |||
6010 | # validate content of MODULE_LICENSE against list from include/linux/module.h | 6162 | # validate content of MODULE_LICENSE against list from include/linux/module.h |
6011 | if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) { | 6163 | if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) { |
6012 | my $extracted_string = get_quoted_string($line, $rawline); | 6164 | my $extracted_string = get_quoted_string($line, $rawline); |
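A hedged example of the new SYMBOLIC_PERMS warning in driver code; the parameter name is illustrative. S_IRUGO | S_IWUSR is 0444 | 0200, so checkpatch suggests the octal 0644.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static int example_level;

/* Warned: "Symbolic permissions 'S_IRUGO | S_IWUSR' are not preferred.
 * Consider using octal permissions '0644'."  With --fix, checkpatch
 * rewrites the line to:
 *	module_param(example_level, int, 0644);
 */
module_param(example_level, int, S_IRUGO | S_IWUSR);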
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch new file mode 100644 index 000000000000..ac5f1267151d --- /dev/null +++ b/scripts/const_structs.checkpatch | |||
@@ -0,0 +1,64 @@ | |||
1 | acpi_dock_ops | ||
2 | address_space_operations | ||
3 | backlight_ops | ||
4 | block_device_operations | ||
5 | clk_ops | ||
6 | comedi_lrange | ||
7 | component_ops | ||
8 | dentry_operations | ||
9 | dev_pm_ops | ||
10 | dma_map_ops | ||
11 | driver_info | ||
12 | drm_connector_funcs | ||
13 | drm_encoder_funcs | ||
14 | drm_encoder_helper_funcs | ||
15 | ethtool_ops | ||
16 | extent_io_ops | ||
17 | file_lock_operations | ||
18 | file_operations | ||
19 | hv_ops | ||
20 | ide_dma_ops | ||
21 | ide_port_ops | ||
22 | inode_operations | ||
23 | intel_dvo_dev_ops | ||
24 | irq_domain_ops | ||
25 | item_operations | ||
26 | iwl_cfg | ||
27 | iwl_ops | ||
28 | kgdb_arch | ||
29 | kgdb_io | ||
30 | kset_uevent_ops | ||
31 | lock_manager_operations | ||
32 | machine_desc | ||
33 | microcode_ops | ||
34 | mlxsw_reg_info | ||
35 | mtrr_ops | ||
36 | neigh_ops | ||
37 | net_device_ops | ||
38 | nlmsvc_binding | ||
39 | nvkm_device_chip | ||
40 | of_device_id | ||
41 | pci_raw_ops | ||
42 | pipe_buf_operations | ||
43 | platform_hibernation_ops | ||
44 | platform_suspend_ops | ||
45 | proto_ops | ||
46 | regmap_access_table | ||
47 | rpc_pipe_ops | ||
48 | rtc_class_ops | ||
49 | sd_desc | ||
50 | seq_operations | ||
51 | sirfsoc_padmux | ||
52 | snd_ac97_build_ops | ||
53 | snd_soc_component_driver | ||
54 | soc_pcmcia_socket_ops | ||
55 | stacktrace_ops | ||
56 | sysfs_ops | ||
57 | tty_operations | ||
58 | uart_ops | ||
59 | usb_mon_operations | ||
60 | v4l2_ctrl_ops | ||
61 | v4l2_ioctl_ops | ||
62 | vm_operations_struct | ||
63 | wacom_features | ||
64 | wd_ops | ||
diff --git a/scripts/tags.sh b/scripts/tags.sh index b3775a9604ea..a2ff3388e5ea 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
@@ -263,7 +263,8 @@ exuberant() | |||
263 | -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL \ | 263 | -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL \ |
264 | -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \ | 264 | -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \ |
265 | -I static,const \ | 265 | -I static,const \ |
266 | --extra=+f --c-kinds=+px --langmap=c:+.h "${regex[@]}" | 266 | --extra=+fq --c-kinds=+px --fields=+iaS --langmap=c:+.h \ |
267 | "${regex[@]}" | ||
267 | 268 | ||
268 | setup_regex exuberant kconfig | 269 | setup_regex exuberant kconfig |
269 | all_kconfigs | xargs $1 -a \ | 270 | all_kconfigs | xargs $1 -a \ |
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c index c8455b47388b..7ab14ce65a73 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c +++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c | |||
@@ -338,7 +338,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context) | |||
338 | spin_unlock_irqrestore(&sst->spinlock, flags); | 338 | spin_unlock_irqrestore(&sst->spinlock, flags); |
339 | 339 | ||
340 | /* continue to send any remaining messages... */ | 340 | /* continue to send any remaining messages... */ |
341 | queue_kthread_work(&ipc->kworker, &ipc->kwork); | 341 | kthread_queue_work(&ipc->kworker, &ipc->kwork); |
342 | 342 | ||
343 | return IRQ_HANDLED; | 343 | return IRQ_HANDLED; |
344 | } | 344 | } |
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h index 5d2949324d0e..012742299dd5 100644 --- a/sound/soc/intel/common/sst-acpi.h +++ b/sound/soc/intel/common/sst-acpi.h | |||
@@ -12,7 +12,6 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kconfig.h> | ||
16 | #include <linux/stddef.h> | 15 | #include <linux/stddef.h> |
17 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
18 | 17 | ||
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c index a12c7bb08d3b..6c672ac79cce 100644 --- a/sound/soc/intel/common/sst-ipc.c +++ b/sound/soc/intel/common/sst-ipc.c | |||
@@ -111,7 +111,7 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header, | |||
111 | list_add_tail(&msg->list, &ipc->tx_list); | 111 | list_add_tail(&msg->list, &ipc->tx_list); |
112 | spin_unlock_irqrestore(&ipc->dsp->spinlock, flags); | 112 | spin_unlock_irqrestore(&ipc->dsp->spinlock, flags); |
113 | 113 | ||
114 | queue_kthread_work(&ipc->kworker, &ipc->kwork); | 114 | kthread_queue_work(&ipc->kworker, &ipc->kwork); |
115 | 115 | ||
116 | if (wait) | 116 | if (wait) |
117 | return tx_wait_done(ipc, msg, rx_data); | 117 | return tx_wait_done(ipc, msg, rx_data); |
@@ -281,7 +281,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc) | |||
281 | return -ENOMEM; | 281 | return -ENOMEM; |
282 | 282 | ||
283 | /* start the IPC message thread */ | 283 | /* start the IPC message thread */ |
284 | init_kthread_worker(&ipc->kworker); | 284 | kthread_init_worker(&ipc->kworker); |
285 | ipc->tx_thread = kthread_run(kthread_worker_fn, | 285 | ipc->tx_thread = kthread_run(kthread_worker_fn, |
286 | &ipc->kworker, "%s", | 286 | &ipc->kworker, "%s", |
287 | dev_name(ipc->dev)); | 287 | dev_name(ipc->dev)); |
@@ -292,7 +292,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc) | |||
292 | return ret; | 292 | return ret; |
293 | } | 293 | } |
294 | 294 | ||
295 | init_kthread_work(&ipc->kwork, ipc_tx_msgs); | 295 | kthread_init_work(&ipc->kwork, ipc_tx_msgs); |
296 | return 0; | 296 | return 0; |
297 | } | 297 | } |
298 | EXPORT_SYMBOL_GPL(sst_ipc_init); | 298 | EXPORT_SYMBOL_GPL(sst_ipc_init); |
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index 91565229d074..e432a31fd9f2 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c | |||
@@ -818,7 +818,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context) | |||
818 | spin_unlock_irqrestore(&sst->spinlock, flags); | 818 | spin_unlock_irqrestore(&sst->spinlock, flags); |
819 | 819 | ||
820 | /* continue to send any remaining messages... */ | 820 | /* continue to send any remaining messages... */ |
821 | queue_kthread_work(&ipc->kworker, &ipc->kwork); | 821 | kthread_queue_work(&ipc->kworker, &ipc->kwork); |
822 | 822 | ||
823 | return IRQ_HANDLED; | 823 | return IRQ_HANDLED; |
824 | } | 824 | } |
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c index 0bd01e62622c..797cf4053235 100644 --- a/sound/soc/intel/skylake/skl-sst-ipc.c +++ b/sound/soc/intel/skylake/skl-sst-ipc.c | |||
@@ -464,7 +464,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context) | |||
464 | skl_ipc_int_enable(dsp); | 464 | skl_ipc_int_enable(dsp); |
465 | 465 | ||
466 | /* continue to send any remaining messages... */ | 466 | /* continue to send any remaining messages... */ |
467 | queue_kthread_work(&ipc->kworker, &ipc->kwork); | 467 | kthread_queue_work(&ipc->kworker, &ipc->kwork); |
468 | 468 | ||
469 | return IRQ_HANDLED; | 469 | return IRQ_HANDLED; |
470 | } | 470 | } |
diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c index 878daf3429e8..7dc5a0af9b54 100644 --- a/tools/testing/nvdimm/config_check.c +++ b/tools/testing/nvdimm/config_check.c | |||
@@ -1,4 +1,3 @@ | |||
1 | #include <linux/kconfig.h> | ||
2 | #include <linux/bug.h> | 1 | #include <linux/bug.h> |
3 | 2 | ||
4 | void check(void) | 3 | void check(void) |
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 9d0919ed52a4..f2e07f2fd4b4 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
@@ -3,7 +3,8 @@ CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE | |||
3 | LDFLAGS += -lpthread -lurcu | 3 | LDFLAGS += -lpthread -lurcu |
4 | TARGETS = main | 4 | TARGETS = main |
5 | OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ | 5 | OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ |
6 | regression1.o regression2.o regression3.o multiorder.o | 6 | regression1.o regression2.o regression3.o multiorder.o \ |
7 | iteration_check.o | ||
7 | 8 | ||
8 | targets: $(TARGETS) | 9 | targets: $(TARGETS) |
9 | 10 | ||
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c new file mode 100644 index 000000000000..9adb8e7415a6 --- /dev/null +++ b/tools/testing/radix-tree/iteration_check.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * iteration_check.c: test races having to do with radix tree iteration | ||
3 | * Copyright (c) 2016 Intel Corporation | ||
4 | * Author: Ross Zwisler <ross.zwisler@linux.intel.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | #include <linux/radix-tree.h> | ||
16 | #include <pthread.h> | ||
17 | #include "test.h" | ||
18 | |||
19 | #define NUM_THREADS 4 | ||
20 | #define TAG 0 | ||
21 | static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; | ||
22 | static pthread_t threads[NUM_THREADS]; | ||
23 | RADIX_TREE(tree, GFP_KERNEL); | ||
24 | bool test_complete; | ||
25 | |||
26 | /* relentlessly fill the tree with tagged entries */ | ||
27 | static void *add_entries_fn(void *arg) | ||
28 | { | ||
29 | int pgoff; | ||
30 | |||
31 | while (!test_complete) { | ||
32 | for (pgoff = 0; pgoff < 100; pgoff++) { | ||
33 | pthread_mutex_lock(&tree_lock); | ||
34 | if (item_insert(&tree, pgoff) == 0) | ||
35 | item_tag_set(&tree, pgoff, TAG); | ||
36 | pthread_mutex_unlock(&tree_lock); | ||
37 | } | ||
38 | } | ||
39 | |||
40 | return NULL; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find | ||
45 | * things that have been removed and randomly resetting our iteration to the | ||
46 | * next chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and | ||
47 | * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a | ||
48 | * NULL 'slot' variable. | ||
49 | */ | ||
50 | static void *tagged_iteration_fn(void *arg) | ||
51 | { | ||
52 | struct radix_tree_iter iter; | ||
53 | void **slot; | ||
54 | |||
55 | while (!test_complete) { | ||
56 | rcu_read_lock(); | ||
57 | radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) { | ||
58 | void *entry; | ||
59 | int i; | ||
60 | |||
61 | /* busy wait to let removals happen */ | ||
62 | for (i = 0; i < 1000000; i++) | ||
63 | ; | ||
64 | |||
65 | entry = radix_tree_deref_slot(slot); | ||
66 | if (unlikely(!entry)) | ||
67 | continue; | ||
68 | |||
69 | if (radix_tree_deref_retry(entry)) { | ||
70 | slot = radix_tree_iter_retry(&iter); | ||
71 | continue; | ||
72 | } | ||
73 | |||
74 | if (rand() % 50 == 0) | ||
75 | slot = radix_tree_iter_next(&iter); | ||
76 | } | ||
77 | rcu_read_unlock(); | ||
78 | } | ||
79 | |||
80 | return NULL; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Iterate over the entries, doing a radix_tree_iter_retry() as we find things | ||
85 | * that have been removed and randomly resetting our iteration to the next | ||
86 | * chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and | ||
87 | * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a | ||
88 | * NULL 'slot' variable. | ||
89 | */ | ||
90 | static void *untagged_iteration_fn(void *arg) | ||
91 | { | ||
92 | struct radix_tree_iter iter; | ||
93 | void **slot; | ||
94 | |||
95 | while (!test_complete) { | ||
96 | rcu_read_lock(); | ||
97 | radix_tree_for_each_slot(slot, &tree, &iter, 0) { | ||
98 | void *entry; | ||
99 | int i; | ||
100 | |||
101 | /* busy wait to let removals happen */ | ||
102 | for (i = 0; i < 1000000; i++) | ||
103 | ; | ||
104 | |||
105 | entry = radix_tree_deref_slot(slot); | ||
106 | if (unlikely(!entry)) | ||
107 | continue; | ||
108 | |||
109 | if (radix_tree_deref_retry(entry)) { | ||
110 | slot = radix_tree_iter_retry(&iter); | ||
111 | continue; | ||
112 | } | ||
113 | |||
114 | if (rand() % 50 == 0) | ||
115 | slot = radix_tree_iter_next(&iter); | ||
116 | } | ||
117 | rcu_read_unlock(); | ||
118 | } | ||
119 | |||
120 | return NULL; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Randomly remove entries to help induce radix_tree_iter_retry() calls in the | ||
125 | * two iteration functions. | ||
126 | */ | ||
127 | static void *remove_entries_fn(void *arg) | ||
128 | { | ||
129 | while (!test_complete) { | ||
130 | int pgoff; | ||
131 | |||
132 | pgoff = rand() % 100; | ||
133 | |||
134 | pthread_mutex_lock(&tree_lock); | ||
135 | item_delete(&tree, pgoff); | ||
136 | pthread_mutex_unlock(&tree_lock); | ||
137 | } | ||
138 | |||
139 | return NULL; | ||
140 | } | ||
141 | |||
142 | /* This is a unit test for a bug found by the syzkaller tester */ | ||
143 | void iteration_test(void) | ||
144 | { | ||
145 | int i; | ||
146 | |||
147 | printf("Running iteration tests for 10 seconds\n"); | ||
148 | |||
149 | srand(time(0)); | ||
150 | test_complete = false; | ||
151 | |||
152 | if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) { | ||
153 | perror("pthread_create"); | ||
154 | exit(1); | ||
155 | } | ||
156 | if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) { | ||
157 | perror("pthread_create"); | ||
158 | exit(1); | ||
159 | } | ||
160 | if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) { | ||
161 | perror("pthread_create"); | ||
162 | exit(1); | ||
163 | } | ||
164 | if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) { | ||
165 | perror("pthread_create"); | ||
166 | exit(1); | ||
167 | } | ||
168 | |||
169 | sleep(10); | ||
170 | test_complete = true; | ||
171 | |||
172 | for (i = 0; i < NUM_THREADS; i++) { | ||
173 | if (pthread_join(threads[i], NULL)) { | ||
174 | perror("pthread_join"); | ||
175 | exit(1); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | item_kill_tree(&tree); | ||
180 | } | ||
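The deref/retry/next dance that iteration_check.c hammers above mirrors the lookup pattern lockless readers of a radix tree generally follow. Below is a minimal sketch, not part of the patch, assuming the same userspace shims (linux/radix-tree.h, test.h) that iteration_check.c builds against; walk_tagged() is a hypothetical helper name, and only the radix tree calls shown in the diff itself are used.

/*
 * Hedged sketch: walk all entries carrying 'tag' under RCU, restarting the
 * current index when a retry entry is observed.  Illustrative only.
 */
#include <linux/radix-tree.h>
#include "test.h"

static void walk_tagged(struct radix_tree_root *root, unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, root, &iter, 0, tag) {
		void *entry = radix_tree_deref_slot(slot);

		if (!entry)
			continue;	/* slot was emptied under us */

		if (radix_tree_deref_retry(entry)) {
			/* node moved; re-walk this index from the root */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		/* 'entry' is safe to inspect for the rest of this pass */
	}
	rcu_read_unlock();
}

Both radix_tree_iter_retry() and radix_tree_iter_next() hand a NULL slot back to radix_tree_next_slot(), which is exactly the case the new test exercises.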
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index b7619ff3b552..daa9010693e8 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c | |||
@@ -332,6 +332,7 @@ int main(int argc, char **argv) | |||
332 | regression1_test(); | 332 | regression1_test(); |
333 | regression2_test(); | 333 | regression2_test(); |
334 | regression3_test(); | 334 | regression3_test(); |
335 | iteration_test(); | ||
335 | single_thread_tests(long_run); | 336 | single_thread_tests(long_run); |
336 | 337 | ||
337 | sleep(1); | 338 | sleep(1); |
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index 2d03a63bb79c..0d6813a61b37 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c | |||
@@ -43,7 +43,7 @@ | |||
43 | #include "regression.h" | 43 | #include "regression.h" |
44 | 44 | ||
45 | static RADIX_TREE(mt_tree, GFP_KERNEL); | 45 | static RADIX_TREE(mt_tree, GFP_KERNEL); |
46 | static pthread_mutex_t mt_lock; | 46 | static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER; |
47 | 47 | ||
48 | struct page { | 48 | struct page { |
49 | pthread_mutex_t lock; | 49 | pthread_mutex_t lock; |
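For context on the mt_lock hunk above: a statically allocated pthread mutex has to be initialised before first use, either with the static initialiser (as the fix does) or at runtime. A small illustrative sketch, with hypothetical names not taken from the patch:

#include <pthread.h>

/* static initialisation, equivalent to the regression1.c fix */
static pthread_mutex_t lock_static = PTHREAD_MUTEX_INITIALIZER;

/* runtime initialisation, e.g. from a setup routine */
static pthread_mutex_t lock_dynamic;

static void setup_locks(void)
{
	pthread_mutex_init(&lock_dynamic, NULL);
}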
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index e85131369723..217fb2403f09 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h | |||
@@ -27,6 +27,7 @@ void item_kill_tree(struct radix_tree_root *root); | |||
27 | 27 | ||
28 | void tag_check(void); | 28 | void tag_check(void); |
29 | void multiorder_checks(void); | 29 | void multiorder_checks(void); |
30 | void iteration_test(void); | ||
30 | 31 | ||
31 | struct item * | 32 | struct item * |
32 | item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); | 33 | item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); |