231 files changed, 3339 insertions, 2187 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-vmcoreinfo b/Documentation/ABI/testing/sysfs-kernel-vmcoreinfo new file mode 100644 index 000000000000..7bd81168e063 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-vmcoreinfo | |||
@@ -0,0 +1,14 @@ | |||
1 | What: /sys/kernel/vmcoreinfo | ||
2 | Date: October 2007 | ||
3 | KernelVersion: 2.6.24 | ||
4 | Contact: Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp> | ||
5 | Kexec Mailing List <kexec@lists.infradead.org> | ||
6 | Vivek Goyal <vgoyal@redhat.com> | ||
7 | Description: | ||
8 | Shows the physical address and size of the vmcoreinfo ELF note. | ||
9 | The first value contains the physical address of the note in | ||
10 | hex and the second value contains its size in hex. This ELF | ||
11 | note info is parsed by the second kernel and exported to user | ||
12 | space as part of the ELF note in the /proc/vmcore file. The | ||
13 | note contains various information such as struct sizes, symbol | ||
14 | values, page size, etc. | ||
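For context, a minimal userspace sketch of how this attribute could be consumed follows. It parses the two hex fields exactly as the note above describes; apart from the documented file name and format, everything here (the program itself, its error handling) is illustrative and not part of the patch.

	/* Sketch: read the vmcoreinfo note location from sysfs. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		unsigned long long addr, size;
		FILE *f = fopen("/sys/kernel/vmcoreinfo", "r");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		/* The file holds two hex values: physical address and size. */
		if (fscanf(f, "%llx %llx", &addr, &size) != 2) {
			fprintf(stderr, "unexpected format\n");
			fclose(f);
			return EXIT_FAILURE;
		}
		fclose(f);
		printf("vmcoreinfo note at 0x%llx, %llu bytes\n", addr, size);
		return 0;
	}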
diff --git a/Documentation/blockdev/ramdisk.txt b/Documentation/blockdev/ramdisk.txt index fa72e97dd669..fe2ef978d85a 100644 --- a/Documentation/blockdev/ramdisk.txt +++ b/Documentation/blockdev/ramdisk.txt | |||
@@ -36,21 +36,30 @@ allowing one to squeeze more programs onto an average installation or | |||
36 | rescue floppy disk. | 36 | rescue floppy disk. |
37 | 37 | ||
38 | 38 | ||
39 | 2) Kernel Command Line Parameters | 39 | 2) Parameters |
40 | --------------------------------- | 40 | --------------------------------- |
41 | 41 | ||
42 | 2a) Kernel Command Line Parameters | ||
43 | |||
42 | ramdisk_size=N | 44 | ramdisk_size=N |
43 | ============== | 45 | ============== |
44 | 46 | ||
45 | This parameter tells the RAM disk driver to set up RAM disks of N k size. The | 47 | This parameter tells the RAM disk driver to set up RAM disks of N k size. The |
46 | default is 4096 (4 MB) (8192 (8 MB) on S390). | 48 | default is 4096 (4 MB). |
49 | |||
50 | 2b) Module parameters | ||
47 | 51 | ||
48 | ramdisk_blocksize=N | 52 | rd_nr |
49 | =================== | 53 | ===== |
54 | Number of /dev/ramX devices created. | ||
50 | 55 | ||
51 | This parameter tells the RAM disk driver how many bytes to use per block. The | 56 | max_part |
52 | default is 1024 (BLOCK_SIZE). | 57 | ======== |
58 | Maximum number of partitions per RAM disk. | ||
53 | 59 | ||
60 | rd_size | ||
61 | ======= | ||
62 | See ramdisk_size. | ||
54 | 63 | ||
55 | 3) Using "rdev -r" | 64 | 3) Using "rdev -r" |
56 | ------------------ | 65 | ------------------ |
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index 8cb9938cc47e..be675d2d15a7 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt | |||
@@ -285,7 +285,7 @@ A: This is what you would need in your kernel code to receive notifications. | |||
285 | return NOTIFY_OK; | 285 | return NOTIFY_OK; |
286 | } | 286 | } |
287 | 287 | ||
288 | static struct notifier_block foobar_cpu_notifer = | 288 | static struct notifier_block foobar_cpu_notifier = |
289 | { | 289 | { |
290 | .notifier_call = foobar_cpu_callback, | 290 | .notifier_call = foobar_cpu_callback, |
291 | }; | 291 | }; |
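For completeness, a hedged sketch of how the corrected notifier block above is typically registered at init time. register_cpu_notifier() is the registration interface cpu-hotplug.txt describes elsewhere; the foobar_* names are the document's own placeholders, and the initcall level chosen here is an arbitrary illustration.

	/* Sketch: hook the example notifier up during boot/module init. */
	#include <linux/cpu.h>
	#include <linux/init.h>

	static int __init foobar_init(void)
	{
		/* foobar_cpu_notifier is the struct shown in the hunk above. */
		register_cpu_notifier(&foobar_cpu_notifier);
		return 0;
	}
	core_initcall(foobar_init);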
diff --git a/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt b/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt new file mode 100644 index 000000000000..31406fd4a43e --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt | |||
@@ -0,0 +1,27 @@ | |||
1 | Haoyu Microelectronics HYM8563 Real Time Clock | ||
2 | |||
3 | The HYM8563 provides basic rtc and alarm functionality | ||
4 | as well as a clock output of up to 32kHz. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: should be: "haoyu,hym8563" | ||
8 | - reg: i2c address | ||
9 | - interrupts: rtc alarm/event interrupt | ||
10 | - #clock-cells: the value should be 0 | ||
11 | |||
12 | Example: | ||
13 | |||
14 | hym8563: hym8563@51 { | ||
15 | compatible = "haoyu,hym8563"; | ||
16 | reg = <0x51>; | ||
17 | |||
18 | interrupts = <13 IRQ_TYPE_EDGE_FALLING>; | ||
19 | |||
20 | #clock-cells = <0>; | ||
21 | }; | ||
22 | |||
23 | device { | ||
24 | ... | ||
25 | clocks = <&hym8563>; | ||
26 | ... | ||
27 | }; | ||
diff --git a/Documentation/devicetree/bindings/rtc/maxim,ds1742.txt b/Documentation/devicetree/bindings/rtc/maxim,ds1742.txt new file mode 100644 index 000000000000..d0f937c355b5 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/maxim,ds1742.txt | |||
@@ -0,0 +1,12 @@ | |||
1 | * Maxim (Dallas) DS1742/DS1743 Real Time Clock | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Should contain "maxim,ds1742". | ||
5 | - reg: Physical base address of the RTC and length of memory | ||
6 | mapped region. | ||
7 | |||
8 | Example: | ||
9 | rtc: rtc@10000000 { | ||
10 | compatible = "maxim,ds1742"; | ||
11 | reg = <0x10000000 0x800>; | ||
12 | }; | ||
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index ff415d183352..520596da7953 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
@@ -34,6 +34,7 @@ fsl Freescale Semiconductor | |||
34 | GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 34 | GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. |
35 | gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 35 | gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. |
36 | gmt Global Mixed-mode Technology, Inc. | 36 | gmt Global Mixed-mode Technology, Inc. |
37 | haoyu Haoyu Microelectronic Co. Ltd. | ||
37 | hisilicon Hisilicon Limited. | 38 | hisilicon Hisilicon Limited. |
38 | hp Hewlett Packard | 39 | hp Hewlett Packard |
39 | ibm International Business Machines (IBM) | 40 | ibm International Business Machines (IBM) |
diff --git a/Documentation/dynamic-debug-howto.txt b/Documentation/dynamic-debug-howto.txt index 1bbdcfcf1f13..46325eb2ea76 100644 --- a/Documentation/dynamic-debug-howto.txt +++ b/Documentation/dynamic-debug-howto.txt | |||
@@ -108,6 +108,12 @@ If your query set is big, you can batch them too: | |||
108 | 108 | ||
109 | ~# cat query-batch-file > <debugfs>/dynamic_debug/control | 109 | ~# cat query-batch-file > <debugfs>/dynamic_debug/control |
110 | 110 | ||
111 | Another way is to use wildcards. The match rule supports '*' (matches | ||
112 | zero or more characters) and '?' (matches exactly one character). For | ||
113 | example, you can match all usb drivers: | ||
114 | |||
115 | ~# echo "file drivers/usb/* +p" > <debugfs>/dynamic_debug/control | ||
116 | |||
111 | At the syntactical level, a command comprises a sequence of match | 117 | At the syntactical level, a command comprises a sequence of match |
112 | specifications, followed by a flags change specification. | 118 | specifications, followed by a flags change specification. |
113 | 119 | ||
@@ -315,6 +321,9 @@ nullarbor:~ # echo -n 'func svc_process -p' > | |||
315 | nullarbor:~ # echo -n 'format "nfsd: READ" +p' > | 321 | nullarbor:~ # echo -n 'format "nfsd: READ" +p' > |
316 | <debugfs>/dynamic_debug/control | 322 | <debugfs>/dynamic_debug/control |
317 | 323 | ||
324 | // enable messages in files whose paths include the string "usb" | ||
325 | nullarbor:~ # echo -n '*usb* +p' > <debugfs>/dynamic_debug/control | ||
326 | |||
318 | // enable all messages | 327 | // enable all messages |
319 | nullarbor:~ # echo -n '+p' > <debugfs>/dynamic_debug/control | 328 | nullarbor:~ # echo -n '+p' > <debugfs>/dynamic_debug/control |
320 | 329 | ||
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX index 8042050eb265..632211cbdd56 100644 --- a/Documentation/filesystems/00-INDEX +++ b/Documentation/filesystems/00-INDEX | |||
@@ -10,24 +10,32 @@ afs.txt | |||
10 | - info and examples for the distributed AFS (Andrew File System) fs. | 10 | - info and examples for the distributed AFS (Andrew File System) fs. |
11 | affs.txt | 11 | affs.txt |
12 | - info and mount options for the Amiga Fast File System. | 12 | - info and mount options for the Amiga Fast File System. |
13 | autofs4-mount-control.txt | ||
14 | - info on device control operations for autofs4 module. | ||
13 | automount-support.txt | 15 | automount-support.txt |
14 | - information about filesystem automount support. | 16 | - information about filesystem automount support. |
15 | befs.txt | 17 | befs.txt |
16 | - information about the BeOS filesystem for Linux. | 18 | - information about the BeOS filesystem for Linux. |
17 | bfs.txt | 19 | bfs.txt |
18 | - info for the SCO UnixWare Boot Filesystem (BFS). | 20 | - info for the SCO UnixWare Boot Filesystem (BFS). |
21 | btrfs.txt | ||
22 | - info for the BTRFS filesystem. | ||
23 | caching/ | ||
24 | - directory containing filesystem cache documentation. | ||
19 | ceph.txt | 25 | ceph.txt |
20 | - info for the Ceph Distributed File System | 26 | - info for the Ceph Distributed File System. |
21 | cifs.txt | 27 | cifs/ |
22 | - description of the CIFS filesystem. | 28 | - directory containing CIFS filesystem documentation and example code. |
23 | coda.txt | 29 | coda.txt |
24 | - description of the CODA filesystem. | 30 | - description of the CODA filesystem. |
25 | configfs/ | 31 | configfs/ |
26 | - directory containing configfs documentation and example code. | 32 | - directory containing configfs documentation and example code. |
27 | cramfs.txt | 33 | cramfs.txt |
28 | - info on the cram filesystem for small storage (ROMs etc). | 34 | - info on the cram filesystem for small storage (ROMs etc). |
29 | dentry-locking.txt | 35 | debugfs.txt |
30 | - info on the RCU-based dcache locking model. | 36 | - info on the debugfs filesystem. |
37 | devpts.txt | ||
38 | - info on the devpts filesystem. | ||
31 | directory-locking | 39 | directory-locking |
32 | - info about the locking scheme used for directory operations. | 40 | - info about the locking scheme used for directory operations. |
33 | dlmfs.txt | 41 | dlmfs.txt |
@@ -35,7 +43,7 @@ dlmfs.txt | |||
35 | dnotify.txt | 43 | dnotify.txt |
36 | - info about directory notification in Linux. | 44 | - info about directory notification in Linux. |
37 | dnotify_test.c | 45 | dnotify_test.c |
38 | - example program for dnotify | 46 | - example program for dnotify. |
39 | ecryptfs.txt | 47 | ecryptfs.txt |
40 | - docs on eCryptfs: stacked cryptographic filesystem for Linux. | 48 | - docs on eCryptfs: stacked cryptographic filesystem for Linux. |
41 | efivarfs.txt | 49 | efivarfs.txt |
@@ -48,12 +56,18 @@ ext3.txt | |||
48 | - info, mount options and specifications for the Ext3 filesystem. | 56 | - info, mount options and specifications for the Ext3 filesystem. |
49 | ext4.txt | 57 | ext4.txt |
50 | - info, mount options and specifications for the Ext4 filesystem. | 58 | - info, mount options and specifications for the Ext4 filesystem. |
51 | files.txt | ||
52 | - info on file management in the Linux kernel. | ||
53 | f2fs.txt | 59 | f2fs.txt |
54 | - info and mount options for the F2FS filesystem. | 60 | - info and mount options for the F2FS filesystem. |
61 | fiemap.txt | ||
62 | - info on fiemap ioctl. | ||
63 | files.txt | ||
64 | - info on file management in the Linux kernel. | ||
55 | fuse.txt | 65 | fuse.txt |
56 | - info on the Filesystem in User SpacE including mount options. | 66 | - info on the Filesystem in User SpacE including mount options. |
67 | gfs2-glocks.txt | ||
68 | - info on the Global File System 2 - Glock internal locking rules. | ||
69 | gfs2-uevents.txt | ||
70 | - info on the Global File System 2 - uevents. | ||
57 | gfs2.txt | 71 | gfs2.txt |
58 | - info on the Global File System 2. | 72 | - info on the Global File System 2. |
59 | hfs.txt | 73 | hfs.txt |
@@ -84,40 +98,58 @@ ntfs.txt | |||
84 | - info and mount options for the NTFS filesystem (Windows NT). | 98 | - info and mount options for the NTFS filesystem (Windows NT). |
85 | ocfs2.txt | 99 | ocfs2.txt |
86 | - info and mount options for the OCFS2 clustered filesystem. | 100 | - info and mount options for the OCFS2 clustered filesystem. |
101 | omfs.txt | ||
102 | - info on the Optimized MPEG FileSystem. | ||
103 | path-lookup.txt | ||
104 | - info on path walking and name lookup locking. | ||
105 | pohmelfs/ | ||
106 | - directory containing pohmelfs filesystem documentation. | ||
87 | porting | 107 | porting |
88 | - various information on filesystem porting. | 108 | - various information on filesystem porting. |
89 | proc.txt | 109 | proc.txt |
90 | - info on Linux's /proc filesystem. | 110 | - info on Linux's /proc filesystem. |
111 | qnx6.txt | ||
112 | - info on the QNX6 filesystem. | ||
113 | quota.txt | ||
114 | - info on Quota subsystem. | ||
91 | ramfs-rootfs-initramfs.txt | 115 | ramfs-rootfs-initramfs.txt |
92 | - info on the 'in memory' filesystems ramfs, rootfs and initramfs. | 116 | - info on the 'in memory' filesystems ramfs, rootfs and initramfs. |
93 | reiser4.txt | ||
94 | - info on the Reiser4 filesystem based on dancing tree algorithms. | ||
95 | relay.txt | 117 | relay.txt |
96 | - info on relay, for efficient streaming from kernel to user space. | 118 | - info on relay, for efficient streaming from kernel to user space. |
97 | romfs.txt | 119 | romfs.txt |
98 | - description of the ROMFS filesystem. | 120 | - description of the ROMFS filesystem. |
99 | seq_file.txt | 121 | seq_file.txt |
100 | - how to use the seq_file API | 122 | - how to use the seq_file API. |
101 | sharedsubtree.txt | 123 | sharedsubtree.txt |
102 | - a description of shared subtrees for namespaces. | 124 | - a description of shared subtrees for namespaces. |
103 | spufs.txt | 125 | spufs.txt |
104 | - info and mount options for the SPU filesystem used on Cell. | 126 | - info and mount options for the SPU filesystem used on Cell. |
127 | squashfs.txt | ||
128 | - info on the squashfs filesystem. | ||
105 | sysfs-pci.txt | 129 | sysfs-pci.txt |
106 | - info on accessing PCI device resources through sysfs. | 130 | - info on accessing PCI device resources through sysfs. |
131 | sysfs-tagging.txt | ||
132 | - info on sysfs tagging to avoid duplicates. | ||
107 | sysfs.txt | 133 | sysfs.txt |
108 | - info on sysfs, a ram-based filesystem for exporting kernel objects. | 134 | - info on sysfs, a ram-based filesystem for exporting kernel objects. |
109 | sysv-fs.txt | 135 | sysv-fs.txt |
110 | - info on the SystemV/V7/Xenix/Coherent filesystem. | 136 | - info on the SystemV/V7/Xenix/Coherent filesystem. |
111 | tmpfs.txt | 137 | tmpfs.txt |
112 | - info on tmpfs, a filesystem that holds all files in virtual memory. | 138 | - info on tmpfs, a filesystem that holds all files in virtual memory. |
139 | ubifs.txt | ||
140 | - info on the Unsorted Block Images FileSystem. | ||
113 | udf.txt | 141 | udf.txt |
114 | - info and mount options for the UDF filesystem. | 142 | - info and mount options for the UDF filesystem. |
115 | ufs.txt | 143 | ufs.txt |
116 | - info on the ufs filesystem. | 144 | - info on the ufs filesystem. |
117 | vfat.txt | 145 | vfat.txt |
118 | - info on using the VFAT filesystem used in Windows NT and Windows 95 | 146 | - info on using the VFAT filesystem used in Windows NT and Windows 95. |
119 | vfs.txt | 147 | vfs.txt |
120 | - overview of the Virtual File System | 148 | - overview of the Virtual File System. |
149 | xfs-delayed-logging-design.txt | ||
150 | - info on the XFS Delayed Logging Design. | ||
151 | xfs-self-describing-metadata.txt | ||
152 | - info on XFS Self Describing Metadata. | ||
121 | xfs.txt | 153 | xfs.txt |
122 | - info and mount options for the XFS filesystem. | 154 | - info and mount options for the XFS filesystem. |
123 | xip.txt | 155 | xip.txt |
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index 873a2ab2e9f8..06887d46ccf2 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt | |||
@@ -81,6 +81,62 @@ nodiscard(*) The discard/TRIM commands are sent to the underlying | |||
81 | block device when blocks are freed. This is useful | 81 | block device when blocks are freed. This is useful |
82 | for SSD devices and sparse/thinly-provisioned LUNs. | 82 | for SSD devices and sparse/thinly-provisioned LUNs. |
83 | 83 | ||
84 | Ioctls | ||
85 | ====== | ||
86 | |||
87 | There is some NILFS2-specific functionality which can be accessed by applications | ||
88 | through the system call interface. The list of all NILFS2-specific ioctls is | ||
89 | shown in the table below. | ||
90 | |||
91 | Table of NILFS2 specific ioctls | ||
92 | .............................................................................. | ||
93 | Ioctl Description | ||
94 | NILFS_IOCTL_CHANGE_CPMODE Change mode of given checkpoint between | ||
95 | checkpoint and snapshot state. This ioctl is | ||
96 | used in chcp and mkcp utilities. | ||
97 | |||
98 | NILFS_IOCTL_DELETE_CHECKPOINT Remove checkpoint from NILFS2 file system. | ||
99 | This ioctl is used in rmcp utility. | ||
100 | |||
101 | NILFS_IOCTL_GET_CPINFO Return info about requested checkpoints. This | ||
102 | ioctl is used in lscp utility and by | ||
103 | nilfs_cleanerd daemon. | ||
104 | |||
105 | NILFS_IOCTL_GET_CPSTAT Return checkpoints statistics. This ioctl is | ||
106 | used by lscp, rmcp utilities and by | ||
107 | nilfs_cleanerd daemon. | ||
108 | |||
109 | NILFS_IOCTL_GET_SUINFO Return segment usage info about requested | ||
110 | segments. This ioctl is used in lssu, | ||
111 | nilfs_resize utilities and by nilfs_cleanerd | ||
112 | daemon. | ||
113 | |||
114 | NILFS_IOCTL_GET_SUSTAT Return segment usage statistics. This ioctl | ||
115 | is used in lssu, nilfs_resize utilities and | ||
116 | by nilfs_cleanerd daemon. | ||
117 | |||
118 | NILFS_IOCTL_GET_VINFO Return information on virtual block addresses. | ||
119 | This ioctl is used by nilfs_cleanerd daemon. | ||
120 | |||
121 | NILFS_IOCTL_GET_BDESCS Return information about descriptors of disk | ||
122 | block numbers. This ioctl is used by | ||
123 | nilfs_cleanerd daemon. | ||
124 | |||
125 | NILFS_IOCTL_CLEAN_SEGMENTS Run a garbage collection pass using the | ||
126 | parameters requested from userspace. | ||
127 | This ioctl is used by the nilfs_cleanerd | ||
128 | daemon. | ||
129 | |||
130 | NILFS_IOCTL_SYNC Make a checkpoint. This ioctl is used in | ||
131 | mkcp utility. | ||
132 | |||
133 | NILFS_IOCTL_RESIZE Resize NILFS2 volume. This ioctl is used | ||
134 | by nilfs_resize utility. | ||
135 | |||
136 | NILFS_IOCTL_SET_ALLOC_RANGE Define the lower and upper limits of the | ||
137 | segment allocation range in bytes. This | ||
138 | ioctl is used by the nilfs_resize utility. | ||
139 | |||
84 | NILFS2 usage | 140 | NILFS2 usage |
85 | ============ | 141 | ============ |
86 | 142 | ||
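Referring back to the ioctl table added above, here is a hedged userspace sketch of the simplest entry, NILFS_IOCTL_SYNC (roughly what the mkcp utility does). The header location, the choice of file descriptor and the __u64 checkpoint-number argument are assumptions based on the NILFS2 UAPI of this era and may differ between kernel versions.

	/* Sketch only: force a NILFS2 checkpoint via NILFS_IOCTL_SYNC.
	 * Header name and argument type are assumptions; verify against
	 * the nilfs2 UAPI headers of the target kernel.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include <linux/nilfs2_fs.h>	/* assumed location of NILFS_IOCTL_SYNC */

	int main(int argc, char **argv)
	{
		__u64 cno = 0;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <nilfs2-mount-point>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);	/* any fd on the NILFS2 volume */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, NILFS_IOCTL_SYNC, &cno) < 0) {
			perror("NILFS_IOCTL_SYNC");
			close(fd);
			return 1;
		}
		printf("created checkpoint %llu\n", (unsigned long long)cno);
		close(fd);
		return 0;
	}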
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index a6619b7064b9..b35a64b82f9e 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt | |||
@@ -108,12 +108,12 @@ static DEVICE_ATTR(foo, S_IWUSR | S_IRUGO, show_foo, store_foo); | |||
108 | is equivalent to doing: | 108 | is equivalent to doing: |
109 | 109 | ||
110 | static struct device_attribute dev_attr_foo = { | 110 | static struct device_attribute dev_attr_foo = { |
111 | .attr = { | 111 | .attr = { |
112 | .name = "foo", | 112 | .name = "foo", |
113 | .mode = S_IWUSR | S_IRUGO, | 113 | .mode = S_IWUSR | S_IRUGO, |
114 | .show = show_foo, | ||
115 | .store = store_foo, | ||
116 | }, | 114 | }, |
115 | .show = show_foo, | ||
116 | .store = store_foo, | ||
117 | }; | 117 | }; |
118 | 118 | ||
119 | 119 | ||
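A short sketch of the callbacks wired into dev_attr_foo above may help show why .show and .store belong to struct device_attribute rather than to the embedded struct attribute: their prototypes take the device and the attribute itself. show_foo/store_foo are the document's placeholder names; foo_value is an illustrative backing variable added here.

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	static int foo_value;	/* illustrative backing variable */

	static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sprintf(buf, "%d\n", foo_value);
	}

	static ssize_t store_foo(struct device *dev, struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		if (kstrtoint(buf, 0, &foo_value))
			return -EINVAL;
		return count;
	}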
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index d4762d7ebd14..44738564b2ee 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1059,7 +1059,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1059 | debugfs files are removed at module unload time. | 1059 | debugfs files are removed at module unload time. |
1060 | 1060 | ||
1061 | gpt [EFI] Forces disk with valid GPT signature but | 1061 | gpt [EFI] Forces disk with valid GPT signature but |
1062 | invalid Protective MBR to be treated as GPT. | 1062 | invalid Protective MBR to be treated as GPT. If the |
1063 | primary GPT is corrupted, it enables the backup/alternate | ||
1064 | GPT to be used instead. | ||
1063 | 1065 | ||
1064 | grcan.enable0= [HW] Configuration of physical interface 0. Determines | 1066 | grcan.enable0= [HW] Configuration of physical interface 0. Determines |
1065 | the "Enable 0" bit of the configuration register. | 1067 | the "Enable 0" bit of the configuration register. |
@@ -1461,6 +1463,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1461 | Valid arguments: on, off | 1463 | Valid arguments: on, off |
1462 | Default: on | 1464 | Default: on |
1463 | 1465 | ||
1466 | kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode | ||
1467 | Valid arguments: 0, 1, 2 | ||
1468 | kmemcheck=0 (disabled) | ||
1469 | kmemcheck=1 (enabled) | ||
1470 | kmemcheck=2 (one-shot mode) | ||
1471 | Default: 2 (one-shot mode) | ||
1472 | |||
1464 | kstack=N [X86] Print N words from the kernel stack | 1473 | kstack=N [X86] Print N words from the kernel stack |
1465 | in oops dumps. | 1474 | in oops dumps. |
1466 | 1475 | ||
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt index 445ad743ec81..6f4eb322ffaf 100644 --- a/Documentation/printk-formats.txt +++ b/Documentation/printk-formats.txt | |||
@@ -55,14 +55,21 @@ Struct Resources: | |||
55 | For printing struct resources. The 'R' and 'r' specifiers result in a | 55 | For printing struct resources. The 'R' and 'r' specifiers result in a |
56 | printed resource with ('R') or without ('r') a decoded flags member. | 56 | printed resource with ('R') or without ('r') a decoded flags member. |
57 | 57 | ||
58 | Physical addresses: | 58 | Physical address types (phys_addr_t):
59 | 59 | ||
60 | %pa 0x01234567 or 0x0123456789abcdef | 60 | %pa[p] 0x01234567 or 0x0123456789abcdef |
61 | 61 | ||
62 | For printing a phys_addr_t type (and its derivatives, such as | 62 | For printing a phys_addr_t type (and its derivatives, such as |
63 | resource_size_t) which can vary based on build options, regardless of | 63 | resource_size_t) which can vary based on build options, regardless of |
64 | the width of the CPU data path. Passed by reference. | 64 | the width of the CPU data path. Passed by reference. |
65 | 65 | ||
66 | DMA address types (dma_addr_t): | ||
67 | |||
68 | %pad 0x01234567 or 0x0123456789abcdef | ||
69 | |||
70 | For printing a dma_addr_t type which can vary based on build options, | ||
71 | regardless of the width of the CPU data path. Passed by reference. | ||
72 | |||
66 | Raw buffer as a hex string: | 73 | Raw buffer as a hex string: |
67 | %*ph 00 01 02 ... 3f | 74 | %*ph 00 01 02 ... 3f |
68 | %*phC 00:01:02: ... :3f | 75 | %*phC 00:01:02: ... :3f |
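A brief kernel-code sketch of the two specifiers documented above; note that, as the text says, both phys_addr_t and dma_addr_t are passed by reference. The function is purely illustrative.

	#include <linux/printk.h>
	#include <linux/types.h>

	static void print_addrs_example(phys_addr_t phys, dma_addr_t dma)
	{
		/* %pa and %pad both take a pointer to the variable. */
		pr_info("physical address: %pa\n", &phys);
		pr_info("dma address:      %pad\n", &dma);
	}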
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 6d486404200e..ee9a2f983b99 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -33,6 +33,7 @@ show up in /proc/sys/kernel: | |||
33 | - domainname | 33 | - domainname |
34 | - hostname | 34 | - hostname |
35 | - hotplug | 35 | - hotplug |
36 | - kexec_load_disabled | ||
36 | - kptr_restrict | 37 | - kptr_restrict |
37 | - kstack_depth_to_print [ X86 only ] | 38 | - kstack_depth_to_print [ X86 only ] |
38 | - l2cr [ PPC only ] | 39 | - l2cr [ PPC only ] |
@@ -287,6 +288,18 @@ Default value is "/sbin/hotplug". | |||
287 | 288 | ||
288 | ============================================================== | 289 | ============================================================== |
289 | 290 | ||
291 | kexec_load_disabled: | ||
292 | |||
293 | A toggle indicating if the kexec_load syscall has been disabled. This | ||
294 | value defaults to 0 (false: kexec_load enabled), but can be set to 1 | ||
295 | (true: kexec_load disabled). Once true, kexec can no longer be used, and | ||
296 | the toggle cannot be set back to false. This allows a kexec image to be | ||
297 | loaded before disabling the syscall, allowing a system to set up (and | ||
298 | later use) an image without it being altered. Generally used together | ||
299 | with the "modules_disabled" sysctl. | ||
300 | |||
301 | ============================================================== | ||
302 | |||
290 | kptr_restrict: | 303 | kptr_restrict: |
291 | 304 | ||
292 | This toggle indicates whether restrictions are placed on | 305 | This toggle indicates whether restrictions are placed on |
@@ -331,7 +344,7 @@ A toggle value indicating if modules are allowed to be loaded | |||
331 | in an otherwise modular kernel. This toggle defaults to off | 344 | in an otherwise modular kernel. This toggle defaults to off |
332 | (0), but can be set true (1). Once true, modules can be | 345 | (0), but can be set true (1). Once true, modules can be |
333 | neither loaded nor unloaded, and the toggle cannot be set back | 346 | neither loaded nor unloaded, and the toggle cannot be set back |
334 | to false. | 347 | to false. Generally used with the "kexec_load_disabled" toggle. |
335 | 348 | ||
336 | ============================================================== | 349 | ============================================================== |
337 | 350 | ||
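Returning to the kexec_load_disabled sysctl added above: once the desired kexec image has been loaded, the toggle can be flipped from userspace and cannot be cleared again. A minimal sketch follows (requires root; only the proc path comes from the documentation above, the rest is illustrative):

	/* Sketch: disable kexec_load after the crash/kexec image is in place. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/kexec_load_disabled", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fputs("1\n", f) == EOF || fclose(f) != 0) {
			perror("write");
			return 1;
		}
		return 0;
	}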
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl index 4a37c4759cd2..00e425faa2fd 100644 --- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl +++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl | |||
@@ -123,7 +123,7 @@ my $regex_writepage; | |||
123 | 123 | ||
124 | # Static regex used. Specified like this for readability and for use with /o | 124 | # Static regex used. Specified like this for readability and for use with /o |
125 | # (process_pid) (cpus ) ( time ) (tpoint ) (details) | 125 | # (process_pid) (cpus ) ( time ) (tpoint ) (details) |
126 | my $regex_traceevent = '\s*([a-zA-Z0-9-]*)\s*(\[[0-9]*\])\s*([0-9.]*):\s*([a-zA-Z_]*):\s*(.*)'; | 126 | my $regex_traceevent = '\s*([a-zA-Z0-9-]*)\s*(\[[0-9]*\])(\s*[dX.][Nnp.][Hhs.][0-9a-fA-F.]*|)\s*([0-9.]*):\s*([a-zA-Z_]*):\s*(.*)'; |
127 | my $regex_statname = '[-0-9]*\s\((.*)\).*'; | 127 | my $regex_statname = '[-0-9]*\s\((.*)\).*'; |
128 | my $regex_statppid = '[-0-9]*\s\(.*\)\s[A-Za-z]\s([0-9]*).*'; | 128 | my $regex_statppid = '[-0-9]*\s\(.*\)\s[A-Za-z]\s([0-9]*).*'; |
129 | 129 | ||
@@ -270,8 +270,8 @@ EVENT_PROCESS: | |||
270 | while ($traceevent = <STDIN>) { | 270 | while ($traceevent = <STDIN>) { |
271 | if ($traceevent =~ /$regex_traceevent/o) { | 271 | if ($traceevent =~ /$regex_traceevent/o) { |
272 | $process_pid = $1; | 272 | $process_pid = $1; |
273 | $timestamp = $3; | 273 | $timestamp = $4; |
274 | $tracepoint = $4; | 274 | $tracepoint = $5; |
275 | 275 | ||
276 | $process_pid =~ /(.*)-([0-9]*)$/; | 276 | $process_pid =~ /(.*)-([0-9]*)$/; |
277 | my $process = $1; | 277 | my $process = $1; |
@@ -299,7 +299,7 @@ EVENT_PROCESS: | |||
299 | $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}++; | 299 | $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}++; |
300 | $perprocesspid{$process_pid}->{STATE_DIRECT_BEGIN} = $timestamp; | 300 | $perprocesspid{$process_pid}->{STATE_DIRECT_BEGIN} = $timestamp; |
301 | 301 | ||
302 | $details = $5; | 302 | $details = $6; |
303 | if ($details !~ /$regex_direct_begin/o) { | 303 | if ($details !~ /$regex_direct_begin/o) { |
304 | print "WARNING: Failed to parse mm_vmscan_direct_reclaim_begin as expected\n"; | 304 | print "WARNING: Failed to parse mm_vmscan_direct_reclaim_begin as expected\n"; |
305 | print " $details\n"; | 305 | print " $details\n"; |
@@ -322,7 +322,7 @@ EVENT_PROCESS: | |||
322 | $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency"; | 322 | $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency"; |
323 | } | 323 | } |
324 | } elsif ($tracepoint eq "mm_vmscan_kswapd_wake") { | 324 | } elsif ($tracepoint eq "mm_vmscan_kswapd_wake") { |
325 | $details = $5; | 325 | $details = $6; |
326 | if ($details !~ /$regex_kswapd_wake/o) { | 326 | if ($details !~ /$regex_kswapd_wake/o) { |
327 | print "WARNING: Failed to parse mm_vmscan_kswapd_wake as expected\n"; | 327 | print "WARNING: Failed to parse mm_vmscan_kswapd_wake as expected\n"; |
328 | print " $details\n"; | 328 | print " $details\n"; |
@@ -356,7 +356,7 @@ EVENT_PROCESS: | |||
356 | } elsif ($tracepoint eq "mm_vmscan_wakeup_kswapd") { | 356 | } elsif ($tracepoint eq "mm_vmscan_wakeup_kswapd") { |
357 | $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD}++; | 357 | $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD}++; |
358 | 358 | ||
359 | $details = $5; | 359 | $details = $6; |
360 | if ($details !~ /$regex_wakeup_kswapd/o) { | 360 | if ($details !~ /$regex_wakeup_kswapd/o) { |
361 | print "WARNING: Failed to parse mm_vmscan_wakeup_kswapd as expected\n"; | 361 | print "WARNING: Failed to parse mm_vmscan_wakeup_kswapd as expected\n"; |
362 | print " $details\n"; | 362 | print " $details\n"; |
@@ -366,7 +366,7 @@ EVENT_PROCESS: | |||
366 | my $order = $3; | 366 | my $order = $3; |
367 | $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD_PERORDER}[$order]++; | 367 | $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD_PERORDER}[$order]++; |
368 | } elsif ($tracepoint eq "mm_vmscan_lru_isolate") { | 368 | } elsif ($tracepoint eq "mm_vmscan_lru_isolate") { |
369 | $details = $5; | 369 | $details = $6; |
370 | if ($details !~ /$regex_lru_isolate/o) { | 370 | if ($details !~ /$regex_lru_isolate/o) { |
371 | print "WARNING: Failed to parse mm_vmscan_lru_isolate as expected\n"; | 371 | print "WARNING: Failed to parse mm_vmscan_lru_isolate as expected\n"; |
372 | print " $details\n"; | 372 | print " $details\n"; |
@@ -387,7 +387,7 @@ EVENT_PROCESS: | |||
387 | } | 387 | } |
388 | $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty; | 388 | $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty; |
389 | } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") { | 389 | } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") { |
390 | $details = $5; | 390 | $details = $6; |
391 | if ($details !~ /$regex_lru_shrink_inactive/o) { | 391 | if ($details !~ /$regex_lru_shrink_inactive/o) { |
392 | print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n"; | 392 | print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n"; |
393 | print " $details\n"; | 393 | print " $details\n"; |
@@ -397,7 +397,7 @@ EVENT_PROCESS: | |||
397 | my $nr_reclaimed = $4; | 397 | my $nr_reclaimed = $4; |
398 | $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed; | 398 | $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed; |
399 | } elsif ($tracepoint eq "mm_vmscan_writepage") { | 399 | } elsif ($tracepoint eq "mm_vmscan_writepage") { |
400 | $details = $5; | 400 | $details = $6; |
401 | if ($details !~ /$regex_writepage/o) { | 401 | if ($details !~ /$regex_writepage/o) { |
402 | print "WARNING: Failed to parse mm_vmscan_writepage as expected\n"; | 402 | print "WARNING: Failed to parse mm_vmscan_writepage as expected\n"; |
403 | print " $details\n"; | 403 | print " $details\n"; |
diff --git a/Documentation/vm/locking b/Documentation/vm/locking deleted file mode 100644 index f61228bd6395..000000000000 --- a/Documentation/vm/locking +++ /dev/null | |||
@@ -1,130 +0,0 @@ | |||
1 | Started Oct 1999 by Kanoj Sarcar <kanojsarcar@yahoo.com> | ||
2 | |||
3 | The intent of this file is to have an uptodate, running commentary | ||
4 | from different people about how locking and synchronization is done | ||
5 | in the Linux vm code. | ||
6 | |||
7 | page_table_lock & mmap_sem | ||
8 | -------------------------------------- | ||
9 | |||
10 | Page stealers pick processes out of the process pool and scan for | ||
11 | the best process to steal pages from. To guarantee the existence | ||
12 | of the victim mm, a mm_count inc and a mmdrop are done in swap_out(). | ||
13 | Page stealers hold kernel_lock to protect against a bunch of races. | ||
14 | The vma list of the victim mm is also scanned by the stealer, | ||
15 | and the page_table_lock is used to preserve list sanity against the | ||
16 | process adding/deleting to the list. This also guarantees existence | ||
17 | of the vma. Vma existence is not guaranteed once try_to_swap_out() | ||
18 | drops the page_table_lock. To guarantee the existence of the underlying | ||
19 | file structure, a get_file is done before the swapout() method is | ||
20 | invoked. The page passed into swapout() is guaranteed not to be reused | ||
21 | for a different purpose because the page reference count due to being | ||
22 | present in the user's pte is not released till after swapout() returns. | ||
23 | |||
24 | Any code that modifies the vmlist, or the vm_start/vm_end/ | ||
25 | vm_flags:VM_LOCKED/vm_next of any vma *in the list* must prevent | ||
26 | kswapd from looking at the chain. | ||
27 | |||
28 | The rules are: | ||
29 | 1. To scan the vmlist (look but don't touch) you must hold the | ||
30 | mmap_sem with read bias, i.e. down_read(&mm->mmap_sem) | ||
31 | 2. To modify the vmlist you need to hold the mmap_sem with | ||
32 | read&write bias, i.e. down_write(&mm->mmap_sem) *AND* | ||
33 | you need to take the page_table_lock. | ||
34 | 3. The swapper takes _just_ the page_table_lock, this is done | ||
35 | because the mmap_sem can be an extremely long lived lock | ||
36 | and the swapper just cannot sleep on that. | ||
37 | 4. The exception to this rule is expand_stack, which just | ||
38 | takes the read lock and the page_table_lock, this is ok | ||
39 | because it doesn't really modify fields anybody relies on. | ||
40 | 5. You must be able to guarantee that while holding page_table_lock | ||
41 | or page_table_lock of mm A, you will not try to get either lock | ||
42 | for mm B. | ||
43 | |||
44 | The caveats are: | ||
45 | 1. find_vma() makes use of, and updates, the mmap_cache pointer hint. | ||
46 | The update of mmap_cache is racy (page stealer can race with other code | ||
47 | that invokes find_vma with mmap_sem held), but that is okay, since it | ||
48 | is a hint. This can be fixed, if desired, by having find_vma grab the | ||
49 | page_table_lock. | ||
50 | |||
51 | |||
52 | Code that add/delete elements from the vmlist chain are | ||
53 | 1. callers of insert_vm_struct | ||
54 | 2. callers of merge_segments | ||
55 | 3. callers of avl_remove | ||
56 | |||
57 | Code that changes vm_start/vm_end/vm_flags:VM_LOCKED of vma's on | ||
58 | the list: | ||
59 | 1. expand_stack | ||
60 | 2. mprotect | ||
61 | 3. mlock | ||
62 | 4. mremap | ||
63 | |||
64 | It is advisable that changes to vm_start/vm_end be protected, although | ||
65 | in some cases it is not really needed. Eg, vm_start is modified by | ||
66 | expand_stack(), it is hard to come up with a destructive scenario without | ||
67 | having the vmlist protection in this case. | ||
68 | |||
69 | The page_table_lock nests with the inode i_mmap_mutex and the kmem cache | ||
70 | c_spinlock spinlocks. This is okay, since the kmem code asks for pages after | ||
71 | dropping c_spinlock. The page_table_lock also nests with pagecache_lock and | ||
72 | pagemap_lru_lock spinlocks, and no code asks for memory with these locks | ||
73 | held. | ||
74 | |||
75 | The page_table_lock is grabbed while holding the kernel_lock spinning monitor. | ||
76 | |||
77 | The page_table_lock is a spin lock. | ||
78 | |||
79 | Note: PTL can also be used to guarantee that no new clones using the | ||
80 | mm start up ... this is a loose form of stability on mm_users. For | ||
81 | example, it is used in copy_mm to protect against a racing tlb_gather_mmu | ||
82 | single address space optimization, so that the zap_page_range (from | ||
83 | truncate) does not lose sending ipi's to cloned threads that might | ||
84 | be spawned underneath it and go to user mode to drag in pte's into tlbs. | ||
85 | |||
86 | swap_lock | ||
87 | -------------- | ||
88 | The swap devices are chained in priority order from the "swap_list" header. | ||
89 | The "swap_list" is used for the round-robin swaphandle allocation strategy. | ||
90 | The #free swaphandles is maintained in "nr_swap_pages". These two together | ||
91 | are protected by the swap_lock. | ||
92 | |||
93 | The swap_lock also protects all the device reference counts on the | ||
94 | corresponding swaphandles, maintained in the "swap_map" array, and the | ||
95 | "highest_bit" and "lowest_bit" fields. | ||
96 | |||
97 | The swap_lock is a spinlock, and is never acquired from intr level. | ||
98 | |||
99 | To prevent races between swap space deletion or async readahead swapins | ||
100 | deciding whether a swap handle is being used, ie worthy of being read in | ||
101 | from disk, and an unmap -> swap_free making the handle unused, the swap | ||
102 | delete and readahead code grabs a temp reference on the swaphandle to | ||
103 | prevent warning messages from swap_duplicate <- read_swap_cache_async. | ||
104 | |||
105 | Swap cache locking | ||
106 | ------------------ | ||
107 | Pages are added into the swap cache with kernel_lock held, to make sure | ||
108 | that multiple pages are not being added (and hence lost) by associating | ||
109 | all of them with the same swaphandle. | ||
110 | |||
111 | Pages are guaranteed not to be removed from the scache if the page is | ||
112 | "shared": ie, other processes hold reference on the page or the associated | ||
113 | swap handle. The only code that does not follow this rule is shrink_mmap, | ||
114 | which deletes pages from the swap cache if no process has a reference on | ||
115 | the page (multiple processes might have references on the corresponding | ||
116 | swap handle though). lookup_swap_cache() races with shrink_mmap, when | ||
117 | establishing a reference on a scache page, so, it must check whether the | ||
118 | page it located is still in the swapcache, or shrink_mmap deleted it. | ||
119 | (This race is due to the fact that shrink_mmap looks at the page ref | ||
120 | count with pagecache_lock, but then drops pagecache_lock before deleting | ||
121 | the page from the scache). | ||
122 | |||
123 | do_wp_page and do_swap_page have MP races in them while trying to figure | ||
124 | out whether a page is "shared", by looking at the page_count + swap_count. | ||
125 | To preserve the sum of the counts, the page lock _must_ be acquired before | ||
126 | calling is_page_shared (else processes might switch their swap_count refs | ||
127 | to the page count refs, after the page count ref has been snapshotted). | ||
128 | |||
129 | Swap device deletion code currently breaks all the scache assumptions, | ||
130 | since it grabs neither mmap_sem nor page_table_lock. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 15802a32469e..6270a0b2b99d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -93,6 +93,11 @@ Descriptions of section entries: | |||
93 | N: Files and directories with regex patterns. | 93 | N: Files and directories with regex patterns. |
94 | N: [^a-z]tegra all files whose path contains the word tegra | 94 | N: [^a-z]tegra all files whose path contains the word tegra |
95 | One pattern per line. Multiple N: lines acceptable. | 95 | One pattern per line. Multiple N: lines acceptable. |
96 | scripts/get_maintainer.pl treats F: pattern matches and N: pattern | ||
97 | matches differently. By default, get_maintainer will not look at | ||
98 | the git log history when an F: pattern match occurs. When an N: | ||
99 | match occurs, the git log history is also used to notify the | ||
100 | people that have git commit signatures. | ||
96 | X: Files and directories that are NOT maintained, same rules as F: | 101 | X: Files and directories that are NOT maintained, same rules as F: |
97 | Files exclusions are tested before file matches. | 102 | Files exclusions are tested before file matches. |
98 | Can be useful for excluding a specific subdirectory, for instance: | 103 | Can be useful for excluding a specific subdirectory, for instance: |
@@ -3375,7 +3380,6 @@ M: Jingoo Han <jg1.han@samsung.com> | |||
3375 | L: linux-fbdev@vger.kernel.org | 3380 | L: linux-fbdev@vger.kernel.org |
3376 | S: Maintained | 3381 | S: Maintained |
3377 | F: drivers/video/exynos/exynos_dp* | 3382 | F: drivers/video/exynos/exynos_dp* |
3378 | F: include/video/exynos_dp* | ||
3379 | 3383 | ||
3380 | EXYNOS MIPI DISPLAY DRIVERS | 3384 | EXYNOS MIPI DISPLAY DRIVERS |
3381 | M: Inki Dae <inki.dae@samsung.com> | 3385 | M: Inki Dae <inki.dae@samsung.com> |
@@ -3986,6 +3990,12 @@ S: Orphan | |||
3986 | F: Documentation/filesystems/hfs.txt | 3990 | F: Documentation/filesystems/hfs.txt |
3987 | F: fs/hfs/ | 3991 | F: fs/hfs/ |
3988 | 3992 | ||
3993 | HFSPLUS FILESYSTEM | ||
3994 | L: linux-fsdevel@vger.kernel.org | ||
3995 | S: Orphan | ||
3996 | F: Documentation/filesystems/hfsplus.txt | ||
3997 | F: fs/hfsplus/ | ||
3998 | |||
3989 | HGA FRAMEBUFFER DRIVER | 3999 | HGA FRAMEBUFFER DRIVER |
3990 | M: Ferenc Bakonyi <fero@drama.obuda.kando.hu> | 4000 | M: Ferenc Bakonyi <fero@drama.obuda.kando.hu> |
3991 | L: linux-nvidia@lists.surfsouth.com | 4001 | L: linux-nvidia@lists.surfsouth.com |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index d39dc9b95a2c..3ba48fe1c0a5 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
@@ -539,13 +539,13 @@ config SMP | |||
539 | depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL | 539 | depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL |
540 | ---help--- | 540 | ---help--- |
541 | This enables support for systems with more than one CPU. If you have | 541 | This enables support for systems with more than one CPU. If you have |
542 | a system with only one CPU, like most personal computers, say N. If | 542 | a system with only one CPU, say N. If you have a system with more |
543 | you have a system with more than one CPU, say Y. | 543 | than one CPU, say Y. |
544 | 544 | ||
545 | If you say N here, the kernel will run on single and multiprocessor | 545 | If you say N here, the kernel will run on uni- and multiprocessor |
546 | machines, but will use only one CPU of a multiprocessor machine. If | 546 | machines, but will use only one CPU of a multiprocessor machine. If |
547 | you say Y here, the kernel will run on many, but not all, | 547 | you say Y here, the kernel will run on many, but not all, |
548 | singleprocessor machines. On a singleprocessor machine, the kernel | 548 | uniprocessor machines. On a uniprocessor machine, the kernel |
549 | will run faster if you say N here. | 549 | will run faster if you say N here. |
550 | 550 | ||
551 | See also the SMP-HOWTO available at | 551 | See also the SMP-HOWTO available at |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 9063ae6553cc..5438cabbc45d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -128,8 +128,8 @@ config SMP | |||
128 | default n | 128 | default n |
129 | help | 129 | help |
130 | This enables support for systems with more than one CPU. If you have | 130 | This enables support for systems with more than one CPU. If you have |
131 | a system with only one CPU, like most personal computers, say N. If | 131 | a system with only one CPU, say N. If you have a system with more |
132 | you have a system with more than one CPU, say Y. | 132 | than one CPU, say Y. |
133 | 133 | ||
134 | if SMP | 134 | if SMP |
135 | 135 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index f9b0fd387c6f..dc6ef9a2c649 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1470,14 +1470,14 @@ config SMP | |||
1470 | depends on MMU || ARM_MPU | 1470 | depends on MMU || ARM_MPU |
1471 | help | 1471 | help |
1472 | This enables support for systems with more than one CPU. If you have | 1472 | This enables support for systems with more than one CPU. If you have |
1473 | a system with only one CPU, like most personal computers, say N. If | 1473 | a system with only one CPU, say N. If you have a system with more |
1474 | you have a system with more than one CPU, say Y. | 1474 | than one CPU, say Y. |
1475 | 1475 | ||
1476 | If you say N here, the kernel will run on single and multiprocessor | 1476 | If you say N here, the kernel will run on uni- and multiprocessor |
1477 | machines, but will use only one CPU of a multiprocessor machine. If | 1477 | machines, but will use only one CPU of a multiprocessor machine. If |
1478 | you say Y here, the kernel will run on many, but not all, single | 1478 | you say Y here, the kernel will run on many, but not all, |
1479 | processor machines. On a single processor machine, the kernel will | 1479 | uniprocessor machines. On a uniprocessor machine, the kernel |
1480 | run faster if you say N here. | 1480 | will run faster if you say N here. |
1481 | 1481 | ||
1482 | See also <file:Documentation/x86/i386/IO-APIC.txt>, | 1482 | See also <file:Documentation/x86/i386/IO-APIC.txt>, |
1483 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at | 1483 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at |
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index 4353cf239a13..e59dba12ce94 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h | |||
@@ -169,7 +169,11 @@ static inline void outsl(unsigned int port, const void *addr, | |||
169 | } | 169 | } |
170 | 170 | ||
171 | #define inb_p(port) inb(port) | 171 | #define inb_p(port) inb(port) |
172 | #define inw_p(port) inw(port) | ||
173 | #define inl_p(port) inl(port) | ||
172 | #define outb_p(val, port) outb((val), (port)) | 174 | #define outb_p(val, port) outb((val), (port)) |
175 | #define outw_p(val, port) outw((val), (port)) | ||
176 | #define outl_p(val, port) outl((val), (port)) | ||
173 | 177 | ||
174 | /* | 178 | /* |
175 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | 179 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem |
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h index b75b6bf4269c..1387f84b42b6 100644 --- a/arch/hexagon/include/asm/fixmap.h +++ b/arch/hexagon/include/asm/fixmap.h | |||
@@ -26,45 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | #include <asm/mem-layout.h> | 27 | #include <asm/mem-layout.h> |
28 | 28 | ||
29 | /* | 29 | #include <asm-generic/fixmap.h> |
30 | * Full fixmap support involves set_fixmap() functions, but | ||
31 | * these may not be needed if all we're after is an area for | ||
32 | * highmem kernel mappings. | ||
33 | */ | ||
34 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
35 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
36 | |||
37 | extern void __this_fixmap_does_not_exist(void); | ||
38 | |||
39 | /** | ||
40 | * fix_to_virt -- "index to address" translation. | ||
41 | * | ||
42 | * If anyone tries to use the idx directly without translation, | ||
43 | * we catch the bug with a NULL-deference kernel oops. Illegal | ||
44 | * ranges of incoming indices are caught too. | ||
45 | */ | ||
46 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
47 | { | ||
48 | /* | ||
49 | * This branch gets completely eliminated after inlining, | ||
50 | * except when someone tries to use fixaddr indices in an | ||
51 | * illegal way. (such as mixing up address types or using | ||
52 | * out-of-range indices). | ||
53 | * | ||
54 | * If it doesn't get removed, the linker will complain | ||
55 | * loudly with a reasonably clear error message.. | ||
56 | */ | ||
57 | if (idx >= __end_of_fixed_addresses) | ||
58 | __this_fixmap_does_not_exist(); | ||
59 | |||
60 | return __fix_to_virt(idx); | ||
61 | } | ||
62 | |||
63 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
64 | { | ||
65 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
66 | return __virt_to_fix(vaddr); | ||
67 | } | ||
68 | 30 | ||
69 | #define kmap_get_fixmap_pte(vaddr) \ | 31 | #define kmap_get_fixmap_pte(vaddr) \ |
70 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \ | 32 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \ |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index a8c3a11dc5ab..c063b054294e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -104,6 +104,7 @@ config HAVE_SETUP_PER_CPU_AREA | |||
104 | config DMI | 104 | config DMI |
105 | bool | 105 | bool |
106 | default y | 106 | default y |
107 | select DMI_SCAN_MACHINE_NON_EFI_FALLBACK | ||
107 | 108 | ||
108 | config EFI | 109 | config EFI |
109 | bool | 110 | bool |
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h index 185d3d18d0ec..f365a61f5c71 100644 --- a/arch/ia64/include/asm/dmi.h +++ b/arch/ia64/include/asm/dmi.h | |||
@@ -5,8 +5,10 @@ | |||
5 | #include <asm/io.h> | 5 | #include <asm/io.h> |
6 | 6 | ||
7 | /* Use normal IO mappings for DMI */ | 7 | /* Use normal IO mappings for DMI */ |
8 | #define dmi_ioremap ioremap | 8 | #define dmi_early_remap ioremap |
9 | #define dmi_iounmap(x,l) iounmap(x) | 9 | #define dmi_early_unmap(x, l) iounmap(x) |
10 | #define dmi_alloc(l) kzalloc(l, GFP_ATOMIC) | 10 | #define dmi_remap ioremap |
11 | #define dmi_unmap iounmap | ||
12 | #define dmi_alloc(l) kzalloc(l, GFP_ATOMIC) | ||
11 | 13 | ||
12 | #endif | 14 | #endif |
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 5a84b3a50741..efd1b927ccb7 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h | |||
@@ -71,6 +71,7 @@ | |||
71 | #include <linux/compiler.h> | 71 | #include <linux/compiler.h> |
72 | #include <linux/threads.h> | 72 | #include <linux/threads.h> |
73 | #include <linux/types.h> | 73 | #include <linux/types.h> |
74 | #include <linux/bitops.h> | ||
74 | 75 | ||
75 | #include <asm/fpu.h> | 76 | #include <asm/fpu.h> |
76 | #include <asm/page.h> | 77 | #include <asm/page.h> |
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 09ef94a8a7c3..ca4504424dae 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -277,13 +277,13 @@ config SMP | |||
277 | bool "Symmetric multi-processing support" | 277 | bool "Symmetric multi-processing support" |
278 | ---help--- | 278 | ---help--- |
279 | This enables support for systems with more than one CPU. If you have | 279 | This enables support for systems with more than one CPU. If you have |
280 | a system with only one CPU, like most personal computers, say N. If | 280 | a system with only one CPU, say N. If you have a system with more |
281 | you have a system with more than one CPU, say Y. | 281 | than one CPU, say Y. |
282 | 282 | ||
283 | If you say N here, the kernel will run on single and multiprocessor | 283 | If you say N here, the kernel will run on uni- and multiprocessor |
284 | machines, but will use only one CPU of a multiprocessor machine. If | 284 | machines, but will use only one CPU of a multiprocessor machine. If |
285 | you say Y here, the kernel will run on many, but not all, | 285 | you say Y here, the kernel will run on many, but not all, |
286 | singleprocessor machines. On a singleprocessor machine, the kernel | 286 | uniprocessor machines. On a uniprocessor machine, the kernel |
287 | will run faster if you say N here. | 287 | will run faster if you say N here. |
288 | 288 | ||
289 | People using multiprocessor machines who say Y here should also say | 289 | People using multiprocessor machines who say Y here should also say |
diff --git a/arch/metag/include/asm/fixmap.h b/arch/metag/include/asm/fixmap.h index 33312751c92b..af621b041739 100644 --- a/arch/metag/include/asm/fixmap.h +++ b/arch/metag/include/asm/fixmap.h | |||
@@ -51,37 +51,7 @@ enum fixed_addresses { | |||
51 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 51 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
52 | #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) | 52 | #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) |
53 | 53 | ||
54 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 54 | #include <asm-generic/fixmap.h> |
55 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
56 | |||
57 | extern void __this_fixmap_does_not_exist(void); | ||
58 | /* | ||
59 | * 'index to address' translation. If anyone tries to use the idx | ||
60 | * directly without tranlation, we catch the bug with a NULL-deference | ||
61 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
62 | */ | ||
63 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
64 | { | ||
65 | /* | ||
66 | * this branch gets completely eliminated after inlining, | ||
67 | * except when someone tries to use fixaddr indices in an | ||
68 | * illegal way. (such as mixing up address types or using | ||
69 | * out-of-range indices). | ||
70 | * | ||
71 | * If it doesn't get removed, the linker will complain | ||
72 | * loudly with a reasonably clear error message.. | ||
73 | */ | ||
74 | if (idx >= __end_of_fixed_addresses) | ||
75 | __this_fixmap_does_not_exist(); | ||
76 | |||
77 | return __fix_to_virt(idx); | ||
78 | } | ||
79 | |||
80 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
81 | { | ||
82 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
83 | return __virt_to_fix(vaddr); | ||
84 | } | ||
85 | 55 | ||
86 | #define kmap_get_fixmap_pte(vaddr) \ | 56 | #define kmap_get_fixmap_pte(vaddr) \ |
87 | pte_offset_kernel( \ | 57 | pte_offset_kernel( \ |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index e23cccde9c27..8d581ab06c5d 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -30,6 +30,7 @@ config MICROBLAZE | |||
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select CLONE_BACKWARDS3 | 31 | select CLONE_BACKWARDS3 |
32 | select CLKSRC_OF | 32 | select CLKSRC_OF |
33 | select BUILDTIME_EXTABLE_SORT | ||
33 | 34 | ||
34 | config SWAP | 35 | config SWAP |
35 | def_bool n | 36 | def_bool n |
diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h index f2b312e10b10..06c0e2b1883f 100644 --- a/arch/microblaze/include/asm/fixmap.h +++ b/arch/microblaze/include/asm/fixmap.h | |||
@@ -58,52 +58,12 @@ enum fixed_addresses { | |||
58 | extern void __set_fixmap(enum fixed_addresses idx, | 58 | extern void __set_fixmap(enum fixed_addresses idx, |
59 | phys_addr_t phys, pgprot_t flags); | 59 | phys_addr_t phys, pgprot_t flags); |
60 | 60 | ||
61 | #define set_fixmap(idx, phys) \ | ||
62 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
63 | /* | ||
64 | * Some hardware wants to get fixmapped without caching. | ||
65 | */ | ||
66 | #define set_fixmap_nocache(idx, phys) \ | ||
67 | __set_fixmap(idx, phys, PAGE_KERNEL_CI) | ||
68 | |||
69 | #define clear_fixmap(idx) \ | ||
70 | __set_fixmap(idx, 0, __pgprot(0)) | ||
71 | |||
72 | #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 61 | #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
73 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) | 62 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) |
74 | 63 | ||
75 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 64 | #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_CI |
76 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
77 | |||
78 | extern void __this_fixmap_does_not_exist(void); | ||
79 | |||
80 | /* | ||
81 | * 'index to address' translation. If anyone tries to use the idx | ||
82 | * directly without tranlation, we catch the bug with a NULL-deference | ||
83 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
84 | */ | ||
85 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
86 | { | ||
87 | /* | ||
88 | * this branch gets completely eliminated after inlining, | ||
89 | * except when someone tries to use fixaddr indices in an | ||
90 | * illegal way. (such as mixing up address types or using | ||
91 | * out-of-range indices). | ||
92 | * | ||
93 | * If it doesn't get removed, the linker will complain | ||
94 | * loudly with a reasonably clear error message.. | ||
95 | */ | ||
96 | if (idx >= __end_of_fixed_addresses) | ||
97 | __this_fixmap_does_not_exist(); | ||
98 | |||
99 | return __fix_to_virt(idx); | ||
100 | } | ||
101 | 65 | ||
102 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | 66 | #include <asm-generic/fixmap.h> |
103 | { | ||
104 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
105 | return __virt_to_fix(vaddr); | ||
106 | } | ||
107 | 67 | ||
108 | #endif /* !__ASSEMBLY__ */ | 68 | #endif /* !__ASSEMBLY__ */ |
109 | #endif | 69 | #endif |
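The microblaze conversion above is representative of the fixmap consolidation repeated for MIPS, powerpc, sh, tile, um and x86 further down: each architecture keeps only its layout constants and its __set_fixmap() implementation, optionally overrides FIXMAP_PAGE_NOCACHE, and pulls the previously copy-pasted helpers from <asm-generic/fixmap.h>. The sketch below shows roughly what the shared header centralizes; it is an approximation for orientation, not the verbatim header, and the architecture still supplies FIXADDR_TOP, __end_of_fixed_addresses and __set_fixmap().

/* rough sketch of the helpers consolidated into <asm-generic/fixmap.h> */
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

/* 'index to address': out-of-range indices now fail the build instead of
 * relying on the old __this_fixmap_does_not_exist() link-time trick */
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
	return __fix_to_virt(idx);
}

static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}

/* wrappers around the arch-provided __set_fixmap(); the nocache variant
 * uses whatever pgprot the arch exported as FIXMAP_PAGE_NOCACHE */
#define set_fixmap(idx, phys)		__set_fixmap(idx, phys, PAGE_KERNEL)
#define set_fixmap_nocache(idx, phys)	__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
#define clear_fixmap(idx)		__set_fixmap(idx, 0, __pgprot(0))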
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c93d92beb3d6..52dac06ea6b4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -2129,13 +2129,13 @@ config SMP | |||
2129 | depends on SYS_SUPPORTS_SMP | 2129 | depends on SYS_SUPPORTS_SMP |
2130 | help | 2130 | help |
2131 | This enables support for systems with more than one CPU. If you have | 2131 | This enables support for systems with more than one CPU. If you have |
2132 | a system with only one CPU, like most personal computers, say N. If | 2132 | a system with only one CPU, say N. If you have a system with more |
2133 | you have a system with more than one CPU, say Y. | 2133 | than one CPU, say Y. |
2134 | 2134 | ||
2135 | If you say N here, the kernel will run on single and multiprocessor | 2135 | If you say N here, the kernel will run on uni- and multiprocessor |
2136 | machines, but will use only one CPU of a multiprocessor machine. If | 2136 | machines, but will use only one CPU of a multiprocessor machine. If |
2137 | you say Y here, the kernel will run on many, but not all, | 2137 | you say Y here, the kernel will run on many, but not all, |
2138 | singleprocessor machines. On a singleprocessor machine, the kernel | 2138 | uniprocessor machines. On a uniprocessor machine, the kernel |
2139 | will run faster if you say N here. | 2139 | will run faster if you say N here. |
2140 | 2140 | ||
2141 | People using multiprocessor machines who say Y here should also say | 2141 | People using multiprocessor machines who say Y here should also say |
@@ -2430,7 +2430,7 @@ source "drivers/pcmcia/Kconfig" | |||
2430 | source "drivers/pci/hotplug/Kconfig" | 2430 | source "drivers/pci/hotplug/Kconfig" |
2431 | 2431 | ||
2432 | config RAPIDIO | 2432 | config RAPIDIO |
2433 | bool "RapidIO support" | 2433 | tristate "RapidIO support" |
2434 | depends on PCI | 2434 | depends on PCI |
2435 | default n | 2435 | default n |
2436 | help | 2436 | help |
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h index dfaaf493e9d4..8c012af2f451 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h | |||
@@ -71,38 +71,7 @@ enum fixed_addresses { | |||
71 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 71 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
72 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 72 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
73 | 73 | ||
74 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 74 | #include <asm-generic/fixmap.h> |
75 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
76 | |||
77 | extern void __this_fixmap_does_not_exist(void); | ||
78 | |||
79 | /* | ||
80 | * 'index to address' translation. If anyone tries to use the idx | ||
81 | * directly without tranlation, we catch the bug with a NULL-deference | ||
82 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
83 | */ | ||
84 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
85 | { | ||
86 | /* | ||
87 | * this branch gets completely eliminated after inlining, | ||
88 | * except when someone tries to use fixaddr indices in an | ||
89 | * illegal way. (such as mixing up address types or using | ||
90 | * out-of-range indices). | ||
91 | * | ||
92 | * If it doesn't get removed, the linker will complain | ||
93 | * loudly with a reasonably clear error message.. | ||
94 | */ | ||
95 | if (idx >= __end_of_fixed_addresses) | ||
96 | __this_fixmap_does_not_exist(); | ||
97 | |||
98 | return __fix_to_virt(idx); | ||
99 | } | ||
100 | |||
101 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
102 | { | ||
103 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
104 | return __virt_to_fix(vaddr); | ||
105 | } | ||
106 | 75 | ||
107 | #define kmap_get_fixmap_pte(vaddr) \ | 76 | #define kmap_get_fixmap_pte(vaddr) \ |
108 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | 77 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 8bde9237d13b..a648de1b1096 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -184,13 +184,13 @@ config SMP | |||
184 | depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 | 184 | depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 |
185 | ---help--- | 185 | ---help--- |
186 | This enables support for systems with more than one CPU. If you have | 186 | This enables support for systems with more than one CPU. If you have |
187 | a system with only one CPU, like most personal computers, say N. If | 187 | a system with only one CPU, say N. If you have a system with more |
188 | you have a system with more than one CPU, say Y. | 188 | than one CPU, say Y. |
189 | 189 | ||
190 | If you say N here, the kernel will run on single and multiprocessor | 190 | If you say N here, the kernel will run on uni- and multiprocessor |
191 | machines, but will use only one CPU of a multiprocessor machine. If | 191 | machines, but will use only one CPU of a multiprocessor machine. If |
192 | you say Y here, the kernel will run on many, but not all, | 192 | you say Y here, the kernel will run on many, but not all, |
193 | singleprocessor machines. On a singleprocessor machine, the kernel | 193 | uniprocessor machines. On a uniprocessor machine, the kernel |
194 | will run faster if you say N here. | 194 | will run faster if you say N here. |
195 | 195 | ||
196 | See also <file:Documentation/x86/i386/IO-APIC.txt>, | 196 | See also <file:Documentation/x86/i386/IO-APIC.txt>, |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b5f1858baf33..bb2a8ec440e7 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -229,13 +229,13 @@ config SMP | |||
229 | bool "Symmetric multi-processing support" | 229 | bool "Symmetric multi-processing support" |
230 | ---help--- | 230 | ---help--- |
231 | This enables support for systems with more than one CPU. If you have | 231 | This enables support for systems with more than one CPU. If you have |
232 | a system with only one CPU, like most personal computers, say N. If | 232 | a system with only one CPU, say N. If you have a system with more |
233 | you have a system with more than one CPU, say Y. | 233 | than one CPU, say Y. |
234 | 234 | ||
235 | If you say N here, the kernel will run on single and multiprocessor | 235 | If you say N here, the kernel will run on uni- and multiprocessor |
236 | machines, but will use only one CPU of a multiprocessor machine. If | 236 | machines, but will use only one CPU of a multiprocessor machine. If |
237 | you say Y here, the kernel will run on many, but not all, | 237 | you say Y here, the kernel will run on many, but not all, |
238 | singleprocessor machines. On a singleprocessor machine, the kernel | 238 | uniprocessor machines. On a uniprocessor machine, the kernel |
239 | will run faster if you say N here. | 239 | will run faster if you say N here. |
240 | 240 | ||
241 | See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO | 241 | See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b2be8e8cb5c7..bedc62b44aa6 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -794,7 +794,7 @@ config HAS_RAPIDIO | |||
794 | default n | 794 | default n |
795 | 795 | ||
796 | config RAPIDIO | 796 | config RAPIDIO |
797 | bool "RapidIO support" | 797 | tristate "RapidIO support" |
798 | depends on HAS_RAPIDIO || PCI | 798 | depends on HAS_RAPIDIO || PCI |
799 | help | 799 | help |
800 | If you say Y here, the kernel will include drivers and | 800 | If you say Y here, the kernel will include drivers and |
@@ -802,7 +802,7 @@ config RAPIDIO | |||
802 | 802 | ||
803 | config FSL_RIO | 803 | config FSL_RIO |
804 | bool "Freescale Embedded SRIO Controller support" | 804 | bool "Freescale Embedded SRIO Controller support" |
805 | depends on RAPIDIO && HAS_RAPIDIO | 805 | depends on RAPIDIO = y && HAS_RAPIDIO |
806 | default "n" | 806 | default "n" |
807 | ---help--- | 807 | ---help--- |
808 | Include support for RapidIO controller on Freescale embedded | 808 | Include support for RapidIO controller on Freescale embedded |
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 5c2c0233175e..90f604bbcd19 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h | |||
@@ -58,52 +58,12 @@ enum fixed_addresses { | |||
58 | extern void __set_fixmap (enum fixed_addresses idx, | 58 | extern void __set_fixmap (enum fixed_addresses idx, |
59 | phys_addr_t phys, pgprot_t flags); | 59 | phys_addr_t phys, pgprot_t flags); |
60 | 60 | ||
61 | #define set_fixmap(idx, phys) \ | ||
62 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
63 | /* | ||
64 | * Some hardware wants to get fixmapped without caching. | ||
65 | */ | ||
66 | #define set_fixmap_nocache(idx, phys) \ | ||
67 | __set_fixmap(idx, phys, PAGE_KERNEL_NCG) | ||
68 | |||
69 | #define clear_fixmap(idx) \ | ||
70 | __set_fixmap(idx, 0, __pgprot(0)) | ||
71 | |||
72 | #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 61 | #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
73 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) | 62 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) |
74 | 63 | ||
75 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 64 | #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG |
76 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
77 | |||
78 | extern void __this_fixmap_does_not_exist(void); | ||
79 | |||
80 | /* | ||
81 | * 'index to address' translation. If anyone tries to use the idx | ||
82 | * directly without tranlation, we catch the bug with a NULL-deference | ||
83 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
84 | */ | ||
85 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
86 | { | ||
87 | /* | ||
88 | * this branch gets completely eliminated after inlining, | ||
89 | * except when someone tries to use fixaddr indices in an | ||
90 | * illegal way. (such as mixing up address types or using | ||
91 | * out-of-range indices). | ||
92 | * | ||
93 | * If it doesn't get removed, the linker will complain | ||
94 | * loudly with a reasonably clear error message.. | ||
95 | */ | ||
96 | if (idx >= __end_of_fixed_addresses) | ||
97 | __this_fixmap_does_not_exist(); | ||
98 | |||
99 | return __fix_to_virt(idx); | ||
100 | } | ||
101 | 65 | ||
102 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | 66 | #include <asm-generic/fixmap.h> |
103 | { | ||
104 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
105 | return __virt_to_fix(vaddr); | ||
106 | } | ||
107 | 67 | ||
108 | #endif /* !__ASSEMBLY__ */ | 68 | #endif /* !__ASSEMBLY__ */ |
109 | #endif | 69 | #endif |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index e9f312532526..4f858f77d870 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -334,10 +334,10 @@ config SMP | |||
334 | a system with only one CPU, like most personal computers, say N. If | 334 | a system with only one CPU, like most personal computers, say N. If |
335 | you have a system with more than one CPU, say Y. | 335 | you have a system with more than one CPU, say Y. |
336 | 336 | ||
337 | If you say N here, the kernel will run on single and multiprocessor | 337 | If you say N here, the kernel will run on uni- and multiprocessor |
338 | machines, but will use only one CPU of a multiprocessor machine. If | 338 | machines, but will use only one CPU of a multiprocessor machine. If |
339 | you say Y here, the kernel will run on many, but not all, | 339 | you say Y here, the kernel will run on many, but not all, |
340 | singleprocessor machines. On a singleprocessor machine, the kernel | 340 | uniprocessor machines. On a uniprocessor machine, the kernel |
341 | will run faster if you say N here. | 341 | will run faster if you say N here. |
342 | 342 | ||
343 | See also the SMP-HOWTO available at | 343 | See also the SMP-HOWTO available at |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ce298317a73e..6357710753d5 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -701,13 +701,13 @@ config SMP | |||
701 | depends on SYS_SUPPORTS_SMP | 701 | depends on SYS_SUPPORTS_SMP |
702 | ---help--- | 702 | ---help--- |
703 | This enables support for systems with more than one CPU. If you have | 703 | This enables support for systems with more than one CPU. If you have |
704 | a system with only one CPU, like most personal computers, say N. If | 704 | a system with only one CPU, say N. If you have a system with more |
705 | you have a system with more than one CPU, say Y. | 705 | than one CPU, say Y. |
706 | 706 | ||
707 | If you say N here, the kernel will run on single and multiprocessor | 707 | If you say N here, the kernel will run on uni- and multiprocessor |
708 | machines, but will use only one CPU of a multiprocessor machine. If | 708 | machines, but will use only one CPU of a multiprocessor machine. If |
709 | you say Y here, the kernel will run on many, but not all, | 709 | you say Y here, the kernel will run on many, but not all, |
710 | singleprocessor machines. On a singleprocessor machine, the kernel | 710 | uniprocessor machines. On a uniprocessor machine, the kernel |
711 | will run faster if you say N here. | 711 | will run faster if you say N here. |
712 | 712 | ||
713 | People using multiprocessor machines who say Y here should also say | 713 | People using multiprocessor machines who say Y here should also say |
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index cbe0186b6794..4daf91c3b725 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h | |||
@@ -79,13 +79,6 @@ extern void __set_fixmap(enum fixed_addresses idx, | |||
79 | unsigned long phys, pgprot_t flags); | 79 | unsigned long phys, pgprot_t flags); |
80 | extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags); | 80 | extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags); |
81 | 81 | ||
82 | #define set_fixmap(idx, phys) \ | ||
83 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
84 | /* | ||
85 | * Some hardware wants to get fixmapped without caching. | ||
86 | */ | ||
87 | #define set_fixmap_nocache(idx, phys) \ | ||
88 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
89 | /* | 82 | /* |
90 | * used by vmalloc.c. | 83 | * used by vmalloc.c. |
91 | * | 84 | * |
@@ -101,36 +94,8 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags); | |||
101 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 94 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
102 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 95 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
103 | 96 | ||
104 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 97 | #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE |
105 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
106 | |||
107 | extern void __this_fixmap_does_not_exist(void); | ||
108 | |||
109 | /* | ||
110 | * 'index to address' translation. If anyone tries to use the idx | ||
111 | * directly without tranlation, we catch the bug with a NULL-deference | ||
112 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
113 | */ | ||
114 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
115 | { | ||
116 | /* | ||
117 | * this branch gets completely eliminated after inlining, | ||
118 | * except when someone tries to use fixaddr indices in an | ||
119 | * illegal way. (such as mixing up address types or using | ||
120 | * out-of-range indices). | ||
121 | * | ||
122 | * If it doesn't get removed, the linker will complain | ||
123 | * loudly with a reasonably clear error message.. | ||
124 | */ | ||
125 | if (idx >= __end_of_fixed_addresses) | ||
126 | __this_fixmap_does_not_exist(); | ||
127 | 98 | ||
128 | return __fix_to_virt(idx); | 99 | #include <asm-generic/fixmap.h> |
129 | } | ||
130 | 100 | ||
131 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
132 | { | ||
133 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
134 | return __virt_to_fix(vaddr); | ||
135 | } | ||
136 | #endif | 101 | #endif |
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 49c09c7d5b77..67a049e75ec1 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -995,29 +995,19 @@ static struct unwinder dwarf_unwinder = { | |||
995 | 995 | ||
996 | static void dwarf_unwinder_cleanup(void) | 996 | static void dwarf_unwinder_cleanup(void) |
997 | { | 997 | { |
998 | struct rb_node **fde_rb_node = &fde_root.rb_node; | 998 | struct dwarf_fde *fde, *next_fde; |
999 | struct rb_node **cie_rb_node = &cie_root.rb_node; | 999 | struct dwarf_cie *cie, *next_cie; |
1000 | 1000 | ||
1001 | /* | 1001 | /* |
1002 | * Deallocate all the memory allocated for the DWARF unwinder. | 1002 | * Deallocate all the memory allocated for the DWARF unwinder. |
1003 | * Traverse all the FDE/CIE lists and remove and free all the | 1003 | * Traverse all the FDE/CIE lists and remove and free all the |
1004 | * memory associated with those data structures. | 1004 | * memory associated with those data structures. |
1005 | */ | 1005 | */ |
1006 | while (*fde_rb_node) { | 1006 | rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node) |
1007 | struct dwarf_fde *fde; | ||
1008 | |||
1009 | fde = rb_entry(*fde_rb_node, struct dwarf_fde, node); | ||
1010 | rb_erase(*fde_rb_node, &fde_root); | ||
1011 | kfree(fde); | 1007 | kfree(fde); |
1012 | } | ||
1013 | 1008 | ||
1014 | while (*cie_rb_node) { | 1009 | rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node) |
1015 | struct dwarf_cie *cie; | ||
1016 | |||
1017 | cie = rb_entry(*cie_rb_node, struct dwarf_cie, node); | ||
1018 | rb_erase(*cie_rb_node, &cie_root); | ||
1019 | kfree(cie); | 1010 | kfree(cie); |
1020 | } | ||
1021 | 1011 | ||
1022 | kmem_cache_destroy(dwarf_reg_cachep); | 1012 | kmem_cache_destroy(dwarf_reg_cachep); |
1023 | kmem_cache_destroy(dwarf_frame_cachep); | 1013 | kmem_cache_destroy(dwarf_frame_cachep); |
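The sh/dwarf.c hunk above replaces an erase-one-node-at-a-time loop with rbtree_postorder_for_each_entry_safe(), which visits children before their parent, so every entry can be freed directly without the rebalancing work rb_erase() performs on each removal; that work is pointless when the whole tree is being discarded. A minimal sketch of the same teardown pattern, using a hypothetical my_node type with its rb_node embedded at field 'node':

#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
	struct rb_node node;	/* linkage into the tree */
	int key;
};

/* free every entry; post-order means a node's children are already gone
 * by the time the node itself is handed to kfree() */
static void my_tree_destroy(struct rb_root *root)
{
	struct my_node *pos, *next;

	rbtree_postorder_for_each_entry_safe(pos, next, root, node)
		kfree(pos);

	*root = RB_ROOT;	/* nothing left, reset the root */
}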
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index d4f7a6a163dc..63dfe68f4af8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -152,10 +152,10 @@ config SMP | |||
152 | a system with only one CPU, say N. If you have a system with more | 152 | a system with only one CPU, say N. If you have a system with more |
153 | than one CPU, say Y. | 153 | than one CPU, say Y. |
154 | 154 | ||
155 | If you say N here, the kernel will run on single and multiprocessor | 155 | If you say N here, the kernel will run on uni- and multiprocessor |
156 | machines, but will use only one CPU of a multiprocessor machine. If | 156 | machines, but will use only one CPU of a multiprocessor machine. If |
157 | you say Y here, the kernel will run on many, but not all, | 157 | you say Y here, the kernel will run on many, but not all, |
158 | singleprocessor machines. On a singleprocessor machine, the kernel | 158 | uniprocessor machines. On a uniprocessor machine, the kernel |
159 | will run faster if you say N here. | 159 | will run faster if you say N here. |
160 | 160 | ||
161 | People using multiprocessor machines who say Y here should also say | 161 | People using multiprocessor machines who say Y here should also say |
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h index c6b9c1b38fd1..ffe2637aeb31 100644 --- a/arch/tile/include/asm/fixmap.h +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -25,9 +25,6 @@ | |||
25 | #include <asm/kmap_types.h> | 25 | #include <asm/kmap_types.h> |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
29 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
30 | |||
31 | /* | 28 | /* |
32 | * Here we define all the compile-time 'special' virtual | 29 | * Here we define all the compile-time 'special' virtual |
33 | * addresses. The point is to have a constant address at | 30 | * addresses. The point is to have a constant address at |
@@ -83,35 +80,7 @@ enum fixed_addresses { | |||
83 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) | 80 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) |
84 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) | 81 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) |
85 | 82 | ||
86 | extern void __this_fixmap_does_not_exist(void); | 83 | #include <asm-generic/fixmap.h> |
87 | |||
88 | /* | ||
89 | * 'index to address' translation. If anyone tries to use the idx | ||
90 | * directly without tranlation, we catch the bug with a NULL-deference | ||
91 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
92 | */ | ||
93 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
94 | { | ||
95 | /* | ||
96 | * this branch gets completely eliminated after inlining, | ||
97 | * except when someone tries to use fixaddr indices in an | ||
98 | * illegal way. (such as mixing up address types or using | ||
99 | * out-of-range indices). | ||
100 | * | ||
101 | * If it doesn't get removed, the linker will complain | ||
102 | * loudly with a reasonably clear error message.. | ||
103 | */ | ||
104 | if (idx >= __end_of_fixed_addresses) | ||
105 | __this_fixmap_does_not_exist(); | ||
106 | |||
107 | return __fix_to_virt(idx); | ||
108 | } | ||
109 | |||
110 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
111 | { | ||
112 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
113 | return __virt_to_fix(vaddr); | ||
114 | } | ||
115 | 84 | ||
116 | #endif /* !__ASSEMBLY__ */ | 85 | #endif /* !__ASSEMBLY__ */ |
117 | 86 | ||
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h index 21a423bae5e8..3094ea3c73b0 100644 --- a/arch/um/include/asm/fixmap.h +++ b/arch/um/include/asm/fixmap.h | |||
@@ -43,13 +43,6 @@ enum fixed_addresses { | |||
43 | extern void __set_fixmap (enum fixed_addresses idx, | 43 | extern void __set_fixmap (enum fixed_addresses idx, |
44 | unsigned long phys, pgprot_t flags); | 44 | unsigned long phys, pgprot_t flags); |
45 | 45 | ||
46 | #define set_fixmap(idx, phys) \ | ||
47 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
48 | /* | ||
49 | * Some hardware wants to get fixmapped without caching. | ||
50 | */ | ||
51 | #define set_fixmap_nocache(idx, phys) \ | ||
52 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
53 | /* | 46 | /* |
54 | * used by vmalloc.c. | 47 | * used by vmalloc.c. |
55 | * | 48 | * |
@@ -62,37 +55,6 @@ extern void __set_fixmap (enum fixed_addresses idx, | |||
62 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 55 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
63 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 56 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
64 | 57 | ||
65 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 58 | #include <asm-generic/fixmap.h> |
66 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
67 | |||
68 | extern void __this_fixmap_does_not_exist(void); | ||
69 | |||
70 | /* | ||
71 | * 'index to address' translation. If anyone tries to use the idx | ||
72 | * directly without tranlation, we catch the bug with a NULL-deference | ||
73 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
74 | */ | ||
75 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
76 | { | ||
77 | /* | ||
78 | * this branch gets completely eliminated after inlining, | ||
79 | * except when someone tries to use fixaddr indices in an | ||
80 | * illegal way. (such as mixing up address types or using | ||
81 | * out-of-range indices). | ||
82 | * | ||
83 | * If it doesn't get removed, the linker will complain | ||
84 | * loudly with a reasonably clear error message.. | ||
85 | */ | ||
86 | if (idx >= __end_of_fixed_addresses) | ||
87 | __this_fixmap_does_not_exist(); | ||
88 | |||
89 | return __fix_to_virt(idx); | ||
90 | } | ||
91 | |||
92 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
93 | { | ||
94 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
95 | return __virt_to_fix(vaddr); | ||
96 | } | ||
97 | 59 | ||
98 | #endif | 60 | #endif |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d3b9186e4c23..3e97a3dd4129 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -279,13 +279,13 @@ config SMP | |||
279 | bool "Symmetric multi-processing support" | 279 | bool "Symmetric multi-processing support" |
280 | ---help--- | 280 | ---help--- |
281 | This enables support for systems with more than one CPU. If you have | 281 | This enables support for systems with more than one CPU. If you have |
282 | a system with only one CPU, like most personal computers, say N. If | 282 | a system with only one CPU, say N. If you have a system with more |
283 | you have a system with more than one CPU, say Y. | 283 | than one CPU, say Y. |
284 | 284 | ||
285 | If you say N here, the kernel will run on single and multiprocessor | 285 | If you say N here, the kernel will run on uni- and multiprocessor |
286 | machines, but will use only one CPU of a multiprocessor machine. If | 286 | machines, but will use only one CPU of a multiprocessor machine. If |
287 | you say Y here, the kernel will run on many, but not all, | 287 | you say Y here, the kernel will run on many, but not all, |
288 | singleprocessor machines. On a singleprocessor machine, the kernel | 288 | uniprocessor machines. On a uniprocessor machine, the kernel |
289 | will run faster if you say N here. | 289 | will run faster if you say N here. |
290 | 290 | ||
291 | Note that if you say Y here and choose architecture "586" or | 291 | Note that if you say Y here and choose architecture "586" or |
@@ -731,6 +731,7 @@ config APB_TIMER | |||
731 | # The code disables itself when not needed. | 731 | # The code disables itself when not needed. |
732 | config DMI | 732 | config DMI |
733 | default y | 733 | default y |
734 | select DMI_SCAN_MACHINE_NON_EFI_FALLBACK | ||
734 | bool "Enable DMI scanning" if EXPERT | 735 | bool "Enable DMI scanning" if EXPERT |
735 | ---help--- | 736 | ---help--- |
736 | Enabled scanning of DMI to identify machine quirks. Say Y | 737 | Enabled scanning of DMI to identify machine quirks. Say Y |
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h index fd8f9e2ca35f..535192f6bfad 100644 --- a/arch/x86/include/asm/dmi.h +++ b/arch/x86/include/asm/dmi.h | |||
@@ -13,7 +13,9 @@ static __always_inline __init void *dmi_alloc(unsigned len) | |||
13 | } | 13 | } |
14 | 14 | ||
15 | /* Use early IO mappings for DMI because it's initialized early */ | 15 | /* Use early IO mappings for DMI because it's initialized early */ |
16 | #define dmi_ioremap early_ioremap | 16 | #define dmi_early_remap early_ioremap |
17 | #define dmi_iounmap early_iounmap | 17 | #define dmi_early_unmap early_iounmap |
18 | #define dmi_remap ioremap | ||
19 | #define dmi_unmap iounmap | ||
18 | 20 | ||
19 | #endif /* _ASM_X86_DMI_H */ | 21 | #endif /* _ASM_X86_DMI_H */ |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index e846225265ed..7252cd339175 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -175,64 +175,7 @@ static inline void __set_fixmap(enum fixed_addresses idx, | |||
175 | } | 175 | } |
176 | #endif | 176 | #endif |
177 | 177 | ||
178 | #define set_fixmap(idx, phys) \ | 178 | #include <asm-generic/fixmap.h> |
179 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
180 | |||
181 | /* | ||
182 | * Some hardware wants to get fixmapped without caching. | ||
183 | */ | ||
184 | #define set_fixmap_nocache(idx, phys) \ | ||
185 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
186 | |||
187 | #define clear_fixmap(idx) \ | ||
188 | __set_fixmap(idx, 0, __pgprot(0)) | ||
189 | |||
190 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
191 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
192 | |||
193 | extern void __this_fixmap_does_not_exist(void); | ||
194 | |||
195 | /* | ||
196 | * 'index to address' translation. If anyone tries to use the idx | ||
197 | * directly without translation, we catch the bug with a NULL-deference | ||
198 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
199 | */ | ||
200 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
201 | { | ||
202 | /* | ||
203 | * this branch gets completely eliminated after inlining, | ||
204 | * except when someone tries to use fixaddr indices in an | ||
205 | * illegal way. (such as mixing up address types or using | ||
206 | * out-of-range indices). | ||
207 | * | ||
208 | * If it doesn't get removed, the linker will complain | ||
209 | * loudly with a reasonably clear error message.. | ||
210 | */ | ||
211 | if (idx >= __end_of_fixed_addresses) | ||
212 | __this_fixmap_does_not_exist(); | ||
213 | |||
214 | return __fix_to_virt(idx); | ||
215 | } | ||
216 | |||
217 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
218 | { | ||
219 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
220 | return __virt_to_fix(vaddr); | ||
221 | } | ||
222 | |||
223 | /* Return an pointer with offset calculated */ | ||
224 | static __always_inline unsigned long | ||
225 | __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) | ||
226 | { | ||
227 | __set_fixmap(idx, phys, flags); | ||
228 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); | ||
229 | } | ||
230 | |||
231 | #define set_fixmap_offset(idx, phys) \ | ||
232 | __set_fixmap_offset(idx, phys, PAGE_KERNEL) | ||
233 | |||
234 | #define set_fixmap_offset_nocache(idx, phys) \ | ||
235 | __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE) | ||
236 | 179 | ||
237 | #endif /* !__ASSEMBLY__ */ | 180 | #endif /* !__ASSEMBLY__ */ |
238 | #endif /* _ASM_X86_FIXMAP_H */ | 181 | #endif /* _ASM_X86_FIXMAP_H */ |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 0596e8e0cc19..207d9aef662d 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
108 | 108 | ||
109 | static inline void get_head_page_multiple(struct page *page, int nr) | 109 | static inline void get_head_page_multiple(struct page *page, int nr) |
110 | { | 110 | { |
111 | VM_BUG_ON(page != compound_head(page)); | 111 | VM_BUG_ON_PAGE(page != compound_head(page), page); |
112 | VM_BUG_ON(page_count(page) == 0); | 112 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
113 | atomic_add(nr, &page->_count); | 113 | atomic_add(nr, &page->_count); |
114 | SetPageReferenced(page); | 114 | SetPageReferenced(page); |
115 | } | 115 | } |
@@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, | |||
135 | head = pte_page(pte); | 135 | head = pte_page(pte); |
136 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | 136 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
137 | do { | 137 | do { |
138 | VM_BUG_ON(compound_head(page) != head); | 138 | VM_BUG_ON_PAGE(compound_head(page) != head, page); |
139 | pages[*nr] = page; | 139 | pages[*nr] = page; |
140 | if (PageTail(page)) | 140 | if (PageTail(page)) |
141 | get_huge_page_tail(page); | 141 | get_huge_page_tail(page); |
@@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, | |||
212 | head = pte_page(pte); | 212 | head = pte_page(pte); |
213 | page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | 213 | page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
214 | do { | 214 | do { |
215 | VM_BUG_ON(compound_head(page) != head); | 215 | VM_BUG_ON_PAGE(compound_head(page) != head, page); |
216 | pages[*nr] = page; | 216 | pages[*nr] = page; |
217 | if (PageTail(page)) | 217 | if (PageTail(page)) |
218 | get_huge_page_tail(page); | 218 | get_huge_page_tail(page); |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 86b9f37d102e..9ffa90c6201c 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -368,7 +368,8 @@ config BLK_DEV_RAM | |||
368 | For details, read <file:Documentation/blockdev/ramdisk.txt>. | 368 | For details, read <file:Documentation/blockdev/ramdisk.txt>. |
369 | 369 | ||
370 | To compile this driver as a module, choose M here: the | 370 | To compile this driver as a module, choose M here: the |
371 | module will be called rd. | 371 | module will be called brd. An alias "rd" has been defined |
372 | for historical reasons. | ||
372 | 373 | ||
373 | Most normal users won't need the RAM disk functionality, and can | 374 | Most normal users won't need the RAM disk functionality, and can |
374 | thus say N here. | 375 | thus say N here. |
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 074787281c94..5a29fac951ec 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -108,6 +108,9 @@ config DMI_SYSFS | |||
108 | under /sys/firmware/dmi when this option is enabled and | 108 | under /sys/firmware/dmi when this option is enabled and |
109 | loaded. | 109 | loaded. |
110 | 110 | ||
111 | config DMI_SCAN_MACHINE_NON_EFI_FALLBACK | ||
112 | bool | ||
113 | |||
111 | config ISCSI_IBFT_FIND | 114 | config ISCSI_IBFT_FIND |
112 | bool "iSCSI Boot Firmware Table Attributes" | 115 | bool "iSCSI Boot Firmware Table Attributes" |
113 | depends on X86 | 116 | depends on X86 |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index c7e81ff8f3ef..17afc51f3054 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -116,7 +116,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | |||
116 | { | 116 | { |
117 | u8 *buf; | 117 | u8 *buf; |
118 | 118 | ||
119 | buf = dmi_ioremap(dmi_base, dmi_len); | 119 | buf = dmi_early_remap(dmi_base, dmi_len); |
120 | if (buf == NULL) | 120 | if (buf == NULL) |
121 | return -1; | 121 | return -1; |
122 | 122 | ||
@@ -124,7 +124,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | |||
124 | 124 | ||
125 | add_device_randomness(buf, dmi_len); | 125 | add_device_randomness(buf, dmi_len); |
126 | 126 | ||
127 | dmi_iounmap(buf, dmi_len); | 127 | dmi_early_unmap(buf, dmi_len); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
@@ -527,18 +527,18 @@ void __init dmi_scan_machine(void) | |||
527 | * needed during early boot. This also means we can | 527 | * needed during early boot. This also means we can |
528 | * iounmap the space when we're done with it. | 528 | * iounmap the space when we're done with it. |
529 | */ | 529 | */ |
530 | p = dmi_ioremap(efi.smbios, 32); | 530 | p = dmi_early_remap(efi.smbios, 32); |
531 | if (p == NULL) | 531 | if (p == NULL) |
532 | goto error; | 532 | goto error; |
533 | memcpy_fromio(buf, p, 32); | 533 | memcpy_fromio(buf, p, 32); |
534 | dmi_iounmap(p, 32); | 534 | dmi_early_unmap(p, 32); |
535 | 535 | ||
536 | if (!dmi_present(buf)) { | 536 | if (!dmi_present(buf)) { |
537 | dmi_available = 1; | 537 | dmi_available = 1; |
538 | goto out; | 538 | goto out; |
539 | } | 539 | } |
540 | } else { | 540 | } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) { |
541 | p = dmi_ioremap(0xF0000, 0x10000); | 541 | p = dmi_early_remap(0xF0000, 0x10000); |
542 | if (p == NULL) | 542 | if (p == NULL) |
543 | goto error; | 543 | goto error; |
544 | 544 | ||
@@ -554,12 +554,12 @@ void __init dmi_scan_machine(void) | |||
554 | memcpy_fromio(buf + 16, q, 16); | 554 | memcpy_fromio(buf + 16, q, 16); |
555 | if (!dmi_present(buf)) { | 555 | if (!dmi_present(buf)) { |
556 | dmi_available = 1; | 556 | dmi_available = 1; |
557 | dmi_iounmap(p, 0x10000); | 557 | dmi_early_unmap(p, 0x10000); |
558 | goto out; | 558 | goto out; |
559 | } | 559 | } |
560 | memcpy(buf, buf + 16, 16); | 560 | memcpy(buf, buf + 16, 16); |
561 | } | 561 | } |
562 | dmi_iounmap(p, 0x10000); | 562 | dmi_early_unmap(p, 0x10000); |
563 | } | 563 | } |
564 | error: | 564 | error: |
565 | pr_info("DMI not present or invalid.\n"); | 565 | pr_info("DMI not present or invalid.\n"); |
@@ -831,13 +831,13 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), | |||
831 | if (!dmi_available) | 831 | if (!dmi_available) |
832 | return -1; | 832 | return -1; |
833 | 833 | ||
834 | buf = ioremap(dmi_base, dmi_len); | 834 | buf = dmi_remap(dmi_base, dmi_len); |
835 | if (buf == NULL) | 835 | if (buf == NULL) |
836 | return -1; | 836 | return -1; |
837 | 837 | ||
838 | dmi_table(buf, dmi_len, dmi_num, decode, private_data); | 838 | dmi_table(buf, dmi_len, dmi_num, decode, private_data); |
839 | 839 | ||
840 | iounmap(buf); | 840 | dmi_unmap(buf); |
841 | return 0; | 841 | return 0; |
842 | } | 842 | } |
843 | EXPORT_SYMBOL_GPL(dmi_walk); | 843 | EXPORT_SYMBOL_GPL(dmi_walk); |
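The dmi_scan.c change above gates the legacy 0xF0000 memory scan on a Kconfig symbol that only x86 selects (see the DMI_SCAN_MACHINE_NON_EFI_FALLBACK hunks above). IS_ENABLED(CONFIG_FOO) evaluates to 1 when the symbol is built-in or modular and to 0 otherwise, so the fallback branch is compiled out on EFI-only architectures while the code still gets parsed and type-checked everywhere. A small stand-alone illustration of the idiom; CONFIG_MY_FALLBACK and scan_legacy_region() are placeholders, not real kernel symbols:

#include <linux/errno.h>
#include <linux/kconfig.h>	/* IS_ENABLED() */

static int scan_legacy_region(void);	/* stand-in for the real scanner */

static int probe_table(void)
{
	if (IS_ENABLED(CONFIG_MY_FALLBACK))
		/* dead-code eliminated when CONFIG_MY_FALLBACK is unset,
		 * unlike an #ifdef block, which would not even be compiled */
		return scan_legacy_region();

	return -ENODEV;
}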
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c index 143eba3309c5..ea7dfc59d796 100644 --- a/drivers/gpu/drm/gma500/backlight.c +++ b/drivers/gpu/drm/gma500/backlight.c | |||
@@ -26,13 +26,13 @@ | |||
26 | #include "intel_bios.h" | 26 | #include "intel_bios.h" |
27 | #include "power.h" | 27 | #include "power.h" |
28 | 28 | ||
29 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
29 | static void do_gma_backlight_set(struct drm_device *dev) | 30 | static void do_gma_backlight_set(struct drm_device *dev) |
30 | { | 31 | { |
31 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
32 | struct drm_psb_private *dev_priv = dev->dev_private; | 32 | struct drm_psb_private *dev_priv = dev->dev_private; |
33 | backlight_update_status(dev_priv->backlight_device); | 33 | backlight_update_status(dev_priv->backlight_device); |
34 | #endif | ||
35 | } | 34 | } |
35 | #endif | ||
36 | 36 | ||
37 | void gma_backlight_enable(struct drm_device *dev) | 37 | void gma_backlight_enable(struct drm_device *dev) |
38 | { | 38 | { |
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h index 6cd38fc68599..86d7518cd13b 100644 --- a/drivers/mailbox/omap-mbox.h +++ b/drivers/mailbox/omap-mbox.h | |||
@@ -52,7 +52,7 @@ struct omap_mbox_queue { | |||
52 | 52 | ||
53 | struct omap_mbox { | 53 | struct omap_mbox { |
54 | const char *name; | 54 | const char *name; |
55 | unsigned int irq; | 55 | int irq; |
56 | struct omap_mbox_queue *txq, *rxq; | 56 | struct omap_mbox_queue *txq, *rxq; |
57 | struct omap_mbox_ops *ops; | 57 | struct omap_mbox_ops *ops; |
58 | struct device *dev; | 58 | struct device *dev; |
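The omap_mbox change above makes the irq field a plain int rather than unsigned int: platform_get_irq() and related helpers return a negative errno on failure, and stuffing that into an unsigned field makes a later "irq < 0" check always false, so the error is silently lost. A minimal sketch of the intended pattern; struct my_device and my_probe() are illustrative only:

#include <linux/platform_device.h>

struct my_device {
	int irq;	/* signed so a negative errno survives intact */
};

static int my_probe(struct platform_device *pdev, struct my_device *md)
{
	md->irq = platform_get_irq(pdev, 0);
	if (md->irq < 0)	/* would never trigger if irq were unsigned */
		return md->irq;

	return 0;
}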
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c index 25f8f93decb6..2a635b6fdaf7 100644 --- a/drivers/memstick/host/rtsx_pci_ms.c +++ b/drivers/memstick/host/rtsx_pci_ms.c | |||
@@ -145,6 +145,8 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, | |||
145 | unsigned int length = sg->length; | 145 | unsigned int length = sg->length; |
146 | u16 sec_cnt = (u16)(length / 512); | 146 | u16 sec_cnt = (u16)(length / 512); |
147 | u8 val, trans_mode, dma_dir; | 147 | u8 val, trans_mode, dma_dir; |
148 | struct memstick_dev *card = host->msh->card; | ||
149 | bool pro_card = card->id.type == MEMSTICK_TYPE_PRO; | ||
148 | 150 | ||
149 | dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", | 151 | dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", |
150 | __func__, tpc, (data_dir == READ) ? "READ" : "WRITE", | 152 | __func__, tpc, (data_dir == READ) ? "READ" : "WRITE", |
@@ -152,19 +154,21 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, | |||
152 | 154 | ||
153 | if (data_dir == READ) { | 155 | if (data_dir == READ) { |
154 | dma_dir = DMA_DIR_FROM_CARD; | 156 | dma_dir = DMA_DIR_FROM_CARD; |
155 | trans_mode = MS_TM_AUTO_READ; | 157 | trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ; |
156 | } else { | 158 | } else { |
157 | dma_dir = DMA_DIR_TO_CARD; | 159 | dma_dir = DMA_DIR_TO_CARD; |
158 | trans_mode = MS_TM_AUTO_WRITE; | 160 | trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE; |
159 | } | 161 | } |
160 | 162 | ||
161 | rtsx_pci_init_cmd(pcr); | 163 | rtsx_pci_init_cmd(pcr); |
162 | 164 | ||
163 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); | 165 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); |
164 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, | 166 | if (pro_card) { |
165 | 0xFF, (u8)(sec_cnt >> 8)); | 167 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, |
166 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, | 168 | 0xFF, (u8)(sec_cnt >> 8)); |
167 | 0xFF, (u8)sec_cnt); | 169 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, |
170 | 0xFF, (u8)sec_cnt); | ||
171 | } | ||
168 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); | 172 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); |
169 | 173 | ||
170 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, | 174 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, |
@@ -192,8 +196,14 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, | |||
192 | } | 196 | } |
193 | 197 | ||
194 | rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); | 198 | rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); |
195 | if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) | 199 | if (pro_card) { |
196 | return -EIO; | 200 | if (val & (MS_INT_CMDNK | MS_INT_ERR | |
201 | MS_CRC16_ERR | MS_RDY_TIMEOUT)) | ||
202 | return -EIO; | ||
203 | } else { | ||
204 | if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) | ||
205 | return -EIO; | ||
206 | } | ||
197 | 207 | ||
198 | return 0; | 208 | return 0; |
199 | } | 209 | } |
@@ -462,8 +472,8 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh, | |||
462 | clock = 19000000; | 472 | clock = 19000000; |
463 | ssc_depth = RTSX_SSC_DEPTH_500K; | 473 | ssc_depth = RTSX_SSC_DEPTH_500K; |
464 | 474 | ||
465 | err = rtsx_pci_write_register(pcr, MS_CFG, | 475 | err = rtsx_pci_write_register(pcr, MS_CFG, 0x58, |
466 | 0x18, MS_BUS_WIDTH_1); | 476 | MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT); |
467 | if (err < 0) | 477 | if (err < 0) |
468 | return err; | 478 | return err; |
469 | } else if (value == MEMSTICK_PAR4) { | 479 | } else if (value == MEMSTICK_PAR4) { |
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index f47eaa70eae0..612ca404e150 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c | |||
@@ -175,7 +175,7 @@ static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c, | |||
175 | if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { | 175 | if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { |
176 | const struct of_device_id *match; | 176 | const struct of_device_id *match; |
177 | match = of_match_node(max8998_dt_match, i2c->dev.of_node); | 177 | match = of_match_node(max8998_dt_match, i2c->dev.of_node); |
178 | return (int)match->data; | 178 | return (int)(long)match->data; |
179 | } | 179 | } |
180 | 180 | ||
181 | return (int)id->driver_data; | 181 | return (int)id->driver_data; |
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 6939ae56c2e1..966cf65c5c36 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c | |||
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client, | |||
170 | "Failed to find matching dt id\n"); | 170 | "Failed to find matching dt id\n"); |
171 | return -EINVAL; | 171 | return -EINVAL; |
172 | } | 172 | } |
173 | chip_id = (unsigned int)match->data; | 173 | chip_id = (unsigned int)(unsigned long)match->data; |
174 | status_off = of_property_read_bool(client->dev.of_node, | 174 | status_off = of_property_read_bool(client->dev.of_node, |
175 | "ti,pmic-shutdown-controller"); | 175 | "ti,pmic-shutdown-controller"); |
176 | } | 176 | } |
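The max8998 and tps65217 fixes above address the same 64-bit build warning: of_device_id.data is a void pointer, and casting a pointer straight to int truncates on LP64 targets, which gcc flags as a pointer-to-integer cast of different size. Casting through long (or uintptr_t) first keeps the conversion at pointer width and then narrows explicitly, marking the truncation as intentional. A stand-alone illustration of the idiom:

#include <stdio.h>
#include <stdint.h>

/* a small constant smuggled through a void pointer, the way
 * of_device_id.data carries per-compatible driver data */
static const void *match_data = (void *)(uintptr_t)3;

int main(void)
{
	/* int id = (int)match_data;  -- warns on 64-bit: pointer truncated */
	int id = (int)(uintptr_t)match_data;	/* widen first, then narrow */

	printf("chip id %d\n", id);
	return 0;
}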
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index b1328a45b095..db933decc39c 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -212,6 +212,17 @@ config RTC_DRV_DS3232 | |||
212 | This driver can also be built as a module. If so, the module | 212 | This driver can also be built as a module. If so, the module |
213 | will be called rtc-ds3232. | 213 | will be called rtc-ds3232. |
214 | 214 | ||
215 | config RTC_DRV_HYM8563 | ||
216 | tristate "Haoyu Microelectronics HYM8563" | ||
217 | depends on I2C && OF | ||
218 | help | ||
219 | Say Y to enable support for the HYM8563 I2C RTC chip. Apart | ||
220 | from the usual rtc functions it provides a clock output of | ||
221 | up to 32kHz. | ||
222 | |||
223 | This driver can also be built as a module. If so, the module | ||
224 | will be called rtc-hym8563. | ||
225 | |||
215 | config RTC_DRV_LP8788 | 226 | config RTC_DRV_LP8788 |
216 | tristate "TI LP8788 RTC driver" | 227 | tristate "TI LP8788 RTC driver" |
217 | depends on MFD_LP8788 | 228 | depends on MFD_LP8788 |
@@ -637,7 +648,7 @@ comment "Platform RTC drivers" | |||
637 | 648 | ||
638 | config RTC_DRV_CMOS | 649 | config RTC_DRV_CMOS |
639 | tristate "PC-style 'CMOS'" | 650 | tristate "PC-style 'CMOS'" |
640 | depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 | 651 | depends on X86 || ARM || M32R || PPC || MIPS || SPARC64 |
641 | default y if X86 | 652 | default y if X86 |
642 | help | 653 | help |
643 | Say "yes" here to get direct support for the real time clock | 654 | Say "yes" here to get direct support for the real time clock |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index c00741a0bf10..b427bf7dd20d 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -55,6 +55,7 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o | |||
55 | obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o | 55 | obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o |
56 | obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o | 56 | obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o |
57 | obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o | 57 | obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o |
58 | obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o | ||
58 | obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o | 59 | obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o |
59 | obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o | 60 | obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o |
60 | obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o | 61 | obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o |
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 02426812bebc..589351ef75d0 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/of.h> | ||
17 | #include <linux/rtc.h> | 18 | #include <linux/rtc.h> |
18 | #include <linux/kdev_t.h> | 19 | #include <linux/kdev_t.h> |
19 | #include <linux/idr.h> | 20 | #include <linux/idr.h> |
@@ -157,12 +158,27 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, | |||
157 | { | 158 | { |
158 | struct rtc_device *rtc; | 159 | struct rtc_device *rtc; |
159 | struct rtc_wkalrm alrm; | 160 | struct rtc_wkalrm alrm; |
160 | int id, err; | 161 | int of_id = -1, id = -1, err; |
162 | |||
163 | if (dev->of_node) | ||
164 | of_id = of_alias_get_id(dev->of_node, "rtc"); | ||
165 | else if (dev->parent && dev->parent->of_node) | ||
166 | of_id = of_alias_get_id(dev->parent->of_node, "rtc"); | ||
167 | |||
168 | if (of_id >= 0) { | ||
169 | id = ida_simple_get(&rtc_ida, of_id, of_id + 1, | ||
170 | GFP_KERNEL); | ||
171 | if (id < 0) | ||
172 | dev_warn(dev, "/aliases ID %d not available\n", | ||
173 | of_id); | ||
174 | } | ||
161 | 175 | ||
162 | id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); | ||
163 | if (id < 0) { | 176 | if (id < 0) { |
164 | err = id; | 177 | id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); |
165 | goto exit; | 178 | if (id < 0) { |
179 | err = id; | ||
180 | goto exit; | ||
181 | } | ||
166 | } | 182 | } |
167 | 183 | ||
168 | rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL); | 184 | rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL); |
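The rtc core change above lets a device-tree alias such as rtc0 pin a device to a fixed /dev/rtcN: of_alias_get_id() returns the numeric suffix of the matching alias (or a negative errno when there is none), and ida_simple_get() with the range [of_id, of_id + 1) reserves exactly that one number, with the code falling back to the first free ID when the alias is absent or already taken. A condensed sketch of the allocation logic; my_ida and my_get_id() are stand-ins:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/of.h>

static DEFINE_IDA(my_ida);

static int my_get_id(struct device *dev)
{
	int id = -1;

	if (dev->of_node) {
		int of_id = of_alias_get_id(dev->of_node, "rtc");

		if (of_id >= 0)
			/* [of_id, of_id + 1) requests exactly that number */
			id = ida_simple_get(&my_ida, of_id, of_id + 1,
					    GFP_KERNEL);
	}

	if (id < 0)	/* no alias, or the requested ID was busy */
		id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

	return id;	/* still negative only if allocation itself failed */
}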
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c index 9cfa8170a2d6..4af016985890 100644 --- a/drivers/rtc/rtc-as3722.c +++ b/drivers/rtc/rtc-as3722.c | |||
@@ -198,7 +198,7 @@ static int as3722_rtc_probe(struct platform_device *pdev) | |||
198 | 198 | ||
199 | device_init_wakeup(&pdev->dev, 1); | 199 | device_init_wakeup(&pdev->dev, 1); |
200 | 200 | ||
201 | as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev, | 201 | as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc", |
202 | &as3722_rtc_ops, THIS_MODULE); | 202 | &as3722_rtc_ops, THIS_MODULE); |
203 | if (IS_ERR(as3722_rtc->rtc)) { | 203 | if (IS_ERR(as3722_rtc->rtc)) { |
204 | ret = PTR_ERR(as3722_rtc->rtc); | 204 | ret = PTR_ERR(as3722_rtc->rtc); |
@@ -209,28 +209,16 @@ static int as3722_rtc_probe(struct platform_device *pdev) | |||
209 | as3722_rtc->alarm_irq = platform_get_irq(pdev, 0); | 209 | as3722_rtc->alarm_irq = platform_get_irq(pdev, 0); |
210 | dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq); | 210 | dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq); |
211 | 211 | ||
212 | ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL, | 212 | ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL, |
213 | as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME, | 213 | as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME, |
214 | "rtc-alarm", as3722_rtc); | 214 | "rtc-alarm", as3722_rtc); |
215 | if (ret < 0) { | 215 | if (ret < 0) { |
216 | dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", | 216 | dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", |
217 | as3722_rtc->alarm_irq, ret); | 217 | as3722_rtc->alarm_irq, ret); |
218 | goto scrub; | 218 | return ret; |
219 | } | 219 | } |
220 | disable_irq(as3722_rtc->alarm_irq); | 220 | disable_irq(as3722_rtc->alarm_irq); |
221 | return 0; | 221 | return 0; |
222 | scrub: | ||
223 | rtc_device_unregister(as3722_rtc->rtc); | ||
224 | return ret; | ||
225 | } | ||
226 | |||
227 | static int as3722_rtc_remove(struct platform_device *pdev) | ||
228 | { | ||
229 | struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev); | ||
230 | |||
231 | free_irq(as3722_rtc->alarm_irq, as3722_rtc); | ||
232 | rtc_device_unregister(as3722_rtc->rtc); | ||
233 | return 0; | ||
234 | } | 222 | } |
235 | 223 | ||
236 | #ifdef CONFIG_PM_SLEEP | 224 | #ifdef CONFIG_PM_SLEEP |
@@ -260,7 +248,6 @@ static const struct dev_pm_ops as3722_rtc_pm_ops = { | |||
260 | 248 | ||
261 | static struct platform_driver as3722_rtc_driver = { | 249 | static struct platform_driver as3722_rtc_driver = { |
262 | .probe = as3722_rtc_probe, | 250 | .probe = as3722_rtc_probe, |
263 | .remove = as3722_rtc_remove, | ||
264 | .driver = { | 251 | .driver = { |
265 | .name = "as3722-rtc", | 252 | .name = "as3722-rtc", |
266 | .pm = &as3722_rtc_pm_ops, | 253 | .pm = &as3722_rtc_pm_ops, |
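The as3722 conversion above is the usual devm_* cleanup: resources obtained with devm_rtc_device_register() and devm_request_threaded_irq() are released automatically when probe fails or the device is unbound, so the hand-rolled "scrub" error path and the whole .remove() callback can go away. A skeletal sketch of the resulting probe shape; my_rtc_ops and my_rtc_irq() are placeholders for a real ops table and handler:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static irqreturn_t my_rtc_irq(int irq, void *data);	/* placeholder */
static const struct rtc_class_ops my_rtc_ops;		/* placeholder */

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	int irq, ret;

	rtc = devm_rtc_device_register(&pdev->dev, "my-rtc",
				       &my_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);	/* nothing to unwind by hand */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, my_rtc_irq,
					IRQF_ONESHOT, "my-rtc-alarm", rtc);
	if (ret)
		return ret;	/* the RTC registration is undone for us */

	return 0;		/* and no .remove() is needed at all */
}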
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index a2325bc5e497..cae212f30d65 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -756,11 +756,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) | |||
756 | irq_handler_t rtc_cmos_int_handler; | 756 | irq_handler_t rtc_cmos_int_handler; |
757 | 757 | ||
758 | if (is_hpet_enabled()) { | 758 | if (is_hpet_enabled()) { |
759 | int err; | ||
760 | |||
761 | rtc_cmos_int_handler = hpet_rtc_interrupt; | 759 | rtc_cmos_int_handler = hpet_rtc_interrupt; |
762 | err = hpet_register_irq_handler(cmos_interrupt); | 760 | retval = hpet_register_irq_handler(cmos_interrupt); |
763 | if (err != 0) { | 761 | if (retval) { |
764 | dev_warn(dev, "hpet_register_irq_handler " | 762 | dev_warn(dev, "hpet_register_irq_handler " |
765 | " failed in rtc_init()."); | 763 | " failed in rtc_init()."); |
766 | goto cleanup1; | 764 | goto cleanup1; |
@@ -1175,7 +1173,7 @@ static struct platform_driver cmos_platform_driver = { | |||
1175 | .remove = __exit_p(cmos_platform_remove), | 1173 | .remove = __exit_p(cmos_platform_remove), |
1176 | .shutdown = cmos_platform_shutdown, | 1174 | .shutdown = cmos_platform_shutdown, |
1177 | .driver = { | 1175 | .driver = { |
1178 | .name = (char *) driver_name, | 1176 | .name = driver_name, |
1179 | #ifdef CONFIG_PM | 1177 | #ifdef CONFIG_PM |
1180 | .pm = &cmos_pm_ops, | 1178 | .pm = &cmos_pm_ops, |
1181 | #endif | 1179 | #endif |
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 80f323731ee2..2dd586a19b59 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c | |||
@@ -787,7 +787,6 @@ static int ds1305_remove(struct spi_device *spi) | |||
787 | cancel_work_sync(&ds1305->work); | 787 | cancel_work_sync(&ds1305->work); |
788 | } | 788 | } |
789 | 789 | ||
790 | spi_set_drvdata(spi, NULL); | ||
791 | return 0; | 790 | return 0; |
792 | } | 791 | } |
793 | 792 | ||
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index 17b73fdc3b6e..5a1f3b2a8f1e 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c | |||
@@ -13,12 +13,13 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/bcd.h> | 15 | #include <linux/bcd.h> |
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
18 | #include <linux/gfp.h> | 17 | #include <linux/gfp.h> |
19 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
20 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
21 | #include <linux/rtc.h> | 20 | #include <linux/rtc.h> |
21 | #include <linux/of.h> | ||
22 | #include <linux/of_device.h> | ||
22 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
23 | #include <linux/io.h> | 24 | #include <linux/io.h> |
24 | #include <linux/module.h> | 25 | #include <linux/module.h> |
@@ -215,12 +216,19 @@ static int ds1742_rtc_remove(struct platform_device *pdev) | |||
215 | return 0; | 216 | return 0; |
216 | } | 217 | } |
217 | 218 | ||
219 | static struct of_device_id __maybe_unused ds1742_rtc_of_match[] = { | ||
220 | { .compatible = "maxim,ds1742", }, | ||
221 | { } | ||
222 | }; | ||
223 | MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match); | ||
224 | |||
218 | static struct platform_driver ds1742_rtc_driver = { | 225 | static struct platform_driver ds1742_rtc_driver = { |
219 | .probe = ds1742_rtc_probe, | 226 | .probe = ds1742_rtc_probe, |
220 | .remove = ds1742_rtc_remove, | 227 | .remove = ds1742_rtc_remove, |
221 | .driver = { | 228 | .driver = { |
222 | .name = "rtc-ds1742", | 229 | .name = "rtc-ds1742", |
223 | .owner = THIS_MODULE, | 230 | .owner = THIS_MODULE, |
231 | .of_match_table = ds1742_rtc_of_match, | ||
224 | }, | 232 | }, |
225 | }; | 233 | }; |
226 | 234 | ||
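The ds1742 hunk above is the minimal recipe for making an existing platform driver probeable from the device tree: an of_device_id table names the compatible string, .of_match_table hands it to the driver core, and MODULE_DEVICE_TABLE(of, ...) exports it so the module can be autoloaded from the firmware-provided compatible. A stripped-down sketch of the same wiring for a hypothetical "vendor,example-rtc" device:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_rtc_probe(struct platform_device *pdev)
{
	return 0;	/* real device setup elided */
}

static const struct of_device_id example_rtc_of_match[] = {
	{ .compatible = "vendor,example-rtc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_rtc_of_match);

static struct platform_driver example_rtc_driver = {
	.probe	= example_rtc_probe,
	.driver	= {
		.name		= "example-rtc",
		.owner		= THIS_MODULE,
		.of_match_table	= example_rtc_of_match,
	},
};
module_platform_driver(example_rtc_driver);

MODULE_LICENSE("GPL");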
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c new file mode 100644 index 000000000000..bd628a6f981d --- /dev/null +++ b/drivers/rtc/rtc-hym8563.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * Haoyu HYM8563 RTC driver | ||
3 | * | ||
4 | * Copyright (C) 2013 MundoReader S.L. | ||
5 | * Author: Heiko Stuebner <heiko@sntech.de> | ||
6 | * | ||
7 | * based on rtc-HYM8563 | ||
8 | * Copyright (C) 2010 ROCKCHIP, Inc. | ||
9 | * | ||
10 | * This software is licensed under the terms of the GNU General Public | ||
11 | * License version 2, as published by the Free Software Foundation, and | ||
12 | * may be copied, distributed, and modified under those terms. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/clk-provider.h> | ||
22 | #include <linux/i2c.h> | ||
23 | #include <linux/bcd.h> | ||
24 | #include <linux/rtc.h> | ||
25 | |||
26 | #define HYM8563_CTL1 0x00 | ||
27 | #define HYM8563_CTL1_TEST BIT(7) | ||
28 | #define HYM8563_CTL1_STOP BIT(5) | ||
29 | #define HYM8563_CTL1_TESTC BIT(3) | ||
30 | |||
31 | #define HYM8563_CTL2 0x01 | ||
32 | #define HYM8563_CTL2_TI_TP BIT(4) | ||
33 | #define HYM8563_CTL2_AF BIT(3) | ||
34 | #define HYM8563_CTL2_TF BIT(2) | ||
35 | #define HYM8563_CTL2_AIE BIT(1) | ||
36 | #define HYM8563_CTL2_TIE BIT(0) | ||
37 | |||
38 | #define HYM8563_SEC 0x02 | ||
39 | #define HYM8563_SEC_VL BIT(7) | ||
40 | #define HYM8563_SEC_MASK 0x7f | ||
41 | |||
42 | #define HYM8563_MIN 0x03 | ||
43 | #define HYM8563_MIN_MASK 0x7f | ||
44 | |||
45 | #define HYM8563_HOUR 0x04 | ||
46 | #define HYM8563_HOUR_MASK 0x3f | ||
47 | |||
48 | #define HYM8563_DAY 0x05 | ||
49 | #define HYM8563_DAY_MASK 0x3f | ||
50 | |||
51 | #define HYM8563_WEEKDAY 0x06 | ||
52 | #define HYM8563_WEEKDAY_MASK 0x07 | ||
53 | |||
54 | #define HYM8563_MONTH 0x07 | ||
55 | #define HYM8563_MONTH_CENTURY BIT(7) | ||
56 | #define HYM8563_MONTH_MASK 0x1f | ||
57 | |||
58 | #define HYM8563_YEAR 0x08 | ||
59 | |||
60 | #define HYM8563_ALM_MIN 0x09 | ||
61 | #define HYM8563_ALM_HOUR 0x0a | ||
62 | #define HYM8563_ALM_DAY 0x0b | ||
63 | #define HYM8563_ALM_WEEK 0x0c | ||
64 | |||
65 | /* Each alarm check can be disabled by setting this bit in the register */ | ||
66 | #define HYM8563_ALM_BIT_DISABLE BIT(7) | ||
67 | |||
68 | #define HYM8563_CLKOUT 0x0d | ||
69 | #define HYM8563_CLKOUT_DISABLE BIT(7) | ||
70 | #define HYM8563_CLKOUT_32768 0 | ||
71 | #define HYM8563_CLKOUT_1024 1 | ||
72 | #define HYM8563_CLKOUT_32 2 | ||
73 | #define HYM8563_CLKOUT_1 3 | ||
74 | #define HYM8563_CLKOUT_MASK 3 | ||
75 | |||
76 | #define HYM8563_TMR_CTL 0x0e | ||
77 | #define HYM8563_TMR_CTL_ENABLE BIT(7) | ||
78 | #define HYM8563_TMR_CTL_4096 0 | ||
79 | #define HYM8563_TMR_CTL_64 1 | ||
80 | #define HYM8563_TMR_CTL_1 2 | ||
81 | #define HYM8563_TMR_CTL_1_60 3 | ||
82 | #define HYM8563_TMR_CTL_MASK 3 | ||
83 | |||
84 | #define HYM8563_TMR_CNT 0x0f | ||
85 | |||
86 | struct hym8563 { | ||
87 | struct i2c_client *client; | ||
88 | struct rtc_device *rtc; | ||
89 | bool valid; | ||
90 | #ifdef CONFIG_COMMON_CLK | ||
91 | struct clk_hw clkout_hw; | ||
92 | #endif | ||
93 | }; | ||
94 | |||
95 | /* | ||
96 | * RTC handling | ||
97 | */ | ||
98 | |||
99 | static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
100 | { | ||
101 | struct i2c_client *client = to_i2c_client(dev); | ||
102 | struct hym8563 *hym8563 = i2c_get_clientdata(client); | ||
103 | u8 buf[7]; | ||
104 | int ret; | ||
105 | |||
106 | if (!hym8563->valid) { | ||
107 | dev_warn(&client->dev, "no valid clock/calendar values available\n"); | ||
108 | return -EPERM; | ||
109 | } | ||
110 | |||
111 | ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf); | ||
112 | |||
113 | tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK); | ||
114 | tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK); | ||
115 | tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK); | ||
116 | tm->tm_mday = bcd2bin(buf[3] & HYM8563_DAY_MASK); | ||
117 | tm->tm_wday = bcd2bin(buf[4] & HYM8563_WEEKDAY_MASK); /* 0 = Sun */ | ||
118 | tm->tm_mon = bcd2bin(buf[5] & HYM8563_MONTH_MASK) - 1; /* 0 = Jan */ | ||
119 | tm->tm_year = bcd2bin(buf[6]) + 100; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
125 | { | ||
126 | struct i2c_client *client = to_i2c_client(dev); | ||
127 | struct hym8563 *hym8563 = i2c_get_clientdata(client); | ||
128 | u8 buf[7]; | ||
129 | int ret; | ||
130 | |||
131 | /* Years >= 2100 are too far in the future, 19XX is too early */ ||
132 | if (tm->tm_year < 100 || tm->tm_year >= 200) | ||
133 | return -EINVAL; | ||
134 | |||
135 | buf[0] = bin2bcd(tm->tm_sec); | ||
136 | buf[1] = bin2bcd(tm->tm_min); | ||
137 | buf[2] = bin2bcd(tm->tm_hour); | ||
138 | buf[3] = bin2bcd(tm->tm_mday); | ||
139 | buf[4] = bin2bcd(tm->tm_wday); | ||
140 | buf[5] = bin2bcd(tm->tm_mon + 1); | ||
141 | |||
142 | /* | ||
143 | * While the HYM8563 has a century flag in the month register, | ||
144 | * it does not seem to carry it over a subsequent write/read. | ||
145 | * So we'll limit ourselves to 100 years, starting at 2000 for now. ||
146 | */ | ||
147 | buf[6] = tm->tm_year - 100; | ||
148 | |||
149 | /* | ||
150 | * CTL1 only contains TEST-mode bits apart from stop, | ||
151 | * so no need to read the value first | ||
152 | */ | ||
153 | ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, | ||
154 | HYM8563_CTL1_STOP); | ||
155 | if (ret < 0) | ||
156 | return ret; | ||
157 | |||
158 | ret = i2c_smbus_write_i2c_block_data(client, HYM8563_SEC, 7, buf); | ||
159 | if (ret < 0) | ||
160 | return ret; | ||
161 | |||
162 | ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0); | ||
163 | if (ret < 0) | ||
164 | return ret; | ||
165 | |||
166 | hym8563->valid = true; | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static int hym8563_rtc_alarm_irq_enable(struct device *dev, | ||
172 | unsigned int enabled) | ||
173 | { | ||
174 | struct i2c_client *client = to_i2c_client(dev); | ||
175 | int data; | ||
176 | |||
177 | data = i2c_smbus_read_byte_data(client, HYM8563_CTL2); | ||
178 | if (data < 0) | ||
179 | return data; | ||
180 | |||
181 | if (enabled) | ||
182 | data |= HYM8563_CTL2_AIE; | ||
183 | else | ||
184 | data &= ~HYM8563_CTL2_AIE; | ||
185 | |||
186 | return i2c_smbus_write_byte_data(client, HYM8563_CTL2, data); | ||
187 | }; | ||
188 | |||
189 | static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
190 | { | ||
191 | struct i2c_client *client = to_i2c_client(dev); | ||
192 | struct rtc_time *alm_tm = &alm->time; | ||
193 | u8 buf[4]; | ||
194 | int ret; | ||
195 | |||
196 | ret = i2c_smbus_read_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf); | ||
197 | if (ret < 0) | ||
198 | return ret; | ||
199 | |||
200 | /* The alarm only has minute accuracy */ ||
201 | alm_tm->tm_sec = -1; | ||
202 | |||
203 | alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ? | ||
204 | -1 : | ||
205 | bcd2bin(buf[0] & HYM8563_MIN_MASK); | ||
206 | alm_tm->tm_hour = (buf[1] & HYM8563_ALM_BIT_DISABLE) ? | ||
207 | -1 : | ||
208 | bcd2bin(buf[1] & HYM8563_HOUR_MASK); | ||
209 | alm_tm->tm_mday = (buf[2] & HYM8563_ALM_BIT_DISABLE) ? | ||
210 | -1 : | ||
211 | bcd2bin(buf[2] & HYM8563_DAY_MASK); | ||
212 | alm_tm->tm_wday = (buf[3] & HYM8563_ALM_BIT_DISABLE) ? | ||
213 | -1 : | ||
214 | bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK); | ||
215 | |||
216 | alm_tm->tm_mon = -1; | ||
217 | alm_tm->tm_year = -1; | ||
218 | |||
219 | ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2); | ||
220 | if (ret < 0) | ||
221 | return ret; | ||
222 | |||
223 | if (ret & HYM8563_CTL2_AIE) | ||
224 | alm->enabled = 1; | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
230 | { | ||
231 | struct i2c_client *client = to_i2c_client(dev); | ||
232 | struct rtc_time *alm_tm = &alm->time; | ||
233 | u8 buf[4]; | ||
234 | int ret; | ||
235 | |||
236 | /* | ||
237 | * The alarm has no second field, so round a requested time up to the next minute ||
238 | */ | ||
239 | if (alm_tm->tm_sec) { | ||
240 | alm_tm->tm_sec = 0; | ||
241 | alm_tm->tm_min++; | ||
242 | if (alm_tm->tm_min >= 60) { | ||
243 | alm_tm->tm_min = 0; | ||
244 | alm_tm->tm_hour++; | ||
245 | if (alm_tm->tm_hour >= 24) { | ||
246 | alm_tm->tm_hour = 0; | ||
247 | alm_tm->tm_mday++; | ||
248 | if (alm_tm->tm_mday > 31) | ||
249 | alm_tm->tm_mday = 0; | ||
250 | } | ||
251 | } | ||
252 | } | ||
253 | |||
254 | ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2); | ||
255 | if (ret < 0) | ||
256 | return ret; | ||
257 | |||
258 | ret &= ~HYM8563_CTL2_AIE; | ||
259 | |||
260 | ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret); | ||
261 | if (ret < 0) | ||
262 | return ret; | ||
263 | |||
264 | buf[0] = (alm_tm->tm_min < 60 && alm_tm->tm_min >= 0) ? | ||
265 | bin2bcd(alm_tm->tm_min) : HYM8563_ALM_BIT_DISABLE; | ||
266 | |||
267 | buf[1] = (alm_tm->tm_hour < 24 && alm_tm->tm_hour >= 0) ? | ||
268 | bin2bcd(alm_tm->tm_hour) : HYM8563_ALM_BIT_DISABLE; | ||
269 | |||
270 | buf[2] = (alm_tm->tm_mday <= 31 && alm_tm->tm_mday >= 1) ? | ||
271 | bin2bcd(alm_tm->tm_mday) : HYM8563_ALM_BIT_DISABLE; | ||
272 | |||
273 | buf[3] = (alm_tm->tm_wday < 7 && alm_tm->tm_wday >= 0) ? | ||
274 | bin2bcd(alm_tm->tm_wday) : HYM8563_ALM_BIT_DISABLE; | ||
275 | |||
276 | ret = i2c_smbus_write_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf); | ||
277 | if (ret < 0) | ||
278 | return ret; | ||
279 | |||
280 | return hym8563_rtc_alarm_irq_enable(dev, alm->enabled); | ||
281 | } | ||
282 | |||
283 | static const struct rtc_class_ops hym8563_rtc_ops = { | ||
284 | .read_time = hym8563_rtc_read_time, | ||
285 | .set_time = hym8563_rtc_set_time, | ||
286 | .alarm_irq_enable = hym8563_rtc_alarm_irq_enable, | ||
287 | .read_alarm = hym8563_rtc_read_alarm, | ||
288 | .set_alarm = hym8563_rtc_set_alarm, | ||
289 | }; | ||
290 | |||
291 | /* | ||
292 | * Handling of the clkout | ||
293 | */ | ||
294 | |||
295 | #ifdef CONFIG_COMMON_CLK | ||
296 | #define clkout_hw_to_hym8563(_hw) container_of(_hw, struct hym8563, clkout_hw) | ||
297 | |||
298 | static int clkout_rates[] = { | ||
299 | 32768, | ||
300 | 1024, | ||
301 | 32, | ||
302 | 1, | ||
303 | }; | ||
304 | |||
305 | static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw, | ||
306 | unsigned long parent_rate) | ||
307 | { | ||
308 | struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); | ||
309 | struct i2c_client *client = hym8563->client; | ||
310 | int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); | ||
311 | |||
312 | if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE) | ||
313 | return 0; | ||
314 | |||
315 | ret &= HYM8563_CLKOUT_MASK; | ||
316 | return clkout_rates[ret]; | ||
317 | } | ||
318 | |||
319 | static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, | ||
320 | unsigned long *prate) | ||
321 | { | ||
322 | int i; | ||
323 | |||
324 | for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) | ||
325 | if (clkout_rates[i] <= rate) | ||
326 | return clkout_rates[i]; | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate, | ||
332 | unsigned long parent_rate) | ||
333 | { | ||
334 | struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); | ||
335 | struct i2c_client *client = hym8563->client; | ||
336 | int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); | ||
337 | int i; | ||
338 | |||
339 | if (ret < 0) | ||
340 | return ret; | ||
341 | |||
342 | for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) | ||
343 | if (clkout_rates[i] == rate) { | ||
344 | ret &= ~HYM8563_CLKOUT_MASK; | ||
345 | ret |= i; | ||
346 | return i2c_smbus_write_byte_data(client, | ||
347 | HYM8563_CLKOUT, ret); | ||
348 | } | ||
349 | |||
350 | return -EINVAL; | ||
351 | } | ||
352 | |||
353 | static int hym8563_clkout_control(struct clk_hw *hw, bool enable) | ||
354 | { | ||
355 | struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); | ||
356 | struct i2c_client *client = hym8563->client; | ||
357 | int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); | ||
358 | |||
359 | if (ret < 0) | ||
360 | return ret; | ||
361 | |||
362 | if (enable) | ||
363 | ret &= ~HYM8563_CLKOUT_DISABLE; | ||
364 | else | ||
365 | ret |= HYM8563_CLKOUT_DISABLE; | ||
366 | |||
367 | return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret); | ||
368 | } | ||
369 | |||
370 | static int hym8563_clkout_prepare(struct clk_hw *hw) | ||
371 | { | ||
372 | return hym8563_clkout_control(hw, 1); | ||
373 | } | ||
374 | |||
375 | static void hym8563_clkout_unprepare(struct clk_hw *hw) | ||
376 | { | ||
377 | hym8563_clkout_control(hw, 0); | ||
378 | } | ||
379 | |||
380 | static int hym8563_clkout_is_prepared(struct clk_hw *hw) | ||
381 | { | ||
382 | struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); | ||
383 | struct i2c_client *client = hym8563->client; | ||
384 | int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); | ||
385 | |||
386 | if (ret < 0) | ||
387 | return ret; | ||
388 | |||
389 | return !(ret & HYM8563_CLKOUT_DISABLE); | ||
390 | } | ||
391 | |||
392 | static const struct clk_ops hym8563_clkout_ops = { | ||
393 | .prepare = hym8563_clkout_prepare, | ||
394 | .unprepare = hym8563_clkout_unprepare, | ||
395 | .is_prepared = hym8563_clkout_is_prepared, | ||
396 | .recalc_rate = hym8563_clkout_recalc_rate, | ||
397 | .round_rate = hym8563_clkout_round_rate, | ||
398 | .set_rate = hym8563_clkout_set_rate, | ||
399 | }; | ||
400 | |||
401 | static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563) | ||
402 | { | ||
403 | struct i2c_client *client = hym8563->client; | ||
404 | struct device_node *node = client->dev.of_node; | ||
405 | struct clk *clk; | ||
406 | struct clk_init_data init; | ||
407 | int ret; | ||
408 | |||
409 | ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, | ||
410 | HYM8563_CLKOUT_DISABLE); | ||
411 | if (ret < 0) | ||
412 | return ERR_PTR(ret); | ||
413 | |||
414 | init.name = "hym8563-clkout"; | ||
415 | init.ops = &hym8563_clkout_ops; | ||
416 | init.flags = CLK_IS_ROOT; | ||
417 | init.parent_names = NULL; | ||
418 | init.num_parents = 0; | ||
419 | hym8563->clkout_hw.init = &init; | ||
420 | |||
421 | /* register the clock */ | ||
422 | clk = clk_register(&client->dev, &hym8563->clkout_hw); | ||
423 | |||
424 | if (!IS_ERR(clk)) | ||
425 | of_clk_add_provider(node, of_clk_src_simple_get, clk); | ||
426 | |||
427 | return clk; | ||
428 | } | ||
429 | #endif | ||
430 | |||
431 | /* | ||
432 | * The alarm interrupt is implemented as a level-low interrupt in the | ||
433 | * hym8563, while the timer interrupt uses a falling edge. | ||
434 | * We don't use the timer at all, so the interrupt is requested to | ||
435 | * use the level-low trigger. | ||
436 | */ | ||
437 | static irqreturn_t hym8563_irq(int irq, void *dev_id) | ||
438 | { | ||
439 | struct hym8563 *hym8563 = (struct hym8563 *)dev_id; | ||
440 | struct i2c_client *client = hym8563->client; | ||
441 | struct mutex *lock = &hym8563->rtc->ops_lock; | ||
442 | int data, ret; | ||
443 | |||
444 | mutex_lock(lock); | ||
445 | |||
446 | /* Clear the alarm flag */ | ||
447 | |||
448 | data = i2c_smbus_read_byte_data(client, HYM8563_CTL2); | ||
449 | if (data < 0) { | ||
450 | dev_err(&client->dev, "%s: error reading i2c data %d\n", | ||
451 | __func__, data); | ||
452 | goto out; | ||
453 | } | ||
454 | |||
455 | data &= ~HYM8563_CTL2_AF; | ||
456 | |||
457 | ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, data); | ||
458 | if (ret < 0) { | ||
459 | dev_err(&client->dev, "%s: error writing i2c data %d\n", | ||
460 | __func__, ret); | ||
461 | } | ||
462 | |||
463 | out: | ||
464 | mutex_unlock(lock); | ||
465 | return IRQ_HANDLED; | ||
466 | } | ||
467 | |||
468 | static int hym8563_init_device(struct i2c_client *client) | ||
469 | { | ||
470 | int ret; | ||
471 | |||
472 | /* Clear stop flag if present */ | ||
473 | ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0); | ||
474 | if (ret < 0) | ||
475 | return ret; | ||
476 | |||
477 | ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2); | ||
478 | if (ret < 0) | ||
479 | return ret; | ||
480 | |||
481 | /* Disable alarm and timer interrupts */ | ||
482 | ret &= ~HYM8563_CTL2_AIE; | ||
483 | ret &= ~HYM8563_CTL2_TIE; | ||
484 | |||
485 | /* Clear any pending alarm and timer flags */ | ||
486 | if (ret & HYM8563_CTL2_AF) | ||
487 | ret &= ~HYM8563_CTL2_AF; | ||
488 | |||
489 | if (ret & HYM8563_CTL2_TF) | ||
490 | ret &= ~HYM8563_CTL2_TF; | ||
491 | |||
492 | ret &= ~HYM8563_CTL2_TI_TP; | ||
493 | |||
494 | return i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret); | ||
495 | } | ||
496 | |||
497 | #ifdef CONFIG_PM_SLEEP | ||
498 | static int hym8563_suspend(struct device *dev) | ||
499 | { | ||
500 | struct i2c_client *client = to_i2c_client(dev); | ||
501 | int ret; | ||
502 | |||
503 | if (device_may_wakeup(dev)) { | ||
504 | ret = enable_irq_wake(client->irq); | ||
505 | if (ret) { | ||
506 | dev_err(dev, "enable_irq_wake failed, %d\n", ret); | ||
507 | return ret; | ||
508 | } | ||
509 | } | ||
510 | |||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | static int hym8563_resume(struct device *dev) | ||
515 | { | ||
516 | struct i2c_client *client = to_i2c_client(dev); | ||
517 | |||
518 | if (device_may_wakeup(dev)) | ||
519 | disable_irq_wake(client->irq); | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | #endif | ||
524 | |||
525 | static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume); | ||
526 | |||
527 | static int hym8563_probe(struct i2c_client *client, | ||
528 | const struct i2c_device_id *id) | ||
529 | { | ||
530 | struct hym8563 *hym8563; | ||
531 | int ret; | ||
532 | |||
533 | hym8563 = devm_kzalloc(&client->dev, sizeof(*hym8563), GFP_KERNEL); | ||
534 | if (!hym8563) | ||
535 | return -ENOMEM; | ||
536 | |||
537 | hym8563->client = client; | ||
538 | i2c_set_clientdata(client, hym8563); | ||
539 | |||
540 | device_set_wakeup_capable(&client->dev, true); | ||
541 | |||
542 | ret = hym8563_init_device(client); | ||
543 | if (ret) { | ||
544 | dev_err(&client->dev, "could not init device, %d\n", ret); | ||
545 | return ret; | ||
546 | } | ||
547 | |||
548 | ret = devm_request_threaded_irq(&client->dev, client->irq, | ||
549 | NULL, hym8563_irq, | ||
550 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | ||
551 | client->name, hym8563); | ||
552 | if (ret < 0) { | ||
553 | dev_err(&client->dev, "irq %d request failed, %d\n", | ||
554 | client->irq, ret); | ||
555 | return ret; | ||
556 | } | ||
557 | |||
558 | /* check state of calendar information */ | ||
559 | ret = i2c_smbus_read_byte_data(client, HYM8563_SEC); | ||
560 | if (ret < 0) | ||
561 | return ret; | ||
562 | |||
563 | hym8563->valid = !(ret & HYM8563_SEC_VL); | ||
564 | dev_dbg(&client->dev, "rtc information is %s\n", | ||
565 | hym8563->valid ? "valid" : "invalid"); | ||
566 | |||
567 | hym8563->rtc = devm_rtc_device_register(&client->dev, client->name, | ||
568 | &hym8563_rtc_ops, THIS_MODULE); | ||
569 | if (IS_ERR(hym8563->rtc)) | ||
570 | return PTR_ERR(hym8563->rtc); | ||
571 | |||
572 | #ifdef CONFIG_COMMON_CLK | ||
573 | hym8563_clkout_register_clk(hym8563); | ||
574 | #endif | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static const struct i2c_device_id hym8563_id[] = { | ||
580 | { "hym8563", 0 }, | ||
581 | {}, | ||
582 | }; | ||
583 | MODULE_DEVICE_TABLE(i2c, hym8563_id); | ||
584 | |||
585 | static struct of_device_id hym8563_dt_idtable[] = { | ||
586 | { .compatible = "haoyu,hym8563" }, | ||
587 | {}, | ||
588 | }; | ||
589 | MODULE_DEVICE_TABLE(of, hym8563_dt_idtable); | ||
590 | |||
591 | static struct i2c_driver hym8563_driver = { | ||
592 | .driver = { | ||
593 | .name = "rtc-hym8563", | ||
594 | .owner = THIS_MODULE, | ||
595 | .pm = &hym8563_pm_ops, | ||
596 | .of_match_table = hym8563_dt_idtable, | ||
597 | }, | ||
598 | .probe = hym8563_probe, | ||
599 | .id_table = hym8563_id, | ||
600 | }; | ||
601 | |||
602 | module_i2c_driver(hym8563_driver); | ||
603 | |||
604 | MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); | ||
605 | MODULE_DESCRIPTION("HYM8563 RTC driver"); | ||
606 | MODULE_LICENSE("GPL"); | ||
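Editor's note: the hym8563 driver above, like the other RTC drivers in this series, keeps every time field in BCD registers and converts with bcd2bin()/bin2bcd() from <linux/bcd.h>. The following is a minimal user-space sketch of those conversions, my own illustration rather than the kernel header itself:

#include <stdio.h>

/* Sketch of the <linux/bcd.h> helpers: one byte carries two decimal digits. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;	/* low nibble = ones, high nibble = tens */
}

static unsigned char bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	printf("0x59 -> %u\n", bcd2bin(0x59));	/* a seconds register of 0x59 means 59 */
	printf("37   -> 0x%02x\n", bin2bcd(37));	/* prints 0x25 */
	return 0;
}

Masks such as HYM8563_SEC_MASK are applied before bcd2bin() because the top bits of several registers carry flags (for example the voltage-low bit) rather than digits.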
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c index 8e45b3c4aa2f..3032178bd9e6 100644 --- a/drivers/rtc/rtc-max8907.c +++ b/drivers/rtc/rtc-max8907.c | |||
@@ -51,7 +51,7 @@ static irqreturn_t max8907_irq_handler(int irq, void *data) | |||
51 | { | 51 | { |
52 | struct max8907_rtc *rtc = data; | 52 | struct max8907_rtc *rtc = data; |
53 | 53 | ||
54 | regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); | 54 | regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); |
55 | 55 | ||
56 | rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); | 56 | rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); |
57 | 57 | ||
@@ -64,7 +64,7 @@ static void regs_to_tm(u8 *regs, struct rtc_time *tm) | |||
64 | bcd2bin(regs[RTC_YEAR1]) - 1900; | 64 | bcd2bin(regs[RTC_YEAR1]) - 1900; |
65 | tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1; | 65 | tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1; |
66 | tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f); | 66 | tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f); |
67 | tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1; | 67 | tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07); |
68 | if (regs[RTC_HOUR] & HOUR_12) { | 68 | if (regs[RTC_HOUR] & HOUR_12) { |
69 | tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f); | 69 | tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f); |
70 | if (tm->tm_hour == 12) | 70 | if (tm->tm_hour == 12) |
@@ -88,7 +88,7 @@ static void tm_to_regs(struct rtc_time *tm, u8 *regs) | |||
88 | regs[RTC_YEAR1] = bin2bcd(low); | 88 | regs[RTC_YEAR1] = bin2bcd(low); |
89 | regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1); | 89 | regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1); |
90 | regs[RTC_DATE] = bin2bcd(tm->tm_mday); | 90 | regs[RTC_DATE] = bin2bcd(tm->tm_mday); |
91 | regs[RTC_WEEKDAY] = tm->tm_wday + 1; | 91 | regs[RTC_WEEKDAY] = tm->tm_wday; |
92 | regs[RTC_HOUR] = bin2bcd(tm->tm_hour); | 92 | regs[RTC_HOUR] = bin2bcd(tm->tm_hour); |
93 | regs[RTC_MIN] = bin2bcd(tm->tm_min); | 93 | regs[RTC_MIN] = bin2bcd(tm->tm_min); |
94 | regs[RTC_SEC] = bin2bcd(tm->tm_sec); | 94 | regs[RTC_SEC] = bin2bcd(tm->tm_sec); |
@@ -153,7 +153,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
153 | tm_to_regs(&alrm->time, regs); | 153 | tm_to_regs(&alrm->time, regs); |
154 | 154 | ||
155 | /* Disable alarm while we update the target time */ | 155 | /* Disable alarm while we update the target time */ |
156 | ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); | 156 | ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); |
157 | if (ret < 0) | 157 | if (ret < 0) |
158 | return ret; | 158 | return ret; |
159 | 159 | ||
@@ -163,8 +163,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
163 | return ret; | 163 | return ret; |
164 | 164 | ||
165 | if (alrm->enabled) | 165 | if (alrm->enabled) |
166 | ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, | 166 | ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77); |
167 | 0x7f, 0x7f); | ||
168 | 167 | ||
169 | return ret; | 168 | return ret; |
170 | } | 169 | } |
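Editor's note: the max8907 hunk above replaces regmap_update_bits() calls whose 0x7f mask already covered the whole field with regmap_write(), dropping the read half of the read-modify-write. The sketch below contrasts the two calls; it is illustrative kernel-style code, not part of the patch:

#include <linux/regmap.h>

/* Illustration only: two ways to clear an alarm-control register. */
static int alarm_cntl_clear_rmw(struct regmap *map, unsigned int reg)
{
	/* Read-modify-write: clears bits 0-6, preserves anything outside the mask. */
	return regmap_update_bits(map, reg, 0x7f, 0);
}

static int alarm_cntl_clear_write(struct regmap *map, unsigned int reg)
{
	/* Single write: sets the whole register and skips the extra bus read. */
	return regmap_write(map, reg, 0);
}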
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 50c572645546..419874fefa4b 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
@@ -391,11 +391,13 @@ static int mxc_rtc_probe(struct platform_device *pdev) | |||
391 | pdata->clk = devm_clk_get(&pdev->dev, NULL); | 391 | pdata->clk = devm_clk_get(&pdev->dev, NULL); |
392 | if (IS_ERR(pdata->clk)) { | 392 | if (IS_ERR(pdata->clk)) { |
393 | dev_err(&pdev->dev, "unable to get clock!\n"); | 393 | dev_err(&pdev->dev, "unable to get clock!\n"); |
394 | ret = PTR_ERR(pdata->clk); | 394 | return PTR_ERR(pdata->clk); |
395 | goto exit_free_pdata; | ||
396 | } | 395 | } |
397 | 396 | ||
398 | clk_prepare_enable(pdata->clk); | 397 | ret = clk_prepare_enable(pdata->clk); |
398 | if (ret) | ||
399 | return ret; | ||
400 | |||
399 | rate = clk_get_rate(pdata->clk); | 401 | rate = clk_get_rate(pdata->clk); |
400 | 402 | ||
401 | if (rate == 32768) | 403 | if (rate == 32768) |
@@ -447,8 +449,6 @@ static int mxc_rtc_probe(struct platform_device *pdev) | |||
447 | exit_put_clk: | 449 | exit_put_clk: |
448 | clk_disable_unprepare(pdata->clk); | 450 | clk_disable_unprepare(pdata->clk); |
449 | 451 | ||
450 | exit_free_pdata: | ||
451 | |||
452 | return ret; | 452 | return ret; |
453 | } | 453 | } |
454 | 454 | ||
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 1ee514a3972c..9bd842e97749 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c | |||
@@ -197,10 +197,7 @@ static int pcf2127_probe(struct i2c_client *client, | |||
197 | pcf2127_driver.driver.name, | 197 | pcf2127_driver.driver.name, |
198 | &pcf2127_rtc_ops, THIS_MODULE); | 198 | &pcf2127_rtc_ops, THIS_MODULE); |
199 | 199 | ||
200 | if (IS_ERR(pcf2127->rtc)) | 200 | return PTR_ERR_OR_ZERO(pcf2127->rtc); |
201 | return PTR_ERR(pcf2127->rtc); | ||
202 | |||
203 | return 0; | ||
204 | } | 201 | } |
205 | 202 | ||
206 | static const struct i2c_device_id pcf2127_id[] = { | 203 | static const struct i2c_device_id pcf2127_id[] = { |
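Editor's note: the pcf2127 cleanup above folds the trailing IS_ERR()/PTR_ERR() check into PTR_ERR_OR_ZERO(). A sketch of what that helper amounts to (not the exact kernel definition):

#include <linux/err.h>

/* Roughly what PTR_ERR_OR_ZERO() does for an ERR_PTR-encoded pointer. */
static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* negative errno packed into the pointer */
	return 0;			/* valid pointer: success */
}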
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c index 00b0eb7fe166..de8d9c427782 100644 --- a/drivers/rtc/rtc-rx8581.c +++ b/drivers/rtc/rtc-rx8581.c | |||
@@ -52,8 +52,45 @@ | |||
52 | #define RX8581_CTRL_STOP 0x02 /* STOP bit */ | 52 | #define RX8581_CTRL_STOP 0x02 /* STOP bit */ |
53 | #define RX8581_CTRL_RESET 0x01 /* RESET bit */ | 53 | #define RX8581_CTRL_RESET 0x01 /* RESET bit */ |
54 | 54 | ||
55 | struct rx8581 { | ||
56 | struct i2c_client *client; | ||
57 | struct rtc_device *rtc; | ||
58 | s32 (*read_block_data)(const struct i2c_client *client, u8 command, | ||
59 | u8 length, u8 *values); | ||
60 | s32 (*write_block_data)(const struct i2c_client *client, u8 command, | ||
61 | u8 length, const u8 *values); | ||
62 | }; | ||
63 | |||
55 | static struct i2c_driver rx8581_driver; | 64 | static struct i2c_driver rx8581_driver; |
56 | 65 | ||
66 | static int rx8581_read_block_data(const struct i2c_client *client, u8 command, | ||
67 | u8 length, u8 *values) | ||
68 | { | ||
69 | s32 i, data; | ||
70 | |||
71 | for (i = 0; i < length; i++) { | ||
72 | data = i2c_smbus_read_byte_data(client, command + i); | ||
73 | if (data < 0) | ||
74 | return data; | ||
75 | values[i] = data; | ||
76 | } | ||
77 | return i; | ||
78 | } | ||
79 | |||
80 | static int rx8581_write_block_data(const struct i2c_client *client, u8 command, | ||
81 | u8 length, const u8 *values) | ||
82 | { | ||
83 | s32 i, ret; | ||
84 | |||
85 | for (i = 0; i < length; i++) { | ||
86 | ret = i2c_smbus_write_byte_data(client, command + i, | ||
87 | values[i]); | ||
88 | if (ret < 0) | ||
89 | return ret; | ||
90 | } | ||
91 | return length; | ||
92 | } | ||
93 | |||
57 | /* | 94 | /* |
58 | * In the routines that deal directly with the rx8581 hardware, we use | 95 | * In the routines that deal directly with the rx8581 hardware, we use |
59 | * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. | 96 | * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. |
@@ -62,6 +99,7 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
62 | { | 99 | { |
63 | unsigned char date[7]; | 100 | unsigned char date[7]; |
64 | int data, err; | 101 | int data, err; |
102 | struct rx8581 *rx8581 = i2c_get_clientdata(client); | ||
65 | 103 | ||
66 | /* First we ensure that the "update flag" is not set, we read the | 104 | /* First we ensure that the "update flag" is not set, we read the |
67 | * time and date then re-read the "update flag". If the update flag | 105 | * time and date then re-read the "update flag". If the update flag |
@@ -80,14 +118,13 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
80 | err = i2c_smbus_write_byte_data(client, | 118 | err = i2c_smbus_write_byte_data(client, |
81 | RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF)); | 119 | RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF)); |
82 | if (err != 0) { | 120 | if (err != 0) { |
83 | dev_err(&client->dev, "Unable to write device " | 121 | dev_err(&client->dev, "Unable to write device flags\n"); |
84 | "flags\n"); | ||
85 | return -EIO; | 122 | return -EIO; |
86 | } | 123 | } |
87 | } | 124 | } |
88 | 125 | ||
89 | /* Now read time and date */ | 126 | /* Now read time and date */ |
90 | err = i2c_smbus_read_i2c_block_data(client, RX8581_REG_SC, | 127 | err = rx8581->read_block_data(client, RX8581_REG_SC, |
91 | 7, date); | 128 | 7, date); |
92 | if (err < 0) { | 129 | if (err < 0) { |
93 | dev_err(&client->dev, "Unable to read date\n"); | 130 | dev_err(&client->dev, "Unable to read date\n"); |
@@ -140,6 +177,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
140 | { | 177 | { |
141 | int data, err; | 178 | int data, err; |
142 | unsigned char buf[7]; | 179 | unsigned char buf[7]; |
180 | struct rx8581 *rx8581 = i2c_get_clientdata(client); | ||
143 | 181 | ||
144 | dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " | 182 | dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " |
145 | "mday=%d, mon=%d, year=%d, wday=%d\n", | 183 | "mday=%d, mon=%d, year=%d, wday=%d\n", |
@@ -176,7 +214,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
176 | } | 214 | } |
177 | 215 | ||
178 | /* write register's data */ | 216 | /* write register's data */ |
179 | err = i2c_smbus_write_i2c_block_data(client, RX8581_REG_SC, 7, buf); | 217 | err = rx8581->write_block_data(client, RX8581_REG_SC, 7, buf); |
180 | if (err < 0) { | 218 | if (err < 0) { |
181 | dev_err(&client->dev, "Unable to write to date registers\n"); | 219 | dev_err(&client->dev, "Unable to write to date registers\n"); |
182 | return -EIO; | 220 | return -EIO; |
@@ -231,22 +269,39 @@ static const struct rtc_class_ops rx8581_rtc_ops = { | |||
231 | static int rx8581_probe(struct i2c_client *client, | 269 | static int rx8581_probe(struct i2c_client *client, |
232 | const struct i2c_device_id *id) | 270 | const struct i2c_device_id *id) |
233 | { | 271 | { |
234 | struct rtc_device *rtc; | 272 | struct rx8581 *rx8581; |
235 | 273 | ||
236 | dev_dbg(&client->dev, "%s\n", __func__); | 274 | dev_dbg(&client->dev, "%s\n", __func__); |
237 | 275 | ||
238 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | 276 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA) |
239 | return -ENODEV; | 277 | && !i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) |
278 | return -EIO; | ||
240 | 279 | ||
241 | dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); | 280 | rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL); |
281 | if (!rx8581) | ||
282 | return -ENOMEM; | ||
242 | 283 | ||
243 | rtc = devm_rtc_device_register(&client->dev, rx8581_driver.driver.name, | 284 | i2c_set_clientdata(client, rx8581); |
244 | &rx8581_rtc_ops, THIS_MODULE); | 285 | rx8581->client = client; |
245 | 286 | ||
246 | if (IS_ERR(rtc)) | 287 | if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { |
247 | return PTR_ERR(rtc); | 288 | rx8581->read_block_data = i2c_smbus_read_i2c_block_data; |
289 | rx8581->write_block_data = i2c_smbus_write_i2c_block_data; | ||
290 | } else { | ||
291 | rx8581->read_block_data = rx8581_read_block_data; | ||
292 | rx8581->write_block_data = rx8581_write_block_data; | ||
293 | } | ||
248 | 294 | ||
249 | i2c_set_clientdata(client, rtc); | 295 | dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); |
296 | |||
297 | rx8581->rtc = devm_rtc_device_register(&client->dev, | ||
298 | rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE); | ||
299 | |||
300 | if (IS_ERR(rx8581->rtc)) { | ||
301 | dev_err(&client->dev, | ||
302 | "unable to register the class device\n"); | ||
303 | return PTR_ERR(rx8581->rtc); | ||
304 | } | ||
250 | 305 | ||
251 | return 0; | 306 | return 0; |
252 | } | 307 | } |
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index ae8119dc2846..476af93543f6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c | |||
@@ -639,6 +639,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) | |||
639 | s5m_rtc_enable_smpl(info, false); | 639 | s5m_rtc_enable_smpl(info, false); |
640 | } | 640 | } |
641 | 641 | ||
642 | #ifdef CONFIG_PM_SLEEP | ||
642 | static int s5m_rtc_resume(struct device *dev) | 643 | static int s5m_rtc_resume(struct device *dev) |
643 | { | 644 | { |
644 | struct s5m_rtc_info *info = dev_get_drvdata(dev); | 645 | struct s5m_rtc_info *info = dev_get_drvdata(dev); |
@@ -660,6 +661,7 @@ static int s5m_rtc_suspend(struct device *dev) | |||
660 | 661 | ||
661 | return ret; | 662 | return ret; |
662 | } | 663 | } |
664 | #endif /* CONFIG_PM_SLEEP */ | ||
663 | 665 | ||
664 | static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); | 666 | static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); |
665 | 667 | ||
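Editor's note: the s5m change above guards the suspend/resume callbacks with CONFIG_PM_SLEEP because SIMPLE_DEV_PM_OPS() only references them when that option is enabled; without the guard, a !CONFIG_PM_SLEEP build warns about defined-but-unused functions. A generic sketch of the pattern, with foo_* as placeholder names:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
#endif /* CONFIG_PM_SLEEP */

/*
 * With CONFIG_PM_SLEEP disabled this expands to an ops table that never
 * mentions foo_suspend/foo_resume, so the #ifdef above is what keeps the
 * compiler from flagging them as unused.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);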
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index c2e80d7ca5e2..1915464e4cd6 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c | |||
@@ -479,7 +479,7 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
479 | u8 rd_reg; | 479 | u8 rd_reg; |
480 | 480 | ||
481 | if (irq <= 0) | 481 | if (irq <= 0) |
482 | goto out1; | 482 | return ret; |
483 | 483 | ||
484 | /* Initialize the register map */ | 484 | /* Initialize the register map */ |
485 | if (twl_class_is_4030()) | 485 | if (twl_class_is_4030()) |
@@ -489,7 +489,7 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
489 | 489 | ||
490 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); | 490 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); |
491 | if (ret < 0) | 491 | if (ret < 0) |
492 | goto out1; | 492 | return ret; |
493 | 493 | ||
494 | if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) | 494 | if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) |
495 | dev_warn(&pdev->dev, "Power up reset detected.\n"); | 495 | dev_warn(&pdev->dev, "Power up reset detected.\n"); |
@@ -500,7 +500,7 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
500 | /* Clear RTC Power up reset and pending alarm interrupts */ | 500 | /* Clear RTC Power up reset and pending alarm interrupts */ |
501 | ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); | 501 | ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); |
502 | if (ret < 0) | 502 | if (ret < 0) |
503 | goto out1; | 503 | return ret; |
504 | 504 | ||
505 | if (twl_class_is_6030()) { | 505 | if (twl_class_is_6030()) { |
506 | twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, | 506 | twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, |
@@ -512,7 +512,7 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
512 | dev_info(&pdev->dev, "Enabling TWL-RTC\n"); | 512 | dev_info(&pdev->dev, "Enabling TWL-RTC\n"); |
513 | ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG); | 513 | ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG); |
514 | if (ret < 0) | 514 | if (ret < 0) |
515 | goto out1; | 515 | return ret; |
516 | 516 | ||
517 | /* ensure interrupts are disabled, bootloaders can be strange */ | 517 | /* ensure interrupts are disabled, bootloaders can be strange */ |
518 | ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); | 518 | ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); |
@@ -522,34 +522,29 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
522 | /* init cached IRQ enable bits */ | 522 | /* init cached IRQ enable bits */ |
523 | ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); | 523 | ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); |
524 | if (ret < 0) | 524 | if (ret < 0) |
525 | goto out1; | 525 | return ret; |
526 | 526 | ||
527 | device_init_wakeup(&pdev->dev, 1); | 527 | device_init_wakeup(&pdev->dev, 1); |
528 | 528 | ||
529 | rtc = rtc_device_register(pdev->name, | 529 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
530 | &pdev->dev, &twl_rtc_ops, THIS_MODULE); | 530 | &twl_rtc_ops, THIS_MODULE); |
531 | if (IS_ERR(rtc)) { | 531 | if (IS_ERR(rtc)) { |
532 | ret = PTR_ERR(rtc); | ||
533 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", | 532 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", |
534 | PTR_ERR(rtc)); | 533 | PTR_ERR(rtc)); |
535 | goto out1; | 534 | return PTR_ERR(rtc); |
536 | } | 535 | } |
537 | 536 | ||
538 | ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, | 537 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
539 | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | 538 | twl_rtc_interrupt, |
540 | dev_name(&rtc->dev), rtc); | 539 | IRQF_TRIGGER_RISING | IRQF_ONESHOT, |
540 | dev_name(&rtc->dev), rtc); | ||
541 | if (ret < 0) { | 541 | if (ret < 0) { |
542 | dev_err(&pdev->dev, "IRQ is not free.\n"); | 542 | dev_err(&pdev->dev, "IRQ is not free.\n"); |
543 | goto out2; | 543 | return ret; |
544 | } | 544 | } |
545 | 545 | ||
546 | platform_set_drvdata(pdev, rtc); | 546 | platform_set_drvdata(pdev, rtc); |
547 | return 0; | 547 | return 0; |
548 | |||
549 | out2: | ||
550 | rtc_device_unregister(rtc); | ||
551 | out1: | ||
552 | return ret; | ||
553 | } | 548 | } |
554 | 549 | ||
555 | /* | 550 | /* |
@@ -559,9 +554,6 @@ out1: | |||
559 | static int twl_rtc_remove(struct platform_device *pdev) | 554 | static int twl_rtc_remove(struct platform_device *pdev) |
560 | { | 555 | { |
561 | /* leave rtc running, but disable irqs */ | 556 | /* leave rtc running, but disable irqs */ |
562 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
563 | int irq = platform_get_irq(pdev, 0); | ||
564 | |||
565 | mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); | 557 | mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); |
566 | mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); | 558 | mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); |
567 | if (twl_class_is_6030()) { | 559 | if (twl_class_is_6030()) { |
@@ -571,10 +563,6 @@ static int twl_rtc_remove(struct platform_device *pdev) | |||
571 | REG_INT_MSK_STS_A); | 563 | REG_INT_MSK_STS_A); |
572 | } | 564 | } |
573 | 565 | ||
574 | |||
575 | free_irq(irq, rtc); | ||
576 | |||
577 | rtc_device_unregister(rtc); | ||
578 | return 0; | 566 | return 0; |
579 | } | 567 | } |
580 | 568 | ||
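Editor's note: the twl-rtc rework above (and the backlight conversions further down) relies on device-managed resources: devm_rtc_device_register() and devm_request_threaded_irq() are released automatically when the device is unbound, so the out1/out2 error labels and the manual free_irq()/rtc_device_unregister() calls in remove() become unnecessary. A generic, hedged sketch of the resulting probe shape (foo_* names are placeholders, not the twl code itself):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static const struct rtc_class_ops foo_rtc_ops;
static irqreturn_t foo_irq(int irq, void *data) { return IRQ_HANDLED; }

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	struct rtc_device *rtc;
	int ret;

	if (irq <= 0)
		return -EINVAL;

	rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
				       &foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);		/* nothing to unwind by hand */

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_irq,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					dev_name(&rtc->dev), rtc);
	if (ret < 0)
		return ret;			/* devm releases the RTC for us */

	platform_set_drvdata(pdev, rtc);
	return 0;
}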
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index aabc22c587fb..88c9c92e89fd 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -293,7 +293,7 @@ static int rtc_probe(struct platform_device *pdev) | |||
293 | if (!res) | 293 | if (!res) |
294 | return -EBUSY; | 294 | return -EBUSY; |
295 | 295 | ||
296 | rtc1_base = ioremap(res->start, resource_size(res)); | 296 | rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); |
297 | if (!rtc1_base) | 297 | if (!rtc1_base) |
298 | return -EBUSY; | 298 | return -EBUSY; |
299 | 299 | ||
@@ -303,13 +303,14 @@ static int rtc_probe(struct platform_device *pdev) | |||
303 | goto err_rtc1_iounmap; | 303 | goto err_rtc1_iounmap; |
304 | } | 304 | } |
305 | 305 | ||
306 | rtc2_base = ioremap(res->start, resource_size(res)); | 306 | rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); |
307 | if (!rtc2_base) { | 307 | if (!rtc2_base) { |
308 | retval = -EBUSY; | 308 | retval = -EBUSY; |
309 | goto err_rtc1_iounmap; | 309 | goto err_rtc1_iounmap; |
310 | } | 310 | } |
311 | 311 | ||
312 | rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE); | 312 | rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops, |
313 | THIS_MODULE); | ||
313 | if (IS_ERR(rtc)) { | 314 | if (IS_ERR(rtc)) { |
314 | retval = PTR_ERR(rtc); | 315 | retval = PTR_ERR(rtc); |
315 | goto err_iounmap_all; | 316 | goto err_iounmap_all; |
@@ -330,24 +331,24 @@ static int rtc_probe(struct platform_device *pdev) | |||
330 | aie_irq = platform_get_irq(pdev, 0); | 331 | aie_irq = platform_get_irq(pdev, 0); |
331 | if (aie_irq <= 0) { | 332 | if (aie_irq <= 0) { |
332 | retval = -EBUSY; | 333 | retval = -EBUSY; |
333 | goto err_device_unregister; | 334 | goto err_iounmap_all; |
334 | } | 335 | } |
335 | 336 | ||
336 | retval = request_irq(aie_irq, elapsedtime_interrupt, 0, | 337 | retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0, |
337 | "elapsed_time", pdev); | 338 | "elapsed_time", pdev); |
338 | if (retval < 0) | 339 | if (retval < 0) |
339 | goto err_device_unregister; | 340 | goto err_iounmap_all; |
340 | 341 | ||
341 | pie_irq = platform_get_irq(pdev, 1); | 342 | pie_irq = platform_get_irq(pdev, 1); |
342 | if (pie_irq <= 0) { | 343 | if (pie_irq <= 0) { |
343 | retval = -EBUSY; | 344 | retval = -EBUSY; |
344 | goto err_free_irq; | 345 | goto err_iounmap_all; |
345 | } | 346 | } |
346 | 347 | ||
347 | retval = request_irq(pie_irq, rtclong1_interrupt, 0, | 348 | retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0, |
348 | "rtclong1", pdev); | 349 | "rtclong1", pdev); |
349 | if (retval < 0) | 350 | if (retval < 0) |
350 | goto err_free_irq; | 351 | goto err_iounmap_all; |
351 | 352 | ||
352 | platform_set_drvdata(pdev, rtc); | 353 | platform_set_drvdata(pdev, rtc); |
353 | 354 | ||
@@ -358,47 +359,20 @@ static int rtc_probe(struct platform_device *pdev) | |||
358 | 359 | ||
359 | return 0; | 360 | return 0; |
360 | 361 | ||
361 | err_free_irq: | ||
362 | free_irq(aie_irq, pdev); | ||
363 | |||
364 | err_device_unregister: | ||
365 | rtc_device_unregister(rtc); | ||
366 | |||
367 | err_iounmap_all: | 362 | err_iounmap_all: |
368 | iounmap(rtc2_base); | ||
369 | rtc2_base = NULL; | 363 | rtc2_base = NULL; |
370 | 364 | ||
371 | err_rtc1_iounmap: | 365 | err_rtc1_iounmap: |
372 | iounmap(rtc1_base); | ||
373 | rtc1_base = NULL; | 366 | rtc1_base = NULL; |
374 | 367 | ||
375 | return retval; | 368 | return retval; |
376 | } | 369 | } |
377 | 370 | ||
378 | static int rtc_remove(struct platform_device *pdev) | ||
379 | { | ||
380 | struct rtc_device *rtc; | ||
381 | |||
382 | rtc = platform_get_drvdata(pdev); | ||
383 | if (rtc) | ||
384 | rtc_device_unregister(rtc); | ||
385 | |||
386 | free_irq(aie_irq, pdev); | ||
387 | free_irq(pie_irq, pdev); | ||
388 | if (rtc1_base) | ||
389 | iounmap(rtc1_base); | ||
390 | if (rtc2_base) | ||
391 | iounmap(rtc2_base); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | /* work with hotplug and coldplug */ | 371 | /* work with hotplug and coldplug */ |
397 | MODULE_ALIAS("platform:RTC"); | 372 | MODULE_ALIAS("platform:RTC"); |
398 | 373 | ||
399 | static struct platform_driver rtc_platform_driver = { | 374 | static struct platform_driver rtc_platform_driver = { |
400 | .probe = rtc_probe, | 375 | .probe = rtc_probe, |
401 | .remove = rtc_remove, | ||
402 | .driver = { | 376 | .driver = { |
403 | .name = rtc_name, | 377 | .name = rtc_name, |
404 | .owner = THIS_MODULE, | 378 | .owner = THIS_MODULE, |
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 12ca031877d4..52108be69e77 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
@@ -357,11 +357,13 @@ static int default_lcd_on = 1; | |||
357 | static bool mtrr = true; | 357 | static bool mtrr = true; |
358 | #endif | 358 | #endif |
359 | 359 | ||
360 | #ifdef CONFIG_FB_ATY128_BACKLIGHT | ||
360 | #ifdef CONFIG_PMAC_BACKLIGHT | 361 | #ifdef CONFIG_PMAC_BACKLIGHT |
361 | static int backlight = 1; | 362 | static int backlight = 1; |
362 | #else | 363 | #else |
363 | static int backlight = 0; | 364 | static int backlight = 0; |
364 | #endif | 365 | #endif |
366 | #endif | ||
365 | 367 | ||
366 | /* PLL constants */ | 368 | /* PLL constants */ |
367 | struct aty128_constants { | 369 | struct aty128_constants { |
@@ -1671,7 +1673,9 @@ static int aty128fb_setup(char *options) | |||
1671 | default_crt_on = simple_strtoul(this_opt+4, NULL, 0); | 1673 | default_crt_on = simple_strtoul(this_opt+4, NULL, 0); |
1672 | continue; | 1674 | continue; |
1673 | } else if (!strncmp(this_opt, "backlight:", 10)) { | 1675 | } else if (!strncmp(this_opt, "backlight:", 10)) { |
1676 | #ifdef CONFIG_FB_ATY128_BACKLIGHT | ||
1674 | backlight = simple_strtoul(this_opt+10, NULL, 0); | 1677 | backlight = simple_strtoul(this_opt+10, NULL, 0); |
1678 | #endif | ||
1675 | continue; | 1679 | continue; |
1676 | } | 1680 | } |
1677 | #ifdef CONFIG_MTRR | 1681 | #ifdef CONFIG_MTRR |
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index 00076ecfe9b8..8ea42b8d9bc8 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c | |||
@@ -110,8 +110,8 @@ static int hp680bl_probe(struct platform_device *pdev) | |||
110 | memset(&props, 0, sizeof(struct backlight_properties)); | 110 | memset(&props, 0, sizeof(struct backlight_properties)); |
111 | props.type = BACKLIGHT_RAW; | 111 | props.type = BACKLIGHT_RAW; |
112 | props.max_brightness = HP680_MAX_INTENSITY; | 112 | props.max_brightness = HP680_MAX_INTENSITY; |
113 | bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, | 113 | bd = devm_backlight_device_register(&pdev->dev, "hp680-bl", &pdev->dev, |
114 | &hp680bl_ops, &props); | 114 | NULL, &hp680bl_ops, &props); |
115 | if (IS_ERR(bd)) | 115 | if (IS_ERR(bd)) |
116 | return PTR_ERR(bd); | 116 | return PTR_ERR(bd); |
117 | 117 | ||
@@ -131,8 +131,6 @@ static int hp680bl_remove(struct platform_device *pdev) | |||
131 | bd->props.power = 0; | 131 | bd->props.power = 0; |
132 | hp680bl_send_intensity(bd); | 132 | hp680bl_send_intensity(bd); |
133 | 133 | ||
134 | backlight_device_unregister(bd); | ||
135 | |||
136 | return 0; | 134 | return 0; |
137 | } | 135 | } |
138 | 136 | ||
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c index 3ccb89340f22..6ce96b4a8796 100644 --- a/drivers/video/backlight/jornada720_bl.c +++ b/drivers/video/backlight/jornada720_bl.c | |||
@@ -115,9 +115,10 @@ static int jornada_bl_probe(struct platform_device *pdev) | |||
115 | memset(&props, 0, sizeof(struct backlight_properties)); | 115 | memset(&props, 0, sizeof(struct backlight_properties)); |
116 | props.type = BACKLIGHT_RAW; | 116 | props.type = BACKLIGHT_RAW; |
117 | props.max_brightness = BL_MAX_BRIGHT; | 117 | props.max_brightness = BL_MAX_BRIGHT; |
118 | bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, | ||
119 | &jornada_bl_ops, &props); | ||
120 | 118 | ||
119 | bd = devm_backlight_device_register(&pdev->dev, S1D_DEVICENAME, | ||
120 | &pdev->dev, NULL, &jornada_bl_ops, | ||
121 | &props); | ||
121 | if (IS_ERR(bd)) { | 122 | if (IS_ERR(bd)) { |
122 | ret = PTR_ERR(bd); | 123 | ret = PTR_ERR(bd); |
123 | dev_err(&pdev->dev, "failed to register device, err=%x\n", ret); | 124 | dev_err(&pdev->dev, "failed to register device, err=%x\n", ret); |
@@ -139,18 +140,8 @@ static int jornada_bl_probe(struct platform_device *pdev) | |||
139 | return 0; | 140 | return 0; |
140 | } | 141 | } |
141 | 142 | ||
142 | static int jornada_bl_remove(struct platform_device *pdev) | ||
143 | { | ||
144 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
145 | |||
146 | backlight_device_unregister(bd); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct platform_driver jornada_bl_driver = { | 143 | static struct platform_driver jornada_bl_driver = { |
152 | .probe = jornada_bl_probe, | 144 | .probe = jornada_bl_probe, |
153 | .remove = jornada_bl_remove, | ||
154 | .driver = { | 145 | .driver = { |
155 | .name = "jornada_bl", | 146 | .name = "jornada_bl", |
156 | }, | 147 | }, |
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c index b061413f1a65..da3876c9b3ae 100644 --- a/drivers/video/backlight/jornada720_lcd.c +++ b/drivers/video/backlight/jornada720_lcd.c | |||
@@ -100,7 +100,8 @@ static int jornada_lcd_probe(struct platform_device *pdev) | |||
100 | struct lcd_device *lcd_device; | 100 | struct lcd_device *lcd_device; |
101 | int ret; | 101 | int ret; |
102 | 102 | ||
103 | lcd_device = lcd_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_lcd_props); | 103 | lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME, |
104 | &pdev->dev, NULL, &jornada_lcd_props); | ||
104 | 105 | ||
105 | if (IS_ERR(lcd_device)) { | 106 | if (IS_ERR(lcd_device)) { |
106 | ret = PTR_ERR(lcd_device); | 107 | ret = PTR_ERR(lcd_device); |
@@ -119,18 +120,8 @@ static int jornada_lcd_probe(struct platform_device *pdev) | |||
119 | return 0; | 120 | return 0; |
120 | } | 121 | } |
121 | 122 | ||
122 | static int jornada_lcd_remove(struct platform_device *pdev) | ||
123 | { | ||
124 | struct lcd_device *lcd_device = platform_get_drvdata(pdev); | ||
125 | |||
126 | lcd_device_unregister(lcd_device); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static struct platform_driver jornada_lcd_driver = { | 123 | static struct platform_driver jornada_lcd_driver = { |
132 | .probe = jornada_lcd_probe, | 124 | .probe = jornada_lcd_probe, |
133 | .remove = jornada_lcd_remove, | ||
134 | .driver = { | 125 | .driver = { |
135 | .name = "jornada_lcd", | 126 | .name = "jornada_lcd", |
136 | }, | 127 | }, |
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 7592cc25c963..84a110a719cb 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c | |||
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; | |||
78 | static unsigned long kb3886bl_flags; | 78 | static unsigned long kb3886bl_flags; |
79 | #define KB3886BL_SUSPENDED 0x01 | 79 | #define KB3886BL_SUSPENDED 0x01 |
80 | 80 | ||
81 | static struct dmi_system_id __initdata kb3886bl_device_table[] = { | 81 | static struct dmi_system_id kb3886bl_device_table[] __initdata = { |
82 | { | 82 | { |
83 | .ident = "Sahara Touch-iT", | 83 | .ident = "Sahara Touch-iT", |
84 | .matches = { | 84 | .matches = { |
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index b5fc13bc24e7..63e763828e0e 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c | |||
@@ -223,8 +223,8 @@ static int l4f00242t03_probe(struct spi_device *spi) | |||
223 | return PTR_ERR(priv->core_reg); | 223 | return PTR_ERR(priv->core_reg); |
224 | } | 224 | } |
225 | 225 | ||
226 | priv->ld = lcd_device_register("l4f00242t03", | 226 | priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev, |
227 | &spi->dev, priv, &l4f_ops); | 227 | priv, &l4f_ops); |
228 | if (IS_ERR(priv->ld)) | 228 | if (IS_ERR(priv->ld)) |
229 | return PTR_ERR(priv->ld); | 229 | return PTR_ERR(priv->ld); |
230 | 230 | ||
@@ -243,8 +243,6 @@ static int l4f00242t03_remove(struct spi_device *spi) | |||
243 | struct l4f00242t03_priv *priv = spi_get_drvdata(spi); | 243 | struct l4f00242t03_priv *priv = spi_get_drvdata(spi); |
244 | 244 | ||
245 | l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); | 245 | l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); |
246 | lcd_device_unregister(priv->ld); | ||
247 | |||
248 | return 0; | 246 | return 0; |
249 | } | 247 | } |
250 | 248 | ||
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index cae80d555e84..2ca3a040007b 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c | |||
@@ -125,7 +125,7 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr) | |||
125 | return false; | 125 | return false; |
126 | } | 126 | } |
127 | 127 | ||
128 | return (addr >= start && addr <= end); | 128 | return addr >= start && addr <= end; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int lp8557_bl_off(struct lp855x *lp) | 131 | static int lp8557_bl_off(struct lp855x *lp) |
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c index e49905d495dc..daba34dc46d4 100644 --- a/drivers/video/backlight/lp8788_bl.c +++ b/drivers/video/backlight/lp8788_bl.c | |||
@@ -63,13 +63,13 @@ static struct lp8788_bl_config default_bl_config = { | |||
63 | 63 | ||
64 | static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode) | 64 | static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode) |
65 | { | 65 | { |
66 | return (mode == LP8788_BL_COMB_PWM_BASED); | 66 | return mode == LP8788_BL_COMB_PWM_BASED; |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode) | 69 | static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode) |
70 | { | 70 | { |
71 | return (mode == LP8788_BL_REGISTER_ONLY || | 71 | return mode == LP8788_BL_REGISTER_ONLY || |
72 | mode == LP8788_BL_COMB_REGISTER_BASED); | 72 | mode == LP8788_BL_COMB_REGISTER_BASED; |
73 | } | 73 | } |
74 | 74 | ||
75 | static int lp8788_backlight_configure(struct lp8788_bl *bl) | 75 | static int lp8788_backlight_configure(struct lp8788_bl *bl) |
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c index ac11a4650c19..a0dcd88ac74f 100644 --- a/drivers/video/backlight/omap1_bl.c +++ b/drivers/video/backlight/omap1_bl.c | |||
@@ -146,8 +146,8 @@ static int omapbl_probe(struct platform_device *pdev) | |||
146 | memset(&props, 0, sizeof(struct backlight_properties)); | 146 | memset(&props, 0, sizeof(struct backlight_properties)); |
147 | props.type = BACKLIGHT_RAW; | 147 | props.type = BACKLIGHT_RAW; |
148 | props.max_brightness = OMAPBL_MAX_INTENSITY; | 148 | props.max_brightness = OMAPBL_MAX_INTENSITY; |
149 | dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops, | 149 | dev = devm_backlight_device_register(&pdev->dev, "omap-bl", &pdev->dev, |
150 | &props); | 150 | bl, &omapbl_ops, &props); |
151 | if (IS_ERR(dev)) | 151 | if (IS_ERR(dev)) |
152 | return PTR_ERR(dev); | 152 | return PTR_ERR(dev); |
153 | 153 | ||
@@ -170,20 +170,10 @@ static int omapbl_probe(struct platform_device *pdev) | |||
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | static int omapbl_remove(struct platform_device *pdev) | ||
174 | { | ||
175 | struct backlight_device *dev = platform_get_drvdata(pdev); | ||
176 | |||
177 | backlight_device_unregister(dev); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume); | 173 | static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume); |
183 | 174 | ||
184 | static struct platform_driver omapbl_driver = { | 175 | static struct platform_driver omapbl_driver = { |
185 | .probe = omapbl_probe, | 176 | .probe = omapbl_probe, |
186 | .remove = omapbl_remove, | ||
187 | .driver = { | 177 | .driver = { |
188 | .name = "omap-bl", | 178 | .name = "omap-bl", |
189 | .pm = &omapbl_pm_ops, | 179 | .pm = &omapbl_pm_ops, |
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c index fdbb6ee5027c..f5a5202dd79d 100644 --- a/drivers/video/backlight/ot200_bl.c +++ b/drivers/video/backlight/ot200_bl.c | |||
@@ -118,8 +118,9 @@ static int ot200_backlight_probe(struct platform_device *pdev) | |||
118 | props.brightness = 100; | 118 | props.brightness = 100; |
119 | props.type = BACKLIGHT_RAW; | 119 | props.type = BACKLIGHT_RAW; |
120 | 120 | ||
121 | bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, data, | 121 | bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev), |
122 | &ot200_backlight_ops, &props); | 122 | &pdev->dev, data, &ot200_backlight_ops, |
123 | &props); | ||
123 | if (IS_ERR(bl)) { | 124 | if (IS_ERR(bl)) { |
124 | dev_err(&pdev->dev, "failed to register backlight\n"); | 125 | dev_err(&pdev->dev, "failed to register backlight\n"); |
125 | retval = PTR_ERR(bl); | 126 | retval = PTR_ERR(bl); |
@@ -137,10 +138,6 @@ error_devm_kzalloc: | |||
137 | 138 | ||
138 | static int ot200_backlight_remove(struct platform_device *pdev) | 139 | static int ot200_backlight_remove(struct platform_device *pdev) |
139 | { | 140 | { |
140 | struct backlight_device *bl = platform_get_drvdata(pdev); | ||
141 | |||
142 | backlight_device_unregister(bl); | ||
143 | |||
144 | /* on module unload set brightness to 100% */ | 141 | /* on module unload set brightness to 100% */ |
145 | cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0); | 142 | cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0); |
146 | cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); | 143 | cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); |
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c index b8db9338cacd..3ad676558c80 100644 --- a/drivers/video/backlight/tosa_bl.c +++ b/drivers/video/backlight/tosa_bl.c | |||
@@ -105,8 +105,9 @@ static int tosa_bl_probe(struct i2c_client *client, | |||
105 | memset(&props, 0, sizeof(struct backlight_properties)); | 105 | memset(&props, 0, sizeof(struct backlight_properties)); |
106 | props.type = BACKLIGHT_RAW; | 106 | props.type = BACKLIGHT_RAW; |
107 | props.max_brightness = 512 - 1; | 107 | props.max_brightness = 512 - 1; |
108 | data->bl = backlight_device_register("tosa-bl", &client->dev, data, | 108 | data->bl = devm_backlight_device_register(&client->dev, "tosa-bl", |
109 | &bl_ops, &props); | 109 | &client->dev, data, &bl_ops, |
110 | &props); | ||
110 | if (IS_ERR(data->bl)) { | 111 | if (IS_ERR(data->bl)) { |
111 | ret = PTR_ERR(data->bl); | 112 | ret = PTR_ERR(data->bl); |
112 | goto err_reg; | 113 | goto err_reg; |
@@ -128,9 +129,7 @@ static int tosa_bl_remove(struct i2c_client *client) | |||
128 | { | 129 | { |
129 | struct tosa_bl_data *data = i2c_get_clientdata(client); | 130 | struct tosa_bl_data *data = i2c_get_clientdata(client); |
130 | 131 | ||
131 | backlight_device_unregister(data->bl); | ||
132 | data->bl = NULL; | 132 | data->bl = NULL; |
133 | |||
134 | return 0; | 133 | return 0; |
135 | } | 134 | } |
136 | 135 | ||
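The ot200 and tosa hunks above share one pattern: registering through the device-managed backlight API ties the backlight device's lifetime to the owning struct device, so the remove path no longer needs an explicit backlight_device_unregister() call. A minimal sketch of that pattern, assuming a hypothetical "foo-bl" platform driver (the name, the foo_bl structure and the trivial update_status are illustrative, not taken from the patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>

/* Hypothetical driver state; a real driver would keep register info here. */
struct foo_bl {
	int brightness;
};

static int foo_bl_update_status(struct backlight_device *bl)
{
	struct foo_bl *data = bl_get_data(bl);

	/* A real driver would program the hardware here. */
	data->brightness = bl->props.brightness;
	return 0;
}

static const struct backlight_ops foo_bl_ops = {
	.update_status = foo_bl_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	struct backlight_device *bl;
	struct foo_bl *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 100;

	/* Unregistered automatically when &pdev->dev is released. */
	bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
					    &pdev->dev, data, &foo_bl_ops,
					    &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	platform_set_drvdata(pdev, bl);
	return 0;
}

/* No .remove callback is needed for the unregistration itself. */
static struct platform_driver foo_bl_driver = {
	.probe	= foo_bl_probe,
	.driver	= {
		.name = "foo-bl",
	},
};
module_platform_driver(foo_bl_driver);
MODULE_LICENSE("GPL");

Drivers that still have teardown work of their own (ot200_bl above parks its PWM timer, tosa_lcd below still unregisters its i2c child) keep a slimmer remove callback for exactly that work.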
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index be5d636764bf..f08d641ccd01 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c | |||
@@ -206,8 +206,8 @@ static int tosa_lcd_probe(struct spi_device *spi) | |||
206 | 206 | ||
207 | tosa_lcd_tg_on(data); | 207 | tosa_lcd_tg_on(data); |
208 | 208 | ||
209 | data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data, | 209 | data->lcd = devm_lcd_device_register(&spi->dev, "tosa-lcd", &spi->dev, |
210 | &tosa_lcd_ops); | 210 | data, &tosa_lcd_ops); |
211 | 211 | ||
212 | if (IS_ERR(data->lcd)) { | 212 | if (IS_ERR(data->lcd)) { |
213 | ret = PTR_ERR(data->lcd); | 213 | ret = PTR_ERR(data->lcd); |
@@ -226,8 +226,6 @@ static int tosa_lcd_remove(struct spi_device *spi) | |||
226 | { | 226 | { |
227 | struct tosa_lcd_data *data = spi_get_drvdata(spi); | 227 | struct tosa_lcd_data *data = spi_get_drvdata(spi); |
228 | 228 | ||
229 | lcd_device_unregister(data->lcd); | ||
230 | |||
231 | if (data->i2c) | 229 | if (data->i2c) |
232 | i2c_unregister_device(data->i2c); | 230 | i2c_unregister_device(data->i2c); |
233 | 231 | ||
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index 7b07135ab26e..c0227f9418eb 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c | |||
@@ -762,7 +762,8 @@ static int vlynq_remove(struct platform_device *pdev) | |||
762 | 762 | ||
763 | device_unregister(&dev->dev); | 763 | device_unregister(&dev->dev); |
764 | iounmap(dev->local); | 764 | iounmap(dev->local); |
765 | release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start); | 765 | release_mem_region(dev->regs_start, |
766 | dev->regs_end - dev->regs_start + 1); | ||
766 | 767 | ||
767 | kfree(dev); | 768 | kfree(dev); |
768 | 769 | ||
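The vlynq fix above reflects that struct resource ranges are inclusive of both endpoints, so the byte count handed to release_mem_region() must be end - start + 1; the kernel's resource_size() helper computes exactly that and avoids the off-by-one. A short sketch of the request/map/release pairing under that convention (the demo_* names and "demo" region label are illustrative):

#include <linux/ioport.h>
#include <linux/io.h>

/*
 * A struct resource describes the inclusive range [start, end]; a 4 KiB
 * window at 0x1000 is { .start = 0x1000, .end = 0x1fff } and its length
 * is end - start + 1, which is what resource_size() returns.
 */
static void __iomem *demo_map_regs(struct resource *res)
{
	void __iomem *base;

	if (!request_mem_region(res->start, resource_size(res), "demo"))
		return NULL;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		release_mem_region(res->start, resource_size(res));
	return base;
}

static void demo_unmap_regs(struct resource *res, void __iomem *base)
{
	iounmap(base);
	/* Release the same inclusive-length count that was requested. */
	release_mem_region(res->start, resource_size(res));
}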
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index e36b18b2817b..9709b8b484ba 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c | |||
@@ -18,10 +18,31 @@ | |||
18 | #include <linux/of_gpio.h> | 18 | #include <linux/of_gpio.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/delay.h> | ||
21 | 22 | ||
22 | #include "../w1.h" | 23 | #include "../w1.h" |
23 | #include "../w1_int.h" | 24 | #include "../w1_int.h" |
24 | 25 | ||
26 | static u8 w1_gpio_set_pullup(void *data, int delay) | ||
27 | { | ||
28 | struct w1_gpio_platform_data *pdata = data; | ||
29 | |||
30 | if (delay) { | ||
31 | pdata->pullup_duration = delay; | ||
32 | } else { | ||
33 | if (pdata->pullup_duration) { | ||
34 | gpio_direction_output(pdata->pin, 1); | ||
35 | |||
36 | msleep(pdata->pullup_duration); | ||
37 | |||
38 | gpio_direction_input(pdata->pin); | ||
39 | } | ||
40 | pdata->pullup_duration = 0; | ||
41 | } | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
25 | static void w1_gpio_write_bit_dir(void *data, u8 bit) | 46 | static void w1_gpio_write_bit_dir(void *data, u8 bit) |
26 | { | 47 | { |
27 | struct w1_gpio_platform_data *pdata = data; | 48 | struct w1_gpio_platform_data *pdata = data; |
@@ -132,6 +153,7 @@ static int w1_gpio_probe(struct platform_device *pdev) | |||
132 | } else { | 153 | } else { |
133 | gpio_direction_input(pdata->pin); | 154 | gpio_direction_input(pdata->pin); |
134 | master->write_bit = w1_gpio_write_bit_dir; | 155 | master->write_bit = w1_gpio_write_bit_dir; |
156 | master->set_pullup = w1_gpio_set_pullup; | ||
135 | } | 157 | } |
136 | 158 | ||
137 | err = w1_add_master_device(master); | 159 | err = w1_add_master_device(master); |
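The new w1_gpio_set_pullup() above implements the two-phase strong-pullup contract of the w1 core: a call with a non-zero delay merely arms the request by remembering the duration, and a later call with delay == 0 applies it, driving the line high for that many milliseconds before returning the pin to input (open-drain) mode. The same contract reduced to a commented sketch, with a hypothetical demo_w1 structure standing in for the platform data:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gpio.h>

/* Hypothetical bit-bang master state (not the real w1-gpio platform data). */
struct demo_w1 {
	unsigned int pin;
	unsigned int pullup_ms;	/* armed duration, 0 = nothing pending */
};

static u8 demo_w1_set_pullup(void *data, int delay)
{
	struct demo_w1 *m = data;

	if (delay) {
		/* Phase 1: arm -- called before the final bit of a write. */
		m->pullup_ms = delay;
	} else if (m->pullup_ms) {
		/* Phase 2: apply -- actively drive the line high. */
		gpio_direction_output(m->pin, 1);
		msleep(m->pullup_ms);
		gpio_direction_input(m->pin);
		m->pullup_ms = 0;
	}
	return 0;
}

The w1_int.c hunk that follows removes the old refusal to accept set_pullup from bit-banging masters (those without write_byte or touch_bit), which is what lets this GPIO master advertise the callback at all.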
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 5a98649f6abc..590bd8a7cd1b 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c | |||
@@ -117,18 +117,6 @@ int w1_add_master_device(struct w1_bus_master *master) | |||
117 | printk(KERN_ERR "w1_add_master_device: invalid function set\n"); | 117 | printk(KERN_ERR "w1_add_master_device: invalid function set\n"); |
118 | return(-EINVAL); | 118 | return(-EINVAL); |
119 | } | 119 | } |
120 | /* While it would be electrically possible to make a device that | ||
121 | * generated a strong pullup in bit bang mode, only hardware that | ||
122 | * controls 1-wire time frames are even expected to support a strong | ||
123 | * pullup. w1_io.c would need to support calling set_pullup before | ||
124 | * the last write_bit operation of a w1_write_8 which it currently | ||
125 | * doesn't. | ||
126 | */ | ||
127 | if (!master->write_byte && !master->touch_bit && master->set_pullup) { | ||
128 | printk(KERN_ERR "w1_add_master_device: set_pullup requires " | ||
129 | "write_byte or touch_bit, disabling\n"); | ||
130 | master->set_pullup = NULL; | ||
131 | } | ||
132 | 120 | ||
133 | /* Lock until the device is added (or not) to w1_masters. */ | 121 | /* Lock until the device is added (or not) to w1_masters. */ |
134 | mutex_lock(&w1_mlock); | 122 | mutex_lock(&w1_mlock); |
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 4218e26df916..acf32054edd8 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h | |||
@@ -104,7 +104,7 @@ struct autofs_sb_info { | |||
104 | u32 magic; | 104 | u32 magic; |
105 | int pipefd; | 105 | int pipefd; |
106 | struct file *pipe; | 106 | struct file *pipe; |
107 | pid_t oz_pgrp; | 107 | struct pid *oz_pgrp; |
108 | int catatonic; | 108 | int catatonic; |
109 | int version; | 109 | int version; |
110 | int sub_version; | 110 | int sub_version; |
@@ -140,7 +140,7 @@ static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry) | |||
140 | filesystem without "magic".) */ | 140 | filesystem without "magic".) */ |
141 | 141 | ||
142 | static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) { | 142 | static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) { |
143 | return sbi->catatonic || task_pgrp_nr(current) == sbi->oz_pgrp; | 143 | return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; |
144 | } | 144 | } |
145 | 145 | ||
146 | /* Does a dentry have some pending activity? */ | 146 | /* Does a dentry have some pending activity? */ |
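Switching oz_pgrp from a numeric pid_t to a counted struct pid reference is what makes the ownership test namespace-aware: task_pgrp() and the stored reference can be compared as pointers no matter which pid namespace the caller sits in, and a number is only produced, relative to a chosen namespace, when something has to be printed. A brief sketch of that usage pattern with hypothetical demo_* helpers:

#include <linux/types.h>
#include <linux/pid.h>
#include <linux/sched.h>

/* Take a counted reference to the caller's process group. */
static struct pid *demo_grab_pgrp(void)
{
	return get_task_pid(current, PIDTYPE_PGID);	/* balance with put_pid() */
}

/* Pointer comparison is valid across pid namespaces. */
static bool demo_is_owner(struct pid *oz_pgrp)
{
	return task_pgrp(current) == oz_pgrp;
}

/* Numbers are generated only for presentation, in the viewer's namespace. */
static pid_t demo_pgrp_as_seen_by_current(struct pid *oz_pgrp)
{
	return pid_vnr(oz_pgrp);	/* 0 if not mapped in this namespace */
}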
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 1818ce7f5a06..3182c0e68b42 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c | |||
@@ -346,6 +346,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, | |||
346 | { | 346 | { |
347 | int pipefd; | 347 | int pipefd; |
348 | int err = 0; | 348 | int err = 0; |
349 | struct pid *new_pid = NULL; | ||
349 | 350 | ||
350 | if (param->setpipefd.pipefd == -1) | 351 | if (param->setpipefd.pipefd == -1) |
351 | return -EINVAL; | 352 | return -EINVAL; |
@@ -357,7 +358,17 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, | |||
357 | mutex_unlock(&sbi->wq_mutex); | 358 | mutex_unlock(&sbi->wq_mutex); |
358 | return -EBUSY; | 359 | return -EBUSY; |
359 | } else { | 360 | } else { |
360 | struct file *pipe = fget(pipefd); | 361 | struct file *pipe; |
362 | |||
363 | new_pid = get_task_pid(current, PIDTYPE_PGID); | ||
364 | |||
365 | if (ns_of_pid(new_pid) != ns_of_pid(sbi->oz_pgrp)) { | ||
366 | AUTOFS_WARN("Not allowed to change PID namespace"); | ||
367 | err = -EINVAL; | ||
368 | goto out; | ||
369 | } | ||
370 | |||
371 | pipe = fget(pipefd); | ||
361 | if (!pipe) { | 372 | if (!pipe) { |
362 | err = -EBADF; | 373 | err = -EBADF; |
363 | goto out; | 374 | goto out; |
@@ -367,12 +378,13 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, | |||
367 | fput(pipe); | 378 | fput(pipe); |
368 | goto out; | 379 | goto out; |
369 | } | 380 | } |
370 | sbi->oz_pgrp = task_pgrp_nr(current); | 381 | swap(sbi->oz_pgrp, new_pid); |
371 | sbi->pipefd = pipefd; | 382 | sbi->pipefd = pipefd; |
372 | sbi->pipe = pipe; | 383 | sbi->pipe = pipe; |
373 | sbi->catatonic = 0; | 384 | sbi->catatonic = 0; |
374 | } | 385 | } |
375 | out: | 386 | out: |
387 | put_pid(new_pid); | ||
376 | mutex_unlock(&sbi->wq_mutex); | 388 | mutex_unlock(&sbi->wq_mutex); |
377 | return err; | 389 | return err; |
378 | } | 390 | } |
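The setpipefd path above takes the new process-group reference first and swaps it into sbi->oz_pgrp only on success; because swap() leaves the previous pointer behind in new_pid, the single put_pid() at out: releases the old reference on success and the unused new one on failure. The idiom in isolation, as a hypothetical demo_replace_pgrp() helper:

#include <linux/kernel.h>	/* swap() */
#include <linux/pid.h>
#include <linux/sched.h>

/* Swap a fresh reference into @slot and drop whatever was there before. */
static void demo_replace_pgrp(struct pid **slot)
{
	struct pid *new_pid = get_task_pid(current, PIDTYPE_PGID);

	swap(*slot, new_pid);	/* new_pid now holds the old reference */
	put_pid(new_pid);	/* safe even if the slot used to be NULL */
}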
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 3d9d3f5d5dda..394e90b02c5e 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c | |||
@@ -402,6 +402,20 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, | |||
402 | goto next; | 402 | goto next; |
403 | } | 403 | } |
404 | 404 | ||
405 | if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { | ||
406 | DPRINTK("checking symlink %p %.*s", | ||
407 | dentry, (int)dentry->d_name.len, dentry->d_name.name); | ||
408 | /* | ||
409 | * A symlink can't be "busy" in the usual sense so | ||
410 | * just check last used for expire timeout. | ||
411 | */ | ||
412 | if (autofs4_can_expire(dentry, timeout, do_now)) { | ||
413 | expired = dentry; | ||
414 | goto found; | ||
415 | } | ||
416 | goto next; | ||
417 | } | ||
418 | |||
405 | if (simple_empty(dentry)) | 419 | if (simple_empty(dentry)) |
406 | goto next; | 420 | goto next; |
407 | 421 | ||
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 3b9cc9b973c2..d7bd395ab586 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c | |||
@@ -56,8 +56,11 @@ void autofs4_kill_sb(struct super_block *sb) | |||
56 | * just call kill_anon_super when we are called from | 56 | * just call kill_anon_super when we are called from |
57 | * deactivate_super. | 57 | * deactivate_super. |
58 | */ | 58 | */ |
59 | if (sbi) /* Free wait queues, close pipe */ | 59 | if (sbi) { |
60 | /* Free wait queues, close pipe */ | ||
60 | autofs4_catatonic_mode(sbi); | 61 | autofs4_catatonic_mode(sbi); |
62 | put_pid(sbi->oz_pgrp); | ||
63 | } | ||
61 | 64 | ||
62 | DPRINTK("shutting down"); | 65 | DPRINTK("shutting down"); |
63 | kill_litter_super(sb); | 66 | kill_litter_super(sb); |
@@ -80,7 +83,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root) | |||
80 | if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID)) | 83 | if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID)) |
81 | seq_printf(m, ",gid=%u", | 84 | seq_printf(m, ",gid=%u", |
82 | from_kgid_munged(&init_user_ns, root_inode->i_gid)); | 85 | from_kgid_munged(&init_user_ns, root_inode->i_gid)); |
83 | seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); | 86 | seq_printf(m, ",pgrp=%d", pid_vnr(sbi->oz_pgrp)); |
84 | seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); | 87 | seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); |
85 | seq_printf(m, ",minproto=%d", sbi->min_proto); | 88 | seq_printf(m, ",minproto=%d", sbi->min_proto); |
86 | seq_printf(m, ",maxproto=%d", sbi->max_proto); | 89 | seq_printf(m, ",maxproto=%d", sbi->max_proto); |
@@ -124,7 +127,8 @@ static const match_table_t tokens = { | |||
124 | }; | 127 | }; |
125 | 128 | ||
126 | static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, | 129 | static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, |
127 | pid_t *pgrp, unsigned int *type, int *minproto, int *maxproto) | 130 | int *pgrp, bool *pgrp_set, unsigned int *type, |
131 | int *minproto, int *maxproto) | ||
128 | { | 132 | { |
129 | char *p; | 133 | char *p; |
130 | substring_t args[MAX_OPT_ARGS]; | 134 | substring_t args[MAX_OPT_ARGS]; |
@@ -132,7 +136,6 @@ static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, | |||
132 | 136 | ||
133 | *uid = current_uid(); | 137 | *uid = current_uid(); |
134 | *gid = current_gid(); | 138 | *gid = current_gid(); |
135 | *pgrp = task_pgrp_nr(current); | ||
136 | 139 | ||
137 | *minproto = AUTOFS_MIN_PROTO_VERSION; | 140 | *minproto = AUTOFS_MIN_PROTO_VERSION; |
138 | *maxproto = AUTOFS_MAX_PROTO_VERSION; | 141 | *maxproto = AUTOFS_MAX_PROTO_VERSION; |
@@ -171,6 +174,7 @@ static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, | |||
171 | if (match_int(args, &option)) | 174 | if (match_int(args, &option)) |
172 | return 1; | 175 | return 1; |
173 | *pgrp = option; | 176 | *pgrp = option; |
177 | *pgrp_set = true; | ||
174 | break; | 178 | break; |
175 | case Opt_minproto: | 179 | case Opt_minproto: |
176 | if (match_int(args, &option)) | 180 | if (match_int(args, &option)) |
@@ -206,10 +210,13 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
206 | int pipefd; | 210 | int pipefd; |
207 | struct autofs_sb_info *sbi; | 211 | struct autofs_sb_info *sbi; |
208 | struct autofs_info *ino; | 212 | struct autofs_info *ino; |
213 | int pgrp; | ||
214 | bool pgrp_set = false; | ||
215 | int ret = -EINVAL; | ||
209 | 216 | ||
210 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); | 217 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
211 | if (!sbi) | 218 | if (!sbi) |
212 | goto fail_unlock; | 219 | return -ENOMEM; |
213 | DPRINTK("starting up, sbi = %p",sbi); | 220 | DPRINTK("starting up, sbi = %p",sbi); |
214 | 221 | ||
215 | s->s_fs_info = sbi; | 222 | s->s_fs_info = sbi; |
@@ -218,7 +225,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
218 | sbi->pipe = NULL; | 225 | sbi->pipe = NULL; |
219 | sbi->catatonic = 1; | 226 | sbi->catatonic = 1; |
220 | sbi->exp_timeout = 0; | 227 | sbi->exp_timeout = 0; |
221 | sbi->oz_pgrp = task_pgrp_nr(current); | 228 | sbi->oz_pgrp = NULL; |
222 | sbi->sb = s; | 229 | sbi->sb = s; |
223 | sbi->version = 0; | 230 | sbi->version = 0; |
224 | sbi->sub_version = 0; | 231 | sbi->sub_version = 0; |
@@ -243,8 +250,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
243 | * Get the root inode and dentry, but defer checking for errors. | 250 | * Get the root inode and dentry, but defer checking for errors. |
244 | */ | 251 | */ |
245 | ino = autofs4_new_ino(sbi); | 252 | ino = autofs4_new_ino(sbi); |
246 | if (!ino) | 253 | if (!ino) { |
254 | ret = -ENOMEM; | ||
247 | goto fail_free; | 255 | goto fail_free; |
256 | } | ||
248 | root_inode = autofs4_get_inode(s, S_IFDIR | 0755); | 257 | root_inode = autofs4_get_inode(s, S_IFDIR | 0755); |
249 | root = d_make_root(root_inode); | 258 | root = d_make_root(root_inode); |
250 | if (!root) | 259 | if (!root) |
@@ -255,12 +264,23 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
255 | 264 | ||
256 | /* Can this call block? */ | 265 | /* Can this call block? */ |
257 | if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, | 266 | if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, |
258 | &sbi->oz_pgrp, &sbi->type, &sbi->min_proto, | 267 | &pgrp, &pgrp_set, &sbi->type, &sbi->min_proto, |
259 | &sbi->max_proto)) { | 268 | &sbi->max_proto)) { |
260 | printk("autofs: called with bogus options\n"); | 269 | printk("autofs: called with bogus options\n"); |
261 | goto fail_dput; | 270 | goto fail_dput; |
262 | } | 271 | } |
263 | 272 | ||
273 | if (pgrp_set) { | ||
274 | sbi->oz_pgrp = find_get_pid(pgrp); | ||
275 | if (!sbi->oz_pgrp) { | ||
276 | pr_warn("autofs: could not find process group %d\n", | ||
277 | pgrp); | ||
278 | goto fail_dput; | ||
279 | } | ||
280 | } else { | ||
281 | sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID); | ||
282 | } | ||
283 | |||
264 | if (autofs_type_trigger(sbi->type)) | 284 | if (autofs_type_trigger(sbi->type)) |
265 | __managed_dentry_set_managed(root); | 285 | __managed_dentry_set_managed(root); |
266 | 286 | ||
@@ -284,14 +304,15 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
284 | sbi->version = sbi->max_proto; | 304 | sbi->version = sbi->max_proto; |
285 | sbi->sub_version = AUTOFS_PROTO_SUBVERSION; | 305 | sbi->sub_version = AUTOFS_PROTO_SUBVERSION; |
286 | 306 | ||
287 | DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp); | 307 | DPRINTK("pipe fd = %d, pgrp = %u", pipefd, pid_nr(sbi->oz_pgrp)); |
288 | pipe = fget(pipefd); | 308 | pipe = fget(pipefd); |
289 | 309 | ||
290 | if (!pipe) { | 310 | if (!pipe) { |
291 | printk("autofs: could not open pipe file descriptor\n"); | 311 | printk("autofs: could not open pipe file descriptor\n"); |
292 | goto fail_dput; | 312 | goto fail_dput; |
293 | } | 313 | } |
294 | if (autofs_prepare_pipe(pipe) < 0) | 314 | ret = autofs_prepare_pipe(pipe); |
315 | if (ret < 0) | ||
295 | goto fail_fput; | 316 | goto fail_fput; |
296 | sbi->pipe = pipe; | 317 | sbi->pipe = pipe; |
297 | sbi->pipefd = pipefd; | 318 | sbi->pipefd = pipefd; |
@@ -316,10 +337,10 @@ fail_dput: | |||
316 | fail_ino: | 337 | fail_ino: |
317 | kfree(ino); | 338 | kfree(ino); |
318 | fail_free: | 339 | fail_free: |
340 | put_pid(sbi->oz_pgrp); | ||
319 | kfree(sbi); | 341 | kfree(sbi); |
320 | s->s_fs_info = NULL; | 342 | s->s_fs_info = NULL; |
321 | fail_unlock: | 343 | return ret; |
322 | return -EINVAL; | ||
323 | } | 344 | } |
324 | 345 | ||
325 | struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode) | 346 | struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode) |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 92ef341ba0cf..2caf36ac3e93 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -558,7 +558,7 @@ static int autofs4_dir_symlink(struct inode *dir, | |||
558 | dget(dentry); | 558 | dget(dentry); |
559 | atomic_inc(&ino->count); | 559 | atomic_inc(&ino->count); |
560 | p_ino = autofs4_dentry_ino(dentry->d_parent); | 560 | p_ino = autofs4_dentry_ino(dentry->d_parent); |
561 | if (p_ino && dentry->d_parent != dentry) | 561 | if (p_ino && !IS_ROOT(dentry)) |
562 | atomic_inc(&p_ino->count); | 562 | atomic_inc(&p_ino->count); |
563 | 563 | ||
564 | dir->i_mtime = CURRENT_TIME; | 564 | dir->i_mtime = CURRENT_TIME; |
@@ -593,7 +593,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) | |||
593 | 593 | ||
594 | if (atomic_dec_and_test(&ino->count)) { | 594 | if (atomic_dec_and_test(&ino->count)) { |
595 | p_ino = autofs4_dentry_ino(dentry->d_parent); | 595 | p_ino = autofs4_dentry_ino(dentry->d_parent); |
596 | if (p_ino && dentry->d_parent != dentry) | 596 | if (p_ino && !IS_ROOT(dentry)) |
597 | atomic_dec(&p_ino->count); | 597 | atomic_dec(&p_ino->count); |
598 | } | 598 | } |
599 | dput(ino->dentry); | 599 | dput(ino->dentry); |
@@ -732,7 +732,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m | |||
732 | dget(dentry); | 732 | dget(dentry); |
733 | atomic_inc(&ino->count); | 733 | atomic_inc(&ino->count); |
734 | p_ino = autofs4_dentry_ino(dentry->d_parent); | 734 | p_ino = autofs4_dentry_ino(dentry->d_parent); |
735 | if (p_ino && dentry->d_parent != dentry) | 735 | if (p_ino && !IS_ROOT(dentry)) |
736 | atomic_inc(&p_ino->count); | 736 | atomic_inc(&p_ino->count); |
737 | inc_nlink(dir); | 737 | inc_nlink(dir); |
738 | dir->i_mtime = CURRENT_TIME; | 738 | dir->i_mtime = CURRENT_TIME; |
diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c index f27c094a1919..1e8ea192be2b 100644 --- a/fs/autofs4/symlink.c +++ b/fs/autofs4/symlink.c | |||
@@ -14,6 +14,10 @@ | |||
14 | 14 | ||
15 | static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) | 15 | static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) |
16 | { | 16 | { |
17 | struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); | ||
18 | struct autofs_info *ino = autofs4_dentry_ino(dentry); | ||
19 | if (ino && !autofs4_oz_mode(sbi)) | ||
20 | ino->last_used = jiffies; | ||
17 | nd_set_link(nd, dentry->d_inode->i_private); | 21 | nd_set_link(nd, dentry->d_inode->i_private); |
18 | return NULL; | 22 | return NULL; |
19 | } | 23 | } |
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 689e40d983ad..116fd38ee472 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
@@ -347,11 +347,23 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
347 | struct qstr qstr; | 347 | struct qstr qstr; |
348 | char *name; | 348 | char *name; |
349 | int status, ret, type; | 349 | int status, ret, type; |
350 | pid_t pid; | ||
351 | pid_t tgid; | ||
350 | 352 | ||
351 | /* In catatonic mode, we don't wait for nobody */ | 353 | /* In catatonic mode, we don't wait for nobody */ |
352 | if (sbi->catatonic) | 354 | if (sbi->catatonic) |
353 | return -ENOENT; | 355 | return -ENOENT; |
354 | 356 | ||
357 | /* | ||
358 | * Try translating pids to the namespace of the daemon. | ||
359 | * | ||
360 | * Zero means failure: we are in an unrelated pid namespace. | ||
361 | */ | ||
362 | pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); | ||
363 | tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); | ||
364 | if (pid == 0 || tgid == 0) | ||
365 | return -ENOENT; | ||
366 | |||
355 | if (!dentry->d_inode) { | 367 | if (!dentry->d_inode) { |
356 | /* | 368 | /* |
357 | * A wait for a negative dentry is invalid for certain | 369 | * A wait for a negative dentry is invalid for certain |
@@ -417,8 +429,8 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
417 | wq->ino = autofs4_get_ino(sbi); | 429 | wq->ino = autofs4_get_ino(sbi); |
418 | wq->uid = current_uid(); | 430 | wq->uid = current_uid(); |
419 | wq->gid = current_gid(); | 431 | wq->gid = current_gid(); |
420 | wq->pid = current->pid; | 432 | wq->pid = pid; |
421 | wq->tgid = current->tgid; | 433 | wq->tgid = tgid; |
422 | wq->status = -EINTR; /* Status return if interrupted */ | 434 | wq->status = -EINTR; /* Status return if interrupted */ |
423 | wq->wait_ctr = 2; | 435 | wq->wait_ctr = 2; |
424 | 436 | ||
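The waitq change translates the triggering task's pid and tgid into the pid namespace of the automount daemon (taken from the namespace of the stored oz_pgrp) and bails out with -ENOENT when the translation yields 0, i.e. when the caller lives in a namespace the daemon cannot address. A hedged sketch of just the translation step (the demo_* helper is illustrative):

#include <linux/types.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

/*
 * Translate current's ids into the namespace the daemon lives in.
 * Returns false when the caller is invisible to that namespace.
 */
static bool demo_ids_for_daemon(struct pid *daemon_pgrp,
				pid_t *pid, pid_t *tgid)
{
	struct pid_namespace *ns = ns_of_pid(daemon_pgrp);

	*pid = task_pid_nr_ns(current, ns);
	*tgid = task_tgid_nr_ns(current, ns);

	/* 0 means "no mapping in ns": an unrelated pid namespace. */
	return *pid != 0 && *tgid != 0;
}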
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 571a42326908..67be2951b98a 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -543,9 +543,6 @@ out: | |||
543 | * libraries. There is no binary dependent code anywhere else. | 543 | * libraries. There is no binary dependent code anywhere else. |
544 | */ | 544 | */ |
545 | 545 | ||
546 | #define INTERPRETER_NONE 0 | ||
547 | #define INTERPRETER_ELF 2 | ||
548 | |||
549 | #ifndef STACK_RND_MASK | 546 | #ifndef STACK_RND_MASK |
550 | #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ | 547 | #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ |
551 | #endif | 548 | #endif |
diff --git a/fs/coredump.c b/fs/coredump.c index bc3fbcd32558..e3ad709a4232 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #include <trace/events/task.h> | 41 | #include <trace/events/task.h> |
42 | #include "internal.h" | 42 | #include "internal.h" |
43 | #include "coredump.h" | ||
44 | 43 | ||
45 | #include <trace/events/sched.h> | 44 | #include <trace/events/sched.h> |
46 | 45 | ||
diff --git a/fs/coredump.h b/fs/coredump.h deleted file mode 100644 index e39ff072110d..000000000000 --- a/fs/coredump.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _FS_COREDUMP_H | ||
2 | #define _FS_COREDUMP_H | ||
3 | |||
4 | extern int __get_dumpable(unsigned long mm_flags); | ||
5 | |||
6 | #endif | ||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -62,7 +62,6 @@ | |||
62 | 62 | ||
63 | #include <trace/events/task.h> | 63 | #include <trace/events/task.h> |
64 | #include "internal.h" | 64 | #include "internal.h" |
65 | #include "coredump.h" | ||
66 | 65 | ||
67 | #include <trace/events/sched.h> | 66 | #include <trace/events/sched.h> |
68 | 67 | ||
@@ -843,7 +842,6 @@ static int exec_mmap(struct mm_struct *mm) | |||
843 | tsk->active_mm = mm; | 842 | tsk->active_mm = mm; |
844 | activate_mm(active_mm, mm); | 843 | activate_mm(active_mm, mm); |
845 | task_unlock(tsk); | 844 | task_unlock(tsk); |
846 | arch_pick_mmap_layout(mm); | ||
847 | if (old_mm) { | 845 | if (old_mm) { |
848 | up_read(&old_mm->mmap_sem); | 846 | up_read(&old_mm->mmap_sem); |
849 | BUG_ON(active_mm != old_mm); | 847 | BUG_ON(active_mm != old_mm); |
@@ -1088,8 +1086,8 @@ int flush_old_exec(struct linux_binprm * bprm) | |||
1088 | bprm->mm = NULL; /* We're using it now */ | 1086 | bprm->mm = NULL; /* We're using it now */ |
1089 | 1087 | ||
1090 | set_fs(USER_DS); | 1088 | set_fs(USER_DS); |
1091 | current->flags &= | 1089 | current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | |
1092 | ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE); | 1090 | PF_NOFREEZE | PF_NO_SETAFFINITY); |
1093 | flush_thread(); | 1091 | flush_thread(); |
1094 | current->personality &= ~bprm->per_clear; | 1092 | current->personality &= ~bprm->per_clear; |
1095 | 1093 | ||
@@ -1139,9 +1137,7 @@ void setup_new_exec(struct linux_binprm * bprm) | |||
1139 | 1137 | ||
1140 | /* An exec changes our domain. We are no longer part of the thread | 1138 | /* An exec changes our domain. We are no longer part of the thread |
1141 | group */ | 1139 | group */ |
1142 | |||
1143 | current->self_exec_id++; | 1140 | current->self_exec_id++; |
1144 | |||
1145 | flush_signal_handlers(current, 0); | 1141 | flush_signal_handlers(current, 0); |
1146 | do_close_on_exec(current->files); | 1142 | do_close_on_exec(current->files); |
1147 | } | 1143 | } |
@@ -1173,6 +1169,10 @@ void free_bprm(struct linux_binprm *bprm) | |||
1173 | mutex_unlock(¤t->signal->cred_guard_mutex); | 1169 | mutex_unlock(¤t->signal->cred_guard_mutex); |
1174 | abort_creds(bprm->cred); | 1170 | abort_creds(bprm->cred); |
1175 | } | 1171 | } |
1172 | if (bprm->file) { | ||
1173 | allow_write_access(bprm->file); | ||
1174 | fput(bprm->file); | ||
1175 | } | ||
1176 | /* If a binfmt changed the interp, free it. */ | 1176 | /* If a binfmt changed the interp, free it. */ |
1177 | if (bprm->interp != bprm->filename) | 1177 | if (bprm->interp != bprm->filename) |
1178 | kfree(bprm->interp); | 1178 | kfree(bprm->interp); |
@@ -1224,11 +1224,10 @@ EXPORT_SYMBOL(install_exec_creds); | |||
1224 | * - the caller must hold ->cred_guard_mutex to protect against | 1224 | * - the caller must hold ->cred_guard_mutex to protect against |
1225 | * PTRACE_ATTACH | 1225 | * PTRACE_ATTACH |
1226 | */ | 1226 | */ |
1227 | static int check_unsafe_exec(struct linux_binprm *bprm) | 1227 | static void check_unsafe_exec(struct linux_binprm *bprm) |
1228 | { | 1228 | { |
1229 | struct task_struct *p = current, *t; | 1229 | struct task_struct *p = current, *t; |
1230 | unsigned n_fs; | 1230 | unsigned n_fs; |
1231 | int res = 0; | ||
1232 | 1231 | ||
1233 | if (p->ptrace) { | 1232 | if (p->ptrace) { |
1234 | if (p->ptrace & PT_PTRACE_CAP) | 1233 | if (p->ptrace & PT_PTRACE_CAP) |
@@ -1244,31 +1243,25 @@ static int check_unsafe_exec(struct linux_binprm *bprm) | |||
1244 | if (current->no_new_privs) | 1243 | if (current->no_new_privs) |
1245 | bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; | 1244 | bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; |
1246 | 1245 | ||
1246 | t = p; | ||
1247 | n_fs = 1; | 1247 | n_fs = 1; |
1248 | spin_lock(&p->fs->lock); | 1248 | spin_lock(&p->fs->lock); |
1249 | rcu_read_lock(); | 1249 | rcu_read_lock(); |
1250 | for (t = next_thread(p); t != p; t = next_thread(t)) { | 1250 | while_each_thread(p, t) { |
1251 | if (t->fs == p->fs) | 1251 | if (t->fs == p->fs) |
1252 | n_fs++; | 1252 | n_fs++; |
1253 | } | 1253 | } |
1254 | rcu_read_unlock(); | 1254 | rcu_read_unlock(); |
1255 | 1255 | ||
1256 | if (p->fs->users > n_fs) { | 1256 | if (p->fs->users > n_fs) |
1257 | bprm->unsafe |= LSM_UNSAFE_SHARE; | 1257 | bprm->unsafe |= LSM_UNSAFE_SHARE; |
1258 | } else { | 1258 | else |
1259 | res = -EAGAIN; | 1259 | p->fs->in_exec = 1; |
1260 | if (!p->fs->in_exec) { | ||
1261 | p->fs->in_exec = 1; | ||
1262 | res = 1; | ||
1263 | } | ||
1264 | } | ||
1265 | spin_unlock(&p->fs->lock); | 1260 | spin_unlock(&p->fs->lock); |
1266 | |||
1267 | return res; | ||
1268 | } | 1261 | } |
1269 | 1262 | ||
1270 | /* | 1263 | /* |
1271 | * Fill the binprm structure from the inode. | 1264 | * Fill the binprm structure from the inode. |
1272 | * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes | 1265 | * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes |
1273 | * | 1266 | * |
1274 | * This may be called multiple times for binary chains (scripts for example). | 1267 | * This may be called multiple times for binary chains (scripts for example). |
@@ -1430,14 +1423,7 @@ static int exec_binprm(struct linux_binprm *bprm) | |||
1430 | audit_bprm(bprm); | 1423 | audit_bprm(bprm); |
1431 | trace_sched_process_exec(current, old_pid, bprm); | 1424 | trace_sched_process_exec(current, old_pid, bprm); |
1432 | ptrace_event(PTRACE_EVENT_EXEC, old_vpid); | 1425 | ptrace_event(PTRACE_EVENT_EXEC, old_vpid); |
1433 | current->did_exec = 1; | ||
1434 | proc_exec_connector(current); | 1426 | proc_exec_connector(current); |
1435 | |||
1436 | if (bprm->file) { | ||
1437 | allow_write_access(bprm->file); | ||
1438 | fput(bprm->file); | ||
1439 | bprm->file = NULL; /* to catch use-after-free */ | ||
1440 | } | ||
1441 | } | 1427 | } |
1442 | 1428 | ||
1443 | return ret; | 1429 | return ret; |
@@ -1453,7 +1439,6 @@ static int do_execve_common(const char *filename, | |||
1453 | struct linux_binprm *bprm; | 1439 | struct linux_binprm *bprm; |
1454 | struct file *file; | 1440 | struct file *file; |
1455 | struct files_struct *displaced; | 1441 | struct files_struct *displaced; |
1456 | bool clear_in_exec; | ||
1457 | int retval; | 1442 | int retval; |
1458 | 1443 | ||
1459 | /* | 1444 | /* |
@@ -1485,10 +1470,7 @@ static int do_execve_common(const char *filename, | |||
1485 | if (retval) | 1470 | if (retval) |
1486 | goto out_free; | 1471 | goto out_free; |
1487 | 1472 | ||
1488 | retval = check_unsafe_exec(bprm); | 1473 | check_unsafe_exec(bprm); |
1489 | if (retval < 0) | ||
1490 | goto out_free; | ||
1491 | clear_in_exec = retval; | ||
1492 | current->in_execve = 1; | 1474 | current->in_execve = 1; |
1493 | 1475 | ||
1494 | file = open_exec(filename); | 1476 | file = open_exec(filename); |
@@ -1504,7 +1486,7 @@ static int do_execve_common(const char *filename, | |||
1504 | 1486 | ||
1505 | retval = bprm_mm_init(bprm); | 1487 | retval = bprm_mm_init(bprm); |
1506 | if (retval) | 1488 | if (retval) |
1507 | goto out_file; | 1489 | goto out_unmark; |
1508 | 1490 | ||
1509 | bprm->argc = count(argv, MAX_ARG_STRINGS); | 1491 | bprm->argc = count(argv, MAX_ARG_STRINGS); |
1510 | if ((retval = bprm->argc) < 0) | 1492 | if ((retval = bprm->argc) < 0) |
@@ -1551,15 +1533,8 @@ out: | |||
1551 | mmput(bprm->mm); | 1533 | mmput(bprm->mm); |
1552 | } | 1534 | } |
1553 | 1535 | ||
1554 | out_file: | ||
1555 | if (bprm->file) { | ||
1556 | allow_write_access(bprm->file); | ||
1557 | fput(bprm->file); | ||
1558 | } | ||
1559 | |||
1560 | out_unmark: | 1536 | out_unmark: |
1561 | if (clear_in_exec) | 1537 | current->fs->in_exec = 0; |
1562 | current->fs->in_exec = 0; | ||
1563 | current->in_execve = 0; | 1538 | current->in_execve = 0; |
1564 | 1539 | ||
1565 | out_free: | 1540 | out_free: |
@@ -1609,67 +1584,22 @@ void set_binfmt(struct linux_binfmt *new) | |||
1609 | if (new) | 1584 | if (new) |
1610 | __module_get(new->module); | 1585 | __module_get(new->module); |
1611 | } | 1586 | } |
1612 | |||
1613 | EXPORT_SYMBOL(set_binfmt); | 1587 | EXPORT_SYMBOL(set_binfmt); |
1614 | 1588 | ||
1615 | /* | 1589 | /* |
1616 | * set_dumpable converts traditional three-value dumpable to two flags and | 1590 | * set_dumpable stores three-value SUID_DUMP_* into mm->flags. |
1617 | * stores them into mm->flags. It modifies lower two bits of mm->flags, but | ||
1618 | * these bits are not changed atomically. So get_dumpable can observe the | ||
1619 | * intermediate state. To avoid doing unexpected behavior, get get_dumpable | ||
1620 | * return either old dumpable or new one by paying attention to the order of | ||
1621 | * modifying the bits. | ||
1622 | * | ||
1623 | * dumpable | mm->flags (binary) | ||
1624 | * old new | initial interim final | ||
1625 | * ---------+----------------------- | ||
1626 | * 0 1 | 00 01 01 | ||
1627 | * 0 2 | 00 10(*) 11 | ||
1628 | * 1 0 | 01 00 00 | ||
1629 | * 1 2 | 01 11 11 | ||
1630 | * 2 0 | 11 10(*) 00 | ||
1631 | * 2 1 | 11 11 01 | ||
1632 | * | ||
1633 | * (*) get_dumpable regards interim value of 10 as 11. | ||
1634 | */ | 1591 | */ |
1635 | void set_dumpable(struct mm_struct *mm, int value) | 1592 | void set_dumpable(struct mm_struct *mm, int value) |
1636 | { | 1593 | { |
1637 | switch (value) { | 1594 | unsigned long old, new; |
1638 | case SUID_DUMP_DISABLE: | ||
1639 | clear_bit(MMF_DUMPABLE, &mm->flags); | ||
1640 | smp_wmb(); | ||
1641 | clear_bit(MMF_DUMP_SECURELY, &mm->flags); | ||
1642 | break; | ||
1643 | case SUID_DUMP_USER: | ||
1644 | set_bit(MMF_DUMPABLE, &mm->flags); | ||
1645 | smp_wmb(); | ||
1646 | clear_bit(MMF_DUMP_SECURELY, &mm->flags); | ||
1647 | break; | ||
1648 | case SUID_DUMP_ROOT: | ||
1649 | set_bit(MMF_DUMP_SECURELY, &mm->flags); | ||
1650 | smp_wmb(); | ||
1651 | set_bit(MMF_DUMPABLE, &mm->flags); | ||
1652 | break; | ||
1653 | } | ||
1654 | } | ||
1655 | |||
1656 | int __get_dumpable(unsigned long mm_flags) | ||
1657 | { | ||
1658 | int ret; | ||
1659 | 1595 | ||
1660 | ret = mm_flags & MMF_DUMPABLE_MASK; | 1596 | if (WARN_ON((unsigned)value > SUID_DUMP_ROOT)) |
1661 | return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret; | 1597 | return; |
1662 | } | ||
1663 | 1598 | ||
1664 | /* | 1599 | do { |
1665 | * This returns the actual value of the suid_dumpable flag. For things | 1600 | old = ACCESS_ONCE(mm->flags); |
1666 | * that are using this for checking for privilege transitions, it must | 1601 | new = (old & ~MMF_DUMPABLE_MASK) | value; |
1667 | * test against SUID_DUMP_USER rather than treating it as a boolean | 1602 | } while (cmpxchg(&mm->flags, old, new) != old); |
1668 | * value. | ||
1669 | */ | ||
1670 | int get_dumpable(struct mm_struct *mm) | ||
1671 | { | ||
1672 | return __get_dumpable(mm->flags); | ||
1673 | } | 1603 | } |
1674 | 1604 | ||
1675 | SYSCALL_DEFINE3(execve, | 1605 | SYSCALL_DEFINE3(execve, |
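The rewritten set_dumpable() at the end of the exec.c changes replaces the two-bit set/clear sequence (and the smp_wmb() ordering it needed) with a lock-free read-modify-write: the whole MMF_DUMPABLE_MASK field is swapped in a single cmpxchg(), so a concurrent reader can never observe a half-updated value. The same pattern as a generic helper for replacing a masked field inside an unsigned long (the demo_* name is illustrative, not kernel API):

#include <linux/compiler.h>
#include <linux/atomic.h>

/*
 * Atomically replace the bits selected by @mask in *@flags with @value,
 * retrying until no other writer has raced with us.
 */
static void demo_set_masked_flags(unsigned long *flags,
				  unsigned long mask, unsigned long value)
{
	unsigned long old, new;

	do {
		old = ACCESS_ONCE(*flags);
		new = (old & ~mask) | (value & mask);
	} while (cmpxchg(flags, old, new) != old);
}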
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index bafdd48eefde..e66e4808719f 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
@@ -309,43 +309,17 @@ struct fname { | |||
309 | */ | 309 | */ |
310 | static void free_rb_tree_fname(struct rb_root *root) | 310 | static void free_rb_tree_fname(struct rb_root *root) |
311 | { | 311 | { |
312 | struct rb_node *n = root->rb_node; | 312 | struct fname *fname, *next; |
313 | struct rb_node *parent; | 313 | |
314 | struct fname *fname; | 314 | rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash) |
315 | 315 | do { | |
316 | while (n) { | 316 | struct fname *old = fname; |
317 | /* Do the node's children first */ | ||
318 | if (n->rb_left) { | ||
319 | n = n->rb_left; | ||
320 | continue; | ||
321 | } | ||
322 | if (n->rb_right) { | ||
323 | n = n->rb_right; | ||
324 | continue; | ||
325 | } | ||
326 | /* | ||
327 | * The node has no children; free it, and then zero | ||
328 | * out parent's link to it. Finally go to the | ||
329 | * beginning of the loop and try to free the parent | ||
330 | * node. | ||
331 | */ | ||
332 | parent = rb_parent(n); | ||
333 | fname = rb_entry(n, struct fname, rb_hash); | ||
334 | while (fname) { | ||
335 | struct fname * old = fname; | ||
336 | fname = fname->next; | 317 | fname = fname->next; |
337 | kfree (old); | 318 | kfree(old); |
338 | } | 319 | } while (fname); |
339 | if (!parent) | ||
340 | *root = RB_ROOT; | ||
341 | else if (parent->rb_left == n) | ||
342 | parent->rb_left = NULL; | ||
343 | else if (parent->rb_right == n) | ||
344 | parent->rb_right = NULL; | ||
345 | n = parent; | ||
346 | } | ||
347 | } | ||
348 | 320 | ||
321 | *root = RB_ROOT; | ||
322 | } | ||
349 | 323 | ||
350 | static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp, | 324 | static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp, |
351 | loff_t pos) | 325 | loff_t pos) |
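The ext3 hunk above replaces a hand-rolled "descend, free the leaf, null out the parent's link" loop with rbtree_postorder_for_each_entry_safe(), which visits every node after its children and so lets the body free the current entry without corrupting the walk; the root is reset with *root = RB_ROOT afterwards because the nodes are freed rather than rb_erase()d. A minimal sketch of the idiom with a hypothetical entry type:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_entry {
	struct rb_node node;	/* linked into the tree */
	int key;
};

/* Free every entry of @root in postorder, then empty the root. */
static void demo_free_tree(struct rb_root *root)
{
	struct demo_entry *entry, *next;

	rbtree_postorder_for_each_entry_safe(entry, next, root, node)
		kfree(entry);	/* children were already visited and freed */

	*root = RB_ROOT;	/* nothing was unlinked, so just reset */
}

The ext4 and jffs2 hunks below apply the same recipe; jffs2 additionally does its per-node cleanup inside the loop body before each entry is freed.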
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 3f11656bd72e..41eb9dcfac7e 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c | |||
@@ -180,37 +180,12 @@ int ext4_setup_system_zone(struct super_block *sb) | |||
180 | /* Called when the filesystem is unmounted */ | 180 | /* Called when the filesystem is unmounted */ |
181 | void ext4_release_system_zone(struct super_block *sb) | 181 | void ext4_release_system_zone(struct super_block *sb) |
182 | { | 182 | { |
183 | struct rb_node *n = EXT4_SB(sb)->system_blks.rb_node; | 183 | struct ext4_system_zone *entry, *n; |
184 | struct rb_node *parent; | ||
185 | struct ext4_system_zone *entry; | ||
186 | 184 | ||
187 | while (n) { | 185 | rbtree_postorder_for_each_entry_safe(entry, n, |
188 | /* Do the node's children first */ | 186 | &EXT4_SB(sb)->system_blks, node) |
189 | if (n->rb_left) { | ||
190 | n = n->rb_left; | ||
191 | continue; | ||
192 | } | ||
193 | if (n->rb_right) { | ||
194 | n = n->rb_right; | ||
195 | continue; | ||
196 | } | ||
197 | /* | ||
198 | * The node has no children; free it, and then zero | ||
199 | * out parent's link to it. Finally go to the | ||
200 | * beginning of the loop and try to free the parent | ||
201 | * node. | ||
202 | */ | ||
203 | parent = rb_parent(n); | ||
204 | entry = rb_entry(n, struct ext4_system_zone, node); | ||
205 | kmem_cache_free(ext4_system_zone_cachep, entry); | 187 | kmem_cache_free(ext4_system_zone_cachep, entry); |
206 | if (!parent) | 188 | |
207 | EXT4_SB(sb)->system_blks = RB_ROOT; | ||
208 | else if (parent->rb_left == n) | ||
209 | parent->rb_left = NULL; | ||
210 | else if (parent->rb_right == n) | ||
211 | parent->rb_right = NULL; | ||
212 | n = parent; | ||
213 | } | ||
214 | EXT4_SB(sb)->system_blks = RB_ROOT; | 189 | EXT4_SB(sb)->system_blks = RB_ROOT; |
215 | } | 190 | } |
216 | 191 | ||
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 680bb3388919..d638c57e996e 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c | |||
@@ -353,41 +353,16 @@ struct fname { | |||
353 | */ | 353 | */ |
354 | static void free_rb_tree_fname(struct rb_root *root) | 354 | static void free_rb_tree_fname(struct rb_root *root) |
355 | { | 355 | { |
356 | struct rb_node *n = root->rb_node; | 356 | struct fname *fname, *next; |
357 | struct rb_node *parent; | 357 | |
358 | struct fname *fname; | 358 | rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash) |
359 | |||
360 | while (n) { | ||
361 | /* Do the node's children first */ | ||
362 | if (n->rb_left) { | ||
363 | n = n->rb_left; | ||
364 | continue; | ||
365 | } | ||
366 | if (n->rb_right) { | ||
367 | n = n->rb_right; | ||
368 | continue; | ||
369 | } | ||
370 | /* | ||
371 | * The node has no children; free it, and then zero | ||
372 | * out parent's link to it. Finally go to the | ||
373 | * beginning of the loop and try to free the parent | ||
374 | * node. | ||
375 | */ | ||
376 | parent = rb_parent(n); | ||
377 | fname = rb_entry(n, struct fname, rb_hash); | ||
378 | while (fname) { | 359 | while (fname) { |
379 | struct fname *old = fname; | 360 | struct fname *old = fname; |
380 | fname = fname->next; | 361 | fname = fname->next; |
381 | kfree(old); | 362 | kfree(old); |
382 | } | 363 | } |
383 | if (!parent) | 364 | |
384 | *root = RB_ROOT; | 365 | *root = RB_ROOT; |
385 | else if (parent->rb_left == n) | ||
386 | parent->rb_left = NULL; | ||
387 | else if (parent->rb_right == n) | ||
388 | parent->rb_right = NULL; | ||
389 | n = parent; | ||
390 | } | ||
391 | } | 366 | } |
392 | 367 | ||
393 | 368 | ||
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 37213d075f3c..3ebda928229c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -178,64 +178,6 @@ const struct dentry_operations hfsplus_dentry_operations = { | |||
178 | .d_compare = hfsplus_compare_dentry, | 178 | .d_compare = hfsplus_compare_dentry, |
179 | }; | 179 | }; |
180 | 180 | ||
181 | static struct dentry *hfsplus_file_lookup(struct inode *dir, | ||
182 | struct dentry *dentry, unsigned int flags) | ||
183 | { | ||
184 | struct hfs_find_data fd; | ||
185 | struct super_block *sb = dir->i_sb; | ||
186 | struct inode *inode = NULL; | ||
187 | struct hfsplus_inode_info *hip; | ||
188 | int err; | ||
189 | |||
190 | if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) | ||
191 | goto out; | ||
192 | |||
193 | inode = HFSPLUS_I(dir)->rsrc_inode; | ||
194 | if (inode) | ||
195 | goto out; | ||
196 | |||
197 | inode = new_inode(sb); | ||
198 | if (!inode) | ||
199 | return ERR_PTR(-ENOMEM); | ||
200 | |||
201 | hip = HFSPLUS_I(inode); | ||
202 | inode->i_ino = dir->i_ino; | ||
203 | INIT_LIST_HEAD(&hip->open_dir_list); | ||
204 | mutex_init(&hip->extents_lock); | ||
205 | hip->extent_state = 0; | ||
206 | hip->flags = 0; | ||
207 | hip->userflags = 0; | ||
208 | set_bit(HFSPLUS_I_RSRC, &hip->flags); | ||
209 | |||
210 | err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); | ||
211 | if (!err) { | ||
212 | err = hfsplus_find_cat(sb, dir->i_ino, &fd); | ||
213 | if (!err) | ||
214 | err = hfsplus_cat_read_inode(inode, &fd); | ||
215 | hfs_find_exit(&fd); | ||
216 | } | ||
217 | if (err) { | ||
218 | iput(inode); | ||
219 | return ERR_PTR(err); | ||
220 | } | ||
221 | hip->rsrc_inode = dir; | ||
222 | HFSPLUS_I(dir)->rsrc_inode = inode; | ||
223 | igrab(dir); | ||
224 | |||
225 | /* | ||
226 | * __mark_inode_dirty expects inodes to be hashed. Since we don't | ||
227 | * want resource fork inodes in the regular inode space, we make them | ||
228 | * appear hashed, but do not put on any lists. hlist_del() | ||
229 | * will work fine and require no locking. | ||
230 | */ | ||
231 | hlist_add_fake(&inode->i_hash); | ||
232 | |||
233 | mark_inode_dirty(inode); | ||
234 | out: | ||
235 | d_add(dentry, inode); | ||
236 | return NULL; | ||
237 | } | ||
238 | |||
239 | static void hfsplus_get_perms(struct inode *inode, | 181 | static void hfsplus_get_perms(struct inode *inode, |
240 | struct hfsplus_perm *perms, int dir) | 182 | struct hfsplus_perm *perms, int dir) |
241 | { | 183 | { |
@@ -385,7 +327,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, | |||
385 | } | 327 | } |
386 | 328 | ||
387 | static const struct inode_operations hfsplus_file_inode_operations = { | 329 | static const struct inode_operations hfsplus_file_inode_operations = { |
388 | .lookup = hfsplus_file_lookup, | ||
389 | .setattr = hfsplus_setattr, | 330 | .setattr = hfsplus_setattr, |
390 | .setxattr = generic_setxattr, | 331 | .setxattr = generic_setxattr, |
391 | .getxattr = generic_getxattr, | 332 | .getxattr = generic_getxattr, |
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 975a1f562c10..9a5449bc3afb 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -564,25 +564,10 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ | |||
564 | they're killed. */ | 564 | they're killed. */ |
565 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | 565 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) |
566 | { | 566 | { |
567 | struct jffs2_node_frag *frag; | 567 | struct jffs2_node_frag *frag, *next; |
568 | struct jffs2_node_frag *parent; | ||
569 | |||
570 | if (!root->rb_node) | ||
571 | return; | ||
572 | 568 | ||
573 | dbg_fragtree("killing\n"); | 569 | dbg_fragtree("killing\n"); |
574 | 570 | rbtree_postorder_for_each_entry_safe(frag, next, root, rb) { | |
575 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | ||
576 | while(frag) { | ||
577 | if (frag->rb.rb_left) { | ||
578 | frag = frag_left(frag); | ||
579 | continue; | ||
580 | } | ||
581 | if (frag->rb.rb_right) { | ||
582 | frag = frag_right(frag); | ||
583 | continue; | ||
584 | } | ||
585 | |||
586 | if (frag->node && !(--frag->node->frags)) { | 571 | if (frag->node && !(--frag->node->frags)) { |
587 | /* Not a hole, and it's the final remaining frag | 572 | /* Not a hole, and it's the final remaining frag |
588 | of this node. Free the node */ | 573 | of this node. Free the node */ |
@@ -591,17 +576,8 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
591 | 576 | ||
592 | jffs2_free_full_dnode(frag->node); | 577 | jffs2_free_full_dnode(frag->node); |
593 | } | 578 | } |
594 | parent = frag_parent(frag); | ||
595 | if (parent) { | ||
596 | if (frag_left(parent) == frag) | ||
597 | parent->rb.rb_left = NULL; | ||
598 | else | ||
599 | parent->rb.rb_right = NULL; | ||
600 | } | ||
601 | 579 | ||
602 | jffs2_free_node_frag(frag); | 580 | jffs2_free_node_frag(frag); |
603 | frag = parent; | ||
604 | |||
605 | cond_resched(); | 581 | cond_resched(); |
606 | } | 582 | } |
607 | } | 583 | } |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index ae81b01e6fd7..386303dca382 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -543,33 +543,13 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c, | |||
543 | 543 | ||
544 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | 544 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) |
545 | { | 545 | { |
546 | struct rb_node *this; | 546 | struct jffs2_tmp_dnode_info *tn, *next; |
547 | struct jffs2_tmp_dnode_info *tn; | ||
548 | |||
549 | this = list->rb_node; | ||
550 | 547 | ||
551 | /* Now at bottom of tree */ | 548 | rbtree_postorder_for_each_entry_safe(tn, next, list, rb) { |
552 | while (this) { | ||
553 | if (this->rb_left) | ||
554 | this = this->rb_left; | ||
555 | else if (this->rb_right) | ||
556 | this = this->rb_right; | ||
557 | else { | ||
558 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | ||
559 | jffs2_free_full_dnode(tn->fn); | 549 | jffs2_free_full_dnode(tn->fn); |
560 | jffs2_free_tmp_dnode_info(tn); | 550 | jffs2_free_tmp_dnode_info(tn); |
561 | |||
562 | this = rb_parent(this); | ||
563 | if (!this) | ||
564 | break; | ||
565 | |||
566 | if (this->rb_left == &tn->rb) | ||
567 | this->rb_left = NULL; | ||
568 | else if (this->rb_right == &tn->rb) | ||
569 | this->rb_right = NULL; | ||
570 | else BUG(); | ||
571 | } | ||
572 | } | 551 | } |
552 | |||
573 | *list = RB_ROOT; | 553 | *list = RB_ROOT; |
574 | } | 554 | } |
575 | 555 | ||
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index d448a777166b..7f9b096d8d57 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -62,7 +62,8 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index, | |||
62 | page = read_cache_page(mapping, index, filler, sb); | 62 | page = read_cache_page(mapping, index, filler, sb); |
63 | else { | 63 | else { |
64 | page = find_or_create_page(mapping, index, GFP_NOFS); | 64 | page = find_or_create_page(mapping, index, GFP_NOFS); |
65 | unlock_page(page); | 65 | if (page) |
66 | unlock_page(page); | ||
66 | } | 67 | } |
67 | return page; | 68 | return page; |
68 | } | 69 | } |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index b44bdb291b84..2b34021948e4 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -37,7 +37,26 @@ | |||
37 | #include "sufile.h" | 37 | #include "sufile.h" |
38 | #include "dat.h" | 38 | #include "dat.h" |
39 | 39 | ||
40 | 40 | /** | |
41 | * nilfs_ioctl_wrap_copy - wrapping function of get/set metadata info | ||
42 | * @nilfs: nilfs object | ||
43 | * @argv: vector of arguments from userspace | ||
44 | * @dir: set of direction flags | ||
45 | * @dofunc: concrete function of get/set metadata info | ||
46 | * | ||
47 | * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of | ||
48 | * calling dofunc() function on the basis of @argv argument. | ||
49 | * | ||
50 | * Return Value: On success, 0 is returned and requested metadata info | ||
51 | * is copied into userspace. On error, one of the following | ||
52 | * negative error codes is returned. | ||
53 | * | ||
54 | * %-EINVAL - Invalid arguments from userspace. | ||
55 | * | ||
56 | * %-ENOMEM - Insufficient amount of memory available. | ||
57 | * | ||
58 | * %-EFAULT - Failure during execution of requested operation. | ||
59 | */ | ||
41 | static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, | 60 | static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, |
42 | struct nilfs_argv *argv, int dir, | 61 | struct nilfs_argv *argv, int dir, |
43 | ssize_t (*dofunc)(struct the_nilfs *, | 62 | ssize_t (*dofunc)(struct the_nilfs *, |
@@ -57,6 +76,14 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, | |||
57 | if (argv->v_size > PAGE_SIZE) | 76 | if (argv->v_size > PAGE_SIZE) |
58 | return -EINVAL; | 77 | return -EINVAL; |
59 | 78 | ||
79 | /* | ||
80 | * Reject pairs of a start item position (argv->v_index) and a | ||
81 | * total count (argv->v_nmembs) which leads position 'pos' to | ||
82 | * overflow by the increment at the end of the loop. | ||
83 | */ | ||
84 | if (argv->v_index > ~(__u64)0 - argv->v_nmembs) | ||
85 | return -EINVAL; | ||
86 | |||
60 | buf = (void *)__get_free_pages(GFP_NOFS, 0); | 87 | buf = (void *)__get_free_pages(GFP_NOFS, 0); |
61 | if (unlikely(!buf)) | 88 | if (unlikely(!buf)) |
62 | return -ENOMEM; | 89 | return -ENOMEM; |
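The new guard in nilfs_ioctl_wrap_copy() rejects (v_index, v_nmembs) pairs whose sum would wrap the 64-bit position counter: since the loop advances pos from v_index by up to v_nmembs items, the test v_index > ~(__u64)0 - v_nmembs is "v_index + v_nmembs would exceed U64_MAX" written without performing the overflowing addition. The same check as a stand-alone helper (illustrative only, not a kernel API):

#include <linux/types.h>

/* True if the mathematical sum start + count exceeds U64_MAX,
 * i.e. the C addition start + count would wrap around. */
static inline bool demo_u64_add_would_overflow(__u64 start, __u64 count)
{
	return start > ~(__u64)0 - count;
}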
@@ -99,6 +126,9 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, | |||
99 | return ret; | 126 | return ret; |
100 | } | 127 | } |
101 | 128 | ||
129 | /** | ||
130 | * nilfs_ioctl_getflags - ioctl to support lsattr | ||
131 | */ | ||
102 | static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp) | 132 | static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp) |
103 | { | 133 | { |
104 | unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE; | 134 | unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE; |
@@ -106,6 +136,9 @@ static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp) | |||
106 | return put_user(flags, (int __user *)argp); | 136 | return put_user(flags, (int __user *)argp); |
107 | } | 137 | } |
108 | 138 | ||
139 | /** | ||
140 | * nilfs_ioctl_setflags - ioctl to support chattr | ||
141 | */ | ||
109 | static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp, | 142 | static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp, |
110 | void __user *argp) | 143 | void __user *argp) |
111 | { | 144 | { |
@@ -158,11 +191,33 @@ out: | |||
158 | return ret; | 191 | return ret; |
159 | } | 192 | } |
160 | 193 | ||
194 | /** | ||
195 | * nilfs_ioctl_getversion - get info about a file's version (generation number) | ||
196 | */ | ||
161 | static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) | 197 | static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) |
162 | { | 198 | { |
163 | return put_user(inode->i_generation, (int __user *)argp); | 199 | return put_user(inode->i_generation, (int __user *)argp); |
164 | } | 200 | } |
165 | 201 | ||
202 | /** | ||
203 | * nilfs_ioctl_change_cpmode - change checkpoint mode (checkpoint/snapshot) | ||
204 | * @inode: inode object | ||
205 | * @filp: file object | ||
206 | * @cmd: ioctl's request code | ||
207 | * @argp: pointer to argument from userspace | ||
208 | * | ||
209 | * Description: nilfs_ioctl_change_cpmode() function changes mode of | ||
210 | * given checkpoint between checkpoint and snapshot state. This ioctl | ||
211 | * is used in chcp and mkcp utilities. | ||
212 | * | ||
213 | * Return Value: On success, 0 is returned and mode of a checkpoint is | ||
214 | * changed. On error, one of the following negative error codes | ||
215 | * is returned. | ||
216 | * | ||
217 | * %-EPERM - Operation not permitted. | ||
218 | * | ||
219 | * %-EFAULT - Failure during checkpoint mode changing. | ||
220 | */ | ||
166 | static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, | 221 | static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, |
167 | unsigned int cmd, void __user *argp) | 222 | unsigned int cmd, void __user *argp) |
168 | { | 223 | { |
@@ -198,6 +253,25 @@ out: | |||
198 | return ret; | 253 | return ret; |
199 | } | 254 | } |
200 | 255 | ||
256 | /** | ||
257 | * nilfs_ioctl_delete_checkpoint - remove checkpoint | ||
258 | * @inode: inode object | ||
259 | * @filp: file object | ||
260 | * @cmd: ioctl's request code | ||
261 | * @argp: pointer to argument from userspace | ||
262 | * | ||
263 | * Description: nilfs_ioctl_delete_checkpoint() function removes | ||
264 | * checkpoint from NILFS2 file system. This ioctl is used in rmcp | ||
265 | * utility. | ||
266 | * | ||
267 | * Return Value: On success, 0 is returned and a checkpoint is | ||
268 | * removed. On error, one of the following negative error codes | ||
269 | * is returned. | ||
270 | * | ||
271 | * %-EPERM - Operation not permitted. | ||
272 | * | ||
273 | * %-EFAULT - Failure during checkpoint removing. | ||
274 | */ | ||
201 | static int | 275 | static int |
202 | nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, | 276 | nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, |
203 | unsigned int cmd, void __user *argp) | 277 | unsigned int cmd, void __user *argp) |
@@ -229,6 +303,21 @@ out: | |||
229 | return ret; | 303 | return ret; |
230 | } | 304 | } |
231 | 305 | ||
306 | /** | ||
307 | * nilfs_ioctl_do_get_cpinfo - callback method getting info about checkpoints | ||
308 | * @nilfs: nilfs object | ||
309 | * @posp: pointer on array of checkpoint's numbers | ||
310 | * @flags: checkpoint mode (checkpoint or snapshot) | ||
313 | * @buf: buffer for storing checkpoints' info | ||
312 | * @size: size in bytes of one checkpoint info item in array | ||
313 | * @nmembs: number of checkpoints in array (numbers and infos) | ||
314 | * | ||
315 | * Description: nilfs_ioctl_do_get_cpinfo() function returns info about | ||
316 | * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in | ||
317 | * lscp utility and by nilfs_cleanerd daemon. | ||
318 | * | ||
319 | * Return value: count of nilfs_cpinfo structures in output buffer. | ||
320 | */ | ||
232 | static ssize_t | 321 | static ssize_t |
233 | nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | 322 | nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, |
234 | void *buf, size_t size, size_t nmembs) | 323 | void *buf, size_t size, size_t nmembs) |
@@ -242,6 +331,27 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
242 | return ret; | 331 | return ret; |
243 | } | 332 | } |
244 | 333 | ||
334 | /** | ||
335 | * nilfs_ioctl_get_cpstat - get checkpoints statistics | ||
336 | * @inode: inode object | ||
337 | * @filp: file object | ||
338 | * @cmd: ioctl's request code | ||
339 | * @argp: pointer to argument from userspace | ||
340 | * | ||
341 | * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. | ||
342 | * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities | ||
343 | * and by nilfs_cleanerd daemon. | ||
344 | * | ||
345 | * Return Value: On success, 0 is returned, and checkpoints information is | ||
346 | * copied into userspace pointer @argp. On error, one of the following | ||
347 | * negative error codes is returned. | ||
348 | * | ||
349 | * %-EIO - I/O error. | ||
350 | * | ||
351 | * %-ENOMEM - Insufficient amount of memory available. | ||
352 | * | ||
353 | * %-EFAULT - Failure during getting checkpoints statistics. | ||
354 | */ | ||
245 | static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, | 355 | static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, |
246 | unsigned int cmd, void __user *argp) | 356 | unsigned int cmd, void __user *argp) |
247 | { | 357 | { |
@@ -260,6 +370,21 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, | |||
260 | return ret; | 370 | return ret; |
261 | } | 371 | } |
262 | 372 | ||
373 | /** | ||
374 | * nilfs_ioctl_do_get_suinfo - callback method getting segment usage info | ||
375 | * @nilfs: nilfs object | ||
376 | * @posp: pointer on array of segment numbers | ||
377 | * @flags: *not used* | ||
378 | * @buf: buffer for storing suinfo array | ||
379 | * @size: size in bytes of one suinfo item in array | ||
380 | * @nmembs: count of segment numbers and suinfos in array | ||
381 | * | ||
382 | * Description: nilfs_ioctl_do_get_suinfo() function returns segment usage | ||
383 | * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used | ||
384 | * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. | ||
385 | * | ||
386 | * Return value: count of nilfs_suinfo structures in output buffer. | ||
387 | */ | ||
263 | static ssize_t | 388 | static ssize_t |
264 | nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | 389 | nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, |
265 | void *buf, size_t size, size_t nmembs) | 390 | void *buf, size_t size, size_t nmembs) |
@@ -273,6 +398,27 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
273 | return ret; | 398 | return ret; |
274 | } | 399 | } |
275 | 400 | ||
401 | /** | ||
402 | * nilfs_ioctl_get_sustat - get segment usage statistics | ||
403 | * @inode: inode object | ||
404 | * @filp: file object | ||
405 | * @cmd: ioctl's request code | ||
406 | * @argp: pointer to argument from userspace | ||
407 | * | ||
408 | * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. | ||
409 | * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities | ||
410 | * and by nilfs_cleanerd daemon. | ||
411 | * | ||
412 | * Return Value: On success, 0 is returned, and segment usage information is | ||
413 | * copied into userspace pointer @argp. On error, one of the following | ||
414 | * negative error codes is returned. | ||
415 | * | ||
416 | * %-EIO - I/O error. | ||
417 | * | ||
418 | * %-ENOMEM - Insufficient amount of memory available. | ||
419 | * | ||
420 | * %-EFAULT - Failure during getting segment usage statistics. | ||
421 | */ | ||
276 | static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, | 422 | static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, |
277 | unsigned int cmd, void __user *argp) | 423 | unsigned int cmd, void __user *argp) |
278 | { | 424 | { |
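NILFS_IOCTL_GET_SUSTAT follows the same calling convention. Continuing the sketch above (same fd and header assumptions; the struct nilfs_sustat field names are taken from the UAPI header and should be checked against it):

	struct nilfs_sustat sustat;

	if (ioctl(fd, NILFS_IOCTL_GET_SUSTAT, &sustat) < 0) {
		perror("NILFS_IOCTL_GET_SUSTAT");
	} else {
		printf("segments: %llu total, %llu clean, %llu dirty\n",
		       (unsigned long long)sustat.ss_nsegs,
		       (unsigned long long)sustat.ss_ncleansegs,
		       (unsigned long long)sustat.ss_ndirtysegs);
	}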
@@ -291,6 +437,21 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, | |||
291 | return ret; | 437 | return ret; |
292 | } | 438 | } |
293 | 439 | ||
440 | /** | ||
441 | * nilfs_ioctl_do_get_vinfo - callback method getting virtual blocks info | ||
442 | * @nilfs: nilfs object | ||
443 | * @posp: *not used* | ||
444 | * @flags: *not used* | ||
445 | * @buf: buffer for storing array of nilfs_vinfo structures | ||
446 | * @size: size in bytes of one vinfo item in array | ||
447 | * @nmembs: count of vinfos in array | ||
448 | * | ||
449 | * Description: nilfs_ioctl_do_get_vinfo() function returns information | ||
450 | * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used | ||
451 | * by nilfs_cleanerd daemon. | ||
452 | * | ||
453 | * Return value: count of nilfs_vinfo structures in output buffer. | ||
454 | */ | ||
294 | static ssize_t | 455 | static ssize_t |
295 | nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | 456 | nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, |
296 | void *buf, size_t size, size_t nmembs) | 457 | void *buf, size_t size, size_t nmembs) |
@@ -303,6 +464,21 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
303 | return ret; | 464 | return ret; |
304 | } | 465 | } |
305 | 466 | ||
467 | /** | ||
468 | * nilfs_ioctl_do_get_bdescs - callback method getting disk block descriptors | ||
469 | * @nilfs: nilfs object | ||
470 | * @posp: *not used* | ||
471 | * @flags: *not used* | ||
472 | * @buf: buffer for storing array of nilfs_bdesc structures | ||
473 | * @size: size in bytes of one bdesc item in array | ||
474 | * @nmembs: count of bdescs in array | ||
475 | * | ||
476 | * Description: nilfs_ioctl_do_get_bdescs() function returns information | ||
477 | * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl | ||
478 | * is used by nilfs_cleanerd daemon. | ||
479 | * | ||
480 | * Return value: count of nilfs_bdesc structures in output buffer. | ||
481 | */ | ||
306 | static ssize_t | 482 | static ssize_t |
307 | nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, | 483 | nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, |
308 | void *buf, size_t size, size_t nmembs) | 484 | void *buf, size_t size, size_t nmembs) |
@@ -329,6 +505,29 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
329 | return nmembs; | 505 | return nmembs; |
330 | } | 506 | } |
331 | 507 | ||
508 | /** | ||
509 | * nilfs_ioctl_get_bdescs - get disk block descriptors | ||
510 | * @inode: inode object | ||
511 | * @filp: file object | ||
512 | * @cmd: ioctl's request code | ||
513 | * @argp: pointer to argument from userspace | ||
514 | * | ||
515 | * Description: nilfs_ioctl_get_bdescs() function returns information | ||
516 | * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl | ||
517 | * is used by nilfs_cleanerd daemon. | ||
518 | * | ||
519 | * Return Value: On success, 0 is returned, and disk block descriptors are | ||
520 | * copied into userspace pointer @argp. On error, one of the following | ||
521 | * negative error codes is returned. | ||
522 | * | ||
523 | * %-EINVAL - Invalid arguments from userspace. | ||
524 | * | ||
525 | * %-EIO - I/O error. | ||
526 | * | ||
527 | * %-ENOMEM - Insufficient amount of memory available. | ||
528 | * | ||
529 | * %-EFAULT - Failure during getting disk block descriptors. | ||
530 | */ | ||
332 | static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, | 531 | static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, |
333 | unsigned int cmd, void __user *argp) | 532 | unsigned int cmd, void __user *argp) |
334 | { | 533 | { |
@@ -352,6 +551,26 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, | |||
352 | return ret; | 551 | return ret; |
353 | } | 552 | } |
354 | 553 | ||
554 | /** | ||
555 | * nilfs_ioctl_move_inode_block - prepare data/node block for moving by GC | ||
556 | * @inode: inode object | ||
557 | * @vdesc: descriptor of virtual block number | ||
558 | * @buffers: list of moving buffers | ||
559 | * | ||
560 | * Description: nilfs_ioctl_move_inode_block() function registers a data/node | ||
561 | * buffer in the GC pagecache and submits a read request. | ||
562 | * | ||
563 | * Return Value: On success, 0 is returned. On error, one of the following | ||
564 | * negative error codes is returned. | ||
565 | * | ||
566 | * %-EIO - I/O error. | ||
567 | * | ||
568 | * %-ENOMEM - Insufficient amount of memory available. | ||
569 | * | ||
570 | * %-ENOENT - Requested block doesn't exist. | ||
571 | * | ||
572 | * %-EEXIST - Blocks conflict is detected. | ||
573 | */ | ||
355 | static int nilfs_ioctl_move_inode_block(struct inode *inode, | 574 | static int nilfs_ioctl_move_inode_block(struct inode *inode, |
356 | struct nilfs_vdesc *vdesc, | 575 | struct nilfs_vdesc *vdesc, |
357 | struct list_head *buffers) | 576 | struct list_head *buffers) |
@@ -397,6 +616,19 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, | |||
397 | return 0; | 616 | return 0; |
398 | } | 617 | } |
399 | 618 | ||
619 | /** | ||
620 | * nilfs_ioctl_move_blocks - move valid inode's blocks during garbage collection | ||
621 | * @sb: superblock object | ||
622 | * @argv: vector of arguments from userspace | ||
623 | * @buf: array of nilfs_vdesc structures | ||
624 | * | ||
625 | * Description: nilfs_ioctl_move_blocks() function reads valid data/node | ||
626 | * blocks that the garbage collector specified with the array of nilfs_vdesc | ||
627 | * structures and stores them into page caches of GC inodes. | ||
628 | * | ||
629 | * Return Value: Number of processed nilfs_vdesc structures, or a | ||
630 | * negative error code on failure. | ||
631 | */ | ||
400 | static int nilfs_ioctl_move_blocks(struct super_block *sb, | 632 | static int nilfs_ioctl_move_blocks(struct super_block *sb, |
401 | struct nilfs_argv *argv, void *buf) | 633 | struct nilfs_argv *argv, void *buf) |
402 | { | 634 | { |
@@ -462,6 +694,25 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb, | |||
462 | return ret; | 694 | return ret; |
463 | } | 695 | } |
464 | 696 | ||
697 | /** | ||
698 | * nilfs_ioctl_delete_checkpoints - delete checkpoints | ||
699 | * @nilfs: nilfs object | ||
700 | * @argv: vector of arguments from userspace | ||
701 | * @buf: array of periods of checkpoints numbers | ||
702 | * | ||
703 | * Description: nilfs_ioctl_delete_checkpoints() function deletes checkpoints | ||
704 | * in the period from p_start to p_end, excluding p_end itself. The checkpoints | ||
705 | * which have already been deleted are ignored. | ||
706 | * | ||
707 | * Return Value: Number of processed nilfs_period structures, or a | ||
708 | * negative error code on failure. | ||
709 | * | ||
710 | * %-EIO - I/O error. | ||
711 | * | ||
712 | * %-ENOMEM - Insufficient amount of memory available. | ||
713 | * | ||
714 | * %-EINVAL - Invalid checkpoints. | ||
715 | */ | ||
465 | static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, | 716 | static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, |
466 | struct nilfs_argv *argv, void *buf) | 717 | struct nilfs_argv *argv, void *buf) |
467 | { | 718 | { |
@@ -479,6 +730,24 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, | |||
479 | return nmembs; | 730 | return nmembs; |
480 | } | 731 | } |
481 | 732 | ||
733 | /** | ||
734 | * nilfs_ioctl_free_vblocknrs - free virtual block numbers | ||
735 | * @nilfs: nilfs object | ||
736 | * @argv: vector of arguments from userspace | ||
737 | * @buf: array of virtual block numbers | ||
738 | * | ||
739 | * Description: nilfs_ioctl_free_vblocknrs() function frees | ||
740 | * the virtual block numbers specified by @buf and @argv->v_nmembs. | ||
741 | * | ||
742 | * Return Value: Number of processed virtual block numbers, or a | ||
743 | * negative error code on failure. | ||
744 | * | ||
745 | * %-EIO - I/O error. | ||
746 | * | ||
747 | * %-ENOMEM - Insufficient amount of memory available. | ||
748 | * | ||
749 | * %-ENOENT - The virtual block numbers have not been allocated. | ||
750 | */ | ||
482 | static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, | 751 | static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, |
483 | struct nilfs_argv *argv, void *buf) | 752 | struct nilfs_argv *argv, void *buf) |
484 | { | 753 | { |
@@ -490,6 +759,24 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, | |||
490 | return (ret < 0) ? ret : nmembs; | 759 | return (ret < 0) ? ret : nmembs; |
491 | } | 760 | } |
492 | 761 | ||
762 | /** | ||
763 | * nilfs_ioctl_mark_blocks_dirty - mark blocks dirty | ||
764 | * @nilfs: nilfs object | ||
765 | * @argv: vector of arguments from userspace | ||
766 | * @buf: array of block descriptors | ||
767 | * | ||
768 | * Description: nilfs_ioctl_mark_blocks_dirty() function marks | ||
769 | * metadata file or data blocks as dirty. | ||
770 | * | ||
771 | * Return Value: Number of processed block descriptors, or a | ||
772 | * negative error code on failure. | ||
773 | * | ||
774 | * %-ENOMEM - Insufficient memory available. | ||
775 | * | ||
776 | * %-EIO - I/O error. | ||
777 | * | ||
778 | * %-ENOENT - The specified block does not exist (hole block). | ||
779 | */ | ||
493 | static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, | 780 | static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, |
494 | struct nilfs_argv *argv, void *buf) | 781 | struct nilfs_argv *argv, void *buf) |
495 | { | 782 | { |
@@ -571,6 +858,20 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, | |||
571 | return ret; | 858 | return ret; |
572 | } | 859 | } |
573 | 860 | ||
861 | /** | ||
862 | * nilfs_ioctl_clean_segments - clean segments | ||
863 | * @inode: inode object | ||
864 | * @filp: file object | ||
865 | * @cmd: ioctl's request code | ||
866 | * @argp: pointer to argument from userspace | ||
867 | * | ||
868 | * Description: nilfs_ioctl_clean_segments() function performs a garbage | ||
869 | * collection pass with the parameters requested from userspace. | ||
870 | * The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by | ||
871 | * nilfs_cleanerd daemon. | ||
872 | * | ||
873 | * Return Value: On success, 0 is returned. On error, a negative error code is returned. | ||
874 | */ | ||
574 | static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, | 875 | static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, |
575 | unsigned int cmd, void __user *argp) | 876 | unsigned int cmd, void __user *argp) |
576 | { | 877 | { |
@@ -682,6 +983,33 @@ out: | |||
682 | return ret; | 983 | return ret; |
683 | } | 984 | } |
684 | 985 | ||
986 | /** | ||
987 | * nilfs_ioctl_sync - make a checkpoint | ||
988 | * @inode: inode object | ||
989 | * @filp: file object | ||
990 | * @cmd: ioctl's request code | ||
991 | * @argp: pointer to argument from userspace | ||
992 | * | ||
993 | * Description: nilfs_ioctl_sync() function constructs a logical segment | ||
994 | * for checkpointing. This function guarantees that all modified data | ||
995 | * and metadata are written out to the device when it successfully | ||
996 | * returned. | ||
997 | * | ||
998 | * Return Value: On success, 0 is retured. On errors, one of the following | ||
999 | * negative error code is returned. | ||
1000 | * | ||
1001 | * %-EROFS - Read only filesystem. | ||
1002 | * | ||
1003 | * %-EIO - I/O error | ||
1004 | * | ||
1005 | * %-ENOSPC - No space left on device (only in a panic state). | ||
1006 | * | ||
1007 | * %-ERESTARTSYS - Interrupted. | ||
1008 | * | ||
1009 | * %-ENOMEM - Insufficient memory available. | ||
1010 | * | ||
1011 | * %-EFAULT - Failure during execution of requested operation. | ||
1012 | */ | ||
685 | static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, | 1013 | static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, |
686 | unsigned int cmd, void __user *argp) | 1014 | unsigned int cmd, void __user *argp) |
687 | { | 1015 | { |
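NILFS_IOCTL_SYNC takes a pointer to a __u64 that, on success, receives the number of the checkpoint just written. A fragment in the same hedged style as the sketches above:

	__u64 cno = 0;

	if (ioctl(fd, NILFS_IOCTL_SYNC, &cno) < 0)
		perror("NILFS_IOCTL_SYNC");
	else
		printf("constructed checkpoint %llu\n", (unsigned long long)cno);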
@@ -710,6 +1038,14 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, | |||
710 | return 0; | 1038 | return 0; |
711 | } | 1039 | } |
712 | 1040 | ||
1041 | /** | ||
1042 | * nilfs_ioctl_resize - resize NILFS2 volume | ||
1043 | * @inode: inode object | ||
1044 | * @filp: file object | ||
1045 | * @argp: pointer to argument from userspace | ||
1046 | * | ||
1047 | * Return Value: On success, 0 is returned. On error, a negative error code is returned. | ||
1048 | */ | ||
713 | static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, | 1049 | static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, |
714 | void __user *argp) | 1050 | void __user *argp) |
715 | { | 1051 | { |
@@ -735,6 +1071,17 @@ out: | |||
735 | return ret; | 1071 | return ret; |
736 | } | 1072 | } |
737 | 1073 | ||
1074 | /** | ||
1075 | * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated | ||
1076 | * @inode: inode object | ||
1077 | * @argp: pointer to argument from userspace | ||
1078 | * | ||
1079 | * Description: nilfs_ioctl_set_alloc_range() function defines the lower and | ||
1080 | * upper limits of the segment allocation range, both given in bytes. | ||
1081 | * The NILFS_IOCTL_SET_ALLOC_RANGE ioctl is used by the nilfs_resize utility. | ||
1082 | * | ||
1083 | * Return Value: On success, 0 is returned. On error, a negative error code is returned. | ||
1084 | */ | ||
738 | static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) | 1085 | static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) |
739 | { | 1086 | { |
740 | struct the_nilfs *nilfs = inode->i_sb->s_fs_info; | 1087 | struct the_nilfs *nilfs = inode->i_sb->s_fs_info; |
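As documented above, the request takes a pair of byte offsets which the kernel converts to segment numbers. A fragment under the same assumptions as the earlier sketches (the pair-of-__u64 layout and the 1 GiB figure are illustrative and should be checked against the UAPI header):

	__u64 range[2] = { 0, 1ULL << 30 };	/* [lower, upper) in bytes */

	if (ioctl(fd, NILFS_IOCTL_SET_ALLOC_RANGE, range) < 0)
		perror("NILFS_IOCTL_SET_ALLOC_RANGE");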
@@ -767,6 +1114,28 @@ out: | |||
767 | return ret; | 1114 | return ret; |
768 | } | 1115 | } |
769 | 1116 | ||
1117 | /** | ||
1118 | * nilfs_ioctl_get_info - wrapper function for getting metadata info | ||
1119 | * @inode: inode object | ||
1120 | * @filp: file object | ||
1121 | * @cmd: ioctl's request code | ||
1122 | * @argp: pointer to argument from userspace | ||
1123 | * @membsz: size of an item in bytes | ||
1124 | * @dofunc: concrete function of getting metadata info | ||
1125 | * | ||
1126 | * Description: nilfs_ioctl_get_info() gets metadata info by means of | ||
1127 | * calling the dofunc() callback. | ||
1128 | * | ||
1129 | * Return Value: On success, 0 is returned and requested metadata info | ||
1130 | * is copied into userspace. On error, one of the following | ||
1131 | * negative error codes is returned. | ||
1132 | * | ||
1133 | * %-EINVAL - Invalid arguments from userspace. | ||
1134 | * | ||
1135 | * %-ENOMEM - Insufficient amount of memory available. | ||
1136 | * | ||
1137 | * %-EFAULT - Failure during execution of requested operation. | ||
1138 | */ | ||
770 | static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, | 1139 | static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, |
771 | unsigned int cmd, void __user *argp, | 1140 | unsigned int cmd, void __user *argp, |
772 | size_t membsz, | 1141 | size_t membsz, |
@@ -663,10 +663,11 @@ out: | |||
663 | wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); | 663 | wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); |
664 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | 664 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); |
665 | } | 665 | } |
666 | if (ret > 0) { | 666 | if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { |
667 | int err = file_update_time(filp); | 667 | int err = file_update_time(filp); |
668 | if (err) | 668 | if (err) |
669 | ret = err; | 669 | ret = err; |
670 | sb_end_write(file_inode(filp)->i_sb); | ||
670 | } | 671 | } |
671 | return ret; | 672 | return ret; |
672 | } | 673 | } |
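The hunk above makes the mtime/atime update on a successful pipe write conditional on taking freeze protection, so a writer never blocks behind a frozen superblock. The helper below is not part of the patch; it is only a minimal sketch of the same pattern, with an invented function name:

static void file_update_time_if_not_frozen(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (sb_start_write_trylock(sb)) {	/* skip silently if sb is frozen */
		file_update_time(filp);		/* may dirty the inode */
		sb_end_write(sb);
	}
}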
diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 021e7c069b86..551e61ba15b6 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c | |||
@@ -149,8 +149,6 @@ posix_acl_valid(const struct posix_acl *acl) | |||
149 | { | 149 | { |
150 | const struct posix_acl_entry *pa, *pe; | 150 | const struct posix_acl_entry *pa, *pe; |
151 | int state = ACL_USER_OBJ; | 151 | int state = ACL_USER_OBJ; |
152 | kuid_t prev_uid = INVALID_UID; | ||
153 | kgid_t prev_gid = INVALID_GID; | ||
154 | int needs_mask = 0; | 152 | int needs_mask = 0; |
155 | 153 | ||
156 | FOREACH_ACL_ENTRY(pa, acl, pe) { | 154 | FOREACH_ACL_ENTRY(pa, acl, pe) { |
@@ -169,10 +167,6 @@ posix_acl_valid(const struct posix_acl *acl) | |||
169 | return -EINVAL; | 167 | return -EINVAL; |
170 | if (!uid_valid(pa->e_uid)) | 168 | if (!uid_valid(pa->e_uid)) |
171 | return -EINVAL; | 169 | return -EINVAL; |
172 | if (uid_valid(prev_uid) && | ||
173 | uid_lte(pa->e_uid, prev_uid)) | ||
174 | return -EINVAL; | ||
175 | prev_uid = pa->e_uid; | ||
176 | needs_mask = 1; | 170 | needs_mask = 1; |
177 | break; | 171 | break; |
178 | 172 | ||
@@ -188,10 +182,6 @@ posix_acl_valid(const struct posix_acl *acl) | |||
188 | return -EINVAL; | 182 | return -EINVAL; |
189 | if (!gid_valid(pa->e_gid)) | 183 | if (!gid_valid(pa->e_gid)) |
190 | return -EINVAL; | 184 | return -EINVAL; |
191 | if (gid_valid(prev_gid) && | ||
192 | gid_lte(pa->e_gid, prev_gid)) | ||
193 | return -EINVAL; | ||
194 | prev_gid = pa->e_gid; | ||
195 | needs_mask = 1; | 185 | needs_mask = 1; |
196 | break; | 186 | break; |
197 | 187 | ||
diff --git a/fs/proc/array.c b/fs/proc/array.c index 1bd2077187fd..656e401794de 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -140,24 +140,15 @@ static const char * const task_state_array[] = { | |||
140 | "t (tracing stop)", /* 8 */ | 140 | "t (tracing stop)", /* 8 */ |
141 | "Z (zombie)", /* 16 */ | 141 | "Z (zombie)", /* 16 */ |
142 | "X (dead)", /* 32 */ | 142 | "X (dead)", /* 32 */ |
143 | "x (dead)", /* 64 */ | ||
144 | "K (wakekill)", /* 128 */ | ||
145 | "W (waking)", /* 256 */ | ||
146 | "P (parked)", /* 512 */ | ||
147 | }; | 143 | }; |
148 | 144 | ||
149 | static inline const char *get_task_state(struct task_struct *tsk) | 145 | static inline const char *get_task_state(struct task_struct *tsk) |
150 | { | 146 | { |
151 | unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; | 147 | unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT; |
152 | const char * const *p = &task_state_array[0]; | ||
153 | 148 | ||
154 | BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); | 149 | BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1); |
155 | 150 | ||
156 | while (state) { | 151 | return task_state_array[fls(state)]; |
157 | p++; | ||
158 | state >>= 1; | ||
159 | } | ||
160 | return *p; | ||
161 | } | 152 | } |
162 | 153 | ||
163 | static inline void task_state(struct seq_file *m, struct pid_namespace *ns, | 154 | static inline void task_state(struct seq_file *m, struct pid_namespace *ns, |
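The rewritten get_task_state() leans on fls(): with one table slot per reported state bit plus a leading slot for the running state, fls(state) lands on the right string without a loop, and the BUILD_BUG_ON keeps the table and TASK_REPORT in step. A self-contained illustration of the indexing (names prefixed demo_ are invented for the example):

static const char * const demo_state_array[] = {
	"R (running)",		/* state == 0, fls() == 0 */
	"S (sleeping)",		/* TASK_INTERRUPTIBLE   = 1 << 0, fls() == 1 */
	"D (disk sleep)",	/* TASK_UNINTERRUPTIBLE = 1 << 1, fls() == 2 */
	"T (stopped)",		/* __TASK_STOPPED       = 1 << 2, fls() == 3 */
	/* ... one entry per remaining reported state bit ... */
};

static const char *demo_state_name(unsigned int state)
{
	return demo_state_array[fls(state)];	/* highest set bit picks the slot */
}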
@@ -453,8 +444,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
453 | min_flt += t->min_flt; | 444 | min_flt += t->min_flt; |
454 | maj_flt += t->maj_flt; | 445 | maj_flt += t->maj_flt; |
455 | gtime += task_gtime(t); | 446 | gtime += task_gtime(t); |
456 | t = next_thread(t); | 447 | } while_each_thread(task, t); |
457 | } while (t != task); | ||
458 | 448 | ||
459 | min_flt += sig->min_flt; | 449 | min_flt += sig->min_flt; |
460 | maj_flt += sig->maj_flt; | 450 | maj_flt += sig->maj_flt; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 03c8d747be48..51507065263b 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1658,13 +1658,18 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags) | |||
1658 | return 0; | 1658 | return 0; |
1659 | } | 1659 | } |
1660 | 1660 | ||
1661 | static inline bool proc_inode_is_dead(struct inode *inode) | ||
1662 | { | ||
1663 | return !proc_pid(inode)->tasks[PIDTYPE_PID].first; | ||
1664 | } | ||
1665 | |||
1661 | int pid_delete_dentry(const struct dentry *dentry) | 1666 | int pid_delete_dentry(const struct dentry *dentry) |
1662 | { | 1667 | { |
1663 | /* Is the task we represent dead? | 1668 | /* Is the task we represent dead? |
1664 | * If so, then don't put the dentry on the lru list, | 1669 | * If so, then don't put the dentry on the lru list, |
1665 | * kill it immediately. | 1670 | * kill it immediately. |
1666 | */ | 1671 | */ |
1667 | return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; | 1672 | return proc_inode_is_dead(dentry->d_inode); |
1668 | } | 1673 | } |
1669 | 1674 | ||
1670 | const struct dentry_operations pid_dentry_operations = | 1675 | const struct dentry_operations pid_dentry_operations = |
@@ -3092,34 +3097,42 @@ out_no_task: | |||
3092 | * In the case of a seek we start with the leader and walk nr | 3097 | * In the case of a seek we start with the leader and walk nr |
3093 | * threads past it. | 3098 | * threads past it. |
3094 | */ | 3099 | */ |
3095 | static struct task_struct *first_tid(struct task_struct *leader, | 3100 | static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos, |
3096 | int tid, int nr, struct pid_namespace *ns) | 3101 | struct pid_namespace *ns) |
3097 | { | 3102 | { |
3098 | struct task_struct *pos; | 3103 | struct task_struct *pos, *task; |
3104 | unsigned long nr = f_pos; | ||
3105 | |||
3106 | if (nr != f_pos) /* 32bit overflow? */ | ||
3107 | return NULL; | ||
3099 | 3108 | ||
3100 | rcu_read_lock(); | 3109 | rcu_read_lock(); |
3101 | /* Attempt to start with the pid of a thread */ | 3110 | task = pid_task(pid, PIDTYPE_PID); |
3102 | if (tid && (nr > 0)) { | 3111 | if (!task) |
3112 | goto fail; | ||
3113 | |||
3114 | /* Attempt to start with the tid of a thread */ | ||
3115 | if (tid && nr) { | ||
3103 | pos = find_task_by_pid_ns(tid, ns); | 3116 | pos = find_task_by_pid_ns(tid, ns); |
3104 | if (pos && (pos->group_leader == leader)) | 3117 | if (pos && same_thread_group(pos, task)) |
3105 | goto found; | 3118 | goto found; |
3106 | } | 3119 | } |
3107 | 3120 | ||
3108 | /* If nr exceeds the number of threads there is nothing todo */ | 3121 | /* If nr exceeds the number of threads there is nothing todo */ |
3109 | pos = NULL; | 3122 | if (nr >= get_nr_threads(task)) |
3110 | if (nr && nr >= get_nr_threads(leader)) | 3123 | goto fail; |
3111 | goto out; | ||
3112 | 3124 | ||
3113 | /* If we haven't found our starting place yet start | 3125 | /* If we haven't found our starting place yet start |
3114 | * with the leader and walk nr threads forward. | 3126 | * with the leader and walk nr threads forward. |
3115 | */ | 3127 | */ |
3116 | for (pos = leader; nr > 0; --nr) { | 3128 | pos = task = task->group_leader; |
3117 | pos = next_thread(pos); | 3129 | do { |
3118 | if (pos == leader) { | 3130 | if (!nr--) |
3119 | pos = NULL; | 3131 | goto found; |
3120 | goto out; | 3132 | } while_each_thread(task, pos); |
3121 | } | 3133 | fail: |
3122 | } | 3134 | pos = NULL; |
3135 | goto out; | ||
3123 | found: | 3136 | found: |
3124 | get_task_struct(pos); | 3137 | get_task_struct(pos); |
3125 | out: | 3138 | out: |
@@ -3152,25 +3165,16 @@ static struct task_struct *next_tid(struct task_struct *start) | |||
3152 | /* for the /proc/TGID/task/ directories */ | 3165 | /* for the /proc/TGID/task/ directories */ |
3153 | static int proc_task_readdir(struct file *file, struct dir_context *ctx) | 3166 | static int proc_task_readdir(struct file *file, struct dir_context *ctx) |
3154 | { | 3167 | { |
3155 | struct task_struct *leader = NULL; | 3168 | struct inode *inode = file_inode(file); |
3156 | struct task_struct *task = get_proc_task(file_inode(file)); | 3169 | struct task_struct *task; |
3157 | struct pid_namespace *ns; | 3170 | struct pid_namespace *ns; |
3158 | int tid; | 3171 | int tid; |
3159 | 3172 | ||
3160 | if (!task) | 3173 | if (proc_inode_is_dead(inode)) |
3161 | return -ENOENT; | ||
3162 | rcu_read_lock(); | ||
3163 | if (pid_alive(task)) { | ||
3164 | leader = task->group_leader; | ||
3165 | get_task_struct(leader); | ||
3166 | } | ||
3167 | rcu_read_unlock(); | ||
3168 | put_task_struct(task); | ||
3169 | if (!leader) | ||
3170 | return -ENOENT; | 3174 | return -ENOENT; |
3171 | 3175 | ||
3172 | if (!dir_emit_dots(file, ctx)) | 3176 | if (!dir_emit_dots(file, ctx)) |
3173 | goto out; | 3177 | return 0; |
3174 | 3178 | ||
3175 | /* f_version caches the tgid value that the last readdir call couldn't | 3179 | /* f_version caches the tgid value that the last readdir call couldn't |
3176 | * return. lseek aka telldir automagically resets f_version to 0. | 3180 | * return. lseek aka telldir automagically resets f_version to 0. |
@@ -3178,7 +3182,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) | |||
3178 | ns = file->f_dentry->d_sb->s_fs_info; | 3182 | ns = file->f_dentry->d_sb->s_fs_info; |
3179 | tid = (int)file->f_version; | 3183 | tid = (int)file->f_version; |
3180 | file->f_version = 0; | 3184 | file->f_version = 0; |
3181 | for (task = first_tid(leader, tid, ctx->pos - 2, ns); | 3185 | for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); |
3182 | task; | 3186 | task; |
3183 | task = next_tid(task), ctx->pos++) { | 3187 | task = next_tid(task), ctx->pos++) { |
3184 | char name[PROC_NUMBUF]; | 3188 | char name[PROC_NUMBUF]; |
@@ -3194,8 +3198,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) | |||
3194 | break; | 3198 | break; |
3195 | } | 3199 | } |
3196 | } | 3200 | } |
3197 | out: | 3201 | |
3198 | put_task_struct(leader); | ||
3199 | return 0; | 3202 | return 0; |
3200 | } | 3203 | } |
3201 | 3204 | ||
diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index 82676e3fcd1d..cbd82dff7e81 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c | |||
@@ -26,4 +26,4 @@ static int __init proc_cmdline_init(void) | |||
26 | proc_create("cmdline", 0, NULL, &cmdline_proc_fops); | 26 | proc_create("cmdline", 0, NULL, &cmdline_proc_fops); |
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
29 | module_init(proc_cmdline_init); | 29 | fs_initcall(proc_cmdline_init); |
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c index 51942d5abcec..290ba85cb900 100644 --- a/fs/proc/consoles.c +++ b/fs/proc/consoles.c | |||
@@ -109,4 +109,4 @@ static int __init proc_consoles_init(void) | |||
109 | proc_create("consoles", 0, NULL, &proc_consoles_operations); | 109 | proc_create("consoles", 0, NULL, &proc_consoles_operations); |
110 | return 0; | 110 | return 0; |
111 | } | 111 | } |
112 | module_init(proc_consoles_init); | 112 | fs_initcall(proc_consoles_init); |
diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c index 5a1e539a234b..06f4d31e0396 100644 --- a/fs/proc/cpuinfo.c +++ b/fs/proc/cpuinfo.c | |||
@@ -21,4 +21,4 @@ static int __init proc_cpuinfo_init(void) | |||
21 | proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); | 21 | proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); |
22 | return 0; | 22 | return 0; |
23 | } | 23 | } |
24 | module_init(proc_cpuinfo_init); | 24 | fs_initcall(proc_cpuinfo_init); |
diff --git a/fs/proc/devices.c b/fs/proc/devices.c index b14347167c35..50493edc30e5 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c | |||
@@ -67,4 +67,4 @@ static int __init proc_devices_init(void) | |||
67 | proc_create("devices", 0, NULL, &proc_devinfo_operations); | 67 | proc_create("devices", 0, NULL, &proc_devinfo_operations); |
68 | return 0; | 68 | return 0; |
69 | } | 69 | } |
70 | module_init(proc_devices_init); | 70 | fs_initcall(proc_devices_init); |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index cca93b6fb9a9..b7f268eb5f45 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -49,8 +49,7 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) | |||
49 | setattr_copy(inode, iattr); | 49 | setattr_copy(inode, iattr); |
50 | mark_inode_dirty(inode); | 50 | mark_inode_dirty(inode); |
51 | 51 | ||
52 | de->uid = inode->i_uid; | 52 | proc_set_user(de, inode->i_uid, inode->i_gid); |
53 | de->gid = inode->i_gid; | ||
54 | de->mode = inode->i_mode; | 53 | de->mode = inode->i_mode; |
55 | return 0; | 54 | return 0; |
56 | } | 55 | } |
diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c index 05029c0e2f24..a352d5703b41 100644 --- a/fs/proc/interrupts.c +++ b/fs/proc/interrupts.c | |||
@@ -50,4 +50,4 @@ static int __init proc_interrupts_init(void) | |||
50 | proc_create("interrupts", 0, NULL, &proc_interrupts_operations); | 50 | proc_create("interrupts", 0, NULL, &proc_interrupts_operations); |
51 | return 0; | 51 | return 0; |
52 | } | 52 | } |
53 | module_init(proc_interrupts_init); | 53 | fs_initcall(proc_interrupts_init); |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 5ed0e52d6aa0..39e6ef32f0bd 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -639,4 +639,4 @@ static int __init proc_kcore_init(void) | |||
639 | 639 | ||
640 | return 0; | 640 | return 0; |
641 | } | 641 | } |
642 | module_init(proc_kcore_init); | 642 | fs_initcall(proc_kcore_init); |
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index bdfabdaefdce..05f8dcdb086e 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c | |||
@@ -61,4 +61,4 @@ static int __init proc_kmsg_init(void) | |||
61 | proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); | 61 | proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); |
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
64 | module_init(proc_kmsg_init); | 64 | fs_initcall(proc_kmsg_init); |
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 1afa4dd4cae2..aec66e6c2060 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c | |||
@@ -42,4 +42,4 @@ static int __init proc_loadavg_init(void) | |||
42 | proc_create("loadavg", 0, NULL, &loadavg_proc_fops); | 42 | proc_create("loadavg", 0, NULL, &loadavg_proc_fops); |
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | module_init(proc_loadavg_init); | 45 | fs_initcall(proc_loadavg_init); |
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 24270eceddbf..136e548d9567 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -220,4 +220,4 @@ static int __init proc_meminfo_init(void) | |||
220 | proc_create("meminfo", 0, NULL, &meminfo_proc_fops); | 220 | proc_create("meminfo", 0, NULL, &meminfo_proc_fops); |
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | module_init(proc_meminfo_init); | 223 | fs_initcall(proc_meminfo_init); |
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 5f9bc8a746c9..d4a35746cab9 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -131,4 +131,4 @@ static int __init proc_nommu_init(void) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | module_init(proc_nommu_init); | 134 | fs_initcall(proc_nommu_init); |
diff --git a/fs/proc/page.c b/fs/proc/page.c index b8730d9ebaee..02174a610315 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -118,10 +118,12 @@ u64 stable_page_flags(struct page *page) | |||
118 | /* | 118 | /* |
119 | * PageTransCompound can be true for non-huge compound pages (slab | 119 | * PageTransCompound can be true for non-huge compound pages (slab |
120 | * pages or pages allocated by drivers with __GFP_COMP) because it | 120 | * pages or pages allocated by drivers with __GFP_COMP) because it |
121 | * just checks PG_head/PG_tail, so we need to check PageLRU to make | 121 | * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon |
122 | * sure a given page is a thp, not a non-huge compound page. | 122 | * to make sure a given page is a thp, not a non-huge compound page. |
123 | */ | 123 | */ |
124 | else if (PageTransCompound(page) && PageLRU(compound_trans_head(page))) | 124 | else if (PageTransCompound(page) && |
125 | (PageLRU(compound_trans_head(page)) || | ||
126 | PageAnon(compound_trans_head(page)))) | ||
125 | u |= 1 << KPF_THP; | 127 | u |= 1 << KPF_THP; |
126 | 128 | ||
127 | /* | 129 | /* |
@@ -217,4 +219,4 @@ static int __init proc_page_init(void) | |||
217 | proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations); | 219 | proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations); |
218 | return 0; | 220 | return 0; |
219 | } | 221 | } |
220 | module_init(proc_page_init); | 222 | fs_initcall(proc_page_init); |
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index 70779b2fc209..c82dd5147845 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c | |||
@@ -74,9 +74,9 @@ __proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp, | |||
74 | return NULL; | 74 | return NULL; |
75 | 75 | ||
76 | if (!strncmp(name, "security-", 9)) | 76 | if (!strncmp(name, "security-", 9)) |
77 | ent->size = 0; /* don't leak number of password chars */ | 77 | proc_set_size(ent, 0); /* don't leak number of password chars */ |
78 | else | 78 | else |
79 | ent->size = pp->length; | 79 | proc_set_size(ent, pp->length); |
80 | 80 | ||
81 | return ent; | 81 | return ent; |
82 | } | 82 | } |
@@ -232,6 +232,7 @@ void __init proc_device_tree_init(void) | |||
232 | return; | 232 | return; |
233 | root = of_find_node_by_path("/"); | 233 | root = of_find_node_by_path("/"); |
234 | if (root == NULL) { | 234 | if (root == NULL) { |
235 | remove_proc_entry("device-tree", NULL); | ||
235 | pr_debug("/proc/device-tree: can't find root\n"); | 236 | pr_debug("/proc/device-tree: can't find root\n"); |
236 | return; | 237 | return; |
237 | } | 238 | } |
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c index 62604be9f58d..ad8a77f94beb 100644 --- a/fs/proc/softirqs.c +++ b/fs/proc/softirqs.c | |||
@@ -41,4 +41,4 @@ static int __init proc_softirqs_init(void) | |||
41 | proc_create("softirqs", 0, NULL, &proc_softirqs_operations); | 41 | proc_create("softirqs", 0, NULL, &proc_softirqs_operations); |
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
44 | module_init(proc_softirqs_init); | 44 | fs_initcall(proc_softirqs_init); |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 1cf86c0e8689..6f599c62f0cc 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -221,4 +221,4 @@ static int __init proc_stat_init(void) | |||
221 | proc_create("stat", 0, NULL, &proc_stat_operations); | 221 | proc_create("stat", 0, NULL, &proc_stat_operations); |
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
224 | module_init(proc_stat_init); | 224 | fs_initcall(proc_stat_init); |
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 061894625903..7141b8d0ca9e 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c | |||
@@ -49,4 +49,4 @@ static int __init proc_uptime_init(void) | |||
49 | proc_create("uptime", 0, NULL, &uptime_proc_fops); | 49 | proc_create("uptime", 0, NULL, &uptime_proc_fops); |
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | module_init(proc_uptime_init); | 52 | fs_initcall(proc_uptime_init); |
diff --git a/fs/proc/version.c b/fs/proc/version.c index 76817a60678c..d2154eb6d78f 100644 --- a/fs/proc/version.c +++ b/fs/proc/version.c | |||
@@ -31,4 +31,4 @@ static int __init proc_version_init(void) | |||
31 | proc_create("version", 0, NULL, &version_proc_fops); | 31 | proc_create("version", 0, NULL, &version_proc_fops); |
32 | return 0; | 32 | return 0; |
33 | } | 33 | } |
34 | module_init(proc_version_init); | 34 | fs_initcall(proc_version_init); |
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 9100d6959886..2ca7ba047f04 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -1082,7 +1082,7 @@ static int __init vmcore_init(void) | |||
1082 | proc_vmcore->size = vmcore_size; | 1082 | proc_vmcore->size = vmcore_size; |
1083 | return 0; | 1083 | return 0; |
1084 | } | 1084 | } |
1085 | module_init(vmcore_init) | 1085 | fs_initcall(vmcore_init); |
1086 | 1086 | ||
1087 | /* Cleanup function for vmcore module. */ | 1087 | /* Cleanup function for vmcore module. */ |
1088 | void vmcore_cleanup(void) | 1088 | void vmcore_cleanup(void) |
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 439406e081af..7be26f03a3f5 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c | |||
@@ -234,17 +234,12 @@ static int mounts_open_common(struct inode *inode, struct file *file, | |||
234 | 234 | ||
235 | rcu_read_lock(); | 235 | rcu_read_lock(); |
236 | nsp = task_nsproxy(task); | 236 | nsp = task_nsproxy(task); |
237 | if (!nsp) { | 237 | if (!nsp || !nsp->mnt_ns) { |
238 | rcu_read_unlock(); | 238 | rcu_read_unlock(); |
239 | put_task_struct(task); | 239 | put_task_struct(task); |
240 | goto err; | 240 | goto err; |
241 | } | 241 | } |
242 | ns = nsp->mnt_ns; | 242 | ns = nsp->mnt_ns; |
243 | if (!ns) { | ||
244 | rcu_read_unlock(); | ||
245 | put_task_struct(task); | ||
246 | goto err; | ||
247 | } | ||
248 | get_mnt_ns(ns); | 243 | get_mnt_ns(ns); |
249 | rcu_read_unlock(); | 244 | rcu_read_unlock(); |
250 | task_lock(task); | 245 | task_lock(task); |
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 4884ac5ae9be..1e56a4e8cf7c 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c | |||
@@ -30,13 +30,6 @@ | |||
30 | 30 | ||
31 | #include "internal.h" | 31 | #include "internal.h" |
32 | 32 | ||
33 | const struct address_space_operations ramfs_aops = { | ||
34 | .readpage = simple_readpage, | ||
35 | .write_begin = simple_write_begin, | ||
36 | .write_end = simple_write_end, | ||
37 | .set_page_dirty = __set_page_dirty_no_writeback, | ||
38 | }; | ||
39 | |||
40 | const struct file_operations ramfs_file_operations = { | 33 | const struct file_operations ramfs_file_operations = { |
41 | .read = do_sync_read, | 34 | .read = do_sync_read, |
42 | .aio_read = generic_file_aio_read, | 35 | .aio_read = generic_file_aio_read, |
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 8d5b438cc188..0b3d8e4cb2fa 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -27,13 +27,12 @@ | |||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | static int ramfs_nommu_setattr(struct dentry *, struct iattr *); | 29 | static int ramfs_nommu_setattr(struct dentry *, struct iattr *); |
30 | 30 | static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | |
31 | const struct address_space_operations ramfs_aops = { | 31 | unsigned long addr, |
32 | .readpage = simple_readpage, | 32 | unsigned long len, |
33 | .write_begin = simple_write_begin, | 33 | unsigned long pgoff, |
34 | .write_end = simple_write_end, | 34 | unsigned long flags); |
35 | .set_page_dirty = __set_page_dirty_no_writeback, | 35 | static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); |
36 | }; | ||
37 | 36 | ||
38 | const struct file_operations ramfs_file_operations = { | 37 | const struct file_operations ramfs_file_operations = { |
39 | .mmap = ramfs_nommu_mmap, | 38 | .mmap = ramfs_nommu_mmap, |
@@ -197,7 +196,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) | |||
197 | * - the pages to be mapped must exist | 196 | * - the pages to be mapped must exist |
198 | * - the pages be physically contiguous in sequence | 197 | * - the pages be physically contiguous in sequence |
199 | */ | 198 | */ |
200 | unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | 199 | static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, |
201 | unsigned long addr, unsigned long len, | 200 | unsigned long addr, unsigned long len, |
202 | unsigned long pgoff, unsigned long flags) | 201 | unsigned long pgoff, unsigned long flags) |
203 | { | 202 | { |
@@ -256,7 +255,7 @@ out: | |||
256 | /* | 255 | /* |
257 | * set up a mapping for shared memory segments | 256 | * set up a mapping for shared memory segments |
258 | */ | 257 | */ |
259 | int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) | 258 | static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) |
260 | { | 259 | { |
261 | if (!(vma->vm_flags & VM_SHARED)) | 260 | if (!(vma->vm_flags & VM_SHARED)) |
262 | return -ENOSYS; | 261 | return -ENOSYS; |
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 6a3e2c420180..d365b1c4eb3c 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c | |||
@@ -43,6 +43,13 @@ | |||
43 | static const struct super_operations ramfs_ops; | 43 | static const struct super_operations ramfs_ops; |
44 | static const struct inode_operations ramfs_dir_inode_operations; | 44 | static const struct inode_operations ramfs_dir_inode_operations; |
45 | 45 | ||
46 | static const struct address_space_operations ramfs_aops = { | ||
47 | .readpage = simple_readpage, | ||
48 | .write_begin = simple_write_begin, | ||
49 | .write_end = simple_write_end, | ||
50 | .set_page_dirty = __set_page_dirty_no_writeback, | ||
51 | }; | ||
52 | |||
46 | static struct backing_dev_info ramfs_backing_dev_info = { | 53 | static struct backing_dev_info ramfs_backing_dev_info = { |
47 | .name = "ramfs", | 54 | .name = "ramfs", |
48 | .ra_pages = 0, /* No readahead */ | 55 | .ra_pages = 0, /* No readahead */ |
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h index 6b330639b51d..a9d8ae88fa15 100644 --- a/fs/ramfs/internal.h +++ b/fs/ramfs/internal.h | |||
@@ -10,5 +10,4 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | 12 | ||
13 | extern const struct address_space_operations ramfs_aops; | ||
14 | extern const struct inode_operations ramfs_file_inode_operations; | 13 | extern const struct inode_operations ramfs_file_inode_operations; |
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index f8adaee537c2..dfb617b2bad2 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h | |||
@@ -1958,8 +1958,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} | |||
1958 | #define MAX_US_INT 0xffff | 1958 | #define MAX_US_INT 0xffff |
1959 | 1959 | ||
1960 | // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset | 1960 | // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset |
1961 | #define U32_MAX (~(__u32)0) | ||
1962 | |||
1963 | static inline loff_t max_reiserfs_offset(struct inode *inode) | 1961 | static inline loff_t max_reiserfs_offset(struct inode *inode) |
1964 | { | 1962 | { |
1965 | if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5) | 1963 | if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5) |
diff --git a/fs/romfs/super.c b/fs/romfs/super.c index ff1d3d42e72a..d8418782862b 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c | |||
@@ -533,16 +533,14 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) | |||
533 | 533 | ||
534 | root = romfs_iget(sb, pos); | 534 | root = romfs_iget(sb, pos); |
535 | if (IS_ERR(root)) | 535 | if (IS_ERR(root)) |
536 | goto error; | 536 | return PTR_ERR(root); |
537 | 537 | ||
538 | sb->s_root = d_make_root(root); | 538 | sb->s_root = d_make_root(root); |
539 | if (!sb->s_root) | 539 | if (!sb->s_root) |
540 | goto error; | 540 | return -ENOMEM; |
541 | 541 | ||
542 | return 0; | 542 | return 0; |
543 | 543 | ||
544 | error: | ||
545 | return -EINVAL; | ||
546 | error_rsb_inval: | 544 | error_rsb_inval: |
547 | ret = -EINVAL; | 545 | ret = -EINVAL; |
548 | error_rsb: | 546 | error_rsb: |
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index cc1febd8fadf..5157b866a853 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -2118,26 +2118,10 @@ out_free: | |||
2118 | */ | 2118 | */ |
2119 | static void free_inodes(struct fsck_data *fsckd) | 2119 | static void free_inodes(struct fsck_data *fsckd) |
2120 | { | 2120 | { |
2121 | struct rb_node *this = fsckd->inodes.rb_node; | 2121 | struct fsck_inode *fscki, *n; |
2122 | struct fsck_inode *fscki; | ||
2123 | 2122 | ||
2124 | while (this) { | 2123 | rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb) |
2125 | if (this->rb_left) | 2124 | kfree(fscki); |
2126 | this = this->rb_left; | ||
2127 | else if (this->rb_right) | ||
2128 | this = this->rb_right; | ||
2129 | else { | ||
2130 | fscki = rb_entry(this, struct fsck_inode, rb); | ||
2131 | this = rb_parent(this); | ||
2132 | if (this) { | ||
2133 | if (this->rb_left == &fscki->rb) | ||
2134 | this->rb_left = NULL; | ||
2135 | else | ||
2136 | this->rb_right = NULL; | ||
2137 | } | ||
2138 | kfree(fscki); | ||
2139 | } | ||
2140 | } | ||
2141 | } | 2125 | } |
2142 | 2126 | ||
2143 | /** | 2127 | /** |
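This hunk, and the similar ones in log.c, orphan.c, recovery.c, super.c and tnc.c below, replace open-coded post-order teardown loops with the rbtree_postorder_for_each_entry_safe() helper from <linux/rbtree.h>. A generic sketch of the pattern (struct demo_node and demo_destroy_tree are invented names):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_node {
	struct rb_node rb;
	int key;
};

static void demo_destroy_tree(struct rb_root *root)
{
	struct demo_node *node, *n;

	/* "n" caches the next post-order entry, so freeing "node" is safe */
	rbtree_postorder_for_each_entry_safe(node, n, root, rb)
		kfree(node);

	*root = RB_ROOT;	/* all nodes are gone; reset the root */
}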
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index 36bd4efd0819..a902c5919e42 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c | |||
@@ -574,27 +574,10 @@ static int done_already(struct rb_root *done_tree, int lnum) | |||
574 | */ | 574 | */ |
575 | static void destroy_done_tree(struct rb_root *done_tree) | 575 | static void destroy_done_tree(struct rb_root *done_tree) |
576 | { | 576 | { |
577 | struct rb_node *this = done_tree->rb_node; | 577 | struct done_ref *dr, *n; |
578 | struct done_ref *dr; | ||
579 | 578 | ||
580 | while (this) { | 579 | rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb) |
581 | if (this->rb_left) { | ||
582 | this = this->rb_left; | ||
583 | continue; | ||
584 | } else if (this->rb_right) { | ||
585 | this = this->rb_right; | ||
586 | continue; | ||
587 | } | ||
588 | dr = rb_entry(this, struct done_ref, rb); | ||
589 | this = rb_parent(this); | ||
590 | if (this) { | ||
591 | if (this->rb_left == &dr->rb) | ||
592 | this->rb_left = NULL; | ||
593 | else | ||
594 | this->rb_right = NULL; | ||
595 | } | ||
596 | kfree(dr); | 580 | kfree(dr); |
597 | } | ||
598 | } | 581 | } |
599 | 582 | ||
600 | /** | 583 | /** |
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index ba32da3fe08a..f1c3e5a1b315 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c | |||
@@ -815,27 +815,10 @@ static int dbg_find_check_orphan(struct rb_root *root, ino_t inum) | |||
815 | 815 | ||
816 | static void dbg_free_check_tree(struct rb_root *root) | 816 | static void dbg_free_check_tree(struct rb_root *root) |
817 | { | 817 | { |
818 | struct rb_node *this = root->rb_node; | 818 | struct check_orphan *o, *n; |
819 | struct check_orphan *o; | ||
820 | 819 | ||
821 | while (this) { | 820 | rbtree_postorder_for_each_entry_safe(o, n, root, rb) |
822 | if (this->rb_left) { | ||
823 | this = this->rb_left; | ||
824 | continue; | ||
825 | } else if (this->rb_right) { | ||
826 | this = this->rb_right; | ||
827 | continue; | ||
828 | } | ||
829 | o = rb_entry(this, struct check_orphan, rb); | ||
830 | this = rb_parent(this); | ||
831 | if (this) { | ||
832 | if (this->rb_left == &o->rb) | ||
833 | this->rb_left = NULL; | ||
834 | else | ||
835 | this->rb_right = NULL; | ||
836 | } | ||
837 | kfree(o); | 821 | kfree(o); |
838 | } | ||
839 | } | 822 | } |
840 | 823 | ||
841 | static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, | 824 | static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, |
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 065096e36ed9..c14adb2f420c 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -1335,29 +1335,14 @@ static void remove_ino(struct ubifs_info *c, ino_t inum) | |||
1335 | */ | 1335 | */ |
1336 | void ubifs_destroy_size_tree(struct ubifs_info *c) | 1336 | void ubifs_destroy_size_tree(struct ubifs_info *c) |
1337 | { | 1337 | { |
1338 | struct rb_node *this = c->size_tree.rb_node; | 1338 | struct size_entry *e, *n; |
1339 | struct size_entry *e; | ||
1340 | 1339 | ||
1341 | while (this) { | 1340 | rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { |
1342 | if (this->rb_left) { | ||
1343 | this = this->rb_left; | ||
1344 | continue; | ||
1345 | } else if (this->rb_right) { | ||
1346 | this = this->rb_right; | ||
1347 | continue; | ||
1348 | } | ||
1349 | e = rb_entry(this, struct size_entry, rb); | ||
1350 | if (e->inode) | 1341 | if (e->inode) |
1351 | iput(e->inode); | 1342 | iput(e->inode); |
1352 | this = rb_parent(this); | ||
1353 | if (this) { | ||
1354 | if (this->rb_left == &e->rb) | ||
1355 | this->rb_left = NULL; | ||
1356 | else | ||
1357 | this->rb_right = NULL; | ||
1358 | } | ||
1359 | kfree(e); | 1343 | kfree(e); |
1360 | } | 1344 | } |
1345 | |||
1361 | c->size_tree = RB_ROOT; | 1346 | c->size_tree = RB_ROOT; |
1362 | } | 1347 | } |
1363 | 1348 | ||
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index f69daa514a57..5ded8490c0c6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -873,26 +873,10 @@ static void free_orphans(struct ubifs_info *c) | |||
873 | */ | 873 | */ |
874 | static void free_buds(struct ubifs_info *c) | 874 | static void free_buds(struct ubifs_info *c) |
875 | { | 875 | { |
876 | struct rb_node *this = c->buds.rb_node; | 876 | struct ubifs_bud *bud, *n; |
877 | struct ubifs_bud *bud; | 877 | |
878 | 878 | rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) | |
879 | while (this) { | 879 | kfree(bud); |
880 | if (this->rb_left) | ||
881 | this = this->rb_left; | ||
882 | else if (this->rb_right) | ||
883 | this = this->rb_right; | ||
884 | else { | ||
885 | bud = rb_entry(this, struct ubifs_bud, rb); | ||
886 | this = rb_parent(this); | ||
887 | if (this) { | ||
888 | if (this->rb_left == &bud->rb) | ||
889 | this->rb_left = NULL; | ||
890 | else | ||
891 | this->rb_right = NULL; | ||
892 | } | ||
893 | kfree(bud); | ||
894 | } | ||
895 | } | ||
896 | } | 880 | } |
897 | 881 | ||
898 | /** | 882 | /** |
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 349f31a30f40..9083bc7ed4ae 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
@@ -178,27 +178,11 @@ static int ins_clr_old_idx_znode(struct ubifs_info *c, | |||
178 | */ | 178 | */ |
179 | void destroy_old_idx(struct ubifs_info *c) | 179 | void destroy_old_idx(struct ubifs_info *c) |
180 | { | 180 | { |
181 | struct rb_node *this = c->old_idx.rb_node; | 181 | struct ubifs_old_idx *old_idx, *n; |
182 | struct ubifs_old_idx *old_idx; | ||
183 | 182 | ||
184 | while (this) { | 183 | rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb) |
185 | if (this->rb_left) { | ||
186 | this = this->rb_left; | ||
187 | continue; | ||
188 | } else if (this->rb_right) { | ||
189 | this = this->rb_right; | ||
190 | continue; | ||
191 | } | ||
192 | old_idx = rb_entry(this, struct ubifs_old_idx, rb); | ||
193 | this = rb_parent(this); | ||
194 | if (this) { | ||
195 | if (this->rb_left == &old_idx->rb) | ||
196 | this->rb_left = NULL; | ||
197 | else | ||
198 | this->rb_right = NULL; | ||
199 | } | ||
200 | kfree(old_idx); | 184 | kfree(old_idx); |
201 | } | 185 | |
202 | c->old_idx = RB_ROOT; | 186 | c->old_idx = RB_ROOT; |
203 | } | 187 | } |
204 | 188 | ||
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h new file mode 100644 index 000000000000..5a64ca4621f3 --- /dev/null +++ b/include/asm-generic/fixmap.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 | ||
12 | * Break out common bits to asm-generic by Mark Salter, November 2013 | ||
13 | */ | ||
14 | |||
15 | #ifndef __ASM_GENERIC_FIXMAP_H | ||
16 | #define __ASM_GENERIC_FIXMAP_H | ||
17 | |||
18 | #include <linux/bug.h> | ||
19 | |||
20 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
21 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | /* | ||
25 | * 'index to address' translation. If anyone tries to use the idx | ||
26 | * directly without translation, we catch the bug with a NULL-deference | ||
27 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
28 | */ | ||
29 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
30 | { | ||
31 | BUILD_BUG_ON(idx >= __end_of_fixed_addresses); | ||
32 | return __fix_to_virt(idx); | ||
33 | } | ||
34 | |||
35 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
36 | { | ||
37 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
38 | return __virt_to_fix(vaddr); | ||
39 | } | ||
40 | |||
41 | /* | ||
42 | * Provide some reasonable defaults for page flags. | ||
43 | * Not all architectures use all of these different types and some | ||
44 | * architectures use different names. | ||
45 | */ | ||
46 | #ifndef FIXMAP_PAGE_NORMAL | ||
47 | #define FIXMAP_PAGE_NORMAL PAGE_KERNEL | ||
48 | #endif | ||
49 | #ifndef FIXMAP_PAGE_NOCACHE | ||
50 | #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE | ||
51 | #endif | ||
52 | #ifndef FIXMAP_PAGE_IO | ||
53 | #define FIXMAP_PAGE_IO PAGE_KERNEL_IO | ||
54 | #endif | ||
55 | #ifndef FIXMAP_PAGE_CLEAR | ||
56 | #define FIXMAP_PAGE_CLEAR __pgprot(0) | ||
57 | #endif | ||
58 | |||
59 | #ifndef set_fixmap | ||
60 | #define set_fixmap(idx, phys) \ | ||
61 | __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) | ||
62 | #endif | ||
63 | |||
64 | #ifndef clear_fixmap | ||
65 | #define clear_fixmap(idx) \ | ||
66 | __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) | ||
67 | #endif | ||
68 | |||
69 | /* Return a pointer with offset calculated */ | ||
70 | #define __set_fixmap_offset(idx, phys, flags) \ | ||
71 | ({ \ | ||
72 | unsigned long addr; \ | ||
73 | __set_fixmap(idx, phys, flags); \ | ||
74 | addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ | ||
75 | addr; \ | ||
76 | }) | ||
77 | |||
78 | #define set_fixmap_offset(idx, phys) \ | ||
79 | __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) | ||
80 | |||
81 | /* | ||
82 | * Some hardware wants to get fixmapped without caching. | ||
83 | */ | ||
84 | #define set_fixmap_nocache(idx, phys) \ | ||
85 | __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) | ||
86 | |||
87 | #define set_fixmap_offset_nocache(idx, phys) \ | ||
88 | __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) | ||
89 | |||
90 | /* | ||
91 | * Some fixmaps are for IO | ||
92 | */ | ||
93 | #define set_fixmap_io(idx, phys) \ | ||
94 | __set_fixmap(idx, phys, FIXMAP_PAGE_IO) | ||
95 | |||
96 | #endif /* __ASSEMBLY__ */ | ||
97 | #endif /* __ASM_GENERIC_FIXMAP_H */ | ||
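The new generic header leaves a few symbols to the architecture: an enum fixed_addresses ending in __end_of_fixed_addresses, FIXADDR_TOP/FIXADDR_START, and a __set_fixmap() implementation. The fragment below is only a sketch of what such an arch-side <asm/fixmap.h> might look like; the slot names and addresses are invented:

/* arch/xyz/include/asm/fixmap.h (illustrative only) */
#ifndef _ASM_XYZ_FIXMAP_H
#define _ASM_XYZ_FIXMAP_H

enum fixed_addresses {
	FIX_EARLYCON_MEM_BASE,		/* e.g. an early console MMIO window */
	__end_of_fixed_addresses
};

#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_TOP	0xfffff000UL	/* arch-chosen virtual address */
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

extern void __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t prot);

#include <asm-generic/fixmap.h>

#endif /* _ASM_XYZ_FIXMAP_H */

With that in place, a call such as set_fixmap_io(FIX_EARLYCON_MEM_BASE, uart_phys) followed by fix_to_virt(FIX_EARLYCON_MEM_BASE) yields a usable virtual address for the device, with the page-table manipulation left entirely to the arch's __set_fixmap().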
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h deleted file mode 100644 index 27d4ec0dfce0..000000000000 --- a/include/asm-generic/int-l64.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | /* | ||
2 | * asm-generic/int-l64.h | ||
3 | * | ||
4 | * Integer declarations for architectures which use "long" | ||
5 | * for 64-bit types. | ||
6 | */ | ||
7 | #ifndef _ASM_GENERIC_INT_L64_H | ||
8 | #define _ASM_GENERIC_INT_L64_H | ||
9 | |||
10 | #include <uapi/asm-generic/int-l64.h> | ||
11 | |||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | typedef signed char s8; | ||
16 | typedef unsigned char u8; | ||
17 | |||
18 | typedef signed short s16; | ||
19 | typedef unsigned short u16; | ||
20 | |||
21 | typedef signed int s32; | ||
22 | typedef unsigned int u32; | ||
23 | |||
24 | typedef signed long s64; | ||
25 | typedef unsigned long u64; | ||
26 | |||
27 | #define S8_C(x) x | ||
28 | #define U8_C(x) x ## U | ||
29 | #define S16_C(x) x | ||
30 | #define U16_C(x) x ## U | ||
31 | #define S32_C(x) x | ||
32 | #define U32_C(x) x ## U | ||
33 | #define S64_C(x) x ## L | ||
34 | #define U64_C(x) x ## UL | ||
35 | |||
36 | #else /* __ASSEMBLY__ */ | ||
37 | |||
38 | #define S8_C(x) x | ||
39 | #define U8_C(x) x | ||
40 | #define S16_C(x) x | ||
41 | #define U16_C(x) x | ||
42 | #define S32_C(x) x | ||
43 | #define U32_C(x) x | ||
44 | #define S64_C(x) x | ||
45 | #define U64_C(x) x | ||
46 | |||
47 | #endif /* __ASSEMBLY__ */ | ||
48 | |||
49 | #endif /* _ASM_GENERIC_INT_L64_H */ | ||
diff --git a/include/linux/cache.h b/include/linux/cache.h index 4c570653ab84..17e7e82d2aa7 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h | |||
@@ -1,11 +1,11 @@ | |||
1 | #ifndef __LINUX_CACHE_H | 1 | #ifndef __LINUX_CACHE_H |
2 | #define __LINUX_CACHE_H | 2 | #define __LINUX_CACHE_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <uapi/linux/kernel.h> |
5 | #include <asm/cache.h> | 5 | #include <asm/cache.h> |
6 | 6 | ||
7 | #ifndef L1_CACHE_ALIGN | 7 | #ifndef L1_CACHE_ALIGN |
8 | #define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES) | 8 | #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | #ifndef SMP_CACHE_BYTES | 11 | #ifndef SMP_CACHE_BYTES |
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 0442c3d800f0..a6ef9cc267ec 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h | |||
@@ -8,23 +8,6 @@ | |||
8 | 8 | ||
9 | #include <linux/ceph/types.h> | 9 | #include <linux/ceph/types.h> |
10 | 10 | ||
11 | /* This seemed to be the easiest place to define these */ | ||
12 | |||
13 | #define U8_MAX ((u8)(~0U)) | ||
14 | #define U16_MAX ((u16)(~0U)) | ||
15 | #define U32_MAX ((u32)(~0U)) | ||
16 | #define U64_MAX ((u64)(~0ULL)) | ||
17 | |||
18 | #define S8_MAX ((s8)(U8_MAX >> 1)) | ||
19 | #define S16_MAX ((s16)(U16_MAX >> 1)) | ||
20 | #define S32_MAX ((s32)(U32_MAX >> 1)) | ||
21 | #define S64_MAX ((s64)(U64_MAX >> 1LL)) | ||
22 | |||
23 | #define S8_MIN ((s8)(-S8_MAX - 1)) | ||
24 | #define S16_MIN ((s16)(-S16_MAX - 1)) | ||
25 | #define S32_MIN ((s32)(-S32_MAX - 1)) | ||
26 | #define S64_MIN ((s64)(-S64_MAX - 1LL)) | ||
27 | |||
28 | /* | 11 | /* |
29 | * in all cases, | 12 | * in all cases, |
30 | * void **p pointer to position pointer | 13 | * void **p pointer to position pointer |
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1eda33d7cb10..1c2fdaa2ffc3 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #ifndef __GENALLOC_H__ | 30 | #ifndef __GENALLOC_H__ |
31 | #define __GENALLOC_H__ | 31 | #define __GENALLOC_H__ |
32 | 32 | ||
33 | #include <linux/spinlock_types.h> | ||
34 | |||
33 | struct device; | 35 | struct device; |
34 | struct device_node; | 36 | struct device_node; |
35 | 37 | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 9b4dd491f7e8..0437439bc047 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __LINUX_GFP_H | 1 | #ifndef __LINUX_GFP_H |
2 | #define __LINUX_GFP_H | 2 | #define __LINUX_GFP_H |
3 | 3 | ||
4 | #include <linux/mmdebug.h> | ||
4 | #include <linux/mmzone.h> | 5 | #include <linux/mmzone.h> |
5 | #include <linux/stddef.h> | 6 | #include <linux/stddef.h> |
6 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d01cc972a1d9..8c43cc469d78 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_HUGETLB_H | 2 | #define _LINUX_HUGETLB_H |
3 | 3 | ||
4 | #include <linux/mm_types.h> | 4 | #include <linux/mm_types.h> |
5 | #include <linux/mmdebug.h> | ||
5 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
6 | #include <linux/hugetlb_inline.h> | 7 | #include <linux/hugetlb_inline.h> |
7 | #include <linux/cgroup.h> | 8 | #include <linux/cgroup.h> |
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, | |||
354 | 355 | ||
355 | static inline struct hstate *page_hstate(struct page *page) | 356 | static inline struct hstate *page_hstate(struct page *page) |
356 | { | 357 | { |
357 | VM_BUG_ON(!PageHuge(page)); | 358 | VM_BUG_ON_PAGE(!PageHuge(page), page); |
358 | return size_to_hstate(PAGE_SIZE << compound_order(page)); | 359 | return size_to_hstate(PAGE_SIZE << compound_order(page)); |
359 | } | 360 | } |
360 | 361 | ||
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index ce8217f7b5c2..787bba3bf552 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef _LINUX_HUGETLB_CGROUP_H | 15 | #ifndef _LINUX_HUGETLB_CGROUP_H |
16 | #define _LINUX_HUGETLB_CGROUP_H | 16 | #define _LINUX_HUGETLB_CGROUP_H |
17 | 17 | ||
18 | #include <linux/mmdebug.h> | ||
18 | #include <linux/res_counter.h> | 19 | #include <linux/res_counter.h> |
19 | 20 | ||
20 | struct hugetlb_cgroup; | 21 | struct hugetlb_cgroup; |
@@ -28,7 +29,7 @@ struct hugetlb_cgroup; | |||
28 | 29 | ||
29 | static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) | 30 | static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) |
30 | { | 31 | { |
31 | VM_BUG_ON(!PageHuge(page)); | 32 | VM_BUG_ON_PAGE(!PageHuge(page), page); |
32 | 33 | ||
33 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | 34 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) |
34 | return NULL; | 35 | return NULL; |
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) | |||
38 | static inline | 39 | static inline |
39 | int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) | 40 | int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) |
40 | { | 41 | { |
41 | VM_BUG_ON(!PageHuge(page)); | 42 | VM_BUG_ON_PAGE(!PageHuge(page), page); |
42 | 43 | ||
43 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | 44 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) |
44 | return -1; | 45 | return -1; |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2aa3d4b000e6..f74bb581ab64 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -29,6 +29,19 @@ | |||
29 | #define ULLONG_MAX (~0ULL) | 29 | #define ULLONG_MAX (~0ULL) |
30 | #define SIZE_MAX (~(size_t)0) | 30 | #define SIZE_MAX (~(size_t)0) |
31 | 31 | ||
32 | #define U8_MAX ((u8)~0U) | ||
33 | #define S8_MAX ((s8)(U8_MAX>>1)) | ||
34 | #define S8_MIN ((s8)(-S8_MAX - 1)) | ||
35 | #define U16_MAX ((u16)~0U) | ||
36 | #define S16_MAX ((s16)(U16_MAX>>1)) | ||
37 | #define S16_MIN ((s16)(-S16_MAX - 1)) | ||
38 | #define U32_MAX ((u32)~0U) | ||
39 | #define S32_MAX ((s32)(U32_MAX>>1)) | ||
40 | #define S32_MIN ((s32)(-S32_MAX - 1)) | ||
41 | #define U64_MAX ((u64)~0ULL) | ||
42 | #define S64_MAX ((s64)(U64_MAX>>1)) | ||
43 | #define S64_MIN ((s64)(-S64_MAX - 1)) | ||
44 | |||
32 | #define STACK_MAGIC 0xdeadbeef | 45 | #define STACK_MAGIC 0xdeadbeef |
33 | 46 | ||
34 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | 47 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
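A small, hypothetical example of the new limit macros replacing hand-rolled constants, here when saturating a wider value into a signed 16-bit hardware field:

#include <linux/kernel.h>

/* Clamp a 32-bit sample into the range representable by an s16 register. */
static inline s16 clamp_sample_to_s16(s32 val)
{
	return (s16)clamp_t(s32, val, S16_MIN, S16_MAX);
}

The macros live in kernel.h precisely so users such as the ceph decoder above no longer need private copies.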
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 5fd33dc1fe3a..6d4066cdb5b5 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -170,6 +170,7 @@ unsigned long paddr_vmcoreinfo_note(void); | |||
170 | 170 | ||
171 | extern struct kimage *kexec_image; | 171 | extern struct kimage *kexec_image; |
172 | extern struct kimage *kexec_crash_image; | 172 | extern struct kimage *kexec_crash_image; |
173 | extern int kexec_load_disabled; | ||
173 | 174 | ||
174 | #ifndef kexec_flush_icache_page | 175 | #ifndef kexec_flush_icache_page |
175 | #define kexec_flush_icache_page(page) | 176 | #define kexec_flush_icache_page(page) |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cd0274bebd4c..1ef66360f0b0 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -61,6 +61,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, | |||
61 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, | 61 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, |
62 | phys_addr_t size, phys_addr_t align); | 62 | phys_addr_t size, phys_addr_t align); |
63 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); | 63 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); |
64 | phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); | ||
64 | void memblock_allow_resize(void); | 65 | void memblock_allow_resize(void); |
65 | int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); | 66 | int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); |
66 | int memblock_add(phys_addr_t base, phys_addr_t size); | 67 | int memblock_add(phys_addr_t base, phys_addr_t size); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b3e7a667e03c..abd0113b6620 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -497,10 +497,11 @@ void __memcg_kmem_commit_charge(struct page *page, | |||
497 | void __memcg_kmem_uncharge_pages(struct page *page, int order); | 497 | void __memcg_kmem_uncharge_pages(struct page *page, int order); |
498 | 498 | ||
499 | int memcg_cache_id(struct mem_cgroup *memcg); | 499 | int memcg_cache_id(struct mem_cgroup *memcg); |
500 | int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | 500 | int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, |
501 | struct kmem_cache *root_cache); | 501 | struct kmem_cache *root_cache); |
502 | void memcg_release_cache(struct kmem_cache *cachep); | 502 | void memcg_free_cache_params(struct kmem_cache *s); |
503 | void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); | 503 | void memcg_register_cache(struct kmem_cache *s); |
504 | void memcg_unregister_cache(struct kmem_cache *s); | ||
504 | 505 | ||
505 | int memcg_update_cache_size(struct kmem_cache *s, int num_groups); | 506 | int memcg_update_cache_size(struct kmem_cache *s, int num_groups); |
506 | void memcg_update_array_size(int num_groups); | 507 | void memcg_update_array_size(int num_groups); |
@@ -640,19 +641,21 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) | |||
640 | return -1; | 641 | return -1; |
641 | } | 642 | } |
642 | 643 | ||
643 | static inline int | 644 | static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, |
644 | memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | 645 | struct kmem_cache *s, struct kmem_cache *root_cache) |
645 | struct kmem_cache *root_cache) | ||
646 | { | 646 | { |
647 | return 0; | 647 | return 0; |
648 | } | 648 | } |
649 | 649 | ||
650 | static inline void memcg_release_cache(struct kmem_cache *cachep) | 650 | static inline void memcg_free_cache_params(struct kmem_cache *s) |
651 | { | ||
652 | } | ||
653 | |||
654 | static inline void memcg_register_cache(struct kmem_cache *s) | ||
651 | { | 655 | { |
652 | } | 656 | } |
653 | 657 | ||
654 | static inline void memcg_cache_list_add(struct mem_cgroup *memcg, | 658 | static inline void memcg_unregister_cache(struct kmem_cache *s) |
655 | struct kmem_cache *s) | ||
656 | { | 659 | { |
657 | } | 660 | } |
658 | 661 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index a512dd836931..d9992fc128ca 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
7 | 7 | ||
8 | #include <linux/mmdebug.h> | ||
8 | #include <linux/gfp.h> | 9 | #include <linux/gfp.h> |
9 | #include <linux/bug.h> | 10 | #include <linux/bug.h> |
10 | #include <linux/list.h> | 11 | #include <linux/list.h> |
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page) | |||
303 | */ | 304 | */ |
304 | static inline int put_page_testzero(struct page *page) | 305 | static inline int put_page_testzero(struct page *page) |
305 | { | 306 | { |
306 | VM_BUG_ON(atomic_read(&page->_count) == 0); | 307 | VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); |
307 | return atomic_dec_and_test(&page->_count); | 308 | return atomic_dec_and_test(&page->_count); |
308 | } | 309 | } |
309 | 310 | ||
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) | |||
364 | static inline void compound_lock(struct page *page) | 365 | static inline void compound_lock(struct page *page) |
365 | { | 366 | { |
366 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 367 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
367 | VM_BUG_ON(PageSlab(page)); | 368 | VM_BUG_ON_PAGE(PageSlab(page), page); |
368 | bit_spin_lock(PG_compound_lock, &page->flags); | 369 | bit_spin_lock(PG_compound_lock, &page->flags); |
369 | #endif | 370 | #endif |
370 | } | 371 | } |
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page) | |||
372 | static inline void compound_unlock(struct page *page) | 373 | static inline void compound_unlock(struct page *page) |
373 | { | 374 | { |
374 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 375 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
375 | VM_BUG_ON(PageSlab(page)); | 376 | VM_BUG_ON_PAGE(PageSlab(page), page); |
376 | bit_spin_unlock(PG_compound_lock, &page->flags); | 377 | bit_spin_unlock(PG_compound_lock, &page->flags); |
377 | #endif | 378 | #endif |
378 | } | 379 | } |
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page) | |||
447 | */ | 448 | */ |
448 | static inline bool compound_tail_refcounted(struct page *page) | 449 | static inline bool compound_tail_refcounted(struct page *page) |
449 | { | 450 | { |
450 | VM_BUG_ON(!PageHead(page)); | 451 | VM_BUG_ON_PAGE(!PageHead(page), page); |
451 | return __compound_tail_refcounted(page); | 452 | return __compound_tail_refcounted(page); |
452 | } | 453 | } |
453 | 454 | ||
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page) | |||
456 | /* | 457 | /* |
457 | * __split_huge_page_refcount() cannot run from under us. | 458 | * __split_huge_page_refcount() cannot run from under us. |
458 | */ | 459 | */ |
459 | VM_BUG_ON(!PageTail(page)); | 460 | VM_BUG_ON_PAGE(!PageTail(page), page); |
460 | VM_BUG_ON(page_mapcount(page) < 0); | 461 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
461 | VM_BUG_ON(atomic_read(&page->_count) != 0); | 462 | VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); |
462 | if (compound_tail_refcounted(page->first_page)) | 463 | if (compound_tail_refcounted(page->first_page)) |
463 | atomic_inc(&page->_mapcount); | 464 | atomic_inc(&page->_mapcount); |
464 | } | 465 | } |
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page) | |||
474 | * Getting a normal page or the head of a compound page | 475 | * Getting a normal page or the head of a compound page |
475 | * requires to already have an elevated page->_count. | 476 | * requires to already have an elevated page->_count. |
476 | */ | 477 | */ |
477 | VM_BUG_ON(atomic_read(&page->_count) <= 0); | 478 | VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); |
478 | atomic_inc(&page->_count); | 479 | atomic_inc(&page->_count); |
479 | } | 480 | } |
480 | 481 | ||
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page) | |||
511 | 512 | ||
512 | static inline void __SetPageBuddy(struct page *page) | 513 | static inline void __SetPageBuddy(struct page *page) |
513 | { | 514 | { |
514 | VM_BUG_ON(atomic_read(&page->_mapcount) != -1); | 515 | VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); |
515 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); | 516 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); |
516 | } | 517 | } |
517 | 518 | ||
518 | static inline void __ClearPageBuddy(struct page *page) | 519 | static inline void __ClearPageBuddy(struct page *page) |
519 | { | 520 | { |
520 | VM_BUG_ON(!PageBuddy(page)); | 521 | VM_BUG_ON_PAGE(!PageBuddy(page), page); |
521 | atomic_set(&page->_mapcount, -1); | 522 | atomic_set(&page->_mapcount, -1); |
522 | } | 523 | } |
523 | 524 | ||
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page) | |||
1401 | * slab code uses page->slab_cache and page->first_page (for tail | 1402 | * slab code uses page->slab_cache and page->first_page (for tail |
1402 | * pages), which share storage with page->ptl. | 1403 | * pages), which share storage with page->ptl. |
1403 | */ | 1404 | */ |
1404 | VM_BUG_ON(*(unsigned long *)&page->ptl); | 1405 | VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); |
1405 | if (!ptlock_alloc(page)) | 1406 | if (!ptlock_alloc(page)) |
1406 | return false; | 1407 | return false; |
1407 | spin_lock_init(ptlock_ptr(page)); | 1408 | spin_lock_init(ptlock_ptr(page)); |
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page) | |||
1492 | static inline void pgtable_pmd_page_dtor(struct page *page) | 1493 | static inline void pgtable_pmd_page_dtor(struct page *page) |
1493 | { | 1494 | { |
1494 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1495 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1495 | VM_BUG_ON(page->pmd_huge_pte); | 1496 | VM_BUG_ON_PAGE(page->pmd_huge_pte, page); |
1496 | #endif | 1497 | #endif |
1497 | ptlock_free(page); | 1498 | ptlock_free(page); |
1498 | } | 1499 | } |
@@ -2029,8 +2030,6 @@ extern void shake_page(struct page *p, int access); | |||
2029 | extern atomic_long_t num_poisoned_pages; | 2030 | extern atomic_long_t num_poisoned_pages; |
2030 | extern int soft_offline_page(struct page *page, int flags); | 2031 | extern int soft_offline_page(struct page *page, int flags); |
2031 | 2032 | ||
2032 | extern void dump_page(struct page *page); | ||
2033 | |||
2034 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) | 2033 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) |
2035 | extern void clear_huge_page(struct page *page, | 2034 | extern void clear_huge_page(struct page *page, |
2036 | unsigned long addr, | 2035 | unsigned long addr, |
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 580bd587d916..5042c036dda9 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h | |||
@@ -1,10 +1,19 @@ | |||
1 | #ifndef LINUX_MM_DEBUG_H | 1 | #ifndef LINUX_MM_DEBUG_H |
2 | #define LINUX_MM_DEBUG_H 1 | 2 | #define LINUX_MM_DEBUG_H 1 |
3 | 3 | ||
4 | struct page; | ||
5 | |||
6 | extern void dump_page(struct page *page, char *reason); | ||
7 | extern void dump_page_badflags(struct page *page, char *reason, | ||
8 | unsigned long badflags); | ||
9 | |||
4 | #ifdef CONFIG_DEBUG_VM | 10 | #ifdef CONFIG_DEBUG_VM |
5 | #define VM_BUG_ON(cond) BUG_ON(cond) | 11 | #define VM_BUG_ON(cond) BUG_ON(cond) |
12 | #define VM_BUG_ON_PAGE(cond, page) \ | ||
13 | do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) | ||
6 | #else | 14 | #else |
7 | #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) | 15 | #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) |
16 | #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) | ||
8 | #endif | 17 | #endif |
9 | 18 | ||
10 | #ifdef CONFIG_DEBUG_VIRTUAL | 19 | #ifdef CONFIG_DEBUG_VIRTUAL |
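A hedged sketch of a caller using the new page-aware assertion and the reworked dump_page() signature; the surrounding function is hypothetical:

#include <linux/mm.h>
#include <linux/page-flags.h>

static void my_check_page(struct page *page)
{
	/* With CONFIG_DEBUG_VM this dumps the offending page before BUG()ing. */
	VM_BUG_ON_PAGE(PageTail(page), page);

	/* Non-fatal diagnostics can now pass a human-readable reason. */
	if (unlikely(PageReserved(page)))
		dump_page(page, "unexpected reserved page");
}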
diff --git a/include/linux/of.h b/include/linux/of.h index 276c546980d8..70c64ba17fa5 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -377,8 +377,13 @@ static inline bool of_have_populated_dt(void) | |||
377 | return false; | 377 | return false; |
378 | } | 378 | } |
379 | 379 | ||
380 | /* Kill an unused variable warning on a device_node pointer */ | ||
381 | static inline void __of_use_dn(const struct device_node *np) | ||
382 | { | ||
383 | } | ||
384 | |||
380 | #define for_each_child_of_node(parent, child) \ | 385 | #define for_each_child_of_node(parent, child) \ |
381 | while (0) | 386 | while (__of_use_dn(parent), __of_use_dn(child), 0) |
382 | 387 | ||
383 | #define for_each_available_child_of_node(parent, child) \ | 388 | #define for_each_available_child_of_node(parent, child) \ |
384 | while (0) | 389 | while (0) |
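The stub change matters for code that must build both with and without CONFIG_OF; a minimal, hypothetical consumer:

#include <linux/of.h>

static int count_dt_children(struct device_node *np)
{
	struct device_node *child;
	int n = 0;

	/*
	 * With CONFIG_OF=n the loop body is never executed, but the
	 * __of_use_dn() calls in the stub keep 'np' and 'child'
	 * referenced, so the compiler no longer warns that they are
	 * unused.
	 */
	for_each_child_of_node(np, child)
		n++;

	return n;
}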
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 98ada58f9942..e464b4e987e8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page) | |||
412 | */ | 412 | */ |
413 | static inline int PageTransHuge(struct page *page) | 413 | static inline int PageTransHuge(struct page *page) |
414 | { | 414 | { |
415 | VM_BUG_ON(PageTail(page)); | 415 | VM_BUG_ON_PAGE(PageTail(page), page); |
416 | return PageHead(page); | 416 | return PageHead(page); |
417 | } | 417 | } |
418 | 418 | ||
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page) | |||
460 | */ | 460 | */ |
461 | static inline int PageSlabPfmemalloc(struct page *page) | 461 | static inline int PageSlabPfmemalloc(struct page *page) |
462 | { | 462 | { |
463 | VM_BUG_ON(!PageSlab(page)); | 463 | VM_BUG_ON_PAGE(!PageSlab(page), page); |
464 | return PageActive(page); | 464 | return PageActive(page); |
465 | } | 465 | } |
466 | 466 | ||
467 | static inline void SetPageSlabPfmemalloc(struct page *page) | 467 | static inline void SetPageSlabPfmemalloc(struct page *page) |
468 | { | 468 | { |
469 | VM_BUG_ON(!PageSlab(page)); | 469 | VM_BUG_ON_PAGE(!PageSlab(page), page); |
470 | SetPageActive(page); | 470 | SetPageActive(page); |
471 | } | 471 | } |
472 | 472 | ||
473 | static inline void __ClearPageSlabPfmemalloc(struct page *page) | 473 | static inline void __ClearPageSlabPfmemalloc(struct page *page) |
474 | { | 474 | { |
475 | VM_BUG_ON(!PageSlab(page)); | 475 | VM_BUG_ON_PAGE(!PageSlab(page), page); |
476 | __ClearPageActive(page); | 476 | __ClearPageActive(page); |
477 | } | 477 | } |
478 | 478 | ||
479 | static inline void ClearPageSlabPfmemalloc(struct page *page) | 479 | static inline void ClearPageSlabPfmemalloc(struct page *page) |
480 | { | 480 | { |
481 | VM_BUG_ON(!PageSlab(page)); | 481 | VM_BUG_ON_PAGE(!PageSlab(page), page); |
482 | ClearPageActive(page); | 482 | ClearPageActive(page); |
483 | } | 483 | } |
484 | 484 | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e3dea75a078b..1710d1b060ba 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page) | |||
162 | * disabling preempt, and hence no need for the "speculative get" that | 162 | * disabling preempt, and hence no need for the "speculative get" that |
163 | * SMP requires. | 163 | * SMP requires. |
164 | */ | 164 | */ |
165 | VM_BUG_ON(page_count(page) == 0); | 165 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
166 | atomic_inc(&page->_count); | 166 | atomic_inc(&page->_count); |
167 | 167 | ||
168 | #else | 168 | #else |
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page) | |||
175 | return 0; | 175 | return 0; |
176 | } | 176 | } |
177 | #endif | 177 | #endif |
178 | VM_BUG_ON(PageTail(page)); | 178 | VM_BUG_ON_PAGE(PageTail(page), page); |
179 | 179 | ||
180 | return 1; | 180 | return 1; |
181 | } | 181 | } |
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count) | |||
191 | # ifdef CONFIG_PREEMPT_COUNT | 191 | # ifdef CONFIG_PREEMPT_COUNT |
192 | VM_BUG_ON(!in_atomic()); | 192 | VM_BUG_ON(!in_atomic()); |
193 | # endif | 193 | # endif |
194 | VM_BUG_ON(page_count(page) == 0); | 194 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
195 | atomic_add(count, &page->_count); | 195 | atomic_add(count, &page->_count); |
196 | 196 | ||
197 | #else | 197 | #else |
198 | if (unlikely(!atomic_add_unless(&page->_count, count, 0))) | 198 | if (unlikely(!atomic_add_unless(&page->_count, count, 0))) |
199 | return 0; | 199 | return 0; |
200 | #endif | 200 | #endif |
201 | VM_BUG_ON(PageCompound(page) && page != compound_head(page)); | 201 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); |
202 | 202 | ||
203 | return 1; | 203 | return 1; |
204 | } | 204 | } |
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count) | |||
210 | 210 | ||
211 | static inline void page_unfreeze_refs(struct page *page, int count) | 211 | static inline void page_unfreeze_refs(struct page *page, int count) |
212 | { | 212 | { |
213 | VM_BUG_ON(page_count(page) != 0); | 213 | VM_BUG_ON_PAGE(page_count(page) != 0, page); |
214 | VM_BUG_ON(count == 0); | 214 | VM_BUG_ON(count == 0); |
215 | 215 | ||
216 | atomic_set(&page->_count, count); | 216 | atomic_set(&page->_count, count); |
diff --git a/include/linux/parser.h b/include/linux/parser.h index ea2281e726f6..39d5b7955b23 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h | |||
@@ -29,5 +29,6 @@ int match_token(char *, const match_table_t table, substring_t args[]); | |||
29 | int match_int(substring_t *, int *result); | 29 | int match_int(substring_t *, int *result); |
30 | int match_octal(substring_t *, int *result); | 30 | int match_octal(substring_t *, int *result); |
31 | int match_hex(substring_t *, int *result); | 31 | int match_hex(substring_t *, int *result); |
32 | bool match_wildcard(const char *pattern, const char *str); | ||
32 | size_t match_strlcpy(char *, const substring_t *, size_t); | 33 | size_t match_strlcpy(char *, const substring_t *, size_t); |
33 | char *match_strdup(const substring_t *); | 34 | char *match_strdup(const substring_t *); |
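A sketch of the new helper in use; this is the primitive the dynamic_debug change at the end of this series relies on for module-name globbing. The implementation added to lib/parser.c (not shown here) treats '*' as any run of characters and '?' as a single character:

#include <linux/parser.h>
#include <linux/types.h>

/* True for e.g. pattern "usb*" against "usbcore" or "usbhid". */
static bool module_matches(const char *pattern, const char *mod_name)
{
	return match_wildcard(pattern, mod_name);
}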
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9e4761caa80c..e3817d2441b6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __LINUX_PERCPU_H | 1 | #ifndef __LINUX_PERCPU_H |
2 | #define __LINUX_PERCPU_H | 2 | #define __LINUX_PERCPU_H |
3 | 3 | ||
4 | #include <linux/mmdebug.h> | ||
4 | #include <linux/preempt.h> | 5 | #include <linux/preempt.h> |
5 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
6 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
diff --git a/include/linux/printk.h b/include/linux/printk.h index 694925837a16..cc6f74d65167 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
6 | #include <linux/kern_levels.h> | 6 | #include <linux/kern_levels.h> |
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
8 | #include <linux/cache.h> | ||
8 | 9 | ||
9 | extern const char linux_banner[]; | 10 | extern const char linux_banner[]; |
10 | extern const char linux_proc_banner[]; | 11 | extern const char linux_proc_banner[]; |
@@ -253,17 +254,17 @@ extern asmlinkage void dump_stack(void) __cold; | |||
253 | */ | 254 | */ |
254 | 255 | ||
255 | #ifdef CONFIG_PRINTK | 256 | #ifdef CONFIG_PRINTK |
256 | #define printk_once(fmt, ...) \ | 257 | #define printk_once(fmt, ...) \ |
257 | ({ \ | 258 | ({ \ |
258 | static bool __print_once; \ | 259 | static bool __print_once __read_mostly; \ |
259 | \ | 260 | \ |
260 | if (!__print_once) { \ | 261 | if (!__print_once) { \ |
261 | __print_once = true; \ | 262 | __print_once = true; \ |
262 | printk(fmt, ##__VA_ARGS__); \ | 263 | printk(fmt, ##__VA_ARGS__); \ |
263 | } \ | 264 | } \ |
264 | }) | 265 | }) |
265 | #else | 266 | #else |
266 | #define printk_once(fmt, ...) \ | 267 | #define printk_once(fmt, ...) \ |
267 | no_printk(fmt, ##__VA_ARGS__) | 268 | no_printk(fmt, ##__VA_ARGS__) |
268 | #endif | 269 | #endif |
269 | 270 | ||
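The guard flag is written at most once but read on every subsequent call, so tagging it __read_mostly keeps it away from frequently written data. Usage is unchanged; a trivial, hypothetical example:

#include <linux/printk.h>

static void apply_hw_quirk(void)
{
	/* Emitted only the first time the quirk path is hit. */
	printk_once(KERN_WARNING "mydrv: applying erratum workaround\n");
}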
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 753207c8ce20..ecc730977a5a 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h | |||
@@ -14,13 +14,6 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
14 | } | 14 | } |
15 | #else | 15 | #else |
16 | extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); | 16 | extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); |
17 | extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | ||
18 | unsigned long addr, | ||
19 | unsigned long len, | ||
20 | unsigned long pgoff, | ||
21 | unsigned long flags); | ||
22 | |||
23 | extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); | ||
24 | #endif | 17 | #endif |
25 | 18 | ||
26 | extern const struct file_operations ramfs_file_operations; | 19 | extern const struct file_operations ramfs_file_operations; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 485234d2fd42..68a0e84463a0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -229,7 +229,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
229 | /* get_task_state() */ | 229 | /* get_task_state() */ |
230 | #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ | 230 | #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ |
231 | TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ | 231 | TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ |
232 | __TASK_TRACED) | 232 | __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) |
233 | 233 | ||
234 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) | 234 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
235 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) | 235 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) |
@@ -391,22 +391,33 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, | |||
391 | static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} | 391 | static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} |
392 | #endif | 392 | #endif |
393 | 393 | ||
394 | |||
395 | extern void set_dumpable(struct mm_struct *mm, int value); | ||
396 | extern int get_dumpable(struct mm_struct *mm); | ||
397 | |||
398 | #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ | 394 | #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ |
399 | #define SUID_DUMP_USER 1 /* Dump as user of process */ | 395 | #define SUID_DUMP_USER 1 /* Dump as user of process */ |
400 | #define SUID_DUMP_ROOT 2 /* Dump as root */ | 396 | #define SUID_DUMP_ROOT 2 /* Dump as root */ |
401 | 397 | ||
402 | /* mm flags */ | 398 | /* mm flags */ |
403 | /* dumpable bits */ | ||
404 | #define MMF_DUMPABLE 0 /* core dump is permitted */ | ||
405 | #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ | ||
406 | 399 | ||
400 | /* for SUID_DUMP_* above */ | ||
407 | #define MMF_DUMPABLE_BITS 2 | 401 | #define MMF_DUMPABLE_BITS 2 |
408 | #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) | 402 | #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) |
409 | 403 | ||
404 | extern void set_dumpable(struct mm_struct *mm, int value); | ||
405 | /* | ||
406 | * This returns the actual value of the suid_dumpable flag. For things | ||
407 | * that are using this for checking for privilege transitions, it must | ||
408 | * test against SUID_DUMP_USER rather than treating it as a boolean | ||
409 | * value. | ||
410 | */ | ||
411 | static inline int __get_dumpable(unsigned long mm_flags) | ||
412 | { | ||
413 | return mm_flags & MMF_DUMPABLE_MASK; | ||
414 | } | ||
415 | |||
416 | static inline int get_dumpable(struct mm_struct *mm) | ||
417 | { | ||
418 | return __get_dumpable(mm->flags); | ||
419 | } | ||
420 | |||
410 | /* coredump filter bits */ | 421 | /* coredump filter bits */ |
411 | #define MMF_DUMP_ANON_PRIVATE 2 | 422 | #define MMF_DUMP_ANON_PRIVATE 2 |
412 | #define MMF_DUMP_ANON_SHARED 3 | 423 | #define MMF_DUMP_ANON_SHARED 3 |
@@ -1228,7 +1239,6 @@ struct task_struct { | |||
1228 | /* Used for emulating ABI behavior of previous Linux versions */ | 1239 | /* Used for emulating ABI behavior of previous Linux versions */ |
1229 | unsigned int personality; | 1240 | unsigned int personality; |
1230 | 1241 | ||
1231 | unsigned did_exec:1; | ||
1232 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an | 1242 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an |
1233 | * execve */ | 1243 | * execve */ |
1234 | unsigned in_iowait:1; | 1244 | unsigned in_iowait:1; |
@@ -2284,8 +2294,6 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); | |||
2284 | extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); | 2294 | extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); |
2285 | /* Remove the current tasks stale references to the old mm_struct */ | 2295 | /* Remove the current tasks stale references to the old mm_struct */ |
2286 | extern void mm_release(struct task_struct *, struct mm_struct *); | 2296 | extern void mm_release(struct task_struct *, struct mm_struct *); |
2287 | /* Allocate a new mm structure and copy contents from tsk->mm */ | ||
2288 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | ||
2289 | 2297 | ||
2290 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | 2298 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
2291 | struct task_struct *); | 2299 | struct task_struct *); |
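The comment above __get_dumpable() deserves emphasis: the value is a tri-state (SUID_DUMP_DISABLE, SUID_DUMP_USER, SUID_DUMP_ROOT), not a boolean. A hedged sketch of a correct privilege check, with the caller hypothetical:

#include <linux/sched.h>

static bool may_attach_for_dump(struct mm_struct *mm)
{
	/*
	 * SUID_DUMP_ROOT (2) is also non-zero, so "if (get_dumpable(mm))"
	 * would wrongly treat a root-only dumpable mm as user-dumpable.
	 */
	return get_dumpable(mm) == SUID_DUMP_USER;
}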
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 31e0193cb0c5..b13cf430764f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
@@ -99,4 +99,8 @@ extern int sched_rt_handler(struct ctl_table *table, int write, | |||
99 | void __user *buffer, size_t *lenp, | 99 | void __user *buffer, size_t *lenp, |
100 | loff_t *ppos); | 100 | loff_t *ppos); |
101 | 101 | ||
102 | extern int sysctl_numa_balancing(struct ctl_table *table, int write, | ||
103 | void __user *buffer, size_t *lenp, | ||
104 | loff_t *ppos); | ||
105 | |||
102 | #endif /* _SCHED_SYSCTL_H */ | 106 | #endif /* _SCHED_SYSCTL_H */ |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 1e2f4fe12773..a060142aa5f5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
513 | * | 513 | * |
514 | * Both the root cache and the child caches will have it. For the root cache, | 514 | * Both the root cache and the child caches will have it. For the root cache, |
515 | * this will hold a dynamically allocated array large enough to hold | 515 | * this will hold a dynamically allocated array large enough to hold |
516 | * information about the currently limited memcgs in the system. | 516 | * information about the currently limited memcgs in the system. To allow the |
517 | * array to be accessed without taking any locks, on relocation we free the old | ||
518 | * version only after a grace period. | ||
517 | * | 519 | * |
518 | * Child caches will hold extra metadata needed for its operation. Fields are: | 520 | * Child caches will hold extra metadata needed for its operation. Fields are: |
519 | * | 521 | * |
@@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
528 | struct memcg_cache_params { | 530 | struct memcg_cache_params { |
529 | bool is_root_cache; | 531 | bool is_root_cache; |
530 | union { | 532 | union { |
531 | struct kmem_cache *memcg_caches[0]; | 533 | struct { |
534 | struct rcu_head rcu_head; | ||
535 | struct kmem_cache *memcg_caches[0]; | ||
536 | }; | ||
532 | struct { | 537 | struct { |
533 | struct mem_cgroup *memcg; | 538 | struct mem_cgroup *memcg; |
534 | struct list_head list; | 539 | struct list_head list; |
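The embedded rcu_head enables the standard pattern for relocating an array that is read without locks: publish the new copy, then free the old one only after a grace period. A generic sketch of that pattern (not the memcg code itself; every name below is hypothetical):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct my_table {
	struct rcu_head rcu_head;
	int nr;
	void *entries[];
};

static struct my_table __rcu *cur_table;
static DEFINE_SPINLOCK(table_lock);	/* serializes resizers only */

static int my_table_grow(int new_nr)
{
	struct my_table *new, *old;

	new = kzalloc(sizeof(*new) + new_nr * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->nr = new_nr;

	spin_lock(&table_lock);
	old = rcu_dereference_protected(cur_table,
					lockdep_is_held(&table_lock));
	if (old)	/* assumes new_nr >= old->nr */
		memcpy(new->entries, old->entries, old->nr * sizeof(void *));
	rcu_assign_pointer(cur_table, new);
	spin_unlock(&table_lock);

	/* Readers inside rcu_read_lock() may still be walking 'old'. */
	if (old)
		kfree_rcu(old, rcu_head);
	return 0;
}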
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index 065e3ae79ab0..d58594a32324 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h | |||
@@ -20,6 +20,7 @@ struct w1_gpio_platform_data { | |||
20 | unsigned int is_open_drain:1; | 20 | unsigned int is_open_drain:1; |
21 | void (*enable_external_pullup)(int enable); | 21 | void (*enable_external_pullup)(int enable); |
22 | unsigned int ext_pullup_enable_pin; | 22 | unsigned int ext_pullup_enable_pin; |
23 | unsigned int pullup_duration; | ||
23 | }; | 24 | }; |
24 | 25 | ||
25 | #endif /* _LINUX_W1_GPIO_H */ | 26 | #endif /* _LINUX_W1_GPIO_H */ |
diff --git a/include/uapi/asm-generic/types.h b/include/uapi/asm-generic/types.h index bd39806013b5..a3877926b0d4 100644 --- a/include/uapi/asm-generic/types.h +++ b/include/uapi/asm-generic/types.h | |||
@@ -1,8 +1,7 @@ | |||
1 | #ifndef _ASM_GENERIC_TYPES_H | 1 | #ifndef _ASM_GENERIC_TYPES_H |
2 | #define _ASM_GENERIC_TYPES_H | 2 | #define _ASM_GENERIC_TYPES_H |
3 | /* | 3 | /* |
4 | * int-ll64 is used practically everywhere now, | 4 | * int-ll64 is used everywhere now. |
5 | * so use it as a reasonable default. | ||
6 | */ | 5 | */ |
7 | #include <asm-generic/int-ll64.h> | 6 | #include <asm-generic/int-ll64.h> |
8 | 7 | ||
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h index 5fbdd3d49eba..4295c745f342 100644 --- a/include/uapi/linux/dn.h +++ b/include/uapi/linux/dn.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _LINUX_DN_H | 1 | #ifndef _LINUX_DN_H |
2 | #define _LINUX_DN_H | 2 | #define _LINUX_DN_H |
3 | 3 | ||
4 | #include <linux/ioctl.h> | ||
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/if_ether.h> | 6 | #include <linux/if_ether.h> |
6 | 7 | ||
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 788128ebac45..35f5f4c6c260 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h | |||
@@ -150,7 +150,7 @@ | |||
150 | #define NFS4_SECINFO_STYLE4_CURRENT_FH 0 | 150 | #define NFS4_SECINFO_STYLE4_CURRENT_FH 0 |
151 | #define NFS4_SECINFO_STYLE4_PARENT 1 | 151 | #define NFS4_SECINFO_STYLE4_PARENT 1 |
152 | 152 | ||
153 | #define NFS4_MAX_UINT64 (~(u64)0) | 153 | #define NFS4_MAX_UINT64 (~(__u64)0) |
154 | 154 | ||
155 | /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. | 155 | /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. |
156 | * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly. | 156 | * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly. |
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e244ed412745..853bc1ccb395 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h | |||
@@ -788,7 +788,7 @@ union perf_mem_data_src { | |||
788 | #define PERF_MEM_TLB_SHIFT 26 | 788 | #define PERF_MEM_TLB_SHIFT 26 |
789 | 789 | ||
790 | #define PERF_MEM_S(a, s) \ | 790 | #define PERF_MEM_S(a, s) \ |
791 | (((u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) | 791 | (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) |
792 | 792 | ||
793 | /* | 793 | /* |
794 | * single taken branch record layout: | 794 | * single taken branch record layout: |
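Both __u64 hunks are the same class of fix: headers under include/uapi/ are exported to userspace, where the kernel-internal u64/u32 names do not exist, so exported macros and structures must stick to the double-underscore types. A hedged illustration of the rule, with a made-up device ABI:

/* In a uapi header: only the __uXX/__sXX types from <linux/types.h>. */
#include <linux/types.h>

#define MYDEV_FLAG(bit)	(((__u64)1) << (bit))

struct mydev_ioctl_args {
	__u64 flags;		/* MYDEV_FLAG() values */
	__u32 buf_len;
	__u32 reserved;
};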
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h index 2d9a8859550a..63a23a3b8bb7 100644 --- a/include/uapi/linux/ppp-ioctl.h +++ b/include/uapi/linux/ppp-ioctl.h | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/compiler.h> | 14 | #include <linux/compiler.h> |
15 | #include <linux/ppp_defs.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Bit definitions for flags argument to PPPIOCGFLAGS/PPPIOCSFLAGS. | 18 | * Bit definitions for flags argument to PPPIOCGFLAGS/PPPIOCSFLAGS. |
diff --git a/init/initramfs.c b/init/initramfs.c index a67ef9dbda9d..93b61396756b 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -583,7 +583,7 @@ static int __init populate_rootfs(void) | |||
583 | { | 583 | { |
584 | char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); | 584 | char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); |
585 | if (err) | 585 | if (err) |
586 | panic(err); /* Failed to decompress INTERNAL initramfs */ | 586 | panic("%s", err); /* Failed to decompress INTERNAL initramfs */ |
587 | if (initrd_start) { | 587 | if (initrd_start) { |
588 | #ifdef CONFIG_BLK_DEV_RAM | 588 | #ifdef CONFIG_BLK_DEV_RAM |
589 | int fd; | 589 | int fd; |
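The small change closes a format-string hole: the decompressor's error text is not under the caller's control, so it must be passed as an argument, never as the format. The general rule, sketched with a hypothetical helper:

#include <linux/init.h>
#include <linux/kernel.h>

static void __init fail_unpack(const char *msg)
{
	/*
	 * panic(msg) would let any '%' inside msg be parsed as a
	 * conversion specifier; always feed untrusted text through a
	 * constant format instead.
	 */
	panic("%s", msg);
}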
diff --git a/init/main.c b/init/main.c index f865261fb096..f333385d9a4f 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -99,10 +99,6 @@ extern void radix_tree_init(void); | |||
99 | static inline void mark_rodata_ro(void) { } | 99 | static inline void mark_rodata_ro(void) { } |
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | #ifdef CONFIG_TC | ||
103 | extern void tc_init(void); | ||
104 | #endif | ||
105 | |||
106 | /* | 102 | /* |
107 | * Debug helper: via this flag we know that we are in 'early bootup code' | 103 | * Debug helper: via this flag we know that we are in 'early bootup code' |
108 | * where only the boot processor is running with IRQ disabled. This means | 104 | * where only the boot processor is running with IRQ disabled. This means |
@@ -282,7 +278,7 @@ static int __init unknown_bootoption(char *param, char *val, const char *unused) | |||
282 | unsigned int i; | 278 | unsigned int i; |
283 | for (i = 0; envp_init[i]; i++) { | 279 | for (i = 0; envp_init[i]; i++) { |
284 | if (i == MAX_INIT_ENVS) { | 280 | if (i == MAX_INIT_ENVS) { |
285 | panic_later = "Too many boot env vars at `%s'"; | 281 | panic_later = "env"; |
286 | panic_param = param; | 282 | panic_param = param; |
287 | } | 283 | } |
288 | if (!strncmp(param, envp_init[i], val - param)) | 284 | if (!strncmp(param, envp_init[i], val - param)) |
@@ -294,7 +290,7 @@ static int __init unknown_bootoption(char *param, char *val, const char *unused) | |||
294 | unsigned int i; | 290 | unsigned int i; |
295 | for (i = 0; argv_init[i]; i++) { | 291 | for (i = 0; argv_init[i]; i++) { |
296 | if (i == MAX_INIT_ARGS) { | 292 | if (i == MAX_INIT_ARGS) { |
297 | panic_later = "Too many boot init vars at `%s'"; | 293 | panic_later = "init"; |
298 | panic_param = param; | 294 | panic_param = param; |
299 | } | 295 | } |
300 | } | 296 | } |
@@ -586,7 +582,8 @@ asmlinkage void __init start_kernel(void) | |||
586 | */ | 582 | */ |
587 | console_init(); | 583 | console_init(); |
588 | if (panic_later) | 584 | if (panic_later) |
589 | panic(panic_later, panic_param); | 585 | panic("Too many boot %s vars at `%s'", panic_later, |
586 | panic_param); | ||
590 | 587 | ||
591 | lockdep_info(); | 588 | lockdep_info(); |
592 | 589 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 2f11bbe376b0..a17621c6cd42 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -800,14 +800,11 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) | |||
800 | * Allocate a new mm structure and copy contents from the | 800 | * Allocate a new mm structure and copy contents from the |
801 | * mm structure of the passed in task structure. | 801 | * mm structure of the passed in task structure. |
802 | */ | 802 | */ |
803 | struct mm_struct *dup_mm(struct task_struct *tsk) | 803 | static struct mm_struct *dup_mm(struct task_struct *tsk) |
804 | { | 804 | { |
805 | struct mm_struct *mm, *oldmm = current->mm; | 805 | struct mm_struct *mm, *oldmm = current->mm; |
806 | int err; | 806 | int err; |
807 | 807 | ||
808 | if (!oldmm) | ||
809 | return NULL; | ||
810 | |||
811 | mm = allocate_mm(); | 808 | mm = allocate_mm(); |
812 | if (!mm) | 809 | if (!mm) |
813 | goto fail_nomem; | 810 | goto fail_nomem; |
@@ -1229,7 +1226,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1229 | if (!try_module_get(task_thread_info(p)->exec_domain->module)) | 1226 | if (!try_module_get(task_thread_info(p)->exec_domain->module)) |
1230 | goto bad_fork_cleanup_count; | 1227 | goto bad_fork_cleanup_count; |
1231 | 1228 | ||
1232 | p->did_exec = 0; | ||
1233 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ | 1229 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
1234 | copy_flags(clone_flags, p); | 1230 | copy_flags(clone_flags, p); |
1235 | INIT_LIST_HEAD(&p->children); | 1231 | INIT_LIST_HEAD(&p->children); |
@@ -1654,7 +1650,7 @@ SYSCALL_DEFINE0(fork) | |||
1654 | return do_fork(SIGCHLD, 0, 0, NULL, NULL); | 1650 | return do_fork(SIGCHLD, 0, 0, NULL, NULL); |
1655 | #else | 1651 | #else |
1656 | /* can not support in nommu mode */ | 1652 | /* can not support in nommu mode */ |
1657 | return(-EINVAL); | 1653 | return -EINVAL; |
1658 | #endif | 1654 | #endif |
1659 | } | 1655 | } |
1660 | #endif | 1656 | #endif |
@@ -1662,7 +1658,7 @@ SYSCALL_DEFINE0(fork) | |||
1662 | #ifdef __ARCH_WANT_SYS_VFORK | 1658 | #ifdef __ARCH_WANT_SYS_VFORK |
1663 | SYSCALL_DEFINE0(vfork) | 1659 | SYSCALL_DEFINE0(vfork) |
1664 | { | 1660 | { |
1665 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, | 1661 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, |
1666 | 0, NULL, NULL); | 1662 | 0, NULL, NULL); |
1667 | } | 1663 | } |
1668 | #endif | 1664 | #endif |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 9c970167e402..ac738781d356 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -932,6 +932,7 @@ static int kimage_load_segment(struct kimage *image, | |||
932 | */ | 932 | */ |
933 | struct kimage *kexec_image; | 933 | struct kimage *kexec_image; |
934 | struct kimage *kexec_crash_image; | 934 | struct kimage *kexec_crash_image; |
935 | int kexec_load_disabled; | ||
935 | 936 | ||
936 | static DEFINE_MUTEX(kexec_mutex); | 937 | static DEFINE_MUTEX(kexec_mutex); |
937 | 938 | ||
@@ -942,7 +943,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, | |||
942 | int result; | 943 | int result; |
943 | 944 | ||
944 | /* We only trust the superuser with rebooting the system. */ | 945 | /* We only trust the superuser with rebooting the system. */ |
945 | if (!capable(CAP_SYS_BOOT)) | 946 | if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) |
946 | return -EPERM; | 947 | return -EPERM; |
947 | 948 | ||
948 | /* | 949 | /* |
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 9659d38e008f..d945a949760f 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
@@ -126,7 +126,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj, | |||
126 | { | 126 | { |
127 | return sprintf(buf, "%lx %x\n", | 127 | return sprintf(buf, "%lx %x\n", |
128 | paddr_vmcoreinfo_note(), | 128 | paddr_vmcoreinfo_note(), |
129 | (unsigned int)vmcoreinfo_max_size); | 129 | (unsigned int)sizeof(vmcoreinfo_note)); |
130 | } | 130 | } |
131 | KERNEL_ATTR_RO(vmcoreinfo); | 131 | KERNEL_ATTR_RO(vmcoreinfo); |
132 | 132 | ||
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index f8b41bddc6dc..b1d255f04135 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -1595,10 +1595,13 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
1595 | * either merge it with the current buffer and flush, or if | 1595 | * either merge it with the current buffer and flush, or if |
1596 | * there was a race with interrupts (prefix == true) then just | 1596 | * there was a race with interrupts (prefix == true) then just |
1597 | * flush it out and store this line separately. | 1597 | * flush it out and store this line separately. |
1598 | * If the preceding printk was from a different task and missed | ||
1599 | * a newline, flush and append the newline. | ||
1598 | */ | 1600 | */ |
1599 | if (cont.len && cont.owner == current) { | 1601 | if (cont.len) { |
1600 | if (!(lflags & LOG_PREFIX)) | 1602 | if (cont.owner == current && !(lflags & LOG_PREFIX)) |
1601 | stored = cont_add(facility, level, text, text_len); | 1603 | stored = cont_add(facility, level, text, |
1604 | text_len); | ||
1602 | cont_flush(LOG_NEWLINE); | 1605 | cont_flush(LOG_NEWLINE); |
1603 | } | 1606 | } |
1604 | 1607 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d6964e49711..7fea865a810d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1770,7 +1770,29 @@ void set_numabalancing_state(bool enabled) | |||
1770 | numabalancing_enabled = enabled; | 1770 | numabalancing_enabled = enabled; |
1771 | } | 1771 | } |
1772 | #endif /* CONFIG_SCHED_DEBUG */ | 1772 | #endif /* CONFIG_SCHED_DEBUG */ |
1773 | #endif /* CONFIG_NUMA_BALANCING */ | 1773 | |
1774 | #ifdef CONFIG_PROC_SYSCTL | ||
1775 | int sysctl_numa_balancing(struct ctl_table *table, int write, | ||
1776 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
1777 | { | ||
1778 | struct ctl_table t; | ||
1779 | int err; | ||
1780 | int state = numabalancing_enabled; | ||
1781 | |||
1782 | if (write && !capable(CAP_SYS_ADMIN)) | ||
1783 | return -EPERM; | ||
1784 | |||
1785 | t = *table; | ||
1786 | t.data = &state; | ||
1787 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | ||
1788 | if (err < 0) | ||
1789 | return err; | ||
1790 | if (write) | ||
1791 | set_numabalancing_state(state); | ||
1792 | return err; | ||
1793 | } | ||
1794 | #endif | ||
1795 | #endif | ||
1774 | 1796 | ||
1775 | /* | 1797 | /* |
1776 | * fork()/clone()-time setup: | 1798 | * fork()/clone()-time setup: |
diff --git a/kernel/signal.c b/kernel/signal.c index 940b30ee9a30..52f881db1ca0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2047,8 +2047,8 @@ static bool do_signal_stop(int signr) | |||
2047 | if (task_set_jobctl_pending(current, signr | gstop)) | 2047 | if (task_set_jobctl_pending(current, signr | gstop)) |
2048 | sig->group_stop_count++; | 2048 | sig->group_stop_count++; |
2049 | 2049 | ||
2050 | for (t = next_thread(current); t != current; | 2050 | t = current; |
2051 | t = next_thread(t)) { | 2051 | while_each_thread(current, t) { |
2052 | /* | 2052 | /* |
2053 | * Setting state to TASK_STOPPED for a group | 2053 | * Setting state to TASK_STOPPED for a group |
2054 | * stop is always done with the siglock held, | 2054 | * stop is always done with the siglock held, |
@@ -3125,8 +3125,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
3125 | rm_from_queue_full(&mask, &t->signal->shared_pending); | 3125 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
3126 | do { | 3126 | do { |
3127 | rm_from_queue_full(&mask, &t->pending); | 3127 | rm_from_queue_full(&mask, &t->pending); |
3128 | t = next_thread(t); | 3128 | } while_each_thread(current, t); |
3129 | } while (t != current); | ||
3130 | } | 3129 | } |
3131 | } | 3130 | } |
3132 | 3131 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index c72311324ea7..c0a58be780a4 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -895,8 +895,7 @@ SYSCALL_DEFINE1(times, struct tms __user *, tbuf) | |||
895 | * only important on a multi-user system anyway, to make sure one user | 895 | * only important on a multi-user system anyway, to make sure one user |
896 | * can't send a signal to a process owned by another. -TYT, 12/12/91 | 896 | * can't send a signal to a process owned by another. -TYT, 12/12/91 |
897 | * | 897 | * |
898 | * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. | 898 | * The !PF_FORKNOEXEC check is needed to conform completely to POSIX.
899 | * LBT 04.03.94 | ||
900 | */ | 899 | */ |
901 | SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | 900 | SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) |
902 | { | 901 | { |
@@ -932,7 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
932 | if (task_session(p) != task_session(group_leader)) | 931 | if (task_session(p) != task_session(group_leader)) |
933 | goto out; | 932 | goto out; |
934 | err = -EACCES; | 933 | err = -EACCES; |
935 | if (p->did_exec) | 934 | if (!(p->flags & PF_FORKNOEXEC)) |
936 | goto out; | 935 | goto out; |
937 | } else { | 936 | } else { |
938 | err = -ESRCH; | 937 | err = -ESRCH; |
@@ -1572,8 +1571,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1572 | t = p; | 1571 | t = p; |
1573 | do { | 1572 | do { |
1574 | accumulate_thread_rusage(t, r); | 1573 | accumulate_thread_rusage(t, r); |
1575 | t = next_thread(t); | 1574 | } while_each_thread(p, t); |
1576 | } while (t != p); | ||
1577 | break; | 1575 | break; |
1578 | 1576 | ||
1579 | default: | 1577 | default: |
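Both conversions replace an open-coded next_thread() loop with the canonical iterator. The do/while form visits every thread of the group, including the task it starts from; a generic, hypothetical sketch (locking as required by the caller, e.g. tasklist_lock or siglock):

#include <linux/sched.h>

static unsigned long group_voluntary_switches(struct task_struct *p)
{
	struct task_struct *t = p;
	unsigned long sum = 0;

	do {
		sum += t->nvcsw;	/* any per-thread field */
	} while_each_thread(p, t);

	return sum;
}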
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 332cefcdb04b..096db7452cbd 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <linux/capability.h> | 62 | #include <linux/capability.h> |
63 | #include <linux/binfmts.h> | 63 | #include <linux/binfmts.h> |
64 | #include <linux/sched/sysctl.h> | 64 | #include <linux/sched/sysctl.h> |
65 | #include <linux/kexec.h> | ||
65 | 66 | ||
66 | #include <asm/uaccess.h> | 67 | #include <asm/uaccess.h> |
67 | #include <asm/processor.h> | 68 | #include <asm/processor.h> |
@@ -389,6 +390,15 @@ static struct ctl_table kern_table[] = { | |||
389 | .mode = 0644, | 390 | .mode = 0644, |
390 | .proc_handler = proc_dointvec, | 391 | .proc_handler = proc_dointvec, |
391 | }, | 392 | }, |
393 | { | ||
394 | .procname = "numa_balancing", | ||
395 | .data = NULL, /* filled in by handler */ | ||
396 | .maxlen = sizeof(unsigned int), | ||
397 | .mode = 0644, | ||
398 | .proc_handler = sysctl_numa_balancing, | ||
399 | .extra1 = &zero, | ||
400 | .extra2 = &one, | ||
401 | }, | ||
392 | #endif /* CONFIG_NUMA_BALANCING */ | 402 | #endif /* CONFIG_NUMA_BALANCING */ |
393 | #endif /* CONFIG_SCHED_DEBUG */ | 403 | #endif /* CONFIG_SCHED_DEBUG */ |
394 | { | 404 | { |
@@ -605,6 +615,18 @@ static struct ctl_table kern_table[] = { | |||
605 | .proc_handler = proc_dointvec, | 615 | .proc_handler = proc_dointvec, |
606 | }, | 616 | }, |
607 | #endif | 617 | #endif |
618 | #ifdef CONFIG_KEXEC | ||
619 | { | ||
620 | .procname = "kexec_load_disabled", | ||
621 | .data = &kexec_load_disabled, | ||
622 | .maxlen = sizeof(int), | ||
623 | .mode = 0644, | ||
624 | /* only handle a transition from default "0" to "1" */ | ||
625 | .proc_handler = proc_dointvec_minmax, | ||
626 | .extra1 = &one, | ||
627 | .extra2 = &one, | ||
628 | }, | ||
629 | #endif | ||
608 | #ifdef CONFIG_MODULES | 630 | #ifdef CONFIG_MODULES |
609 | { | 631 | { |
610 | .procname = "modprobe", | 632 | .procname = "modprobe", |
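Because extra1 and extra2 both point at 'one', proc_dointvec_minmax() rejects every write except the value 1, so the knob can be switched on but never back off; together with the capable(CAP_SYS_BOOT) test in the kexec_load hunk above this gives a one-way lockdown. The same idiom fits any "disable once, forever" flag; a hypothetical entry (registration elided):

#include <linux/sysctl.h>

static int my_feature_disabled;
static int one = 1;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_feature_disabled",
		.data		= &my_feature_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &one,	/* min == max == 1: only 1 is writable */
		.extra2		= &one,
	},
	{ }
};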
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 900b63c1e899..e0e2eebf7ab3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1595,6 +1595,33 @@ config DMA_API_DEBUG | |||
1595 | 1595 | ||
1596 | If unsure, say N. | 1596 | If unsure, say N. |
1597 | 1597 | ||
1598 | config TEST_MODULE | ||
1599 | tristate "Test module loading with 'hello world' module" | ||
1600 | default n | ||
1601 | depends on m | ||
1602 | help | ||
1603 | This builds the "test_module" module that emits "Hello, world" | ||
1604 | on printk when loaded. It is designed to be used for basic | ||
1605 | evaluation of the module loading subsystem (for example when | ||
1606 | validating module verification). It lacks any extra dependencies, | ||
1607 | and will not normally be loaded by the system unless explicitly | ||
1608 | requested by name. | ||
1609 | |||
1610 | If unsure, say N. | ||
1611 | |||
1612 | config TEST_USER_COPY | ||
1613 | tristate "Test user/kernel boundary protections" | ||
1614 | default n | ||
1615 | depends on m | ||
1616 | help | ||
1617 | This builds the "test_user_copy" module that runs sanity checks | ||
1618 | on the copy_to/from_user infrastructure, making sure basic | ||
1619 | user/kernel boundary testing is working. If it fails to load, | ||
1620 | a regression has been detected in the user/kernel memory boundary | ||
1621 | protections. | ||
1622 | |||
1623 | If unsure, say N. | ||
1624 | |||
1598 | source "samples/Kconfig" | 1625 | source "samples/Kconfig" |
1599 | 1626 | ||
1600 | source "lib/Kconfig.kgdb" | 1627 | source "lib/Kconfig.kgdb" |
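For orientation, the module TEST_MODULE describes is essentially the smallest possible loadable module; a sketch of what such a "hello world" module looks like (the actual lib/test_module.c may differ in detail):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init test_module_init(void)
{
	pr_warn("Hello, world\n");
	return 0;
}

static void __exit test_module_exit(void)
{
	pr_warn("Goodbye\n");
}

module_init(test_module_init);
module_exit(test_module_exit);
MODULE_LICENSE("GPL");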
diff --git a/lib/Makefile b/lib/Makefile index a459c31e8c6b..98ec3b861062 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -31,6 +31,8 @@ obj-y += string_helpers.o | |||
31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
32 | obj-y += kstrtox.o | 32 | obj-y += kstrtox.o |
33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
34 | obj-$(CONFIG_TEST_MODULE) += test_module.o | ||
35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | ||
34 | 36 | ||
35 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 37 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
36 | CFLAGS_kobject.o += -DDEBUG | 38 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/cmdline.c b/lib/cmdline.c index eb6791188cf5..d4932f745e92 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -49,13 +49,13 @@ static int get_range(char **str, int *pint) | |||
49 | * 3 - hyphen found to denote a range | 49 | * 3 - hyphen found to denote a range |
50 | */ | 50 | */ |
51 | 51 | ||
52 | int get_option (char **str, int *pint) | 52 | int get_option(char **str, int *pint) |
53 | { | 53 | { |
54 | char *cur = *str; | 54 | char *cur = *str; |
55 | 55 | ||
56 | if (!cur || !(*cur)) | 56 | if (!cur || !(*cur)) |
57 | return 0; | 57 | return 0; |
58 | *pint = simple_strtol (cur, str, 0); | 58 | *pint = simple_strtol(cur, str, 0); |
59 | if (cur == *str) | 59 | if (cur == *str) |
60 | return 0; | 60 | return 0; |
61 | if (**str == ',') { | 61 | if (**str == ',') { |
@@ -67,6 +67,7 @@ int get_option (char **str, int *pint) | |||
67 | 67 | ||
68 | return 1; | 68 | return 1; |
69 | } | 69 | } |
70 | EXPORT_SYMBOL(get_option); | ||
70 | 71 | ||
71 | /** | 72 | /** |
72 | * get_options - Parse a string into a list of integers | 73 | * get_options - Parse a string into a list of integers |
@@ -84,13 +85,13 @@ int get_option (char **str, int *pint) | |||
84 | * the parse to end (typically a null terminator, if @str is | 85 | * the parse to end (typically a null terminator, if @str is |
85 | * completely parseable). | 86 | * completely parseable). |
86 | */ | 87 | */ |
87 | 88 | ||
88 | char *get_options(const char *str, int nints, int *ints) | 89 | char *get_options(const char *str, int nints, int *ints) |
89 | { | 90 | { |
90 | int res, i = 1; | 91 | int res, i = 1; |
91 | 92 | ||
92 | while (i < nints) { | 93 | while (i < nints) { |
93 | res = get_option ((char **)&str, ints + i); | 94 | res = get_option((char **)&str, ints + i); |
94 | if (res == 0) | 95 | if (res == 0) |
95 | break; | 96 | break; |
96 | if (res == 3) { | 97 | if (res == 3) { |
@@ -112,6 +113,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
112 | ints[0] = i - 1; | 113 | ints[0] = i - 1; |
113 | return (char *)str; | 114 | return (char *)str; |
114 | } | 115 | } |
116 | EXPORT_SYMBOL(get_options); | ||
115 | 117 | ||
116 | /** | 118 | /** |
117 | * memparse - parse a string with mem suffixes into a number | 119 | * memparse - parse a string with mem suffixes into a number |
@@ -152,8 +154,4 @@ unsigned long long memparse(const char *ptr, char **retptr) | |||
152 | 154 | ||
153 | return ret; | 155 | return ret; |
154 | } | 156 | } |
155 | |||
156 | |||
157 | EXPORT_SYMBOL(memparse); | 157 | EXPORT_SYMBOL(memparse); |
158 | EXPORT_SYMBOL(get_option); | ||
159 | EXPORT_SYMBOL(get_options); | ||
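With get_option() and get_options() now exported, modules can parse comma/range lists directly. A minimal sketch of the expected behaviour (the demo module and the "1-3,5" value are hypothetical, not part of this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init optdemo_init(void)
{
	int ints[8];	/* ints[0] receives the number of values parsed */
	char *rest;

	/* ranges are expanded, so "1-3,5" yields 1, 2, 3, 5 */
	rest = get_options("1-3,5", ARRAY_SIZE(ints), ints);
	pr_info("parsed %d values, first=%d, rest=\"%s\"\n",
		ints[0], ints[1], rest);
	return 0;
}
module_init(optdemo_init);

MODULE_LICENSE("GPL");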
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c index 3e67cfad16ad..7d1e83caf8ad 100644 --- a/lib/decompress_unlz4.c +++ b/lib/decompress_unlz4.c | |||
@@ -141,6 +141,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
141 | goto exit_2; | 141 | goto exit_2; |
142 | } | 142 | } |
143 | 143 | ||
144 | ret = -1; | ||
144 | if (flush && flush(outp, dest_len) != dest_len) | 145 | if (flush && flush(outp, dest_len) != dest_len) |
145 | goto exit_2; | 146 | goto exit_2; |
146 | if (output) | 147 | if (output) |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c37aeacd7651..600ac57e2777 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * By Greg Banks <gnb@melbourne.sgi.com> | 8 | * By Greg Banks <gnb@melbourne.sgi.com> |
9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. | 9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. |
10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. | 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. |
11 | * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com> | ||
11 | */ | 12 | */ |
12 | 13 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
@@ -24,6 +25,7 @@ | |||
24 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
25 | #include <linux/ctype.h> | 26 | #include <linux/ctype.h> |
26 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/parser.h> | ||
27 | #include <linux/string_helpers.h> | 29 | #include <linux/string_helpers.h> |
28 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
29 | #include <linux/dynamic_debug.h> | 31 | #include <linux/dynamic_debug.h> |
@@ -147,7 +149,8 @@ static int ddebug_change(const struct ddebug_query *query, | |||
147 | list_for_each_entry(dt, &ddebug_tables, link) { | 149 | list_for_each_entry(dt, &ddebug_tables, link) { |
148 | 150 | ||
149 | /* match against the module name */ | 151 | /* match against the module name */ |
150 | if (query->module && strcmp(query->module, dt->mod_name)) | 152 | if (query->module && |
153 | !match_wildcard(query->module, dt->mod_name)) | ||
151 | continue; | 154 | continue; |
152 | 155 | ||
153 | for (i = 0; i < dt->num_ddebugs; i++) { | 156 | for (i = 0; i < dt->num_ddebugs; i++) { |
@@ -155,14 +158,16 @@ static int ddebug_change(const struct ddebug_query *query, | |||
155 | 158 | ||
156 | /* match against the source filename */ | 159 | /* match against the source filename */ |
157 | if (query->filename && | 160 | if (query->filename && |
158 | strcmp(query->filename, dp->filename) && | 161 | !match_wildcard(query->filename, dp->filename) && |
159 | strcmp(query->filename, kbasename(dp->filename)) && | 162 | !match_wildcard(query->filename, |
160 | strcmp(query->filename, trim_prefix(dp->filename))) | 163 | kbasename(dp->filename)) && |
164 | !match_wildcard(query->filename, | ||
165 | trim_prefix(dp->filename))) | ||
161 | continue; | 166 | continue; |
162 | 167 | ||
163 | /* match against the function */ | 168 | /* match against the function */ |
164 | if (query->function && | 169 | if (query->function && |
165 | strcmp(query->function, dp->function)) | 170 | !match_wildcard(query->function, dp->function)) |
166 | continue; | 171 | continue; |
167 | 172 | ||
168 | /* match against the format */ | 173 | /* match against the format */ |
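The switch from strcmp() to match_wildcard() turns each ddebug query field into a glob rather than an exact-match string. A hedged sketch of the resulting semantics, mirroring the module-name check above (the pattern and module names are made up):

#include <linux/parser.h>

/* A NULL query field still means "match any module", as in ddebug_change() */
static bool query_matches_module(const char *query_module, const char *mod_name)
{
	return !query_module || match_wildcard(query_module, mod_name);
}

/*
 * query_matches_module("usb*", "usbcore")  -> true
 * query_matches_module("usb*", "e1000e")   -> false
 * query_matches_module(NULL,   "anything") -> true
 */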
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index f78ae0c0c4e2..ec8da78df9be 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
@@ -92,7 +92,6 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
92 | rv = _parse_integer(s, base, &_res); | 92 | rv = _parse_integer(s, base, &_res); |
93 | if (rv & KSTRTOX_OVERFLOW) | 93 | if (rv & KSTRTOX_OVERFLOW) |
94 | return -ERANGE; | 94 | return -ERANGE; |
95 | rv &= ~KSTRTOX_OVERFLOW; | ||
96 | if (rv == 0) | 95 | if (rv == 0) |
97 | return -EINVAL; | 96 | return -EINVAL; |
98 | s += rv; | 97 | s += rv; |
diff --git a/lib/parser.c b/lib/parser.c index 807b2aaa33fa..b6d11631231b 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
@@ -113,6 +113,7 @@ int match_token(char *s, const match_table_t table, substring_t args[]) | |||
113 | 113 | ||
114 | return p->token; | 114 | return p->token; |
115 | } | 115 | } |
116 | EXPORT_SYMBOL(match_token); | ||
116 | 117 | ||
117 | /** | 118 | /** |
118 | * match_number: scan a number in the given base from a substring_t | 119 | * match_number: scan a number in the given base from a substring_t |
@@ -163,6 +164,7 @@ int match_int(substring_t *s, int *result) | |||
163 | { | 164 | { |
164 | return match_number(s, result, 0); | 165 | return match_number(s, result, 0); |
165 | } | 166 | } |
167 | EXPORT_SYMBOL(match_int); | ||
166 | 168 | ||
167 | /** | 169 | /** |
168 | * match_octal: - scan an octal representation of an integer from a substring_t | 170 | * match_octal: - scan an octal representation of an integer from a substring_t |
@@ -177,6 +179,7 @@ int match_octal(substring_t *s, int *result) | |||
177 | { | 179 | { |
178 | return match_number(s, result, 8); | 180 | return match_number(s, result, 8); |
179 | } | 181 | } |
182 | EXPORT_SYMBOL(match_octal); | ||
180 | 183 | ||
181 | /** | 184 | /** |
182 | * match_hex: - scan a hex representation of an integer from a substring_t | 185 | * match_hex: - scan a hex representation of an integer from a substring_t |
@@ -191,6 +194,58 @@ int match_hex(substring_t *s, int *result) | |||
191 | { | 194 | { |
192 | return match_number(s, result, 16); | 195 | return match_number(s, result, 16); |
193 | } | 196 | } |
197 | EXPORT_SYMBOL(match_hex); | ||
198 | |||
199 | /** | ||
200 | * match_wildcard: - parse if a string matches given wildcard pattern | ||
201 | * @pattern: wildcard pattern | ||
202 | * @str: the string to be parsed | ||
203 | * | ||
204 | * Description: Parse the string @str to check if it matches the wildcard | ||
205 | * pattern @pattern. The pattern may contain two types of wildcards: | ||
206 | * '*' - matches zero or more characters | ||
207 | * '?' - matches one character | ||
208 | * If the string matches the pattern, return true; otherwise return false. | ||
209 | */ | ||
210 | bool match_wildcard(const char *pattern, const char *str) | ||
211 | { | ||
212 | const char *s = str; | ||
213 | const char *p = pattern; | ||
214 | bool star = false; | ||
215 | |||
216 | while (*s) { | ||
217 | switch (*p) { | ||
218 | case '?': | ||
219 | s++; | ||
220 | p++; | ||
221 | break; | ||
222 | case '*': | ||
223 | star = true; | ||
224 | str = s; | ||
225 | if (!*++p) | ||
226 | return true; | ||
227 | pattern = p; | ||
228 | break; | ||
229 | default: | ||
230 | if (*s == *p) { | ||
231 | s++; | ||
232 | p++; | ||
233 | } else { | ||
234 | if (!star) | ||
235 | return false; | ||
236 | str++; | ||
237 | s = str; | ||
238 | p = pattern; | ||
239 | } | ||
240 | break; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | if (*p == '*') | ||
245 | ++p; | ||
246 | return !*p; | ||
247 | } | ||
248 | EXPORT_SYMBOL(match_wildcard); | ||
194 | 249 | ||
195 | /** | 250 | /** |
196 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer | 251 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer |
@@ -213,6 +268,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size) | |||
213 | } | 268 | } |
214 | return ret; | 269 | return ret; |
215 | } | 270 | } |
271 | EXPORT_SYMBOL(match_strlcpy); | ||
216 | 272 | ||
217 | /** | 273 | /** |
218 | * match_strdup: - allocate a new string with the contents of a substring_t | 274 | * match_strdup: - allocate a new string with the contents of a substring_t |
@@ -230,10 +286,4 @@ char *match_strdup(const substring_t *s) | |||
230 | match_strlcpy(p, s, sz); | 286 | match_strlcpy(p, s, sz); |
231 | return p; | 287 | return p; |
232 | } | 288 | } |
233 | |||
234 | EXPORT_SYMBOL(match_token); | ||
235 | EXPORT_SYMBOL(match_int); | ||
236 | EXPORT_SYMBOL(match_octal); | ||
237 | EXPORT_SYMBOL(match_hex); | ||
238 | EXPORT_SYMBOL(match_strlcpy); | ||
239 | EXPORT_SYMBOL(match_strdup); | 289 | EXPORT_SYMBOL(match_strdup); |
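A quick self-check of the '*' and '?' semantics documented above, written as a throwaway module against the implementation in this patch (not part of the series):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parser.h>

static int __init wildcard_demo_init(void)
{
	WARN_ON(!match_wildcard("foo*", "foobar"));	/* '*' swallows "bar" */
	WARN_ON(!match_wildcard("fo?", "foo"));		/* '?' matches exactly one char */
	WARN_ON(match_wildcard("fo?", "fo"));		/* '?' needs a character to eat */
	WARN_ON(!match_wildcard("*", ""));		/* a lone '*' matches anything */
	return 0;
}
module_init(wildcard_demo_init);

MODULE_LICENSE("GPL");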
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 31dd4ccd3baa..8b3c9dc88262 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c | |||
@@ -8,8 +8,8 @@ | |||
8 | #define CHECK_LOOPS 100 | 8 | #define CHECK_LOOPS 100 |
9 | 9 | ||
10 | struct test_node { | 10 | struct test_node { |
11 | struct rb_node rb; | ||
12 | u32 key; | 11 | u32 key; |
12 | struct rb_node rb; | ||
13 | 13 | ||
14 | /* following fields used for testing augmented rbtree functionality */ | 14 | /* following fields used for testing augmented rbtree functionality */ |
15 | u32 val; | 15 | u32 val; |
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb) | |||
114 | return count; | 114 | return count; |
115 | } | 115 | } |
116 | 116 | ||
117 | static void check_postorder_foreach(int nr_nodes) | ||
118 | { | ||
119 | struct test_node *cur, *n; | ||
120 | int count = 0; | ||
121 | rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) | ||
122 | count++; | ||
123 | |||
124 | WARN_ON_ONCE(count != nr_nodes); | ||
125 | } | ||
126 | |||
117 | static void check_postorder(int nr_nodes) | 127 | static void check_postorder(int nr_nodes) |
118 | { | 128 | { |
119 | struct rb_node *rb; | 129 | struct rb_node *rb; |
@@ -148,6 +158,7 @@ static void check(int nr_nodes) | |||
148 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); | 158 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); |
149 | 159 | ||
150 | check_postorder(nr_nodes); | 160 | check_postorder(nr_nodes); |
161 | check_postorder_foreach(nr_nodes); | ||
151 | } | 162 | } |
152 | 163 | ||
153 | static void check_augmented(int nr_nodes) | 164 | static void check_augmented(int nr_nodes) |
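Beyond the node-count check added here, the usual reason for rbtree_postorder_for_each_entry_safe() is tearing down a whole tree without rebalancing after every erase. A sketch assuming heap-allocated nodes (the test above uses a static array instead):

#include <linux/rbtree.h>
#include <linux/slab.h>

static void free_all_test_nodes(struct rb_root *root)
{
	struct test_node *cur, *n;

	/* postorder visits children before their parent, so freeing 'cur'
	 * never frees memory the iterator still has to walk */
	rbtree_postorder_for_each_entry_safe(cur, n, root, rb)
		kfree(cur);
	*root = RB_ROOT;
}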
diff --git a/lib/test_module.c b/lib/test_module.c new file mode 100644 index 000000000000..319b66f1ff61 --- /dev/null +++ b/lib/test_module.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This module emits "Hello, world" on printk when loaded. | ||
3 | * | ||
4 | * It is designed to be used for basic evaluation of the module loading | ||
5 | * subsystem (for example when validating module signing/verification). It | ||
6 | * lacks any extra dependencies, and will not normally be loaded by the | ||
7 | * system unless explicitly requested by name. | ||
8 | */ | ||
9 | |||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/printk.h> | ||
15 | |||
16 | static int __init test_module_init(void) | ||
17 | { | ||
18 | pr_warn("Hello, world\n"); | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | module_init(test_module_init); | ||
24 | |||
25 | static void __exit test_module_exit(void) | ||
26 | { | ||
27 | pr_warn("Goodbye\n"); | ||
28 | } | ||
29 | |||
30 | module_exit(test_module_exit); | ||
31 | |||
32 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
33 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c new file mode 100644 index 000000000000..0ecef3e4690e --- /dev/null +++ b/lib/test_user_copy.c | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * Kernel module for testing copy_to/from_user infrastructure. | ||
3 | * | ||
4 | * Copyright 2013 Google Inc. All Rights Reserved | ||
5 | * | ||
6 | * Authors: | ||
7 | * Kees Cook <keescook@chromium.org> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
21 | #include <linux/mman.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | |||
28 | #define test(condition, msg) \ | ||
29 | ({ \ | ||
30 | int cond = (condition); \ | ||
31 | if (cond) \ | ||
32 | pr_warn("%s\n", msg); \ | ||
33 | cond; \ | ||
34 | }) | ||
35 | |||
36 | static int __init test_user_copy_init(void) | ||
37 | { | ||
38 | int ret = 0; | ||
39 | char *kmem; | ||
40 | char __user *usermem; | ||
41 | char *bad_usermem; | ||
42 | unsigned long user_addr; | ||
43 | unsigned long value = 0x5A; | ||
44 | |||
45 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); | ||
46 | if (!kmem) | ||
47 | return -ENOMEM; | ||
48 | |||
49 | user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, | ||
50 | PROT_READ | PROT_WRITE | PROT_EXEC, | ||
51 | MAP_ANONYMOUS | MAP_PRIVATE, 0); | ||
52 | if (user_addr >= (unsigned long)(TASK_SIZE)) { | ||
53 | pr_warn("Failed to allocate user memory\n"); | ||
54 | kfree(kmem); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | |||
58 | usermem = (char __user *)user_addr; | ||
59 | bad_usermem = (char *)user_addr; | ||
60 | |||
61 | /* Legitimate usage: none of these should fail. */ | ||
62 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), | ||
63 | "legitimate copy_from_user failed"); | ||
64 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), | ||
65 | "legitimate copy_to_user failed"); | ||
66 | ret |= test(get_user(value, (unsigned long __user *)usermem), | ||
67 | "legitimate get_user failed"); | ||
68 | ret |= test(put_user(value, (unsigned long __user *)usermem), | ||
69 | "legitimate put_user failed"); | ||
70 | |||
71 | /* Invalid usage: none of these should succeed. */ | ||
72 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), | ||
73 | PAGE_SIZE), | ||
74 | "illegal all-kernel copy_from_user passed"); | ||
75 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, | ||
76 | PAGE_SIZE), | ||
77 | "illegal reversed copy_from_user passed"); | ||
78 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, | ||
79 | PAGE_SIZE), | ||
80 | "illegal all-kernel copy_to_user passed"); | ||
81 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, | ||
82 | PAGE_SIZE), | ||
83 | "illegal reversed copy_to_user passed"); | ||
84 | ret |= test(!get_user(value, (unsigned long __user *)kmem), | ||
85 | "illegal get_user passed"); | ||
86 | ret |= test(!put_user(value, (unsigned long __user *)kmem), | ||
87 | "illegal put_user passed"); | ||
88 | |||
89 | vm_munmap(user_addr, PAGE_SIZE * 2); | ||
90 | kfree(kmem); | ||
91 | |||
92 | if (ret == 0) { | ||
93 | pr_info("tests passed.\n"); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | module_init(test_user_copy_init); | ||
101 | |||
102 | static void __exit test_user_copy_exit(void) | ||
103 | { | ||
104 | pr_info("unloaded.\n"); | ||
105 | } | ||
106 | |||
107 | module_exit(test_user_copy_exit); | ||
108 | |||
109 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
110 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 10909c571494..185b6d300ebc 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -1155,6 +1155,30 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr, | |||
1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); | 1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | static noinline_for_stack | ||
1159 | char *address_val(char *buf, char *end, const void *addr, | ||
1160 | struct printf_spec spec, const char *fmt) | ||
1161 | { | ||
1162 | unsigned long long num; | ||
1163 | |||
1164 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
1165 | spec.base = 16; | ||
1166 | |||
1167 | switch (fmt[1]) { | ||
1168 | case 'd': | ||
1169 | num = *(const dma_addr_t *)addr; | ||
1170 | spec.field_width = sizeof(dma_addr_t) * 2 + 2; | ||
1171 | break; | ||
1172 | case 'p': | ||
1173 | default: | ||
1174 | num = *(const phys_addr_t *)addr; | ||
1175 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
1176 | break; | ||
1177 | } | ||
1178 | |||
1179 | return number(buf, end, num, spec); | ||
1180 | } | ||
1181 | |||
1158 | int kptr_restrict __read_mostly; | 1182 | int kptr_restrict __read_mostly; |
1159 | 1183 | ||
1160 | /* | 1184 | /* |
@@ -1218,7 +1242,8 @@ int kptr_restrict __read_mostly; | |||
1218 | * N no separator | 1242 | * N no separator |
1219 | * The maximum supported length is 64 bytes of the input. Consider | 1243 | * The maximum supported length is 64 bytes of the input. Consider |
1220 | * to use print_hex_dump() for the larger input. | 1244 | * to use print_hex_dump() for the larger input. |
1221 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | 1245 | * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives |
1246 | * (default assumed to be phys_addr_t, passed by reference) | ||
1222 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | 1247 | * - 'd[234]' For a dentry name (optionally 2-4 last components) |
1223 | * - 'D[234]' Same as 'd' but for a struct file | 1248 | * - 'D[234]' Same as 'd' but for a struct file |
1224 | * | 1249 | * |
@@ -1353,11 +1378,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1353 | } | 1378 | } |
1354 | break; | 1379 | break; |
1355 | case 'a': | 1380 | case 'a': |
1356 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 1381 | return address_val(buf, end, ptr, spec, fmt); |
1357 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
1358 | spec.base = 16; | ||
1359 | return number(buf, end, | ||
1360 | (unsigned long long) *((phys_addr_t *)ptr), spec); | ||
1361 | case 'd': | 1382 | case 'd': |
1362 | return dentry_name(buf, end, ptr, spec, fmt); | 1383 | return dentry_name(buf, end, ptr, spec, fmt); |
1363 | case 'D': | 1384 | case 'D': |
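Call sites use the extended specifier the same way as the existing %pa: the argument is passed by reference, so the printed width follows the type's real size. A small sketch with made-up values:

#include <linux/printk.h>
#include <linux/types.h>

static void report_buffer(phys_addr_t phys, dma_addr_t dma)
{
	/* %pa (or %pap) prints a phys_addr_t, %pad prints a dma_addr_t */
	pr_info("buffer: phys %pa, dma %pad\n", &phys, &dma);
}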
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 07dbc8ec46cf..6e45a5074bf0 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c | |||
@@ -267,7 +267,7 @@ void balloon_page_putback(struct page *page) | |||
267 | put_page(page); | 267 | put_page(page); |
268 | } else { | 268 | } else { |
269 | WARN_ON(1); | 269 | WARN_ON(1); |
270 | dump_page(page); | 270 | dump_page(page, "not movable balloon page"); |
271 | } | 271 | } |
272 | unlock_page(page); | 272 | unlock_page(page); |
273 | } | 273 | } |
@@ -287,7 +287,7 @@ int balloon_page_migrate(struct page *newpage, | |||
287 | BUG_ON(!trylock_page(newpage)); | 287 | BUG_ON(!trylock_page(newpage)); |
288 | 288 | ||
289 | if (WARN_ON(!__is_movable_balloon_page(page))) { | 289 | if (WARN_ON(!__is_movable_balloon_page(page))) { |
290 | dump_page(page); | 290 | dump_page(page, "not movable balloon page"); |
291 | unlock_page(newpage); | 291 | unlock_page(newpage); |
292 | return rc; | 292 | return rc; |
293 | } | 293 | } |
diff --git a/mm/cleancache.c b/mm/cleancache.c index 5875f48ce279..d0eac4350403 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c | |||
@@ -237,7 +237,7 @@ int __cleancache_get_page(struct page *page) | |||
237 | goto out; | 237 | goto out; |
238 | } | 238 | } |
239 | 239 | ||
240 | VM_BUG_ON(!PageLocked(page)); | 240 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
241 | fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; | 241 | fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; |
242 | if (fake_pool_id < 0) | 242 | if (fake_pool_id < 0) |
243 | goto out; | 243 | goto out; |
@@ -279,7 +279,7 @@ void __cleancache_put_page(struct page *page) | |||
279 | return; | 279 | return; |
280 | } | 280 | } |
281 | 281 | ||
282 | VM_BUG_ON(!PageLocked(page)); | 282 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
283 | fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; | 283 | fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; |
284 | if (fake_pool_id < 0) | 284 | if (fake_pool_id < 0) |
285 | return; | 285 | return; |
@@ -318,7 +318,7 @@ void __cleancache_invalidate_page(struct address_space *mapping, | |||
318 | if (pool_id < 0) | 318 | if (pool_id < 0) |
319 | return; | 319 | return; |
320 | 320 | ||
321 | VM_BUG_ON(!PageLocked(page)); | 321 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
322 | if (cleancache_get_key(mapping->host, &key) >= 0) { | 322 | if (cleancache_get_key(mapping->host, &key) >= 0) { |
323 | cleancache_ops->invalidate_page(pool_id, | 323 | cleancache_ops->invalidate_page(pool_id, |
324 | key, page->index); | 324 | key, page->index); |
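The VM_BUG_ON_PAGE() conversions in this and the following files rely on a helper that dumps the offending page before triggering BUG(), building on the dump_page(page, reason) signature seen above. A hedged sketch of what such a macro looks like (the in-tree definition lives in include/linux/mmdebug.h and is gated on CONFIG_DEBUG_VM):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page, "VM_BUG_ON_PAGE(" #cond ")");	\
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#endif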
diff --git a/mm/compaction.c b/mm/compaction.c index 3a91a2ea3d34..b48c5259ea33 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -523,7 +523,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
523 | if (!isolation_suitable(cc, page)) | 523 | if (!isolation_suitable(cc, page)) |
524 | goto next_pageblock; | 524 | goto next_pageblock; |
525 | 525 | ||
526 | /* Skip if free */ | 526 | /* |
527 | * Skip if free. page_order cannot be used without zone->lock | ||
528 | * as nothing prevents parallel allocations or buddy merging. | ||
529 | */ | ||
527 | if (PageBuddy(page)) | 530 | if (PageBuddy(page)) |
528 | continue; | 531 | continue; |
529 | 532 | ||
@@ -601,7 +604,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
601 | if (__isolate_lru_page(page, mode) != 0) | 604 | if (__isolate_lru_page(page, mode) != 0) |
602 | continue; | 605 | continue; |
603 | 606 | ||
604 | VM_BUG_ON(PageTransCompound(page)); | 607 | VM_BUG_ON_PAGE(PageTransCompound(page), page); |
605 | 608 | ||
606 | /* Successfully isolated */ | 609 | /* Successfully isolated */ |
607 | cc->finished_update_migrate = true; | 610 | cc->finished_update_migrate = true; |
diff --git a/mm/filemap.c b/mm/filemap.c index b7749a92021c..7a7f3e0db738 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -409,9 +409,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | |||
409 | { | 409 | { |
410 | int error; | 410 | int error; |
411 | 411 | ||
412 | VM_BUG_ON(!PageLocked(old)); | 412 | VM_BUG_ON_PAGE(!PageLocked(old), old); |
413 | VM_BUG_ON(!PageLocked(new)); | 413 | VM_BUG_ON_PAGE(!PageLocked(new), new); |
414 | VM_BUG_ON(new->mapping); | 414 | VM_BUG_ON_PAGE(new->mapping, new); |
415 | 415 | ||
416 | error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); | 416 | error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); |
417 | if (!error) { | 417 | if (!error) { |
@@ -461,8 +461,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
461 | { | 461 | { |
462 | int error; | 462 | int error; |
463 | 463 | ||
464 | VM_BUG_ON(!PageLocked(page)); | 464 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
465 | VM_BUG_ON(PageSwapBacked(page)); | 465 | VM_BUG_ON_PAGE(PageSwapBacked(page), page); |
466 | 466 | ||
467 | error = mem_cgroup_cache_charge(page, current->mm, | 467 | error = mem_cgroup_cache_charge(page, current->mm, |
468 | gfp_mask & GFP_RECLAIM_MASK); | 468 | gfp_mask & GFP_RECLAIM_MASK); |
@@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue); | |||
607 | */ | 607 | */ |
608 | void unlock_page(struct page *page) | 608 | void unlock_page(struct page *page) |
609 | { | 609 | { |
610 | VM_BUG_ON(!PageLocked(page)); | 610 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
611 | clear_bit_unlock(PG_locked, &page->flags); | 611 | clear_bit_unlock(PG_locked, &page->flags); |
612 | smp_mb__after_clear_bit(); | 612 | smp_mb__after_clear_bit(); |
613 | wake_up_page(page, PG_locked); | 613 | wake_up_page(page, PG_locked); |
@@ -760,7 +760,7 @@ repeat: | |||
760 | page_cache_release(page); | 760 | page_cache_release(page); |
761 | goto repeat; | 761 | goto repeat; |
762 | } | 762 | } |
763 | VM_BUG_ON(page->index != offset); | 763 | VM_BUG_ON_PAGE(page->index != offset, page); |
764 | } | 764 | } |
765 | return page; | 765 | return page; |
766 | } | 766 | } |
@@ -1656,7 +1656,7 @@ retry_find: | |||
1656 | put_page(page); | 1656 | put_page(page); |
1657 | goto retry_find; | 1657 | goto retry_find; |
1658 | } | 1658 | } |
1659 | VM_BUG_ON(page->index != offset); | 1659 | VM_BUG_ON_PAGE(page->index != offset, page); |
1660 | 1660 | ||
1661 | /* | 1661 | /* |
1662 | * We have a locked page in the page cache, now we need to check | 1662 | * We have a locked page in the page cache, now we need to check |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 95d1acb0f3d2..65c98eb5483c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -130,8 +130,14 @@ static int set_recommended_min_free_kbytes(void) | |||
130 | (unsigned long) nr_free_buffer_pages() / 20); | 130 | (unsigned long) nr_free_buffer_pages() / 20); |
131 | recommended_min <<= (PAGE_SHIFT-10); | 131 | recommended_min <<= (PAGE_SHIFT-10); |
132 | 132 | ||
133 | if (recommended_min > min_free_kbytes) | 133 | if (recommended_min > min_free_kbytes) { |
134 | if (user_min_free_kbytes >= 0) | ||
135 | pr_info("raising min_free_kbytes from %d to %lu " | ||
136 | "to help transparent hugepage allocations\n", | ||
137 | min_free_kbytes, recommended_min); | ||
138 | |||
134 | min_free_kbytes = recommended_min; | 139 | min_free_kbytes = recommended_min; |
140 | } | ||
135 | setup_per_zone_wmarks(); | 141 | setup_per_zone_wmarks(); |
136 | return 0; | 142 | return 0; |
137 | } | 143 | } |
@@ -655,7 +661,7 @@ out: | |||
655 | hugepage_exit_sysfs(hugepage_kobj); | 661 | hugepage_exit_sysfs(hugepage_kobj); |
656 | return err; | 662 | return err; |
657 | } | 663 | } |
658 | module_init(hugepage_init) | 664 | subsys_initcall(hugepage_init); |
659 | 665 | ||
660 | static int __init setup_transparent_hugepage(char *str) | 666 | static int __init setup_transparent_hugepage(char *str) |
661 | { | 667 | { |
@@ -712,7 +718,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, | |||
712 | pgtable_t pgtable; | 718 | pgtable_t pgtable; |
713 | spinlock_t *ptl; | 719 | spinlock_t *ptl; |
714 | 720 | ||
715 | VM_BUG_ON(!PageCompound(page)); | 721 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
716 | pgtable = pte_alloc_one(mm, haddr); | 722 | pgtable = pte_alloc_one(mm, haddr); |
717 | if (unlikely(!pgtable)) | 723 | if (unlikely(!pgtable)) |
718 | return VM_FAULT_OOM; | 724 | return VM_FAULT_OOM; |
@@ -893,7 +899,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
893 | goto out; | 899 | goto out; |
894 | } | 900 | } |
895 | src_page = pmd_page(pmd); | 901 | src_page = pmd_page(pmd); |
896 | VM_BUG_ON(!PageHead(src_page)); | 902 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
897 | get_page(src_page); | 903 | get_page(src_page); |
898 | page_dup_rmap(src_page); | 904 | page_dup_rmap(src_page); |
899 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); | 905 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
@@ -1067,7 +1073,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, | |||
1067 | ptl = pmd_lock(mm, pmd); | 1073 | ptl = pmd_lock(mm, pmd); |
1068 | if (unlikely(!pmd_same(*pmd, orig_pmd))) | 1074 | if (unlikely(!pmd_same(*pmd, orig_pmd))) |
1069 | goto out_free_pages; | 1075 | goto out_free_pages; |
1070 | VM_BUG_ON(!PageHead(page)); | 1076 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1071 | 1077 | ||
1072 | pmdp_clear_flush(vma, haddr, pmd); | 1078 | pmdp_clear_flush(vma, haddr, pmd); |
1073 | /* leave pmd empty until pte is filled */ | 1079 | /* leave pmd empty until pte is filled */ |
@@ -1133,7 +1139,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1133 | goto out_unlock; | 1139 | goto out_unlock; |
1134 | 1140 | ||
1135 | page = pmd_page(orig_pmd); | 1141 | page = pmd_page(orig_pmd); |
1136 | VM_BUG_ON(!PageCompound(page) || !PageHead(page)); | 1142 | VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); |
1137 | if (page_mapcount(page) == 1) { | 1143 | if (page_mapcount(page) == 1) { |
1138 | pmd_t entry; | 1144 | pmd_t entry; |
1139 | entry = pmd_mkyoung(orig_pmd); | 1145 | entry = pmd_mkyoung(orig_pmd); |
@@ -1211,7 +1217,7 @@ alloc: | |||
1211 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); | 1217 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
1212 | put_huge_zero_page(); | 1218 | put_huge_zero_page(); |
1213 | } else { | 1219 | } else { |
1214 | VM_BUG_ON(!PageHead(page)); | 1220 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1215 | page_remove_rmap(page); | 1221 | page_remove_rmap(page); |
1216 | put_page(page); | 1222 | put_page(page); |
1217 | } | 1223 | } |
@@ -1249,7 +1255,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | |||
1249 | goto out; | 1255 | goto out; |
1250 | 1256 | ||
1251 | page = pmd_page(*pmd); | 1257 | page = pmd_page(*pmd); |
1252 | VM_BUG_ON(!PageHead(page)); | 1258 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1253 | if (flags & FOLL_TOUCH) { | 1259 | if (flags & FOLL_TOUCH) { |
1254 | pmd_t _pmd; | 1260 | pmd_t _pmd; |
1255 | /* | 1261 | /* |
@@ -1274,7 +1280,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | |||
1274 | } | 1280 | } |
1275 | } | 1281 | } |
1276 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; | 1282 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; |
1277 | VM_BUG_ON(!PageCompound(page)); | 1283 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
1278 | if (flags & FOLL_GET) | 1284 | if (flags & FOLL_GET) |
1279 | get_page_foll(page); | 1285 | get_page_foll(page); |
1280 | 1286 | ||
@@ -1432,9 +1438,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
1432 | } else { | 1438 | } else { |
1433 | page = pmd_page(orig_pmd); | 1439 | page = pmd_page(orig_pmd); |
1434 | page_remove_rmap(page); | 1440 | page_remove_rmap(page); |
1435 | VM_BUG_ON(page_mapcount(page) < 0); | 1441 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
1436 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); | 1442 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
1437 | VM_BUG_ON(!PageHead(page)); | 1443 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1438 | atomic_long_dec(&tlb->mm->nr_ptes); | 1444 | atomic_long_dec(&tlb->mm->nr_ptes); |
1439 | spin_unlock(ptl); | 1445 | spin_unlock(ptl); |
1440 | tlb_remove_page(tlb, page); | 1446 | tlb_remove_page(tlb, page); |
@@ -2176,9 +2182,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, | |||
2176 | if (unlikely(!page)) | 2182 | if (unlikely(!page)) |
2177 | goto out; | 2183 | goto out; |
2178 | 2184 | ||
2179 | VM_BUG_ON(PageCompound(page)); | 2185 | VM_BUG_ON_PAGE(PageCompound(page), page); |
2180 | BUG_ON(!PageAnon(page)); | 2186 | VM_BUG_ON_PAGE(!PageAnon(page), page); |
2181 | VM_BUG_ON(!PageSwapBacked(page)); | 2187 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
2182 | 2188 | ||
2183 | /* cannot use mapcount: can't collapse if there's a gup pin */ | 2189 | /* cannot use mapcount: can't collapse if there's a gup pin */ |
2184 | if (page_count(page) != 1) | 2190 | if (page_count(page) != 1) |
@@ -2201,8 +2207,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, | |||
2201 | } | 2207 | } |
2202 | /* 0 stands for page_is_file_cache(page) == false */ | 2208 | /* 0 stands for page_is_file_cache(page) == false */ |
2203 | inc_zone_page_state(page, NR_ISOLATED_ANON + 0); | 2209 | inc_zone_page_state(page, NR_ISOLATED_ANON + 0); |
2204 | VM_BUG_ON(!PageLocked(page)); | 2210 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
2205 | VM_BUG_ON(PageLRU(page)); | 2211 | VM_BUG_ON_PAGE(PageLRU(page), page); |
2206 | 2212 | ||
2207 | /* If there is no mapped pte young don't collapse the page */ | 2213 | /* If there is no mapped pte young don't collapse the page */ |
2208 | if (pte_young(pteval) || PageReferenced(page) || | 2214 | if (pte_young(pteval) || PageReferenced(page) || |
@@ -2232,7 +2238,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, | |||
2232 | } else { | 2238 | } else { |
2233 | src_page = pte_page(pteval); | 2239 | src_page = pte_page(pteval); |
2234 | copy_user_highpage(page, src_page, address, vma); | 2240 | copy_user_highpage(page, src_page, address, vma); |
2235 | VM_BUG_ON(page_mapcount(src_page) != 1); | 2241 | VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); |
2236 | release_pte_page(src_page); | 2242 | release_pte_page(src_page); |
2237 | /* | 2243 | /* |
2238 | * ptl mostly unnecessary, but preempt has to | 2244 | * ptl mostly unnecessary, but preempt has to |
@@ -2311,7 +2317,7 @@ static struct page | |||
2311 | struct vm_area_struct *vma, unsigned long address, | 2317 | struct vm_area_struct *vma, unsigned long address, |
2312 | int node) | 2318 | int node) |
2313 | { | 2319 | { |
2314 | VM_BUG_ON(*hpage); | 2320 | VM_BUG_ON_PAGE(*hpage, *hpage); |
2315 | /* | 2321 | /* |
2316 | * Allocate the page while the vma is still valid and under | 2322 | * Allocate the page while the vma is still valid and under |
2317 | * the mmap_sem read mode so there is no memory allocation | 2323 | * the mmap_sem read mode so there is no memory allocation |
@@ -2580,7 +2586,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, | |||
2580 | */ | 2586 | */ |
2581 | node = page_to_nid(page); | 2587 | node = page_to_nid(page); |
2582 | khugepaged_node_load[node]++; | 2588 | khugepaged_node_load[node]++; |
2583 | VM_BUG_ON(PageCompound(page)); | 2589 | VM_BUG_ON_PAGE(PageCompound(page), page); |
2584 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) | 2590 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) |
2585 | goto out_unmap; | 2591 | goto out_unmap; |
2586 | /* cannot use mapcount: can't collapse if there's a gup pin */ | 2592 | /* cannot use mapcount: can't collapse if there's a gup pin */ |
@@ -2876,7 +2882,7 @@ again: | |||
2876 | return; | 2882 | return; |
2877 | } | 2883 | } |
2878 | page = pmd_page(*pmd); | 2884 | page = pmd_page(*pmd); |
2879 | VM_BUG_ON(!page_count(page)); | 2885 | VM_BUG_ON_PAGE(!page_count(page), page); |
2880 | get_page(page); | 2886 | get_page(page); |
2881 | spin_unlock(ptl); | 2887 | spin_unlock(ptl); |
2882 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 2888 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 04306b9de90d..c01cb9fedb18 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page) | |||
584 | 1 << PG_active | 1 << PG_reserved | | 584 | 1 << PG_active | 1 << PG_reserved | |
585 | 1 << PG_private | 1 << PG_writeback); | 585 | 1 << PG_private | 1 << PG_writeback); |
586 | } | 586 | } |
587 | VM_BUG_ON(hugetlb_cgroup_from_page(page)); | 587 | VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); |
588 | set_compound_page_dtor(page, NULL); | 588 | set_compound_page_dtor(page, NULL); |
589 | set_page_refcounted(page); | 589 | set_page_refcounted(page); |
590 | arch_release_hugepage(page); | 590 | arch_release_hugepage(page); |
@@ -1089,7 +1089,7 @@ retry: | |||
1089 | * no users -- drop the buddy allocator's reference. | 1089 | * no users -- drop the buddy allocator's reference. |
1090 | */ | 1090 | */ |
1091 | put_page_testzero(page); | 1091 | put_page_testzero(page); |
1092 | VM_BUG_ON(page_count(page)); | 1092 | VM_BUG_ON_PAGE(page_count(page), page); |
1093 | enqueue_huge_page(h, page); | 1093 | enqueue_huge_page(h, page); |
1094 | } | 1094 | } |
1095 | free: | 1095 | free: |
@@ -3503,7 +3503,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage) | |||
3503 | 3503 | ||
3504 | bool isolate_huge_page(struct page *page, struct list_head *list) | 3504 | bool isolate_huge_page(struct page *page, struct list_head *list) |
3505 | { | 3505 | { |
3506 | VM_BUG_ON(!PageHead(page)); | 3506 | VM_BUG_ON_PAGE(!PageHead(page), page); |
3507 | if (!get_page_unless_zero(page)) | 3507 | if (!get_page_unless_zero(page)) |
3508 | return false; | 3508 | return false; |
3509 | spin_lock(&hugetlb_lock); | 3509 | spin_lock(&hugetlb_lock); |
@@ -3514,7 +3514,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list) | |||
3514 | 3514 | ||
3515 | void putback_active_hugepage(struct page *page) | 3515 | void putback_active_hugepage(struct page *page) |
3516 | { | 3516 | { |
3517 | VM_BUG_ON(!PageHead(page)); | 3517 | VM_BUG_ON_PAGE(!PageHead(page), page); |
3518 | spin_lock(&hugetlb_lock); | 3518 | spin_lock(&hugetlb_lock); |
3519 | list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); | 3519 | list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); |
3520 | spin_unlock(&hugetlb_lock); | 3520 | spin_unlock(&hugetlb_lock); |
@@ -3523,7 +3523,7 @@ void putback_active_hugepage(struct page *page) | |||
3523 | 3523 | ||
3524 | bool is_hugepage_active(struct page *page) | 3524 | bool is_hugepage_active(struct page *page) |
3525 | { | 3525 | { |
3526 | VM_BUG_ON(!PageHuge(page)); | 3526 | VM_BUG_ON_PAGE(!PageHuge(page), page); |
3527 | /* | 3527 | /* |
3528 | * This function can be called for a tail page because the caller, | 3528 | * This function can be called for a tail page because the caller, |
3529 | * scan_movable_pages, scans through a given pfn-range which typically | 3529 | * scan_movable_pages, scans through a given pfn-range which typically |
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index d747a84e09b0..cb00829bb466 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c | |||
@@ -390,7 +390,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) | |||
390 | if (hugetlb_cgroup_disabled()) | 390 | if (hugetlb_cgroup_disabled()) |
391 | return; | 391 | return; |
392 | 392 | ||
393 | VM_BUG_ON(!PageHuge(oldhpage)); | 393 | VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage); |
394 | spin_lock(&hugetlb_lock); | 394 | spin_lock(&hugetlb_lock); |
395 | h_cg = hugetlb_cgroup_from_page(oldhpage); | 395 | h_cg = hugetlb_cgroup_from_page(oldhpage); |
396 | set_hugetlb_cgroup(oldhpage, NULL); | 396 | set_hugetlb_cgroup(oldhpage, NULL); |
diff --git a/mm/internal.h b/mm/internal.h index a346ba120e42..612c14f5e0f5 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v) | |||
27 | */ | 27 | */ |
28 | static inline void set_page_refcounted(struct page *page) | 28 | static inline void set_page_refcounted(struct page *page) |
29 | { | 29 | { |
30 | VM_BUG_ON(PageTail(page)); | 30 | VM_BUG_ON_PAGE(PageTail(page), page); |
31 | VM_BUG_ON(atomic_read(&page->_count)); | 31 | VM_BUG_ON_PAGE(atomic_read(&page->_count), page); |
32 | set_page_count(page, 1); | 32 | set_page_count(page, 1); |
33 | } | 33 | } |
34 | 34 | ||
@@ -46,7 +46,7 @@ static inline void __get_page_tail_foll(struct page *page, | |||
46 | * speculative page access (like in | 46 | * speculative page access (like in |
47 | * page_cache_get_speculative()) on tail pages. | 47 | * page_cache_get_speculative()) on tail pages. |
48 | */ | 48 | */ |
49 | VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); | 49 | VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); |
50 | if (get_page_head) | 50 | if (get_page_head) |
51 | atomic_inc(&page->first_page->_count); | 51 | atomic_inc(&page->first_page->_count); |
52 | get_huge_page_tail(page); | 52 | get_huge_page_tail(page); |
@@ -71,7 +71,7 @@ static inline void get_page_foll(struct page *page) | |||
71 | * Getting a normal page or the head of a compound page | 71 | * Getting a normal page or the head of a compound page |
72 | * requires to already have an elevated page->_count. | 72 | * requires to already have an elevated page->_count. |
73 | */ | 73 | */ |
74 | VM_BUG_ON(atomic_read(&page->_count) <= 0); | 74 | VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); |
75 | atomic_inc(&page->_count); | 75 | atomic_inc(&page->_count); |
76 | } | 76 | } |
77 | } | 77 | } |
@@ -99,6 +99,7 @@ extern void prep_compound_page(struct page *page, unsigned long order); | |||
99 | #ifdef CONFIG_MEMORY_FAILURE | 99 | #ifdef CONFIG_MEMORY_FAILURE |
100 | extern bool is_free_buddy_page(struct page *page); | 100 | extern bool is_free_buddy_page(struct page *page); |
101 | #endif | 101 | #endif |
102 | extern int user_min_free_kbytes; | ||
102 | 103 | ||
103 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | 104 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
104 | 105 | ||
@@ -142,9 +143,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
142 | #endif | 143 | #endif |
143 | 144 | ||
144 | /* | 145 | /* |
145 | * function for dealing with page's order in buddy system. | 146 | * This function returns the order of a free page in the buddy system. In |
146 | * zone->lock is already acquired when we use these. | 147 | * general, page_zone(page)->lock must be held by the caller to prevent the |
147 | * So, we don't need atomic page->flags operations here. | 148 | * page from being allocated in parallel and returning garbage as the order. |
149 | * If a caller does not hold page_zone(page)->lock, it must guarantee that the | ||
150 | * page cannot be allocated or merged in parallel. | ||
148 | */ | 151 | */ |
149 | static inline unsigned long page_order(struct page *page) | 152 | static inline unsigned long page_order(struct page *page) |
150 | { | 153 | { |
@@ -173,7 +176,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma) | |||
173 | static inline int mlocked_vma_newpage(struct vm_area_struct *vma, | 176 | static inline int mlocked_vma_newpage(struct vm_area_struct *vma, |
174 | struct page *page) | 177 | struct page *page) |
175 | { | 178 | { |
176 | VM_BUG_ON(PageLRU(page)); | 179 | VM_BUG_ON_PAGE(PageLRU(page), page); |
177 | 180 | ||
178 | if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) | 181 | if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) |
179 | return 0; | 182 | return 0; |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -1898,13 +1898,13 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) | |||
1898 | int ret = SWAP_AGAIN; | 1898 | int ret = SWAP_AGAIN; |
1899 | int search_new_forks = 0; | 1899 | int search_new_forks = 0; |
1900 | 1900 | ||
1901 | VM_BUG_ON(!PageKsm(page)); | 1901 | VM_BUG_ON_PAGE(!PageKsm(page), page); |
1902 | 1902 | ||
1903 | /* | 1903 | /* |
1904 | * Rely on the page lock to protect against concurrent modifications | 1904 | * Rely on the page lock to protect against concurrent modifications |
1905 | * to that page's node of the stable tree. | 1905 | * to that page's node of the stable tree. |
1906 | */ | 1906 | */ |
1907 | VM_BUG_ON(!PageLocked(page)); | 1907 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
1908 | 1908 | ||
1909 | stable_node = page_stable_node(page); | 1909 | stable_node = page_stable_node(page); |
1910 | if (!stable_node) | 1910 | if (!stable_node) |
@@ -1958,13 +1958,13 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage) | |||
1958 | { | 1958 | { |
1959 | struct stable_node *stable_node; | 1959 | struct stable_node *stable_node; |
1960 | 1960 | ||
1961 | VM_BUG_ON(!PageLocked(oldpage)); | 1961 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); |
1962 | VM_BUG_ON(!PageLocked(newpage)); | 1962 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); |
1963 | VM_BUG_ON(newpage->mapping != oldpage->mapping); | 1963 | VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); |
1964 | 1964 | ||
1965 | stable_node = page_stable_node(newpage); | 1965 | stable_node = page_stable_node(newpage); |
1966 | if (stable_node) { | 1966 | if (stable_node) { |
1967 | VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); | 1967 | VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); |
1968 | stable_node->kpfn = page_to_pfn(newpage); | 1968 | stable_node->kpfn = page_to_pfn(newpage); |
1969 | /* | 1969 | /* |
1970 | * newpage->mapping was set in advance; now we need smp_wmb() | 1970 | * newpage->mapping was set in advance; now we need smp_wmb() |
@@ -2345,4 +2345,4 @@ out_free: | |||
2345 | out: | 2345 | out: |
2346 | return err; | 2346 | return err; |
2347 | } | 2347 | } |
2348 | module_init(ksm_init) | 2348 | subsys_initcall(ksm_init); |
diff --git a/mm/memblock.c b/mm/memblock.c index 1c2ef2c7edab..9c0aeef19440 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -266,31 +266,34 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u | |||
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK | ||
270 | |||
269 | phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( | 271 | phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( |
270 | phys_addr_t *addr) | 272 | phys_addr_t *addr) |
271 | { | 273 | { |
272 | if (memblock.reserved.regions == memblock_reserved_init_regions) | 274 | if (memblock.reserved.regions == memblock_reserved_init_regions) |
273 | return 0; | 275 | return 0; |
274 | 276 | ||
275 | /* | ||
276 | * Don't allow nobootmem allocator to free reserved memory regions | ||
277 | * array if | ||
278 | * - CONFIG_DEBUG_FS is enabled; | ||
279 | * - CONFIG_ARCH_DISCARD_MEMBLOCK is not enabled; | ||
280 | * - reserved memory regions array have been resized during boot. | ||
281 | * Otherwise debug_fs entry "sys/kernel/debug/memblock/reserved" | ||
282 | * will show garbage instead of state of memory reservations. | ||
283 | */ | ||
284 | if (IS_ENABLED(CONFIG_DEBUG_FS) && | ||
285 | !IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK)) | ||
286 | return 0; | ||
287 | |||
288 | *addr = __pa(memblock.reserved.regions); | 277 | *addr = __pa(memblock.reserved.regions); |
289 | 278 | ||
290 | return PAGE_ALIGN(sizeof(struct memblock_region) * | 279 | return PAGE_ALIGN(sizeof(struct memblock_region) * |
291 | memblock.reserved.max); | 280 | memblock.reserved.max); |
292 | } | 281 | } |
293 | 282 | ||
283 | phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( | ||
284 | phys_addr_t *addr) | ||
285 | { | ||
286 | if (memblock.memory.regions == memblock_memory_init_regions) | ||
287 | return 0; | ||
288 | |||
289 | *addr = __pa(memblock.memory.regions); | ||
290 | |||
291 | return PAGE_ALIGN(sizeof(struct memblock_region) * | ||
292 | memblock.memory.max); | ||
293 | } | ||
294 | |||
295 | #endif | ||
296 | |||
294 | /** | 297 | /** |
295 | * memblock_double_array - double the size of the memblock regions array | 298 | * memblock_double_array - double the size of the memblock regions array |
296 | * @type: memblock type of the regions array being doubled | 299 | * @type: memblock type of the regions array being doubled |
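The new memory-regions helper mirrors the reserved-regions one so that, once boot is finished, both dynamically allocated memblock arrays can be handed back to the page allocator. A hedged sketch of a caller; the real nobootmem code may differ, and free_reserved_area()/__va() are the only APIs assumed here:

#include <linux/memblock.h>
#include <linux/mm.h>

static void __init free_memblock_arrays(void)
{
	phys_addr_t addr, size;

	size = get_allocated_memblock_memory_regions_info(&addr);
	if (size)
		free_reserved_area(__va(addr), __va(addr + size), -1, "memblock");

	size = get_allocated_memblock_reserved_regions_info(&addr);
	if (size)
		free_reserved_area(__va(addr), __va(addr + size), -1, "memblock");
}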
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 67dd2a881433..19d5d4274e22 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/sort.h> | 49 | #include <linux/sort.h> |
50 | #include <linux/fs.h> | 50 | #include <linux/fs.h> |
51 | #include <linux/seq_file.h> | 51 | #include <linux/seq_file.h> |
52 | #include <linux/vmalloc.h> | ||
53 | #include <linux/vmpressure.h> | 52 | #include <linux/vmpressure.h> |
54 | #include <linux/mm_inline.h> | 53 | #include <linux/mm_inline.h> |
55 | #include <linux/page_cgroup.h> | 54 | #include <linux/page_cgroup.h> |
@@ -150,7 +149,7 @@ struct mem_cgroup_reclaim_iter { | |||
150 | * matches memcg->dead_count of the hierarchy root group. | 149 | * matches memcg->dead_count of the hierarchy root group. |
151 | */ | 150 | */ |
152 | struct mem_cgroup *last_visited; | 151 | struct mem_cgroup *last_visited; |
153 | unsigned long last_dead_count; | 152 | int last_dead_count; |
154 | 153 | ||
155 | /* scan generation, increased every round-trip */ | 154 | /* scan generation, increased every round-trip */ |
156 | unsigned int generation; | 155 | unsigned int generation; |
@@ -381,23 +380,12 @@ struct mem_cgroup { | |||
381 | /* WARNING: nodeinfo must be the last member here */ | 380 | /* WARNING: nodeinfo must be the last member here */ |
382 | }; | 381 | }; |
383 | 382 | ||
384 | static size_t memcg_size(void) | ||
385 | { | ||
386 | return sizeof(struct mem_cgroup) + | ||
387 | nr_node_ids * sizeof(struct mem_cgroup_per_node *); | ||
388 | } | ||
389 | |||
390 | /* internal only representation about the status of kmem accounting. */ | 383 | /* internal only representation about the status of kmem accounting. */ |
391 | enum { | 384 | enum { |
392 | KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ | 385 | KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */ |
393 | KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */ | ||
394 | KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ | 386 | KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ |
395 | }; | 387 | }; |
396 | 388 | ||
397 | /* We account when limit is on, but only after call sites are patched */ | ||
398 | #define KMEM_ACCOUNTED_MASK \ | ||
399 | ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED)) | ||
400 | |||
401 | #ifdef CONFIG_MEMCG_KMEM | 389 | #ifdef CONFIG_MEMCG_KMEM |
402 | static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) | 390 | static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) |
403 | { | 391 | { |
@@ -409,16 +397,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) | |||
409 | return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); | 397 | return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); |
410 | } | 398 | } |
411 | 399 | ||
412 | static void memcg_kmem_set_activated(struct mem_cgroup *memcg) | ||
413 | { | ||
414 | set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); | ||
415 | } | ||
416 | |||
417 | static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) | ||
418 | { | ||
419 | clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); | ||
420 | } | ||
421 | |||
422 | static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) | 400 | static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) |
423 | { | 401 | { |
424 | /* | 402 | /* |
@@ -1139,16 +1117,22 @@ skip_node: | |||
1139 | * skipped and we should continue the tree walk. | 1117 | * skipped and we should continue the tree walk. |
1140 | * last_visited css is safe to use because it is | 1118 | * last_visited css is safe to use because it is |
1141 | * protected by css_get and the tree walk is rcu safe. | 1119 | * protected by css_get and the tree walk is rcu safe. |
1120 | * | ||
1121 | * We do not take a reference on the root of the tree walk | ||
1122 | * because we might race with the root removal when it would | ||
1123 | * be the only node in the iterated hierarchy and mem_cgroup_iter | ||
1124 | * would end up in an endless loop because it expects that at | ||
1125 | * least one valid node will be returned. Root cannot disappear | ||
1126 | * because the caller of the iterator should already hold it, so | ||
1127 | * skipping the css reference should be safe. | ||
1142 | */ | 1128 | */ |
1143 | if (next_css) { | 1129 | if (next_css) { |
1144 | struct mem_cgroup *mem = mem_cgroup_from_css(next_css); | 1130 | if ((next_css->flags & CSS_ONLINE) && |
1131 | (next_css == &root->css || css_tryget(next_css))) | ||
1132 | return mem_cgroup_from_css(next_css); | ||
1145 | 1133 | ||
1146 | if (css_tryget(&mem->css)) | 1134 | prev_css = next_css; |
1147 | return mem; | 1135 | goto skip_node; |
1148 | else { | ||
1149 | prev_css = next_css; | ||
1150 | goto skip_node; | ||
1151 | } | ||
1152 | } | 1136 | } |
1153 | 1137 | ||
1154 | return NULL; | 1138 | return NULL; |
@@ -1182,7 +1166,15 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, | |||
1182 | if (iter->last_dead_count == *sequence) { | 1166 | if (iter->last_dead_count == *sequence) { |
1183 | smp_rmb(); | 1167 | smp_rmb(); |
1184 | position = iter->last_visited; | 1168 | position = iter->last_visited; |
1185 | if (position && !css_tryget(&position->css)) | 1169 | |
1170 | /* | ||
1171 | * We cannot take a reference to root because we might race | ||
1172 | * with root removal, and returning NULL would end up in | ||
1173 | * an endless loop at the iterator user level because root | ||
1174 | * would be returned all the time. | ||
1175 | */ | ||
1176 | if (position && position != root && | ||
1177 | !css_tryget(&position->css)) | ||
1186 | position = NULL; | 1178 | position = NULL; |
1187 | } | 1179 | } |
1188 | return position; | 1180 | return position; |
@@ -1191,9 +1183,11 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, | |||
1191 | static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, | 1183 | static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, |
1192 | struct mem_cgroup *last_visited, | 1184 | struct mem_cgroup *last_visited, |
1193 | struct mem_cgroup *new_position, | 1185 | struct mem_cgroup *new_position, |
1186 | struct mem_cgroup *root, | ||
1194 | int sequence) | 1187 | int sequence) |
1195 | { | 1188 | { |
1196 | if (last_visited) | 1189 | /* root reference counting symmetric to mem_cgroup_iter_load */ |
1190 | if (last_visited && last_visited != root) | ||
1197 | css_put(&last_visited->css); | 1191 | css_put(&last_visited->css); |
1198 | /* | 1192 | /* |
1199 | * We store the sequence count from the time @last_visited was | 1193 | * We store the sequence count from the time @last_visited was |
@@ -1268,7 +1262,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, | |||
1268 | memcg = __mem_cgroup_iter_next(root, last_visited); | 1262 | memcg = __mem_cgroup_iter_next(root, last_visited); |
1269 | 1263 | ||
1270 | if (reclaim) { | 1264 | if (reclaim) { |
1271 | mem_cgroup_iter_update(iter, last_visited, memcg, seq); | 1265 | mem_cgroup_iter_update(iter, last_visited, memcg, root, |
1266 | seq); | ||
1272 | 1267 | ||
1273 | if (!memcg) | 1268 | if (!memcg) |
1274 | iter->generation++; | 1269 | iter->generation++; |
@@ -1865,13 +1860,18 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, | |||
1865 | break; | 1860 | break; |
1866 | }; | 1861 | }; |
1867 | points = oom_badness(task, memcg, NULL, totalpages); | 1862 | points = oom_badness(task, memcg, NULL, totalpages); |
1868 | if (points > chosen_points) { | 1863 | if (!points || points < chosen_points) |
1869 | if (chosen) | 1864 | continue; |
1870 | put_task_struct(chosen); | 1865 | /* Prefer thread group leaders for display purposes */ |
1871 | chosen = task; | 1866 | if (points == chosen_points && |
1872 | chosen_points = points; | 1867 | thread_group_leader(chosen)) |
1873 | get_task_struct(chosen); | 1868 | continue; |
1874 | } | 1869 | |
1870 | if (chosen) | ||
1871 | put_task_struct(chosen); | ||
1872 | chosen = task; | ||
1873 | chosen_points = points; | ||
1874 | get_task_struct(chosen); | ||
1875 | } | 1875 | } |
1876 | css_task_iter_end(&it); | 1876 | css_task_iter_end(&it); |
1877 | } | 1877 | } |
@@ -2904,7 +2904,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | |||
2904 | unsigned short id; | 2904 | unsigned short id; |
2905 | swp_entry_t ent; | 2905 | swp_entry_t ent; |
2906 | 2906 | ||
2907 | VM_BUG_ON(!PageLocked(page)); | 2907 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
2908 | 2908 | ||
2909 | pc = lookup_page_cgroup(page); | 2909 | pc = lookup_page_cgroup(page); |
2910 | lock_page_cgroup(pc); | 2910 | lock_page_cgroup(pc); |
@@ -2938,7 +2938,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, | |||
2938 | bool anon; | 2938 | bool anon; |
2939 | 2939 | ||
2940 | lock_page_cgroup(pc); | 2940 | lock_page_cgroup(pc); |
2941 | VM_BUG_ON(PageCgroupUsed(pc)); | 2941 | VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); |
2942 | /* | 2942 | /* |
2943 | * we don't need page_cgroup_lock about tail pages, because they are not | 2943 | * we don't need page_cgroup_lock about tail pages, because they are not |
2944 | * accessed by any other context at this point. | 2944 | * accessed by any other context at this point. |
@@ -2973,7 +2973,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, | |||
2973 | if (lrucare) { | 2973 | if (lrucare) { |
2974 | if (was_on_lru) { | 2974 | if (was_on_lru) { |
2975 | lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); | 2975 | lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); |
2976 | VM_BUG_ON(PageLRU(page)); | 2976 | VM_BUG_ON_PAGE(PageLRU(page), page); |
2977 | SetPageLRU(page); | 2977 | SetPageLRU(page); |
2978 | add_page_to_lru_list(page, lruvec, page_lru(page)); | 2978 | add_page_to_lru_list(page, lruvec, page_lru(page)); |
2979 | } | 2979 | } |
@@ -2999,11 +2999,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, | |||
2999 | static DEFINE_MUTEX(set_limit_mutex); | 2999 | static DEFINE_MUTEX(set_limit_mutex); |
3000 | 3000 | ||
3001 | #ifdef CONFIG_MEMCG_KMEM | 3001 | #ifdef CONFIG_MEMCG_KMEM |
3002 | static DEFINE_MUTEX(activate_kmem_mutex); | ||
3003 | |||
3002 | static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) | 3004 | static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) |
3003 | { | 3005 | { |
3004 | return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && | 3006 | return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && |
3005 | (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK) == | 3007 | memcg_kmem_is_active(memcg); |
3006 | KMEM_ACCOUNTED_MASK; | ||
3007 | } | 3008 | } |
3008 | 3009 | ||
3009 | /* | 3010 | /* |
@@ -3102,16 +3103,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) | |||
3102 | css_put(&memcg->css); | 3103 | css_put(&memcg->css); |
3103 | } | 3104 | } |
3104 | 3105 | ||
3105 | void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) | ||
3106 | { | ||
3107 | if (!memcg) | ||
3108 | return; | ||
3109 | |||
3110 | mutex_lock(&memcg->slab_caches_mutex); | ||
3111 | list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); | ||
3112 | mutex_unlock(&memcg->slab_caches_mutex); | ||
3113 | } | ||
3114 | |||
3115 | /* | 3106 | /* |
3116 | * helper for accessing a memcg's index. It will be used as an index in the | 3107 | * helper for accessing a memcg's index. It will be used as an index in the |
3117 | * child cache array in kmem_cache, and also to derive its name. This function | 3108 | * child cache array in kmem_cache, and also to derive its name. This function |
@@ -3122,43 +3113,6 @@ int memcg_cache_id(struct mem_cgroup *memcg) | |||
3122 | return memcg ? memcg->kmemcg_id : -1; | 3113 | return memcg ? memcg->kmemcg_id : -1; |
3123 | } | 3114 | } |
3124 | 3115 | ||
3125 | /* | ||
3126 | * This ends up being protected by the set_limit mutex, during normal | ||
3127 | * operation, because that is its main call site. | ||
3128 | * | ||
3129 | * But when we create a new cache, we can call this as well if its parent | ||
3130 | * is kmem-limited. That will have to hold set_limit_mutex as well. | ||
3131 | */ | ||
3132 | static int memcg_update_cache_sizes(struct mem_cgroup *memcg) | ||
3133 | { | ||
3134 | int num, ret; | ||
3135 | |||
3136 | num = ida_simple_get(&kmem_limited_groups, | ||
3137 | 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); | ||
3138 | if (num < 0) | ||
3139 | return num; | ||
3140 | /* | ||
3141 | * After this point, kmem_accounted (that we test atomically in | ||
3142 | * the beginning of this conditional), is no longer 0. This | ||
3143 | * guarantees only one process will set the following boolean | ||
3144 | * to true. We don't need test_and_set because we're protected | ||
3145 | * by the set_limit_mutex anyway. | ||
3146 | */ | ||
3147 | memcg_kmem_set_activated(memcg); | ||
3148 | |||
3149 | ret = memcg_update_all_caches(num+1); | ||
3150 | if (ret) { | ||
3151 | ida_simple_remove(&kmem_limited_groups, num); | ||
3152 | memcg_kmem_clear_activated(memcg); | ||
3153 | return ret; | ||
3154 | } | ||
3155 | |||
3156 | memcg->kmemcg_id = num; | ||
3157 | INIT_LIST_HEAD(&memcg->memcg_slab_caches); | ||
3158 | mutex_init(&memcg->slab_caches_mutex); | ||
3159 | return 0; | ||
3160 | } | ||
3161 | |||
3162 | static size_t memcg_caches_array_size(int num_groups) | 3116 | static size_t memcg_caches_array_size(int num_groups) |
3163 | { | 3117 | { |
3164 | ssize_t size; | 3118 | ssize_t size; |
@@ -3195,18 +3149,17 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) | |||
3195 | 3149 | ||
3196 | if (num_groups > memcg_limited_groups_array_size) { | 3150 | if (num_groups > memcg_limited_groups_array_size) { |
3197 | int i; | 3151 | int i; |
3152 | struct memcg_cache_params *new_params; | ||
3198 | ssize_t size = memcg_caches_array_size(num_groups); | 3153 | ssize_t size = memcg_caches_array_size(num_groups); |
3199 | 3154 | ||
3200 | size *= sizeof(void *); | 3155 | size *= sizeof(void *); |
3201 | size += offsetof(struct memcg_cache_params, memcg_caches); | 3156 | size += offsetof(struct memcg_cache_params, memcg_caches); |
3202 | 3157 | ||
3203 | s->memcg_params = kzalloc(size, GFP_KERNEL); | 3158 | new_params = kzalloc(size, GFP_KERNEL); |
3204 | if (!s->memcg_params) { | 3159 | if (!new_params) |
3205 | s->memcg_params = cur_params; | ||
3206 | return -ENOMEM; | 3160 | return -ENOMEM; |
3207 | } | ||
3208 | 3161 | ||
3209 | s->memcg_params->is_root_cache = true; | 3162 | new_params->is_root_cache = true; |
3210 | 3163 | ||
3211 | /* | 3164 | /* |
3212 | * There is the chance it will be bigger than | 3165 | * There is the chance it will be bigger than |
@@ -3220,7 +3173,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) | |||
3220 | for (i = 0; i < memcg_limited_groups_array_size; i++) { | 3173 | for (i = 0; i < memcg_limited_groups_array_size; i++) { |
3221 | if (!cur_params->memcg_caches[i]) | 3174 | if (!cur_params->memcg_caches[i]) |
3222 | continue; | 3175 | continue; |
3223 | s->memcg_params->memcg_caches[i] = | 3176 | new_params->memcg_caches[i] = |
3224 | cur_params->memcg_caches[i]; | 3177 | cur_params->memcg_caches[i]; |
3225 | } | 3178 | } |
3226 | 3179 | ||
@@ -3233,13 +3186,15 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) | |||
3233 | * bigger than the others. And all updates will reset this | 3186 | * bigger than the others. And all updates will reset this |
3234 | * anyway. | 3187 | * anyway. |
3235 | */ | 3188 | */ |
3236 | kfree(cur_params); | 3189 | rcu_assign_pointer(s->memcg_params, new_params); |
3190 | if (cur_params) | ||
3191 | kfree_rcu(cur_params, rcu_head); | ||
3237 | } | 3192 | } |
3238 | return 0; | 3193 | return 0; |
3239 | } | 3194 | } |
3240 | 3195 | ||
3241 | int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | 3196 | int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, |
3242 | struct kmem_cache *root_cache) | 3197 | struct kmem_cache *root_cache) |
3243 | { | 3198 | { |
3244 | size_t size; | 3199 | size_t size; |
3245 | 3200 | ||
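The resize above switches from freeing the old memcg_caches array immediately to an RCU publish-and-retire: the enlarged copy is made visible with rcu_assign_pointer() and the old copy is only freed after a grace period, so lockless readers never dereference freed memory. The general pattern, on an illustrative struct that embeds its own rcu_head:

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct params {
                struct rcu_head rcu_head;
                int nr_entries;
                void *entries[];                /* grown on each update */
        };

        static void publish_resized(struct params __rcu **slot,
                                    struct params *new, struct params *old)
        {
                /* @new must be fully initialized before this store */
                rcu_assign_pointer(*slot, new);
                if (old)
                        kfree_rcu(old, rcu_head);       /* reclaimed after a grace period */
        }

Readers pair with this by loading the pointer with rcu_dereference() inside an RCU read-side critical section.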
@@ -3267,35 +3222,85 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
3267 | return 0; | 3222 | return 0; |
3268 | } | 3223 | } |
3269 | 3224 | ||
3270 | void memcg_release_cache(struct kmem_cache *s) | 3225 | void memcg_free_cache_params(struct kmem_cache *s) |
3226 | { | ||
3227 | kfree(s->memcg_params); | ||
3228 | } | ||
3229 | |||
3230 | void memcg_register_cache(struct kmem_cache *s) | ||
3271 | { | 3231 | { |
3272 | struct kmem_cache *root; | 3232 | struct kmem_cache *root; |
3273 | struct mem_cgroup *memcg; | 3233 | struct mem_cgroup *memcg; |
3274 | int id; | 3234 | int id; |
3275 | 3235 | ||
3276 | /* | 3236 | if (is_root_cache(s)) |
3277 | * This happens, for instance, when a root cache goes away before we | ||
3278 | * add any memcg. | ||
3279 | */ | ||
3280 | if (!s->memcg_params) | ||
3281 | return; | 3237 | return; |
3282 | 3238 | ||
3283 | if (s->memcg_params->is_root_cache) | 3239 | /* |
3284 | goto out; | 3240 | * Holding the slab_mutex assures nobody will touch the memcg_caches |
3241 | * array while we are modifying it. | ||
3242 | */ | ||
3243 | lockdep_assert_held(&slab_mutex); | ||
3285 | 3244 | ||
3245 | root = s->memcg_params->root_cache; | ||
3286 | memcg = s->memcg_params->memcg; | 3246 | memcg = s->memcg_params->memcg; |
3287 | id = memcg_cache_id(memcg); | 3247 | id = memcg_cache_id(memcg); |
3248 | |||
3249 | css_get(&memcg->css); | ||
3250 | |||
3251 | |||
3252 | /* | ||
3253 | * Since readers won't lock (see cache_from_memcg_idx()), we need a | ||
3254 | * barrier here to ensure nobody will see the kmem_cache partially | ||
3255 | * initialized. | ||
3256 | */ | ||
3257 | smp_wmb(); | ||
3258 | |||
3259 | /* | ||
3260 | * Initialize the pointer to this cache in its parent's memcg_params | ||
3261 | * before adding it to the memcg_slab_caches list, otherwise we can | ||
3262 | * fail to convert memcg_params_to_cache() while traversing the list. | ||
3263 | */ | ||
3264 | VM_BUG_ON(root->memcg_params->memcg_caches[id]); | ||
3265 | root->memcg_params->memcg_caches[id] = s; | ||
3266 | |||
3267 | mutex_lock(&memcg->slab_caches_mutex); | ||
3268 | list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); | ||
3269 | mutex_unlock(&memcg->slab_caches_mutex); | ||
3270 | } | ||
3271 | |||
3272 | void memcg_unregister_cache(struct kmem_cache *s) | ||
3273 | { | ||
3274 | struct kmem_cache *root; | ||
3275 | struct mem_cgroup *memcg; | ||
3276 | int id; | ||
3277 | |||
3278 | if (is_root_cache(s)) | ||
3279 | return; | ||
3280 | |||
3281 | /* | ||
3282 | * Holding the slab_mutex assures nobody will touch the memcg_caches | ||
3283 | * array while we are modifying it. | ||
3284 | */ | ||
3285 | lockdep_assert_held(&slab_mutex); | ||
3288 | 3286 | ||
3289 | root = s->memcg_params->root_cache; | 3287 | root = s->memcg_params->root_cache; |
3290 | root->memcg_params->memcg_caches[id] = NULL; | 3288 | memcg = s->memcg_params->memcg; |
3289 | id = memcg_cache_id(memcg); | ||
3291 | 3290 | ||
3292 | mutex_lock(&memcg->slab_caches_mutex); | 3291 | mutex_lock(&memcg->slab_caches_mutex); |
3293 | list_del(&s->memcg_params->list); | 3292 | list_del(&s->memcg_params->list); |
3294 | mutex_unlock(&memcg->slab_caches_mutex); | 3293 | mutex_unlock(&memcg->slab_caches_mutex); |
3295 | 3294 | ||
3295 | /* | ||
3296 | * Clear the pointer to this cache in its parent's memcg_params only | ||
3297 | * after removing it from the memcg_slab_caches list, otherwise we can | ||
3298 | * fail to convert memcg_params_to_cache() while traversing the list. | ||
3299 | */ | ||
3300 | VM_BUG_ON(!root->memcg_params->memcg_caches[id]); | ||
3301 | root->memcg_params->memcg_caches[id] = NULL; | ||
3302 | |||
3296 | css_put(&memcg->css); | 3303 | css_put(&memcg->css); |
3297 | out: | ||
3298 | kfree(s->memcg_params); | ||
3299 | } | 3304 | } |
3300 | 3305 | ||
3301 | /* | 3306 | /* |
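memcg_register_cache() and memcg_unregister_cache() above are deliberate mirror images: registration initializes everything, issues smp_wmb(), stores the cache pointer into the parent's memcg_caches[] slot, and only then links it onto the per-memcg list; unregistration unlinks first and clears the slot last, with both sides asserting slab_mutex. Condensed to the ordering that matters (a sketch against the file's own helpers, not a drop-in replacement):

        /* Publish: everything the reader may touch is written before the pointer. */
        static void publish_child(struct kmem_cache *root, struct kmem_cache *s, int id)
        {
                lockdep_assert_held(&slab_mutex);
                smp_wmb();                      /* order initialization before the store below */
                root->memcg_params->memcg_caches[id] = s;
        }

        /* Unpublish: clear the pointer only after the cache has left all lists. */
        static void unpublish_child(struct kmem_cache *root, int id)
        {
                lockdep_assert_held(&slab_mutex);
                root->memcg_params->memcg_caches[id] = NULL;
        }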
@@ -3354,11 +3359,9 @@ static void kmem_cache_destroy_work_func(struct work_struct *w) | |||
3354 | * So if we aren't down to zero, we'll just schedule a worker and try | 3359 | * So if we aren't down to zero, we'll just schedule a worker and try |
3355 | * again | 3360 | * again |
3356 | */ | 3361 | */ |
3357 | if (atomic_read(&cachep->memcg_params->nr_pages) != 0) { | 3362 | if (atomic_read(&cachep->memcg_params->nr_pages) != 0) |
3358 | kmem_cache_shrink(cachep); | 3363 | kmem_cache_shrink(cachep); |
3359 | if (atomic_read(&cachep->memcg_params->nr_pages) == 0) | 3364 | else |
3360 | return; | ||
3361 | } else | ||
3362 | kmem_cache_destroy(cachep); | 3365 | kmem_cache_destroy(cachep); |
3363 | } | 3366 | } |
3364 | 3367 | ||
@@ -3394,27 +3397,16 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep) | |||
3394 | schedule_work(&cachep->memcg_params->destroy); | 3397 | schedule_work(&cachep->memcg_params->destroy); |
3395 | } | 3398 | } |
3396 | 3399 | ||
3397 | /* | 3400 | static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, |
3398 | * This lock protects updaters, not readers. We want readers to be as fast as | 3401 | struct kmem_cache *s) |
3399 | * they can, and they will either see NULL or a valid cache value. Our model | ||
3400 | * allows them to see NULL, in which case the root memcg will be selected. | ||
3401 | * | ||
3402 | * We need this lock because multiple allocations to the same cache from a non-root memcg | ||
3403 | * will span more than one worker. Only one of them can create the cache. | ||
3404 | */ | ||
3405 | static DEFINE_MUTEX(memcg_cache_mutex); | ||
3406 | |||
3407 | /* | ||
3408 | * Called with memcg_cache_mutex held | ||
3409 | */ | ||
3410 | static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, | ||
3411 | struct kmem_cache *s) | ||
3412 | { | 3402 | { |
3413 | struct kmem_cache *new; | 3403 | struct kmem_cache *new; |
3414 | static char *tmp_name = NULL; | 3404 | static char *tmp_name = NULL; |
3405 | static DEFINE_MUTEX(mutex); /* protects tmp_name */ | ||
3415 | 3406 | ||
3416 | lockdep_assert_held(&memcg_cache_mutex); | 3407 | BUG_ON(!memcg_can_account_kmem(memcg)); |
3417 | 3408 | ||
3409 | mutex_lock(&mutex); | ||
3418 | /* | 3410 | /* |
3419 | * kmem_cache_create_memcg duplicates the given name and | 3411 | * kmem_cache_create_memcg duplicates the given name and |
3420 | * cgroup_name for this name requires RCU context. | 3412 | * cgroup_name for this name requires RCU context. |
@@ -3437,47 +3429,13 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, | |||
3437 | 3429 | ||
3438 | if (new) | 3430 | if (new) |
3439 | new->allocflags |= __GFP_KMEMCG; | 3431 | new->allocflags |= __GFP_KMEMCG; |
3432 | else | ||
3433 | new = s; | ||
3440 | 3434 | ||
3435 | mutex_unlock(&mutex); | ||
3441 | return new; | 3436 | return new; |
3442 | } | 3437 | } |
3443 | 3438 | ||
3444 | static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, | ||
3445 | struct kmem_cache *cachep) | ||
3446 | { | ||
3447 | struct kmem_cache *new_cachep; | ||
3448 | int idx; | ||
3449 | |||
3450 | BUG_ON(!memcg_can_account_kmem(memcg)); | ||
3451 | |||
3452 | idx = memcg_cache_id(memcg); | ||
3453 | |||
3454 | mutex_lock(&memcg_cache_mutex); | ||
3455 | new_cachep = cache_from_memcg_idx(cachep, idx); | ||
3456 | if (new_cachep) { | ||
3457 | css_put(&memcg->css); | ||
3458 | goto out; | ||
3459 | } | ||
3460 | |||
3461 | new_cachep = kmem_cache_dup(memcg, cachep); | ||
3462 | if (new_cachep == NULL) { | ||
3463 | new_cachep = cachep; | ||
3464 | css_put(&memcg->css); | ||
3465 | goto out; | ||
3466 | } | ||
3467 | |||
3468 | atomic_set(&new_cachep->memcg_params->nr_pages , 0); | ||
3469 | |||
3470 | cachep->memcg_params->memcg_caches[idx] = new_cachep; | ||
3471 | /* | ||
3472 | * the readers won't lock, make sure everybody sees the updated value, | ||
3473 | * so they won't put stuff in the queue again for no reason | ||
3474 | */ | ||
3475 | wmb(); | ||
3476 | out: | ||
3477 | mutex_unlock(&memcg_cache_mutex); | ||
3478 | return new_cachep; | ||
3479 | } | ||
3480 | |||
3481 | void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | 3439 | void kmem_cache_destroy_memcg_children(struct kmem_cache *s) |
3482 | { | 3440 | { |
3483 | struct kmem_cache *c; | 3441 | struct kmem_cache *c; |
@@ -3495,9 +3453,10 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | |||
3495 | * | 3453 | * |
3496 | * Still, we don't want anyone else freeing memcg_caches under our | 3454 | * Still, we don't want anyone else freeing memcg_caches under our |
3497 | * noses, which can happen if a new memcg comes to life. As usual, | 3455 | * noses, which can happen if a new memcg comes to life. As usual, |
3498 | * we'll take the set_limit_mutex to protect ourselves against this. | 3456 | * we'll take the activate_kmem_mutex to protect ourselves against |
3457 | * this. | ||
3499 | */ | 3458 | */ |
3500 | mutex_lock(&set_limit_mutex); | 3459 | mutex_lock(&activate_kmem_mutex); |
3501 | for_each_memcg_cache_index(i) { | 3460 | for_each_memcg_cache_index(i) { |
3502 | c = cache_from_memcg_idx(s, i); | 3461 | c = cache_from_memcg_idx(s, i); |
3503 | if (!c) | 3462 | if (!c) |
@@ -3520,7 +3479,7 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | |||
3520 | cancel_work_sync(&c->memcg_params->destroy); | 3479 | cancel_work_sync(&c->memcg_params->destroy); |
3521 | kmem_cache_destroy(c); | 3480 | kmem_cache_destroy(c); |
3522 | } | 3481 | } |
3523 | mutex_unlock(&set_limit_mutex); | 3482 | mutex_unlock(&activate_kmem_mutex); |
3524 | } | 3483 | } |
3525 | 3484 | ||
3526 | struct create_work { | 3485 | struct create_work { |
@@ -3552,6 +3511,7 @@ static void memcg_create_cache_work_func(struct work_struct *w) | |||
3552 | 3511 | ||
3553 | cw = container_of(w, struct create_work, work); | 3512 | cw = container_of(w, struct create_work, work); |
3554 | memcg_create_kmem_cache(cw->memcg, cw->cachep); | 3513 | memcg_create_kmem_cache(cw->memcg, cw->cachep); |
3514 | css_put(&cw->memcg->css); | ||
3555 | kfree(cw); | 3515 | kfree(cw); |
3556 | } | 3516 | } |
3557 | 3517 | ||
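With the creation path reworked, the css reference taken when the work item is queued is dropped by the worker itself, which is what the added css_put() does. The hand-off pattern, using the create_work fields the worker dereferences (the struct is abbreviated to what this function needs):

        struct create_work {
                struct mem_cgroup *memcg;       /* holds a css reference while queued */
                struct kmem_cache *cachep;
                struct work_struct work;
        };

        static void create_work_func(struct work_struct *w)
        {
                struct create_work *cw = container_of(w, struct create_work, work);

                memcg_create_kmem_cache(cw->memcg, cw->cachep);
                css_put(&cw->memcg->css);       /* release the reference taken at queue time */
                kfree(cw);
        }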
@@ -3611,7 +3571,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, | |||
3611 | gfp_t gfp) | 3571 | gfp_t gfp) |
3612 | { | 3572 | { |
3613 | struct mem_cgroup *memcg; | 3573 | struct mem_cgroup *memcg; |
3614 | int idx; | 3574 | struct kmem_cache *memcg_cachep; |
3615 | 3575 | ||
3616 | VM_BUG_ON(!cachep->memcg_params); | 3576 | VM_BUG_ON(!cachep->memcg_params); |
3617 | VM_BUG_ON(!cachep->memcg_params->is_root_cache); | 3577 | VM_BUG_ON(!cachep->memcg_params->is_root_cache); |
@@ -3625,15 +3585,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, | |||
3625 | if (!memcg_can_account_kmem(memcg)) | 3585 | if (!memcg_can_account_kmem(memcg)) |
3626 | goto out; | 3586 | goto out; |
3627 | 3587 | ||
3628 | idx = memcg_cache_id(memcg); | 3588 | memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg)); |
3629 | 3589 | if (likely(memcg_cachep)) { | |
3630 | /* | 3590 | cachep = memcg_cachep; |
3631 | * barrier to make sure we're always seeing the up-to-date value. The | ||
3632 | * code updating memcg_caches will issue a write barrier to match this. | ||
3633 | */ | ||
3634 | read_barrier_depends(); | ||
3635 | if (likely(cache_from_memcg_idx(cachep, idx))) { | ||
3636 | cachep = cache_from_memcg_idx(cachep, idx); | ||
3637 | goto out; | 3591 | goto out; |
3638 | } | 3592 | } |
3639 | 3593 | ||
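On the allocation fast path the explicit read_barrier_depends() and the double lookup are gone: __memcg_kmem_get_cache() now calls cache_from_memcg_idx() once and simply falls back to the root cache when the per-memcg copy does not exist yet. Conceptually, with the asynchronous creation branch elided (a sketch, not the full function):

        static struct kmem_cache *pick_cache(struct kmem_cache *root_cachep,
                                             struct mem_cgroup *memcg)
        {
                struct kmem_cache *memcg_cachep;

                memcg_cachep = cache_from_memcg_idx(root_cachep, memcg_cache_id(memcg));
                if (likely(memcg_cachep))
                        return memcg_cachep;    /* per-memcg copy already published */

                /* not there yet: allocate from the root cache for now */
                return root_cachep;
        }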
@@ -3787,7 +3741,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) | |||
3787 | if (!memcg) | 3741 | if (!memcg) |
3788 | return; | 3742 | return; |
3789 | 3743 | ||
3790 | VM_BUG_ON(mem_cgroup_is_root(memcg)); | 3744 | VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); |
3791 | memcg_uncharge_kmem(memcg, PAGE_SIZE << order); | 3745 | memcg_uncharge_kmem(memcg, PAGE_SIZE << order); |
3792 | } | 3746 | } |
3793 | #else | 3747 | #else |
@@ -3866,7 +3820,7 @@ static int mem_cgroup_move_account(struct page *page, | |||
3866 | bool anon = PageAnon(page); | 3820 | bool anon = PageAnon(page); |
3867 | 3821 | ||
3868 | VM_BUG_ON(from == to); | 3822 | VM_BUG_ON(from == to); |
3869 | VM_BUG_ON(PageLRU(page)); | 3823 | VM_BUG_ON_PAGE(PageLRU(page), page); |
3870 | /* | 3824 | /* |
3871 | * The page is isolated from LRU. So, collapse function | 3825 | * The page is isolated from LRU. So, collapse function |
3872 | * will not handle this page. But page splitting can happen. | 3826 | * will not handle this page. But page splitting can happen. |
@@ -3959,7 +3913,7 @@ static int mem_cgroup_move_parent(struct page *page, | |||
3959 | parent = root_mem_cgroup; | 3913 | parent = root_mem_cgroup; |
3960 | 3914 | ||
3961 | if (nr_pages > 1) { | 3915 | if (nr_pages > 1) { |
3962 | VM_BUG_ON(!PageTransHuge(page)); | 3916 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
3963 | flags = compound_lock_irqsave(page); | 3917 | flags = compound_lock_irqsave(page); |
3964 | } | 3918 | } |
3965 | 3919 | ||
@@ -3993,7 +3947,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
3993 | 3947 | ||
3994 | if (PageTransHuge(page)) { | 3948 | if (PageTransHuge(page)) { |
3995 | nr_pages <<= compound_order(page); | 3949 | nr_pages <<= compound_order(page); |
3996 | VM_BUG_ON(!PageTransHuge(page)); | 3950 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
3997 | /* | 3951 | /* |
3998 | * Never OOM-kill a process for a huge page. The | 3952 | * Never OOM-kill a process for a huge page. The |
3999 | * fault handler will fall back to regular pages. | 3953 | * fault handler will fall back to regular pages. |
@@ -4013,8 +3967,8 @@ int mem_cgroup_newpage_charge(struct page *page, | |||
4013 | { | 3967 | { |
4014 | if (mem_cgroup_disabled()) | 3968 | if (mem_cgroup_disabled()) |
4015 | return 0; | 3969 | return 0; |
4016 | VM_BUG_ON(page_mapped(page)); | 3970 | VM_BUG_ON_PAGE(page_mapped(page), page); |
4017 | VM_BUG_ON(page->mapping && !PageAnon(page)); | 3971 | VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); |
4018 | VM_BUG_ON(!mm); | 3972 | VM_BUG_ON(!mm); |
4019 | return mem_cgroup_charge_common(page, mm, gfp_mask, | 3973 | return mem_cgroup_charge_common(page, mm, gfp_mask, |
4020 | MEM_CGROUP_CHARGE_TYPE_ANON); | 3974 | MEM_CGROUP_CHARGE_TYPE_ANON); |
@@ -4218,7 +4172,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, | |||
4218 | 4172 | ||
4219 | if (PageTransHuge(page)) { | 4173 | if (PageTransHuge(page)) { |
4220 | nr_pages <<= compound_order(page); | 4174 | nr_pages <<= compound_order(page); |
4221 | VM_BUG_ON(!PageTransHuge(page)); | 4175 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
4222 | } | 4176 | } |
4223 | /* | 4177 | /* |
4224 | * Check if our page_cgroup is valid | 4178 | * Check if our page_cgroup is valid |
@@ -4310,7 +4264,7 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
4310 | /* early check. */ | 4264 | /* early check. */ |
4311 | if (page_mapped(page)) | 4265 | if (page_mapped(page)) |
4312 | return; | 4266 | return; |
4313 | VM_BUG_ON(page->mapping && !PageAnon(page)); | 4267 | VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); |
4314 | /* | 4268 | /* |
4315 | * If the page is in swap cache, uncharge should be deferred | 4269 | * If the page is in swap cache, uncharge should be deferred |
4316 | * to the swap path, which also properly accounts swap usage | 4270 | * to the swap path, which also properly accounts swap usage |
@@ -4330,8 +4284,8 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
4330 | 4284 | ||
4331 | void mem_cgroup_uncharge_cache_page(struct page *page) | 4285 | void mem_cgroup_uncharge_cache_page(struct page *page) |
4332 | { | 4286 | { |
4333 | VM_BUG_ON(page_mapped(page)); | 4287 | VM_BUG_ON_PAGE(page_mapped(page), page); |
4334 | VM_BUG_ON(page->mapping); | 4288 | VM_BUG_ON_PAGE(page->mapping, page); |
4335 | __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); | 4289 | __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); |
4336 | } | 4290 | } |
4337 | 4291 | ||
@@ -5189,11 +5143,23 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | |||
5189 | return val; | 5143 | return val; |
5190 | } | 5144 | } |
5191 | 5145 | ||
5192 | static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) | ||
5193 | { | ||
5194 | int ret = -EINVAL; | ||
5195 | #ifdef CONFIG_MEMCG_KMEM | 5146 | #ifdef CONFIG_MEMCG_KMEM |
5196 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 5147 | /* should be called with activate_kmem_mutex held */ |
5148 | static int __memcg_activate_kmem(struct mem_cgroup *memcg, | ||
5149 | unsigned long long limit) | ||
5150 | { | ||
5151 | int err = 0; | ||
5152 | int memcg_id; | ||
5153 | |||
5154 | if (memcg_kmem_is_active(memcg)) | ||
5155 | return 0; | ||
5156 | |||
5157 | /* | ||
5158 | * We are going to allocate memory for data shared by all memory | ||
5159 | * cgroups so let's stop accounting here. | ||
5160 | */ | ||
5161 | memcg_stop_kmem_account(); | ||
5162 | |||
5197 | /* | 5163 | /* |
5198 | * For simplicity, we won't allow this to be disabled. It also can't | 5164 | * For simplicity, we won't allow this to be disabled. It also can't |
5199 | * be changed if the cgroup has children already, or if tasks had | 5165 | * be changed if the cgroup has children already, or if tasks had |
@@ -5207,72 +5173,101 @@ static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) | |||
5207 | * of course permitted. | 5173 | * of course permitted. |
5208 | */ | 5174 | */ |
5209 | mutex_lock(&memcg_create_mutex); | 5175 | mutex_lock(&memcg_create_mutex); |
5210 | mutex_lock(&set_limit_mutex); | 5176 | if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg)) |
5211 | if (!memcg->kmem_account_flags && val != RES_COUNTER_MAX) { | 5177 | err = -EBUSY; |
5212 | if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) { | 5178 | mutex_unlock(&memcg_create_mutex); |
5213 | ret = -EBUSY; | 5179 | if (err) |
5214 | goto out; | 5180 | goto out; |
5215 | } | ||
5216 | ret = res_counter_set_limit(&memcg->kmem, val); | ||
5217 | VM_BUG_ON(ret); | ||
5218 | 5181 | ||
5219 | ret = memcg_update_cache_sizes(memcg); | 5182 | memcg_id = ida_simple_get(&kmem_limited_groups, |
5220 | if (ret) { | 5183 | 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); |
5221 | res_counter_set_limit(&memcg->kmem, RES_COUNTER_MAX); | 5184 | if (memcg_id < 0) { |
5222 | goto out; | 5185 | err = memcg_id; |
5223 | } | 5186 | goto out; |
5224 | static_key_slow_inc(&memcg_kmem_enabled_key); | 5187 | } |
5225 | /* | 5188 | |
5226 | * setting the active bit after the inc will guarantee no one | 5189 | /* |
5227 | * starts accounting before all call sites are patched | 5190 | * Make sure we have enough space for this cgroup in each root cache's |
5228 | */ | 5191 | * memcg_params. |
5229 | memcg_kmem_set_active(memcg); | 5192 | */ |
5230 | } else | 5193 | err = memcg_update_all_caches(memcg_id + 1); |
5231 | ret = res_counter_set_limit(&memcg->kmem, val); | 5194 | if (err) |
5195 | goto out_rmid; | ||
5196 | |||
5197 | memcg->kmemcg_id = memcg_id; | ||
5198 | INIT_LIST_HEAD(&memcg->memcg_slab_caches); | ||
5199 | mutex_init(&memcg->slab_caches_mutex); | ||
5200 | |||
5201 | /* | ||
5202 | * We couldn't have accounted to this cgroup, because it hasn't got the | ||
5203 | * active bit set yet, so this should succeed. | ||
5204 | */ | ||
5205 | err = res_counter_set_limit(&memcg->kmem, limit); | ||
5206 | VM_BUG_ON(err); | ||
5207 | |||
5208 | static_key_slow_inc(&memcg_kmem_enabled_key); | ||
5209 | /* | ||
5210 | * Setting the active bit after enabling static branching will | ||
5211 | * guarantee no one starts accounting before all call sites are | ||
5212 | * patched. | ||
5213 | */ | ||
5214 | memcg_kmem_set_active(memcg); | ||
5232 | out: | 5215 | out: |
5233 | mutex_unlock(&set_limit_mutex); | 5216 | memcg_resume_kmem_account(); |
5234 | mutex_unlock(&memcg_create_mutex); | 5217 | return err; |
5235 | #endif | 5218 | |
5219 | out_rmid: | ||
5220 | ida_simple_remove(&kmem_limited_groups, memcg_id); | ||
5221 | goto out; | ||
5222 | } | ||
5223 | |||
5224 | static int memcg_activate_kmem(struct mem_cgroup *memcg, | ||
5225 | unsigned long long limit) | ||
5226 | { | ||
5227 | int ret; | ||
5228 | |||
5229 | mutex_lock(&activate_kmem_mutex); | ||
5230 | ret = __memcg_activate_kmem(memcg, limit); | ||
5231 | mutex_unlock(&activate_kmem_mutex); | ||
5232 | return ret; | ||
5233 | } | ||
5234 | |||
5235 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | ||
5236 | unsigned long long val) | ||
5237 | { | ||
5238 | int ret; | ||
5239 | |||
5240 | if (!memcg_kmem_is_active(memcg)) | ||
5241 | ret = memcg_activate_kmem(memcg, val); | ||
5242 | else | ||
5243 | ret = res_counter_set_limit(&memcg->kmem, val); | ||
5236 | return ret; | 5244 | return ret; |
5237 | } | 5245 | } |
5238 | 5246 | ||
5239 | #ifdef CONFIG_MEMCG_KMEM | ||
5240 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) | 5247 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) |
5241 | { | 5248 | { |
5242 | int ret = 0; | 5249 | int ret = 0; |
5243 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); | 5250 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); |
5244 | if (!parent) | ||
5245 | goto out; | ||
5246 | 5251 | ||
5247 | memcg->kmem_account_flags = parent->kmem_account_flags; | 5252 | if (!parent) |
5248 | /* | 5253 | return 0; |
5249 | * When that happen, we need to disable the static branch only on those | ||
5250 | * memcgs that enabled it. To achieve this, we would be forced to | ||
5251 | * complicate the code by keeping track of which memcgs were the ones | ||
5252 | * that actually enabled limits, and which ones got it from its | ||
5253 | * parents. | ||
5254 | * | ||
5255 | * It is a lot simpler just to do static_key_slow_inc() on every child | ||
5256 | * that is accounted. | ||
5257 | */ | ||
5258 | if (!memcg_kmem_is_active(memcg)) | ||
5259 | goto out; | ||
5260 | 5254 | ||
5255 | mutex_lock(&activate_kmem_mutex); | ||
5261 | /* | 5256 | /* |
5262 | * __mem_cgroup_free() will issue static_key_slow_dec() because this | 5257 | * If the parent cgroup is not kmem-active now, it cannot be activated |
5263 | * memcg is active already. If the later initialization fails then the | 5258 | * after this point, because it has at least one child already. |
5264 | * cgroup core triggers the cleanup so we do not have to do it here. | ||
5265 | */ | 5259 | */ |
5266 | static_key_slow_inc(&memcg_kmem_enabled_key); | 5260 | if (memcg_kmem_is_active(parent)) |
5267 | 5261 | ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); | |
5268 | mutex_lock(&set_limit_mutex); | 5262 | mutex_unlock(&activate_kmem_mutex); |
5269 | memcg_stop_kmem_account(); | ||
5270 | ret = memcg_update_cache_sizes(memcg); | ||
5271 | memcg_resume_kmem_account(); | ||
5272 | mutex_unlock(&set_limit_mutex); | ||
5273 | out: | ||
5274 | return ret; | 5263 | return ret; |
5275 | } | 5264 | } |
5265 | #else | ||
5266 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | ||
5267 | unsigned long long val) | ||
5268 | { | ||
5269 | return -EINVAL; | ||
5270 | } | ||
5276 | #endif /* CONFIG_MEMCG_KMEM */ | 5271 | #endif /* CONFIG_MEMCG_KMEM */ |
5277 | 5272 | ||
5278 | /* | 5273 | /* |
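The limit-write path is now split in two: __memcg_activate_kmem() performs the one-shot setup (ID allocation, root cache resize, counter limit, static key, active bit) under activate_kmem_mutex, and memcg_update_kmem_limit() only triggers it the first time a limit is written. The dispatch reduces to roughly:

        static int update_kmem_limit(struct mem_cgroup *memcg, unsigned long long val)
        {
                int ret;

                if (!memcg_kmem_is_active(memcg)) {
                        /* first write: one-time activation, serialized by the mutex */
                        mutex_lock(&activate_kmem_mutex);
                        ret = __memcg_activate_kmem(memcg, val);
                        mutex_unlock(&activate_kmem_mutex);
                } else {
                        /* already active: just adjust the kmem res_counter */
                        ret = res_counter_set_limit(&memcg->kmem, val);
                }
                return ret;
        }

memcg_propagate_kmem() reuses the same helper with RES_COUNTER_MAX when a child is created under an already-active parent.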
@@ -5306,7 +5301,7 @@ static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft, | |||
5306 | else if (type == _MEMSWAP) | 5301 | else if (type == _MEMSWAP) |
5307 | ret = mem_cgroup_resize_memsw_limit(memcg, val); | 5302 | ret = mem_cgroup_resize_memsw_limit(memcg, val); |
5308 | else if (type == _KMEM) | 5303 | else if (type == _KMEM) |
5309 | ret = memcg_update_kmem_limit(css, val); | 5304 | ret = memcg_update_kmem_limit(memcg, val); |
5310 | else | 5305 | else |
5311 | return -EINVAL; | 5306 | return -EINVAL; |
5312 | break; | 5307 | break; |
@@ -6405,14 +6400,12 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) | |||
6405 | static struct mem_cgroup *mem_cgroup_alloc(void) | 6400 | static struct mem_cgroup *mem_cgroup_alloc(void) |
6406 | { | 6401 | { |
6407 | struct mem_cgroup *memcg; | 6402 | struct mem_cgroup *memcg; |
6408 | size_t size = memcg_size(); | 6403 | size_t size; |
6409 | 6404 | ||
6410 | /* Can be very big if nr_node_ids is very big */ | 6405 | size = sizeof(struct mem_cgroup); |
6411 | if (size < PAGE_SIZE) | 6406 | size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); |
6412 | memcg = kzalloc(size, GFP_KERNEL); | ||
6413 | else | ||
6414 | memcg = vzalloc(size); | ||
6415 | 6407 | ||
6408 | memcg = kzalloc(size, GFP_KERNEL); | ||
6416 | if (!memcg) | 6409 | if (!memcg) |
6417 | return NULL; | 6410 | return NULL; |
6418 | 6411 | ||
@@ -6423,10 +6416,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
6423 | return memcg; | 6416 | return memcg; |
6424 | 6417 | ||
6425 | out_free: | 6418 | out_free: |
6426 | if (size < PAGE_SIZE) | 6419 | kfree(memcg); |
6427 | kfree(memcg); | ||
6428 | else | ||
6429 | vfree(memcg); | ||
6430 | return NULL; | 6420 | return NULL; |
6431 | } | 6421 | } |
6432 | 6422 | ||
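mem_cgroup_alloc() no longer picks between kzalloc() and vzalloc() from a precomputed memcg_size(); it sizes the allocation as the base struct plus one pointer per node and always uses kzalloc(), so the free path can unconditionally kfree(). The sizing idiom on a hypothetical struct with a trailing per-node array:

        #include <linux/slab.h>
        #include <linux/nodemask.h>

        struct foo_per_node;                    /* opaque here */

        struct foo {
                unsigned long stats;
                struct foo_per_node *nodeinfo[];        /* one slot per possible node */
        };

        static struct foo *foo_alloc(void)
        {
                size_t size = sizeof(struct foo) +
                              nr_node_ids * sizeof(struct foo_per_node *);

                return kzalloc(size, GFP_KERNEL);       /* paired with a plain kfree() */
        }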
@@ -6444,7 +6434,6 @@ out_free: | |||
6444 | static void __mem_cgroup_free(struct mem_cgroup *memcg) | 6434 | static void __mem_cgroup_free(struct mem_cgroup *memcg) |
6445 | { | 6435 | { |
6446 | int node; | 6436 | int node; |
6447 | size_t size = memcg_size(); | ||
6448 | 6437 | ||
6449 | mem_cgroup_remove_from_trees(memcg); | 6438 | mem_cgroup_remove_from_trees(memcg); |
6450 | 6439 | ||
@@ -6465,10 +6454,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) | |||
6465 | * the cgroup_lock. | 6454 | * the cgroup_lock. |
6466 | */ | 6455 | */ |
6467 | disarm_static_keys(memcg); | 6456 | disarm_static_keys(memcg); |
6468 | if (size < PAGE_SIZE) | 6457 | kfree(memcg); |
6469 | kfree(memcg); | ||
6470 | else | ||
6471 | vfree(memcg); | ||
6472 | } | 6458 | } |
6473 | 6459 | ||
6474 | /* | 6460 | /* |
@@ -6549,7 +6535,6 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
6549 | { | 6535 | { |
6550 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 6536 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
6551 | struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); | 6537 | struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); |
6552 | int error = 0; | ||
6553 | 6538 | ||
6554 | if (css->cgroup->id > MEM_CGROUP_ID_MAX) | 6539 | if (css->cgroup->id > MEM_CGROUP_ID_MAX) |
6555 | return -ENOSPC; | 6540 | return -ENOSPC; |
@@ -6584,10 +6569,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
6584 | if (parent != root_mem_cgroup) | 6569 | if (parent != root_mem_cgroup) |
6585 | mem_cgroup_subsys.broken_hierarchy = true; | 6570 | mem_cgroup_subsys.broken_hierarchy = true; |
6586 | } | 6571 | } |
6587 | |||
6588 | error = memcg_init_kmem(memcg, &mem_cgroup_subsys); | ||
6589 | mutex_unlock(&memcg_create_mutex); | 6572 | mutex_unlock(&memcg_create_mutex); |
6590 | return error; | 6573 | |
6574 | return memcg_init_kmem(memcg, &mem_cgroup_subsys); | ||
6591 | } | 6575 | } |
6592 | 6576 | ||
6593 | /* | 6577 | /* |
@@ -6896,7 +6880,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, | |||
6896 | enum mc_target_type ret = MC_TARGET_NONE; | 6880 | enum mc_target_type ret = MC_TARGET_NONE; |
6897 | 6881 | ||
6898 | page = pmd_page(pmd); | 6882 | page = pmd_page(pmd); |
6899 | VM_BUG_ON(!page || !PageHead(page)); | 6883 | VM_BUG_ON_PAGE(!page || !PageHead(page), page); |
6900 | if (!move_anon()) | 6884 | if (!move_anon()) |
6901 | return ret; | 6885 | return ret; |
6902 | pc = lookup_page_cgroup(page); | 6886 | pc = lookup_page_cgroup(page); |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b25ed321e667..4f08a2d61487 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -856,14 +856,14 @@ static int page_action(struct page_state *ps, struct page *p, | |||
856 | * the pages and send SIGBUS to the processes if the data was dirty. | 856 | * the pages and send SIGBUS to the processes if the data was dirty. |
857 | */ | 857 | */ |
858 | static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | 858 | static int hwpoison_user_mappings(struct page *p, unsigned long pfn, |
859 | int trapno, int flags) | 859 | int trapno, int flags, struct page **hpagep) |
860 | { | 860 | { |
861 | enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; | 861 | enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; |
862 | struct address_space *mapping; | 862 | struct address_space *mapping; |
863 | LIST_HEAD(tokill); | 863 | LIST_HEAD(tokill); |
864 | int ret; | 864 | int ret; |
865 | int kill = 1, forcekill; | 865 | int kill = 1, forcekill; |
866 | struct page *hpage = compound_head(p); | 866 | struct page *hpage = *hpagep; |
867 | struct page *ppage; | 867 | struct page *ppage; |
868 | 868 | ||
869 | if (PageReserved(p) || PageSlab(p)) | 869 | if (PageReserved(p) || PageSlab(p)) |
@@ -942,11 +942,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
942 | * We pinned the head page for hwpoison handling, | 942 | * We pinned the head page for hwpoison handling, |
943 | * now we split the thp and we are interested in | 943 | * now we split the thp and we are interested in |
944 | * the hwpoisoned raw page, so move the refcount | 944 | * the hwpoisoned raw page, so move the refcount |
945 | * to it. | 945 | * to it. Similarly, page lock is shifted. |
946 | */ | 946 | */ |
947 | if (hpage != p) { | 947 | if (hpage != p) { |
948 | put_page(hpage); | 948 | put_page(hpage); |
949 | get_page(p); | 949 | get_page(p); |
950 | lock_page(p); | ||
951 | unlock_page(hpage); | ||
952 | *hpagep = p; | ||
950 | } | 953 | } |
951 | /* THP is split, so ppage should be the real poisoned page. */ | 954 | /* THP is split, so ppage should be the real poisoned page. */ |
952 | ppage = p; | 955 | ppage = p; |
@@ -964,17 +967,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
964 | if (kill) | 967 | if (kill) |
965 | collect_procs(ppage, &tokill); | 968 | collect_procs(ppage, &tokill); |
966 | 969 | ||
967 | if (hpage != ppage) | ||
968 | lock_page(ppage); | ||
969 | |||
970 | ret = try_to_unmap(ppage, ttu); | 970 | ret = try_to_unmap(ppage, ttu); |
971 | if (ret != SWAP_SUCCESS) | 971 | if (ret != SWAP_SUCCESS) |
972 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", | 972 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", |
973 | pfn, page_mapcount(ppage)); | 973 | pfn, page_mapcount(ppage)); |
974 | 974 | ||
975 | if (hpage != ppage) | ||
976 | unlock_page(ppage); | ||
977 | |||
978 | /* | 975 | /* |
979 | * Now that the dirty bit has been propagated to the | 976 | * Now that the dirty bit has been propagated to the |
980 | * struct page and all unmaps done we can decide if | 977 | * struct page and all unmaps done we can decide if |
@@ -1193,8 +1190,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1193 | /* | 1190 | /* |
1194 | * Now take care of user space mappings. | 1191 | * Now take care of user space mappings. |
1195 | * Abort on fail: __delete_from_page_cache() assumes unmapped page. | 1192 | * Abort on fail: __delete_from_page_cache() assumes unmapped page. |
1193 | * | ||
1194 | * When the raw error page is thp tail page, hpage points to the raw | ||
1195 | * page after thp split. | ||
1196 | */ | 1196 | */ |
1197 | if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) { | 1197 | if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage) |
1198 | != SWAP_SUCCESS) { | ||
1198 | printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); | 1199 | printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); |
1199 | res = -EBUSY; | 1200 | res = -EBUSY; |
1200 | goto out; | 1201 | goto out; |
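Because hwpoison_user_mappings() may split a THP and move both the page reference and the page lock over to the raw poisoned page, it now takes struct page **hpagep so the caller keeps unlocking the page that is actually locked. The hand-over in isolation (a sketch with a hypothetical helper name; the real function does far more):

        /* May replace *hpagep with the post-split raw page. */
        static void handover_to_raw_page(struct page *p, struct page **hpagep)
        {
                struct page *hpage = *hpagep;

                if (hpage != p) {               /* the huge page was split */
                        put_page(hpage);
                        get_page(p);
                        lock_page(p);
                        unlock_page(hpage);     /* shift the lock to the raw page */
                        *hpagep = p;
                }
        }

The caller in memory_failure() then operates on, and finally unlocks, *hpagep rather than the compound head it originally locked.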
diff --git a/mm/memory.c b/mm/memory.c index 86487dfa5e59..be6a0c0d4ae0 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -289,7 +289,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
289 | return 0; | 289 | return 0; |
290 | batch = tlb->active; | 290 | batch = tlb->active; |
291 | } | 291 | } |
292 | VM_BUG_ON(batch->nr > batch->max); | 292 | VM_BUG_ON_PAGE(batch->nr > batch->max, page); |
293 | 293 | ||
294 | return batch->max - batch->nr; | 294 | return batch->max - batch->nr; |
295 | } | 295 | } |
@@ -671,7 +671,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
671 | current->comm, | 671 | current->comm, |
672 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); | 672 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); |
673 | if (page) | 673 | if (page) |
674 | dump_page(page); | 674 | dump_page(page, "bad pte"); |
675 | printk(KERN_ALERT | 675 | printk(KERN_ALERT |
676 | "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | 676 | "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", |
677 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | 677 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); |
@@ -2702,7 +2702,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2702 | goto unwritable_page; | 2702 | goto unwritable_page; |
2703 | } | 2703 | } |
2704 | } else | 2704 | } else |
2705 | VM_BUG_ON(!PageLocked(old_page)); | 2705 | VM_BUG_ON_PAGE(!PageLocked(old_page), old_page); |
2706 | 2706 | ||
2707 | /* | 2707 | /* |
2708 | * Since we dropped the lock we need to revalidate | 2708 | * Since we dropped the lock we need to revalidate |
@@ -3358,7 +3358,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3358 | if (unlikely(!(ret & VM_FAULT_LOCKED))) | 3358 | if (unlikely(!(ret & VM_FAULT_LOCKED))) |
3359 | lock_page(vmf.page); | 3359 | lock_page(vmf.page); |
3360 | else | 3360 | else |
3361 | VM_BUG_ON(!PageLocked(vmf.page)); | 3361 | VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); |
3362 | 3362 | ||
3363 | /* | 3363 | /* |
3364 | * Should we do an early C-O-W break? | 3364 | * Should we do an early C-O-W break? |
@@ -3395,7 +3395,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3395 | goto unwritable_page; | 3395 | goto unwritable_page; |
3396 | } | 3396 | } |
3397 | } else | 3397 | } else |
3398 | VM_BUG_ON(!PageLocked(page)); | 3398 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
3399 | page_mkwrite = 1; | 3399 | page_mkwrite = 1; |
3400 | } | 3400 | } |
3401 | } | 3401 | } |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index cc2ab37220b7..a650db29606f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1107,17 +1107,18 @@ int __ref add_memory(int nid, u64 start, u64 size) | |||
1107 | if (ret) | 1107 | if (ret) |
1108 | return ret; | 1108 | return ret; |
1109 | 1109 | ||
1110 | lock_memory_hotplug(); | ||
1111 | |||
1112 | res = register_memory_resource(start, size); | 1110 | res = register_memory_resource(start, size); |
1113 | ret = -EEXIST; | 1111 | ret = -EEXIST; |
1114 | if (!res) | 1112 | if (!res) |
1115 | goto out; | 1113 | return ret; |
1116 | 1114 | ||
1117 | { /* Stupid hack to suppress address-never-null warning */ | 1115 | { /* Stupid hack to suppress address-never-null warning */ |
1118 | void *p = NODE_DATA(nid); | 1116 | void *p = NODE_DATA(nid); |
1119 | new_pgdat = !p; | 1117 | new_pgdat = !p; |
1120 | } | 1118 | } |
1119 | |||
1120 | lock_memory_hotplug(); | ||
1121 | |||
1121 | new_node = !node_online(nid); | 1122 | new_node = !node_online(nid); |
1122 | if (new_node) { | 1123 | if (new_node) { |
1123 | pgdat = hotadd_new_pgdat(nid, start); | 1124 | pgdat = hotadd_new_pgdat(nid, start); |
@@ -1309,7 +1310,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1309 | #ifdef CONFIG_DEBUG_VM | 1310 | #ifdef CONFIG_DEBUG_VM |
1310 | printk(KERN_ALERT "removing pfn %lx from LRU failed\n", | 1311 | printk(KERN_ALERT "removing pfn %lx from LRU failed\n", |
1311 | pfn); | 1312 | pfn); |
1312 | dump_page(page); | 1313 | dump_page(page, "failed to remove from LRU"); |
1313 | #endif | 1314 | #endif |
1314 | put_page(page); | 1315 | put_page(page); |
1315 | /* Because we don't have big zone->lock. we should | 1316 | /* Because we don't have big zone->lock. we should |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0cd2c4d4e270..463b7fbf0d1d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1199,10 +1199,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * | |||
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | if (PageHuge(page)) { | 1201 | if (PageHuge(page)) { |
1202 | if (vma) | 1202 | BUG_ON(!vma); |
1203 | return alloc_huge_page_noerr(vma, address, 1); | 1203 | return alloc_huge_page_noerr(vma, address, 1); |
1204 | else | ||
1205 | return NULL; | ||
1206 | } | 1204 | } |
1207 | /* | 1205 | /* |
1208 | * if !vma, alloc_page_vma() will use task or system default policy | 1206 | * if !vma, alloc_page_vma() will use task or system default policy |
@@ -2668,7 +2666,7 @@ static void __init check_numabalancing_enable(void) | |||
2668 | 2666 | ||
2669 | if (nr_node_ids > 1 && !numabalancing_override) { | 2667 | if (nr_node_ids > 1 && !numabalancing_override) { |
2670 | printk(KERN_INFO "Enabling automatic NUMA balancing. " | 2668 | printk(KERN_INFO "Enabling automatic NUMA balancing. " |
2671 | "Configure with numa_balancing= or sysctl"); | 2669 | "Configure with numa_balancing= or the kernel.numa_balancing sysctl"); |
2672 | set_numabalancing_state(numabalancing_default); | 2670 | set_numabalancing_state(numabalancing_default); |
2673 | } | 2671 | } |
2674 | } | 2672 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index a8025befc323..734704f6f29b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -499,7 +499,7 @@ void migrate_page_copy(struct page *newpage, struct page *page) | |||
499 | if (PageUptodate(page)) | 499 | if (PageUptodate(page)) |
500 | SetPageUptodate(newpage); | 500 | SetPageUptodate(newpage); |
501 | if (TestClearPageActive(page)) { | 501 | if (TestClearPageActive(page)) { |
502 | VM_BUG_ON(PageUnevictable(page)); | 502 | VM_BUG_ON_PAGE(PageUnevictable(page), page); |
503 | SetPageActive(newpage); | 503 | SetPageActive(newpage); |
504 | } else if (TestClearPageUnevictable(page)) | 504 | } else if (TestClearPageUnevictable(page)) |
505 | SetPageUnevictable(newpage); | 505 | SetPageUnevictable(newpage); |
@@ -871,7 +871,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, | |||
871 | * free the metadata, so the page can be freed. | 871 | * free the metadata, so the page can be freed. |
872 | */ | 872 | */ |
873 | if (!page->mapping) { | 873 | if (!page->mapping) { |
874 | VM_BUG_ON(PageAnon(page)); | 874 | VM_BUG_ON_PAGE(PageAnon(page), page); |
875 | if (page_has_private(page)) { | 875 | if (page_has_private(page)) { |
876 | try_to_free_buffers(page); | 876 | try_to_free_buffers(page); |
877 | goto uncharge; | 877 | goto uncharge; |
@@ -1618,7 +1618,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) | |||
1618 | { | 1618 | { |
1619 | int page_lru; | 1619 | int page_lru; |
1620 | 1620 | ||
1621 | VM_BUG_ON(compound_order(page) && !PageTransHuge(page)); | 1621 | VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); |
1622 | 1622 | ||
1623 | /* Avoid migrating to a node that is nearly full */ | 1623 | /* Avoid migrating to a node that is nearly full */ |
1624 | if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) | 1624 | if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) |
@@ -1753,8 +1753,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1753 | if (!new_page) | 1753 | if (!new_page) |
1754 | goto out_fail; | 1754 | goto out_fail; |
1755 | 1755 | ||
1756 | page_cpupid_xchg_last(new_page, page_cpupid_last(page)); | ||
1757 | |||
1758 | isolated = numamigrate_isolate_page(pgdat, page); | 1756 | isolated = numamigrate_isolate_page(pgdat, page); |
1759 | if (!isolated) { | 1757 | if (!isolated) { |
1760 | put_page(new_page); | 1758 | put_page(new_page); |
diff --git a/mm/mincore.c b/mm/mincore.c index da2be56a7b8f..101623378fbf 100644 --- a/mm/mincore.c +++ b/mm/mincore.c | |||
@@ -225,13 +225,6 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v | |||
225 | 225 | ||
226 | end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); | 226 | end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); |
227 | 227 | ||
228 | if (is_vm_hugetlb_page(vma)) { | ||
229 | mincore_hugetlb_page_range(vma, addr, end, vec); | ||
230 | return (end - addr) >> PAGE_SHIFT; | ||
231 | } | ||
232 | |||
233 | end = pmd_addr_end(addr, end); | ||
234 | |||
235 | if (is_vm_hugetlb_page(vma)) | 228 | if (is_vm_hugetlb_page(vma)) |
236 | mincore_hugetlb_page_range(vma, addr, end, vec); | 229 | mincore_hugetlb_page_range(vma, addr, end, vec); |
237 | else | 230 | else |
diff --git a/mm/mlock.c b/mm/mlock.c index 10819ed4df3e..4e1a68162285 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -91,6 +91,26 @@ void mlock_vma_page(struct page *page) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * Isolate a page from LRU with optional get_page() pin. | ||
95 | * Assumes lru_lock already held and page already pinned. | ||
96 | */ | ||
97 | static bool __munlock_isolate_lru_page(struct page *page, bool getpage) | ||
98 | { | ||
99 | if (PageLRU(page)) { | ||
100 | struct lruvec *lruvec; | ||
101 | |||
102 | lruvec = mem_cgroup_page_lruvec(page, page_zone(page)); | ||
103 | if (getpage) | ||
104 | get_page(page); | ||
105 | ClearPageLRU(page); | ||
106 | del_page_from_lru_list(page, lruvec, page_lru(page)); | ||
107 | return true; | ||
108 | } | ||
109 | |||
110 | return false; | ||
111 | } | ||
112 | |||
113 | /* | ||
94 | * Finish munlock after successful page isolation | 114 | * Finish munlock after successful page isolation |
95 | * | 115 | * |
96 | * Page must be locked. This is a wrapper for try_to_munlock() | 116 | * Page must be locked. This is a wrapper for try_to_munlock() |
@@ -126,9 +146,9 @@ static void __munlock_isolated_page(struct page *page) | |||
126 | static void __munlock_isolation_failed(struct page *page) | 146 | static void __munlock_isolation_failed(struct page *page) |
127 | { | 147 | { |
128 | if (PageUnevictable(page)) | 148 | if (PageUnevictable(page)) |
129 | count_vm_event(UNEVICTABLE_PGSTRANDED); | 149 | __count_vm_event(UNEVICTABLE_PGSTRANDED); |
130 | else | 150 | else |
131 | count_vm_event(UNEVICTABLE_PGMUNLOCKED); | 151 | __count_vm_event(UNEVICTABLE_PGMUNLOCKED); |
132 | } | 152 | } |
133 | 153 | ||
134 | /** | 154 | /** |
@@ -152,28 +172,34 @@ static void __munlock_isolation_failed(struct page *page) | |||
152 | unsigned int munlock_vma_page(struct page *page) | 172 | unsigned int munlock_vma_page(struct page *page) |
153 | { | 173 | { |
154 | unsigned int nr_pages; | 174 | unsigned int nr_pages; |
175 | struct zone *zone = page_zone(page); | ||
155 | 176 | ||
156 | BUG_ON(!PageLocked(page)); | 177 | BUG_ON(!PageLocked(page)); |
157 | 178 | ||
158 | if (TestClearPageMlocked(page)) { | ||
159 | nr_pages = hpage_nr_pages(page); | ||
160 | mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); | ||
161 | if (!isolate_lru_page(page)) | ||
162 | __munlock_isolated_page(page); | ||
163 | else | ||
164 | __munlock_isolation_failed(page); | ||
165 | } else { | ||
166 | nr_pages = hpage_nr_pages(page); | ||
167 | } | ||
168 | |||
169 | /* | 179 | /* |
170 | * Regardless of the original PageMlocked flag, we determine nr_pages | 180 | * Serialize with any parallel __split_huge_page_refcount() which |
171 | * after touching the flag. This leaves a possible race with a THP page | 181 | * might otherwise copy PageMlocked to part of the tail pages before |
172 | * split, such that a whole THP page was munlocked, but nr_pages == 1. | 182 | * we clear it in the head page. It also stabilizes hpage_nr_pages(). |
173 | * Returning a smaller mask due to that is OK, the worst that can | ||
174 | * happen is subsequent useless scanning of the former tail pages. | ||
175 | * The NR_MLOCK accounting can however become broken. | ||
176 | */ | 183 | */ |
184 | spin_lock_irq(&zone->lru_lock); | ||
185 | |||
186 | nr_pages = hpage_nr_pages(page); | ||
187 | if (!TestClearPageMlocked(page)) | ||
188 | goto unlock_out; | ||
189 | |||
190 | __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); | ||
191 | |||
192 | if (__munlock_isolate_lru_page(page, true)) { | ||
193 | spin_unlock_irq(&zone->lru_lock); | ||
194 | __munlock_isolated_page(page); | ||
195 | goto out; | ||
196 | } | ||
197 | __munlock_isolation_failed(page); | ||
198 | |||
199 | unlock_out: | ||
200 | spin_unlock_irq(&zone->lru_lock); | ||
201 | |||
202 | out: | ||
177 | return nr_pages - 1; | 203 | return nr_pages - 1; |
178 | } | 204 | } |
179 | 205 | ||
@@ -253,8 +279,8 @@ static int __mlock_posix_error_return(long retval) | |||
253 | static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, | 279 | static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, |
254 | int *pgrescued) | 280 | int *pgrescued) |
255 | { | 281 | { |
256 | VM_BUG_ON(PageLRU(page)); | 282 | VM_BUG_ON_PAGE(PageLRU(page), page); |
257 | VM_BUG_ON(!PageLocked(page)); | 283 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
258 | 284 | ||
259 | if (page_mapcount(page) <= 1 && page_evictable(page)) { | 285 | if (page_mapcount(page) <= 1 && page_evictable(page)) { |
260 | pagevec_add(pvec, page); | 286 | pagevec_add(pvec, page); |
@@ -310,34 +336,24 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
310 | struct page *page = pvec->pages[i]; | 336 | struct page *page = pvec->pages[i]; |
311 | 337 | ||
312 | if (TestClearPageMlocked(page)) { | 338 | if (TestClearPageMlocked(page)) { |
313 | struct lruvec *lruvec; | ||
314 | int lru; | ||
315 | |||
316 | if (PageLRU(page)) { | ||
317 | lruvec = mem_cgroup_page_lruvec(page, zone); | ||
318 | lru = page_lru(page); | ||
319 | /* | ||
320 | * We already have pin from follow_page_mask() | ||
321 | * so we can spare the get_page() here. | ||
322 | */ | ||
323 | ClearPageLRU(page); | ||
324 | del_page_from_lru_list(page, lruvec, lru); | ||
325 | } else { | ||
326 | __munlock_isolation_failed(page); | ||
327 | goto skip_munlock; | ||
328 | } | ||
329 | |||
330 | } else { | ||
331 | skip_munlock: | ||
332 | /* | 339 | /* |
333 | * We won't be munlocking this page in the next phase | 340 | * We already have pin from follow_page_mask() |
334 | * but we still need to release the follow_page_mask() | 341 | * so we can spare the get_page() here. |
335 | * pin. We cannot do it under lru_lock however. If it's | ||
336 | * the last pin, __page_cache_release would deadlock. | ||
337 | */ | 342 | */ |
338 | pagevec_add(&pvec_putback, pvec->pages[i]); | 343 | if (__munlock_isolate_lru_page(page, false)) |
339 | pvec->pages[i] = NULL; | 344 | continue; |
345 | else | ||
346 | __munlock_isolation_failed(page); | ||
340 | } | 347 | } |
348 | |||
349 | /* | ||
350 | * We won't be munlocking this page in the next phase | ||
351 | * but we still need to release the follow_page_mask() | ||
352 | * pin. We cannot do it under lru_lock however. If it's | ||
353 | * the last pin, __page_cache_release() would deadlock. | ||
354 | */ | ||
355 | pagevec_add(&pvec_putback, pvec->pages[i]); | ||
356 | pvec->pages[i] = NULL; | ||
341 | } | 357 | } |
342 | delta_munlocked = -nr + pagevec_count(&pvec_putback); | 358 | delta_munlocked = -nr + pagevec_count(&pvec_putback); |
343 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); | 359 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); |
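munlock_vma_page() now clears PG_mlocked and isolates the page from the LRU while holding zone->lru_lock, which serializes against __split_huge_page_refcount(), stabilizes hpage_nr_pages(), and lets the new __munlock_isolate_lru_page() helper be shared with the pagevec path (which already holds a pin and passes getpage=false). The locking skeleton, condensed from the hunk above:

        static unsigned int munlock_one(struct page *page)
        {
                struct zone *zone = page_zone(page);
                unsigned int nr_pages;

                spin_lock_irq(&zone->lru_lock);         /* blocks a concurrent THP split */
                nr_pages = hpage_nr_pages(page);
                if (!TestClearPageMlocked(page)) {
                        spin_unlock_irq(&zone->lru_lock);
                        goto out;
                }
                __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
                if (__munlock_isolate_lru_page(page, true)) {
                        spin_unlock_irq(&zone->lru_lock);
                        __munlock_isolated_page(page);  /* munlock proper, outside the lock */
                        goto out;
                }
                __munlock_isolation_failed(page);       /* stats only, still under the lock */
                spin_unlock_irq(&zone->lru_lock);
        out:
                return nr_pages - 1;
        }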
diff --git a/mm/mm_init.c b/mm/mm_init.c index 68562e92d50c..857a6434e3a5 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c | |||
@@ -202,5 +202,4 @@ static int __init mm_sysfs_init(void) | |||
202 | 202 | ||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | pure_initcall(mm_sysfs_init); | |
206 | __initcall(mm_sysfs_init); | ||
@@ -894,7 +894,15 @@ again: remove_next = 1 + (end > next->vm_end); | |||
894 | static inline int is_mergeable_vma(struct vm_area_struct *vma, | 894 | static inline int is_mergeable_vma(struct vm_area_struct *vma, |
895 | struct file *file, unsigned long vm_flags) | 895 | struct file *file, unsigned long vm_flags) |
896 | { | 896 | { |
897 | if (vma->vm_flags ^ vm_flags) | 897 | /* |
898 | * VM_SOFTDIRTY should not prevent VMA merging if we | ||
899 | * match the flags except for the dirty bit -- the caller should mark | ||
900 | * the merged VMA as dirty. If the dirty bit were not excluded from the | ||
901 | * comparison, we would increase pressure on the memory system, forcing | ||
902 | * the kernel to generate new VMAs when the old one could be | ||
903 | * extended instead. | ||
904 | */ | ||
905 | if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) | ||
898 | return 0; | 906 | return 0; |
899 | if (vma->vm_file != file) | 907 | if (vma->vm_file != file) |
900 | return 0; | 908 | return 0; |
@@ -1083,7 +1091,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct * | |||
1083 | return a->vm_end == b->vm_start && | 1091 | return a->vm_end == b->vm_start && |
1084 | mpol_equal(vma_policy(a), vma_policy(b)) && | 1092 | mpol_equal(vma_policy(a), vma_policy(b)) && |
1085 | a->vm_file == b->vm_file && | 1093 | a->vm_file == b->vm_file && |
1086 | !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && | 1094 | !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && |
1087 | b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); | 1095 | b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); |
1088 | } | 1096 | } |
1089 | 1097 | ||
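The two mmap.c hunks above share one idea: when deciding whether two VMAs may merge, XOR the flag words and mask out the bits that should not block merging (here VM_SOFTDIRTY). A minimal userspace sketch of that comparison pattern, with illustrative flag values rather than the kernel's:

#include <stdio.h>

/* Illustrative flag bits -- not the kernel's actual values. */
#define FL_READ      0x1u
#define FL_WRITE     0x2u
#define FL_EXEC      0x4u
#define FL_SOFTDIRTY 0x8u

/* Flags are mergeable if they differ only in the ignored bits. */
static int flags_mergeable(unsigned int a, unsigned int b, unsigned int ignore)
{
	return ((a ^ b) & ~ignore) == 0;
}

int main(void)
{
	unsigned int a = FL_READ | FL_WRITE;
	unsigned int b = FL_READ | FL_WRITE | FL_SOFTDIRTY;

	/* Differ only in the soft-dirty bit: still mergeable. */
	printf("%d\n", flags_mergeable(a, b, FL_SOFTDIRTY));          /* 1 */
	/* Differ in the exec bit: not mergeable. */
	printf("%d\n", flags_mergeable(a, a | FL_EXEC, FL_SOFTDIRTY)); /* 0 */
	return 0;
}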
@@ -3142,7 +3150,7 @@ static int init_user_reserve(void) | |||
3142 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); | 3150 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); |
3143 | return 0; | 3151 | return 0; |
3144 | } | 3152 | } |
3145 | module_init(init_user_reserve) | 3153 | subsys_initcall(init_user_reserve); |
3146 | 3154 | ||
3147 | /* | 3155 | /* |
3148 | * Initialise sysctl_admin_reserve_kbytes. | 3156 | * Initialise sysctl_admin_reserve_kbytes. |
@@ -3163,7 +3171,7 @@ static int init_admin_reserve(void) | |||
3163 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); | 3171 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); |
3164 | return 0; | 3172 | return 0; |
3165 | } | 3173 | } |
3166 | module_init(init_admin_reserve) | 3174 | subsys_initcall(init_admin_reserve); |
3167 | 3175 | ||
3168 | /* | 3176 | /* |
3169 | * Reinitialise user and admin reserves if memory is added or removed. | 3177 | * Reinitialise user and admin reserves if memory is added or removed. |
@@ -3233,4 +3241,4 @@ static int __meminit init_reserve_notifier(void) | |||
3233 | 3241 | ||
3234 | return 0; | 3242 | return 0; |
3235 | } | 3243 | } |
3236 | module_init(init_reserve_notifier) | 3244 | subsys_initcall(init_reserve_notifier); |
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 93e6089cb456..41cefdf0aadd 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c | |||
@@ -329,5 +329,4 @@ static int __init mmu_notifier_init(void) | |||
329 | { | 329 | { |
330 | return init_srcu_struct(&srcu); | 330 | return init_srcu_struct(&srcu); |
331 | } | 331 | } |
332 | 332 | subsys_initcall(mmu_notifier_init); | |
333 | module_init(mmu_notifier_init); | ||
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 19121ceb8874..f73f2987a852 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -45,7 +45,9 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, | |||
45 | if (!addr) | 45 | if (!addr) |
46 | return NULL; | 46 | return NULL; |
47 | 47 | ||
48 | memblock_reserve(addr, size); | 48 | if (memblock_reserve(addr, size)) |
49 | return NULL; | ||
50 | |||
49 | ptr = phys_to_virt(addr); | 51 | ptr = phys_to_virt(addr); |
50 | memset(ptr, 0, size); | 52 | memset(ptr, 0, size); |
51 | /* | 53 | /* |
@@ -114,16 +116,27 @@ static unsigned long __init __free_memory_core(phys_addr_t start, | |||
114 | static unsigned long __init free_low_memory_core_early(void) | 116 | static unsigned long __init free_low_memory_core_early(void) |
115 | { | 117 | { |
116 | unsigned long count = 0; | 118 | unsigned long count = 0; |
117 | phys_addr_t start, end, size; | 119 | phys_addr_t start, end; |
118 | u64 i; | 120 | u64 i; |
119 | 121 | ||
120 | for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) | 122 | for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) |
121 | count += __free_memory_core(start, end); | 123 | count += __free_memory_core(start, end); |
122 | 124 | ||
123 | /* free range that is used for reserved array if we allocate it */ | 125 | #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK |
124 | size = get_allocated_memblock_reserved_regions_info(&start); | 126 | { |
125 | if (size) | 127 | phys_addr_t size; |
126 | count += __free_memory_core(start, start + size); | 128 | |
129 | /* Free memblock.reserved array if it was allocated */ | ||
130 | size = get_allocated_memblock_reserved_regions_info(&start); | ||
131 | if (size) | ||
132 | count += __free_memory_core(start, start + size); | ||
133 | |||
134 | /* Free memblock.memory array if it was allocated */ | ||
135 | size = get_allocated_memblock_memory_regions_info(&start); | ||
136 | if (size) | ||
137 | count += __free_memory_core(start, start + size); | ||
138 | } | ||
139 | #endif | ||
127 | 140 | ||
128 | return count; | 141 | return count; |
129 | } | 142 | } |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 054ff47c4478..37b1b1903fb2 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -327,10 +327,14 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
327 | break; | 327 | break; |
328 | }; | 328 | }; |
329 | points = oom_badness(p, NULL, nodemask, totalpages); | 329 | points = oom_badness(p, NULL, nodemask, totalpages); |
330 | if (points > chosen_points) { | 330 | if (!points || points < chosen_points) |
331 | chosen = p; | 331 | continue; |
332 | chosen_points = points; | 332 | /* Prefer thread group leaders for display purposes */ |
333 | } | 333 | if (points == chosen_points && thread_group_leader(chosen)) |
334 | continue; | ||
335 | |||
336 | chosen = p; | ||
337 | chosen_points = points; | ||
334 | } | 338 | } |
335 | if (chosen) | 339 | if (chosen) |
336 | get_task_struct(chosen); | 340 | get_task_struct(chosen); |
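The select_bad_process() change keeps the highest-scoring task but, on a tie, sticks with the already-chosen task when it is a thread group leader, so the reported victim is the group leader rather than an arbitrary thread. A rough userspace model of that selection loop (the task fields and scores are placeholders for oom_badness()):

#include <stdio.h>
#include <stdbool.h>

struct task {
	const char *comm;
	unsigned int points;   /* stand-in for oom_badness() */
	bool group_leader;
};

static const struct task *pick_victim(const struct task *tasks, int n)
{
	const struct task *chosen = NULL;
	unsigned int chosen_points = 0;

	for (int i = 0; i < n; i++) {
		unsigned int points = tasks[i].points;

		if (!points || points < chosen_points)
			continue;
		/* On a tie, keep the current choice if it leads its group. */
		if (points == chosen_points && chosen && chosen->group_leader)
			continue;
		chosen = &tasks[i];
		chosen_points = points;
	}
	return chosen;
}

int main(void)
{
	struct task tasks[] = {
		{ "leader", 100, true },
		{ "worker", 100, false },
		{ "small",   10, true },
	};

	printf("victim: %s\n", pick_victim(tasks, 3)->comm); /* leader */
	return 0;
}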
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 533e2147d14f..e3758a09a009 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -205,7 +205,7 @@ static char * const zone_names[MAX_NR_ZONES] = { | |||
205 | }; | 205 | }; |
206 | 206 | ||
207 | int min_free_kbytes = 1024; | 207 | int min_free_kbytes = 1024; |
208 | int user_min_free_kbytes; | 208 | int user_min_free_kbytes = -1; |
209 | 209 | ||
210 | static unsigned long __meminitdata nr_kernel_pages; | 210 | static unsigned long __meminitdata nr_kernel_pages; |
211 | static unsigned long __meminitdata nr_all_pages; | 211 | static unsigned long __meminitdata nr_all_pages; |
@@ -295,7 +295,7 @@ static inline int bad_range(struct zone *zone, struct page *page) | |||
295 | } | 295 | } |
296 | #endif | 296 | #endif |
297 | 297 | ||
298 | static void bad_page(struct page *page) | 298 | static void bad_page(struct page *page, char *reason, unsigned long bad_flags) |
299 | { | 299 | { |
300 | static unsigned long resume; | 300 | static unsigned long resume; |
301 | static unsigned long nr_shown; | 301 | static unsigned long nr_shown; |
@@ -329,7 +329,7 @@ static void bad_page(struct page *page) | |||
329 | 329 | ||
330 | printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", | 330 | printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", |
331 | current->comm, page_to_pfn(page)); | 331 | current->comm, page_to_pfn(page)); |
332 | dump_page(page); | 332 | dump_page_badflags(page, reason, bad_flags); |
333 | 333 | ||
334 | print_modules(); | 334 | print_modules(); |
335 | dump_stack(); | 335 | dump_stack(); |
@@ -383,7 +383,7 @@ static int destroy_compound_page(struct page *page, unsigned long order) | |||
383 | int bad = 0; | 383 | int bad = 0; |
384 | 384 | ||
385 | if (unlikely(compound_order(page) != order)) { | 385 | if (unlikely(compound_order(page) != order)) { |
386 | bad_page(page); | 386 | bad_page(page, "wrong compound order", 0); |
387 | bad++; | 387 | bad++; |
388 | } | 388 | } |
389 | 389 | ||
@@ -392,8 +392,11 @@ static int destroy_compound_page(struct page *page, unsigned long order) | |||
392 | for (i = 1; i < nr_pages; i++) { | 392 | for (i = 1; i < nr_pages; i++) { |
393 | struct page *p = page + i; | 393 | struct page *p = page + i; |
394 | 394 | ||
395 | if (unlikely(!PageTail(p) || (p->first_page != page))) { | 395 | if (unlikely(!PageTail(p))) { |
396 | bad_page(page); | 396 | bad_page(page, "PageTail not set", 0); |
397 | bad++; | ||
398 | } else if (unlikely(p->first_page != page)) { | ||
399 | bad_page(page, "first_page not consistent", 0); | ||
397 | bad++; | 400 | bad++; |
398 | } | 401 | } |
399 | __ClearPageTail(p); | 402 | __ClearPageTail(p); |
@@ -506,12 +509,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, | |||
506 | return 0; | 509 | return 0; |
507 | 510 | ||
508 | if (page_is_guard(buddy) && page_order(buddy) == order) { | 511 | if (page_is_guard(buddy) && page_order(buddy) == order) { |
509 | VM_BUG_ON(page_count(buddy) != 0); | 512 | VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); |
510 | return 1; | 513 | return 1; |
511 | } | 514 | } |
512 | 515 | ||
513 | if (PageBuddy(buddy) && page_order(buddy) == order) { | 516 | if (PageBuddy(buddy) && page_order(buddy) == order) { |
514 | VM_BUG_ON(page_count(buddy) != 0); | 517 | VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); |
515 | return 1; | 518 | return 1; |
516 | } | 519 | } |
517 | return 0; | 520 | return 0; |
@@ -561,8 +564,8 @@ static inline void __free_one_page(struct page *page, | |||
561 | 564 | ||
562 | page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); | 565 | page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); |
563 | 566 | ||
564 | VM_BUG_ON(page_idx & ((1 << order) - 1)); | 567 | VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); |
565 | VM_BUG_ON(bad_range(zone, page)); | 568 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
566 | 569 | ||
567 | while (order < MAX_ORDER-1) { | 570 | while (order < MAX_ORDER-1) { |
568 | buddy_idx = __find_buddy_index(page_idx, order); | 571 | buddy_idx = __find_buddy_index(page_idx, order); |
@@ -618,12 +621,23 @@ out: | |||
618 | 621 | ||
619 | static inline int free_pages_check(struct page *page) | 622 | static inline int free_pages_check(struct page *page) |
620 | { | 623 | { |
621 | if (unlikely(page_mapcount(page) | | 624 | char *bad_reason = NULL; |
622 | (page->mapping != NULL) | | 625 | unsigned long bad_flags = 0; |
623 | (atomic_read(&page->_count) != 0) | | 626 | |
624 | (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | | 627 | if (unlikely(page_mapcount(page))) |
625 | (mem_cgroup_bad_page_check(page)))) { | 628 | bad_reason = "nonzero mapcount"; |
626 | bad_page(page); | 629 | if (unlikely(page->mapping != NULL)) |
630 | bad_reason = "non-NULL mapping"; | ||
631 | if (unlikely(atomic_read(&page->_count) != 0)) | ||
632 | bad_reason = "nonzero _count"; | ||
633 | if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { | ||
634 | bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; | ||
635 | bad_flags = PAGE_FLAGS_CHECK_AT_FREE; | ||
636 | } | ||
637 | if (unlikely(mem_cgroup_bad_page_check(page))) | ||
638 | bad_reason = "cgroup check failed"; | ||
639 | if (unlikely(bad_reason)) { | ||
640 | bad_page(page, bad_reason, bad_flags); | ||
627 | return 1; | 641 | return 1; |
628 | } | 642 | } |
629 | page_cpupid_reset_last(page); | 643 | page_cpupid_reset_last(page); |
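The free_pages_check()/bad_page() rework replaces one OR-combined test with a series of individual checks so the report can name the condition that failed and the offending flags. A small userspace sketch of that "record the last failing reason" pattern (the checked fields are simplified stand-ins for struct page state):

#include <stdio.h>
#include <stddef.h>

struct fake_page {
	int mapcount;
	void *mapping;
	int refcount;
	unsigned long flags;
};

#define FLAGS_CHECK_AT_FREE 0x3ul  /* illustrative mask */

/* Returns 1 and prints a reason if the page looks bad, 0 otherwise. */
static int free_page_check(const struct fake_page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (page->mapcount)
		bad_reason = "nonzero mapcount";
	if (page->mapping != NULL)
		bad_reason = "non-NULL mapping";
	if (page->refcount != 0)
		bad_reason = "nonzero refcount";
	if (page->flags & FLAGS_CHECK_AT_FREE) {
		bad_reason = "unexpected flag(s) set";
		bad_flags = FLAGS_CHECK_AT_FREE;
	}
	if (bad_reason) {
		printf("bad page: %s (flags & mask = %#lx)\n",
		       bad_reason, page->flags & bad_flags);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fake_page ok = { 0, NULL, 0, 0 };
	struct fake_page bad = { 0, NULL, 1, 0 };

	printf("%d %d\n", free_page_check(&ok), free_page_check(&bad)); /* 0 1 */
	return 0;
}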
@@ -813,7 +827,7 @@ static inline void expand(struct zone *zone, struct page *page, | |||
813 | area--; | 827 | area--; |
814 | high--; | 828 | high--; |
815 | size >>= 1; | 829 | size >>= 1; |
816 | VM_BUG_ON(bad_range(zone, &page[size])); | 830 | VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); |
817 | 831 | ||
818 | #ifdef CONFIG_DEBUG_PAGEALLOC | 832 | #ifdef CONFIG_DEBUG_PAGEALLOC |
819 | if (high < debug_guardpage_minorder()) { | 833 | if (high < debug_guardpage_minorder()) { |
@@ -843,12 +857,23 @@ static inline void expand(struct zone *zone, struct page *page, | |||
843 | */ | 857 | */ |
844 | static inline int check_new_page(struct page *page) | 858 | static inline int check_new_page(struct page *page) |
845 | { | 859 | { |
846 | if (unlikely(page_mapcount(page) | | 860 | char *bad_reason = NULL; |
847 | (page->mapping != NULL) | | 861 | unsigned long bad_flags = 0; |
848 | (atomic_read(&page->_count) != 0) | | 862 | |
849 | (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | | 863 | if (unlikely(page_mapcount(page))) |
850 | (mem_cgroup_bad_page_check(page)))) { | 864 | bad_reason = "nonzero mapcount"; |
851 | bad_page(page); | 865 | if (unlikely(page->mapping != NULL)) |
866 | bad_reason = "non-NULL mapping"; | ||
867 | if (unlikely(atomic_read(&page->_count) != 0)) | ||
868 | bad_reason = "nonzero _count"; | ||
869 | if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { | ||
870 | bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; | ||
871 | bad_flags = PAGE_FLAGS_CHECK_AT_PREP; | ||
872 | } | ||
873 | if (unlikely(mem_cgroup_bad_page_check(page))) | ||
874 | bad_reason = "cgroup check failed"; | ||
875 | if (unlikely(bad_reason)) { | ||
876 | bad_page(page, bad_reason, bad_flags); | ||
852 | return 1; | 877 | return 1; |
853 | } | 878 | } |
854 | return 0; | 879 | return 0; |
@@ -955,7 +980,7 @@ int move_freepages(struct zone *zone, | |||
955 | 980 | ||
956 | for (page = start_page; page <= end_page;) { | 981 | for (page = start_page; page <= end_page;) { |
957 | /* Make sure we are not inadvertently changing nodes */ | 982 | /* Make sure we are not inadvertently changing nodes */ |
958 | VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); | 983 | VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); |
959 | 984 | ||
960 | if (!pfn_valid_within(page_to_pfn(page))) { | 985 | if (!pfn_valid_within(page_to_pfn(page))) { |
961 | page++; | 986 | page++; |
@@ -1404,8 +1429,8 @@ void split_page(struct page *page, unsigned int order) | |||
1404 | { | 1429 | { |
1405 | int i; | 1430 | int i; |
1406 | 1431 | ||
1407 | VM_BUG_ON(PageCompound(page)); | 1432 | VM_BUG_ON_PAGE(PageCompound(page), page); |
1408 | VM_BUG_ON(!page_count(page)); | 1433 | VM_BUG_ON_PAGE(!page_count(page), page); |
1409 | 1434 | ||
1410 | #ifdef CONFIG_KMEMCHECK | 1435 | #ifdef CONFIG_KMEMCHECK |
1411 | /* | 1436 | /* |
@@ -1552,7 +1577,7 @@ again: | |||
1552 | zone_statistics(preferred_zone, zone, gfp_flags); | 1577 | zone_statistics(preferred_zone, zone, gfp_flags); |
1553 | local_irq_restore(flags); | 1578 | local_irq_restore(flags); |
1554 | 1579 | ||
1555 | VM_BUG_ON(bad_range(zone, page)); | 1580 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
1556 | if (prep_new_page(page, order, gfp_flags)) | 1581 | if (prep_new_page(page, order, gfp_flags)) |
1557 | goto again; | 1582 | goto again; |
1558 | return page; | 1583 | return page; |
@@ -5729,7 +5754,12 @@ module_init(init_per_zone_wmark_min) | |||
5729 | int min_free_kbytes_sysctl_handler(ctl_table *table, int write, | 5754 | int min_free_kbytes_sysctl_handler(ctl_table *table, int write, |
5730 | void __user *buffer, size_t *length, loff_t *ppos) | 5755 | void __user *buffer, size_t *length, loff_t *ppos) |
5731 | { | 5756 | { |
5732 | proc_dointvec(table, write, buffer, length, ppos); | 5757 | int rc; |
5758 | |||
5759 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); | ||
5760 | if (rc) | ||
5761 | return rc; | ||
5762 | |||
5733 | if (write) { | 5763 | if (write) { |
5734 | user_min_free_kbytes = min_free_kbytes; | 5764 | user_min_free_kbytes = min_free_kbytes; |
5735 | setup_per_zone_wmarks(); | 5765 | setup_per_zone_wmarks(); |
@@ -5996,7 +6026,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
5996 | pfn = page_to_pfn(page); | 6026 | pfn = page_to_pfn(page); |
5997 | bitmap = get_pageblock_bitmap(zone, pfn); | 6027 | bitmap = get_pageblock_bitmap(zone, pfn); |
5998 | bitidx = pfn_to_bitidx(zone, pfn); | 6028 | bitidx = pfn_to_bitidx(zone, pfn); |
5999 | VM_BUG_ON(!zone_spans_pfn(zone, pfn)); | 6029 | VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); |
6000 | 6030 | ||
6001 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) | 6031 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) |
6002 | if (flags & value) | 6032 | if (flags & value) |
@@ -6494,12 +6524,24 @@ static void dump_page_flags(unsigned long flags) | |||
6494 | printk(")\n"); | 6524 | printk(")\n"); |
6495 | } | 6525 | } |
6496 | 6526 | ||
6497 | void dump_page(struct page *page) | 6527 | void dump_page_badflags(struct page *page, char *reason, unsigned long badflags) |
6498 | { | 6528 | { |
6499 | printk(KERN_ALERT | 6529 | printk(KERN_ALERT |
6500 | "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", | 6530 | "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", |
6501 | page, atomic_read(&page->_count), page_mapcount(page), | 6531 | page, atomic_read(&page->_count), page_mapcount(page), |
6502 | page->mapping, page->index); | 6532 | page->mapping, page->index); |
6503 | dump_page_flags(page->flags); | 6533 | dump_page_flags(page->flags); |
6534 | if (reason) | ||
6535 | pr_alert("page dumped because: %s\n", reason); | ||
6536 | if (page->flags & badflags) { | ||
6537 | pr_alert("bad because of flags:\n"); | ||
6538 | dump_page_flags(page->flags & badflags); | ||
6539 | } | ||
6504 | mem_cgroup_print_bad_page(page); | 6540 | mem_cgroup_print_bad_page(page); |
6505 | } | 6541 | } |
6542 | |||
6543 | void dump_page(struct page *page, char *reason) | ||
6544 | { | ||
6545 | dump_page_badflags(page, reason, 0); | ||
6546 | } | ||
6547 | EXPORT_SYMBOL_GPL(dump_page); | ||
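With this hunk dump_page() becomes a thin wrapper around the new dump_page_badflags(), which takes an optional reason string and a mask of flags considered bad; callers with nothing special to report simply pass 0. The wrapper-plus-extended-variant shape in plain C (names and fields are simplified stand-ins, not the kernel API):

#include <stdio.h>

struct fake_page {
	unsigned long flags;
	int refcount;
};

static void dump_page_badflags(const struct fake_page *page,
			       const char *reason, unsigned long badflags)
{
	printf("page flags:%#lx count:%d\n", page->flags, page->refcount);
	if (reason)
		printf("page dumped because: %s\n", reason);
	if (page->flags & badflags)
		printf("bad because of flags: %#lx\n", page->flags & badflags);
}

/* Old entry point kept as a thin wrapper with no bad-flags mask. */
static void dump_page(const struct fake_page *page, const char *reason)
{
	dump_page_badflags(page, reason, 0);
}

int main(void)
{
	struct fake_page p = { 0x5, 1 };

	dump_page(&p, "demo dump");
	dump_page_badflags(&p, "flag check", 0x4);
	return 0;
}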
diff --git a/mm/page_io.c b/mm/page_io.c index 8c79a4764be0..7247be6114ac 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -320,8 +320,8 @@ int swap_readpage(struct page *page) | |||
320 | int ret = 0; | 320 | int ret = 0; |
321 | struct swap_info_struct *sis = page_swap_info(page); | 321 | struct swap_info_struct *sis = page_swap_info(page); |
322 | 322 | ||
323 | VM_BUG_ON(!PageLocked(page)); | 323 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
324 | VM_BUG_ON(PageUptodate(page)); | 324 | VM_BUG_ON_PAGE(PageUptodate(page), page); |
325 | if (frontswap_load(page) == 0) { | 325 | if (frontswap_load(page) == 0) { |
326 | SetPageUptodate(page); | 326 | SetPageUptodate(page); |
327 | unlock_page(page); | 327 | unlock_page(page); |
@@ -848,9 +848,9 @@ out: | |||
848 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) | 848 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
849 | { | 849 | { |
850 | if (vma->vm_flags & VM_SHARED) | 850 | if (vma->vm_flags & VM_SHARED) |
851 | return 0; | 851 | return false; |
852 | 852 | ||
853 | return 1; | 853 | return true; |
854 | } | 854 | } |
855 | 855 | ||
856 | int page_mkclean(struct page *page) | 856 | int page_mkclean(struct page *page) |
@@ -894,9 +894,9 @@ void page_move_anon_rmap(struct page *page, | |||
894 | { | 894 | { |
895 | struct anon_vma *anon_vma = vma->anon_vma; | 895 | struct anon_vma *anon_vma = vma->anon_vma; |
896 | 896 | ||
897 | VM_BUG_ON(!PageLocked(page)); | 897 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
898 | VM_BUG_ON(!anon_vma); | 898 | VM_BUG_ON(!anon_vma); |
899 | VM_BUG_ON(page->index != linear_page_index(vma, address)); | 899 | VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); |
900 | 900 | ||
901 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 901 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
902 | page->mapping = (struct address_space *) anon_vma; | 902 | page->mapping = (struct address_space *) anon_vma; |
@@ -995,7 +995,7 @@ void do_page_add_anon_rmap(struct page *page, | |||
995 | if (unlikely(PageKsm(page))) | 995 | if (unlikely(PageKsm(page))) |
996 | return; | 996 | return; |
997 | 997 | ||
998 | VM_BUG_ON(!PageLocked(page)); | 998 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
999 | /* address might be in next vma when migration races vma_adjust */ | 999 | /* address might be in next vma when migration races vma_adjust */ |
1000 | if (first) | 1000 | if (first) |
1001 | __page_set_anon_rmap(page, vma, address, exclusive); | 1001 | __page_set_anon_rmap(page, vma, address, exclusive); |
@@ -1481,7 +1481,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) | |||
1481 | .anon_lock = page_lock_anon_vma_read, | 1481 | .anon_lock = page_lock_anon_vma_read, |
1482 | }; | 1482 | }; |
1483 | 1483 | ||
1484 | VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); | 1484 | VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); |
1485 | 1485 | ||
1486 | /* | 1486 | /* |
1487 | * During exec, a temporary VMA is setup and later moved. | 1487 | * During exec, a temporary VMA is setup and later moved. |
@@ -1533,7 +1533,7 @@ int try_to_munlock(struct page *page) | |||
1533 | 1533 | ||
1534 | }; | 1534 | }; |
1535 | 1535 | ||
1536 | VM_BUG_ON(!PageLocked(page) || PageLRU(page)); | 1536 | VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); |
1537 | 1537 | ||
1538 | ret = rmap_walk(page, &rwc); | 1538 | ret = rmap_walk(page, &rwc); |
1539 | return ret; | 1539 | return ret; |
diff --git a/mm/shmem.c b/mm/shmem.c index 902a14842b74..8156f95ec0cf 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -285,8 +285,8 @@ static int shmem_add_to_page_cache(struct page *page, | |||
285 | { | 285 | { |
286 | int error; | 286 | int error; |
287 | 287 | ||
288 | VM_BUG_ON(!PageLocked(page)); | 288 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
289 | VM_BUG_ON(!PageSwapBacked(page)); | 289 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
290 | 290 | ||
291 | page_cache_get(page); | 291 | page_cache_get(page); |
292 | page->mapping = mapping; | 292 | page->mapping = mapping; |
@@ -491,7 +491,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
491 | continue; | 491 | continue; |
492 | if (!unfalloc || !PageUptodate(page)) { | 492 | if (!unfalloc || !PageUptodate(page)) { |
493 | if (page->mapping == mapping) { | 493 | if (page->mapping == mapping) { |
494 | VM_BUG_ON(PageWriteback(page)); | 494 | VM_BUG_ON_PAGE(PageWriteback(page), page); |
495 | truncate_inode_page(mapping, page); | 495 | truncate_inode_page(mapping, page); |
496 | } | 496 | } |
497 | } | 497 | } |
@@ -568,7 +568,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
568 | lock_page(page); | 568 | lock_page(page); |
569 | if (!unfalloc || !PageUptodate(page)) { | 569 | if (!unfalloc || !PageUptodate(page)) { |
570 | if (page->mapping == mapping) { | 570 | if (page->mapping == mapping) { |
571 | VM_BUG_ON(PageWriteback(page)); | 571 | VM_BUG_ON_PAGE(PageWriteback(page), page); |
572 | truncate_inode_page(mapping, page); | 572 | truncate_inode_page(mapping, page); |
573 | } | 573 | } |
574 | } | 574 | } |
@@ -160,12 +160,36 @@ static inline const char *cache_name(struct kmem_cache *s) | |||
160 | return s->name; | 160 | return s->name; |
161 | } | 161 | } |
162 | 162 | ||
163 | /* | ||
164 | * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. | ||
165 | * That said, the caller must ensure the memcg's cache won't go away. Since once | ||
166 | * created a memcg's cache is destroyed only along with the root cache, it is | ||
167 | * true if we are going to allocate from the cache or hold a reference to the | ||
168 | * root cache by other means. Otherwise, we should hold either the slab_mutex | ||
169 | * or the memcg's slab_caches_mutex while calling this function and accessing | ||
170 | * the returned value. | ||
171 | */ | ||
163 | static inline struct kmem_cache * | 172 | static inline struct kmem_cache * |
164 | cache_from_memcg_idx(struct kmem_cache *s, int idx) | 173 | cache_from_memcg_idx(struct kmem_cache *s, int idx) |
165 | { | 174 | { |
175 | struct kmem_cache *cachep; | ||
176 | struct memcg_cache_params *params; | ||
177 | |||
166 | if (!s->memcg_params) | 178 | if (!s->memcg_params) |
167 | return NULL; | 179 | return NULL; |
168 | return s->memcg_params->memcg_caches[idx]; | 180 | |
181 | rcu_read_lock(); | ||
182 | params = rcu_dereference(s->memcg_params); | ||
183 | cachep = params->memcg_caches[idx]; | ||
184 | rcu_read_unlock(); | ||
185 | |||
186 | /* | ||
187 | * Make sure we will access the up-to-date value. The code updating | ||
188 | * memcg_caches issues a write barrier to match this (see | ||
189 | * memcg_register_cache()). | ||
190 | */ | ||
191 | smp_read_barrier_depends(); | ||
192 | return cachep; | ||
169 | } | 193 | } |
170 | 194 | ||
171 | static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) | 195 | static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) |
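cache_from_memcg_idx() now reads the memcg_params pointer under rcu_read_lock() and pairs the publisher's write barrier with smp_read_barrier_depends(), so the array contents reached through that pointer are at least as new as the pointer itself. Outside the kernel, the same publish/consume ordering can be modelled with C11 release/acquire atomics; a rough sketch under that analogy (types and names invented for illustration):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct params {
	int caches[4];
};

static _Atomic(struct params *) shared_params = NULL;

/* Publisher: initialise the array, then release-store the pointer. */
static void publish(void)
{
	struct params *p = calloc(1, sizeof(*p));

	p->caches[2] = 42;
	atomic_store_explicit(&shared_params, p, memory_order_release);
}

/* Reader: acquire-load the pointer; the array contents are then visible. */
static int read_idx(int idx)
{
	struct params *p =
		atomic_load_explicit(&shared_params, memory_order_acquire);

	return p ? p->caches[idx] : -1;
}

int main(void)
{
	publish();
	printf("%d\n", read_idx(2)); /* 42 */
	return 0;
}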
diff --git a/mm/slab_common.c b/mm/slab_common.c index 0b7bb399b0e4..8e40321da091 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -171,13 +171,26 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, | |||
171 | struct kmem_cache *parent_cache) | 171 | struct kmem_cache *parent_cache) |
172 | { | 172 | { |
173 | struct kmem_cache *s = NULL; | 173 | struct kmem_cache *s = NULL; |
174 | int err = 0; | 174 | int err; |
175 | 175 | ||
176 | get_online_cpus(); | 176 | get_online_cpus(); |
177 | mutex_lock(&slab_mutex); | 177 | mutex_lock(&slab_mutex); |
178 | 178 | ||
179 | if (!kmem_cache_sanity_check(memcg, name, size) == 0) | 179 | err = kmem_cache_sanity_check(memcg, name, size); |
180 | goto out_locked; | 180 | if (err) |
181 | goto out_unlock; | ||
182 | |||
183 | if (memcg) { | ||
184 | /* | ||
185 | * Since per-memcg caches are created asynchronously on first | ||
186 | * allocation (see memcg_kmem_get_cache()), several threads can | ||
187 | * try to create the same cache, but only one of them may | ||
188 | * succeed. Therefore if we get here and see the cache has | ||
189 | * already been created, we silently return NULL. | ||
190 | */ | ||
191 | if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg))) | ||
192 | goto out_unlock; | ||
193 | } | ||
181 | 194 | ||
182 | /* | 195 | /* |
183 | * Some allocators will constrain the set of valid flags to a subset | 196 | * Some allocators will constrain the set of valid flags to a subset |
@@ -189,45 +202,45 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, | |||
189 | 202 | ||
190 | s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); | 203 | s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); |
191 | if (s) | 204 | if (s) |
192 | goto out_locked; | 205 | goto out_unlock; |
193 | 206 | ||
207 | err = -ENOMEM; | ||
194 | s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); | 208 | s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); |
195 | if (s) { | 209 | if (!s) |
196 | s->object_size = s->size = size; | 210 | goto out_unlock; |
197 | s->align = calculate_alignment(flags, align, size); | ||
198 | s->ctor = ctor; | ||
199 | 211 | ||
200 | if (memcg_register_cache(memcg, s, parent_cache)) { | 212 | s->object_size = s->size = size; |
201 | kmem_cache_free(kmem_cache, s); | 213 | s->align = calculate_alignment(flags, align, size); |
202 | err = -ENOMEM; | 214 | s->ctor = ctor; |
203 | goto out_locked; | ||
204 | } | ||
205 | 215 | ||
206 | s->name = kstrdup(name, GFP_KERNEL); | 216 | s->name = kstrdup(name, GFP_KERNEL); |
207 | if (!s->name) { | 217 | if (!s->name) |
208 | kmem_cache_free(kmem_cache, s); | 218 | goto out_free_cache; |
209 | err = -ENOMEM; | ||
210 | goto out_locked; | ||
211 | } | ||
212 | 219 | ||
213 | err = __kmem_cache_create(s, flags); | 220 | err = memcg_alloc_cache_params(memcg, s, parent_cache); |
214 | if (!err) { | 221 | if (err) |
215 | s->refcount = 1; | 222 | goto out_free_cache; |
216 | list_add(&s->list, &slab_caches); | 223 | |
217 | memcg_cache_list_add(memcg, s); | 224 | err = __kmem_cache_create(s, flags); |
218 | } else { | 225 | if (err) |
219 | kfree(s->name); | 226 | goto out_free_cache; |
220 | kmem_cache_free(kmem_cache, s); | ||
221 | } | ||
222 | } else | ||
223 | err = -ENOMEM; | ||
224 | 227 | ||
225 | out_locked: | 228 | s->refcount = 1; |
229 | list_add(&s->list, &slab_caches); | ||
230 | memcg_register_cache(s); | ||
231 | |||
232 | out_unlock: | ||
226 | mutex_unlock(&slab_mutex); | 233 | mutex_unlock(&slab_mutex); |
227 | put_online_cpus(); | 234 | put_online_cpus(); |
228 | 235 | ||
229 | if (err) { | 236 | /* |
230 | 237 | * There is no point in flooding logs with warnings or especially | |
238 | * crashing the system if we fail to create a cache for a memcg. In | ||
239 | * this case we will be accounting the memcg allocation to the root | ||
240 | * cgroup until we succeed to create its own cache, but it isn't that | ||
241 | * critical. | ||
242 | */ | ||
243 | if (err && !memcg) { | ||
231 | if (flags & SLAB_PANIC) | 244 | if (flags & SLAB_PANIC) |
232 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", | 245 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", |
233 | name, err); | 246 | name, err); |
@@ -236,11 +249,15 @@ out_locked: | |||
236 | name, err); | 249 | name, err); |
237 | dump_stack(); | 250 | dump_stack(); |
238 | } | 251 | } |
239 | |||
240 | return NULL; | 252 | return NULL; |
241 | } | 253 | } |
242 | |||
243 | return s; | 254 | return s; |
255 | |||
256 | out_free_cache: | ||
257 | memcg_free_cache_params(s); | ||
258 | kfree(s->name); | ||
259 | kmem_cache_free(kmem_cache, s); | ||
260 | goto out_unlock; | ||
244 | } | 261 | } |
245 | 262 | ||
246 | struct kmem_cache * | 263 | struct kmem_cache * |
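The kmem_cache_create_memcg() cleanup above converts nested "allocate, check, free on failure" branches into one forward path with out_free_cache/out_unlock labels, so every failure unwinds through a single exit sequence. The shape of that goto-unwind pattern in plain C, with placeholder resources instead of the slab internals:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct cache {
	char *name;
	void *storage;
};

/* Single forward path; every failure unwinds through the same labels. */
static struct cache *cache_create(const char *name, size_t size)
{
	struct cache *c;

	c = calloc(1, sizeof(*c));
	if (!c)
		goto out;

	c->name = strdup(name);
	if (!c->name)
		goto out_free_cache;

	c->storage = malloc(size);
	if (!c->storage)
		goto out_free_name;

	return c;

out_free_name:
	free(c->name);
out_free_cache:
	free(c);
out:
	return NULL;
}

int main(void)
{
	struct cache *c = cache_create("demo", 128);

	printf("%s\n", c ? c->name : "failed");
	return 0;
}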
@@ -263,11 +280,12 @@ void kmem_cache_destroy(struct kmem_cache *s) | |||
263 | list_del(&s->list); | 280 | list_del(&s->list); |
264 | 281 | ||
265 | if (!__kmem_cache_shutdown(s)) { | 282 | if (!__kmem_cache_shutdown(s)) { |
283 | memcg_unregister_cache(s); | ||
266 | mutex_unlock(&slab_mutex); | 284 | mutex_unlock(&slab_mutex); |
267 | if (s->flags & SLAB_DESTROY_BY_RCU) | 285 | if (s->flags & SLAB_DESTROY_BY_RCU) |
268 | rcu_barrier(); | 286 | rcu_barrier(); |
269 | 287 | ||
270 | memcg_release_cache(s); | 288 | memcg_free_cache_params(s); |
271 | kfree(s->name); | 289 | kfree(s->name); |
272 | kmem_cache_free(kmem_cache, s); | 290 | kmem_cache_free(kmem_cache, s); |
273 | } else { | 291 | } else { |
@@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s, | |||
1559 | new.freelist = freelist; | 1559 | new.freelist = freelist; |
1560 | } | 1560 | } |
1561 | 1561 | ||
1562 | VM_BUG_ON(new.frozen); | 1562 | VM_BUG_ON_PAGE(new.frozen, &new); |
1563 | new.frozen = 1; | 1563 | new.frozen = 1; |
1564 | 1564 | ||
1565 | if (!__cmpxchg_double_slab(s, page, | 1565 | if (!__cmpxchg_double_slab(s, page, |
@@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, | |||
1812 | set_freepointer(s, freelist, prior); | 1812 | set_freepointer(s, freelist, prior); |
1813 | new.counters = counters; | 1813 | new.counters = counters; |
1814 | new.inuse--; | 1814 | new.inuse--; |
1815 | VM_BUG_ON(!new.frozen); | 1815 | VM_BUG_ON_PAGE(!new.frozen, &new); |
1816 | 1816 | ||
1817 | } while (!__cmpxchg_double_slab(s, page, | 1817 | } while (!__cmpxchg_double_slab(s, page, |
1818 | prior, counters, | 1818 | prior, counters, |
@@ -1840,7 +1840,7 @@ redo: | |||
1840 | 1840 | ||
1841 | old.freelist = page->freelist; | 1841 | old.freelist = page->freelist; |
1842 | old.counters = page->counters; | 1842 | old.counters = page->counters; |
1843 | VM_BUG_ON(!old.frozen); | 1843 | VM_BUG_ON_PAGE(!old.frozen, &old); |
1844 | 1844 | ||
1845 | /* Determine target state of the slab */ | 1845 | /* Determine target state of the slab */ |
1846 | new.counters = old.counters; | 1846 | new.counters = old.counters; |
@@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s, | |||
1952 | 1952 | ||
1953 | old.freelist = page->freelist; | 1953 | old.freelist = page->freelist; |
1954 | old.counters = page->counters; | 1954 | old.counters = page->counters; |
1955 | VM_BUG_ON(!old.frozen); | 1955 | VM_BUG_ON_PAGE(!old.frozen, &old); |
1956 | 1956 | ||
1957 | new.counters = old.counters; | 1957 | new.counters = old.counters; |
1958 | new.freelist = old.freelist; | 1958 | new.freelist = old.freelist; |
@@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) | |||
2225 | counters = page->counters; | 2225 | counters = page->counters; |
2226 | 2226 | ||
2227 | new.counters = counters; | 2227 | new.counters = counters; |
2228 | VM_BUG_ON(!new.frozen); | 2228 | VM_BUG_ON_PAGE(!new.frozen, &new); |
2229 | 2229 | ||
2230 | new.inuse = page->objects; | 2230 | new.inuse = page->objects; |
2231 | new.frozen = freelist != NULL; | 2231 | new.frozen = freelist != NULL; |
@@ -2319,7 +2319,7 @@ load_freelist: | |||
2319 | * page is pointing to the page from which the objects are obtained. | 2319 | * page is pointing to the page from which the objects are obtained. |
2320 | * That page must be frozen for per cpu allocations to work. | 2320 | * That page must be frozen for per cpu allocations to work. |
2321 | */ | 2321 | */ |
2322 | VM_BUG_ON(!c->page->frozen); | 2322 | VM_BUG_ON_PAGE(!c->page->frozen, c->page); |
2323 | c->freelist = get_freepointer(s, freelist); | 2323 | c->freelist = get_freepointer(s, freelist); |
2324 | c->tid = next_tid(c->tid); | 2324 | c->tid = next_tid(c->tid); |
2325 | local_irq_restore(flags); | 2325 | local_irq_restore(flags); |
@@ -57,7 +57,7 @@ static void __page_cache_release(struct page *page) | |||
57 | 57 | ||
58 | spin_lock_irqsave(&zone->lru_lock, flags); | 58 | spin_lock_irqsave(&zone->lru_lock, flags); |
59 | lruvec = mem_cgroup_page_lruvec(page, zone); | 59 | lruvec = mem_cgroup_page_lruvec(page, zone); |
60 | VM_BUG_ON(!PageLRU(page)); | 60 | VM_BUG_ON_PAGE(!PageLRU(page), page); |
61 | __ClearPageLRU(page); | 61 | __ClearPageLRU(page); |
62 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); | 62 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); |
63 | spin_unlock_irqrestore(&zone->lru_lock, flags); | 63 | spin_unlock_irqrestore(&zone->lru_lock, flags); |
@@ -130,8 +130,8 @@ static void put_compound_page(struct page *page) | |||
130 | * __split_huge_page_refcount cannot race | 130 | * __split_huge_page_refcount cannot race |
131 | * here. | 131 | * here. |
132 | */ | 132 | */ |
133 | VM_BUG_ON(!PageHead(page_head)); | 133 | VM_BUG_ON_PAGE(!PageHead(page_head), page_head); |
134 | VM_BUG_ON(page_mapcount(page) != 0); | 134 | VM_BUG_ON_PAGE(page_mapcount(page) != 0, page); |
135 | if (put_page_testzero(page_head)) { | 135 | if (put_page_testzero(page_head)) { |
136 | /* | 136 | /* |
137 | * If this is the tail of a slab | 137 | * If this is the tail of a slab |
@@ -148,7 +148,7 @@ static void put_compound_page(struct page *page) | |||
148 | * the compound page enters the buddy | 148 | * the compound page enters the buddy |
149 | * allocator. | 149 | * allocator. |
150 | */ | 150 | */ |
151 | VM_BUG_ON(PageSlab(page_head)); | 151 | VM_BUG_ON_PAGE(PageSlab(page_head), page_head); |
152 | __put_compound_page(page_head); | 152 | __put_compound_page(page_head); |
153 | } | 153 | } |
154 | return; | 154 | return; |
@@ -199,7 +199,7 @@ out_put_single: | |||
199 | __put_single_page(page); | 199 | __put_single_page(page); |
200 | return; | 200 | return; |
201 | } | 201 | } |
202 | VM_BUG_ON(page_head != page->first_page); | 202 | VM_BUG_ON_PAGE(page_head != page->first_page, page); |
203 | /* | 203 | /* |
204 | * We can release the refcount taken by | 204 | * We can release the refcount taken by |
205 | * get_page_unless_zero() now that | 205 | * get_page_unless_zero() now that |
@@ -207,12 +207,12 @@ out_put_single: | |||
207 | * compound_lock. | 207 | * compound_lock. |
208 | */ | 208 | */ |
209 | if (put_page_testzero(page_head)) | 209 | if (put_page_testzero(page_head)) |
210 | VM_BUG_ON(1); | 210 | VM_BUG_ON_PAGE(1, page_head); |
211 | /* __split_huge_page_refcount will wait now */ | 211 | /* __split_huge_page_refcount will wait now */ |
212 | VM_BUG_ON(page_mapcount(page) <= 0); | 212 | VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page); |
213 | atomic_dec(&page->_mapcount); | 213 | atomic_dec(&page->_mapcount); |
214 | VM_BUG_ON(atomic_read(&page_head->_count) <= 0); | 214 | VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head); |
215 | VM_BUG_ON(atomic_read(&page->_count) != 0); | 215 | VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); |
216 | compound_unlock_irqrestore(page_head, flags); | 216 | compound_unlock_irqrestore(page_head, flags); |
217 | 217 | ||
218 | if (put_page_testzero(page_head)) { | 218 | if (put_page_testzero(page_head)) { |
@@ -223,7 +223,7 @@ out_put_single: | |||
223 | } | 223 | } |
224 | } else { | 224 | } else { |
225 | /* page_head is a dangling pointer */ | 225 | /* page_head is a dangling pointer */ |
226 | VM_BUG_ON(PageTail(page)); | 226 | VM_BUG_ON_PAGE(PageTail(page), page); |
227 | goto out_put_single; | 227 | goto out_put_single; |
228 | } | 228 | } |
229 | } | 229 | } |
@@ -264,7 +264,7 @@ bool __get_page_tail(struct page *page) | |||
264 | * page. __split_huge_page_refcount | 264 | * page. __split_huge_page_refcount |
265 | * cannot race here. | 265 | * cannot race here. |
266 | */ | 266 | */ |
267 | VM_BUG_ON(!PageHead(page_head)); | 267 | VM_BUG_ON_PAGE(!PageHead(page_head), page_head); |
268 | __get_page_tail_foll(page, true); | 268 | __get_page_tail_foll(page, true); |
269 | return true; | 269 | return true; |
270 | } else { | 270 | } else { |
@@ -604,8 +604,8 @@ EXPORT_SYMBOL(__lru_cache_add); | |||
604 | */ | 604 | */ |
605 | void lru_cache_add(struct page *page) | 605 | void lru_cache_add(struct page *page) |
606 | { | 606 | { |
607 | VM_BUG_ON(PageActive(page) && PageUnevictable(page)); | 607 | VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); |
608 | VM_BUG_ON(PageLRU(page)); | 608 | VM_BUG_ON_PAGE(PageLRU(page), page); |
609 | __lru_cache_add(page); | 609 | __lru_cache_add(page); |
610 | } | 610 | } |
611 | 611 | ||
@@ -846,7 +846,7 @@ void release_pages(struct page **pages, int nr, int cold) | |||
846 | } | 846 | } |
847 | 847 | ||
848 | lruvec = mem_cgroup_page_lruvec(page, zone); | 848 | lruvec = mem_cgroup_page_lruvec(page, zone); |
849 | VM_BUG_ON(!PageLRU(page)); | 849 | VM_BUG_ON_PAGE(!PageLRU(page), page); |
850 | __ClearPageLRU(page); | 850 | __ClearPageLRU(page); |
851 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); | 851 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); |
852 | } | 852 | } |
@@ -888,9 +888,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
888 | { | 888 | { |
889 | const int file = 0; | 889 | const int file = 0; |
890 | 890 | ||
891 | VM_BUG_ON(!PageHead(page)); | 891 | VM_BUG_ON_PAGE(!PageHead(page), page); |
892 | VM_BUG_ON(PageCompound(page_tail)); | 892 | VM_BUG_ON_PAGE(PageCompound(page_tail), page); |
893 | VM_BUG_ON(PageLRU(page_tail)); | 893 | VM_BUG_ON_PAGE(PageLRU(page_tail), page); |
894 | VM_BUG_ON(NR_CPUS != 1 && | 894 | VM_BUG_ON(NR_CPUS != 1 && |
895 | !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); | 895 | !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); |
896 | 896 | ||
@@ -929,7 +929,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, | |||
929 | int active = PageActive(page); | 929 | int active = PageActive(page); |
930 | enum lru_list lru = page_lru(page); | 930 | enum lru_list lru = page_lru(page); |
931 | 931 | ||
932 | VM_BUG_ON(PageLRU(page)); | 932 | VM_BUG_ON_PAGE(PageLRU(page), page); |
933 | 933 | ||
934 | SetPageLRU(page); | 934 | SetPageLRU(page); |
935 | add_page_to_lru_list(page, lruvec, lru); | 935 | add_page_to_lru_list(page, lruvec, lru); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index e6f15f8ca2af..98e85e9c2b2d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -83,9 +83,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) | |||
83 | int error; | 83 | int error; |
84 | struct address_space *address_space; | 84 | struct address_space *address_space; |
85 | 85 | ||
86 | VM_BUG_ON(!PageLocked(page)); | 86 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
87 | VM_BUG_ON(PageSwapCache(page)); | 87 | VM_BUG_ON_PAGE(PageSwapCache(page), page); |
88 | VM_BUG_ON(!PageSwapBacked(page)); | 88 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
89 | 89 | ||
90 | page_cache_get(page); | 90 | page_cache_get(page); |
91 | SetPageSwapCache(page); | 91 | SetPageSwapCache(page); |
@@ -139,9 +139,9 @@ void __delete_from_swap_cache(struct page *page) | |||
139 | swp_entry_t entry; | 139 | swp_entry_t entry; |
140 | struct address_space *address_space; | 140 | struct address_space *address_space; |
141 | 141 | ||
142 | VM_BUG_ON(!PageLocked(page)); | 142 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
143 | VM_BUG_ON(!PageSwapCache(page)); | 143 | VM_BUG_ON_PAGE(!PageSwapCache(page), page); |
144 | VM_BUG_ON(PageWriteback(page)); | 144 | VM_BUG_ON_PAGE(PageWriteback(page), page); |
145 | 145 | ||
146 | entry.val = page_private(page); | 146 | entry.val = page_private(page); |
147 | address_space = swap_address_space(entry); | 147 | address_space = swap_address_space(entry); |
@@ -165,8 +165,8 @@ int add_to_swap(struct page *page, struct list_head *list) | |||
165 | swp_entry_t entry; | 165 | swp_entry_t entry; |
166 | int err; | 166 | int err; |
167 | 167 | ||
168 | VM_BUG_ON(!PageLocked(page)); | 168 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
169 | VM_BUG_ON(!PageUptodate(page)); | 169 | VM_BUG_ON_PAGE(!PageUptodate(page), page); |
170 | 170 | ||
171 | entry = get_swap_page(); | 171 | entry = get_swap_page(); |
172 | if (!entry.val) | 172 | if (!entry.val) |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 612a7c9795f6..c6c13b050a58 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -616,7 +616,7 @@ scan: | |||
616 | } | 616 | } |
617 | } | 617 | } |
618 | offset = si->lowest_bit; | 618 | offset = si->lowest_bit; |
619 | while (++offset < scan_base) { | 619 | while (offset < scan_base) { |
620 | if (!si->swap_map[offset]) { | 620 | if (!si->swap_map[offset]) { |
621 | spin_lock(&si->lock); | 621 | spin_lock(&si->lock); |
622 | goto checks; | 622 | goto checks; |
@@ -629,6 +629,7 @@ scan: | |||
629 | cond_resched(); | 629 | cond_resched(); |
630 | latency_ration = LATENCY_LIMIT; | 630 | latency_ration = LATENCY_LIMIT; |
631 | } | 631 | } |
632 | offset++; | ||
632 | } | 633 | } |
633 | spin_lock(&si->lock); | 634 | spin_lock(&si->lock); |
634 | 635 | ||
@@ -906,7 +907,7 @@ int reuse_swap_page(struct page *page) | |||
906 | { | 907 | { |
907 | int count; | 908 | int count; |
908 | 909 | ||
909 | VM_BUG_ON(!PageLocked(page)); | 910 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
910 | if (unlikely(PageKsm(page))) | 911 | if (unlikely(PageKsm(page))) |
911 | return 0; | 912 | return 0; |
912 | count = page_mapcount(page); | 913 | count = page_mapcount(page); |
@@ -926,7 +927,7 @@ int reuse_swap_page(struct page *page) | |||
926 | */ | 927 | */ |
927 | int try_to_free_swap(struct page *page) | 928 | int try_to_free_swap(struct page *page) |
928 | { | 929 | { |
929 | VM_BUG_ON(!PageLocked(page)); | 930 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
930 | 931 | ||
931 | if (!PageSwapCache(page)) | 932 | if (!PageSwapCache(page)) |
932 | return 0; | 933 | return 0; |
@@ -2714,7 +2715,7 @@ struct swap_info_struct *page_swap_info(struct page *page) | |||
2714 | */ | 2715 | */ |
2715 | struct address_space *__page_file_mapping(struct page *page) | 2716 | struct address_space *__page_file_mapping(struct page *page) |
2716 | { | 2717 | { |
2717 | VM_BUG_ON(!PageSwapCache(page)); | 2718 | VM_BUG_ON_PAGE(!PageSwapCache(page), page); |
2718 | return page_swap_info(page)->swap_file->f_mapping; | 2719 | return page_swap_info(page)->swap_file->f_mapping; |
2719 | } | 2720 | } |
2720 | EXPORT_SYMBOL_GPL(__page_file_mapping); | 2721 | EXPORT_SYMBOL_GPL(__page_file_mapping); |
@@ -2722,7 +2723,7 @@ EXPORT_SYMBOL_GPL(__page_file_mapping); | |||
2722 | pgoff_t __page_file_index(struct page *page) | 2723 | pgoff_t __page_file_index(struct page *page) |
2723 | { | 2724 | { |
2724 | swp_entry_t swap = { .val = page_private(page) }; | 2725 | swp_entry_t swap = { .val = page_private(page) }; |
2725 | VM_BUG_ON(!PageSwapCache(page)); | 2726 | VM_BUG_ON_PAGE(!PageSwapCache(page), page); |
2726 | return swp_offset(swap); | 2727 | return swp_offset(swap); |
2727 | } | 2728 | } |
2728 | EXPORT_SYMBOL_GPL(__page_file_index); | 2729 | EXPORT_SYMBOL_GPL(__page_file_index); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index eea668d9cff6..90c4075d8d75 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -281,17 +281,34 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, | |||
281 | nr_pages_scanned, lru_pages, | 281 | nr_pages_scanned, lru_pages, |
282 | max_pass, delta, total_scan); | 282 | max_pass, delta, total_scan); |
283 | 283 | ||
284 | while (total_scan >= batch_size) { | 284 | /* |
285 | * Normally, we should not scan less than batch_size objects in one | ||
286 | * pass to avoid too frequent shrinker calls, but if the slab has less | ||
287 | * than batch_size objects in total and we are really tight on memory, | ||
288 | * we will try to reclaim all available objects, otherwise we can end | ||
289 | * up failing allocations although there are plenty of reclaimable | ||
290 | * objects spread over several slabs with usage less than the | ||
291 | * batch_size. | ||
292 | * | ||
293 | * We detect the "tight on memory" situations by looking at the total | ||
294 | * number of objects we want to scan (total_scan). If it is greater | ||
295 | * than the total number of objects on slab (max_pass), we must be | ||
296 | * scanning at high prio and therefore should try to reclaim as much as | ||
297 | * possible. | ||
298 | */ | ||
299 | while (total_scan >= batch_size || | ||
300 | total_scan >= max_pass) { | ||
285 | unsigned long ret; | 301 | unsigned long ret; |
302 | unsigned long nr_to_scan = min(batch_size, total_scan); | ||
286 | 303 | ||
287 | shrinkctl->nr_to_scan = batch_size; | 304 | shrinkctl->nr_to_scan = nr_to_scan; |
288 | ret = shrinker->scan_objects(shrinker, shrinkctl); | 305 | ret = shrinker->scan_objects(shrinker, shrinkctl); |
289 | if (ret == SHRINK_STOP) | 306 | if (ret == SHRINK_STOP) |
290 | break; | 307 | break; |
291 | freed += ret; | 308 | freed += ret; |
292 | 309 | ||
293 | count_vm_events(SLABS_SCANNED, batch_size); | 310 | count_vm_events(SLABS_SCANNED, nr_to_scan); |
294 | total_scan -= batch_size; | 311 | total_scan -= nr_to_scan; |
295 | 312 | ||
296 | cond_resched(); | 313 | cond_resched(); |
297 | } | 314 | } |
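The shrink_slab_node() comment above describes the new loop condition: normally scan in batch_size chunks, but when total_scan already covers every object on the slab (total_scan >= max_pass), keep scanning even below batch_size so small caches can still be drained under memory pressure. A compact userspace model of that loop:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Returns how many objects were scanned in total. */
static unsigned long scan_slab(unsigned long total_scan,
			       unsigned long max_pass,
			       unsigned long batch_size)
{
	unsigned long scanned = 0;

	while (total_scan >= batch_size || total_scan >= max_pass) {
		unsigned long nr_to_scan = min_ul(batch_size, total_scan);

		scanned += nr_to_scan;
		total_scan -= nr_to_scan;
	}
	return scanned;
}

int main(void)
{
	/* Large cache: scanned in whole batches only. */
	printf("%lu\n", scan_slab(250, 10000, 128)); /* 128 */
	/* Tiny cache under pressure: drained completely. */
	printf("%lu\n", scan_slab(70, 50, 128));     /* 70 */
	return 0;
}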
@@ -352,16 +369,17 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl, | |||
352 | } | 369 | } |
353 | 370 | ||
354 | list_for_each_entry(shrinker, &shrinker_list, list) { | 371 | list_for_each_entry(shrinker, &shrinker_list, list) { |
355 | for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { | 372 | if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) { |
356 | if (!node_online(shrinkctl->nid)) | 373 | shrinkctl->nid = 0; |
357 | continue; | ||
358 | |||
359 | if (!(shrinker->flags & SHRINKER_NUMA_AWARE) && | ||
360 | (shrinkctl->nid != 0)) | ||
361 | break; | ||
362 | |||
363 | freed += shrink_slab_node(shrinkctl, shrinker, | 374 | freed += shrink_slab_node(shrinkctl, shrinker, |
364 | nr_pages_scanned, lru_pages); | 375 | nr_pages_scanned, lru_pages); |
376 | continue; | ||
377 | } | ||
378 | |||
379 | for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { | ||
380 | if (node_online(shrinkctl->nid)) | ||
381 | freed += shrink_slab_node(shrinkctl, shrinker, | ||
382 | nr_pages_scanned, lru_pages); | ||
365 | 383 | ||
366 | } | 384 | } |
367 | } | 385 | } |
@@ -603,7 +621,7 @@ void putback_lru_page(struct page *page) | |||
603 | bool is_unevictable; | 621 | bool is_unevictable; |
604 | int was_unevictable = PageUnevictable(page); | 622 | int was_unevictable = PageUnevictable(page); |
605 | 623 | ||
606 | VM_BUG_ON(PageLRU(page)); | 624 | VM_BUG_ON_PAGE(PageLRU(page), page); |
607 | 625 | ||
608 | redo: | 626 | redo: |
609 | ClearPageUnevictable(page); | 627 | ClearPageUnevictable(page); |
@@ -794,8 +812,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
794 | if (!trylock_page(page)) | 812 | if (!trylock_page(page)) |
795 | goto keep; | 813 | goto keep; |
796 | 814 | ||
797 | VM_BUG_ON(PageActive(page)); | 815 | VM_BUG_ON_PAGE(PageActive(page), page); |
798 | VM_BUG_ON(page_zone(page) != zone); | 816 | VM_BUG_ON_PAGE(page_zone(page) != zone, page); |
799 | 817 | ||
800 | sc->nr_scanned++; | 818 | sc->nr_scanned++; |
801 | 819 | ||
@@ -1079,14 +1097,14 @@ activate_locked: | |||
1079 | /* Not a candidate for swapping, so reclaim swap space. */ | 1097 | /* Not a candidate for swapping, so reclaim swap space. */ |
1080 | if (PageSwapCache(page) && vm_swap_full()) | 1098 | if (PageSwapCache(page) && vm_swap_full()) |
1081 | try_to_free_swap(page); | 1099 | try_to_free_swap(page); |
1082 | VM_BUG_ON(PageActive(page)); | 1100 | VM_BUG_ON_PAGE(PageActive(page), page); |
1083 | SetPageActive(page); | 1101 | SetPageActive(page); |
1084 | pgactivate++; | 1102 | pgactivate++; |
1085 | keep_locked: | 1103 | keep_locked: |
1086 | unlock_page(page); | 1104 | unlock_page(page); |
1087 | keep: | 1105 | keep: |
1088 | list_add(&page->lru, &ret_pages); | 1106 | list_add(&page->lru, &ret_pages); |
1089 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); | 1107 | VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); |
1090 | } | 1108 | } |
1091 | 1109 | ||
1092 | free_hot_cold_page_list(&free_pages, 1); | 1110 | free_hot_cold_page_list(&free_pages, 1); |
@@ -1240,7 +1258,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
1240 | page = lru_to_page(src); | 1258 | page = lru_to_page(src); |
1241 | prefetchw_prev_lru_page(page, src, flags); | 1259 | prefetchw_prev_lru_page(page, src, flags); |
1242 | 1260 | ||
1243 | VM_BUG_ON(!PageLRU(page)); | 1261 | VM_BUG_ON_PAGE(!PageLRU(page), page); |
1244 | 1262 | ||
1245 | switch (__isolate_lru_page(page, mode)) { | 1263 | switch (__isolate_lru_page(page, mode)) { |
1246 | case 0: | 1264 | case 0: |
@@ -1295,7 +1313,7 @@ int isolate_lru_page(struct page *page) | |||
1295 | { | 1313 | { |
1296 | int ret = -EBUSY; | 1314 | int ret = -EBUSY; |
1297 | 1315 | ||
1298 | VM_BUG_ON(!page_count(page)); | 1316 | VM_BUG_ON_PAGE(!page_count(page), page); |
1299 | 1317 | ||
1300 | if (PageLRU(page)) { | 1318 | if (PageLRU(page)) { |
1301 | struct zone *zone = page_zone(page); | 1319 | struct zone *zone = page_zone(page); |
@@ -1366,7 +1384,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) | |||
1366 | struct page *page = lru_to_page(page_list); | 1384 | struct page *page = lru_to_page(page_list); |
1367 | int lru; | 1385 | int lru; |
1368 | 1386 | ||
1369 | VM_BUG_ON(PageLRU(page)); | 1387 | VM_BUG_ON_PAGE(PageLRU(page), page); |
1370 | list_del(&page->lru); | 1388 | list_del(&page->lru); |
1371 | if (unlikely(!page_evictable(page))) { | 1389 | if (unlikely(!page_evictable(page))) { |
1372 | spin_unlock_irq(&zone->lru_lock); | 1390 | spin_unlock_irq(&zone->lru_lock); |
@@ -1586,7 +1604,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec, | |||
1586 | page = lru_to_page(list); | 1604 | page = lru_to_page(list); |
1587 | lruvec = mem_cgroup_page_lruvec(page, zone); | 1605 | lruvec = mem_cgroup_page_lruvec(page, zone); |
1588 | 1606 | ||
1589 | VM_BUG_ON(PageLRU(page)); | 1607 | VM_BUG_ON_PAGE(PageLRU(page), page); |
1590 | SetPageLRU(page); | 1608 | SetPageLRU(page); |
1591 | 1609 | ||
1592 | nr_pages = hpage_nr_pages(page); | 1610 | nr_pages = hpage_nr_pages(page); |
@@ -3701,7 +3719,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages) | |||
3701 | if (page_evictable(page)) { | 3719 | if (page_evictable(page)) { |
3702 | enum lru_list lru = page_lru_base_type(page); | 3720 | enum lru_list lru = page_lru_base_type(page); |
3703 | 3721 | ||
3704 | VM_BUG_ON(PageActive(page)); | 3722 | VM_BUG_ON_PAGE(PageActive(page), page); |
3705 | ClearPageUnevictable(page); | 3723 | ClearPageUnevictable(page); |
3706 | del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); | 3724 | del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); |
3707 | add_page_to_lru_list(page, lruvec, lru); | 3725 | add_page_to_lru_list(page, lruvec, lru); |
diff --git a/mm/zswap.c b/mm/zswap.c index 5a63f78a5601..e55bab9dc41f 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -77,12 +77,12 @@ static u64 zswap_duplicate_entry; | |||
77 | **********************************/ | 77 | **********************************/ |
78 | /* Enable/disable zswap (disabled by default, fixed at boot for now) */ | 78 | /* Enable/disable zswap (disabled by default, fixed at boot for now) */ |
79 | static bool zswap_enabled __read_mostly; | 79 | static bool zswap_enabled __read_mostly; |
80 | module_param_named(enabled, zswap_enabled, bool, 0); | 80 | module_param_named(enabled, zswap_enabled, bool, 0444); |
81 | 81 | ||
82 | /* Compressor to be used by zswap (fixed at boot for now) */ | 82 | /* Compressor to be used by zswap (fixed at boot for now) */ |
83 | #define ZSWAP_COMPRESSOR_DEFAULT "lzo" | 83 | #define ZSWAP_COMPRESSOR_DEFAULT "lzo" |
84 | static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; | 84 | static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; |
85 | module_param_named(compressor, zswap_compressor, charp, 0); | 85 | module_param_named(compressor, zswap_compressor, charp, 0444); |
86 | 86 | ||
87 | /* The maximum percentage of memory that the compressed pool can occupy */ | 87 | /* The maximum percentage of memory that the compressed pool can occupy */ |
88 | static unsigned int zswap_max_pool_percent = 20; | 88 | static unsigned int zswap_max_pool_percent = 20; |
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 8a520996f3d2..e498a62b8f97 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */ | 23 | #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */ |
24 | #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */ | 24 | #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */ |
25 | #define ALPHA_BASE ALPHA_SCALE /* 1.0 */ | 25 | #define ALPHA_BASE ALPHA_SCALE /* 1.0 */ |
26 | #define U32_MAX ((u32)~0U) | ||
27 | #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */ | 26 | #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */ |
28 | 27 | ||
29 | #define BETA_SHIFT 6 | 28 | #define BETA_SHIFT 6 |
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 3f64a66bf5d9..b827a0f1f351 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
@@ -46,31 +46,12 @@ struct iface_node { | |||
46 | static void | 46 | static void |
47 | rbtree_destroy(struct rb_root *root) | 47 | rbtree_destroy(struct rb_root *root) |
48 | { | 48 | { |
49 | struct rb_node *p, *n = root->rb_node; | 49 | struct iface_node *node, *next; |
50 | struct iface_node *node; | ||
51 | |||
52 | /* Non-recursive destroy, like in ext3 */ | ||
53 | while (n) { | ||
54 | if (n->rb_left) { | ||
55 | n = n->rb_left; | ||
56 | continue; | ||
57 | } | ||
58 | if (n->rb_right) { | ||
59 | n = n->rb_right; | ||
60 | continue; | ||
61 | } | ||
62 | p = rb_parent(n); | ||
63 | node = rb_entry(n, struct iface_node, node); | ||
64 | if (!p) | ||
65 | *root = RB_ROOT; | ||
66 | else if (p->rb_left == n) | ||
67 | p->rb_left = NULL; | ||
68 | else if (p->rb_right == n) | ||
69 | p->rb_right = NULL; | ||
70 | 50 | ||
51 | rbtree_postorder_for_each_entry_safe(node, next, root, node) | ||
71 | kfree(node); | 52 | kfree(node); |
72 | n = p; | 53 | |
73 | } | 54 | *root = RB_ROOT; |
74 | } | 55 | } |
75 | 56 | ||
76 | static int | 57 | static int |
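rbtree_postorder_for_each_entry_safe() replaces the hand-rolled, parent-pointer-walking teardown with a post-order traversal that visits both children before their parent, so each node can be freed as soon as it is visited. The same idea on an ordinary binary tree, as a short recursive userspace sketch (not the kernel's rbtree API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

static struct node *mknode(int key)
{
	struct node *n = calloc(1, sizeof(*n));

	n->key = key;
	return n;
}

/* Post-order: free both subtrees before the node itself. */
static void destroy(struct node *n)
{
	if (!n)
		return;
	destroy(n->left);
	destroy(n->right);
	printf("freeing %d\n", n->key);
	free(n);
}

int main(void)
{
	struct node *root = mknode(2);

	root->left = mknode(1);
	root->right = mknode(3);
	destroy(root);  /* frees 1, 3, then 2 */
	return 0;
}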
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9fb30b15c9dc..1dbd6d1cd1b5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -29,6 +29,7 @@ my $mailback = 0; | |||
29 | my $summary_file = 0; | 29 | my $summary_file = 0; |
30 | my $show_types = 0; | 30 | my $show_types = 0; |
31 | my $fix = 0; | 31 | my $fix = 0; |
32 | my $fix_inplace = 0; | ||
32 | my $root; | 33 | my $root; |
33 | my %debug; | 34 | my %debug; |
34 | my %camelcase = (); | 35 | my %camelcase = (); |
@@ -76,6 +77,9 @@ Options: | |||
76 | "<inputfile>.EXPERIMENTAL-checkpatch-fixes" | 77 | "<inputfile>.EXPERIMENTAL-checkpatch-fixes" |
77 | with potential errors corrected to the preferred | 78 | with potential errors corrected to the preferred |
78 | checkpatch style | 79 | checkpatch style |
80 | --fix-inplace EXPERIMENTAL - may create horrible results | ||
81 | Is the same as --fix, but overwrites the input | ||
82 | file. It's your fault if there's no backup or git | ||
79 | --ignore-perl-version override checking of perl version. expect | 83 | --ignore-perl-version override checking of perl version. expect |
80 | runtime errors. | 84 | runtime errors. |
81 | -h, --help, --version display this help and exit | 85 | -h, --help, --version display this help and exit |
@@ -131,6 +135,7 @@ GetOptions( | |||
131 | 'mailback!' => \$mailback, | 135 | 'mailback!' => \$mailback, |
132 | 'summary-file!' => \$summary_file, | 136 | 'summary-file!' => \$summary_file, |
133 | 'fix!' => \$fix, | 137 | 'fix!' => \$fix, |
138 | 'fix-inplace!' => \$fix_inplace, | ||
134 | 'ignore-perl-version!' => \$ignore_perl_version, | 139 | 'ignore-perl-version!' => \$ignore_perl_version, |
135 | 'debug=s' => \%debug, | 140 | 'debug=s' => \%debug, |
136 | 'test-only=s' => \$tst_only, | 141 | 'test-only=s' => \$tst_only, |
@@ -140,6 +145,8 @@ GetOptions( | |||
140 | 145 | ||
141 | help(0) if ($help); | 146 | help(0) if ($help); |
142 | 147 | ||
148 | $fix = 1 if ($fix_inplace); | ||
149 | |||
143 | my $exit = 0; | 150 | my $exit = 0; |
144 | 151 | ||
145 | if ($^V && $^V lt $minimum_perl_version) { | 152 | if ($^V && $^V lt $minimum_perl_version) { |
@@ -1963,15 +1970,14 @@ sub process { | |||
1963 | } | 1970 | } |
1964 | 1971 | ||
1965 | # Check for FSF mailing addresses. | 1972 | # Check for FSF mailing addresses. |
1966 | if ($rawline =~ /You should have received a copy/ || | 1973 | if ($rawline =~ /\bwrite to the Free/i || |
1967 | $rawline =~ /write to the Free Software/ || | 1974 | $rawline =~ /\b59\s+Temple\s+Pl/i || |
1968 | $rawline =~ /59 Temple Place/ || | 1975 | $rawline =~ /\b51\s+Franklin\s+St/i) { |
1969 | $rawline =~ /51 Franklin Street/) { | ||
1970 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 1976 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1971 | my $msg_type = \&ERROR; | 1977 | my $msg_type = \&ERROR; |
1972 | $msg_type = \&CHK if ($file); | 1978 | $msg_type = \&CHK if ($file); |
1973 | &{$msg_type}("FSF_MAILING_ADDRESS", | 1979 | &{$msg_type}("FSF_MAILING_ADDRESS", |
1974 | "Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet) | 1980 | "Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet) |
1975 | } | 1981 | } |
1976 | 1982 | ||
1977 | # check for Kconfig help text having a real description | 1983 | # check for Kconfig help text having a real description |
@@ -2034,6 +2040,33 @@ sub process { | |||
2034 | "Use of $flag is deprecated, please use \`$replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag}); | 2040 | "Use of $flag is deprecated, please use \`$replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag}); |
2035 | } | 2041 | } |
2036 | 2042 | ||
2043 | # check for DT compatible documentation | ||
2044 | if (defined $root && $realfile =~ /\.dts/ && | ||
2045 | $rawline =~ /^\+\s*compatible\s*=/) { | ||
2046 | my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g; | ||
2047 | |||
2048 | foreach my $compat (@compats) { | ||
2049 | my $compat2 = $compat; | ||
2050 | my $dt_path = $root . "/Documentation/devicetree/bindings/"; | ||
2051 | $compat2 =~ s/\,[a-z]*\-/\,<\.\*>\-/; | ||
2052 | `grep -Erq "$compat|$compat2" $dt_path`; | ||
2053 | if ( $? >> 8 ) { | ||
2054 | WARN("UNDOCUMENTED_DT_STRING", | ||
2055 | "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr); | ||
2056 | } | ||
2057 | |||
2058 | my $vendor = $compat; | ||
2059 | my $vendor_path = $dt_path . "vendor-prefixes.txt"; | ||
2060 | next if (! -f $vendor_path); | ||
2061 | $vendor =~ s/^([a-zA-Z0-9]+)\,.*/$1/; | ||
2062 | `grep -Eq "$vendor" $vendor_path`; | ||
2063 | if ( $? >> 8 ) { | ||
2064 | WARN("UNDOCUMENTED_DT_STRING", | ||
2065 | "DT compatible string vendor \"$vendor\" appears un-documented -- check $vendor_path\n" . $herecurr); | ||
2066 | } | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2037 | # check we are in a valid source file if not then ignore this hunk | 2070 | # check we are in a valid source file if not then ignore this hunk |
2038 | next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/); | 2071 | next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/); |
2039 | 2072 | ||
@@ -2049,16 +2082,12 @@ sub process { | |||
2049 | } | 2082 | } |
2050 | 2083 | ||
2051 | # Check for user-visible strings broken across lines, which breaks the ability | 2084 | # Check for user-visible strings broken across lines, which breaks the ability |
2052 | # to grep for the string. Limited to strings used as parameters (those | 2085 | # to grep for the string. Make exceptions when the previous string ends in a |
2053 | # following an open parenthesis), which almost completely eliminates false | 2086 | # newline (multiple lines in one string constant) or '\t', '\r', ';', or '{' |
2054 | # positives, as well as warning only once per parameter rather than once per | 2087 | # (common in inline assembly) or is a octal \123 or hexadecimal \xaf value |
2055 | # line of the string. Make an exception when the previous string ends in a | ||
2056 | # newline (multiple lines in one string constant) or \n\t (common in inline | ||
2057 | # assembly to indent the instruction on the following line). | ||
2058 | if ($line =~ /^\+\s*"/ && | 2088 | if ($line =~ /^\+\s*"/ && |
2059 | $prevline =~ /"\s*$/ && | 2089 | $prevline =~ /"\s*$/ && |
2060 | $prevline =~ /\(/ && | 2090 | $prevrawline !~ /(?:\\(?:[ntr]|[0-7]{1,3}|x[0-9a-fA-F]{1,2})|;\s*|\{\s*)"\s*$/) { |
2061 | $prevrawline !~ /\\n(?:\\t)*"\s*$/) { | ||
2062 | WARN("SPLIT_STRING", | 2091 | WARN("SPLIT_STRING", |
2063 | "quoted string split across lines\n" . $hereprev); | 2092 | "quoted string split across lines\n" . $hereprev); |
2064 | } | 2093 | } |
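To make the reworked SPLIT_STRING rule concrete, a hedged C sketch (function name and messages invented) of what still warns and what is now excepted:

	#include <linux/printk.h>

	static void report_status(void)
	{
		/* still warned: a user-visible message broken only for line
		 * length, which defeats grepping for the whole string */
		printk(KERN_INFO "device initialisation "
		       "failed, retrying\n");

		/* excepted: the previous string ends in \n, so this really is
		 * a multi-line message rather than one sentence split in two */
		printk(KERN_INFO "line one\n"
		       "line two\n");
	}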
@@ -2115,8 +2144,10 @@ sub process { | |||
2115 | if (WARN("SPACE_BEFORE_TAB", | 2144 | if (WARN("SPACE_BEFORE_TAB", |
2116 | "please, no space before tabs\n" . $herevet) && | 2145 | "please, no space before tabs\n" . $herevet) && |
2117 | $fix) { | 2146 | $fix) { |
2118 | $fixed[$linenr - 1] =~ | 2147 | while ($fixed[$linenr - 1] =~ |
2119 | s/(^\+.*) +\t/$1\t/; | 2148 | s/(^\+.*) {8,8}+\t/$1\t\t/) {} |
2149 | while ($fixed[$linenr - 1] =~ | ||
2150 | s/(^\+.*) +\t/$1\t/) {} | ||
2120 | } | 2151 | } |
2121 | } | 2152 | } |
2122 | 2153 | ||
@@ -2805,6 +2836,65 @@ sub process { | |||
2805 | } | 2836 | } |
2806 | } | 2837 | } |
2807 | 2838 | ||
2839 | # Function pointer declarations | ||
2840 | # check spacing between type, funcptr, and args | ||
2841 | # canonical declaration is "type (*funcptr)(args...)" | ||
2842 | # | ||
2843 | # the $Declare variable will capture all spaces after the type | ||
2844 | # so check it for trailing missing spaces or multiple spaces | ||
2845 | if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)$Ident(\s*)\)(\s*)\(/) { | ||
2846 | my $declare = $1; | ||
2847 | my $pre_pointer_space = $2; | ||
2848 | my $post_pointer_space = $3; | ||
2849 | my $funcname = $4; | ||
2850 | my $post_funcname_space = $5; | ||
2851 | my $pre_args_space = $6; | ||
2852 | |||
2853 | if ($declare !~ /\s$/) { | ||
2854 | WARN("SPACING", | ||
2855 | "missing space after return type\n" . $herecurr); | ||
2856 | } | ||
2857 | |||
2858 | # unnecessary space "type (*funcptr)(args...)" | ||
2859 | elsif ($declare =~ /\s{2,}$/) { | ||
2860 | WARN("SPACING", | ||
2861 | "Multiple spaces after return type\n" . $herecurr); | ||
2862 | } | ||
2863 | |||
2864 | # unnecessary space "type ( *funcptr)(args...)" | ||
2865 | if (defined $pre_pointer_space && | ||
2866 | $pre_pointer_space =~ /^\s/) { | ||
2867 | WARN("SPACING", | ||
2868 | "Unnecessary space after function pointer open parenthesis\n" . $herecurr); | ||
2869 | } | ||
2870 | |||
2871 | # unnecessary space "type (* funcptr)(args...)" | ||
2872 | if (defined $post_pointer_space && | ||
2873 | $post_pointer_space =~ /^\s/) { | ||
2874 | WARN("SPACING", | ||
2875 | "Unnecessary space before function pointer name\n" . $herecurr); | ||
2876 | } | ||
2877 | |||
2878 | # unnecessary space "type (*funcptr )(args...)" | ||
2879 | if (defined $post_funcname_space && | ||
2880 | $post_funcname_space =~ /^\s/) { | ||
2881 | WARN("SPACING", | ||
2882 | "Unnecessary space after function pointer name\n" . $herecurr); | ||
2883 | } | ||
2884 | |||
2885 | # unnecessary space "type (*funcptr) (args...)" | ||
2886 | if (defined $pre_args_space && | ||
2887 | $pre_args_space =~ /^\s/) { | ||
2888 | WARN("SPACING", | ||
2889 | "Unnecessary space before function pointer arguments\n" . $herecurr); | ||
2890 | } | ||
2891 | |||
2892 | if (show_type("SPACING") && $fix) { | ||
2893 | $fixed[$linenr - 1] =~ | ||
2894 | s/^(.\s*$Declare)\(\s*\*\s*($Ident)\s*\)\s*\(/rtrim($1) . " " . "\(\*$2\)\("/ex; | ||
2895 | } | ||
2896 | } | ||
2897 | |||
2808 | # check for spacing round square brackets; allowed: | 2898 | # check for spacing round square brackets; allowed: |
2809 | # 1. with a type on the left -- int [] a; | 2899 | # 1. with a type on the left -- int [] a; |
2810 | # 2. at the beginning of a line for slice initialisers -- [0...10] = 5, | 2900 | # 2. at the beginning of a line for slice initialisers -- [0...10] = 5, |
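For reference, the canonical function-pointer form the new checks steer toward, next to the spacing variants that now draw SPACING warnings (the declaration names here are invented):

	/* canonical: one space after the return type, none anywhere else */
	void (*cb_ok)(int irq, void *data);

	void(*cb_a)(int irq, void *data);	/* missing space after return type */
	void  (*cb_b)(int irq, void *data);	/* multiple spaces after return type */
	void ( *cb_c)(int irq, void *data);	/* space after the open parenthesis */
	void (* cb_d)(int irq, void *data);	/* space before the pointer name */
	void (*cb_e )(int irq, void *data);	/* space after the pointer name */
	void (*cb_f) (int irq, void *data);	/* space before the argument list */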
@@ -3125,7 +3215,7 @@ sub process { | |||
3125 | } | 3215 | } |
3126 | 3216 | ||
3127 | # check for whitespace before a non-naked semicolon | 3217 | # check for whitespace before a non-naked semicolon |
3128 | if ($line =~ /^\+.*\S\s+;/) { | 3218 | if ($line =~ /^\+.*\S\s+;\s*$/) { |
3129 | if (WARN("SPACING", | 3219 | if (WARN("SPACING", |
3130 | "space prohibited before semicolon\n" . $herecurr) && | 3220 | "space prohibited before semicolon\n" . $herecurr) && |
3131 | $fix) { | 3221 | $fix) { |
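Anchoring the pattern to a line-ending semicolon means a stray space before a statement's final semicolon still warns, while mid-line semicolons such as those in a for-loop header are left alone. A small sketch under that reading (names invented):

	static int sum_to_ten(void)
	{
		/* warned: whitespace before a line-ending semicolon */
		int i, total = 0 ;

		/* no longer warned: these semicolons are not at end of line */
		for (i = 0 ; i < 10 ; i++)
			total += i;

		return total;
	}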
@@ -3249,6 +3339,20 @@ sub process { | |||
3249 | } | 3339 | } |
3250 | } | 3340 | } |
3251 | 3341 | ||
3342 | # if statements using unnecessary parentheses - ie: if ((foo == bar)) | ||
3343 | if ($^V && $^V ge 5.10.0 && | ||
3344 | $line =~ /\bif\s*((?:\(\s*){2,})/) { | ||
3345 | my $openparens = $1; | ||
3346 | my $count = $openparens =~ tr@\(@\(@; | ||
3347 | my $msg = ""; | ||
3348 | if ($line =~ /\bif\s*(?:\(\s*){$count,$count}$LvalOrFunc\s*($Compare)\s*$LvalOrFunc(?:\s*\)){$count,$count}/) { | ||
3349 | my $comp = $4; #Not $1 because of $LvalOrFunc | ||
3350 | $msg = " - maybe == should be = ?" if ($comp eq "=="); | ||
3351 | WARN("UNNECESSARY_PARENTHESES", | ||
3352 | "Unnecessary parentheses$msg\n" . $herecurr); | ||
3353 | } | ||
3354 | } | ||
3355 | |||
3252 | # Return of what appears to be an errno should normally be -'ve | 3356 | # Return of what appears to be an errno should normally be -'ve |
3253 | if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) { | 3357 | if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) { |
3254 | my $name = $1; | 3358 | my $name = $1; |
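The UNNECESSARY_PARENTHESES test fires when a lone comparison is wrapped in doubled parentheses; the extra "maybe == should be = ?" hint exists because writers sometimes add the second pair to quiet an assignment-in-condition warning and then mistype the operator. A sketch with invented identifiers:

	static void check_state(int foo, int bar)
	{
		if ((foo == bar))	/* warned, with the '==' hint */
			return;

		if (foo == bar)		/* fine: a single pair of parentheses */
			return;
	}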
@@ -3983,6 +4087,16 @@ sub process { | |||
3983 | } | 4087 | } |
3984 | } | 4088 | } |
3985 | 4089 | ||
4090 | # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar) | ||
4091 | if ($^V && $^V ge 5.10.0 && | ||
4092 | $line =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/s) { | ||
4093 | if (WARN("PREFER_ETHER_ADDR_COPY", | ||
4094 | "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . $herecurr) && | ||
4095 | $fix) { | ||
4096 | $fixed[$linenr - 1] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/; | ||
4097 | } | ||
4098 | } | ||
4099 | |||
3986 | # typecasts on min/max could be min_t/max_t | 4100 | # typecasts on min/max could be min_t/max_t |
3987 | if ($^V && $^V ge 5.10.0 && | 4101 | if ($^V && $^V ge 5.10.0 && |
3988 | defined $stat && | 4102 | defined $stat && |
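ether_addr_copy() from include/linux/etherdevice.h copies the six bytes of a MAC address but, unlike memcpy(), requires both buffers to be 16-bit aligned, hence the __aligned(2) caveat in the warning text. A sketch under that assumption, with invented struct and function names:

	#include <linux/etherdevice.h>

	struct example_priv {
		u8 mac[ETH_ALEN] __aligned(2);	/* alignment needed by ether_addr_copy() */
	};

	static void example_set_mac(struct example_priv *priv, const u8 *addr)
	{
		/* flagged form: memcpy(priv->mac, addr, ETH_ALEN); */
		ether_addr_copy(priv->mac, addr);
	}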
@@ -4117,6 +4231,12 @@ sub process { | |||
4117 | "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr); | 4231 | "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr); |
4118 | } | 4232 | } |
4119 | 4233 | ||
4234 | # check for __GFP_NOFAIL use | ||
4235 | if ($line =~ /\b__GFP_NOFAIL\b/) { | ||
4236 | WARN("__GFP_NOFAIL", | ||
4237 | "Use of __GFP_NOFAIL is deprecated, no new users should be added\n" . $herecurr); | ||
4238 | } | ||
4239 | |||
4120 | # check for multiple semicolons | 4240 | # check for multiple semicolons |
4121 | if ($line =~ /;\s*;\s*$/) { | 4241 | if ($line =~ /;\s*;\s*$/) { |
4122 | if (WARN("ONE_SEMICOLON", | 4242 | if (WARN("ONE_SEMICOLON", |
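The new test flags added code that uses __GFP_NOFAIL; the usual alternative is to let the allocation fail and propagate the error. An illustrative sketch (function and variable names invented):

	#include <linux/slab.h>

	static int example_alloc(size_t len, void **out)
	{
		void *buf;

		/* would be warned: buf = kmalloc(len, GFP_KERNEL | __GFP_NOFAIL); */

		buf = kmalloc(len, GFP_KERNEL);	/* preferred: handle failure instead */
		if (!buf)
			return -ENOMEM;

		*out = buf;
		return 0;
	}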
@@ -4126,6 +4246,31 @@ sub process { | |||
4126 | } | 4246 | } |
4127 | } | 4247 | } |
4128 | 4248 | ||
4249 | # check for case / default statements not preceded by break/fallthrough/switch | ||
4250 | if ($line =~ /^.\s*(?:case\s+(?:$Ident|$Constant)\s*|default):/) { | ||
4251 | my $has_break = 0; | ||
4252 | my $has_statement = 0; | ||
4253 | my $count = 0; | ||
4254 | my $prevline = $linenr; | ||
4255 | while ($prevline > 1 && $count < 3 && !$has_break) { | ||
4256 | $prevline--; | ||
4257 | my $rline = $rawlines[$prevline - 1]; | ||
4258 | my $fline = $lines[$prevline - 1]; | ||
4259 | last if ($fline =~ /^\@\@/); | ||
4260 | next if ($fline =~ /^\-/); | ||
4261 | next if ($fline =~ /^.(?:\s*(?:case\s+(?:$Ident|$Constant)[\s$;]*|default):[\s$;]*)*$/); | ||
4262 | $has_break = 1 if ($rline =~ /fall[\s_-]*(through|thru)/i); | ||
4263 | next if ($fline =~ /^.[\s$;]*$/); | ||
4264 | $has_statement = 1; | ||
4265 | $count++; | ||
4266 | $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|return\b|goto\b|continue\b)/); | ||
4267 | } | ||
4268 | if (!$has_break && $has_statement) { | ||
4269 | WARN("MISSING_BREAK", | ||
4270 | "Possible switch case/default not preceeded by break or fallthrough comment\n" . $herecurr); | ||
4271 | } | ||
4272 | } | ||
4273 | |||
4129 | # check for switch/default statements without a break; | 4274 | # check for switch/default statements without a break; |
4130 | if ($^V && $^V ge 5.10.0 && | 4275 | if ($^V && $^V ge 5.10.0 && |
4131 | defined $stat && | 4276 | defined $stat && |
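The MISSING_BREAK heuristic looks back up to three statements from each case/default label for a break, return, goto, continue, the opening switch, or a fall-through comment. A sketch of code that satisfies it and code that warns (values and names invented):

	static int example_step(int state)
	{
		int out = 0;

		switch (state) {
		case 0:
			out = 1;
			/* fall through */
		case 1:			/* no warning: the comment above is found */
			out += 2;
			out += 3;
			out += 4;
		case 2:			/* warned: three statements above, no break */
			out += 5;
			break;
		default:		/* no warning: break found immediately above */
			break;
		}

		return out;
	}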
@@ -4361,7 +4506,8 @@ sub process { | |||
4361 | hash_show_words(\%ignore_type, "Ignored"); | 4506 | hash_show_words(\%ignore_type, "Ignored"); |
4362 | 4507 | ||
4363 | if ($clean == 0 && $fix && "@rawlines" ne "@fixed") { | 4508 | if ($clean == 0 && $fix && "@rawlines" ne "@fixed") { |
4364 | my $newfile = $filename . ".EXPERIMENTAL-checkpatch-fixes"; | 4509 | my $newfile = $filename; |
4510 | $newfile .= ".EXPERIMENTAL-checkpatch-fixes" if (!$fix_inplace); | ||
4365 | my $linecount = 0; | 4511 | my $linecount = 0; |
4366 | my $f; | 4512 | my $f; |
4367 | 4513 | ||
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 5e4fb144a04f..9c3986f4140c 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -98,6 +98,7 @@ my %VCS_cmds_git = ( | |||
98 | "available" => '(which("git") ne "") && (-d ".git")', | 98 | "available" => '(which("git") ne "") && (-d ".git")', |
99 | "find_signers_cmd" => | 99 | "find_signers_cmd" => |
100 | "git log --no-color --follow --since=\$email_git_since " . | 100 | "git log --no-color --follow --since=\$email_git_since " . |
101 | '--numstat --no-merges ' . | ||
101 | '--format="GitCommit: %H%n' . | 102 | '--format="GitCommit: %H%n' . |
102 | 'GitAuthor: %an <%ae>%n' . | 103 | 'GitAuthor: %an <%ae>%n' . |
103 | 'GitDate: %aD%n' . | 104 | 'GitDate: %aD%n' . |
@@ -106,6 +107,7 @@ my %VCS_cmds_git = ( | |||
106 | " -- \$file", | 107 | " -- \$file", |
107 | "find_commit_signers_cmd" => | 108 | "find_commit_signers_cmd" => |
108 | "git log --no-color " . | 109 | "git log --no-color " . |
110 | '--numstat ' . | ||
109 | '--format="GitCommit: %H%n' . | 111 | '--format="GitCommit: %H%n' . |
110 | 'GitAuthor: %an <%ae>%n' . | 112 | 'GitAuthor: %an <%ae>%n' . |
111 | 'GitDate: %aD%n' . | 113 | 'GitDate: %aD%n' . |
@@ -114,6 +116,7 @@ my %VCS_cmds_git = ( | |||
114 | " -1 \$commit", | 116 | " -1 \$commit", |
115 | "find_commit_author_cmd" => | 117 | "find_commit_author_cmd" => |
116 | "git log --no-color " . | 118 | "git log --no-color " . |
119 | '--numstat ' . | ||
117 | '--format="GitCommit: %H%n' . | 120 | '--format="GitCommit: %H%n' . |
118 | 'GitAuthor: %an <%ae>%n' . | 121 | 'GitAuthor: %an <%ae>%n' . |
119 | 'GitDate: %aD%n' . | 122 | 'GitDate: %aD%n' . |
@@ -125,6 +128,7 @@ my %VCS_cmds_git = ( | |||
125 | "blame_commit_pattern" => "^([0-9a-f]+) ", | 128 | "blame_commit_pattern" => "^([0-9a-f]+) ", |
126 | "author_pattern" => "^GitAuthor: (.*)", | 129 | "author_pattern" => "^GitAuthor: (.*)", |
127 | "subject_pattern" => "^GitSubject: (.*)", | 130 | "subject_pattern" => "^GitSubject: (.*)", |
131 | "stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$", | ||
128 | ); | 132 | ); |
129 | 133 | ||
130 | my %VCS_cmds_hg = ( | 134 | my %VCS_cmds_hg = ( |
@@ -152,6 +156,7 @@ my %VCS_cmds_hg = ( | |||
152 | "blame_commit_pattern" => "^([ 0-9a-f]+):", | 156 | "blame_commit_pattern" => "^([ 0-9a-f]+):", |
153 | "author_pattern" => "^HgAuthor: (.*)", | 157 | "author_pattern" => "^HgAuthor: (.*)", |
154 | "subject_pattern" => "^HgSubject: (.*)", | 158 | "subject_pattern" => "^HgSubject: (.*)", |
159 | "stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$", | ||
155 | ); | 160 | ); |
156 | 161 | ||
157 | my $conf = which_conf(".get_maintainer.conf"); | 162 | my $conf = which_conf(".get_maintainer.conf"); |
@@ -1269,20 +1274,30 @@ sub extract_formatted_signatures { | |||
1269 | } | 1274 | } |
1270 | 1275 | ||
1271 | sub vcs_find_signers { | 1276 | sub vcs_find_signers { |
1272 | my ($cmd) = @_; | 1277 | my ($cmd, $file) = @_; |
1273 | my $commits; | 1278 | my $commits; |
1274 | my @lines = (); | 1279 | my @lines = (); |
1275 | my @signatures = (); | 1280 | my @signatures = (); |
1281 | my @authors = (); | ||
1282 | my @stats = (); | ||
1276 | 1283 | ||
1277 | @lines = &{$VCS_cmds{"execute_cmd"}}($cmd); | 1284 | @lines = &{$VCS_cmds{"execute_cmd"}}($cmd); |
1278 | 1285 | ||
1279 | my $pattern = $VCS_cmds{"commit_pattern"}; | 1286 | my $pattern = $VCS_cmds{"commit_pattern"}; |
1287 | my $author_pattern = $VCS_cmds{"author_pattern"}; | ||
1288 | my $stat_pattern = $VCS_cmds{"stat_pattern"}; | ||
1289 | |||
1290 | $stat_pattern =~ s/(\$\w+)/$1/eeg; #interpolate $stat_pattern | ||
1280 | 1291 | ||
1281 | $commits = grep(/$pattern/, @lines); # of commits | 1292 | $commits = grep(/$pattern/, @lines); # of commits |
1282 | 1293 | ||
1294 | @authors = grep(/$author_pattern/, @lines); | ||
1283 | @signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines); | 1295 | @signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines); |
1296 | @stats = grep(/$stat_pattern/, @lines); | ||
1284 | 1297 | ||
1285 | return (0, @signatures) if !@signatures; | 1298 | # print("stats: <@stats>\n"); |
1299 | |||
1300 | return (0, \@signatures, \@authors, \@stats) if !@signatures; | ||
1286 | 1301 | ||
1287 | save_commits_by_author(@lines) if ($interactive); | 1302 | save_commits_by_author(@lines) if ($interactive); |
1288 | save_commits_by_signer(@lines) if ($interactive); | 1303 | save_commits_by_signer(@lines) if ($interactive); |
@@ -1291,9 +1306,10 @@ sub vcs_find_signers { | |||
1291 | @signatures = grep(!/${penguin_chiefs}/i, @signatures); | 1306 | @signatures = grep(!/${penguin_chiefs}/i, @signatures); |
1292 | } | 1307 | } |
1293 | 1308 | ||
1309 | my ($author_ref, $authors_ref) = extract_formatted_signatures(@authors); | ||
1294 | my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures); | 1310 | my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures); |
1295 | 1311 | ||
1296 | return ($commits, @$signers_ref); | 1312 | return ($commits, $signers_ref, $authors_ref, \@stats); |
1297 | } | 1313 | } |
1298 | 1314 | ||
1299 | sub vcs_find_author { | 1315 | sub vcs_find_author { |
@@ -1849,7 +1865,12 @@ sub vcs_assign { | |||
1849 | sub vcs_file_signoffs { | 1865 | sub vcs_file_signoffs { |
1850 | my ($file) = @_; | 1866 | my ($file) = @_; |
1851 | 1867 | ||
1868 | my $authors_ref; | ||
1869 | my $signers_ref; | ||
1870 | my $stats_ref; | ||
1871 | my @authors = (); | ||
1852 | my @signers = (); | 1872 | my @signers = (); |
1873 | my @stats = (); | ||
1853 | my $commits; | 1874 | my $commits; |
1854 | 1875 | ||
1855 | $vcs_used = vcs_exists(); | 1876 | $vcs_used = vcs_exists(); |
@@ -1858,13 +1879,59 @@ sub vcs_file_signoffs { | |||
1858 | my $cmd = $VCS_cmds{"find_signers_cmd"}; | 1879 | my $cmd = $VCS_cmds{"find_signers_cmd"}; |
1859 | $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd | 1880 | $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd |
1860 | 1881 | ||
1861 | ($commits, @signers) = vcs_find_signers($cmd); | 1882 | ($commits, $signers_ref, $authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); |
1883 | |||
1884 | @signers = @{$signers_ref} if defined $signers_ref; | ||
1885 | @authors = @{$authors_ref} if defined $authors_ref; | ||
1886 | @stats = @{$stats_ref} if defined $stats_ref; | ||
1887 | |||
1888 | # print("commits: <$commits>\nsigners:<@signers>\nauthors: <@authors>\nstats: <@stats>\n"); | ||
1862 | 1889 | ||
1863 | foreach my $signer (@signers) { | 1890 | foreach my $signer (@signers) { |
1864 | $signer = deduplicate_email($signer); | 1891 | $signer = deduplicate_email($signer); |
1865 | } | 1892 | } |
1866 | 1893 | ||
1867 | vcs_assign("commit_signer", $commits, @signers); | 1894 | vcs_assign("commit_signer", $commits, @signers); |
1895 | vcs_assign("authored", $commits, @authors); | ||
1896 | if ($#authors == $#stats) { | ||
1897 | my $stat_pattern = $VCS_cmds{"stat_pattern"}; | ||
1898 | $stat_pattern =~ s/(\$\w+)/$1/eeg; #interpolate $stat_pattern | ||
1899 | |||
1900 | my $added = 0; | ||
1901 | my $deleted = 0; | ||
1902 | for (my $i = 0; $i <= $#stats; $i++) { | ||
1903 | if ($stats[$i] =~ /$stat_pattern/) { | ||
1904 | $added += $1; | ||
1905 | $deleted += $2; | ||
1906 | } | ||
1907 | } | ||
1908 | my @tmp_authors = uniq(@authors); | ||
1909 | foreach my $author (@tmp_authors) { | ||
1910 | $author = deduplicate_email($author); | ||
1911 | } | ||
1912 | @tmp_authors = uniq(@tmp_authors); | ||
1913 | my @list_added = (); | ||
1914 | my @list_deleted = (); | ||
1915 | foreach my $author (@tmp_authors) { | ||
1916 | my $auth_added = 0; | ||
1917 | my $auth_deleted = 0; | ||
1918 | for (my $i = 0; $i <= $#stats; $i++) { | ||
1919 | if ($author eq deduplicate_email($authors[$i]) && | ||
1920 | $stats[$i] =~ /$stat_pattern/) { | ||
1921 | $auth_added += $1; | ||
1922 | $auth_deleted += $2; | ||
1923 | } | ||
1924 | } | ||
1925 | for (my $i = 0; $i < $auth_added; $i++) { | ||
1926 | push(@list_added, $author); | ||
1927 | } | ||
1928 | for (my $i = 0; $i < $auth_deleted; $i++) { | ||
1929 | push(@list_deleted, $author); | ||
1930 | } | ||
1931 | } | ||
1932 | vcs_assign("added_lines", $added, @list_added); | ||
1933 | vcs_assign("removed_lines", $deleted, @list_deleted); | ||
1934 | } | ||
1868 | } | 1935 | } |
1869 | 1936 | ||
1870 | sub vcs_file_blame { | 1937 | sub vcs_file_blame { |
@@ -1887,6 +1954,10 @@ sub vcs_file_blame { | |||
1887 | if ($email_git_blame_signatures) { | 1954 | if ($email_git_blame_signatures) { |
1888 | if (vcs_is_hg()) { | 1955 | if (vcs_is_hg()) { |
1889 | my $commit_count; | 1956 | my $commit_count; |
1957 | my $commit_authors_ref; | ||
1958 | my $commit_signers_ref; | ||
1959 | my $stats_ref; | ||
1960 | my @commit_authors = (); | ||
1890 | my @commit_signers = (); | 1961 | my @commit_signers = (); |
1891 | my $commit = join(" -r ", @commits); | 1962 | my $commit = join(" -r ", @commits); |
1892 | my $cmd; | 1963 | my $cmd; |
@@ -1894,19 +1965,27 @@ sub vcs_file_blame { | |||
1894 | $cmd = $VCS_cmds{"find_commit_signers_cmd"}; | 1965 | $cmd = $VCS_cmds{"find_commit_signers_cmd"}; |
1895 | $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd | 1966 | $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd |
1896 | 1967 | ||
1897 | ($commit_count, @commit_signers) = vcs_find_signers($cmd); | 1968 | ($commit_count, $commit_signers_ref, $commit_authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); |
1969 | @commit_authors = @{$commit_authors_ref} if defined $commit_authors_ref; | ||
1970 | @commit_signers = @{$commit_signers_ref} if defined $commit_signers_ref; | ||
1898 | 1971 | ||
1899 | push(@signers, @commit_signers); | 1972 | push(@signers, @commit_signers); |
1900 | } else { | 1973 | } else { |
1901 | foreach my $commit (@commits) { | 1974 | foreach my $commit (@commits) { |
1902 | my $commit_count; | 1975 | my $commit_count; |
1976 | my $commit_authors_ref; | ||
1977 | my $commit_signers_ref; | ||
1978 | my $stats_ref; | ||
1979 | my @commit_authors = (); | ||
1903 | my @commit_signers = (); | 1980 | my @commit_signers = (); |
1904 | my $cmd; | 1981 | my $cmd; |
1905 | 1982 | ||
1906 | $cmd = $VCS_cmds{"find_commit_signers_cmd"}; | 1983 | $cmd = $VCS_cmds{"find_commit_signers_cmd"}; |
1907 | $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd | 1984 | $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd |
1908 | 1985 | ||
1909 | ($commit_count, @commit_signers) = vcs_find_signers($cmd); | 1986 | ($commit_count, $commit_signers_ref, $commit_authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); |
1987 | @commit_authors = @{$commit_authors_ref} if defined $commit_authors_ref; | ||
1988 | @commit_signers = @{$commit_signers_ref} if defined $commit_signers_ref; | ||
1910 | 1989 | ||
1911 | push(@signers, @commit_signers); | 1990 | push(@signers, @commit_signers); |
1912 | } | 1991 | } |
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl index 64ac2380e4d5..62320f93e903 100644 --- a/scripts/headers_check.pl +++ b/scripts/headers_check.pl | |||
@@ -65,7 +65,11 @@ sub check_include | |||
65 | 65 | ||
66 | sub check_declarations | 66 | sub check_declarations |
67 | { | 67 | { |
68 | if ($line =~m/^(\s*extern|unsigned|char|short|int|long|void)\b/) { | 68 | # soundcard.h is what it is |
69 | if ($line =~ m/^void seqbuf_dump\(void\);/) { | ||
70 | return; | ||
71 | } | ||
72 | if ($line =~ m/^(\s*extern|unsigned|char|short|int|long|void)\b/) { | ||
69 | printf STDERR "$filename:$lineno: " . | 73 | printf STDERR "$filename:$lineno: " . |
70 | "userspace cannot reference function or " . | 74 | "userspace cannot reference function or " . |
71 | "variable defined in the kernel\n"; | 75 | "variable defined in the kernel\n"; |
diff --git a/scripts/sortextable.c b/scripts/sortextable.c index 7941fbdfb050..cc49062acdee 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c | |||
@@ -39,6 +39,10 @@ | |||
39 | #define EM_AARCH64 183 | 39 | #define EM_AARCH64 183 |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #ifndef EM_MICROBLAZE | ||
43 | #define EM_MICROBLAZE 189 | ||
44 | #endif | ||
45 | |||
42 | static int fd_map; /* File descriptor for file being modified. */ | 46 | static int fd_map; /* File descriptor for file being modified. */ |
43 | static int mmap_failed; /* Boolean flag. */ | 47 | static int mmap_failed; /* Boolean flag. */ |
44 | static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ | 48 | static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ |
@@ -275,6 +279,7 @@ do_file(char const *const fname) | |||
275 | case EM_ARCOMPACT: | 279 | case EM_ARCOMPACT: |
276 | case EM_ARM: | 280 | case EM_ARM: |
277 | case EM_AARCH64: | 281 | case EM_AARCH64: |
282 | case EM_MICROBLAZE: | ||
278 | case EM_MIPS: | 283 | case EM_MIPS: |
279 | break; | 284 | break; |
280 | } /* end switch */ | 285 | } /* end switch */ |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 9f3eae290900..32487ed18354 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -9,6 +9,7 @@ TARGETS += ptrace | |||
9 | TARGETS += timers | 9 | TARGETS += timers |
10 | TARGETS += vm | 10 | TARGETS += vm |
11 | TARGETS += powerpc | 11 | TARGETS += powerpc |
12 | TARGETS += user | ||
12 | 13 | ||
13 | all: | 14 | all: |
14 | for TARGET in $(TARGETS); do \ | 15 | for TARGET in $(TARGETS); do \ |
diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile new file mode 100644 index 000000000000..396255bd720e --- /dev/null +++ b/tools/testing/selftests/user/Makefile | |||
@@ -0,0 +1,13 @@ | |||
1 | # Makefile for user memory selftests | ||
2 | |||
3 | # No binaries, but make sure arg-less "make" doesn't trigger "run_tests" | ||
4 | all: | ||
5 | |||
6 | run_tests: all | ||
7 | @if /sbin/modprobe test_user_copy ; then \ | ||
8 | rmmod test_user_copy; \ | ||
9 | echo "user_copy: ok"; \ | ||
10 | else \ | ||
11 | echo "user_copy: [FAIL]"; \ | ||
12 | exit 1; \ | ||
13 | fi | ||