diff options
585 files changed, 4914 insertions, 4418 deletions
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index c1513c756af1..9d2339a485c8 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram | |||
@@ -98,3 +98,35 @@ Description: | |||
98 | The backing_dev file is read-write and set up backing | 98 | The backing_dev file is read-write and set up backing |
99 | device for zram to write incompressible pages. | 99 | device for zram to write incompressible pages. |
100 | For using, user should enable CONFIG_ZRAM_WRITEBACK. | 100 | For using, user should enable CONFIG_ZRAM_WRITEBACK. |
101 | |||
102 | What: /sys/block/zram<id>/idle | ||
103 | Date: November 2018 | ||
104 | Contact: Minchan Kim <minchan@kernel.org> | ||
105 | Description: | ||
106 | The idle file is write-only and marks zram slots as idle. | ||
107 | If system has mounted debugfs, user can see which slots | ||
108 | are idle via /sys/kernel/debug/zram/zram<id>/block_state | ||
109 | |||
110 | What: /sys/block/zram<id>/writeback | ||
111 | Date: November 2018 | ||
112 | Contact: Minchan Kim <minchan@kernel.org> | ||
113 | Description: | ||
114 | The writeback file is write-only and triggers idle and/or | ||
115 | huge page writeback to backing device. | ||
116 | |||
117 | What: /sys/block/zram<id>/bd_stat | ||
118 | Date: November 2018 | ||
119 | Contact: Minchan Kim <minchan@kernel.org> | ||
120 | Description: | ||
121 | The bd_stat file is read-only and represents backing device's | ||
122 | statistics (bd_count, bd_reads, bd_writes) in a format | ||
123 | similar to block layer statistics file format. | ||
124 | |||
125 | What: /sys/block/zram<id>/writeback_limit | ||
126 | Date: November 2018 | ||
127 | Contact: Minchan Kim <minchan@kernel.org> | ||
128 | Description: | ||
129 | The writeback_limit file is read-write and specifies the maximum | ||
130 | amount of writeback ZRAM can do. The limit could be changed | ||
131 | in run time and "0" means disable the limit. | ||
132 | No limit is the initial state. | ||
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 3c1b5ab54bc0..436c5e98e1b6 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt | |||
@@ -164,11 +164,14 @@ reset WO trigger device reset | |||
164 | mem_used_max WO reset the `mem_used_max' counter (see later) | 164 | mem_used_max WO reset the `mem_used_max' counter (see later) |
165 | mem_limit WO specifies the maximum amount of memory ZRAM can use | 165 | mem_limit WO specifies the maximum amount of memory ZRAM can use |
166 | to store the compressed data | 166 | to store the compressed data |
167 | writeback_limit WO specifies the maximum amount of write IO zram can | ||
168 | write out to backing device as 4KB unit | ||
167 | max_comp_streams RW the number of possible concurrent compress operations | 169 | max_comp_streams RW the number of possible concurrent compress operations |
168 | comp_algorithm RW show and change the compression algorithm | 170 | comp_algorithm RW show and change the compression algorithm |
169 | compact WO trigger memory compaction | 171 | compact WO trigger memory compaction |
170 | debug_stat RO this file is used for zram debugging purposes | 172 | debug_stat RO this file is used for zram debugging purposes |
171 | backing_dev RW set up backend storage for zram to write out | 173 | backing_dev RW set up backend storage for zram to write out |
174 | idle WO mark allocated slot as idle | ||
172 | 175 | ||
173 | 176 | ||
174 | User space is advised to use the following files to read the device statistics. | 177 | User space is advised to use the following files to read the device statistics. |
@@ -220,6 +223,17 @@ line of text and contains the following stats separated by whitespace: | |||
220 | pages_compacted the number of pages freed during compaction | 223 | pages_compacted the number of pages freed during compaction |
221 | huge_pages the number of incompressible pages | 224 | huge_pages the number of incompressible pages |
222 | 225 | ||
226 | File /sys/block/zram<id>/bd_stat | ||
227 | |||
228 | The stat file represents device's backing device statistics. It consists of | ||
229 | a single line of text and contains the following stats separated by whitespace: | ||
230 | bd_count size of data written in backing device. | ||
231 | Unit: 4K bytes | ||
232 | bd_reads the number of reads from backing device | ||
233 | Unit: 4K bytes | ||
234 | bd_writes the number of writes to backing device | ||
235 | Unit: 4K bytes | ||
236 | |||
223 | 9) Deactivate: | 237 | 9) Deactivate: |
224 | swapoff /dev/zram0 | 238 | swapoff /dev/zram0 |
225 | umount /dev/zram1 | 239 | umount /dev/zram1 |
@@ -237,11 +251,60 @@ line of text and contains the following stats separated by whitespace: | |||
237 | 251 | ||
238 | = writeback | 252 | = writeback |
239 | 253 | ||
240 | With incompressible pages, there is no memory saving with zram. | 254 | With CONFIG_ZRAM_WRITEBACK, zram can write idle/incompressible page |
241 | Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible page | ||
242 | to backing storage rather than keeping it in memory. | 255 | to backing storage rather than keeping it in memory. |
243 | User should set up backing device via /sys/block/zramX/backing_dev | 256 | To use the feature, admin should set up backing device via |
244 | before disksize setting. | 257 | |
258 | "echo /dev/sda5 > /sys/block/zramX/backing_dev" | ||
259 | |||
260 | before disksize setting. It supports only partition at this moment. | ||
261 | If admin wants to use incompressible page writeback, it can be done via | ||
262 | |||
263 | "echo huge > /sys/block/zramX/writeback" | ||
264 | |||
265 | To use idle page writeback, the user first needs to declare zram pages | ||
266 | as idle. | ||
267 | |||
268 | "echo all > /sys/block/zramX/idle" | ||
269 | |||
270 | From now on, any pages on zram are idle pages. The idle mark | ||
271 | of a page is removed when someone requests access to that block. | ||
272 | IOW, unless there is an access request, those pages remain idle. | ||
273 | |||
274 | Admin can request writeback of those idle pages at right timing via | ||
275 | |||
276 | "echo idle > /sys/block/zramX/writeback" | ||
277 | |||
278 | With the command, zram writes back idle pages from memory to the storage. | ||
279 | |||
280 | If there are lots of write IO with flash device, potentially, it has | ||
281 | flash wearout problem so that admin needs to design write limitation | ||
282 | to guarantee storage health for entire product life. | ||
283 | To overcome the concern, zram supports "writeback_limit". | ||
284 | The "writeback_limit"'s default value is 0 so that it doesn't limit | ||
285 | any writeback. If admin wants to measure the writeback count in a certain | ||
286 | period, it can be checked via /sys/block/zram0/bd_stat's 3rd column. | ||
287 | |||
288 | If admin wants to limit writeback to 400M per day, it can be done | ||
289 | like below. | ||
290 | |||
291 | MB_SHIFT=20 | ||
292 | PAGE_SHIFT=12 | ||
293 | echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \ | ||
294 | /sys/block/zram0/writeback_limit | ||
295 | |||
296 | If admin wants to allow further writeback again, it can be done like below | ||
297 | |||
298 | echo 0 > /sys/block/zram0/writeback_limit | ||
299 | |||
300 | If admin want to see remaining writeback budget since he set, | ||
301 | |||
302 | cat /sys/block/zram0/writeback_limit | ||
303 | |||
304 | The writeback_limit count will reset whenever you reset zram (e.g., | ||
305 | system reboot, echo 1 > /sys/block/zramX/reset), so keeping track of | ||
306 | how much writeback happened before the reset, in order to allocate an | ||
307 | extra writeback budget in the next setting, is the user's job. | ||
245 | 308 | ||
246 | = memory tracking | 309 | = memory tracking |
247 | 310 | ||
@@ -251,16 +314,17 @@ pages of the process with*pagemap. | |||
251 | If you enable the feature, you could see block state via | 314 | If you enable the feature, you could see block state via |
252 | /sys/kernel/debug/zram/zram0/block_state". The output is as follows, | 315 | /sys/kernel/debug/zram/zram0/block_state". The output is as follows, |
253 | 316 | ||
254 | 300 75.033841 .wh | 317 | 300 75.033841 .wh. |
255 | 301 63.806904 s.. | 318 | 301 63.806904 s... |
256 | 302 63.806919 ..h | 319 | 302 63.806919 ..hi |
257 | 320 | ||
258 | First column is zram's block index. | 321 | First column is zram's block index. |
259 | Second column is access time since the system was booted | 322 | Second column is access time since the system was booted |
260 | Third column is state of the block. | 323 | Third column is state of the block. |
261 | (s: same page | 324 | (s: same page |
262 | w: written page to backing store | 325 | w: written page to backing store |
263 | h: huge page) | 326 | h: huge page |
327 | i: idle page) | ||
264 | 328 | ||
265 | First line of above example says 300th block is accessed at 75.033841sec | 329 | First line of above example says 300th block is accessed at 75.033841sec |
266 | and the block's state is huge so it is written back to the backing | 330 | and the block's state is huge so it is written back to the backing |
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst index aabc8738b3d8..b72d07d70239 100644 --- a/Documentation/dev-tools/kasan.rst +++ b/Documentation/dev-tools/kasan.rst | |||
@@ -4,15 +4,25 @@ The Kernel Address Sanitizer (KASAN) | |||
4 | Overview | 4 | Overview |
5 | -------- | 5 | -------- |
6 | 6 | ||
7 | KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides | 7 | KernelAddressSANitizer (KASAN) is a dynamic memory error detector designed to |
8 | a fast and comprehensive solution for finding use-after-free and out-of-bounds | 8 | find out-of-bound and use-after-free bugs. KASAN has two modes: generic KASAN |
9 | bugs. | 9 | (similar to userspace ASan) and software tag-based KASAN (similar to userspace |
10 | HWASan). | ||
10 | 11 | ||
11 | KASAN uses compile-time instrumentation for checking every memory access, | 12 | KASAN uses compile-time instrumentation to insert validity checks before every |
12 | therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is | 13 | memory access, and therefore requires a compiler version that supports that. |
13 | required for detection of out-of-bounds accesses to stack or global variables. | ||
14 | 14 | ||
15 | Currently KASAN is supported only for the x86_64 and arm64 architectures. | 15 | Generic KASAN is supported in both GCC and Clang. With GCC it requires version |
16 | 4.9.2 or later for basic support and version 5.0 or later for detection of | ||
17 | out-of-bounds accesses for stack and global variables and for inline | ||
18 | instrumentation mode (see the Usage section). With Clang it requires version | ||
19 | 7.0.0 or later and it doesn't support detection of out-of-bounds accesses for | ||
20 | global variables yet. | ||
21 | |||
22 | Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later. | ||
23 | |||
24 | Currently generic KASAN is supported for the x86_64, arm64, xtensa and s390 | ||
25 | architectures, and tag-based KASAN is supported only for arm64. | ||
16 | 26 | ||
17 | Usage | 27 | Usage |
18 | ----- | 28 | ----- |
@@ -21,12 +31,14 @@ To enable KASAN configure kernel with:: | |||
21 | 31 | ||
22 | CONFIG_KASAN = y | 32 | CONFIG_KASAN = y |
23 | 33 | ||
24 | and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and | 34 | and choose between CONFIG_KASAN_GENERIC (to enable generic KASAN) and |
25 | inline are compiler instrumentation types. The former produces smaller binary | 35 | CONFIG_KASAN_SW_TAGS (to enable software tag-based KASAN). |
26 | the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC | 36 | |
27 | version 5.0 or later. | 37 | You also need to choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. |
38 | Outline and inline are compiler instrumentation types. The former produces | ||
39 | smaller binary while the latter is 1.1 - 2 times faster. | ||
28 | 40 | ||
29 | KASAN works with both SLUB and SLAB memory allocators. | 41 | Both KASAN modes work with both SLUB and SLAB memory allocators. |
30 | For better bug detection and nicer reporting, enable CONFIG_STACKTRACE. | 42 | For better bug detection and nicer reporting, enable CONFIG_STACKTRACE. |
31 | 43 | ||
32 | To disable instrumentation for specific files or directories, add a line | 44 | To disable instrumentation for specific files or directories, add a line |
@@ -43,85 +55,85 @@ similar to the following to the respective kernel Makefile: | |||
43 | Error reports | 55 | Error reports |
44 | ~~~~~~~~~~~~~ | 56 | ~~~~~~~~~~~~~ |
45 | 57 | ||
46 | A typical out of bounds access report looks like this:: | 58 | A typical out-of-bounds access generic KASAN report looks like this:: |
47 | 59 | ||
48 | ================================================================== | 60 | ================================================================== |
49 | BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3 | 61 | BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan] |
50 | Write of size 1 by task modprobe/1689 | 62 | Write of size 1 at addr ffff8801f44ec37b by task insmod/2760 |
51 | ============================================================================= | 63 | |
52 | BUG kmalloc-128 (Not tainted): kasan error | 64 | CPU: 1 PID: 2760 Comm: insmod Not tainted 4.19.0-rc3+ #698 |
53 | ----------------------------------------------------------------------------- | 65 | Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014 |
54 | |||
55 | Disabling lock debugging due to kernel taint | ||
56 | INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689 | ||
57 | __slab_alloc+0x4b4/0x4f0 | ||
58 | kmem_cache_alloc_trace+0x10b/0x190 | ||
59 | kmalloc_oob_right+0x3d/0x75 [test_kasan] | ||
60 | init_module+0x9/0x47 [test_kasan] | ||
61 | do_one_initcall+0x99/0x200 | ||
62 | load_module+0x2cb3/0x3b20 | ||
63 | SyS_finit_module+0x76/0x80 | ||
64 | system_call_fastpath+0x12/0x17 | ||
65 | INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080 | ||
66 | INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720 | ||
67 | |||
68 | Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ | ||
69 | Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
70 | Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
71 | Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
72 | Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
73 | Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
74 | Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
75 | Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk | ||
76 | Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk. | ||
77 | Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc ........ | ||
78 | Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ | ||
79 | CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98 | ||
80 | Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 | ||
81 | ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78 | ||
82 | ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8 | ||
83 | ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558 | ||
84 | Call Trace: | 66 | Call Trace: |
85 | [<ffffffff81cc68ae>] dump_stack+0x46/0x58 | 67 | dump_stack+0x94/0xd8 |
86 | [<ffffffff811fd848>] print_trailer+0xf8/0x160 | 68 | print_address_description+0x73/0x280 |
87 | [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan] | 69 | kasan_report+0x144/0x187 |
88 | [<ffffffff811ff0f5>] object_err+0x35/0x40 | 70 | __asan_report_store1_noabort+0x17/0x20 |
89 | [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan] | 71 | kmalloc_oob_right+0xa8/0xbc [test_kasan] |
90 | [<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0 | 72 | kmalloc_tests_init+0x16/0x700 [test_kasan] |
91 | [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40 | 73 | do_one_initcall+0xa5/0x3ae |
92 | [<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40 | 74 | do_init_module+0x1b6/0x547 |
93 | [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40 | 75 | load_module+0x75df/0x8070 |
94 | [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan] | 76 | __do_sys_init_module+0x1c6/0x200 |
95 | [<ffffffff8120a995>] __asan_store1+0x75/0xb0 | 77 | __x64_sys_init_module+0x6e/0xb0 |
96 | [<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan] | 78 | do_syscall_64+0x9f/0x2c0 |
97 | [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan] | 79 | entry_SYSCALL_64_after_hwframe+0x44/0xa9 |
98 | [<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan] | 80 | RIP: 0033:0x7f96443109da |
99 | [<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan] | 81 | RSP: 002b:00007ffcf0b51b08 EFLAGS: 00000202 ORIG_RAX: 00000000000000af |
100 | [<ffffffff810002d9>] do_one_initcall+0x99/0x200 | 82 | RAX: ffffffffffffffda RBX: 000055dc3ee521a0 RCX: 00007f96443109da |
101 | [<ffffffff811e4e5c>] ? __vunmap+0xec/0x160 | 83 | RDX: 00007f96445cff88 RSI: 0000000000057a50 RDI: 00007f9644992000 |
102 | [<ffffffff81114f63>] load_module+0x2cb3/0x3b20 | 84 | RBP: 000055dc3ee510b0 R08: 0000000000000003 R09: 0000000000000000 |
103 | [<ffffffff8110fd70>] ? m_show+0x240/0x240 | 85 | R10: 00007f964430cd0a R11: 0000000000000202 R12: 00007f96445cff88 |
104 | [<ffffffff81115f06>] SyS_finit_module+0x76/0x80 | 86 | R13: 000055dc3ee51090 R14: 0000000000000000 R15: 0000000000000000 |
105 | [<ffffffff81cd3129>] system_call_fastpath+0x12/0x17 | 87 | |
88 | Allocated by task 2760: | ||
89 | save_stack+0x43/0xd0 | ||
90 | kasan_kmalloc+0xa7/0xd0 | ||
91 | kmem_cache_alloc_trace+0xe1/0x1b0 | ||
92 | kmalloc_oob_right+0x56/0xbc [test_kasan] | ||
93 | kmalloc_tests_init+0x16/0x700 [test_kasan] | ||
94 | do_one_initcall+0xa5/0x3ae | ||
95 | do_init_module+0x1b6/0x547 | ||
96 | load_module+0x75df/0x8070 | ||
97 | __do_sys_init_module+0x1c6/0x200 | ||
98 | __x64_sys_init_module+0x6e/0xb0 | ||
99 | do_syscall_64+0x9f/0x2c0 | ||
100 | entry_SYSCALL_64_after_hwframe+0x44/0xa9 | ||
101 | |||
102 | Freed by task 815: | ||
103 | save_stack+0x43/0xd0 | ||
104 | __kasan_slab_free+0x135/0x190 | ||
105 | kasan_slab_free+0xe/0x10 | ||
106 | kfree+0x93/0x1a0 | ||
107 | umh_complete+0x6a/0xa0 | ||
108 | call_usermodehelper_exec_async+0x4c3/0x640 | ||
109 | ret_from_fork+0x35/0x40 | ||
110 | |||
111 | The buggy address belongs to the object at ffff8801f44ec300 | ||
112 | which belongs to the cache kmalloc-128 of size 128 | ||
113 | The buggy address is located 123 bytes inside of | ||
114 | 128-byte region [ffff8801f44ec300, ffff8801f44ec380) | ||
115 | The buggy address belongs to the page: | ||
116 | page:ffffea0007d13b00 count:1 mapcount:0 mapping:ffff8801f7001640 index:0x0 | ||
117 | flags: 0x200000000000100(slab) | ||
118 | raw: 0200000000000100 ffffea0007d11dc0 0000001a0000001a ffff8801f7001640 | ||
119 | raw: 0000000000000000 0000000080150015 00000001ffffffff 0000000000000000 | ||
120 | page dumped because: kasan: bad access detected | ||
121 | |||
106 | Memory state around the buggy address: | 122 | Memory state around the buggy address: |
107 | ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc | 123 | ffff8801f44ec200: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb |
108 | ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc | 124 | ffff8801f44ec280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc |
109 | ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc | 125 | >ffff8801f44ec300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03 |
110 | ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc | 126 | ^ |
111 | ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00 | 127 | ffff8801f44ec380: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb |
112 | >ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc | 128 | ffff8801f44ec400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc |
113 | ^ | ||
114 | ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc | ||
115 | ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc | ||
116 | ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb | ||
117 | ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb | ||
118 | ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb | ||
119 | ================================================================== | 129 | ================================================================== |
120 | 130 | ||
121 | The header of the report discribe what kind of bug happened and what kind of | 131 | The header of the report provides a short summary of what kind of bug happened |
122 | access caused it. It's followed by the description of the accessed slub object | 132 | and what kind of access caused it. It's followed by a stack trace of the bad |
123 | (see 'SLUB Debug output' section in Documentation/vm/slub.rst for details) and | 133 | access, a stack trace of where the accessed memory was allocated (in case bad |
124 | the description of the accessed memory page. | 134 | access happens on a slab object), and a stack trace of where the object was |
135 | freed (in case of a use-after-free bug report). Next comes a description of | ||
136 | the accessed slab object and information about the accessed memory page. | ||
125 | 137 | ||
126 | In the last section the report shows memory state around the accessed address. | 138 | In the last section the report shows memory state around the accessed address. |
127 | Reading this part requires some understanding of how KASAN works. | 139 | Reading this part requires some understanding of how KASAN works. |
@@ -138,18 +150,24 @@ inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h). | |||
138 | In the report above the arrows point to the shadow byte 03, which means that | 150 | In the report above the arrows point to the shadow byte 03, which means that |
139 | the accessed address is partially accessible. | 151 | the accessed address is partially accessible. |
140 | 152 | ||
153 | For tag-based KASAN this last report section shows the memory tags around the | ||
154 | accessed address (see Implementation details section). | ||
155 | |||
141 | 156 | ||
142 | Implementation details | 157 | Implementation details |
143 | ---------------------- | 158 | ---------------------- |
144 | 159 | ||
160 | Generic KASAN | ||
161 | ~~~~~~~~~~~~~ | ||
162 | |||
145 | From a high level, our approach to memory error detection is similar to that | 163 | From a high level, our approach to memory error detection is similar to that |
146 | of kmemcheck: use shadow memory to record whether each byte of memory is safe | 164 | of kmemcheck: use shadow memory to record whether each byte of memory is safe |
147 | to access, and use compile-time instrumentation to check shadow memory on each | 165 | to access, and use compile-time instrumentation to insert checks of shadow |
148 | memory access. | 166 | memory on each memory access. |
149 | 167 | ||
150 | AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory | 168 | Generic KASAN dedicates 1/8th of kernel memory to its shadow memory (e.g. 16TB |
151 | (e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and | 169 | to cover 128TB on x86_64) and uses direct mapping with a scale and offset to |
152 | offset to translate a memory address to its corresponding shadow address. | 170 | translate a memory address to its corresponding shadow address. |
153 | 171 | ||
154 | Here is the function which translates an address to its corresponding shadow | 172 | Here is the function which translates an address to its corresponding shadow |
155 | address:: | 173 | address:: |
@@ -162,12 +180,38 @@ address:: | |||
162 | 180 | ||
163 | where ``KASAN_SHADOW_SCALE_SHIFT = 3``. | 181 | where ``KASAN_SHADOW_SCALE_SHIFT = 3``. |
164 | 182 | ||
165 | Compile-time instrumentation used for checking memory accesses. Compiler inserts | 183 | Compile-time instrumentation is used to insert memory access checks. Compiler |
166 | function calls (__asan_load*(addr), __asan_store*(addr)) before each memory | 184 | inserts function calls (__asan_load*(addr), __asan_store*(addr)) before each |
167 | access of size 1, 2, 4, 8 or 16. These functions check whether memory access is | 185 | memory access of size 1, 2, 4, 8 or 16. These functions check whether memory |
168 | valid or not by checking corresponding shadow memory. | 186 | access is valid or not by checking corresponding shadow memory. |
169 | 187 | ||
170 | GCC 5.0 has possibility to perform inline instrumentation. Instead of making | 188 | GCC 5.0 has possibility to perform inline instrumentation. Instead of making |
171 | function calls GCC directly inserts the code to check the shadow memory. | 189 | function calls GCC directly inserts the code to check the shadow memory. |
172 | This option significantly enlarges kernel but it gives x1.1-x2 performance | 190 | This option significantly enlarges kernel but it gives x1.1-x2 performance |
173 | boost over outline instrumented kernel. | 191 | boost over outline instrumented kernel. |
192 | |||
193 | Software tag-based KASAN | ||
194 | ~~~~~~~~~~~~~~~~~~~~~~~~ | ||
195 | |||
196 | Tag-based KASAN uses the Top Byte Ignore (TBI) feature of modern arm64 CPUs to | ||
197 | store a pointer tag in the top byte of kernel pointers. Like generic KASAN it | ||
198 | uses shadow memory to store memory tags associated with each 16-byte memory | ||
199 | cell (therefore it dedicates 1/16th of the kernel memory for shadow memory). | ||
200 | |||
201 | On each memory allocation tag-based KASAN generates a random tag, tags the | ||
202 | allocated memory with this tag, and embeds this tag into the returned pointer. | ||
203 | Software tag-based KASAN uses compile-time instrumentation to insert checks | ||
204 | before each memory access. These checks make sure that tag of the memory that | ||
205 | is being accessed is equal to tag of the pointer that is used to access this | ||
206 | memory. In case of a tag mismatch tag-based KASAN prints a bug report. | ||
207 | |||
208 | Software tag-based KASAN also has two instrumentation modes (outline, that | ||
209 | emits callbacks to check memory accesses; and inline, that performs the shadow | ||
210 | memory checks inline). With outline instrumentation mode, a bug report is | ||
211 | simply printed from the function that performs the access check. With inline | ||
212 | instrumentation a brk instruction is emitted by the compiler, and a dedicated | ||
213 | brk handler is used to print bug reports. | ||
214 | |||
215 | A potential expansion of this mode is a hardware tag-based mode, which would | ||
216 | use hardware memory tagging support instead of compiler instrumentation and | ||
217 | manual shadow memory manipulation. | ||
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 12a5e6e693b6..b24fd9bccc99 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -182,6 +182,7 @@ read the file /proc/PID/status: | |||
182 | VmSwap: 0 kB | 182 | VmSwap: 0 kB |
183 | HugetlbPages: 0 kB | 183 | HugetlbPages: 0 kB |
184 | CoreDumping: 0 | 184 | CoreDumping: 0 |
185 | THP_enabled: 1 | ||
185 | Threads: 1 | 186 | Threads: 1 |
186 | SigQ: 0/28578 | 187 | SigQ: 0/28578 |
187 | SigPnd: 0000000000000000 | 188 | SigPnd: 0000000000000000 |
@@ -256,6 +257,8 @@ Table 1-2: Contents of the status files (as of 4.8) | |||
256 | HugetlbPages size of hugetlb memory portions | 257 | HugetlbPages size of hugetlb memory portions |
257 | CoreDumping process's memory is currently being dumped | 258 | CoreDumping process's memory is currently being dumped |
258 | (killing the process may lead to a corrupted core) | 259 | (killing the process may lead to a corrupted core) |
260 | THP_enabled process is allowed to use THP (returns 0 when | ||
261 | PR_SET_THP_DISABLE is set on the process | ||
259 | Threads number of threads | 262 | Threads number of threads |
260 | SigQ number of signals queued/max. number for queue | 263 | SigQ number of signals queued/max. number for queue |
261 | SigPnd bitmap of pending signals for the thread | 264 | SigPnd bitmap of pending signals for the thread |
@@ -425,6 +428,7 @@ SwapPss: 0 kB | |||
425 | KernelPageSize: 4 kB | 428 | KernelPageSize: 4 kB |
426 | MMUPageSize: 4 kB | 429 | MMUPageSize: 4 kB |
427 | Locked: 0 kB | 430 | Locked: 0 kB |
431 | THPeligible: 0 | ||
428 | VmFlags: rd ex mr mw me dw | 432 | VmFlags: rd ex mr mw me dw |
429 | 433 | ||
430 | the first of these lines shows the same information as is displayed for the | 434 | the first of these lines shows the same information as is displayed for the |
@@ -462,6 +466,8 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. | |||
462 | "SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this | 466 | "SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this |
463 | does not take into account swapped out page of underlying shmem objects. | 467 | does not take into account swapped out page of underlying shmem objects. |
464 | "Locked" indicates whether the mapping is locked in memory or not. | 468 | "Locked" indicates whether the mapping is locked in memory or not. |
469 | "THPeligible" indicates whether the mapping is eligible for THP pages - 1 if | ||
470 | true, 0 otherwise. | ||
465 | 471 | ||
466 | "VmFlags" field deserves a separate description. This member represents the kernel | 472 | "VmFlags" field deserves a separate description. This member represents the kernel |
467 | flags associated with the particular virtual memory area in two letter encoded | 473 | flags associated with the particular virtual memory area in two letter encoded |
@@ -496,7 +502,9 @@ manner. The codes are the following: | |||
496 | 502 | ||
497 | Note that there is no guarantee that every flag and associated mnemonic will | 503 | Note that there is no guarantee that every flag and associated mnemonic will |
498 | be present in all further kernel releases. Things get changed, the flags may | 504 | be present in all further kernel releases. Things get changed, the flags may |
499 | be vanished or the reverse -- new added. | 505 | be vanished or the reverse -- new added. Interpretation of their meaning |
506 | might change in future as well. So each consumer of these flags has to | ||
507 | follow each specific kernel version for the exact semantic. | ||
500 | 508 | ||
501 | This file is only present if the CONFIG_MMU kernel configuration option is | 509 | This file is only present if the CONFIG_MMU kernel configuration option is |
502 | enabled. | 510 | enabled. |
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 7d73882e2c27..187ce4f599a2 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt | |||
@@ -63,6 +63,7 @@ Currently, these files are in /proc/sys/vm: | |||
63 | - swappiness | 63 | - swappiness |
64 | - user_reserve_kbytes | 64 | - user_reserve_kbytes |
65 | - vfs_cache_pressure | 65 | - vfs_cache_pressure |
66 | - watermark_boost_factor | ||
66 | - watermark_scale_factor | 67 | - watermark_scale_factor |
67 | - zone_reclaim_mode | 68 | - zone_reclaim_mode |
68 | 69 | ||
@@ -856,6 +857,26 @@ ten times more freeable objects than there are. | |||
856 | 857 | ||
857 | ============================================================= | 858 | ============================================================= |
858 | 859 | ||
860 | watermark_boost_factor: | ||
861 | |||
862 | This factor controls the level of reclaim when memory is being fragmented. | ||
863 | It defines the percentage of the high watermark of a zone that will be | ||
864 | reclaimed if pages of different mobility are being mixed within pageblocks. | ||
865 | The intent is that compaction has less work to do in the future and to | ||
866 | increase the success rate of future high-order allocations such as SLUB | ||
867 | allocations, THP and hugetlbfs pages. | ||
868 | |||
869 | To make it sensible with respect to the watermark_scale_factor parameter, | ||
870 | the unit is in fractions of 10,000. The default value of 15,000 means | ||
871 | that up to 150% of the high watermark will be reclaimed in the event of | ||
872 | a pageblock being mixed due to fragmentation. The level of reclaim is | ||
873 | determined by the number of fragmentation events that occurred in the | ||
874 | recent past. If this value is smaller than a pageblock then a pageblocks | ||
875 | worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor | ||
876 | of 0 will disable the feature. | ||
877 | |||
878 | ============================================================= | ||
879 | |||
859 | watermark_scale_factor: | 880 | watermark_scale_factor: |
860 | 881 | ||
861 | This factor controls the aggressiveness of kswapd. It defines the | 882 | This factor controls the aggressiveness of kswapd. It defines the |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ff9291872372..86b18c1bd33c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -110,6 +110,7 @@ config ARM64 | |||
110 | select HAVE_ARCH_JUMP_LABEL | 110 | select HAVE_ARCH_JUMP_LABEL |
111 | select HAVE_ARCH_JUMP_LABEL_RELATIVE | 111 | select HAVE_ARCH_JUMP_LABEL_RELATIVE |
112 | select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) | 112 | select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) |
113 | select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN | ||
113 | select HAVE_ARCH_KGDB | 114 | select HAVE_ARCH_KGDB |
114 | select HAVE_ARCH_MMAP_RND_BITS | 115 | select HAVE_ARCH_MMAP_RND_BITS |
115 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT | 116 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT |
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 398bdb81a900..b025304bde46 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
@@ -101,10 +101,19 @@ else | |||
101 | TEXT_OFFSET := 0x00080000 | 101 | TEXT_OFFSET := 0x00080000 |
102 | endif | 102 | endif |
103 | 103 | ||
104 | ifeq ($(CONFIG_KASAN_SW_TAGS), y) | ||
105 | KASAN_SHADOW_SCALE_SHIFT := 4 | ||
106 | else | ||
107 | KASAN_SHADOW_SCALE_SHIFT := 3 | ||
108 | endif | ||
109 | |||
110 | KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) | ||
111 | KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) | ||
112 | KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) | ||
113 | |||
104 | # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) | 114 | # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) |
105 | # - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) | 115 | # - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) |
106 | # in 32-bit arithmetic | 116 | # in 32-bit arithmetic |
107 | KASAN_SHADOW_SCALE_SHIFT := 3 | ||
108 | KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ | 117 | KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ |
109 | (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ | 118 | (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ |
110 | + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ | 119 | + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ |
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index ed693c5bcec0..2945fe6cd863 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h | |||
@@ -16,10 +16,12 @@ | |||
16 | * 0x400: for dynamic BRK instruction | 16 | * 0x400: for dynamic BRK instruction |
17 | * 0x401: for compile time BRK instruction | 17 | * 0x401: for compile time BRK instruction |
18 | * 0x800: kernel-mode BUG() and WARN() traps | 18 | * 0x800: kernel-mode BUG() and WARN() traps |
19 | * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff) | ||
19 | */ | 20 | */ |
20 | #define FAULT_BRK_IMM 0x100 | 21 | #define FAULT_BRK_IMM 0x100 |
21 | #define KGDB_DYN_DBG_BRK_IMM 0x400 | 22 | #define KGDB_DYN_DBG_BRK_IMM 0x400 |
22 | #define KGDB_COMPILED_DBG_BRK_IMM 0x401 | 23 | #define KGDB_COMPILED_DBG_BRK_IMM 0x401 |
23 | #define BUG_BRK_IMM 0x800 | 24 | #define BUG_BRK_IMM 0x800 |
25 | #define KASAN_BRK_IMM 0x900 | ||
24 | 26 | ||
25 | #endif | 27 | #endif |
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index 8758bb008436..b52aacd2c526 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h | |||
@@ -4,12 +4,16 @@ | |||
4 | 4 | ||
5 | #ifndef __ASSEMBLY__ | 5 | #ifndef __ASSEMBLY__ |
6 | 6 | ||
7 | #ifdef CONFIG_KASAN | ||
8 | |||
9 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
10 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
11 | #include <asm/pgtable-types.h> | 9 | #include <asm/pgtable-types.h> |
12 | 10 | ||
11 | #define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag) | ||
12 | #define arch_kasan_reset_tag(addr) __tag_reset(addr) | ||
13 | #define arch_kasan_get_tag(addr) __tag_get(addr) | ||
14 | |||
15 | #ifdef CONFIG_KASAN | ||
16 | |||
13 | /* | 17 | /* |
14 | * KASAN_SHADOW_START: beginning of the kernel virtual addresses. | 18 | * KASAN_SHADOW_START: beginning of the kernel virtual addresses. |
15 | * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, | 19 | * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 0385752bd079..2bb8721da7ef 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -74,13 +74,11 @@ | |||
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * KASAN requires 1/8th of the kernel virtual address space for the shadow | 77 | * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual |
78 | * region. KASAN can bloat the stack significantly, so double the (minimum) | 78 | * address space for the shadow region respectively. They can bloat the stack |
79 | * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is | 79 | * significantly, so double the (minimum) stack size when they are in use. |
80 | * on. | ||
81 | */ | 80 | */ |
82 | #ifdef CONFIG_KASAN | 81 | #ifdef CONFIG_KASAN |
83 | #define KASAN_SHADOW_SCALE_SHIFT 3 | ||
84 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) | 82 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) |
85 | #ifdef CONFIG_KASAN_EXTRA | 83 | #ifdef CONFIG_KASAN_EXTRA |
86 | #define KASAN_THREAD_SHIFT 2 | 84 | #define KASAN_THREAD_SHIFT 2 |
@@ -221,6 +219,26 @@ extern u64 vabits_user; | |||
221 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) | 219 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) |
222 | 220 | ||
223 | /* | 221 | /* |
222 | * When dealing with data aborts, watchpoints, or instruction traps we may end | ||
223 | * up with a tagged userland pointer. Clear the tag to get a sane pointer to | ||
224 | * pass on to access_ok(), for instance. | ||
225 | */ | ||
226 | #define untagged_addr(addr) \ | ||
227 | ((__typeof__(addr))sign_extend64((u64)(addr), 55)) | ||
228 | |||
229 | #ifdef CONFIG_KASAN_SW_TAGS | ||
230 | #define __tag_shifted(tag) ((u64)(tag) << 56) | ||
231 | #define __tag_set(addr, tag) (__typeof__(addr))( \ | ||
232 | ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag)) | ||
233 | #define __tag_reset(addr) untagged_addr(addr) | ||
234 | #define __tag_get(addr) (__u8)((u64)(addr) >> 56) | ||
235 | #else | ||
236 | #define __tag_set(addr, tag) (addr) | ||
237 | #define __tag_reset(addr) (addr) | ||
238 | #define __tag_get(addr) 0 | ||
239 | #endif | ||
240 | |||
241 | /* | ||
224 | * Physical vs virtual RAM address space conversion. These are | 242 | * Physical vs virtual RAM address space conversion. These are |
225 | * private definitions which should NOT be used outside memory.h | 243 | * private definitions which should NOT be used outside memory.h |
226 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | 244 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. |
@@ -303,7 +321,13 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
303 | #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) | 321 | #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) |
304 | #define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) | 322 | #define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) |
305 | 323 | ||
306 | #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) | 324 | #define page_to_virt(page) ({ \ |
325 | unsigned long __addr = \ | ||
326 | ((__page_to_voff(page)) | PAGE_OFFSET); \ | ||
327 | __addr = __tag_set(__addr, page_kasan_tag(page)); \ | ||
328 | ((void *)__addr); \ | ||
329 | }) | ||
330 | |||
307 | #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) | 331 | #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) |
308 | 332 | ||
309 | #define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ | 333 | #define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ |
@@ -311,9 +335,10 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
311 | #endif | 335 | #endif |
312 | #endif | 336 | #endif |
313 | 337 | ||
314 | #define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET) | 338 | #define _virt_addr_is_linear(kaddr) \ |
315 | #define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \ | 339 | (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET) |
316 | _virt_addr_valid(kaddr)) | 340 | #define virt_addr_valid(kaddr) \ |
341 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) | ||
317 | 342 | ||
318 | #include <asm-generic/memory_model.h> | 343 | #include <asm-generic/memory_model.h> |
319 | 344 | ||
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 22bb3ae514f5..e9b0a7d75184 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -299,6 +299,7 @@ | |||
299 | #define TCR_A1 (UL(1) << 22) | 299 | #define TCR_A1 (UL(1) << 22) |
300 | #define TCR_ASID16 (UL(1) << 36) | 300 | #define TCR_ASID16 (UL(1) << 36) |
301 | #define TCR_TBI0 (UL(1) << 37) | 301 | #define TCR_TBI0 (UL(1) << 37) |
302 | #define TCR_TBI1 (UL(1) << 38) | ||
302 | #define TCR_HA (UL(1) << 39) | 303 | #define TCR_HA (UL(1) << 39) |
303 | #define TCR_HD (UL(1) << 40) | 304 | #define TCR_HD (UL(1) << 40) |
304 | #define TCR_NFD1 (UL(1) << 54) | 305 | #define TCR_NFD1 (UL(1) << 54) |
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index fad33f5fde47..ed252435fd92 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -95,13 +95,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si | |||
95 | return ret; | 95 | return ret; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* | ||
99 | * When dealing with data aborts, watchpoints, or instruction traps we may end | ||
100 | * up with a tagged userland pointer. Clear the tag to get a sane pointer to | ||
101 | * pass on to access_ok(), for instance. | ||
102 | */ | ||
103 | #define untagged_addr(addr) sign_extend64(addr, 55) | ||
104 | |||
105 | #define access_ok(type, addr, size) __range_ok(addr, size) | 98 | #define access_ok(type, addr, size) __range_ok(addr, size) |
106 | #define user_addr_max get_fs | 99 | #define user_addr_max get_fs |
107 | 100 | ||
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 5f4d9acb32f5..cdc71cf70aad 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/sizes.h> | 35 | #include <linux/sizes.h> |
36 | #include <linux/syscalls.h> | 36 | #include <linux/syscalls.h> |
37 | #include <linux/mm_types.h> | 37 | #include <linux/mm_types.h> |
38 | #include <linux/kasan.h> | ||
38 | 39 | ||
39 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
40 | #include <asm/bug.h> | 41 | #include <asm/bug.h> |
@@ -969,6 +970,58 @@ static struct break_hook bug_break_hook = { | |||
969 | .fn = bug_handler, | 970 | .fn = bug_handler, |
970 | }; | 971 | }; |
971 | 972 | ||
973 | #ifdef CONFIG_KASAN_SW_TAGS | ||
974 | |||
975 | #define KASAN_ESR_RECOVER 0x20 | ||
976 | #define KASAN_ESR_WRITE 0x10 | ||
977 | #define KASAN_ESR_SIZE_MASK 0x0f | ||
978 | #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) | ||
979 | |||
980 | static int kasan_handler(struct pt_regs *regs, unsigned int esr) | ||
981 | { | ||
982 | bool recover = esr & KASAN_ESR_RECOVER; | ||
983 | bool write = esr & KASAN_ESR_WRITE; | ||
984 | size_t size = KASAN_ESR_SIZE(esr); | ||
985 | u64 addr = regs->regs[0]; | ||
986 | u64 pc = regs->pc; | ||
987 | |||
988 | if (user_mode(regs)) | ||
989 | return DBG_HOOK_ERROR; | ||
990 | |||
991 | kasan_report(addr, size, write, pc); | ||
992 | |||
993 | /* | ||
994 | * The instrumentation allows to control whether we can proceed after | ||
995 | * a crash was detected. This is done by passing the -recover flag to | ||
996 | * the compiler. Disabling recovery allows to generate more compact | ||
997 | * code. | ||
998 | * | ||
999 | * Unfortunately disabling recovery doesn't work for the kernel right | ||
1000 | * now. KASAN reporting is disabled in some contexts (for example when | ||
1001 | * the allocator accesses slab object metadata; this is controlled by | ||
1002 | * current->kasan_depth). All these accesses are detected by the tool, | ||
1003 | * even though the reports for them are not printed. | ||
1004 | * | ||
1005 | * This is something that might be fixed at some point in the future. | ||
1006 | */ | ||
1007 | if (!recover) | ||
1008 | die("Oops - KASAN", regs, 0); | ||
1009 | |||
1010 | /* If thread survives, skip over the brk instruction and continue: */ | ||
1011 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); | ||
1012 | return DBG_HOOK_HANDLED; | ||
1013 | } | ||
1014 | |||
1015 | #define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM) | ||
1016 | #define KASAN_ESR_MASK 0xffffff00 | ||
1017 | |||
1018 | static struct break_hook kasan_break_hook = { | ||
1019 | .esr_val = KASAN_ESR_VAL, | ||
1020 | .esr_mask = KASAN_ESR_MASK, | ||
1021 | .fn = kasan_handler, | ||
1022 | }; | ||
1023 | #endif | ||
1024 | |||
972 | /* | 1025 | /* |
973 | * Initial handler for AArch64 BRK exceptions | 1026 | * Initial handler for AArch64 BRK exceptions |
974 | * This handler only used until debug_traps_init(). | 1027 | * This handler only used until debug_traps_init(). |
@@ -976,6 +1029,10 @@ static struct break_hook bug_break_hook = { | |||
976 | int __init early_brk64(unsigned long addr, unsigned int esr, | 1029 | int __init early_brk64(unsigned long addr, unsigned int esr, |
977 | struct pt_regs *regs) | 1030 | struct pt_regs *regs) |
978 | { | 1031 | { |
1032 | #ifdef CONFIG_KASAN_SW_TAGS | ||
1033 | if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL) | ||
1034 | return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; | ||
1035 | #endif | ||
979 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; | 1036 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; |
980 | } | 1037 | } |
981 | 1038 | ||
@@ -983,4 +1040,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr, | |||
983 | void __init trap_init(void) | 1040 | void __init trap_init(void) |
984 | { | 1041 | { |
985 | register_break_hook(&bug_break_hook); | 1042 | register_break_hook(&bug_break_hook); |
1043 | #ifdef CONFIG_KASAN_SW_TAGS | ||
1044 | register_break_hook(&kasan_break_hook); | ||
1045 | #endif | ||
986 | } | 1046 | } |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5fe6d2e40e9b..efb7b2cbead5 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <asm/daifflags.h> | 40 | #include <asm/daifflags.h> |
41 | #include <asm/debug-monitors.h> | 41 | #include <asm/debug-monitors.h> |
42 | #include <asm/esr.h> | 42 | #include <asm/esr.h> |
43 | #include <asm/kasan.h> | ||
43 | #include <asm/sysreg.h> | 44 | #include <asm/sysreg.h> |
44 | #include <asm/system_misc.h> | 45 | #include <asm/system_misc.h> |
45 | #include <asm/pgtable.h> | 46 | #include <asm/pgtable.h> |
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr) | |||
132 | data_abort_decode(esr); | 133 | data_abort_decode(esr); |
133 | } | 134 | } |
134 | 135 | ||
136 | static inline bool is_ttbr0_addr(unsigned long addr) | ||
137 | { | ||
138 | /* entry assembly clears tags for TTBR0 addrs */ | ||
139 | return addr < TASK_SIZE; | ||
140 | } | ||
141 | |||
142 | static inline bool is_ttbr1_addr(unsigned long addr) | ||
143 | { | ||
144 | /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ | ||
145 | return arch_kasan_reset_tag(addr) >= VA_START; | ||
146 | } | ||
147 | |||
135 | /* | 148 | /* |
136 | * Dump out the page tables associated with 'addr' in the currently active mm. | 149 | * Dump out the page tables associated with 'addr' in the currently active mm. |
137 | */ | 150 | */ |
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr) | |||
141 | pgd_t *pgdp; | 154 | pgd_t *pgdp; |
142 | pgd_t pgd; | 155 | pgd_t pgd; |
143 | 156 | ||
144 | if (addr < TASK_SIZE) { | 157 | if (is_ttbr0_addr(addr)) { |
145 | /* TTBR0 */ | 158 | /* TTBR0 */ |
146 | mm = current->active_mm; | 159 | mm = current->active_mm; |
147 | if (mm == &init_mm) { | 160 | if (mm == &init_mm) { |
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr) | |||
149 | addr); | 162 | addr); |
150 | return; | 163 | return; |
151 | } | 164 | } |
152 | } else if (addr >= VA_START) { | 165 | } else if (is_ttbr1_addr(addr)) { |
153 | /* TTBR1 */ | 166 | /* TTBR1 */ |
154 | mm = &init_mm; | 167 | mm = &init_mm; |
155 | } else { | 168 | } else { |
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, | |||
254 | if (fsc_type == ESR_ELx_FSC_PERM) | 267 | if (fsc_type == ESR_ELx_FSC_PERM) |
255 | return true; | 268 | return true; |
256 | 269 | ||
257 | if (addr < TASK_SIZE && system_uses_ttbr0_pan()) | 270 | if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan()) |
258 | return fsc_type == ESR_ELx_FSC_FAULT && | 271 | return fsc_type == ESR_ELx_FSC_FAULT && |
259 | (regs->pstate & PSR_PAN_BIT); | 272 | (regs->pstate & PSR_PAN_BIT); |
260 | 273 | ||
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) | |||
319 | * type", so we ignore this wrinkle and just return the translation | 332 | * type", so we ignore this wrinkle and just return the translation |
320 | * fault.) | 333 | * fault.) |
321 | */ | 334 | */ |
322 | if (current->thread.fault_address >= TASK_SIZE) { | 335 | if (!is_ttbr0_addr(current->thread.fault_address)) { |
323 | switch (ESR_ELx_EC(esr)) { | 336 | switch (ESR_ELx_EC(esr)) { |
324 | case ESR_ELx_EC_DABT_LOW: | 337 | case ESR_ELx_EC_DABT_LOW: |
325 | /* | 338 | /* |
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
455 | mm_flags |= FAULT_FLAG_WRITE; | 468 | mm_flags |= FAULT_FLAG_WRITE; |
456 | } | 469 | } |
457 | 470 | ||
458 | if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) { | 471 | if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { |
459 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ | 472 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ |
460 | if (regs->orig_addr_limit == KERNEL_DS) | 473 | if (regs->orig_addr_limit == KERNEL_DS) |
461 | die_kernel_fault("access to user memory with fs=KERNEL_DS", | 474 | die_kernel_fault("access to user memory with fs=KERNEL_DS", |
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr, | |||
603 | unsigned int esr, | 616 | unsigned int esr, |
604 | struct pt_regs *regs) | 617 | struct pt_regs *regs) |
605 | { | 618 | { |
606 | if (addr < TASK_SIZE) | 619 | if (is_ttbr0_addr(addr)) |
607 | return do_page_fault(addr, esr, regs); | 620 | return do_page_fault(addr, esr, regs); |
608 | 621 | ||
609 | do_bad_area(addr, esr, regs); | 622 | do_bad_area(addr, esr, regs); |
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, | |||
758 | * re-enabled IRQs. If the address is a kernel address, apply | 771 | * re-enabled IRQs. If the address is a kernel address, apply |
759 | * BP hardening prior to enabling IRQs and pre-emption. | 772 | * BP hardening prior to enabling IRQs and pre-emption. |
760 | */ | 773 | */ |
761 | if (addr > TASK_SIZE) | 774 | if (!is_ttbr0_addr(addr)) |
762 | arm64_apply_bp_hardening(); | 775 | arm64_apply_bp_hardening(); |
763 | 776 | ||
764 | local_daif_restore(DAIF_PROCCTX); | 777 | local_daif_restore(DAIF_PROCCTX); |
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, | |||
771 | struct pt_regs *regs) | 784 | struct pt_regs *regs) |
772 | { | 785 | { |
773 | if (user_mode(regs)) { | 786 | if (user_mode(regs)) { |
774 | if (instruction_pointer(regs) > TASK_SIZE) | 787 | if (!is_ttbr0_addr(instruction_pointer(regs))) |
775 | arm64_apply_bp_hardening(); | 788 | arm64_apply_bp_hardening(); |
776 | local_daif_restore(DAIF_PROCCTX); | 789 | local_daif_restore(DAIF_PROCCTX); |
777 | } | 790 | } |
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, | |||
825 | if (interrupts_enabled(regs)) | 838 | if (interrupts_enabled(regs)) |
826 | trace_hardirqs_off(); | 839 | trace_hardirqs_off(); |
827 | 840 | ||
828 | if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) | 841 | if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs))) |
829 | arm64_apply_bp_hardening(); | 842 | arm64_apply_bp_hardening(); |
830 | 843 | ||
831 | if (!inf->fn(addr, esr, regs)) { | 844 | if (!inf->fn(addr, esr, regs)) { |
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 63527e585aac..4b55b15707a3 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c | |||
@@ -39,7 +39,15 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) | |||
39 | { | 39 | { |
40 | void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, | 40 | void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, |
41 | __pa(MAX_DMA_ADDRESS), | 41 | __pa(MAX_DMA_ADDRESS), |
42 | MEMBLOCK_ALLOC_ACCESSIBLE, node); | 42 | MEMBLOCK_ALLOC_KASAN, node); |
43 | return __pa(p); | ||
44 | } | ||
45 | |||
46 | static phys_addr_t __init kasan_alloc_raw_page(int node) | ||
47 | { | ||
48 | void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, | ||
49 | __pa(MAX_DMA_ADDRESS), | ||
50 | MEMBLOCK_ALLOC_KASAN, node); | ||
43 | return __pa(p); | 51 | return __pa(p); |
44 | } | 52 | } |
45 | 53 | ||
@@ -47,8 +55,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, | |||
47 | bool early) | 55 | bool early) |
48 | { | 56 | { |
49 | if (pmd_none(READ_ONCE(*pmdp))) { | 57 | if (pmd_none(READ_ONCE(*pmdp))) { |
50 | phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte) | 58 | phys_addr_t pte_phys = early ? |
51 | : kasan_alloc_zeroed_page(node); | 59 | __pa_symbol(kasan_early_shadow_pte) |
60 | : kasan_alloc_zeroed_page(node); | ||
52 | __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); | 61 | __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); |
53 | } | 62 | } |
54 | 63 | ||
@@ -60,8 +69,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, | |||
60 | bool early) | 69 | bool early) |
61 | { | 70 | { |
62 | if (pud_none(READ_ONCE(*pudp))) { | 71 | if (pud_none(READ_ONCE(*pudp))) { |
63 | phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd) | 72 | phys_addr_t pmd_phys = early ? |
64 | : kasan_alloc_zeroed_page(node); | 73 | __pa_symbol(kasan_early_shadow_pmd) |
74 | : kasan_alloc_zeroed_page(node); | ||
65 | __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); | 75 | __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); |
66 | } | 76 | } |
67 | 77 | ||
@@ -72,8 +82,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node, | |||
72 | bool early) | 82 | bool early) |
73 | { | 83 | { |
74 | if (pgd_none(READ_ONCE(*pgdp))) { | 84 | if (pgd_none(READ_ONCE(*pgdp))) { |
75 | phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud) | 85 | phys_addr_t pud_phys = early ? |
76 | : kasan_alloc_zeroed_page(node); | 86 | __pa_symbol(kasan_early_shadow_pud) |
87 | : kasan_alloc_zeroed_page(node); | ||
77 | __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE); | 88 | __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE); |
78 | } | 89 | } |
79 | 90 | ||
@@ -87,8 +98,11 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, | |||
87 | pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early); | 98 | pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early); |
88 | 99 | ||
89 | do { | 100 | do { |
90 | phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page) | 101 | phys_addr_t page_phys = early ? |
91 | : kasan_alloc_zeroed_page(node); | 102 | __pa_symbol(kasan_early_shadow_page) |
103 | : kasan_alloc_raw_page(node); | ||
104 | if (!early) | ||
105 | memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); | ||
92 | next = addr + PAGE_SIZE; | 106 | next = addr + PAGE_SIZE; |
93 | set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); | 107 | set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); |
94 | } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); | 108 | } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); |
@@ -205,14 +219,14 @@ void __init kasan_init(void) | |||
205 | kasan_map_populate(kimg_shadow_start, kimg_shadow_end, | 219 | kasan_map_populate(kimg_shadow_start, kimg_shadow_end, |
206 | early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); | 220 | early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); |
207 | 221 | ||
208 | kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, | 222 | kasan_populate_early_shadow((void *)KASAN_SHADOW_START, |
209 | (void *)mod_shadow_start); | 223 | (void *)mod_shadow_start); |
210 | kasan_populate_zero_shadow((void *)kimg_shadow_end, | 224 | kasan_populate_early_shadow((void *)kimg_shadow_end, |
211 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); | 225 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); |
212 | 226 | ||
213 | if (kimg_shadow_start > mod_shadow_end) | 227 | if (kimg_shadow_start > mod_shadow_end) |
214 | kasan_populate_zero_shadow((void *)mod_shadow_end, | 228 | kasan_populate_early_shadow((void *)mod_shadow_end, |
215 | (void *)kimg_shadow_start); | 229 | (void *)kimg_shadow_start); |
216 | 230 | ||
217 | for_each_memblock(memory, reg) { | 231 | for_each_memblock(memory, reg) { |
218 | void *start = (void *)__phys_to_virt(reg->base); | 232 | void *start = (void *)__phys_to_virt(reg->base); |
@@ -227,16 +241,19 @@ void __init kasan_init(void) | |||
227 | } | 241 | } |
228 | 242 | ||
229 | /* | 243 | /* |
230 | * KAsan may reuse the contents of kasan_zero_pte directly, so we | 244 | * KAsan may reuse the contents of kasan_early_shadow_pte directly, |
231 | * should make sure that it maps the zero page read-only. | 245 | * so we should make sure that it maps the zero page read-only. |
232 | */ | 246 | */ |
233 | for (i = 0; i < PTRS_PER_PTE; i++) | 247 | for (i = 0; i < PTRS_PER_PTE; i++) |
234 | set_pte(&kasan_zero_pte[i], | 248 | set_pte(&kasan_early_shadow_pte[i], |
235 | pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); | 249 | pfn_pte(sym_to_pfn(kasan_early_shadow_page), |
250 | PAGE_KERNEL_RO)); | ||
236 | 251 | ||
237 | memset(kasan_zero_page, 0, PAGE_SIZE); | 252 | memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); |
238 | cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); | 253 | cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); |
239 | 254 | ||
255 | kasan_init_tags(); | ||
256 | |||
240 | /* At this point kasan is fully initialized. Enable error messages */ | 257 | /* At this point kasan is fully initialized. Enable error messages */ |
241 | init_task.kasan_depth = 0; | 258 | init_task.kasan_depth = 0; |
242 | pr_info("KernelAddressSanitizer initialized\n"); | 259 | pr_info("KernelAddressSanitizer initialized\n"); |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index da513a1facf4..b6f5aa52ac67 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -1003,10 +1003,8 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) | |||
1003 | 1003 | ||
1004 | pmd = READ_ONCE(*pmdp); | 1004 | pmd = READ_ONCE(*pmdp); |
1005 | 1005 | ||
1006 | if (!pmd_present(pmd)) | ||
1007 | return 1; | ||
1008 | if (!pmd_table(pmd)) { | 1006 | if (!pmd_table(pmd)) { |
1009 | VM_WARN_ON(!pmd_table(pmd)); | 1007 | VM_WARN_ON(1); |
1010 | return 1; | 1008 | return 1; |
1011 | } | 1009 | } |
1012 | 1010 | ||
@@ -1026,10 +1024,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) | |||
1026 | 1024 | ||
1027 | pud = READ_ONCE(*pudp); | 1025 | pud = READ_ONCE(*pudp); |
1028 | 1026 | ||
1029 | if (!pud_present(pud)) | ||
1030 | return 1; | ||
1031 | if (!pud_table(pud)) { | 1027 | if (!pud_table(pud)) { |
1032 | VM_WARN_ON(!pud_table(pud)); | 1028 | VM_WARN_ON(1); |
1033 | return 1; | 1029 | return 1; |
1034 | } | 1030 | } |
1035 | 1031 | ||
@@ -1047,6 +1043,11 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) | |||
1047 | return 1; | 1043 | return 1; |
1048 | } | 1044 | } |
1049 | 1045 | ||
1046 | int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) | ||
1047 | { | ||
1048 | return 0; /* Don't attempt a block mapping */ | ||
1049 | } | ||
1050 | |||
1050 | #ifdef CONFIG_MEMORY_HOTPLUG | 1051 | #ifdef CONFIG_MEMORY_HOTPLUG |
1051 | int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, | 1052 | int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, |
1052 | bool want_memblock) | 1053 | bool want_memblock) |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index e05b3ce1db6b..73886a5f1f30 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -47,6 +47,12 @@ | |||
47 | /* PTWs cacheable, inner/outer WBWA */ | 47 | /* PTWs cacheable, inner/outer WBWA */ |
48 | #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA | 48 | #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA |
49 | 49 | ||
50 | #ifdef CONFIG_KASAN_SW_TAGS | ||
51 | #define TCR_KASAN_FLAGS TCR_TBI1 | ||
52 | #else | ||
53 | #define TCR_KASAN_FLAGS 0 | ||
54 | #endif | ||
55 | |||
50 | #define MAIR(attr, mt) ((attr) << ((mt) * 8)) | 56 | #define MAIR(attr, mt) ((attr) << ((mt) * 8)) |
51 | 57 | ||
52 | /* | 58 | /* |
@@ -449,7 +455,7 @@ ENTRY(__cpu_setup) | |||
449 | */ | 455 | */ |
450 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ | 456 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ |
451 | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ | 457 | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ |
452 | TCR_TBI0 | TCR_A1 | 458 | TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS |
453 | 459 | ||
454 | #ifdef CONFIG_ARM64_USER_VA_BITS_52 | 460 | #ifdef CONFIG_ARM64_USER_VA_BITS_52 |
455 | ldr_l x9, vabits_user | 461 | ldr_l x9, vabits_user |
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index dc07c078f9b8..66e597053488 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c | |||
@@ -71,7 +71,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
71 | ClearPageReserved(virt_to_page(start)); | 71 | ClearPageReserved(virt_to_page(start)); |
72 | init_page_count(virt_to_page(start)); | 72 | init_page_count(virt_to_page(start)); |
73 | free_page(start); | 73 | free_page(start); |
74 | totalram_pages++; | 74 | totalram_pages_inc(); |
75 | } | 75 | } |
76 | } | 76 | } |
77 | #endif | 77 | #endif |
@@ -88,7 +88,7 @@ void free_initmem(void) | |||
88 | ClearPageReserved(virt_to_page(addr)); | 88 | ClearPageReserved(virt_to_page(addr)); |
89 | init_page_count(virt_to_page(addr)); | 89 | init_page_count(virt_to_page(addr)); |
90 | free_page(addr); | 90 | free_page(addr); |
91 | totalram_pages++; | 91 | totalram_pages_inc(); |
92 | addr += PAGE_SIZE; | 92 | addr += PAGE_SIZE; |
93 | } | 93 | } |
94 | 94 | ||
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 0cf43bb13d6e..055382622f07 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -658,7 +658,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, | |||
658 | } | 658 | } |
659 | 659 | ||
660 | #ifdef CONFIG_MEMORY_HOTREMOVE | 660 | #ifdef CONFIG_MEMORY_HOTREMOVE |
661 | int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 661 | int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) |
662 | { | 662 | { |
663 | unsigned long start_pfn = start >> PAGE_SHIFT; | 663 | unsigned long start_pfn = start >> PAGE_SHIFT; |
664 | unsigned long nr_pages = size >> PAGE_SHIFT; | 664 | unsigned long nr_pages = size >> PAGE_SHIFT; |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 20394e52fe27..33cc6f676fa6 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -139,7 +139,8 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap * | |||
139 | } | 139 | } |
140 | 140 | ||
141 | #ifdef CONFIG_MEMORY_HOTREMOVE | 141 | #ifdef CONFIG_MEMORY_HOTREMOVE |
142 | int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 142 | int __meminit arch_remove_memory(int nid, u64 start, u64 size, |
143 | struct vmem_altmap *altmap) | ||
143 | { | 144 | { |
144 | unsigned long start_pfn = start >> PAGE_SHIFT; | 145 | unsigned long start_pfn = start >> PAGE_SHIFT; |
145 | unsigned long nr_pages = size >> PAGE_SHIFT; | 146 | unsigned long nr_pages = size >> PAGE_SHIFT; |
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 25427a48feae..e8d63a6a9002 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c | |||
@@ -208,7 +208,7 @@ static long cmm_alloc_pages(long nr) | |||
208 | 208 | ||
209 | pa->page[pa->index++] = addr; | 209 | pa->page[pa->index++] = addr; |
210 | loaned_pages++; | 210 | loaned_pages++; |
211 | totalram_pages--; | 211 | totalram_pages_dec(); |
212 | spin_unlock(&cmm_lock); | 212 | spin_unlock(&cmm_lock); |
213 | nr--; | 213 | nr--; |
214 | } | 214 | } |
@@ -247,7 +247,7 @@ static long cmm_free_pages(long nr) | |||
247 | free_page(addr); | 247 | free_page(addr); |
248 | loaned_pages--; | 248 | loaned_pages--; |
249 | nr--; | 249 | nr--; |
250 | totalram_pages++; | 250 | totalram_pages_inc(); |
251 | } | 251 | } |
252 | spin_unlock(&cmm_lock); | 252 | spin_unlock(&cmm_lock); |
253 | cmm_dbg("End request with %ld pages unfulfilled\n", nr); | 253 | cmm_dbg("End request with %ld pages unfulfilled\n", nr); |
@@ -291,7 +291,7 @@ static void cmm_get_mpp(void) | |||
291 | int rc; | 291 | int rc; |
292 | struct hvcall_mpp_data mpp_data; | 292 | struct hvcall_mpp_data mpp_data; |
293 | signed long active_pages_target, page_loan_request, target; | 293 | signed long active_pages_target, page_loan_request, target; |
294 | signed long total_pages = totalram_pages + loaned_pages; | 294 | signed long total_pages = totalram_pages() + loaned_pages; |
295 | signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE; | 295 | signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE; |
296 | 296 | ||
297 | rc = h_get_mpp(&mpp_data); | 297 | rc = h_get_mpp(&mpp_data); |
@@ -322,7 +322,7 @@ static void cmm_get_mpp(void) | |||
322 | 322 | ||
323 | cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n", | 323 | cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n", |
324 | page_loan_request, loaned_pages, loaned_pages_target, | 324 | page_loan_request, loaned_pages, loaned_pages_target, |
325 | oom_freed_pages, totalram_pages); | 325 | oom_freed_pages, totalram_pages()); |
326 | } | 326 | } |
327 | 327 | ||
328 | static struct notifier_block cmm_oom_nb = { | 328 | static struct notifier_block cmm_oom_nb = { |
@@ -581,7 +581,7 @@ static int cmm_mem_going_offline(void *arg) | |||
581 | free_page(pa_curr->page[idx]); | 581 | free_page(pa_curr->page[idx]); |
582 | freed++; | 582 | freed++; |
583 | loaned_pages--; | 583 | loaned_pages--; |
584 | totalram_pages++; | 584 | totalram_pages_inc(); |
585 | pa_curr->page[idx] = pa_last->page[--pa_last->index]; | 585 | pa_curr->page[idx] = pa_last->page[--pa_last->index]; |
586 | if (pa_last->index == 0) { | 586 | if (pa_last->index == 0) { |
587 | if (pa_curr == pa_last) | 587 | if (pa_curr == pa_last) |
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 363f6470d742..3b93ba0b5d8d 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c | |||
@@ -111,11 +111,12 @@ static void note_page(struct seq_file *m, struct pg_state *st, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | #ifdef CONFIG_KASAN | 113 | #ifdef CONFIG_KASAN |
114 | static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st) | 114 | static void note_kasan_early_shadow_page(struct seq_file *m, |
115 | struct pg_state *st) | ||
115 | { | 116 | { |
116 | unsigned int prot; | 117 | unsigned int prot; |
117 | 118 | ||
118 | prot = pte_val(*kasan_zero_pte) & | 119 | prot = pte_val(*kasan_early_shadow_pte) & |
119 | (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); | 120 | (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); |
120 | note_page(m, st, prot, 4); | 121 | note_page(m, st, prot, 4); |
121 | } | 122 | } |
@@ -154,8 +155,8 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, | |||
154 | int i; | 155 | int i; |
155 | 156 | ||
156 | #ifdef CONFIG_KASAN | 157 | #ifdef CONFIG_KASAN |
157 | if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) { | 158 | if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) { |
158 | note_kasan_zero_page(m, st); | 159 | note_kasan_early_shadow_page(m, st); |
159 | return; | 160 | return; |
160 | } | 161 | } |
161 | #endif | 162 | #endif |
@@ -185,8 +186,8 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, | |||
185 | int i; | 186 | int i; |
186 | 187 | ||
187 | #ifdef CONFIG_KASAN | 188 | #ifdef CONFIG_KASAN |
188 | if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) { | 189 | if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) { |
189 | note_kasan_zero_page(m, st); | 190 | note_kasan_early_shadow_page(m, st); |
190 | return; | 191 | return; |
191 | } | 192 | } |
192 | #endif | 193 | #endif |
@@ -215,8 +216,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, | |||
215 | int i; | 216 | int i; |
216 | 217 | ||
217 | #ifdef CONFIG_KASAN | 218 | #ifdef CONFIG_KASAN |
218 | if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) { | 219 | if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) { |
219 | note_kasan_zero_page(m, st); | 220 | note_kasan_early_shadow_page(m, st); |
220 | return; | 221 | return; |
221 | } | 222 | } |
222 | #endif | 223 | #endif |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 76d0708438e9..3e82f66d5c61 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -59,7 +59,7 @@ static void __init setup_zero_pages(void) | |||
59 | order = 7; | 59 | order = 7; |
60 | 60 | ||
61 | /* Limit number of empty zero pages for small memory sizes */ | 61 | /* Limit number of empty zero pages for small memory sizes */ |
62 | while (order > 2 && (totalram_pages >> 10) < (1UL << order)) | 62 | while (order > 2 && (totalram_pages() >> 10) < (1UL << order)) |
63 | order--; | 63 | order--; |
64 | 64 | ||
65 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 65 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
@@ -242,7 +242,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, | |||
242 | } | 242 | } |
243 | 243 | ||
244 | #ifdef CONFIG_MEMORY_HOTREMOVE | 244 | #ifdef CONFIG_MEMORY_HOTREMOVE |
245 | int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 245 | int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) |
246 | { | 246 | { |
247 | /* | 247 | /* |
248 | * There is no hardware or firmware interface which could trigger a | 248 | * There is no hardware or firmware interface which could trigger a |
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c index acb9645b762b..bac5c27d11fc 100644 --- a/arch/s390/mm/kasan_init.c +++ b/arch/s390/mm/kasan_init.c | |||
@@ -107,7 +107,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, | |||
107 | if (mode == POPULATE_ZERO_SHADOW && | 107 | if (mode == POPULATE_ZERO_SHADOW && |
108 | IS_ALIGNED(address, PGDIR_SIZE) && | 108 | IS_ALIGNED(address, PGDIR_SIZE) && |
109 | end - address >= PGDIR_SIZE) { | 109 | end - address >= PGDIR_SIZE) { |
110 | pgd_populate(&init_mm, pg_dir, kasan_zero_p4d); | 110 | pgd_populate(&init_mm, pg_dir, |
111 | kasan_early_shadow_p4d); | ||
111 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | 112 | address = (address + PGDIR_SIZE) & PGDIR_MASK; |
112 | continue; | 113 | continue; |
113 | } | 114 | } |
@@ -120,7 +121,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, | |||
120 | if (mode == POPULATE_ZERO_SHADOW && | 121 | if (mode == POPULATE_ZERO_SHADOW && |
121 | IS_ALIGNED(address, P4D_SIZE) && | 122 | IS_ALIGNED(address, P4D_SIZE) && |
122 | end - address >= P4D_SIZE) { | 123 | end - address >= P4D_SIZE) { |
123 | p4d_populate(&init_mm, p4_dir, kasan_zero_pud); | 124 | p4d_populate(&init_mm, p4_dir, |
125 | kasan_early_shadow_pud); | ||
124 | address = (address + P4D_SIZE) & P4D_MASK; | 126 | address = (address + P4D_SIZE) & P4D_MASK; |
125 | continue; | 127 | continue; |
126 | } | 128 | } |
@@ -133,7 +135,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, | |||
133 | if (mode == POPULATE_ZERO_SHADOW && | 135 | if (mode == POPULATE_ZERO_SHADOW && |
134 | IS_ALIGNED(address, PUD_SIZE) && | 136 | IS_ALIGNED(address, PUD_SIZE) && |
135 | end - address >= PUD_SIZE) { | 137 | end - address >= PUD_SIZE) { |
136 | pud_populate(&init_mm, pu_dir, kasan_zero_pmd); | 138 | pud_populate(&init_mm, pu_dir, |
139 | kasan_early_shadow_pmd); | ||
137 | address = (address + PUD_SIZE) & PUD_MASK; | 140 | address = (address + PUD_SIZE) & PUD_MASK; |
138 | continue; | 141 | continue; |
139 | } | 142 | } |
@@ -146,7 +149,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, | |||
146 | if (mode == POPULATE_ZERO_SHADOW && | 149 | if (mode == POPULATE_ZERO_SHADOW && |
147 | IS_ALIGNED(address, PMD_SIZE) && | 150 | IS_ALIGNED(address, PMD_SIZE) && |
148 | end - address >= PMD_SIZE) { | 151 | end - address >= PMD_SIZE) { |
149 | pmd_populate(&init_mm, pm_dir, kasan_zero_pte); | 152 | pmd_populate(&init_mm, pm_dir, |
153 | kasan_early_shadow_pte); | ||
150 | address = (address + PMD_SIZE) & PMD_MASK; | 154 | address = (address + PMD_SIZE) & PMD_MASK; |
151 | continue; | 155 | continue; |
152 | } | 156 | } |
@@ -188,7 +192,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, | |||
188 | pte_val(*pt_dir) = __pa(page) | pgt_prot; | 192 | pte_val(*pt_dir) = __pa(page) | pgt_prot; |
189 | break; | 193 | break; |
190 | case POPULATE_ZERO_SHADOW: | 194 | case POPULATE_ZERO_SHADOW: |
191 | page = kasan_zero_page; | 195 | page = kasan_early_shadow_page; |
192 | pte_val(*pt_dir) = __pa(page) | pgt_prot_zero; | 196 | pte_val(*pt_dir) = __pa(page) | pgt_prot_zero; |
193 | break; | 197 | break; |
194 | } | 198 | } |
@@ -256,14 +260,14 @@ void __init kasan_early_init(void) | |||
256 | unsigned long vmax; | 260 | unsigned long vmax; |
257 | unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); | 261 | unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); |
258 | pte_t pte_z; | 262 | pte_t pte_z; |
259 | pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY); | 263 | pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY); |
260 | pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY); | 264 | pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY); |
261 | p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY); | 265 | p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY); |
262 | 266 | ||
263 | kasan_early_detect_facilities(); | 267 | kasan_early_detect_facilities(); |
264 | if (!has_nx) | 268 | if (!has_nx) |
265 | pgt_prot &= ~_PAGE_NOEXEC; | 269 | pgt_prot &= ~_PAGE_NOEXEC; |
266 | pte_z = __pte(__pa(kasan_zero_page) | pgt_prot); | 270 | pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot); |
267 | 271 | ||
268 | memsize = get_mem_detect_end(); | 272 | memsize = get_mem_detect_end(); |
269 | if (!memsize) | 273 | if (!memsize) |
@@ -292,10 +296,13 @@ void __init kasan_early_init(void) | |||
292 | } | 296 | } |
293 | 297 | ||
294 | /* init kasan zero shadow */ | 298 | /* init kasan zero shadow */ |
295 | crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z)); | 299 | crst_table_init((unsigned long *)kasan_early_shadow_p4d, |
296 | crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z)); | 300 | p4d_val(p4d_z)); |
297 | crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z)); | 301 | crst_table_init((unsigned long *)kasan_early_shadow_pud, |
298 | memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE); | 302 | pud_val(pud_z)); |
303 | crst_table_init((unsigned long *)kasan_early_shadow_pmd, | ||
304 | pmd_val(pmd_z)); | ||
305 | memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE); | ||
299 | 306 | ||
300 | shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT; | 307 | shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT; |
301 | pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE); | 308 | pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE); |
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c index 0a39c241628a..346eda7a2ef6 100644 --- a/arch/sh/boards/board-apsh4a3a.c +++ b/arch/sh/boards/board-apsh4a3a.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * ALPHAPROJECT AP-SH4A-3A Support. | 3 | * ALPHAPROJECT AP-SH4A-3A Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. | 5 | * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. |
5 | * Copyright (C) 2008 Yoshihiro Shimoda | 6 | * Copyright (C) 2008 Yoshihiro Shimoda |
6 | * Copyright (C) 2009 Paul Mundt | 7 | * Copyright (C) 2009 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c index 92eac3a99187..4efa9c571f64 100644 --- a/arch/sh/boards/board-apsh4ad0a.c +++ b/arch/sh/boards/board-apsh4ad0a.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * ALPHAPROJECT AP-SH4AD-0A Support. | 3 | * ALPHAPROJECT AP-SH4AD-0A Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. | 5 | * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. |
5 | * Copyright (C) 2010 Matt Fleming | 6 | * Copyright (C) 2010 Matt Fleming |
6 | * Copyright (C) 2010 Paul Mundt | 7 | * Copyright (C) 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c index bab5b9513904..0fbe91cba67a 100644 --- a/arch/sh/boards/board-edosk7760.c +++ b/arch/sh/boards/board-edosk7760.c | |||
@@ -1,22 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * Renesas Europe EDOSK7760 Board Support | 3 | * Renesas Europe EDOSK7760 Board Support |
3 | * | 4 | * |
4 | * Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd. | 5 | * Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd. |
5 | * Author: Luca Santini <luca.santini@spesonline.com> | 6 | * Author: Luca Santini <luca.santini@spesonline.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | 7 | */ |
21 | #include <linux/init.h> | 8 | #include <linux/init.h> |
22 | #include <linux/types.h> | 9 | #include <linux/types.h> |
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c index 4d6be53058d6..f478fee3b48a 100644 --- a/arch/sh/boards/board-espt.c +++ b/arch/sh/boards/board-espt.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Data Technology Inc. ESPT-GIGA board support | 3 | * Data Technology Inc. ESPT-GIGA board support |
3 | * | 4 | * |
4 | * Copyright (C) 2008, 2009 Renesas Solutions Corp. | 5 | * Copyright (C) 2008, 2009 Renesas Solutions Corp. |
5 | * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 6 | * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c index 20500858b56c..56bd386ff3b0 100644 --- a/arch/sh/boards/board-magicpanelr2.c +++ b/arch/sh/boards/board-magicpanelr2.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/magicpanel/setup.c | 3 | * linux/arch/sh/boards/magicpanel/setup.c |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Markus Brunner, Mark Jonas | 5 | * Copyright (C) 2007 Markus Brunner, Mark Jonas |
5 | * | 6 | * |
6 | * Magic Panel Release 2 board setup | 7 | * Magic Panel Release 2 board setup |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 1bde08dc067d..c32b4c6229d3 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas R0P7757LC0012RL Support. | 3 | * Renesas R0P7757LC0012RL Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2009 - 2010 Renesas Solutions Corp. | 5 | * Copyright (C) 2009 - 2010 Renesas Solutions Corp. |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index 3cba60ff7aab..d964c4d6b139 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Corp. R0P7785LC0011RL Support. | 3 | * Renesas Technology Corp. R0P7785LC0011RL Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Yoshihiro Shimoda | 5 | * Copyright (C) 2008 Yoshihiro Shimoda |
5 | * Copyright (C) 2009 Paul Mundt | 6 | * Copyright (C) 2009 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/board-titan.c b/arch/sh/boards/board-titan.c index 94c36c7bc0b3..074a848d8b56 100644 --- a/arch/sh/boards/board-titan.c +++ b/arch/sh/boards/board-titan.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/titan/setup.c - Setup for Titan | 3 | * arch/sh/boards/titan/setup.c - Setup for Titan |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Jamie Lenehan | 5 | * Copyright (C) 2006 Jamie Lenehan |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c index b52abcc5259a..799af57c0b81 100644 --- a/arch/sh/boards/board-urquell.c +++ b/arch/sh/boards/board-urquell.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Corp. SH7786 Urquell Support. | 3 | * Renesas Technology Corp. SH7786 Urquell Support. |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Based on board-sh7785lcr.c | 8 | * Based on board-sh7785lcr.c |
8 | * Copyright (C) 2008 Yoshihiro Shimoda | 9 | * Copyright (C) 2008 Yoshihiro Shimoda |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-ap325rxa/Makefile b/arch/sh/boards/mach-ap325rxa/Makefile index 4cf1774d2613..dba5d0c20261 100644 --- a/arch/sh/boards/mach-ap325rxa/Makefile +++ b/arch/sh/boards/mach-ap325rxa/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o sdram.o | 2 | obj-y := setup.o sdram.o |
2 | 3 | ||
diff --git a/arch/sh/boards/mach-ap325rxa/sdram.S b/arch/sh/boards/mach-ap325rxa/sdram.S index db24fbed4fca..541c82cc30b1 100644 --- a/arch/sh/boards/mach-ap325rxa/sdram.S +++ b/arch/sh/boards/mach-ap325rxa/sdram.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * AP325RXA sdram self/auto-refresh setup code | 3 | * AP325RXA sdram self/auto-refresh setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
diff --git a/arch/sh/boards/mach-cayman/Makefile b/arch/sh/boards/mach-cayman/Makefile index 00fa3eaecb1b..775a4be57434 100644 --- a/arch/sh/boards/mach-cayman/Makefile +++ b/arch/sh/boards/mach-cayman/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Hitachi Cayman specific parts of the kernel | 3 | # Makefile for the Hitachi Cayman specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c index 724e8b7271f4..9108789fafef 100644 --- a/arch/sh/boards/mach-cayman/irq.c +++ b/arch/sh/boards/mach-cayman/irq.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support | 3 | * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support |
3 | * | 4 | * |
4 | * This file handles the board specific parts of the Cayman interrupt system | 5 | * This file handles the board specific parts of the Cayman interrupt system |
5 | * | 6 | * |
6 | * Copyright (C) 2002 Stuart Menefy | 7 | * Copyright (C) 2002 Stuart Menefy |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/io.h> | 9 | #include <linux/io.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-cayman/panic.c b/arch/sh/boards/mach-cayman/panic.c index d1e67306d07c..cfc46314e7d9 100644 --- a/arch/sh/boards/mach-cayman/panic.c +++ b/arch/sh/boards/mach-cayman/panic.c | |||
@@ -1,9 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2003 Richard Curnow, SuperH UK Limited | 3 | * Copyright (C) 2003 Richard Curnow, SuperH UK Limited |
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | 4 | */ |
8 | 5 | ||
9 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c index 9c292c27e0d7..4cec14700adc 100644 --- a/arch/sh/boards/mach-cayman/setup.c +++ b/arch/sh/boards/mach-cayman/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/mach-cayman/setup.c | 3 | * arch/sh/mach-cayman/setup.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2002 David J. Mckay & Benedict Gaster | 7 | * Copyright (C) 2002 David J. Mckay & Benedict Gaster |
7 | * Copyright (C) 2003 - 2007 Paul Mundt | 8 | * Copyright (C) 2003 - 2007 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/io.h> | 11 | #include <linux/io.h> |
diff --git a/arch/sh/boards/mach-dreamcast/Makefile b/arch/sh/boards/mach-dreamcast/Makefile index 62b024bc2a3e..37b2452206aa 100644 --- a/arch/sh/boards/mach-dreamcast/Makefile +++ b/arch/sh/boards/mach-dreamcast/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Sega Dreamcast specific parts of the kernel | 3 | # Makefile for the Sega Dreamcast specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c index 2789647abebe..a929f764ae04 100644 --- a/arch/sh/boards/mach-dreamcast/irq.c +++ b/arch/sh/boards/mach-dreamcast/irq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/dreamcast/irq.c | 3 | * arch/sh/boards/dreamcast/irq.c |
3 | * | 4 | * |
@@ -6,7 +7,6 @@ | |||
6 | * Copyright (c) 2001, 2002 M. R. Brown <mrbrown@0xd6.org> | 7 | * Copyright (c) 2001, 2002 M. R. Brown <mrbrown@0xd6.org> |
7 | * | 8 | * |
8 | * This file is part of the LinuxDC project (www.linuxdc.org) | 9 | * This file is part of the LinuxDC project (www.linuxdc.org) |
9 | * Released under the terms of the GNU GPL v2.0 | ||
10 | */ | 10 | */ |
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
diff --git a/arch/sh/boards/mach-dreamcast/rtc.c b/arch/sh/boards/mach-dreamcast/rtc.c index 0eb12c45fa59..7873cd27e4e0 100644 --- a/arch/sh/boards/mach-dreamcast/rtc.c +++ b/arch/sh/boards/mach-dreamcast/rtc.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/dreamcast/rtc.c | 3 | * arch/sh/boards/dreamcast/rtc.c |
3 | * | 4 | * |
@@ -5,9 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (c) 2001, 2002 M. R. Brown <mrbrown@0xd6.org> | 7 | * Copyright (c) 2001, 2002 M. R. Brown <mrbrown@0xd6.org> |
7 | * Copyright (c) 2002 Paul Mundt <lethal@chaoticdreams.org> | 8 | * Copyright (c) 2002 Paul Mundt <lethal@chaoticdreams.org> |
8 | * | ||
9 | * Released under the terms of the GNU GPL v2.0. | ||
10 | * | ||
11 | */ | 9 | */ |
12 | 10 | ||
13 | #include <linux/time.h> | 11 | #include <linux/time.h> |
diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c index 672c2ad8f8d5..2d966c1c2cc1 100644 --- a/arch/sh/boards/mach-dreamcast/setup.c +++ b/arch/sh/boards/mach-dreamcast/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/dreamcast/setup.c | 3 | * arch/sh/boards/dreamcast/setup.c |
3 | * | 4 | * |
@@ -8,8 +9,6 @@ | |||
8 | * | 9 | * |
9 | * This file is part of the LinuxDC project (www.linuxdc.org) | 10 | * This file is part of the LinuxDC project (www.linuxdc.org) |
10 | * | 11 | * |
11 | * Released under the terms of the GNU GPL v2.0. | ||
12 | * | ||
13 | * This file originally bore the message (with enclosed-$): | 12 | * This file originally bore the message (with enclosed-$): |
14 | * Id: setup_dc.c,v 1.5 2001/05/24 05:09:16 mrbrown Exp | 13 | * Id: setup_dc.c,v 1.5 2001/05/24 05:09:16 mrbrown Exp |
15 | * SEGA Dreamcast support | 14 | * SEGA Dreamcast support |
diff --git a/arch/sh/boards/mach-ecovec24/Makefile b/arch/sh/boards/mach-ecovec24/Makefile index e69bc82208fc..d78d4904ddee 100644 --- a/arch/sh/boards/mach-ecovec24/Makefile +++ b/arch/sh/boards/mach-ecovec24/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the R0P7724LC0011/21RL (EcoVec) | 3 | # Makefile for the R0P7724LC0011/21RL (EcoVec) |
3 | # | 4 | # |
@@ -6,4 +7,4 @@ | |||
6 | # for more details. | 7 | # for more details. |
7 | # | 8 | # |
8 | 9 | ||
9 | obj-y := setup.o sdram.o \ No newline at end of file | 10 | obj-y := setup.o sdram.o |
diff --git a/arch/sh/boards/mach-ecovec24/sdram.S b/arch/sh/boards/mach-ecovec24/sdram.S index 3963c6f23d52..d2f269169abb 100644 --- a/arch/sh/boards/mach-ecovec24/sdram.S +++ b/arch/sh/boards/mach-ecovec24/sdram.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Ecovec24 sdram self/auto-refresh setup code | 3 | * Ecovec24 sdram self/auto-refresh setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 058b168bdf26..22b4106b8084 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2009 Renesas Solutions Corp. | 3 | * Copyright (C) 2009 Renesas Solutions Corp. |
3 | * | 4 | * |
4 | * Kuninori Morimoto <morimoto.kuninori@renesas.com> | 5 | * Kuninori Morimoto <morimoto.kuninori@renesas.com> |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <asm/clock.h> | 7 | #include <asm/clock.h> |
11 | #include <asm/heartbeat.h> | 8 | #include <asm/heartbeat.h> |
diff --git a/arch/sh/boards/mach-highlander/irq-r7780mp.c b/arch/sh/boards/mach-highlander/irq-r7780mp.c index 9893fd3a1358..f46637377b6a 100644 --- a/arch/sh/boards/mach-highlander/irq-r7780mp.c +++ b/arch/sh/boards/mach-highlander/irq-r7780mp.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Solutions Highlander R7780MP Support. | 3 | * Renesas Solutions Highlander R7780MP Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. | 5 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. |
5 | * Copyright (C) 2006 Paul Mundt | 6 | * Copyright (C) 2006 Paul Mundt |
6 | * Copyright (C) 2007 Magnus Damm | 7 | * Copyright (C) 2007 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-highlander/irq-r7780rp.c b/arch/sh/boards/mach-highlander/irq-r7780rp.c index 0805b2151452..c61177e8724b 100644 --- a/arch/sh/boards/mach-highlander/irq-r7780rp.c +++ b/arch/sh/boards/mach-highlander/irq-r7780rp.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Solutions Highlander R7780RP-1 Support. | 3 | * Renesas Solutions Highlander R7780RP-1 Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. | 5 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. |
5 | * Copyright (C) 2006 Paul Mundt | 6 | * Copyright (C) 2006 Paul Mundt |
6 | * Copyright (C) 2008 Magnus Damm | 7 | * Copyright (C) 2008 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-highlander/irq-r7785rp.c b/arch/sh/boards/mach-highlander/irq-r7785rp.c index 558b24862776..0ebebbed0d63 100644 --- a/arch/sh/boards/mach-highlander/irq-r7785rp.c +++ b/arch/sh/boards/mach-highlander/irq-r7785rp.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Solutions Highlander R7785RP Support. | 3 | * Renesas Solutions Highlander R7785RP Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. | 5 | * Copyright (C) 2002 Atom Create Engineering Co., Ltd. |
5 | * Copyright (C) 2006 - 2008 Paul Mundt | 6 | * Copyright (C) 2006 - 2008 Paul Mundt |
6 | * Copyright (C) 2007 Magnus Damm | 7 | * Copyright (C) 2007 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-highlander/pinmux-r7785rp.c b/arch/sh/boards/mach-highlander/pinmux-r7785rp.c index c77a2bea8f2a..703179faf652 100644 --- a/arch/sh/boards/mach-highlander/pinmux-r7785rp.c +++ b/arch/sh/boards/mach-highlander/pinmux-r7785rp.c | |||
@@ -1,9 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2008 Paul Mundt | 3 | * Copyright (C) 2008 Paul Mundt |
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | 4 | */ |
8 | #include <linux/init.h> | 5 | #include <linux/init.h> |
9 | #include <linux/gpio.h> | 6 | #include <linux/gpio.h> |
diff --git a/arch/sh/boards/mach-highlander/psw.c b/arch/sh/boards/mach-highlander/psw.c index 40e2b585d488..d445c54f74e4 100644 --- a/arch/sh/boards/mach-highlander/psw.c +++ b/arch/sh/boards/mach-highlander/psw.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/renesas/r7780rp/psw.c | 3 | * arch/sh/boards/renesas/r7780rp/psw.c |
3 | * | 4 | * |
4 | * push switch support for RDBRP-1/RDBREVRP-1 debug boards. | 5 | * push switch support for RDBRP-1/RDBREVRP-1 debug boards. |
5 | * | 6 | * |
6 | * Copyright (C) 2006 Paul Mundt | 7 | * Copyright (C) 2006 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/io.h> | 9 | #include <linux/io.h> |
13 | #include <linux/module.h> | 10 | #include <linux/module.h> |
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c index 4a52590fe3d8..533393d779c2 100644 --- a/arch/sh/boards/mach-highlander/setup.c +++ b/arch/sh/boards/mach-highlander/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/renesas/r7780rp/setup.c | 3 | * arch/sh/boards/renesas/r7780rp/setup.c |
3 | * | 4 | * |
@@ -8,10 +9,6 @@ | |||
8 | * | 9 | * |
9 | * This contains support for the R7780RP-1, R7780MP, and R7785RP | 10 | * This contains support for the R7780RP-1, R7780MP, and R7785RP |
10 | * Highlander modules. | 11 | * Highlander modules. |
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | 12 | */ |
16 | #include <linux/init.h> | 13 | #include <linux/init.h> |
17 | #include <linux/io.h> | 14 | #include <linux/io.h> |
diff --git a/arch/sh/boards/mach-hp6xx/Makefile b/arch/sh/boards/mach-hp6xx/Makefile index b3124278247c..4b0fe29e5612 100644 --- a/arch/sh/boards/mach-hp6xx/Makefile +++ b/arch/sh/boards/mach-hp6xx/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the HP6xx specific parts of the kernel | 3 | # Makefile for the HP6xx specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c index 865d8d6e823f..e5c4c7d34139 100644 --- a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c +++ b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c | |||
@@ -1,11 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * bios-less APM driver for hp680 | 3 | * bios-less APM driver for hp680 |
3 | * | 4 | * |
4 | * Copyright 2005 (c) Andriy Skulysh <askulysh@gmail.com> | 5 | * Copyright 2005 (c) Andriy Skulysh <askulysh@gmail.com> |
5 | * Copyright 2008 (c) Kristoffer Ericson <kristoffer.ericson@gmail.com> | 6 | * Copyright 2008 (c) Kristoffer Ericson <kristoffer.ericson@gmail.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License. | ||
9 | */ | 7 | */ |
10 | #include <linux/module.h> | 8 | #include <linux/module.h> |
11 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c index 8b50cf763c06..fe505ec168d0 100644 --- a/arch/sh/boards/mach-hp6xx/pm.c +++ b/arch/sh/boards/mach-hp6xx/pm.c | |||
@@ -1,10 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * hp6x0 Power Management Routines | 3 | * hp6x0 Power Management Routines |
3 | * | 4 | * |
4 | * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> | 5 | * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License. | ||
8 | */ | 6 | */ |
9 | #include <linux/init.h> | 7 | #include <linux/init.h> |
10 | #include <linux/suspend.h> | 8 | #include <linux/suspend.h> |
diff --git a/arch/sh/boards/mach-hp6xx/pm_wakeup.S b/arch/sh/boards/mach-hp6xx/pm_wakeup.S index 4f18d44e0541..0fd43301f083 100644 --- a/arch/sh/boards/mach-hp6xx/pm_wakeup.S +++ b/arch/sh/boards/mach-hp6xx/pm_wakeup.S | |||
@@ -1,10 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | 2 | * |
3 | * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> | ||
8 | */ | 4 | */ |
9 | 5 | ||
10 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
diff --git a/arch/sh/boards/mach-hp6xx/setup.c b/arch/sh/boards/mach-hp6xx/setup.c index 05797b33f68e..2ceead68d7bf 100644 --- a/arch/sh/boards/mach-hp6xx/setup.c +++ b/arch/sh/boards/mach-hp6xx/setup.c | |||
@@ -1,12 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/hp6xx/setup.c | 3 | * linux/arch/sh/boards/hp6xx/setup.c |
3 | * | 4 | * |
4 | * Copyright (C) 2002 Andriy Skulysh | 5 | * Copyright (C) 2002 Andriy Skulysh |
5 | * Copyright (C) 2007 Kristoffer Ericson <Kristoffer_e1@hotmail.com> | 6 | * Copyright (C) 2007 Kristoffer Ericson <Kristoffer_e1@hotmail.com> |
6 | * | 7 | * |
7 | * May be copied or modified under the terms of the GNU General Public | ||
8 | * License. See linux/COPYING for more information. | ||
9 | * | ||
10 | * Setup code for HP620/HP660/HP680/HP690 (internal peripherials only) | 8 | * Setup code for HP620/HP660/HP680/HP690 (internal peripherials only) |
11 | */ | 9 | */ |
12 | #include <linux/types.h> | 10 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-kfr2r09/Makefile b/arch/sh/boards/mach-kfr2r09/Makefile index 60dd63f4a427..4a4a35ad7ba0 100644 --- a/arch/sh/boards/mach-kfr2r09/Makefile +++ b/arch/sh/boards/mach-kfr2r09/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o sdram.o | 2 | obj-y := setup.o sdram.o |
2 | ifneq ($(CONFIG_FB_SH_MOBILE_LCDC),) | 3 | ifneq ($(CONFIG_FB_SH_MOBILE_LCDC),) |
3 | obj-y += lcd_wqvga.o | 4 | obj-y += lcd_wqvga.o |
diff --git a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c index 355a78a3b313..f6bbac106d13 100644 --- a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c +++ b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * KFR2R09 LCD panel support | 3 | * KFR2R09 LCD panel support |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Register settings based on the out-of-tree t33fb.c driver | 7 | * Register settings based on the out-of-tree t33fb.c driver |
7 | * Copyright (C) 2008 Lineo Solutions, Inc. | 8 | * Copyright (C) 2008 Lineo Solutions, Inc. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file COPYING in the main directory of this archive for | ||
11 | * more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
diff --git a/arch/sh/boards/mach-kfr2r09/sdram.S b/arch/sh/boards/mach-kfr2r09/sdram.S index 0c9f55bec2fe..f1b8985cb922 100644 --- a/arch/sh/boards/mach-kfr2r09/sdram.S +++ b/arch/sh/boards/mach-kfr2r09/sdram.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * KFR2R09 sdram self/auto-refresh setup code | 3 | * KFR2R09 sdram self/auto-refresh setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index e59c577ed871..203d249a0a2b 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/memblock.h> | 25 | #include <linux/memblock.h> |
26 | #include <linux/mfd/tmio.h> | 26 | #include <linux/mfd/tmio.h> |
27 | #include <linux/mmc/host.h> | 27 | #include <linux/mmc/host.h> |
28 | #include <linux/mtd/onenand.h> | ||
29 | #include <linux/mtd/physmap.h> | 28 | #include <linux/mtd/physmap.h> |
30 | #include <linux/platform_data/lv5207lp.h> | 29 | #include <linux/platform_data/lv5207lp.h> |
31 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
@@ -478,7 +477,7 @@ extern char kfr2r09_sdram_leave_end; | |||
478 | 477 | ||
479 | static int __init kfr2r09_devices_setup(void) | 478 | static int __init kfr2r09_devices_setup(void) |
480 | { | 479 | { |
481 | static struct clk *camera_clk; | 480 | struct clk *camera_clk; |
482 | 481 | ||
483 | /* register board specific self-refresh code */ | 482 | /* register board specific self-refresh code */ |
484 | sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | | 483 | sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | |
diff --git a/arch/sh/boards/mach-landisk/Makefile b/arch/sh/boards/mach-landisk/Makefile index a696b4277fa9..6cba041fffe0 100644 --- a/arch/sh/boards/mach-landisk/Makefile +++ b/arch/sh/boards/mach-landisk/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for I-O DATA DEVICE, INC. "LANDISK Series" | 3 | # Makefile for I-O DATA DEVICE, INC. "LANDISK Series" |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c index 32c317f5d991..1c0da99dfc60 100644 --- a/arch/sh/boards/mach-landisk/gio.c +++ b/arch/sh/boards/mach-landisk/gio.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/landisk/gio.c - driver for landisk | 3 | * arch/sh/boards/landisk/gio.c - driver for landisk |
3 | * | 4 | * |
@@ -6,11 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copylight (C) 2006 kogiidena | 8 | * Copylight (C) 2006 kogiidena |
8 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. * | 9 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. * |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | * | ||
14 | */ | 10 | */ |
15 | #include <linux/module.h> | 11 | #include <linux/module.h> |
16 | #include <linux/init.h> | 12 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c index c00ace38db3f..29b8b1f85246 100644 --- a/arch/sh/boards/mach-landisk/irq.c +++ b/arch/sh/boards/mach-landisk/irq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/mach-landisk/irq.c | 3 | * arch/sh/boards/mach-landisk/irq.c |
3 | * | 4 | * |
@@ -8,10 +9,6 @@ | |||
8 | * | 9 | * |
9 | * Copyright (C) 2001 Ian da Silva, Jeremy Siegel | 10 | * Copyright (C) 2001 Ian da Silva, Jeremy Siegel |
10 | * Based largely on io_se.c. | 11 | * Based largely on io_se.c. |
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | 12 | */ |
16 | 13 | ||
17 | #include <linux/init.h> | 14 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-landisk/psw.c b/arch/sh/boards/mach-landisk/psw.c index 5192b1f43ada..e171d9af48f3 100644 --- a/arch/sh/boards/mach-landisk/psw.c +++ b/arch/sh/boards/mach-landisk/psw.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/landisk/psw.c | 3 | * arch/sh/boards/landisk/psw.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2006-2007 Paul Mundt | 7 | * Copyright (C) 2006-2007 Paul Mundt |
7 | * Copyright (C) 2007 kogiidena | 8 | * Copyright (C) 2007 kogiidena |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/io.h> | 10 | #include <linux/io.h> |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index f1147caebacf..16b4d8b0bb85 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/landisk/setup.c | 3 | * arch/sh/boards/landisk/setup.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * Copyright (C) 2002 Paul Mundt | 8 | * Copyright (C) 2002 Paul Mundt |
8 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. | 9 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. |
9 | * Copyright (C) 2005-2007 kogiidena | 10 | * Copyright (C) 2005-2007 kogiidena |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-lboxre2/Makefile b/arch/sh/boards/mach-lboxre2/Makefile index e9ed140c06f6..0fbd0822911a 100644 --- a/arch/sh/boards/mach-lboxre2/Makefile +++ b/arch/sh/boards/mach-lboxre2/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the L-BOX RE2 specific parts of the kernel | 3 | # Makefile for the L-BOX RE2 specific parts of the kernel |
3 | # Copyright (c) 2007 Nobuhiro Iwamatsu | 4 | # Copyright (c) 2007 Nobuhiro Iwamatsu |
diff --git a/arch/sh/boards/mach-lboxre2/irq.c b/arch/sh/boards/mach-lboxre2/irq.c index 8aa171ab833e..a250e3b9019d 100644 --- a/arch/sh/boards/mach-lboxre2/irq.c +++ b/arch/sh/boards/mach-lboxre2/irq.c | |||
@@ -1,14 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/lboxre2/irq.c | 3 | * linux/arch/sh/boards/lboxre2/irq.c |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 5 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
5 | * | 6 | * |
6 | * NTT COMWARE L-BOX RE2 Support. | 7 | * NTT COMWARE L-BOX RE2 Support. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | */ | 8 | */ |
13 | #include <linux/init.h> | 9 | #include <linux/init.h> |
14 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c index 6660622aa457..20d01b430f2a 100644 --- a/arch/sh/boards/mach-lboxre2/setup.c +++ b/arch/sh/boards/mach-lboxre2/setup.c | |||
@@ -1,14 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/lbox/setup.c | 3 | * linux/arch/sh/boards/lbox/setup.c |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 5 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
5 | * | 6 | * |
6 | * NTT COMWARE L-BOX RE2 Support | 7 | * NTT COMWARE L-BOX RE2 Support |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | */ | 8 | */ |
13 | 9 | ||
14 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-microdev/Makefile b/arch/sh/boards/mach-microdev/Makefile index 4e3588e8806b..05c5698dcad0 100644 --- a/arch/sh/boards/mach-microdev/Makefile +++ b/arch/sh/boards/mach-microdev/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the SuperH MicroDev specific parts of the kernel | 3 | # Makefile for the SuperH MicroDev specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-microdev/fdc37c93xapm.c b/arch/sh/boards/mach-microdev/fdc37c93xapm.c index 458a7cf5fb46..2a04f72dd145 100644 --- a/arch/sh/boards/mach-microdev/fdc37c93xapm.c +++ b/arch/sh/boards/mach-microdev/fdc37c93xapm.c | |||
@@ -1,5 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * | ||
3 | * Setup for the SMSC FDC37C93xAPM | 3 | * Setup for the SMSC FDC37C93xAPM |
4 | * | 4 | * |
5 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) | 5 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) |
@@ -7,9 +7,6 @@ | |||
7 | * Copyright (C) 2004, 2005 Paul Mundt | 7 | * Copyright (C) 2004, 2005 Paul Mundt |
8 | * | 8 | * |
9 | * SuperH SH4-202 MicroDev board support. | 9 | * SuperH SH4-202 MicroDev board support. |
10 | * | ||
11 | * May be copied or modified under the terms of the GNU General Public | ||
12 | * License. See linux/COPYING for more information. | ||
13 | */ | 10 | */ |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
15 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c index acdafb0c6404..a76c12721e63 100644 --- a/arch/sh/boards/mach-microdev/io.c +++ b/arch/sh/boards/mach-microdev/io.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/superh/microdev/io.c | 3 | * linux/arch/sh/boards/superh/microdev/io.c |
3 | * | 4 | * |
@@ -6,9 +7,6 @@ | |||
6 | * Copyright (C) 2004 Paul Mundt | 7 | * Copyright (C) 2004 Paul Mundt |
7 | * | 8 | * |
8 | * SuperH SH4-202 MicroDev board support. | 9 | * SuperH SH4-202 MicroDev board support. |
9 | * | ||
10 | * May be copied or modified under the terms of the GNU General Public | ||
11 | * License. See linux/COPYING for more information. | ||
12 | */ | 10 | */ |
13 | 11 | ||
14 | #include <linux/init.h> | 12 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c index 9a8aff339619..dc27492c83d7 100644 --- a/arch/sh/boards/mach-microdev/irq.c +++ b/arch/sh/boards/mach-microdev/irq.c | |||
@@ -1,12 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/superh/microdev/irq.c | 3 | * arch/sh/boards/superh/microdev/irq.c |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) | 5 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) |
5 | * | 6 | * |
6 | * SuperH SH4-202 MicroDev board support. | 7 | * SuperH SH4-202 MicroDev board support. |
7 | * | ||
8 | * May be copied or modified under the terms of the GNU General Public | ||
9 | * License. See linux/COPYING for more information. | ||
10 | */ | 8 | */ |
11 | 9 | ||
12 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c index 6c66ee4d842b..706b48f797be 100644 --- a/arch/sh/boards/mach-microdev/setup.c +++ b/arch/sh/boards/mach-microdev/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/superh/microdev/setup.c | 3 | * arch/sh/boards/superh/microdev/setup.c |
3 | * | 4 | * |
@@ -6,9 +7,6 @@ | |||
6 | * Copyright (C) 2004, 2005 Paul Mundt | 7 | * Copyright (C) 2004, 2005 Paul Mundt |
7 | * | 8 | * |
8 | * SuperH SH4-202 MicroDev board support. | 9 | * SuperH SH4-202 MicroDev board support. |
9 | * | ||
10 | * May be copied or modified under the terms of the GNU General Public | ||
11 | * License. See linux/COPYING for more information. | ||
12 | */ | 10 | */ |
13 | #include <linux/init.h> | 11 | #include <linux/init.h> |
14 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-migor/Makefile b/arch/sh/boards/mach-migor/Makefile index 4601a89e5ac7..c223d759fcb1 100644 --- a/arch/sh/boards/mach-migor/Makefile +++ b/arch/sh/boards/mach-migor/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o sdram.o | 2 | obj-y := setup.o sdram.o |
2 | obj-$(CONFIG_SH_MIGOR_QVGA) += lcd_qvga.o | 3 | obj-$(CONFIG_SH_MIGOR_QVGA) += lcd_qvga.o |
diff --git a/arch/sh/boards/mach-migor/lcd_qvga.c b/arch/sh/boards/mach-migor/lcd_qvga.c index 8bccd345b69c..4ebf130510bc 100644 --- a/arch/sh/boards/mach-migor/lcd_qvga.c +++ b/arch/sh/boards/mach-migor/lcd_qvga.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Support for SuperH MigoR Quarter VGA LCD Panel | 3 | * Support for SuperH MigoR Quarter VGA LCD Panel |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. | 7 | * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. |
7 | * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, | 8 | * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, |
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
diff --git a/arch/sh/boards/mach-migor/sdram.S b/arch/sh/boards/mach-migor/sdram.S index 614aa3a1398c..3a6bee1270aa 100644 --- a/arch/sh/boards/mach-migor/sdram.S +++ b/arch/sh/boards/mach-migor/sdram.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Migo-R sdram self/auto-refresh setup code | 3 | * Migo-R sdram self/auto-refresh setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
diff --git a/arch/sh/boards/mach-r2d/Makefile b/arch/sh/boards/mach-r2d/Makefile index 0d4c75a72be0..7e7ac5e05662 100644 --- a/arch/sh/boards/mach-r2d/Makefile +++ b/arch/sh/boards/mach-r2d/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the RTS7751R2D specific parts of the kernel | 3 | # Makefile for the RTS7751R2D specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c index 4b98a5251f83..3bc52f651d96 100644 --- a/arch/sh/boards/mach-r2d/setup.c +++ b/arch/sh/boards/mach-r2d/setup.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Sales RTS7751R2D Support. | 3 | * Renesas Technology Sales RTS7751R2D Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2002 - 2006 Atom Create Engineering Co., Ltd. | 5 | * Copyright (C) 2002 - 2006 Atom Create Engineering Co., Ltd. |
5 | * Copyright (C) 2004 - 2007 Paul Mundt | 6 | * Copyright (C) 2004 - 2007 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-rsk/Makefile b/arch/sh/boards/mach-rsk/Makefile index 6a4e1b538a62..43cca39a9fe6 100644 --- a/arch/sh/boards/mach-rsk/Makefile +++ b/arch/sh/boards/mach-rsk/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o | 2 | obj-y := setup.o |
2 | obj-$(CONFIG_SH_RSK7203) += devices-rsk7203.o | 3 | obj-$(CONFIG_SH_RSK7203) += devices-rsk7203.o |
3 | obj-$(CONFIG_SH_RSK7264) += devices-rsk7264.o | 4 | obj-$(CONFIG_SH_RSK7264) += devices-rsk7264.o |
diff --git a/arch/sh/boards/mach-rsk/devices-rsk7203.c b/arch/sh/boards/mach-rsk/devices-rsk7203.c index a8089f79d058..e6b05d4588b7 100644 --- a/arch/sh/boards/mach-rsk/devices-rsk7203.c +++ b/arch/sh/boards/mach-rsk/devices-rsk7203.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Europe RSK+ 7203 Support. | 3 | * Renesas Technology Europe RSK+ 7203 Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2008 - 2010 Paul Mundt | 5 | * Copyright (C) 2008 - 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/types.h> | 8 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-rsk/devices-rsk7264.c b/arch/sh/boards/mach-rsk/devices-rsk7264.c index 7251e37a842f..eaf700a20b83 100644 --- a/arch/sh/boards/mach-rsk/devices-rsk7264.c +++ b/arch/sh/boards/mach-rsk/devices-rsk7264.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * RSK+SH7264 Support. | 3 | * RSK+SH7264 Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe | 5 | * Copyright (C) 2012 Renesas Electronics Europe |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/types.h> | 8 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-rsk/devices-rsk7269.c b/arch/sh/boards/mach-rsk/devices-rsk7269.c index 4a544591d6f0..4b1e386b51dd 100644 --- a/arch/sh/boards/mach-rsk/devices-rsk7269.c +++ b/arch/sh/boards/mach-rsk/devices-rsk7269.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * RSK+SH7269 Support | 3 | * RSK+SH7269 Support |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe Ltd | 5 | * Copyright (C) 2012 Renesas Electronics Europe Ltd |
5 | * Copyright (C) 2012 Phil Edworthy | 6 | * Copyright (C) 2012 Phil Edworthy |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/types.h> | 9 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-rsk/setup.c b/arch/sh/boards/mach-rsk/setup.c index 6bc134bd7ec2..9370c4fdc41e 100644 --- a/arch/sh/boards/mach-rsk/setup.c +++ b/arch/sh/boards/mach-rsk/setup.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Europe RSK+ Support. | 3 | * Renesas Technology Europe RSK+ Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Paul Mundt | 5 | * Copyright (C) 2008 Paul Mundt |
5 | * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> | 6 | * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/types.h> | 9 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-sdk7780/Makefile b/arch/sh/boards/mach-sdk7780/Makefile index 3d8f0befc35d..37e857f9a55a 100644 --- a/arch/sh/boards/mach-sdk7780/Makefile +++ b/arch/sh/boards/mach-sdk7780/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the SDK7780 specific parts of the kernel | 3 | # Makefile for the SDK7780 specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-sdk7780/irq.c b/arch/sh/boards/mach-sdk7780/irq.c index e5f7564f2511..fa392f3dce26 100644 --- a/arch/sh/boards/mach-sdk7780/irq.c +++ b/arch/sh/boards/mach-sdk7780/irq.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/renesas/sdk7780/irq.c | 3 | * linux/arch/sh/boards/renesas/sdk7780/irq.c |
3 | * | 4 | * |
4 | * Renesas Technology Europe SDK7780 Support. | 5 | * Renesas Technology Europe SDK7780 Support. |
5 | * | 6 | * |
6 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> | 7 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-sdk7780/setup.c b/arch/sh/boards/mach-sdk7780/setup.c index 2241659c3299..482761b780e4 100644 --- a/arch/sh/boards/mach-sdk7780/setup.c +++ b/arch/sh/boards/mach-sdk7780/setup.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/renesas/sdk7780/setup.c | 3 | * arch/sh/boards/renesas/sdk7780/setup.c |
3 | * | 4 | * |
4 | * Renesas Solutions SH7780 SDK Support | 5 | * Renesas Solutions SH7780 SDK Support |
5 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> | 6 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/types.h> | 9 | #include <linux/types.h> |
diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile index 45d32e3590b9..731a87c694b3 100644 --- a/arch/sh/boards/mach-sdk7786/Makefile +++ b/arch/sh/boards/mach-sdk7786/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := fpga.o irq.o nmi.o setup.o | 2 | obj-y := fpga.o irq.o nmi.o setup.o |
2 | 3 | ||
3 | obj-$(CONFIG_GPIOLIB) += gpio.o | 4 | obj-$(CONFIG_GPIOLIB) += gpio.o |
diff --git a/arch/sh/boards/mach-sdk7786/fpga.c b/arch/sh/boards/mach-sdk7786/fpga.c index 3e4ec66a0417..6d2a3d381c2a 100644 --- a/arch/sh/boards/mach-sdk7786/fpga.c +++ b/arch/sh/boards/mach-sdk7786/fpga.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA Support. | 3 | * SDK7786 FPGA Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/io.h> | 8 | #include <linux/io.h> |
diff --git a/arch/sh/boards/mach-sdk7786/gpio.c b/arch/sh/boards/mach-sdk7786/gpio.c index 47997010b77a..c4587d1013e6 100644 --- a/arch/sh/boards/mach-sdk7786/gpio.c +++ b/arch/sh/boards/mach-sdk7786/gpio.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA USRGPIR Support. | 3 | * SDK7786 FPGA USRGPIR Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
diff --git a/arch/sh/boards/mach-sdk7786/irq.c b/arch/sh/boards/mach-sdk7786/irq.c index 46943a0da5b7..340c306ea952 100644 --- a/arch/sh/boards/mach-sdk7786/irq.c +++ b/arch/sh/boards/mach-sdk7786/irq.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA IRQ Controller Support. | 3 | * SDK7786 FPGA IRQ Controller Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Matt Fleming | 5 | * Copyright (C) 2010 Matt Fleming |
5 | * Copyright (C) 2010 Paul Mundt | 6 | * Copyright (C) 2010 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
12 | #include <mach/fpga.h> | 9 | #include <mach/fpga.h> |
diff --git a/arch/sh/boards/mach-sdk7786/nmi.c b/arch/sh/boards/mach-sdk7786/nmi.c index edcfa1f568ba..c2e09d798537 100644 --- a/arch/sh/boards/mach-sdk7786/nmi.c +++ b/arch/sh/boards/mach-sdk7786/nmi.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA NMI Support. | 3 | * SDK7786 FPGA NMI Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c index c29268bfd34a..65721c3a482c 100644 --- a/arch/sh/boards/mach-sdk7786/setup.c +++ b/arch/sh/boards/mach-sdk7786/setup.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas Technology Europe SDK7786 Support. | 3 | * Renesas Technology Europe SDK7786 Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Matt Fleming | 5 | * Copyright (C) 2010 Matt Fleming |
5 | * Copyright (C) 2010 Paul Mundt | 6 | * Copyright (C) 2010 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-sdk7786/sram.c b/arch/sh/boards/mach-sdk7786/sram.c index c81c3abbe01c..d76cdb7ede39 100644 --- a/arch/sh/boards/mach-sdk7786/sram.c +++ b/arch/sh/boards/mach-sdk7786/sram.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA SRAM Support. | 3 | * SDK7786 FPGA SRAM Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | 8 | ||
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile index 5c9eaa0535b9..b40b30853ce3 100644 --- a/arch/sh/boards/mach-se/7206/Makefile +++ b/arch/sh/boards/mach-se/7206/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the 7206 SolutionEngine specific parts of the kernel | 3 | # Makefile for the 7206 SolutionEngine specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7343/Makefile b/arch/sh/boards/mach-se/7343/Makefile index 4c3666a93790..e058661091a2 100644 --- a/arch/sh/boards/mach-se/7343/Makefile +++ b/arch/sh/boards/mach-se/7343/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the 7343 SolutionEngine specific parts of the kernel | 3 | # Makefile for the 7343 SolutionEngine specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index 6129aef6db76..39a3175e72b2 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Hitachi UL SolutionEngine 7343 FPGA IRQ Support. | 3 | * Hitachi UL SolutionEngine 7343 FPGA IRQ Support. |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Based on linux/arch/sh/boards/se/7343/irq.c | 8 | * Based on linux/arch/sh/boards/se/7343/irq.c |
8 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 9 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #define DRV_NAME "SE7343-FPGA" | 11 | #define DRV_NAME "SE7343-FPGA" |
15 | #define pr_fmt(fmt) DRV_NAME ": " fmt | 12 | #define pr_fmt(fmt) DRV_NAME ": " fmt |
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile index 43ea14feef51..900d93cfb6a5 100644 --- a/arch/sh/boards/mach-se/770x/Makefile +++ b/arch/sh/boards/mach-se/770x/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the 770x SolutionEngine specific parts of the kernel | 3 | # Makefile for the 770x SolutionEngine specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7721/Makefile b/arch/sh/boards/mach-se/7721/Makefile index 7f09030980b3..09436f10ddf1 100644 --- a/arch/sh/boards/mach-se/7721/Makefile +++ b/arch/sh/boards/mach-se/7721/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o irq.o | 2 | obj-y := setup.o irq.o |
diff --git a/arch/sh/boards/mach-se/7721/irq.c b/arch/sh/boards/mach-se/7721/irq.c index d85022ea3f12..e6ef2a2655c3 100644 --- a/arch/sh/boards/mach-se/7721/irq.c +++ b/arch/sh/boards/mach-se/7721/irq.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7721/irq.c | 3 | * linux/arch/sh/boards/se/7721/irq.c |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Renesas Solutions Corp. | 5 | * Copyright (C) 2008 Renesas Solutions Corp. |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-se/7721/setup.c b/arch/sh/boards/mach-se/7721/setup.c index a0b3dba34ebf..3af724dc4ba4 100644 --- a/arch/sh/boards/mach-se/7721/setup.c +++ b/arch/sh/boards/mach-se/7721/setup.c | |||
@@ -1,14 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7721/setup.c | 3 | * linux/arch/sh/boards/se/7721/setup.c |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Renesas Solutions Corp. | 5 | * Copyright (C) 2008 Renesas Solutions Corp. |
5 | * | 6 | * |
6 | * Hitachi UL SolutionEngine 7721 Support. | 7 | * Hitachi UL SolutionEngine 7721 Support. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | */ | 8 | */ |
13 | #include <linux/init.h> | 9 | #include <linux/init.h> |
14 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-se/7722/Makefile b/arch/sh/boards/mach-se/7722/Makefile index 8694373389e5..a5e89c0c6bb2 100644 --- a/arch/sh/boards/mach-se/7722/Makefile +++ b/arch/sh/boards/mach-se/7722/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the HITACHI UL SolutionEngine 7722 specific parts of the kernel | 3 | # Makefile for the HITACHI UL SolutionEngine 7722 specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index 24c74a88290c..f6e3009edd4e 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Hitachi UL SolutionEngine 7722 FPGA IRQ Support. | 3 | * Hitachi UL SolutionEngine 7722 FPGA IRQ Support. |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 5 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
5 | * Copyright (C) 2012 Paul Mundt | 6 | * Copyright (C) 2012 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #define DRV_NAME "SE7722-FPGA" | 8 | #define DRV_NAME "SE7722-FPGA" |
12 | #define pr_fmt(fmt) DRV_NAME ": " fmt | 9 | #define pr_fmt(fmt) DRV_NAME ": " fmt |
diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c index e04e2bc46984..2cd4a2e84b93 100644 --- a/arch/sh/boards/mach-se/7722/setup.c +++ b/arch/sh/boards/mach-se/7722/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7722/setup.c | 3 | * linux/arch/sh/boards/se/7722/setup.c |
3 | * | 4 | * |
@@ -5,11 +6,6 @@ | |||
5 | * Copyright (C) 2012 Paul Mundt | 6 | * Copyright (C) 2012 Paul Mundt |
6 | * | 7 | * |
7 | * Hitachi UL SolutionEngine 7722 Support. | 8 | * Hitachi UL SolutionEngine 7722 Support. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | * | ||
13 | */ | 9 | */ |
14 | #include <linux/init.h> | 10 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-se/7724/Makefile b/arch/sh/boards/mach-se/7724/Makefile index a08b36830f0e..6c6112b24617 100644 --- a/arch/sh/boards/mach-se/7724/Makefile +++ b/arch/sh/boards/mach-se/7724/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the HITACHI UL SolutionEngine 7724 specific parts of the kernel | 3 | # Makefile for the HITACHI UL SolutionEngine 7724 specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c index 64e681e66c57..14ce3024738f 100644 --- a/arch/sh/boards/mach-se/7724/irq.c +++ b/arch/sh/boards/mach-se/7724/irq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7724/irq.c | 3 | * linux/arch/sh/boards/se/7724/irq.c |
3 | * | 4 | * |
@@ -9,10 +10,6 @@ | |||
9 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 10 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
10 | * | 11 | * |
11 | * Hitachi UL SolutionEngine 7724 Support. | 12 | * Hitachi UL SolutionEngine 7724 Support. |
12 | * | ||
13 | * This file is subject to the terms and conditions of the GNU General Public | ||
14 | * License. See the file "COPYING" in the main directory of this archive | ||
15 | * for more details. | ||
16 | */ | 13 | */ |
17 | #include <linux/init.h> | 14 | #include <linux/init.h> |
18 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-se/7724/sdram.S b/arch/sh/boards/mach-se/7724/sdram.S index 6fa4734d09c7..61c1fe78d71a 100644 --- a/arch/sh/boards/mach-se/7724/sdram.S +++ b/arch/sh/boards/mach-se/7724/sdram.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * MS7724SE sdram self/auto-refresh setup code | 3 | * MS7724SE sdram self/auto-refresh setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile index a338fd9d5039..2406d3e35352 100644 --- a/arch/sh/boards/mach-se/7751/Makefile +++ b/arch/sh/boards/mach-se/7751/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the 7751 SolutionEngine specific parts of the kernel | 3 | # Makefile for the 7751 SolutionEngine specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7780/Makefile b/arch/sh/boards/mach-se/7780/Makefile index 6b88adae3ecc..1f6669ab1bc0 100644 --- a/arch/sh/boards/mach-se/7780/Makefile +++ b/arch/sh/boards/mach-se/7780/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the HITACHI UL SolutionEngine 7780 specific parts of the kernel | 3 | # Makefile for the HITACHI UL SolutionEngine 7780 specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-se/7780/irq.c b/arch/sh/boards/mach-se/7780/irq.c index d5c9edc172a3..d427dfd711f1 100644 --- a/arch/sh/boards/mach-se/7780/irq.c +++ b/arch/sh/boards/mach-se/7780/irq.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7780/irq.c | 3 | * linux/arch/sh/boards/se/7780/irq.c |
3 | * | 4 | * |
4 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu | 5 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu |
5 | * | 6 | * |
6 | * Hitachi UL SolutionEngine 7780 Support. | 7 | * Hitachi UL SolutionEngine 7780 Support. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
diff --git a/arch/sh/boards/mach-se/7780/setup.c b/arch/sh/boards/mach-se/7780/setup.c index ae5a1d84fdf8..309f2681381b 100644 --- a/arch/sh/boards/mach-se/7780/setup.c +++ b/arch/sh/boards/mach-se/7780/setup.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/se/7780/setup.c | 3 | * linux/arch/sh/boards/se/7780/setup.c |
3 | * | 4 | * |
4 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu | 5 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu |
5 | * | 6 | * |
6 | * Hitachi UL SolutionEngine 7780 Support. | 7 | * Hitachi UL SolutionEngine 7780 Support. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-sh03/Makefile b/arch/sh/boards/mach-sh03/Makefile index 47007a3a2fc8..f89c25c6a39c 100644 --- a/arch/sh/boards/mach-sh03/Makefile +++ b/arch/sh/boards/mach-sh03/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Interface (CTP/PCI-SH03) specific parts of the kernel | 3 | # Makefile for the Interface (CTP/PCI-SH03) specific parts of the kernel |
3 | # | 4 | # |
diff --git a/arch/sh/boards/mach-sh7763rdp/Makefile b/arch/sh/boards/mach-sh7763rdp/Makefile index f6c0b55516d2..d6341310444a 100644 --- a/arch/sh/boards/mach-sh7763rdp/Makefile +++ b/arch/sh/boards/mach-sh7763rdp/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y := setup.o irq.o | 2 | obj-y := setup.o irq.o |
diff --git a/arch/sh/boards/mach-sh7763rdp/irq.c b/arch/sh/boards/mach-sh7763rdp/irq.c index add698c8f2b4..efd382b7dad4 100644 --- a/arch/sh/boards/mach-sh7763rdp/irq.c +++ b/arch/sh/boards/mach-sh7763rdp/irq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/renesas/sh7763rdp/irq.c | 3 | * linux/arch/sh/boards/renesas/sh7763rdp/irq.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2008 Renesas Solutions Corp. | 7 | * Copyright (C) 2008 Renesas Solutions Corp. |
7 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 8 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index 6e62686b81b1..97e715e4e9b3 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * linux/arch/sh/boards/renesas/sh7763rdp/setup.c | 3 | * linux/arch/sh/boards/renesas/sh7763rdp/setup.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2008 Renesas Solutions Corp. | 7 | * Copyright (C) 2008 Renesas Solutions Corp. |
7 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 8 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/mach-x3proto/Makefile b/arch/sh/boards/mach-x3proto/Makefile index 0cbe3d02dea3..6caefa114598 100644 --- a/arch/sh/boards/mach-x3proto/Makefile +++ b/arch/sh/boards/mach-x3proto/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | obj-y += setup.o ilsel.o | 2 | obj-y += setup.o ilsel.o |
2 | 3 | ||
3 | obj-$(CONFIG_GPIOLIB) += gpio.o | 4 | obj-$(CONFIG_GPIOLIB) += gpio.o |
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index cea88b0effa2..efc992f641a6 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/mach-x3proto/gpio.c | 3 | * arch/sh/boards/mach-x3proto/gpio.c |
3 | * | 4 | * |
4 | * Renesas SH-X3 Prototype Baseboard GPIO Support. | 5 | * Renesas SH-X3 Prototype Baseboard GPIO Support. |
5 | * | 6 | * |
6 | * Copyright (C) 2010 - 2012 Paul Mundt | 7 | * Copyright (C) 2010 - 2012 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 10 | ||
diff --git a/arch/sh/boards/mach-x3proto/ilsel.c b/arch/sh/boards/mach-x3proto/ilsel.c index 95e346139515..f0d5eb41521a 100644 --- a/arch/sh/boards/mach-x3proto/ilsel.c +++ b/arch/sh/boards/mach-x3proto/ilsel.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/mach-x3proto/ilsel.c | 3 | * arch/sh/boards/mach-x3proto/ilsel.c |
3 | * | 4 | * |
4 | * Helper routines for SH-X3 proto board ILSEL. | 5 | * Helper routines for SH-X3 proto board ILSEL. |
5 | * | 6 | * |
6 | * Copyright (C) 2007 - 2010 Paul Mundt | 7 | * Copyright (C) 2007 - 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 10 | ||
diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c index d682e2b6a856..95b85f2e13dd 100644 --- a/arch/sh/boards/mach-x3proto/setup.c +++ b/arch/sh/boards/mach-x3proto/setup.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/boards/mach-x3proto/setup.c | 3 | * arch/sh/boards/mach-x3proto/setup.c |
3 | * | 4 | * |
4 | * Renesas SH-X3 Prototype Board Support. | 5 | * Renesas SH-X3 Prototype Board Support. |
5 | * | 6 | * |
6 | * Copyright (C) 2007 - 2010 Paul Mundt | 7 | * Copyright (C) 2007 - 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 6e9786548ac6..958f46da3a79 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH generic board support, using device tree | 3 | * SH generic board support, using device tree |
3 | * | 4 | * |
4 | * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. | 5 | * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/of.h> | 8 | #include <linux/of.h> |
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index d88c9484762c..d2fdd56208f6 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the SuperH DMA specific kernel interface routines under Linux. | 3 | # Makefile for the SuperH DMA specific kernel interface routines under Linux. |
3 | # | 4 | # |
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c index b05be597b19f..ab9170494dcc 100644 --- a/arch/sh/drivers/dma/dma-api.c +++ b/arch/sh/drivers/dma/dma-api.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/dma/dma-api.c | 3 | * arch/sh/drivers/dma/dma-api.c |
3 | * | 4 | * |
4 | * SuperH-specific DMA management API | 5 | * SuperH-specific DMA management API |
5 | * | 6 | * |
6 | * Copyright (C) 2003, 2004, 2005 Paul Mundt | 7 | * Copyright (C) 2003, 2004, 2005 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -417,4 +414,4 @@ subsys_initcall(dma_api_init); | |||
417 | 414 | ||
418 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); | 415 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); |
419 | MODULE_DESCRIPTION("DMA API for SuperH"); | 416 | MODULE_DESCRIPTION("DMA API for SuperH"); |
420 | MODULE_LICENSE("GPL"); | 417 | MODULE_LICENSE("GPL v2"); |
diff --git a/arch/sh/drivers/dma/dma-g2.c b/arch/sh/drivers/dma/dma-g2.c index e1ab6eb3c04b..52a8ae5e30d2 100644 --- a/arch/sh/drivers/dma/dma-g2.c +++ b/arch/sh/drivers/dma/dma-g2.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/dma/dma-g2.c | 3 | * arch/sh/drivers/dma/dma-g2.c |
3 | * | 4 | * |
4 | * G2 bus DMA support | 5 | * G2 bus DMA support |
5 | * | 6 | * |
6 | * Copyright (C) 2003 - 2006 Paul Mundt | 7 | * Copyright (C) 2003 - 2006 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
@@ -197,4 +194,4 @@ module_exit(g2_dma_exit); | |||
197 | 194 | ||
198 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); | 195 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); |
199 | MODULE_DESCRIPTION("G2 bus DMA driver"); | 196 | MODULE_DESCRIPTION("G2 bus DMA driver"); |
200 | MODULE_LICENSE("GPL"); | 197 | MODULE_LICENSE("GPL v2"); |
diff --git a/arch/sh/drivers/dma/dma-pvr2.c b/arch/sh/drivers/dma/dma-pvr2.c index 706a3434af7a..b5dbd1f75768 100644 --- a/arch/sh/drivers/dma/dma-pvr2.c +++ b/arch/sh/drivers/dma/dma-pvr2.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/dma/dma-pvr2.c | 3 | * arch/sh/drivers/dma/dma-pvr2.c |
3 | * | 4 | * |
4 | * NEC PowerVR 2 (Dreamcast) DMA support | 5 | * NEC PowerVR 2 (Dreamcast) DMA support |
5 | * | 6 | * |
6 | * Copyright (C) 2003, 2004 Paul Mundt | 7 | * Copyright (C) 2003, 2004 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
@@ -105,4 +102,4 @@ module_exit(pvr2_dma_exit); | |||
105 | 102 | ||
106 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); | 103 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); |
107 | MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver"); | 104 | MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver"); |
108 | MODULE_LICENSE("GPL"); | 105 | MODULE_LICENSE("GPL v2"); |
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index afde2a7d3eb3..96c626c2cd0a 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/dma/dma-sh.c | 3 | * arch/sh/drivers/dma/dma-sh.c |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2000 Takashi YOSHII | 7 | * Copyright (C) 2000 Takashi YOSHII |
7 | * Copyright (C) 2003, 2004 Paul Mundt | 8 | * Copyright (C) 2003, 2004 Paul Mundt |
8 | * Copyright (C) 2005 Andriy Skulysh | 9 | * Copyright (C) 2005 Andriy Skulysh |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
15 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
@@ -414,4 +411,4 @@ module_exit(sh_dmac_exit); | |||
414 | 411 | ||
415 | MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh"); | 412 | MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh"); |
416 | MODULE_DESCRIPTION("SuperH On-Chip DMAC Support"); | 413 | MODULE_DESCRIPTION("SuperH On-Chip DMAC Support"); |
417 | MODULE_LICENSE("GPL"); | 414 | MODULE_LICENSE("GPL v2"); |
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c index 4b15feda54b0..8ef318150f84 100644 --- a/arch/sh/drivers/dma/dma-sysfs.c +++ b/arch/sh/drivers/dma/dma-sysfs.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/dma/dma-sysfs.c | 3 | * arch/sh/drivers/dma/dma-sysfs.c |
3 | * | 4 | * |
4 | * sysfs interface for SH DMA API | 5 | * sysfs interface for SH DMA API |
5 | * | 6 | * |
6 | * Copyright (C) 2004 - 2006 Paul Mundt | 7 | * Copyright (C) 2004 - 2006 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c index e5a57a109d6c..5b2c1fd254d7 100644 --- a/arch/sh/drivers/dma/dmabrg.c +++ b/arch/sh/drivers/dma/dmabrg.c | |||
@@ -1,9 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7760 DMABRG IRQ handling | 3 | * SH7760 DMABRG IRQ handling |
3 | * | 4 | * |
4 | * (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com> | 5 | * (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com> |
5 | * licensed under the GPLv2. | ||
6 | * | ||
7 | */ | 6 | */ |
8 | 7 | ||
9 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c index e8af2ff29bc3..cf2fcccca812 100644 --- a/arch/sh/drivers/heartbeat.c +++ b/arch/sh/drivers/heartbeat.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Generic heartbeat driver for regular LED banks | 3 | * Generic heartbeat driver for regular LED banks |
3 | * | 4 | * |
@@ -13,10 +14,6 @@ | |||
13 | * traditionally used for strobing the load average. This use case is | 14 | * traditionally used for strobing the load average. This use case is |
14 | * handled by this driver, rather than giving each LED bit position its | 15 | * handled by this driver, rather than giving each LED bit position its |
15 | * own struct device. | 16 | * own struct device. |
16 | * | ||
17 | * This file is subject to the terms and conditions of the GNU General Public | ||
18 | * License. See the file "COPYING" in the main directory of this archive | ||
19 | * for more details. | ||
20 | */ | 17 | */ |
21 | #include <linux/init.h> | 18 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c index 48aaefd8f5d6..dfdbd05b6eb1 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/fixups-dreamcast.c | 3 | * arch/sh/drivers/pci/fixups-dreamcast.c |
3 | * | 4 | * |
@@ -9,10 +10,6 @@ | |||
9 | * This file originally bore the message (with enclosed-$): | 10 | * This file originally bore the message (with enclosed-$): |
10 | * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp | 11 | * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp |
11 | * Dreamcast PCI: Supports SEGA Broadband Adaptor only. | 12 | * Dreamcast PCI: Supports SEGA Broadband Adaptor only. |
12 | * | ||
13 | * This file is subject to the terms and conditions of the GNU General Public | ||
14 | * License. See the file "COPYING" in the main directory of this archive | ||
15 | * for more details. | ||
16 | */ | 13 | */ |
17 | 14 | ||
18 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
diff --git a/arch/sh/drivers/pci/fixups-landisk.c b/arch/sh/drivers/pci/fixups-landisk.c index db5b40a98e62..53fa2fc87eec 100644 --- a/arch/sh/drivers/pci/fixups-landisk.c +++ b/arch/sh/drivers/pci/fixups-landisk.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/fixups-landisk.c | 3 | * arch/sh/drivers/pci/fixups-landisk.c |
3 | * | 4 | * |
@@ -5,9 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2006 kogiidena | 7 | * Copyright (C) 2006 kogiidena |
7 | * Copyright (C) 2010 Nobuhiro Iwamatsu | 8 | * Copyright (C) 2010 Nobuhiro Iwamatsu |
8 | * | ||
9 | * May be copied or modified under the terms of the GNU General Public | ||
10 | * License. See linux/COPYING for more information. | ||
11 | */ | 9 | */ |
12 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
13 | #include <linux/types.h> | 11 | #include <linux/types.h> |
diff --git a/arch/sh/drivers/pci/fixups-r7780rp.c b/arch/sh/drivers/pci/fixups-r7780rp.c index 2c9b58f848dd..3c9139c5955e 100644 --- a/arch/sh/drivers/pci/fixups-r7780rp.c +++ b/arch/sh/drivers/pci/fixups-r7780rp.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/fixups-r7780rp.c | 3 | * arch/sh/drivers/pci/fixups-r7780rp.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2003 Lineo uSolutions, Inc. | 7 | * Copyright (C) 2003 Lineo uSolutions, Inc. |
7 | * Copyright (C) 2004 - 2006 Paul Mundt | 8 | * Copyright (C) 2004 - 2006 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
14 | #include <linux/io.h> | 11 | #include <linux/io.h> |
diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c index 358ac104f08c..3f0a6fe1610b 100644 --- a/arch/sh/drivers/pci/fixups-rts7751r2d.c +++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/fixups-rts7751r2d.c | 3 | * arch/sh/drivers/pci/fixups-rts7751r2d.c |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2003 Lineo uSolutions, Inc. | 7 | * Copyright (C) 2003 Lineo uSolutions, Inc. |
7 | * Copyright (C) 2004 Paul Mundt | 8 | * Copyright (C) 2004 Paul Mundt |
8 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 9 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
15 | #include <mach/lboxre2.h> | 12 | #include <mach/lboxre2.h> |
diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c index 24e96dfbdb22..c306040485bd 100644 --- a/arch/sh/drivers/pci/fixups-sdk7780.c +++ b/arch/sh/drivers/pci/fixups-sdk7780.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/fixups-sdk7780.c | 3 | * arch/sh/drivers/pci/fixups-sdk7780.c |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2003 Lineo uSolutions, Inc. | 7 | * Copyright (C) 2003 Lineo uSolutions, Inc. |
7 | * Copyright (C) 2004 - 2006 Paul Mundt | 8 | * Copyright (C) 2004 - 2006 Paul Mundt |
8 | * Copyright (C) 2006 Nobuhiro Iwamatsu | 9 | * Copyright (C) 2006 Nobuhiro Iwamatsu |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
15 | #include <linux/io.h> | 12 | #include <linux/io.h> |
diff --git a/arch/sh/drivers/pci/fixups-sdk7786.c b/arch/sh/drivers/pci/fixups-sdk7786.c index 36eb6fc3c18a..8cbfa5310a4b 100644 --- a/arch/sh/drivers/pci/fixups-sdk7786.c +++ b/arch/sh/drivers/pci/fixups-sdk7786.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SDK7786 FPGA PCIe mux handling | 3 | * SDK7786 FPGA PCIe mux handling |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #define pr_fmt(fmt) "PCI: " fmt | 7 | #define pr_fmt(fmt) "PCI: " fmt |
11 | 8 | ||
diff --git a/arch/sh/drivers/pci/fixups-snapgear.c b/arch/sh/drivers/pci/fixups-snapgear.c index a931e5928f58..317225c09413 100644 --- a/arch/sh/drivers/pci/fixups-snapgear.c +++ b/arch/sh/drivers/pci/fixups-snapgear.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/ops-snapgear.c | 3 | * arch/sh/drivers/pci/ops-snapgear.c |
3 | * | 4 | * |
@@ -7,9 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. | 9 | * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. |
9 | * | 10 | * |
10 | * May be copied or modified under the terms of the GNU General Public | ||
11 | * License. See linux/COPYING for more information. | ||
12 | * | ||
13 | * PCI initialization for the SnapGear boards | 11 | * PCI initialization for the SnapGear boards |
14 | */ | 12 | */ |
15 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/drivers/pci/fixups-titan.c b/arch/sh/drivers/pci/fixups-titan.c index a9d563e479d5..b5bb65caa16d 100644 --- a/arch/sh/drivers/pci/fixups-titan.c +++ b/arch/sh/drivers/pci/fixups-titan.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/pci/ops-titan.c | 3 | * arch/sh/drivers/pci/ops-titan.c |
3 | * | 4 | * |
@@ -6,9 +7,6 @@ | |||
6 | * Modified from ops-snapgear.c written by David McCullough | 7 | * Modified from ops-snapgear.c written by David McCullough |
7 | * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. | 8 | * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. |
8 | * | 9 | * |
9 | * May be copied or modified under the terms of the GNU General Public | ||
10 | * License. See linux/COPYING for more information. | ||
11 | * | ||
12 | * PCI initialization for the Titan boards | 10 | * PCI initialization for the Titan boards |
13 | */ | 11 | */ |
14 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
diff --git a/arch/sh/drivers/pci/ops-dreamcast.c b/arch/sh/drivers/pci/ops-dreamcast.c index 16e0a1baad88..517a8a9702f6 100644 --- a/arch/sh/drivers/pci/ops-dreamcast.c +++ b/arch/sh/drivers/pci/ops-dreamcast.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * PCI operations for the Sega Dreamcast | 3 | * PCI operations for the Sega Dreamcast |
3 | * | 4 | * |
4 | * Copyright (C) 2001, 2002 M. R. Brown | 5 | * Copyright (C) 2001, 2002 M. R. Brown |
5 | * Copyright (C) 2002, 2003 Paul Mundt | 6 | * Copyright (C) 2002, 2003 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
diff --git a/arch/sh/drivers/pci/ops-sh4.c b/arch/sh/drivers/pci/ops-sh4.c index b6234203e0ac..a205be3bfc4a 100644 --- a/arch/sh/drivers/pci/ops-sh4.c +++ b/arch/sh/drivers/pci/ops-sh4.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780). | 3 | * Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780). |
3 | * | 4 | * |
4 | * Copyright (C) 2002 - 2009 Paul Mundt | 5 | * Copyright (C) 2002 - 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License v2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
11 | #include <linux/io.h> | 8 | #include <linux/io.h> |
diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c index 45361946460f..9fbaf72949ab 100644 --- a/arch/sh/drivers/pci/ops-sh5.c +++ b/arch/sh/drivers/pci/ops-sh5.c | |||
@@ -1,12 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Support functions for the SH5 PCI hardware. | 3 | * Support functions for the SH5 PCI hardware. |
3 | * | 4 | * |
4 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | 5 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) |
5 | * Copyright (C) 2003, 2004 Paul Mundt | 6 | * Copyright (C) 2003, 2004 Paul Mundt |
6 | * Copyright (C) 2004 Richard Curnow | 7 | * Copyright (C) 2004 Richard Curnow |
7 | * | ||
8 | * May be copied or modified under the terms of the GNU General Public | ||
9 | * License. See linux/COPYING for more information. | ||
10 | */ | 8 | */ |
11 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
12 | #include <linux/rwsem.h> | 10 | #include <linux/rwsem.h> |
diff --git a/arch/sh/drivers/pci/ops-sh7786.c b/arch/sh/drivers/pci/ops-sh7786.c index 128421009e3f..a10f9f4ebd7f 100644 --- a/arch/sh/drivers/pci/ops-sh7786.c +++ b/arch/sh/drivers/pci/ops-sh7786.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Generic SH7786 PCI-Express operations. | 3 | * Generic SH7786 PCI-Express operations. |
3 | * | 4 | * |
4 | * Copyright (C) 2009 - 2010 Paul Mundt | 5 | * Copyright (C) 2009 - 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License v2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/drivers/pci/pci-dreamcast.c b/arch/sh/drivers/pci/pci-dreamcast.c index 633694193af8..4cff2a8107bf 100644 --- a/arch/sh/drivers/pci/pci-dreamcast.c +++ b/arch/sh/drivers/pci/pci-dreamcast.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * PCI support for the Sega Dreamcast | 3 | * PCI support for the Sega Dreamcast |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * This file originally bore the message (with enclosed-$): | 8 | * This file originally bore the message (with enclosed-$): |
8 | * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp | 9 | * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp |
9 | * Dreamcast PCI: Supports SEGA Broadband Adaptor only. | 10 | * Dreamcast PCI: Supports SEGA Broadband Adaptor only. |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | 12 | ||
16 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c index 8229114c6a58..49303fab187b 100644 --- a/arch/sh/drivers/pci/pci-sh5.c +++ b/arch/sh/drivers/pci/pci-sh5.c | |||
@@ -1,11 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | 3 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) |
3 | * Copyright (C) 2003, 2004 Paul Mundt | 4 | * Copyright (C) 2003, 2004 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | 6 | * |
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * Support functions for the SH5 PCI hardware. | 7 | * Support functions for the SH5 PCI hardware. |
10 | */ | 8 | */ |
11 | 9 | ||
diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h index 3f01decb4307..91348af0ef6c 100644 --- a/arch/sh/drivers/pci/pci-sh5.h +++ b/arch/sh/drivers/pci/pci-sh5.h | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | ||
3 | * | 2 | * |
4 | * May be copied or modified under the terms of the GNU General Public | 3 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) |
5 | * License. See linux/COPYING for more information. | ||
6 | * | 4 | * |
7 | * Definitions for the SH5 PCI hardware. | 5 | * Definitions for the SH5 PCI hardware. |
8 | */ | 6 | */ |
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c index 86adb1e235cd..1b9e5caac389 100644 --- a/arch/sh/drivers/pci/pci-sh7751.c +++ b/arch/sh/drivers/pci/pci-sh7751.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Low-Level PCI Support for the SH7751 | 3 | * Low-Level PCI Support for the SH7751 |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 2001 Dustin McIntire | 6 | * Copyright (C) 2001 Dustin McIntire |
6 | * | 7 | * |
7 | * With cleanup by Paul van Gool <pvangool@mimotech.com>, 2003. | 8 | * With cleanup by Paul van Gool <pvangool@mimotech.com>, 2003. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
diff --git a/arch/sh/drivers/pci/pci-sh7751.h b/arch/sh/drivers/pci/pci-sh7751.h index 5ede38c330d3..d1951e50effc 100644 --- a/arch/sh/drivers/pci/pci-sh7751.h +++ b/arch/sh/drivers/pci/pci-sh7751.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Low-Level PCI Support for SH7751 targets | 3 | * Low-Level PCI Support for SH7751 targets |
3 | * | 4 | * |
4 | * Dustin McIntire (dustin@sensoria.com) (c) 2001 | 5 | * Dustin McIntire (dustin@sensoria.com) (c) 2001 |
5 | * Paul Mundt (lethal@linux-sh.org) (c) 2003 | 6 | * Paul Mundt (lethal@linux-sh.org) (c) 2003 |
6 | * | ||
7 | * May be copied or modified under the terms of the GNU General Public | ||
8 | * License. See linux/COPYING for more information. | ||
9 | * | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef _PCI_SH7751_H_ | 9 | #ifndef _PCI_SH7751_H_ |
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c index 5a6dab6e27d9..3fd0f392a0ee 100644 --- a/arch/sh/drivers/pci/pci-sh7780.c +++ b/arch/sh/drivers/pci/pci-sh7780.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Low-Level PCI Support for the SH7780 | 3 | * Low-Level PCI Support for the SH7780 |
3 | * | 4 | * |
4 | * Copyright (C) 2005 - 2010 Paul Mundt | 5 | * Copyright (C) 2005 - 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/types.h> | 7 | #include <linux/types.h> |
11 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h index 1742e2c9db7a..e2ac770f8e35 100644 --- a/arch/sh/drivers/pci/pci-sh7780.h +++ b/arch/sh/drivers/pci/pci-sh7780.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Low-Level PCI Support for SH7780 targets | 3 | * Low-Level PCI Support for SH7780 targets |
3 | * | 4 | * |
4 | * Dustin McIntire (dustin@sensoria.com) (c) 2001 | 5 | * Dustin McIntire (dustin@sensoria.com) (c) 2001 |
5 | * Paul Mundt (lethal@linux-sh.org) (c) 2003 | 6 | * Paul Mundt (lethal@linux-sh.org) (c) 2003 |
6 | * | ||
7 | * May be copied or modified under the terms of the GNU General Public | ||
8 | * License. See linux/COPYING for more information. | ||
9 | * | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef _PCI_SH7780_H_ | 9 | #ifndef _PCI_SH7780_H_ |
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 8256626bc53c..c7784e156964 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * New-style PCI core. | 3 | * New-style PCI core. |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Modelled after arch/mips/pci/pci.c: | 8 | * Modelled after arch/mips/pci/pci.c: |
8 | * Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org) | 9 | * Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org) |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
15 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index 3d81a8b80942..a58b77cea295 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Low-Level PCI Express Support for the SH7786 | 3 | * Low-Level PCI Express Support for the SH7786 |
3 | * | 4 | * |
4 | * Copyright (C) 2009 - 2011 Paul Mundt | 5 | * Copyright (C) 2009 - 2011 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #define pr_fmt(fmt) "PCI: " fmt | 7 | #define pr_fmt(fmt) "PCI: " fmt |
11 | 8 | ||
diff --git a/arch/sh/drivers/pci/pcie-sh7786.h b/arch/sh/drivers/pci/pcie-sh7786.h index 4a6ff55f759b..ffe383681a0b 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.h +++ b/arch/sh/drivers/pci/pcie-sh7786.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * SH7786 PCI-Express controller definitions. | 3 | * SH7786 PCI-Express controller definitions. |
3 | * | 4 | * |
4 | * Copyright (C) 2008, 2009 Renesas Technology Corp. | 5 | * Copyright (C) 2008, 2009 Renesas Technology Corp. |
5 | * All rights reserved. | 6 | * All rights reserved. |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __PCI_SH7786_H | 8 | #ifndef __PCI_SH7786_H |
12 | #define __PCI_SH7786_H | 9 | #define __PCI_SH7786_H |
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c index 762bc5619910..2813140fd92b 100644 --- a/arch/sh/drivers/push-switch.c +++ b/arch/sh/drivers/push-switch.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Generic push-switch framework | 3 | * Generic push-switch framework |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
diff --git a/arch/sh/drivers/superhyway/Makefile b/arch/sh/drivers/superhyway/Makefile index 5b8e0c7ca3a5..aa6e3267c055 100644 --- a/arch/sh/drivers/superhyway/Makefile +++ b/arch/sh/drivers/superhyway/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the SuperHyway specific kernel interface routines under Linux. | 3 | # Makefile for the SuperHyway specific kernel interface routines under Linux. |
3 | # | 4 | # |
diff --git a/arch/sh/drivers/superhyway/ops-sh4-202.c b/arch/sh/drivers/superhyway/ops-sh4-202.c index 6da62e9475c4..490142274e3b 100644 --- a/arch/sh/drivers/superhyway/ops-sh4-202.c +++ b/arch/sh/drivers/superhyway/ops-sh4-202.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/drivers/superhyway/ops-sh4-202.c | 3 | * arch/sh/drivers/superhyway/ops-sh4-202.c |
3 | * | 4 | * |
4 | * SuperHyway bus support for SH4-202 | 5 | * SuperHyway bus support for SH4-202 |
5 | * | 6 | * |
6 | * Copyright (C) 2005 Paul Mundt | 7 | * Copyright (C) 2005 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU | ||
9 | * General Public License. See the file "COPYING" in the main | ||
10 | * directory of this archive for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index b15caf34813a..a6ef3fee5f85 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | generated-y += syscall_table.h | 2 | generated-y += syscall_table.h |
2 | generic-y += compat.h | 3 | generic-y += compat.h |
3 | generic-y += current.h | 4 | generic-y += current.h |
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 3d1ae2bfaa6f..34bfbcddcce0 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | 2 | * |
6 | * Copyright (C) 1999 by Kaz Kojima | 3 | * Copyright (C) 1999 by Kaz Kojima |
7 | * | 4 | * |
diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h index d370ee36a182..9f8535716392 100644 --- a/arch/sh/include/asm/asm-offsets.h +++ b/arch/sh/include/asm/asm-offsets.h | |||
@@ -1 +1,2 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #include <generated/asm-offsets.h> | 2 | #include <generated/asm-offsets.h> |
diff --git a/arch/sh/include/asm/bl_bit_64.h b/arch/sh/include/asm/bl_bit_64.h index 6cc8711af435..aac9780fe864 100644 --- a/arch/sh/include/asm/bl_bit_64.h +++ b/arch/sh/include/asm/bl_bit_64.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2000, 2001 Paolo Alberelli | 3 | * Copyright (C) 2000, 2001 Paolo Alberelli |
3 | * Copyright (C) 2003 Paul Mundt | 4 | * Copyright (C) 2003 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_BL_BIT_64_H | 7 | #ifndef __ASM_SH_BL_BIT_64_H |
11 | #define __ASM_SH_BL_BIT_64_H | 8 | #define __ASM_SH_BL_BIT_64_H |
diff --git a/arch/sh/include/asm/cache_insns_64.h b/arch/sh/include/asm/cache_insns_64.h index 70b6357eaf1a..ed682b987b0d 100644 --- a/arch/sh/include/asm/cache_insns_64.h +++ b/arch/sh/include/asm/cache_insns_64.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2000, 2001 Paolo Alberelli | 3 | * Copyright (C) 2000, 2001 Paolo Alberelli |
3 | * Copyright (C) 2003 Paul Mundt | 4 | * Copyright (C) 2003 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_CACHE_INSNS_64_H | 7 | #ifndef __ASM_SH_CACHE_INSNS_64_H |
11 | #define __ASM_SH_CACHE_INSNS_64_H | 8 | #define __ASM_SH_CACHE_INSNS_64_H |
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h index 9c84386d35cb..b58f3d95dc19 100644 --- a/arch/sh/include/asm/checksum_32.h +++ b/arch/sh/include/asm/checksum_32.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_CHECKSUM_H | 2 | #ifndef __ASM_SH_CHECKSUM_H |
2 | #define __ASM_SH_CHECKSUM_H | 3 | #define __ASM_SH_CHECKSUM_H |
3 | 4 | ||
4 | /* | 5 | /* |
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | * | ||
9 | * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka | 6 | * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka |
10 | */ | 7 | */ |
11 | 8 | ||
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h index 593a9704782b..c373f21efe4d 100644 --- a/arch/sh/include/asm/cmpxchg-xchg.h +++ b/arch/sh/include/asm/cmpxchg-xchg.h | |||
@@ -1,12 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_CMPXCHG_XCHG_H | 2 | #ifndef __ASM_SH_CMPXCHG_XCHG_H |
2 | #define __ASM_SH_CMPXCHG_XCHG_H | 3 | #define __ASM_SH_CMPXCHG_XCHG_H |
3 | 4 | ||
4 | /* | 5 | /* |
5 | * Copyright (C) 2016 Red Hat, Inc. | 6 | * Copyright (C) 2016 Red Hat, Inc. |
6 | * Author: Michael S. Tsirkin <mst@redhat.com> | 7 | * Author: Michael S. Tsirkin <mst@redhat.com> |
7 | * | ||
8 | * This work is licensed under the terms of the GNU GPL, version 2. See the | ||
9 | * file "COPYING" in the main directory of this archive for more details. | ||
10 | */ | 8 | */ |
11 | #include <linux/bits.h> | 9 | #include <linux/bits.h> |
12 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
diff --git a/arch/sh/include/asm/device.h b/arch/sh/include/asm/device.h index 071bcb4d4bfd..6f3e686a1c6f 100644 --- a/arch/sh/include/asm/device.h +++ b/arch/sh/include/asm/device.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * Arch specific extensions to struct device | ||
3 | * | 2 | * |
4 | * This file is released under the GPLv2 | 3 | * Arch specific extensions to struct device |
5 | */ | 4 | */ |
6 | #ifndef __ASM_SH_DEVICE_H | 5 | #ifndef __ASM_SH_DEVICE_H |
7 | #define __ASM_SH_DEVICE_H | 6 | #define __ASM_SH_DEVICE_H |
diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h index c757b47e6b64..724dab912b71 100644 --- a/arch/sh/include/asm/dma-register.h +++ b/arch/sh/include/asm/dma-register.h | |||
@@ -1,14 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Common header for the legacy SH DMA driver and the new dmaengine driver | 3 | * Common header for the legacy SH DMA driver and the new dmaengine driver |
3 | * | 4 | * |
4 | * extracted from arch/sh/include/asm/dma-sh.h: | 5 | * extracted from arch/sh/include/asm/dma-sh.h: |
5 | * | 6 | * |
6 | * Copyright (C) 2000 Takashi YOSHII | 7 | * Copyright (C) 2000 Takashi YOSHII |
7 | * Copyright (C) 2003 Paul Mundt | 8 | * Copyright (C) 2003 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #ifndef DMA_REGISTER_H | 10 | #ifndef DMA_REGISTER_H |
14 | #define DMA_REGISTER_H | 11 | #define DMA_REGISTER_H |
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index fb6e4f7b00a2..4d5a21a891c0 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/dma.h | 3 | * include/asm-sh/dma.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003, 2004 Paul Mundt | 5 | * Copyright (C) 2003, 2004 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_DMA_H | 7 | #ifndef __ASM_SH_DMA_H |
11 | #define __ASM_SH_DMA_H | 8 | #define __ASM_SH_DMA_H |
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index d62abd1d0c05..571954474122 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h | |||
@@ -1,10 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | 2 | * |
3 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> | ||
8 | */ | 4 | */ |
9 | #ifndef __ASM_SH_DWARF_H | 5 | #ifndef __ASM_SH_DWARF_H |
10 | #define __ASM_SH_DWARF_H | 6 | #define __ASM_SH_DWARF_H |
diff --git a/arch/sh/include/asm/fb.h b/arch/sh/include/asm/fb.h index d92e99cd8c8a..9a0bca2686fd 100644 --- a/arch/sh/include/asm/fb.h +++ b/arch/sh/include/asm/fb.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef _ASM_FB_H_ | 2 | #ifndef _ASM_FB_H_ |
2 | #define _ASM_FB_H_ | 3 | #define _ASM_FB_H_ |
3 | 4 | ||
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 4daf91c3b725..e30348c58073 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h | |||
@@ -1,9 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | 2 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 3 | * fixmap.h: compile-time virtual memory allocation |
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | 4 | * |
8 | * Copyright (C) 1998 Ingo Molnar | 5 | * Copyright (C) 1998 Ingo Molnar |
9 | * | 6 | * |
diff --git a/arch/sh/include/asm/flat.h b/arch/sh/include/asm/flat.h index 275fcae23539..843d458b8329 100644 --- a/arch/sh/include/asm/flat.h +++ b/arch/sh/include/asm/flat.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/flat.h | 3 | * include/asm-sh/flat.h |
3 | * | 4 | * |
4 | * uClinux flat-format executables | 5 | * uClinux flat-format executables |
5 | * | 6 | * |
6 | * Copyright (C) 2003 Paul Mundt | 7 | * Copyright (C) 2003 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive for | ||
10 | * more details. | ||
11 | */ | 8 | */ |
12 | #ifndef __ASM_SH_FLAT_H | 9 | #ifndef __ASM_SH_FLAT_H |
13 | #define __ASM_SH_FLAT_H | 10 | #define __ASM_SH_FLAT_H |
diff --git a/arch/sh/include/asm/freq.h b/arch/sh/include/asm/freq.h index 4ece90b09b9c..18133bf83738 100644 --- a/arch/sh/include/asm/freq.h +++ b/arch/sh/include/asm/freq.h | |||
@@ -1,12 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0+ |
2 | * | ||
2 | * include/asm-sh/freq.h | 3 | * include/asm-sh/freq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | 6 | */ |
11 | #ifndef __ASM_SH_FREQ_H | 7 | #ifndef __ASM_SH_FREQ_H |
12 | #define __ASM_SH_FREQ_H | 8 | #define __ASM_SH_FREQ_H |
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 7dfe15e2e990..351918894e86 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/gpio.h | 3 | * include/asm-sh/gpio.h |
3 | * | 4 | * |
4 | * Generic GPIO API and pinmux table support for SuperH. | 5 | * Generic GPIO API and pinmux table support for SuperH. |
5 | * | 6 | * |
6 | * Copyright (c) 2008 Magnus Damm | 7 | * Copyright (c) 2008 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #ifndef __ASM_SH_GPIO_H | 9 | #ifndef __ASM_SH_GPIO_H |
13 | #define __ASM_SH_GPIO_H | 10 | #define __ASM_SH_GPIO_H |
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index d3324e4f372e..f7d05546beca 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/machvec.h | 3 | * include/asm-sh/machvec.h |
3 | * | 4 | * |
4 | * Copyright 2000 Stuart Menefy (stuart.menefy@st.com) | 5 | * Copyright 2000 Stuart Menefy (stuart.menefy@st.com) |
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | */ | 6 | */ |
9 | 7 | ||
10 | #ifndef _ASM_SH_MACHVEC_H | 8 | #ifndef _ASM_SH_MACHVEC_H |
diff --git a/arch/sh/include/asm/mmu_context_64.h b/arch/sh/include/asm/mmu_context_64.h index de121025d87f..bacafe0b887d 100644 --- a/arch/sh/include/asm/mmu_context_64.h +++ b/arch/sh/include/asm/mmu_context_64.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_MMU_CONTEXT_64_H | 2 | #ifndef __ASM_SH_MMU_CONTEXT_64_H |
2 | #define __ASM_SH_MMU_CONTEXT_64_H | 3 | #define __ASM_SH_MMU_CONTEXT_64_H |
3 | 4 | ||
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 8 | * Copyright (C) 2000, 2001 Paolo Alberelli |
8 | * Copyright (C) 2003 - 2007 Paul Mundt | 9 | * Copyright (C) 2003 - 2007 Paul Mundt |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <cpu/registers.h> | 11 | #include <cpu/registers.h> |
15 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index f6abfe2bca93..3587103afe59 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * This file contains the functions and defines necessary to modify and | 3 | * This file contains the functions and defines necessary to modify and |
3 | * use the SuperH page table tree. | 4 | * use the SuperH page table tree. |
4 | * | 5 | * |
5 | * Copyright (C) 1999 Niibe Yutaka | 6 | * Copyright (C) 1999 Niibe Yutaka |
6 | * Copyright (C) 2002 - 2007 Paul Mundt | 7 | * Copyright (C) 2002 - 2007 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General | ||
9 | * Public License. See the file "COPYING" in the main directory of this | ||
10 | * archive for more details. | ||
11 | */ | 8 | */ |
12 | #ifndef __ASM_SH_PGTABLE_H | 9 | #ifndef __ASM_SH_PGTABLE_H |
13 | #define __ASM_SH_PGTABLE_H | 10 | #define __ASM_SH_PGTABLE_H |
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h index 07424968df62..1778bc5971e7 100644 --- a/arch/sh/include/asm/pgtable_64.h +++ b/arch/sh/include/asm/pgtable_64.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_PGTABLE_64_H | 2 | #ifndef __ASM_SH_PGTABLE_64_H |
2 | #define __ASM_SH_PGTABLE_64_H | 3 | #define __ASM_SH_PGTABLE_64_H |
3 | 4 | ||
@@ -10,10 +11,6 @@ | |||
10 | * Copyright (C) 2000, 2001 Paolo Alberelli | 11 | * Copyright (C) 2000, 2001 Paolo Alberelli |
11 | * Copyright (C) 2003, 2004 Paul Mundt | 12 | * Copyright (C) 2003, 2004 Paul Mundt |
12 | * Copyright (C) 2003, 2004 Richard Curnow | 13 | * Copyright (C) 2003, 2004 Richard Curnow |
13 | * | ||
14 | * This file is subject to the terms and conditions of the GNU General Public | ||
15 | * License. See the file "COPYING" in the main directory of this archive | ||
16 | * for more details. | ||
17 | */ | 14 | */ |
18 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
19 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index f3d7075648d0..53efc9f51ef1 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_PROCESSOR_64_H | 2 | #ifndef __ASM_SH_PROCESSOR_64_H |
2 | #define __ASM_SH_PROCESSOR_64_H | 3 | #define __ASM_SH_PROCESSOR_64_H |
3 | 4 | ||
@@ -7,10 +8,6 @@ | |||
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 8 | * Copyright (C) 2000, 2001 Paolo Alberelli |
8 | * Copyright (C) 2003 Paul Mundt | 9 | * Copyright (C) 2003 Paul Mundt |
9 | * Copyright (C) 2004 Richard Curnow | 10 | * Copyright (C) 2004 Richard Curnow |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
16 | 13 | ||
diff --git a/arch/sh/include/asm/sfp-machine.h b/arch/sh/include/asm/sfp-machine.h index d3c548443f2a..cbc7cf8c97ce 100644 --- a/arch/sh/include/asm/sfp-machine.h +++ b/arch/sh/include/asm/sfp-machine.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* Machine-dependent software floating-point definitions. | 1 | /* SPDX-License-Identifier: GPL-2.0+ |
2 | * | ||
3 | * Machine-dependent software floating-point definitions. | ||
2 | SuperH kernel version. | 4 | SuperH kernel version. |
3 | Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. | 5 | Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. |
4 | This file is part of the GNU C Library. | 6 | This file is part of the GNU C Library. |
@@ -6,21 +8,7 @@ | |||
6 | Jakub Jelinek (jj@ultra.linux.cz), | 8 | Jakub Jelinek (jj@ultra.linux.cz), |
7 | David S. Miller (davem@redhat.com) and | 9 | David S. Miller (davem@redhat.com) and |
8 | Peter Maydell (pmaydell@chiark.greenend.org.uk). | 10 | Peter Maydell (pmaydell@chiark.greenend.org.uk). |
9 | 11 | */ | |
10 | The GNU C Library is free software; you can redistribute it and/or | ||
11 | modify it under the terms of the GNU Library General Public License as | ||
12 | published by the Free Software Foundation; either version 2 of the | ||
13 | License, or (at your option) any later version. | ||
14 | |||
15 | The GNU C Library is distributed in the hope that it will be useful, | ||
16 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | Library General Public License for more details. | ||
19 | |||
20 | You should have received a copy of the GNU Library General Public | ||
21 | License along with the GNU C Library; see the file COPYING.LIB. If | ||
22 | not, write to the Free Software Foundation, Inc., | ||
23 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ | ||
24 | 12 | ||
25 | #ifndef _SFP_MACHINE_H | 13 | #ifndef _SFP_MACHINE_H |
26 | #define _SFP_MACHINE_H | 14 | #define _SFP_MACHINE_H |
diff --git a/arch/sh/include/asm/shmparam.h b/arch/sh/include/asm/shmparam.h index ba1758d90106..6c580a644a78 100644 --- a/arch/sh/include/asm/shmparam.h +++ b/arch/sh/include/asm/shmparam.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/shmparam.h | 3 | * include/asm-sh/shmparam.h |
3 | * | 4 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
5 | * Copyright (C) 2006 Paul Mundt | 6 | * Copyright (C) 2006 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __ASM_SH_SHMPARAM_H | 8 | #ifndef __ASM_SH_SHMPARAM_H |
12 | #define __ASM_SH_SHMPARAM_H | 9 | #define __ASM_SH_SHMPARAM_H |
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h index 580b7ac228b7..35e4839d381e 100644 --- a/arch/sh/include/asm/siu.h +++ b/arch/sh/include/asm/siu.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * platform header for the SIU ASoC driver | 3 | * platform header for the SIU ASoC driver |
3 | * | 4 | * |
4 | * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 5 | * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #ifndef ASM_SIU_H | 8 | #ifndef ASM_SIU_H |
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h index 270ee4d3e25b..3d49985ebf41 100644 --- a/arch/sh/include/asm/spinlock-cas.h +++ b/arch/sh/include/asm/spinlock-cas.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/spinlock-cas.h | 3 | * include/asm-sh/spinlock-cas.h |
3 | * | 4 | * |
4 | * Copyright (C) 2015 SEI | 5 | * Copyright (C) 2015 SEI |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_SPINLOCK_CAS_H | 7 | #ifndef __ASM_SH_SPINLOCK_CAS_H |
11 | #define __ASM_SH_SPINLOCK_CAS_H | 8 | #define __ASM_SH_SPINLOCK_CAS_H |
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h index 715595de286a..786ee0fde3b0 100644 --- a/arch/sh/include/asm/spinlock-llsc.h +++ b/arch/sh/include/asm/spinlock-llsc.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/spinlock-llsc.h | 3 | * include/asm-sh/spinlock-llsc.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2006, 2007 Akio Idehara | 6 | * Copyright (C) 2006, 2007 Akio Idehara |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __ASM_SH_SPINLOCK_LLSC_H | 8 | #ifndef __ASM_SH_SPINLOCK_LLSC_H |
12 | #define __ASM_SH_SPINLOCK_LLSC_H | 9 | #define __ASM_SH_SPINLOCK_LLSC_H |
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index c2c61ea6a8e2..fa6801f63551 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/spinlock.h | 3 | * include/asm-sh/spinlock.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2006, 2007 Akio Idehara | 6 | * Copyright (C) 2006, 2007 Akio Idehara |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __ASM_SH_SPINLOCK_H | 8 | #ifndef __ASM_SH_SPINLOCK_H |
12 | #define __ASM_SH_SPINLOCK_H | 9 | #define __ASM_SH_SPINLOCK_H |
diff --git a/arch/sh/include/asm/string_32.h b/arch/sh/include/asm/string_32.h index 55f8db6bc1d7..3558b1d7123e 100644 --- a/arch/sh/include/asm/string_32.h +++ b/arch/sh/include/asm/string_32.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_STRING_H | 2 | #ifndef __ASM_SH_STRING_H |
2 | #define __ASM_SH_STRING_H | 3 | #define __ASM_SH_STRING_H |
3 | 4 | ||
diff --git a/arch/sh/include/asm/switch_to.h b/arch/sh/include/asm/switch_to.h index bcd722fc8347..9eec80ab5aa2 100644 --- a/arch/sh/include/asm/switch_to.h +++ b/arch/sh/include/asm/switch_to.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2000, 2001 Paolo Alberelli | 3 | * Copyright (C) 2000, 2001 Paolo Alberelli |
3 | * Copyright (C) 2003 Paul Mundt | 4 | * Copyright (C) 2003 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_SWITCH_TO_H | 7 | #ifndef __ASM_SH_SWITCH_TO_H |
11 | #define __ASM_SH_SWITCH_TO_H | 8 | #define __ASM_SH_SWITCH_TO_H |
diff --git a/arch/sh/include/asm/switch_to_64.h b/arch/sh/include/asm/switch_to_64.h index ba3129d6bc21..2dbf2311669f 100644 --- a/arch/sh/include/asm/switch_to_64.h +++ b/arch/sh/include/asm/switch_to_64.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2000, 2001 Paolo Alberelli | 3 | * Copyright (C) 2000, 2001 Paolo Alberelli |
3 | * Copyright (C) 2003 Paul Mundt | 4 | * Copyright (C) 2003 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_SWITCH_TO_64_H | 7 | #ifndef __ASM_SH_SWITCH_TO_64_H |
11 | #define __ASM_SH_SWITCH_TO_64_H | 8 | #define __ASM_SH_SWITCH_TO_64_H |
diff --git a/arch/sh/include/asm/tlb_64.h b/arch/sh/include/asm/tlb_64.h index ef0ae2a28f23..59fa0a23dad7 100644 --- a/arch/sh/include/asm/tlb_64.h +++ b/arch/sh/include/asm/tlb_64.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/tlb_64.h | 3 | * include/asm-sh/tlb_64.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_TLB_64_H | 7 | #ifndef __ASM_SH_TLB_64_H |
11 | #define __ASM_SH_TLB_64_H | 8 | #define __ASM_SH_TLB_64_H |
diff --git a/arch/sh/include/asm/traps_64.h b/arch/sh/include/asm/traps_64.h index ef5eff919449..f28db6dfbe45 100644 --- a/arch/sh/include/asm/traps_64.h +++ b/arch/sh/include/asm/traps_64.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2000, 2001 Paolo Alberelli | 3 | * Copyright (C) 2000, 2001 Paolo Alberelli |
3 | * Copyright (C) 2003 Paul Mundt | 4 | * Copyright (C) 2003 Paul Mundt |
4 | * Copyright (C) 2004 Richard Curnow | 5 | * Copyright (C) 2004 Richard Curnow |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_TRAPS_64_H | 7 | #ifndef __ASM_SH_TRAPS_64_H |
11 | #define __ASM_SH_TRAPS_64_H | 8 | #define __ASM_SH_TRAPS_64_H |
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index ca5073dd4596..0c19d02dc566 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_UACCESS_64_H | 2 | #ifndef __ASM_SH_UACCESS_64_H |
2 | #define __ASM_SH_UACCESS_64_H | 3 | #define __ASM_SH_UACCESS_64_H |
3 | 4 | ||
@@ -15,10 +16,6 @@ | |||
15 | * MIPS implementation version 1.15 by | 16 | * MIPS implementation version 1.15 by |
16 | * Copyright (C) 1996, 1997, 1998 by Ralf Baechle | 17 | * Copyright (C) 1996, 1997, 1998 by Ralf Baechle |
17 | * and i386 version. | 18 | * and i386 version. |
18 | * | ||
19 | * This file is subject to the terms and conditions of the GNU General Public | ||
20 | * License. See the file "COPYING" in the main directory of this archive | ||
21 | * for more details. | ||
22 | */ | 19 | */ |
23 | 20 | ||
24 | #define __get_user_size(x,ptr,size,retval) \ | 21 | #define __get_user_size(x,ptr,size,retval) \ |
diff --git a/arch/sh/include/asm/vga.h b/arch/sh/include/asm/vga.h index 06a5de8ace1a..089fbdc6c0b1 100644 --- a/arch/sh/include/asm/vga.h +++ b/arch/sh/include/asm/vga.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_VGA_H | 2 | #ifndef __ASM_SH_VGA_H |
2 | #define __ASM_SH_VGA_H | 3 | #define __ASM_SH_VGA_H |
3 | 4 | ||
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index 85a7aca7fb8f..cecd0fc507f9 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h | |||
@@ -1,14 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0+ |
2 | * | ||
2 | * include/asm-sh/watchdog.h | 3 | * include/asm-sh/watchdog.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2009 Siemens AG | 6 | * Copyright (C) 2009 Siemens AG |
6 | * Copyright (C) 2009 Valentin Sitdikov | 7 | * Copyright (C) 2009 Valentin Sitdikov |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | 8 | */ |
13 | #ifndef __ASM_SH_WATCHDOG_H | 9 | #ifndef __ASM_SH_WATCHDOG_H |
14 | #define __ASM_SH_WATCHDOG_H | 10 | #define __ASM_SH_WATCHDOG_H |
diff --git a/arch/sh/include/cpu-common/cpu/addrspace.h b/arch/sh/include/cpu-common/cpu/addrspace.h index 2b9ab93efa4e..d8bf5d7d2fdf 100644 --- a/arch/sh/include/cpu-common/cpu/addrspace.h +++ b/arch/sh/include/cpu-common/cpu/addrspace.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Definitions for the address spaces of the SH-2 CPUs. | 3 | * Definitions for the address spaces of the SH-2 CPUs. |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2_ADDRSPACE_H | 7 | #ifndef __ASM_CPU_SH2_ADDRSPACE_H |
11 | #define __ASM_CPU_SH2_ADDRSPACE_H | 8 | #define __ASM_CPU_SH2_ADDRSPACE_H |
diff --git a/arch/sh/include/cpu-common/cpu/mmu_context.h b/arch/sh/include/cpu-common/cpu/mmu_context.h index beeb299e01ec..cef3a30dbf97 100644 --- a/arch/sh/include/cpu-common/cpu/mmu_context.h +++ b/arch/sh/include/cpu-common/cpu/mmu_context.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2/mmu_context.h | 3 | * include/asm-sh/cpu-sh2/mmu_context.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2_MMU_CONTEXT_H | 7 | #ifndef __ASM_CPU_SH2_MMU_CONTEXT_H |
11 | #define __ASM_CPU_SH2_MMU_CONTEXT_H | 8 | #define __ASM_CPU_SH2_MMU_CONTEXT_H |
diff --git a/arch/sh/include/cpu-common/cpu/pfc.h b/arch/sh/include/cpu-common/cpu/pfc.h index e538813286a8..879d2c9da537 100644 --- a/arch/sh/include/cpu-common/cpu/pfc.h +++ b/arch/sh/include/cpu-common/cpu/pfc.h | |||
@@ -1,16 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * SH Pin Function Control Initialization | 3 | * SH Pin Function Control Initialization |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | 6 | */ |
15 | 7 | ||
16 | #ifndef __ARCH_SH_CPU_PFC_H__ | 8 | #ifndef __ARCH_SH_CPU_PFC_H__ |
diff --git a/arch/sh/include/cpu-common/cpu/timer.h b/arch/sh/include/cpu-common/cpu/timer.h index a39c241e8195..af51438755e0 100644 --- a/arch/sh/include/cpu-common/cpu/timer.h +++ b/arch/sh/include/cpu-common/cpu/timer.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_CPU_SH2_TIMER_H | 2 | #ifndef __ASM_CPU_SH2_TIMER_H |
2 | #define __ASM_CPU_SH2_TIMER_H | 3 | #define __ASM_CPU_SH2_TIMER_H |
3 | 4 | ||
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h index aa1b2b9088a7..070aa9f50d3f 100644 --- a/arch/sh/include/cpu-sh2/cpu/cache.h +++ b/arch/sh/include/cpu-sh2/cpu/cache.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2/cache.h | 3 | * include/asm-sh/cpu-sh2/cache.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2_CACHE_H | 7 | #ifndef __ASM_CPU_SH2_CACHE_H |
11 | #define __ASM_CPU_SH2_CACHE_H | 8 | #define __ASM_CPU_SH2_CACHE_H |
diff --git a/arch/sh/include/cpu-sh2/cpu/freq.h b/arch/sh/include/cpu-sh2/cpu/freq.h index 31de475da70b..fb2e5d2831bc 100644 --- a/arch/sh/include/cpu-sh2/cpu/freq.h +++ b/arch/sh/include/cpu-sh2/cpu/freq.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2/freq.h | 3 | * include/asm-sh/cpu-sh2/freq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Yoshinori Sato | 5 | * Copyright (C) 2006 Yoshinori Sato |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2_FREQ_H | 7 | #ifndef __ASM_CPU_SH2_FREQ_H |
11 | #define __ASM_CPU_SH2_FREQ_H | 8 | #define __ASM_CPU_SH2_FREQ_H |
diff --git a/arch/sh/include/cpu-sh2/cpu/watchdog.h b/arch/sh/include/cpu-sh2/cpu/watchdog.h index 1eab8aa63a6d..141fe296d751 100644 --- a/arch/sh/include/cpu-sh2/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh2/cpu/watchdog.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2/watchdog.h | 3 | * include/asm-sh/cpu-sh2/watchdog.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2_WATCHDOG_H | 7 | #ifndef __ASM_CPU_SH2_WATCHDOG_H |
11 | #define __ASM_CPU_SH2_WATCHDOG_H | 8 | #define __ASM_CPU_SH2_WATCHDOG_H |
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h index b27ce92cb600..06efb233eb35 100644 --- a/arch/sh/include/cpu-sh2a/cpu/cache.h +++ b/arch/sh/include/cpu-sh2a/cpu/cache.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2a/cache.h | 3 | * include/asm-sh/cpu-sh2a/cache.h |
3 | * | 4 | * |
4 | * Copyright (C) 2004 Paul Mundt | 5 | * Copyright (C) 2004 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2A_CACHE_H | 7 | #ifndef __ASM_CPU_SH2A_CACHE_H |
11 | #define __ASM_CPU_SH2A_CACHE_H | 8 | #define __ASM_CPU_SH2A_CACHE_H |
diff --git a/arch/sh/include/cpu-sh2a/cpu/freq.h b/arch/sh/include/cpu-sh2a/cpu/freq.h index 830fd43b6cdc..fb0813f47043 100644 --- a/arch/sh/include/cpu-sh2a/cpu/freq.h +++ b/arch/sh/include/cpu-sh2a/cpu/freq.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh2a/freq.h | 3 | * include/asm-sh/cpu-sh2a/freq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Yoshinori Sato | 5 | * Copyright (C) 2006 Yoshinori Sato |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH2A_FREQ_H | 7 | #ifndef __ASM_CPU_SH2A_FREQ_H |
11 | #define __ASM_CPU_SH2A_FREQ_H | 8 | #define __ASM_CPU_SH2A_FREQ_H |
diff --git a/arch/sh/include/cpu-sh2a/cpu/watchdog.h b/arch/sh/include/cpu-sh2a/cpu/watchdog.h index e7e8259e468c..8f932b733c67 100644 --- a/arch/sh/include/cpu-sh2a/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh2a/cpu/watchdog.h | |||
@@ -1 +1,2 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #include <cpu-sh2/cpu/watchdog.h> | 2 | #include <cpu-sh2/cpu/watchdog.h> |
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h index 29700fd88c75..f57124826943 100644 --- a/arch/sh/include/cpu-sh3/cpu/cache.h +++ b/arch/sh/include/cpu-sh3/cpu/cache.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh3/cache.h | 3 | * include/asm-sh/cpu-sh3/cache.h |
3 | * | 4 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH3_CACHE_H | 7 | #ifndef __ASM_CPU_SH3_CACHE_H |
11 | #define __ASM_CPU_SH3_CACHE_H | 8 | #define __ASM_CPU_SH3_CACHE_H |
diff --git a/arch/sh/include/cpu-sh3/cpu/dma-register.h b/arch/sh/include/cpu-sh3/cpu/dma-register.h index 2349e488c9a6..c0f921fb4edc 100644 --- a/arch/sh/include/cpu-sh3/cpu/dma-register.h +++ b/arch/sh/include/cpu-sh3/cpu/dma-register.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * SH3 CPU-specific DMA definitions, used by both DMA drivers | 3 | * SH3 CPU-specific DMA definitions, used by both DMA drivers |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 5 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | 6 | */ |
10 | #ifndef CPU_DMA_REGISTER_H | 7 | #ifndef CPU_DMA_REGISTER_H |
11 | #define CPU_DMA_REGISTER_H | 8 | #define CPU_DMA_REGISTER_H |
diff --git a/arch/sh/include/cpu-sh3/cpu/freq.h b/arch/sh/include/cpu-sh3/cpu/freq.h index 53c62302b2e3..7290f02b7173 100644 --- a/arch/sh/include/cpu-sh3/cpu/freq.h +++ b/arch/sh/include/cpu-sh3/cpu/freq.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh3/freq.h | 3 | * include/asm-sh/cpu-sh3/freq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH3_FREQ_H | 7 | #ifndef __ASM_CPU_SH3_FREQ_H |
11 | #define __ASM_CPU_SH3_FREQ_H | 8 | #define __ASM_CPU_SH3_FREQ_H |
diff --git a/arch/sh/include/cpu-sh3/cpu/gpio.h b/arch/sh/include/cpu-sh3/cpu/gpio.h index 9a22b882f3dc..aeb0588ace98 100644 --- a/arch/sh/include/cpu-sh3/cpu/gpio.h +++ b/arch/sh/include/cpu-sh3/cpu/gpio.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh3/gpio.h | 3 | * include/asm-sh/cpu-sh3/gpio.h |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Markus Brunner, Mark Jonas | 5 | * Copyright (C) 2007 Markus Brunner, Mark Jonas |
5 | * | 6 | * |
6 | * Addresses for the Pin Function Controller | 7 | * Addresses for the Pin Function Controller |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #ifndef _CPU_SH3_GPIO_H | 9 | #ifndef _CPU_SH3_GPIO_H |
13 | #define _CPU_SH3_GPIO_H | 10 | #define _CPU_SH3_GPIO_H |
diff --git a/arch/sh/include/cpu-sh3/cpu/mmu_context.h b/arch/sh/include/cpu-sh3/cpu/mmu_context.h index 0c7c735ea82a..ead9a6f72113 100644 --- a/arch/sh/include/cpu-sh3/cpu/mmu_context.h +++ b/arch/sh/include/cpu-sh3/cpu/mmu_context.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh3/mmu_context.h | 3 | * include/asm-sh/cpu-sh3/mmu_context.h |
3 | * | 4 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH3_MMU_CONTEXT_H | 7 | #ifndef __ASM_CPU_SH3_MMU_CONTEXT_H |
11 | #define __ASM_CPU_SH3_MMU_CONTEXT_H | 8 | #define __ASM_CPU_SH3_MMU_CONTEXT_H |
diff --git a/arch/sh/include/cpu-sh3/cpu/watchdog.h b/arch/sh/include/cpu-sh3/cpu/watchdog.h index 4ee0347298d8..9d7e9d986809 100644 --- a/arch/sh/include/cpu-sh3/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh3/cpu/watchdog.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh3/watchdog.h | 3 | * include/asm-sh/cpu-sh3/watchdog.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH3_WATCHDOG_H | 7 | #ifndef __ASM_CPU_SH3_WATCHDOG_H |
11 | #define __ASM_CPU_SH3_WATCHDOG_H | 8 | #define __ASM_CPU_SH3_WATCHDOG_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/addrspace.h b/arch/sh/include/cpu-sh4/cpu/addrspace.h index d51da25da72c..f006c9489f5a 100644 --- a/arch/sh/include/cpu-sh4/cpu/addrspace.h +++ b/arch/sh/include/cpu-sh4/cpu/addrspace.h | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | 2 | * |
6 | * Copyright (C) 1999 by Kaz Kojima | 3 | * Copyright (C) 1999 by Kaz Kojima |
7 | * | 4 | * |
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h index 92c4cd119b66..72b4d13da127 100644 --- a/arch/sh/include/cpu-sh4/cpu/cache.h +++ b/arch/sh/include/cpu-sh4/cpu/cache.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh4/cache.h | 3 | * include/asm-sh/cpu-sh4/cache.h |
3 | * | 4 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH4_CACHE_H | 7 | #ifndef __ASM_CPU_SH4_CACHE_H |
11 | #define __ASM_CPU_SH4_CACHE_H | 8 | #define __ASM_CPU_SH4_CACHE_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h index 9cd81e54056a..53f7ab990d88 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-register.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * SH4 CPU-specific DMA definitions, used by both DMA drivers | 3 | * SH4 CPU-specific DMA definitions, used by both DMA drivers |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 5 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | 6 | */ |
10 | #ifndef CPU_DMA_REGISTER_H | 7 | #ifndef CPU_DMA_REGISTER_H |
11 | #define CPU_DMA_REGISTER_H | 8 | #define CPU_DMA_REGISTER_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/fpu.h b/arch/sh/include/cpu-sh4/cpu/fpu.h index febef7342528..29f451bfef19 100644 --- a/arch/sh/include/cpu-sh4/cpu/fpu.h +++ b/arch/sh/include/cpu-sh4/cpu/fpu.h | |||
@@ -1,12 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h | 3 | * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h |
3 | * | 4 | * |
4 | * Copyright (C) 2006 STMicroelectronics Limited | 5 | * Copyright (C) 2006 STMicroelectronics Limited |
5 | * Author: Carl Shaw <carl.shaw@st.com> | 6 | * Author: Carl Shaw <carl.shaw@st.com> |
6 | * | 7 | * |
7 | * May be copied or modified under the terms of the GNU General Public | ||
8 | * License Version 2. See linux/COPYING for more information. | ||
9 | * | ||
10 | * Definitions for SH4 FPU operations | 8 | * Definitions for SH4 FPU operations |
11 | */ | 9 | */ |
12 | 10 | ||
diff --git a/arch/sh/include/cpu-sh4/cpu/freq.h b/arch/sh/include/cpu-sh4/cpu/freq.h index 1631fc238e6f..662f0f30e106 100644 --- a/arch/sh/include/cpu-sh4/cpu/freq.h +++ b/arch/sh/include/cpu-sh4/cpu/freq.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh4/freq.h | 3 | * include/asm-sh/cpu-sh4/freq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH4_FREQ_H | 7 | #ifndef __ASM_CPU_SH4_FREQ_H |
11 | #define __ASM_CPU_SH4_FREQ_H | 8 | #define __ASM_CPU_SH4_FREQ_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h index e46ec708105a..421b56d5c595 100644 --- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h +++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh4/mmu_context.h | 3 | * include/asm-sh/cpu-sh4/mmu_context.h |
3 | * | 4 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_CPU_SH4_MMU_CONTEXT_H | 7 | #ifndef __ASM_CPU_SH4_MMU_CONTEXT_H |
11 | #define __ASM_CPU_SH4_MMU_CONTEXT_H | 8 | #define __ASM_CPU_SH4_MMU_CONTEXT_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h index 96b8cb1f754a..8f9bfbf3cdb1 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7786.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h | |||
@@ -1,14 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * SH7786 Pinmux | 3 | * SH7786 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2008, 2009 Renesas Solutions Corp. | 5 | * Copyright (C) 2008, 2009 Renesas Solutions Corp. |
5 | * Kuninori Morimoto <morimoto.kuninori@renesas.com> | 6 | * Kuninori Morimoto <morimoto.kuninori@renesas.com> |
6 | * | 7 | * |
7 | * Based on sh7785.h | 8 | * Based on sh7785.h |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #ifndef __CPU_SH7786_H__ | 11 | #ifndef __CPU_SH7786_H__ |
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h index 74716ba2dc3c..81966e41fc21 100644 --- a/arch/sh/include/cpu-sh4/cpu/sq.h +++ b/arch/sh/include/cpu-sh4/cpu/sq.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh4/sq.h | 3 | * include/asm-sh/cpu-sh4/sq.h |
3 | * | 4 | * |
4 | * Copyright (C) 2001, 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2001, 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2001, 2002 M. R. Brown | 6 | * Copyright (C) 2001, 2002 M. R. Brown |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __ASM_CPU_SH4_SQ_H | 8 | #ifndef __ASM_CPU_SH4_SQ_H |
12 | #define __ASM_CPU_SH4_SQ_H | 9 | #define __ASM_CPU_SH4_SQ_H |
diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h index 7f62b9380938..fa7bcb398b8c 100644 --- a/arch/sh/include/cpu-sh4/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/cpu-sh4/watchdog.h | 3 | * include/asm-sh/cpu-sh4/watchdog.h |
3 | * | 4 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2009 Siemens AG | 6 | * Copyright (C) 2009 Siemens AG |
6 | * Copyright (C) 2009 Sitdikov Valentin | 7 | * Copyright (C) 2009 Sitdikov Valentin |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #ifndef __ASM_CPU_SH4_WATCHDOG_H | 9 | #ifndef __ASM_CPU_SH4_WATCHDOG_H |
13 | #define __ASM_CPU_SH4_WATCHDOG_H | 10 | #define __ASM_CPU_SH4_WATCHDOG_H |
diff --git a/arch/sh/include/cpu-sh5/cpu/cache.h b/arch/sh/include/cpu-sh5/cpu/cache.h index ed050ab526f2..ef49538f386f 100644 --- a/arch/sh/include/cpu-sh5/cpu/cache.h +++ b/arch/sh/include/cpu-sh5/cpu/cache.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_CPU_SH5_CACHE_H | 2 | #ifndef __ASM_SH_CPU_SH5_CACHE_H |
2 | #define __ASM_SH_CPU_SH5_CACHE_H | 3 | #define __ASM_SH_CPU_SH5_CACHE_H |
3 | 4 | ||
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 8 | * Copyright (C) 2000, 2001 Paolo Alberelli |
8 | * Copyright (C) 2003, 2004 Paul Mundt | 9 | * Copyright (C) 2003, 2004 Paul Mundt |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | 11 | ||
15 | #define L1_CACHE_SHIFT 5 | 12 | #define L1_CACHE_SHIFT 5 |
diff --git a/arch/sh/include/cpu-sh5/cpu/irq.h b/arch/sh/include/cpu-sh5/cpu/irq.h index 0ccf257a72d1..4aa6ac54b9d6 100644 --- a/arch/sh/include/cpu-sh5/cpu/irq.h +++ b/arch/sh/include/cpu-sh5/cpu/irq.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_CPU_SH5_IRQ_H | 2 | #ifndef __ASM_SH_CPU_SH5_IRQ_H |
2 | #define __ASM_SH_CPU_SH5_IRQ_H | 3 | #define __ASM_SH_CPU_SH5_IRQ_H |
3 | 4 | ||
@@ -5,10 +6,6 @@ | |||
5 | * include/asm-sh/cpu-sh5/irq.h | 6 | * include/asm-sh/cpu-sh5/irq.h |
6 | * | 7 | * |
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 8 | * Copyright (C) 2000, 2001 Paolo Alberelli |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | 11 | ||
diff --git a/arch/sh/include/cpu-sh5/cpu/registers.h b/arch/sh/include/cpu-sh5/cpu/registers.h index 6664ea6f1566..372c1e1978b3 100644 --- a/arch/sh/include/cpu-sh5/cpu/registers.h +++ b/arch/sh/include/cpu-sh5/cpu/registers.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_CPU_SH5_REGISTERS_H | 2 | #ifndef __ASM_SH_CPU_SH5_REGISTERS_H |
2 | #define __ASM_SH_CPU_SH5_REGISTERS_H | 3 | #define __ASM_SH_CPU_SH5_REGISTERS_H |
3 | 4 | ||
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 8 | * Copyright (C) 2000, 2001 Paolo Alberelli |
8 | * Copyright (C) 2004 Richard Curnow | 9 | * Copyright (C) 2004 Richard Curnow |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | 11 | ||
15 | #ifdef __ASSEMBLY__ | 12 | #ifdef __ASSEMBLY__ |
diff --git a/arch/sh/include/mach-common/mach/hp6xx.h b/arch/sh/include/mach-common/mach/hp6xx.h index 6aaaf8596e6a..71241f0d02a1 100644 --- a/arch/sh/include/mach-common/mach/hp6xx.h +++ b/arch/sh/include/mach-common/mach/hp6xx.h | |||
@@ -1,14 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 | ||
2 | * | ||
3 | * Copyright (C) 2003, 2004, 2005 Andriy Skulysh | ||
4 | */ | ||
1 | #ifndef __ASM_SH_HP6XX_H | 5 | #ifndef __ASM_SH_HP6XX_H |
2 | #define __ASM_SH_HP6XX_H | 6 | #define __ASM_SH_HP6XX_H |
3 | 7 | ||
4 | /* | ||
5 | * Copyright (C) 2003, 2004, 2005 Andriy Skulysh | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/sh_intc.h> | 8 | #include <linux/sh_intc.h> |
13 | 9 | ||
14 | #define HP680_BTN_IRQ evt2irq(0x600) /* IRQ0_IRQ */ | 10 | #define HP680_BTN_IRQ evt2irq(0x600) /* IRQ0_IRQ */ |
diff --git a/arch/sh/include/mach-common/mach/lboxre2.h b/arch/sh/include/mach-common/mach/lboxre2.h index 3a4dcc5c74ee..5b6bb8e3cf28 100644 --- a/arch/sh/include/mach-common/mach/lboxre2.h +++ b/arch/sh/include/mach-common/mach/lboxre2.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_LBOXRE2_H | 2 | #ifndef __ASM_SH_LBOXRE2_H |
2 | #define __ASM_SH_LBOXRE2_H | 3 | #define __ASM_SH_LBOXRE2_H |
3 | 4 | ||
@@ -5,11 +6,6 @@ | |||
5 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 6 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
6 | * | 7 | * |
7 | * NTT COMWARE L-BOX RE2 support | 8 | * NTT COMWARE L-BOX RE2 support |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | * | ||
13 | */ | 9 | */ |
14 | #include <linux/sh_intc.h> | 10 | #include <linux/sh_intc.h> |
15 | 11 | ||
diff --git a/arch/sh/include/mach-common/mach/magicpanelr2.h b/arch/sh/include/mach-common/mach/magicpanelr2.h index eb0cf205176f..c2d218cea74b 100644 --- a/arch/sh/include/mach-common/mach/magicpanelr2.h +++ b/arch/sh/include/mach-common/mach/magicpanelr2.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/magicpanelr2.h | 3 | * include/asm-sh/magicpanelr2.h |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Markus Brunner, Mark Jonas | 5 | * Copyright (C) 2007 Markus Brunner, Mark Jonas |
5 | * | 6 | * |
6 | * I/O addresses and bitmasks for Magic Panel Release 2 board | 7 | * I/O addresses and bitmasks for Magic Panel Release 2 board |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #ifndef __ASM_SH_MAGICPANELR2_H | 10 | #ifndef __ASM_SH_MAGICPANELR2_H |
diff --git a/arch/sh/include/mach-common/mach/mangle-port.h b/arch/sh/include/mach-common/mach/mangle-port.h index 4ca1769a0f12..dd5a761a52ee 100644 --- a/arch/sh/include/mach-common/mach/mangle-port.h +++ b/arch/sh/include/mach-common/mach/mangle-port.h | |||
@@ -1,9 +1,6 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * SH version cribbed from the MIPS copy: | ||
3 | * | 2 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 3 | * SH version cribbed from the MIPS copy: |
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | 4 | * |
8 | * Copyright (C) 2003, 2004 Ralf Baechle | 5 | * Copyright (C) 2003, 2004 Ralf Baechle |
9 | */ | 6 | */ |
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h index dcb05fa8c164..0e2f9ab11976 100644 --- a/arch/sh/include/mach-common/mach/microdev.h +++ b/arch/sh/include/mach-common/mach/microdev.h | |||
@@ -1,12 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * linux/include/asm-sh/microdev.h | 3 | * linux/include/asm-sh/microdev.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) | 5 | * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) |
5 | * | 6 | * |
6 | * Definitions for the SuperH SH4-202 MicroDev board. | 7 | * Definitions for the SuperH SH4-202 MicroDev board. |
7 | * | ||
8 | * May be copied or modified under the terms of the GNU General Public | ||
9 | * License. See linux/COPYING for more information. | ||
10 | */ | 8 | */ |
11 | #ifndef __ASM_SH_MICRODEV_H | 9 | #ifndef __ASM_SH_MICRODEV_H |
12 | #define __ASM_SH_MICRODEV_H | 10 | #define __ASM_SH_MICRODEV_H |
diff --git a/arch/sh/include/mach-common/mach/sdk7780.h b/arch/sh/include/mach-common/mach/sdk7780.h index ce64e02e9b50..a27dbe4184b3 100644 --- a/arch/sh/include/mach-common/mach/sdk7780.h +++ b/arch/sh/include/mach-common/mach/sdk7780.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_RENESAS_SDK7780_H | 2 | #ifndef __ASM_SH_RENESAS_SDK7780_H |
2 | #define __ASM_SH_RENESAS_SDK7780_H | 3 | #define __ASM_SH_RENESAS_SDK7780_H |
3 | 4 | ||
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Renesas Solutions SH7780 SDK Support | 8 | * Renesas Solutions SH7780 SDK Support |
8 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> | 9 | * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/sh_intc.h> | 11 | #include <linux/sh_intc.h> |
15 | #include <asm/addrspace.h> | 12 | #include <asm/addrspace.h> |
diff --git a/arch/sh/include/mach-common/mach/secureedge5410.h b/arch/sh/include/mach-common/mach/secureedge5410.h index 3653b9a4bacc..dfc68aa91003 100644 --- a/arch/sh/include/mach-common/mach/secureedge5410.h +++ b/arch/sh/include/mach-common/mach/secureedge5410.h | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/snapgear.h | 3 | * include/asm-sh/snapgear.h |
3 | * | 4 | * |
4 | * Modified version of io_se.h for the snapgear-specific functions. | 5 | * Modified version of io_se.h for the snapgear-specific functions. |
5 | * | 6 | * |
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * IO functions for a SnapGear | 7 | * IO functions for a SnapGear |
10 | */ | 8 | */ |
11 | 9 | ||
diff --git a/arch/sh/include/mach-common/mach/sh7763rdp.h b/arch/sh/include/mach-common/mach/sh7763rdp.h index 8750cc852977..301f85a1c044 100644 --- a/arch/sh/include/mach-common/mach/sh7763rdp.h +++ b/arch/sh/include/mach-common/mach/sh7763rdp.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_SH7763RDP_H | 2 | #ifndef __ASM_SH_SH7763RDP_H |
2 | #define __ASM_SH_SH7763RDP_H | 3 | #define __ASM_SH_SH7763RDP_H |
3 | 4 | ||
@@ -6,11 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copyright (C) 2008 Renesas Solutions | 8 | * Copyright (C) 2008 Renesas Solutions |
8 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 9 | * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | * | ||
14 | */ | 10 | */ |
15 | #include <asm/addrspace.h> | 11 | #include <asm/addrspace.h> |
16 | 12 | ||
diff --git a/arch/sh/include/mach-dreamcast/mach/dma.h b/arch/sh/include/mach-dreamcast/mach/dma.h index 1dbfdf701c9d..a773a763843a 100644 --- a/arch/sh/include/mach-dreamcast/mach/dma.h +++ b/arch/sh/include/mach-dreamcast/mach/dma.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/dreamcast/dma.h | 3 | * include/asm-sh/dreamcast/dma.h |
3 | * | 4 | * |
4 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #ifndef __ASM_SH_DREAMCAST_DMA_H | 7 | #ifndef __ASM_SH_DREAMCAST_DMA_H |
11 | #define __ASM_SH_DREAMCAST_DMA_H | 8 | #define __ASM_SH_DREAMCAST_DMA_H |
diff --git a/arch/sh/include/mach-dreamcast/mach/pci.h b/arch/sh/include/mach-dreamcast/mach/pci.h index 0314d975e626..c037c1ec63a9 100644 --- a/arch/sh/include/mach-dreamcast/mach/pci.h +++ b/arch/sh/include/mach-dreamcast/mach/pci.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * include/asm-sh/dreamcast/pci.h | 3 | * include/asm-sh/dreamcast/pci.h |
3 | * | 4 | * |
4 | * Copyright (C) 2001, 2002 M. R. Brown | 5 | * Copyright (C) 2001, 2002 M. R. Brown |
5 | * Copyright (C) 2002, 2003 Paul Mundt | 6 | * Copyright (C) 2002, 2003 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #ifndef __ASM_SH_DREAMCAST_PCI_H | 8 | #ifndef __ASM_SH_DREAMCAST_PCI_H |
12 | #define __ASM_SH_DREAMCAST_PCI_H | 9 | #define __ASM_SH_DREAMCAST_PCI_H |
diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h index 59effd1ed3e1..ed69ce7f2030 100644 --- a/arch/sh/include/mach-dreamcast/mach/sysasic.h +++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* include/asm-sh/dreamcast/sysasic.h | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
3 | * include/asm-sh/dreamcast/sysasic.h | ||
2 | * | 4 | * |
3 | * Definitions for the Dreamcast System ASIC and related peripherals. | 5 | * Definitions for the Dreamcast System ASIC and related peripherals. |
4 | * | 6 | * |
@@ -6,9 +8,6 @@ | |||
6 | * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> | 8 | * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> |
7 | * | 9 | * |
8 | * This file is part of the LinuxDC project (www.linuxdc.org) | 10 | * This file is part of the LinuxDC project (www.linuxdc.org) |
9 | * | ||
10 | * Released under the terms of the GNU GPL v2.0. | ||
11 | * | ||
12 | */ | 11 | */ |
13 | #ifndef __ASM_SH_DREAMCAST_SYSASIC_H | 12 | #ifndef __ASM_SH_DREAMCAST_SYSASIC_H |
14 | #define __ASM_SH_DREAMCAST_SYSASIC_H | 13 | #define __ASM_SH_DREAMCAST_SYSASIC_H |
diff --git a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt index cc737b807334..2d685cc2d54c 100644 --- a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt +++ b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt | |||
@@ -1,3 +1,4 @@ | |||
1 | LIST "SPDX-License-Identifier: GPL-2.0" | ||
1 | LIST "partner-jet-setup.txt" | 2 | LIST "partner-jet-setup.txt" |
2 | LIST "(C) Copyright 2009 Renesas Solutions Corp" | 3 | LIST "(C) Copyright 2009 Renesas Solutions Corp" |
3 | LIST "Kuninori Morimoto <morimoto.kuninori@renesas.com>" | 4 | LIST "Kuninori Morimoto <morimoto.kuninori@renesas.com>" |
diff --git a/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt b/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt index 3a65503714ee..a67b1926be22 100644 --- a/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt +++ b/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt | |||
@@ -1,3 +1,4 @@ | |||
1 | LIST "SPDX-License-Identifier: GPL-2.0" | ||
1 | LIST "partner-jet-setup.txt - 20090729 Magnus Damm" | 2 | LIST "partner-jet-setup.txt - 20090729 Magnus Damm" |
2 | LIST "set up enough of the kfr2r09 hardware to boot the kernel" | 3 | LIST "set up enough of the kfr2r09 hardware to boot the kernel" |
3 | 4 | ||
diff --git a/arch/sh/include/mach-se/mach/se7721.h b/arch/sh/include/mach-se/mach/se7721.h index eabd0538de44..82226d40faf5 100644 --- a/arch/sh/include/mach-se/mach/se7721.h +++ b/arch/sh/include/mach-se/mach/se7721.h | |||
@@ -1,12 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 2008 Renesas Solutions Corp. | 3 | * Copyright (C) 2008 Renesas Solutions Corp. |
3 | * | 4 | * |
4 | * Hitachi UL SolutionEngine 7721 Support. | 5 | * Hitachi UL SolutionEngine 7721 Support. |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | * | ||
10 | */ | 6 | */ |
11 | 7 | ||
12 | #ifndef __ASM_SH_SE7721_H | 8 | #ifndef __ASM_SH_SE7721_H |
diff --git a/arch/sh/include/mach-se/mach/se7722.h b/arch/sh/include/mach-se/mach/se7722.h index 637e7ac753f8..efb761f9f6e0 100644 --- a/arch/sh/include/mach-se/mach/se7722.h +++ b/arch/sh/include/mach-se/mach/se7722.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_SE7722_H | 2 | #ifndef __ASM_SH_SE7722_H |
2 | #define __ASM_SH_SE7722_H | 3 | #define __ASM_SH_SE7722_H |
3 | 4 | ||
@@ -7,11 +8,6 @@ | |||
7 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 8 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
8 | * | 9 | * |
9 | * Hitachi UL SolutionEngine 7722 Support. | 10 | * Hitachi UL SolutionEngine 7722 Support. |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | * | ||
15 | */ | 11 | */ |
16 | #include <linux/sh_intc.h> | 12 | #include <linux/sh_intc.h> |
17 | #include <asm/addrspace.h> | 13 | #include <asm/addrspace.h> |
diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h index be842dd1ca02..1fe28820dfa9 100644 --- a/arch/sh/include/mach-se/mach/se7724.h +++ b/arch/sh/include/mach-se/mach/se7724.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_SE7724_H | 2 | #ifndef __ASM_SH_SE7724_H |
2 | #define __ASM_SH_SE7724_H | 3 | #define __ASM_SH_SE7724_H |
3 | 4 | ||
@@ -12,11 +13,6 @@ | |||
12 | * | 13 | * |
13 | * Based on se7722.h | 14 | * Based on se7722.h |
14 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 15 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
15 | * | ||
16 | * This file is subject to the terms and conditions of the GNU General Public | ||
17 | * License. See the file "COPYING" in the main directory of this archive | ||
18 | * for more details. | ||
19 | * | ||
20 | */ | 16 | */ |
21 | #include <linux/sh_intc.h> | 17 | #include <linux/sh_intc.h> |
22 | #include <asm/addrspace.h> | 18 | #include <asm/addrspace.h> |
diff --git a/arch/sh/include/mach-se/mach/se7780.h b/arch/sh/include/mach-se/mach/se7780.h index bde357cf81bd..24f0ac82f8b3 100644 --- a/arch/sh/include/mach-se/mach/se7780.h +++ b/arch/sh/include/mach-se/mach/se7780.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #ifndef __ASM_SH_SE7780_H | 2 | #ifndef __ASM_SH_SE7780_H |
2 | #define __ASM_SH_SE7780_H | 3 | #define __ASM_SH_SE7780_H |
3 | 4 | ||
@@ -7,10 +8,6 @@ | |||
7 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu | 8 | * Copyright (C) 2006,2007 Nobuhiro Iwamatsu |
8 | * | 9 | * |
9 | * Hitachi UL SolutionEngine 7780 Support. | 10 | * Hitachi UL SolutionEngine 7780 Support. |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/sh_intc.h> | 12 | #include <linux/sh_intc.h> |
16 | #include <asm/addrspace.h> | 13 | #include <asm/addrspace.h> |
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index a55e317c1ef2..dcb93543f55d 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ b/arch/sh/include/uapi/asm/Kbuild | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # UAPI Header export list | 2 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 3 | include include/uapi/asm-generic/Kbuild.asm |
3 | 4 | ||
diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h index 552df83f1a49..1170dd2fb998 100644 --- a/arch/sh/include/uapi/asm/setup.h +++ b/arch/sh/include/uapi/asm/setup.h | |||
@@ -1 +1,2 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #include <asm-generic/setup.h> | 2 | #include <asm-generic/setup.h> |
diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h index b9e79bc580dd..f83795fdc0da 100644 --- a/arch/sh/include/uapi/asm/types.h +++ b/arch/sh/include/uapi/asm/types.h | |||
@@ -1 +1,2 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | #include <asm-generic/types.h> | 2 | #include <asm-generic/types.h> |
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index fca9b1e78a63..6fb34410d630 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/clock.c - SuperH clock framework | 3 | * arch/sh/kernel/cpu/clock.c - SuperH clock framework |
3 | * | 4 | * |
@@ -9,10 +10,6 @@ | |||
9 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> | 10 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> |
10 | * | 11 | * |
11 | * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> | 12 | * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> |
12 | * | ||
13 | * This file is subject to the terms and conditions of the GNU General Public | ||
14 | * License. See the file "COPYING" in the main directory of this archive | ||
15 | * for more details. | ||
16 | */ | 13 | */ |
17 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 15 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index c4f01c5c8736..ce7291e12a30 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/init.c | 3 | * arch/sh/kernel/cpu/init.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2002 - 2009 Paul Mundt | 7 | * Copyright (C) 2002 - 2009 Paul Mundt |
7 | * Copyright (C) 2003 Richard Curnow | 8 | * Copyright (C) 2003 Richard Curnow |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile index 3f8e79402d7d..8b91cb96411b 100644 --- a/arch/sh/kernel/cpu/irq/Makefile +++ b/arch/sh/kernel/cpu/irq/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Linux/SuperH CPU-specific IRQ handlers. | 3 | # Makefile for the Linux/SuperH CPU-specific IRQ handlers. |
3 | # | 4 | # |
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c index 9e056a3a0c73..744f903b4df3 100644 --- a/arch/sh/kernel/cpu/irq/intc-sh5.c +++ b/arch/sh/kernel/cpu/irq/intc-sh5.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/irq/intc-sh5.c | 3 | * arch/sh/kernel/cpu/irq/intc-sh5.c |
3 | * | 4 | * |
@@ -9,10 +10,6 @@ | |||
9 | * Per-interrupt selective. IRLM=0 (Fixed priority) is not | 10 | * Per-interrupt selective. IRLM=0 (Fixed priority) is not |
10 | * supported being useless without a cascaded interrupt | 11 | * supported being useless without a cascaded interrupt |
11 | * controller. | 12 | * controller. |
12 | * | ||
13 | * This file is subject to the terms and conditions of the GNU General Public | ||
14 | * License. See the file "COPYING" in the main directory of this archive | ||
15 | * for more details. | ||
16 | */ | 13 | */ |
17 | #include <linux/init.h> | 14 | #include <linux/init.h> |
18 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index 5de6dff5c21b..d41bce71f211 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Interrupt handling for IPR-based IRQ. | 3 | * Interrupt handling for IPR-based IRQ. |
3 | * | 4 | * |
@@ -11,10 +12,6 @@ | |||
11 | * On-chip supporting modules for SH7709/SH7709A/SH7729. | 12 | * On-chip supporting modules for SH7709/SH7709A/SH7729. |
12 | * Hitachi SolutionEngine external I/O: | 13 | * Hitachi SolutionEngine external I/O: |
13 | * MS7709SE01, MS7709ASE01, and MS7750SE01 | 14 | * MS7709SE01, MS7709ASE01, and MS7750SE01 |
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | */ | 15 | */ |
19 | #include <linux/init.h> | 16 | #include <linux/init.h> |
20 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
diff --git a/arch/sh/kernel/cpu/pfc.c b/arch/sh/kernel/cpu/pfc.c index d766564ef7c2..062056ede88d 100644 --- a/arch/sh/kernel/cpu/pfc.c +++ b/arch/sh/kernel/cpu/pfc.c | |||
@@ -1,16 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH Pin Function Control Initialization | 3 | * SH Pin Function Control Initialization |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | 6 | */ |
15 | 7 | ||
16 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile index 904c4283d923..214c3a5b184a 100644 --- a/arch/sh/kernel/cpu/sh2/Makefile +++ b/arch/sh/kernel/cpu/sh2/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Linux/SuperH SH-2 backends. | 3 | # Makefile for the Linux/SuperH SH-2 backends. |
3 | # | 4 | # |
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c index e80252ae5bca..d66d194c7731 100644 --- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2/clock-sh7619.c | 3 | * arch/sh/kernel/cpu/sh2/clock-sh7619.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on clock-sh4.c | 9 | * Based on clock-sh4.c |
9 | * Copyright (C) 2005 Paul Mundt | 10 | * Copyright (C) 2005 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S index 1ee0a6e774c6..0a1c2bf216bc 100644 --- a/arch/sh/kernel/cpu/sh2/entry.S +++ b/arch/sh/kernel/cpu/sh2/entry.S | |||
@@ -1,14 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh2/entry.S | 3 | * arch/sh/kernel/cpu/sh2/entry.S |
3 | * | 4 | * |
4 | * The SH-2 exception entry | 5 | * The SH-2 exception entry |
5 | * | 6 | * |
6 | * Copyright (C) 2005-2008 Yoshinori Sato | 7 | * Copyright (C) 2005-2008 Yoshinori Sato |
7 | * Copyright (C) 2005 AXE,Inc. | 8 | * Copyright (C) 2005 AXE,Inc. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S index 85b0bf81fc1d..dd0cc887a3ca 100644 --- a/arch/sh/kernel/cpu/sh2/ex.S +++ b/arch/sh/kernel/cpu/sh2/ex.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh2/ex.S | 3 | * arch/sh/kernel/cpu/sh2/ex.S |
3 | * | 4 | * |
4 | * The SH-2 exception vector table | 5 | * The SH-2 exception vector table |
5 | * | 6 | * |
6 | * Copyright (C) 2005 Yoshinori Sato | 7 | * Copyright (C) 2005 Yoshinori Sato |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index a5bd03642678..d342ea08843f 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2/probe.c | 3 | * arch/sh/kernel/cpu/sh2/probe.c |
3 | * | 4 | * |
4 | * CPU Subtype Probing for SH-2. | 5 | * CPU Subtype Probing for SH-2. |
5 | * | 6 | * |
6 | * Copyright (C) 2002 Paul Mundt | 7 | * Copyright (C) 2002 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/of_fdt.h> | 10 | #include <linux/of_fdt.h> |
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index d08db08dec38..f5b6841ef7e1 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7619 Setup | 3 | * SH7619 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Yoshinori Sato | 5 | * Copyright (C) 2006 Yoshinori Sato |
5 | * Copyright (C) 2009 Paul Mundt | 6 | * Copyright (C) 2009 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2/smp-j2.c b/arch/sh/kernel/cpu/sh2/smp-j2.c index 6ccd7e4dc008..ae44dc24c455 100644 --- a/arch/sh/kernel/cpu/sh2/smp-j2.c +++ b/arch/sh/kernel/cpu/sh2/smp-j2.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SMP support for J2 processor | 3 | * SMP support for J2 processor |
3 | * | 4 | * |
4 | * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. | 5 | * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c index 532a36c72322..5a5daaafb27a 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 3 | * arch/sh/kernel/cpu/sh2a/clock-sh7201.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on clock-sh4.c | 9 | * Based on clock-sh4.c |
9 | * Copyright (C) 2005 Paul Mundt | 10 | * Copyright (C) 2005 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c index 529f719b6e33..c62053945664 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 3 | * arch/sh/kernel/cpu/sh2a/clock-sh7203.c |
3 | * | 4 | * |
@@ -10,10 +11,6 @@ | |||
10 | * | 11 | * |
11 | * Based on clock-sh4.c | 12 | * Based on clock-sh4.c |
12 | * Copyright (C) 2005 Paul Mundt | 13 | * Copyright (C) 2005 Paul Mundt |
13 | * | ||
14 | * This file is subject to the terms and conditions of the GNU General Public | ||
15 | * License. See the file "COPYING" in the main directory of this archive | ||
16 | * for more details. | ||
17 | */ | 14 | */ |
18 | #include <linux/init.h> | 15 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c index 177789834678..d286d7b918d5 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7206.c | 3 | * arch/sh/kernel/cpu/sh2a/clock-sh7206.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on clock-sh4.c | 9 | * Based on clock-sh4.c |
9 | * Copyright (C) 2005 Paul Mundt | 10 | * Copyright (C) 2005 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7264.c b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c index 7e06e39b0958..d9acc1ed7981 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7264.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7264.c | 3 | * arch/sh/kernel/cpu/sh2a/clock-sh7264.c |
3 | * | 4 | * |
4 | * SH7264 clock framework support | 5 | * SH7264 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2012 Phil Edworthy | 7 | * Copyright (C) 2012 Phil Edworthy |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7269.c b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c index 663a97bed554..c17ab0d76538 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7269.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7269.c | 3 | * arch/sh/kernel/cpu/sh2a/clock-sh7269.c |
3 | * | 4 | * |
4 | * SH7269 clock framework support | 5 | * SH7269 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2012 Phil Edworthy | 7 | * Copyright (C) 2012 Phil Edworthy |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S index da77a8ef4696..9f11fc8b5052 100644 --- a/arch/sh/kernel/cpu/sh2a/entry.S +++ b/arch/sh/kernel/cpu/sh2a/entry.S | |||
@@ -1,14 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh2a/entry.S | 3 | * arch/sh/kernel/cpu/sh2a/entry.S |
3 | * | 4 | * |
4 | * The SH-2A exception entry | 5 | * The SH-2A exception entry |
5 | * | 6 | * |
6 | * Copyright (C) 2008 Yoshinori Sato | 7 | * Copyright (C) 2008 Yoshinori Sato |
7 | * Based on arch/sh/kernel/cpu/sh2/entry.S | 8 | * Based on arch/sh/kernel/cpu/sh2/entry.S |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/ex.S b/arch/sh/kernel/cpu/sh2a/ex.S index 4568066700cf..ed91996287c7 100644 --- a/arch/sh/kernel/cpu/sh2a/ex.S +++ b/arch/sh/kernel/cpu/sh2a/ex.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh2a/ex.S | 3 | * arch/sh/kernel/cpu/sh2a/ex.S |
3 | * | 4 | * |
4 | * The SH-2A exception vector table | 5 | * The SH-2A exception vector table |
5 | * | 6 | * |
6 | * Copyright (C) 2008 Yoshinori Sato | 7 | * Copyright (C) 2008 Yoshinori Sato |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 352f894bece1..74b48db86dd7 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Save/restore floating point context for signal handlers. | 3 | * Save/restore floating point context for signal handlers. |
3 | * | 4 | * |
4 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | 5 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka |
5 | * | 6 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | * | ||
10 | * FIXME! These routines can be optimized in big endian case. | 7 | * FIXME! These routines can be optimized in big endian case. |
11 | */ | 8 | */ |
12 | #include <linux/sched/signal.h> | 9 | #include <linux/sched/signal.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c index 72aa61c81e48..c509081d90b9 100644 --- a/arch/sh/kernel/cpu/sh2a/opcode_helper.c +++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/opcode_helper.c | 3 | * arch/sh/kernel/cpu/sh2a/opcode_helper.c |
3 | * | 4 | * |
4 | * Helper for the SH-2A 32-bit opcodes. | 5 | * Helper for the SH-2A 32-bit opcodes. |
5 | * | 6 | * |
6 | * Copyright (C) 2007 Paul Mundt | 7 | * Copyright (C) 2007 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
13 | 10 | ||
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c index eef17dcc3a41..a6777e6fc8cd 100644 --- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7203 Pinmux | 3 | * SH7203 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c index 569decbd6d93..7a103e16cf01 100644 --- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7264 Pinmux | 3 | * SH7264 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe Ltd | 5 | * Copyright (C) 2012 Renesas Electronics Europe Ltd |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c index 4c17fb6970b1..4da432ef1b40 100644 --- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7269 Pinmux | 3 | * SH7269 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe Ltd | 5 | * Copyright (C) 2012 Renesas Electronics Europe Ltd |
5 | * Copyright (C) 2012 Phil Edworthy | 6 | * Copyright (C) 2012 Phil Edworthy |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c index 3f87971082f1..c66a3bc882bf 100644 --- a/arch/sh/kernel/cpu/sh2a/probe.c +++ b/arch/sh/kernel/cpu/sh2a/probe.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh2a/probe.c | 3 | * arch/sh/kernel/cpu/sh2a/probe.c |
3 | * | 4 | * |
4 | * CPU Subtype Probing for SH-2A. | 5 | * CPU Subtype Probing for SH-2A. |
5 | * | 6 | * |
6 | * Copyright (C) 2004 - 2007 Paul Mundt | 7 | * Copyright (C) 2004 - 2007 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 060fdd369f09..52350ad0b0a2 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Renesas MX-G (R8A03022BG) Setup | 3 | * Renesas MX-G (R8A03022BG) Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2008, 2009 Paul Mundt | 5 | * Copyright (C) 2008, 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index c1301f68d3cd..b51ed761ae08 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7201 setup | 3 | * SH7201 setup |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk | 5 | * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk |
5 | * Copyright (C) 2009 Paul Mundt | 6 | * Copyright (C) 2009 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 32ec732e28e5..89b3e49fc250 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7203 and SH7263 Setup | 3 | * SH7203 and SH7263 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2007 - 2009 Paul Mundt | 5 | * Copyright (C) 2007 - 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index 8d8d354851ce..36ff3a3139da 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7206 Setup | 3 | * SH7206 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Yoshinori Sato | 5 | * Copyright (C) 2006 Yoshinori Sato |
5 | * Copyright (C) 2009 Paul Mundt | 6 | * Copyright (C) 2009 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c index ab71eab690fd..d199618d877c 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7264 Setup | 3 | * SH7264 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe Ltd | 5 | * Copyright (C) 2012 Renesas Electronics Europe Ltd |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c index c7e81b20967c..9095c960b455 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7269 Setup | 3 | * SH7269 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Electronics Europe Ltd | 5 | * Copyright (C) 2012 Renesas Electronics Europe Ltd |
5 | * Copyright (C) 2012 Phil Edworthy | 6 | * Copyright (C) 2012 Phil Edworthy |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c index 90faa44ca94d..d7765728cadf 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh3.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh3.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh3.c |
3 | * | 4 | * |
@@ -11,10 +12,6 @@ | |||
11 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | 12 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> |
12 | * Copyright (C) 2002, 2003, 2004 Paul Mundt | 13 | * Copyright (C) 2002, 2003, 2004 Paul Mundt |
13 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | 14 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> |
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | */ | 15 | */ |
19 | #include <linux/init.h> | 16 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c index a8da4a9986b3..4947114af090 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh7705.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh7705.c |
3 | * | 4 | * |
@@ -11,10 +12,6 @@ | |||
11 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | 12 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> |
12 | * Copyright (C) 2002, 2003, 2004 Paul Mundt | 13 | * Copyright (C) 2002, 2003, 2004 Paul Mundt |
13 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | 14 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> |
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | */ | 15 | */ |
19 | #include <linux/init.h> | 16 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c index a4088e5b2203..17855022c118 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh7706.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh7706.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c | 9 | * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c |
9 | * Copyright (C) 2005 Andriy Skulysh | 10 | * Copyright (C) 2005 Andriy Skulysh |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c index 54a6d4bcc0db..54701bbf7caa 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh7709.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh7709.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c | 9 | * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c |
9 | * Copyright (C) 2005 Paul Mundt | 10 | * Copyright (C) 2005 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c index ce601b2e3976..e60d0bc19cbe 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh7710.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh7710.c |
3 | * | 4 | * |
@@ -11,10 +12,6 @@ | |||
11 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | 12 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> |
12 | * Copyright (C) 2002, 2003, 2004 Paul Mundt | 13 | * Copyright (C) 2002, 2003, 2004 Paul Mundt |
13 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | 14 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> |
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | */ | 15 | */ |
19 | #include <linux/init.h> | 16 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c index 21438a9a1ae1..5af553f38d3a 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/clock-sh7712.c | 3 | * arch/sh/kernel/cpu/sh3/clock-sh7712.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c | 9 | * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c |
9 | * Copyright (C) 2005 Paul Mundt | 10 | * Copyright (C) 2005 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 262db6ec067b..25eb80905416 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh3/entry.S | 3 | * arch/sh/kernel/cpu/sh3/entry.S |
3 | * | 4 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 5 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2003 - 2012 Paul Mundt | 6 | * Copyright (C) 2003 - 2012 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/sys.h> | 8 | #include <linux/sys.h> |
12 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S index 99b4d020179a..ee2113f4215c 100644 --- a/arch/sh/kernel/cpu/sh3/ex.S +++ b/arch/sh/kernel/cpu/sh3/ex.S | |||
@@ -1,14 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh3/ex.S | 3 | * arch/sh/kernel/cpu/sh3/ex.S |
3 | * | 4 | * |
4 | * The SH-3 and SH-4 exception vector table. | 5 | * The SH-3 and SH-4 exception vector table. |
5 | 6 | * | |
6 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 7 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
7 | * Copyright (C) 2003 - 2008 Paul Mundt | 8 | * Copyright (C) 2003 - 2008 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
14 | 11 | ||
diff --git a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c index 26e90a66ebb7..34015e608ee9 100644 --- a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7720 Pinmux | 3 | * SH7720 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c index 426e1e1dcedc..5e7ad591ab16 100644 --- a/arch/sh/kernel/cpu/sh3/probe.c +++ b/arch/sh/kernel/cpu/sh3/probe.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh3/probe.c | 3 | * arch/sh/kernel/cpu/sh3/probe.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 1999, 2000 Niibe Yutaka | 7 | * Copyright (C) 1999, 2000 Niibe Yutaka |
7 | * Copyright (C) 2002 Paul Mundt | 8 | * Copyright (C) 2002 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c index 53be70b98116..8058c01cf09d 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh3.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Shared SH3 Setup code | 3 | * Shared SH3 Setup code |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index f6e392e0d27e..e19d1ce7b6ad 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7705 Setup | 3 | * SH7705 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2009 Paul Mundt | 5 | * Copyright (C) 2006 - 2009 Paul Mundt |
5 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 6 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 59a88611df55..5c5144bee6bc 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH3 Setup code for SH7706, SH7707, SH7708, SH7709 | 3 | * SH3 Setup code for SH7706, SH7707, SH7708, SH7709 |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * Based on setup-sh7709.c | 8 | * Based on setup-sh7709.c |
8 | * | 9 | * |
9 | * Copyright (C) 2006 Paul Mundt | 10 | * Copyright (C) 2006 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/init.h> | 12 | #include <linux/init.h> |
16 | #include <linux/io.h> | 13 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index ea52410b430d..4776e2495738 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH3 Setup code for SH7710, SH7712 | 3 | * SH3 Setup code for SH7710, SH7712 |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2009 Paul Mundt | 5 | * Copyright (C) 2006 - 2009 Paul Mundt |
5 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 6 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index bf34b4e2e9ef..1d4c34e7b7db 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Setup code for SH7720, SH7721. | 3 | * Setup code for SH7720, SH7721. |
3 | * | 4 | * |
@@ -8,10 +9,6 @@ | |||
8 | * | 9 | * |
9 | * Copyright (C) 2006 Paul Mundt | 10 | * Copyright (C) 2006 Paul Mundt |
10 | * Copyright (C) 2006 Jamie Lenehan | 11 | * Copyright (C) 2006 Jamie Lenehan |
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | 12 | */ |
16 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
17 | #include <linux/init.h> | 14 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh3/swsusp.S b/arch/sh/kernel/cpu/sh3/swsusp.S index 01145426a2b8..dc111c4ccf21 100644 --- a/arch/sh/kernel/cpu/sh3/swsusp.S +++ b/arch/sh/kernel/cpu/sh3/swsusp.S | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh3/swsusp.S | 3 | * arch/sh/kernel/cpu/sh3/swsusp.S |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/sys.h> | 7 | #include <linux/sys.h> |
11 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index 4b5bab5f875f..c1cdef763cb2 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 3 | * arch/sh/kernel/cpu/sh4/clock-sh4-202.c |
3 | * | 4 | * |
4 | * Additional SH4-202 support for the clock framework | 5 | * Additional SH4-202 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2005 Paul Mundt | 7 | * Copyright (C) 2005 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c index 99e5ec8b483d..ee3c5537a9d8 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/clock-sh4.c | 3 | * arch/sh/kernel/cpu/sh4/clock-sh4.c |
3 | * | 4 | * |
@@ -11,10 +12,6 @@ | |||
11 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | 12 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> |
12 | * Copyright (C) 2002, 2003, 2004 Paul Mundt | 13 | * Copyright (C) 2002, 2003, 2004 Paul Mundt |
13 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | 14 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> |
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | */ | 15 | */ |
19 | #include <linux/init.h> | 16 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 95fd2dcb83da..1ff56e5ba990 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c | |||
@@ -1,10 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Save/restore floating point context for signal handlers. | 3 | * Save/restore floating point context for signal handlers. |
3 | * | 4 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | 5 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka |
9 | * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support) | 6 | * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support) |
10 | * | 7 | * |
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c index fa4f724b295a..db5847bb7330 100644 --- a/arch/sh/kernel/cpu/sh4/perf_event.c +++ b/arch/sh/kernel/cpu/sh4/perf_event.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Performance events support for SH7750-style performance counters | 3 | * Performance events support for SH7750-style performance counters |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Paul Mundt | 5 | * Copyright (C) 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index a521bcf50695..ef4dd6295263 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/probe.c | 3 | * arch/sh/kernel/cpu/sh4/probe.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2001 - 2007 Paul Mundt | 7 | * Copyright (C) 2001 - 2007 Paul Mundt |
7 | * Copyright (C) 2003 Richard Curnow | 8 | * Copyright (C) 2003 Richard Curnow |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/io.h> | 11 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 2623f820d510..a40ef35d101a 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH4-202 Setup | 3 | * SH4-202 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * Copyright (C) 2009 Magnus Damm | 6 | * Copyright (C) 2009 Magnus Damm |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 57d30689204d..b37bda66a532 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup | 3 | * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * Copyright (C) 2006 Jamie Lenehan | 6 | * Copyright (C) 2006 Jamie Lenehan |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index e51fe1734e13..86845da85997 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7760 Setup | 3 | * SH7760 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 4ca78ed71ad2..934ff84844fa 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/sq.c | 3 | * arch/sh/kernel/cpu/sh4/sq.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2001 - 2006 Paul Mundt | 7 | * Copyright (C) 2001 - 2006 Paul Mundt |
7 | * Copyright (C) 2001, 2002 M. R. Brown | 8 | * Copyright (C) 2001, 2002 M. R. Brown |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/cpu.h> | 11 | #include <linux/cpu.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c index a907ee2388bf..32cb5d1fd3b3 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c | |||
@@ -1,22 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7343.c |
3 | * | 4 | * |
4 | * SH7343 clock framework support | 5 | * SH7343 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | 8 | */ |
21 | #include <linux/init.h> | 9 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c index ac9854179dee..aa3444b41e72 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c | |||
@@ -1,22 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7366.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7366.c |
3 | * | 4 | * |
4 | * SH7366 clock framework support | 5 | * SH7366 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | 8 | */ |
21 | #include <linux/init.h> | 9 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index d85091ec4b01..38b057703eaa 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c | |||
@@ -1,22 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7722.c |
3 | * | 4 | * |
4 | * SH7722 clock framework support | 5 | * SH7722 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | 8 | */ |
21 | #include <linux/init.h> | 9 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index af01664f7b4c..9dc3a987d7cf 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c | |||
@@ -1,22 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7723.c |
3 | * | 4 | * |
4 | * SH7723 clock framework support | 5 | * SH7723 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | 8 | */ |
21 | #include <linux/init.h> | 9 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 3194336a3599..2a1f0d847a2e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c | |||
@@ -1,22 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7724.c |
3 | * | 4 | * |
4 | * SH7724 clock framework support | 5 | * SH7724 clock framework support |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | 8 | */ |
21 | #include <linux/init.h> | 9 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c index 354dcac5e4cd..c81ee60eddb8 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7734.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7734.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2011, 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> | 7 | * Copyright (C) 2011, 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> |
7 | * Copyright (C) 2011, 2012 Renesas Solutions Corp. | 8 | * Copyright (C) 2011, 2012 Renesas Solutions Corp. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c index b10af2ae9f35..9acb72210fed 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/clock-sh7757.c | 3 | * arch/sh/kernel/cpu/sh4/clock-sh7757.c |
3 | * | 4 | * |
4 | * SH7757 support for the clock framework | 5 | * SH7757 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2009-2010 Renesas Solutions Corp. | 7 | * Copyright (C) 2009-2010 Renesas Solutions Corp. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c index 7707e35aea46..aaff4b96812c 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7763.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2005 Paul Mundt | 7 | * Copyright (C) 2005 Paul Mundt |
7 | * Copyright (C) 2007 Yoshihiro Shimoda | 8 | * Copyright (C) 2007 Yoshihiro Shimoda |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c index 5d36f334bb0a..f356dfcd17b7 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7770.c |
3 | * | 4 | * |
4 | * SH7770 support for the clock framework | 5 | * SH7770 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2005 Paul Mundt | 7 | * Copyright (C) 2005 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c index 793dae42a2f8..fc0a3efb53d5 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7780.c |
3 | * | 4 | * |
4 | * SH7780 support for the clock framework | 5 | * SH7780 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2005 Paul Mundt | 7 | * Copyright (C) 2005 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c index 1aafd5496752..fca351378bbc 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7785.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7785.c |
3 | * | 4 | * |
4 | * SH7785 support for the clock framework | 5 | * SH7785 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2007 - 2010 Paul Mundt | 7 | * Copyright (C) 2007 - 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c index ac3dcfe5d303..f23862df3e8f 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 3 | * arch/sh/kernel/cpu/sh4a/clock-sh7786.c |
3 | * | 4 | * |
4 | * SH7786 support for the clock framework | 5 | * SH7786 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2010 Paul Mundt | 7 | * Copyright (C) 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c index b1bdbc3cbc21..6c7b6ab6cab5 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4/clock-shx3.c | 3 | * arch/sh/kernel/cpu/sh4/clock-shx3.c |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2006-2007 Renesas Technology Corp. | 7 | * Copyright (C) 2006-2007 Renesas Technology Corp. |
7 | * Copyright (C) 2006-2007 Renesas Solutions Corp. | 8 | * Copyright (C) 2006-2007 Renesas Solutions Corp. |
8 | * Copyright (C) 2006-2010 Paul Mundt | 9 | * Copyright (C) 2006-2010 Paul Mundt |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c index 78c971486b4e..eea87d25efbb 100644 --- a/arch/sh/kernel/cpu/sh4a/intc-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Shared support for SH-X3 interrupt controllers. | 3 | * Shared support for SH-X3 interrupt controllers. |
3 | * | 4 | * |
4 | * Copyright (C) 2009 - 2010 Paul Mundt | 5 | * Copyright (C) 2009 - 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
11 | #include <linux/io.h> | 8 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c index 84a2c396ceee..3beb8fed3d28 100644 --- a/arch/sh/kernel/cpu/sh4a/perf_event.c +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Performance events support for SH-4A performance counters | 3 | * Performance events support for SH-4A performance counters |
3 | * | 4 | * |
4 | * Copyright (C) 2009, 2010 Paul Mundt | 5 | * Copyright (C) 2009, 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c index 99c637d5bf7a..b67abc0637a4 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7723 Pinmux | 3 | * SH7723 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c index 63be4749e341..b43c3259060b 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7724 Pinmux | 3 | * SH7724 Pinmux |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on SH7723 Pinmux | 9 | * Based on SH7723 Pinmux |
9 | * Copyright (C) 2008 Magnus Damm | 10 | * Copyright (C) 2008 Magnus Damm |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | 12 | ||
16 | #include <linux/bug.h> | 13 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c index ea2db632a764..46256b19619a 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7734 processor support - PFC hardware block | 3 | * SH7734 processor support - PFC hardware block |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
5 | * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> | 6 | * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c index 567745d44221..c92f304cb4ba 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7757 (B0 step) Pinmux | 3 | * SH7757 (B0 step) Pinmux |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on SH7723 Pinmux | 9 | * Based on SH7723 Pinmux |
9 | * Copyright (C) 2008 Magnus Damm | 10 | * Copyright (C) 2008 Magnus Damm |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | 12 | ||
16 | #include <linux/bug.h> | 13 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c index e336ab8b5125..f329de6e758a 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7785 Pinmux | 3 | * SH7785 Pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c index 9a459556a2f7..47e8639f3e71 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7786 Pinmux | 3 | * SH7786 Pinmux |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * Based on SH7785 pinmux | 8 | * Based on SH7785 pinmux |
8 | * | 9 | * |
9 | * Copyright (C) 2008 Magnus Damm | 10 | * Copyright (C) 2008 Magnus Damm |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | 12 | ||
16 | #include <linux/bug.h> | 13 | #include <linux/bug.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c index 444bf25c60fa..6c02f6256467 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH-X3 prototype CPU pinmux | 3 | * SH-X3 prototype CPU pinmux |
3 | * | 4 | * |
4 | * Copyright (C) 2010 Paul Mundt | 5 | * Copyright (C) 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/bug.h> | 7 | #include <linux/bug.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 5788073a7c30..a15e25690b5f 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7343 Setup | 3 | * SH7343 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index 646918713d9a..7bd2776441ba 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7366 Setup | 3 | * SH7366 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Renesas Solutions | 5 | * Copyright (C) 2008 Renesas Solutions |
5 | * | 6 | * |
6 | * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 7 | * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 6b3a26e61abb..1ce65f88f060 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7722 Setup | 3 | * SH7722 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2008 Paul Mundt | 5 | * Copyright (C) 2006 - 2008 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/init.h> | 7 | #include <linux/init.h> |
11 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 1c1b3c469831..edb649950662 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7723 Setup | 3 | * SH7723 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Paul Mundt | 5 | * Copyright (C) 2008 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index c20258b18775..3e9825031d3d 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7724 Setup | 3 | * SH7724 Setup |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Based on SH7723 Setup | 9 | * Based on SH7723 Setup |
9 | * Copyright (C) 2008 Paul Mundt | 10 | * Copyright (C) 2008 Paul Mundt |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
16 | #include <linux/init.h> | 13 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c index 8c0c9da6b5b3..06a91569697a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c | |||
@@ -1,14 +1,11 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/setup-sh7734.c | 3 | * arch/sh/kernel/cpu/sh4a/setup-sh7734.c |
3 | 4 | * | |
4 | * SH7734 Setup | 5 | * SH7734 Setup |
5 | * | 6 | * |
6 | * Copyright (C) 2011,2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> | 7 | * Copyright (C) 2011,2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> |
7 | * Copyright (C) 2011,2012 Renesas Solutions Corp. | 8 | * Copyright (C) 2011,2012 Renesas Solutions Corp. |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index a46a19b49e08..2501ce656511 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7757 Setup | 3 | * SH7757 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2009, 2011 Renesas Solutions Corp. | 5 | * Copyright (C) 2009, 2011 Renesas Solutions Corp. |
5 | * | 6 | * |
6 | * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt | 7 | * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 40e6cda914d3..419c5efe4a17 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7763 Setup | 3 | * SH7763 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * Copyright (C) 2007 Yoshihiro Shimoda | 6 | * Copyright (C) 2007 Yoshihiro Shimoda |
6 | * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu | 7 | * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/platform_device.h> | 9 | #include <linux/platform_device.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 82e3bdf2e1b6..5fb4cf9b58c6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7770 Setup | 3 | * SH7770 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2008 Paul Mundt | 5 | * Copyright (C) 2006 - 2008 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index d90ff67a4633..ab7d6b715865 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7780 Setup | 3 | * SH7780 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2006 Paul Mundt | 5 | * Copyright (C) 2006 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index b0d6f82f2d71..a438da47285d 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7785 Setup | 3 | * SH7785 Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Paul Mundt | 5 | * Copyright (C) 2007 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index 17aac38a6e90..d894165a0ef6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH7786 Setup | 3 | * SH7786 Setup |
3 | * | 4 | * |
@@ -8,10 +9,6 @@ | |||
8 | * Based on SH7785 Setup | 9 | * Based on SH7785 Setup |
9 | * | 10 | * |
10 | * Copyright (C) 2007 Paul Mundt | 11 | * Copyright (C) 2007 Paul Mundt |
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | 12 | */ |
16 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
17 | #include <linux/init.h> | 14 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index ee14d92d840f..14aa4552bc45 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH-X3 Prototype Setup | 3 | * SH-X3 Prototype Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2007 - 2010 Paul Mundt | 5 | * Copyright (C) 2007 - 2010 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 0d3637c494bf..f8a2bec0f260 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH-X3 SMP | 3 | * SH-X3 SMP |
3 | * | 4 | * |
4 | * Copyright (C) 2007 - 2010 Paul Mundt | 5 | * Copyright (C) 2007 - 2010 Paul Mundt |
5 | * Copyright (C) 2007 Magnus Damm | 6 | * Copyright (C) 2007 Magnus Damm |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c index efb2745bcb36..25eacd9c47d1 100644 --- a/arch/sh/kernel/cpu/sh4a/ubc.c +++ b/arch/sh/kernel/cpu/sh4a/ubc.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh4a/ubc.c | 3 | * arch/sh/kernel/cpu/sh4a/ubc.c |
3 | * | 4 | * |
4 | * On-chip UBC support for SH-4A CPUs. | 5 | * On-chip UBC support for SH-4A CPUs. |
5 | * | 6 | * |
6 | * Copyright (C) 2009 - 2010 Paul Mundt | 7 | * Copyright (C) 2009 - 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/err.h> | 10 | #include <linux/err.h> |
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c index c48b93d4c081..43763c26a752 100644 --- a/arch/sh/kernel/cpu/sh5/clock-sh5.c +++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh5/clock-sh5.c | 3 | * arch/sh/kernel/cpu/sh5/clock-sh5.c |
3 | * | 4 | * |
4 | * SH-5 support for the clock framework | 5 | * SH-5 support for the clock framework |
5 | * | 6 | * |
6 | * Copyright (C) 2008 Paul Mundt | 7 | * Copyright (C) 2008 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index 0c8d0377d40b..de68ffdfffbf 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh5/entry.S | 3 | * arch/sh/kernel/cpu/sh5/entry.S |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2004 - 2008 Paul Mundt | 6 | * Copyright (C) 2004 - 2008 Paul Mundt |
6 | * Copyright (C) 2003, 2004 Richard Curnow | 7 | * Copyright (C) 2003, 2004 Richard Curnow |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c index 9f8713aa7184..9218d9ed787e 100644 --- a/arch/sh/kernel/cpu/sh5/fpu.c +++ b/arch/sh/kernel/cpu/sh5/fpu.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh5/fpu.c | 3 | * arch/sh/kernel/cpu/sh5/fpu.c |
3 | * | 4 | * |
@@ -7,10 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Started from SH4 version: | 9 | * Started from SH4 version: |
9 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | 10 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka |
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | 11 | */ |
15 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
16 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c index eca427c2f2f3..947250188065 100644 --- a/arch/sh/kernel/cpu/sh5/probe.c +++ b/arch/sh/kernel/cpu/sh5/probe.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh5/probe.c | 3 | * arch/sh/kernel/cpu/sh5/probe.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2000, 2001 Paolo Alberelli | 7 | * Copyright (C) 2000, 2001 Paolo Alberelli |
7 | * Copyright (C) 2003 - 2007 Paul Mundt | 8 | * Copyright (C) 2003 - 2007 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/io.h> | 11 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c index 084a9cc99175..41c1673afc0b 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SH5-101/SH5-103 CPU Setup | 3 | * SH5-101/SH5-103 CPU Setup |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Paul Mundt | 5 | * Copyright (C) 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S index 45c351b0f1ba..d1beff755632 100644 --- a/arch/sh/kernel/cpu/sh5/switchto.S +++ b/arch/sh/kernel/cpu/sh5/switchto.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh5/switchto.S | 3 | * arch/sh/kernel/cpu/sh5/switchto.S |
3 | * | 4 | * |
4 | * sh64 context switch | 5 | * sh64 context switch |
5 | * | 6 | * |
6 | * Copyright (C) 2004 Richard Curnow | 7 | * Copyright (C) 2004 Richard Curnow |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | .section .text..SHmedia32,"ax" | 10 | .section .text..SHmedia32,"ax" |
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c index 3a4fed406fc6..3cb0cd9cea29 100644 --- a/arch/sh/kernel/cpu/sh5/unwind.c +++ b/arch/sh/kernel/cpu/sh5/unwind.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/sh5/unwind.c | 3 | * arch/sh/kernel/cpu/sh5/unwind.c |
3 | * | 4 | * |
4 | * Copyright (C) 2004 Paul Mundt | 5 | * Copyright (C) 2004 Paul Mundt |
5 | * Copyright (C) 2004 Richard Curnow | 6 | * Copyright (C) 2004 Richard Curnow |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/kallsyms.h> | 8 | #include <linux/kallsyms.h> |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile index e8a5111e848a..7581d5f03ce1 100644 --- a/arch/sh/kernel/cpu/shmobile/Makefile +++ b/arch/sh/kernel/cpu/shmobile/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Linux/SuperH SH-Mobile backends. | 3 | # Makefile for the Linux/SuperH SH-Mobile backends. |
3 | # | 4 | # |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index c32e66079f7c..dbd2cdec2ddb 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/shmobile/cpuidle.c | 3 | * arch/sh/kernel/cpu/shmobile/cpuidle.c |
3 | * | 4 | * |
4 | * Cpuidle support code for SuperH Mobile | 5 | * Cpuidle support code for SuperH Mobile |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index fba2be5d72e9..ca9945f51e51 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/cpu/shmobile/pm.c | 3 | * arch/sh/kernel/cpu/shmobile/pm.c |
3 | * | 4 | * |
4 | * Power management support code for SuperH Mobile | 5 | * Power management support code for SuperH Mobile |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index e6aac65f5750..f928c0315129 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S | 3 | * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S |
3 | * | 4 | * |
4 | * Sleep mode and Standby modes support for SuperH Mobile | 5 | * Sleep mode and Standby modes support for SuperH Mobile |
5 | * | 6 | * |
6 | * Copyright (C) 2009 Magnus Damm | 7 | * Copyright (C) 2009 Magnus Damm |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/sys.h> | 10 | #include <linux/sys.h> |
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S index 7a1b46fec0f4..ad07527e2a99 100644 --- a/arch/sh/kernel/debugtraps.S +++ b/arch/sh/kernel/debugtraps.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/debugtraps.S | 3 | * arch/sh/kernel/debugtraps.S |
3 | * | 4 | * |
4 | * Debug trap jump tables for SuperH | 5 | * Debug trap jump tables for SuperH |
5 | * | 6 | * |
6 | * Copyright (C) 2006 - 2008 Paul Mundt | 7 | * Copyright (C) 2006 - 2008 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/sys.h> | 9 | #include <linux/sys.h> |
13 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 015fee58014b..defebf1a9c8a 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Disassemble SuperH instructions. | 3 | * Disassemble SuperH instructions. |
3 | * | 4 | * |
4 | * Copyright (C) 1999 kaz Kojima | 5 | * Copyright (C) 1999 kaz Kojima |
5 | * Copyright (C) 2008 Paul Mundt | 6 | * Copyright (C) 2008 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
12 | #include <linux/string.h> | 9 | #include <linux/string.h> |
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c index a0021eef956b..b17514619b7e 100644 --- a/arch/sh/kernel/dma-coherent.c +++ b/arch/sh/kernel/dma-coherent.c | |||
@@ -1,9 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2004 - 2007 Paul Mundt | 3 | * Copyright (C) 2004 - 2007 Paul Mundt |
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | 4 | */ |
8 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
9 | #include <linux/init.h> | 6 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index b564b1eae4ae..93c6c0e691ee 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | 3 | * Copyright (C) 1991, 1992 Linus Torvalds |
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | 4 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs |
4 | * Copyright (C) 2009 Matt Fleming | 5 | * Copyright (C) 2009 Matt Fleming |
5 | * Copyright (C) 2002 - 2012 Paul Mundt | 6 | * Copyright (C) 2002 - 2012 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/kallsyms.h> | 8 | #include <linux/kallsyms.h> |
12 | #include <linux/ftrace.h> | 9 | #include <linux/ftrace.h> |
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index bb511e2d9d68..9e1d26c8a0c4 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -1,10 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> | 3 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> |
3 | * | 4 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * This is an implementation of a DWARF unwinder. Its main purpose is | 5 | * This is an implementation of a DWARF unwinder. Its main purpose is |
9 | * for generating stacktrace information. Based on the DWARF 3 | 6 | * for generating stacktrace information. Based on the DWARF 3 |
10 | * specification from http://www.dwarfstd.org. | 7 | * specification from http://www.dwarfstd.org. |
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 28cc61216b64..d31f66e82ce5 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S | |||
@@ -1,11 +1,7 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 3 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
3 | * Copyright (C) 2003 - 2008 Paul Mundt | 4 | * Copyright (C) 2003 - 2008 Paul Mundt |
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | * | ||
9 | */ | 5 | */ |
10 | 6 | ||
11 | ! NOTE: | 7 | ! NOTE: |
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 4e352c3f79e6..4adbd4ade319 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S | |||
@@ -1,14 +1,11 @@ | |||
1 | /* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $ | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $ | ||
2 | * | 3 | * |
3 | * arch/sh/kernel/head.S | 4 | * arch/sh/kernel/head.S |
4 | * | 5 | * |
5 | * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima | 6 | * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima |
6 | * Copyright (C) 2010 Matt Fleming | 7 | * Copyright (C) 2010 Matt Fleming |
7 | * | 8 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * Head.S contains the SH exception handlers and startup code. | 9 | * Head.S contains the SH exception handlers and startup code. |
13 | */ | 10 | */ |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S index cca491397a28..67685e1f00e1 100644 --- a/arch/sh/kernel/head_64.S +++ b/arch/sh/kernel/head_64.S | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/head_64.S | 3 | * arch/sh/kernel/head_64.S |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2003, 2004 Paul Mundt | 6 | * Copyright (C) 2003, 2004 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index d9ff3b42da7c..bc96b16288c1 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/hw_breakpoint.c | 3 | * arch/sh/kernel/hw_breakpoint.c |
3 | * | 4 | * |
4 | * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. | 5 | * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. |
5 | * | 6 | * |
6 | * Copyright (C) 2009 - 2010 Paul Mundt | 7 | * Copyright (C) 2009 - 2010 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/init.h> | 9 | #include <linux/init.h> |
13 | #include <linux/perf_event.h> | 10 | #include <linux/perf_event.h> |
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index be616ee0cf87..c20fc5487e05 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * The idle loop for all SuperH platforms. | 3 | * The idle loop for all SuperH platforms. |
3 | * | 4 | * |
4 | * Copyright (C) 2002 - 2009 Paul Mundt | 5 | * Copyright (C) 2002 - 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/module.h> | 7 | #include <linux/module.h> |
11 | #include <linux/init.h> | 8 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index 5c51b794ba2a..da22f3b32d30 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/io.c - Machine independent I/O functions. | 3 | * arch/sh/kernel/io.c - Machine independent I/O functions. |
3 | * | 4 | * |
4 | * Copyright (C) 2000 - 2009 Stuart Menefy | 5 | * Copyright (C) 2000 - 2009 Stuart Menefy |
5 | * Copyright (C) 2005 Paul Mundt | 6 | * Copyright (C) 2005 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
12 | #include <linux/pci.h> | 9 | #include <linux/pci.h> |
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 4d4e7a2a774b..bacad6da4fe4 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Trapped io support | 3 | * Trapped io support |
3 | * | 4 | * |
4 | * Copyright (C) 2008 Magnus Damm | 5 | * Copyright (C) 2008 Magnus Damm |
5 | * | 6 | * |
6 | * Intercept io operations by trapping. | 7 | * Intercept io operations by trapping. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
13 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c index 2e8e8b9b9cef..ef9e2c97cbb7 100644 --- a/arch/sh/kernel/iomap.c +++ b/arch/sh/kernel/iomap.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/iomap.c | 3 | * arch/sh/kernel/iomap.c |
3 | * | 4 | * |
4 | * Copyright (C) 2000 Niibe Yutaka | 5 | * Copyright (C) 2000 Niibe Yutaka |
5 | * Copyright (C) 2005 - 2007 Paul Mundt | 6 | * Copyright (C) 2005 - 2007 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
12 | #include <linux/io.h> | 9 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c index cca14ba84a37..34f8cdbbcf0b 100644 --- a/arch/sh/kernel/ioport.c +++ b/arch/sh/kernel/ioport.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/ioport.c | 3 | * arch/sh/kernel/ioport.c |
3 | * | 4 | * |
4 | * Copyright (C) 2000 Niibe Yutaka | 5 | * Copyright (C) 2000 Niibe Yutaka |
5 | * Copyright (C) 2005 - 2007 Paul Mundt | 6 | * Copyright (C) 2005 - 2007 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
12 | #include <linux/io.h> | 9 | #include <linux/io.h> |
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c index e5a755be9129..e09cdc4ada68 100644 --- a/arch/sh/kernel/irq_32.c +++ b/arch/sh/kernel/irq_32.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SHcompact irqflags support | 3 | * SHcompact irqflags support |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2009 Paul Mundt | 5 | * Copyright (C) 2006 - 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/irqflags.h> | 7 | #include <linux/irqflags.h> |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c index 8fc05b997b6d..7a1f50435e33 100644 --- a/arch/sh/kernel/irq_64.c +++ b/arch/sh/kernel/irq_64.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SHmedia irqflags support | 3 | * SHmedia irqflags support |
3 | * | 4 | * |
4 | * Copyright (C) 2006 - 2009 Paul Mundt | 5 | * Copyright (C) 2006 - 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/irqflags.h> | 7 | #include <linux/irqflags.h> |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index 4f04c6638a4d..d24bd2d2ffad 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SuperH KGDB support | 3 | * SuperH KGDB support |
3 | * | 4 | * |
4 | * Copyright (C) 2008 - 2012 Paul Mundt | 5 | * Copyright (C) 2008 - 2012 Paul Mundt |
5 | * | 6 | * |
6 | * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. | 7 | * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/kgdb.h> | 9 | #include <linux/kgdb.h> |
13 | #include <linux/kdebug.h> | 10 | #include <linux/kdebug.h> |
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 241e903dd3ee..1f8c0d30567f 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Kernel probes (kprobes) for SuperH | 3 | * Kernel probes (kprobes) for SuperH |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Chris Smith <chris.smith@st.com> | 5 | * Copyright (C) 2007 Chris Smith <chris.smith@st.com> |
5 | * Copyright (C) 2006 Lineo Solutions, Inc. | 6 | * Copyright (C) 2006 Lineo Solutions, Inc. |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/kprobes.h> | 8 | #include <linux/kprobes.h> |
12 | #include <linux/extable.h> | 9 | #include <linux/extable.h> |
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 9fea49f6e667..b9f9f1a5afdc 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c | |||
@@ -1,12 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * machine_kexec.c - handle transition of Linux booting another kernel | 3 | * machine_kexec.c - handle transition of Linux booting another kernel |
3 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> | 4 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> |
4 | * | 5 | * |
5 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz | 6 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz |
6 | * LANDISK/sh4 supported by kogiidena | 7 | * LANDISK/sh4 supported by kogiidena |
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | */ | 8 | */ |
11 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
12 | #include <linux/kexec.h> | 10 | #include <linux/kexec.h> |
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index ec05f491c347..beadbbdb4486 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/machvec.c | 3 | * arch/sh/kernel/machvec.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 1999 Niibe Yutaka | 7 | * Copyright (C) 1999 Niibe Yutaka |
7 | * Copyright (C) 2002 - 2007 Paul Mundt | 8 | * Copyright (C) 2002 - 2007 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/init.h> | 10 | #include <linux/init.h> |
14 | #include <linux/string.h> | 11 | #include <linux/string.h> |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index 1b525dedd29a..bbc78d1d618e 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* Kernel module help for SH. | 2 | /* Kernel module help for SH. |
2 | 3 | ||
3 | SHcompact version by Kaz Kojima and Paul Mundt. | 4 | SHcompact version by Kaz Kojima and Paul Mundt. |
@@ -9,20 +10,6 @@ | |||
9 | 10 | ||
10 | Based on the sh version, and on code from the sh64-specific parts of | 11 | Based on the sh version, and on code from the sh64-specific parts of |
11 | modutils, originally written by Richard Curnow and Ben Gaster. | 12 | modutils, originally written by Richard Curnow and Ben Gaster. |
12 | |||
13 | This program is free software; you can redistribute it and/or modify | ||
14 | it under the terms of the GNU General Public License as published by | ||
15 | the Free Software Foundation; either version 2 of the License, or | ||
16 | (at your option) any later version. | ||
17 | |||
18 | This program is distributed in the hope that it will be useful, | ||
19 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | GNU General Public License for more details. | ||
22 | |||
23 | You should have received a copy of the GNU General Public License | ||
24 | along with this program; if not, write to the Free Software | ||
25 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | 13 | */ |
27 | #include <linux/moduleloader.h> | 14 | #include <linux/moduleloader.h> |
28 | #include <linux/elf.h> | 15 | #include <linux/elf.h> |
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c index 730d928f0d12..11777867c6f5 100644 --- a/arch/sh/kernel/nmi_debug.c +++ b/arch/sh/kernel/nmi_debug.c | |||
@@ -1,9 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2007 Atmel Corporation | 3 | * Copyright (C) 2007 Atmel Corporation |
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | 4 | */ |
8 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
9 | #include <linux/kdebug.h> | 6 | #include <linux/kdebug.h> |
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c index fa2c0cd23eaa..6281f2fdf9ca 100644 --- a/arch/sh/kernel/perf_callchain.c +++ b/arch/sh/kernel/perf_callchain.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Performance event callchain support - SuperH architecture code | 3 | * Performance event callchain support - SuperH architecture code |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Paul Mundt | 5 | * Copyright (C) 2009 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
11 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index ba3269a8304b..445e3ece4c23 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Performance event support framework for SuperH hardware counters. | 3 | * Performance event support framework for SuperH hardware counters. |
3 | * | 4 | * |
@@ -15,10 +16,6 @@ | |||
15 | * | 16 | * |
16 | * ppc: | 17 | * ppc: |
17 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | 18 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. |
18 | * | ||
19 | * This file is subject to the terms and conditions of the GNU General Public | ||
20 | * License. See the file "COPYING" in the main directory of this archive | ||
21 | * for more details. | ||
22 | */ | 19 | */ |
23 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
24 | #include <linux/init.h> | 21 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 27fddb56b3e1..a094633874c3 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/process.c | 3 | * arch/sh/kernel/process.c |
3 | * | 4 | * |
@@ -8,10 +9,6 @@ | |||
8 | * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima | 9 | * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima |
9 | * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC | 10 | * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC |
10 | * Copyright (C) 2002 - 2008 Paul Mundt | 11 | * Copyright (C) 2002 - 2008 Paul Mundt |
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | 12 | */ |
16 | #include <linux/module.h> | 13 | #include <linux/module.h> |
17 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index ee2abe96f9f3..c2844a2e18cd 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/process_64.c | 3 | * arch/sh/kernel/process_64.c |
3 | * | 4 | * |
@@ -12,10 +13,6 @@ | |||
12 | * | 13 | * |
13 | * In turn started from i386 version: | 14 | * In turn started from i386 version: |
14 | * Copyright (C) 1995 Linus Torvalds | 15 | * Copyright (C) 1995 Linus Torvalds |
15 | * | ||
16 | * This file is subject to the terms and conditions of the GNU General Public | ||
17 | * License. See the file "COPYING" in the main directory of this archive | ||
18 | * for more details. | ||
19 | */ | 16 | */ |
20 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
21 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 5fc3ff606210..d5052c30a0e9 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SuperH process tracing | 3 | * SuperH process tracing |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 2002 - 2009 Paul Mundt | 6 | * Copyright (C) 2002 - 2009 Paul Mundt |
6 | * | 7 | * |
7 | * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> | 8 | * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
14 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 1e0656d9e7af..3390349ff976 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/ptrace_64.c | 3 | * arch/sh/kernel/ptrace_64.c |
3 | * | 4 | * |
@@ -10,10 +11,6 @@ | |||
10 | * Original x86 implementation: | 11 | * Original x86 implementation: |
11 | * By Ross Biro 1/23/92 | 12 | * By Ross Biro 1/23/92 |
12 | * edited by Linus Torvalds | 13 | * edited by Linus Torvalds |
13 | * | ||
14 | * This file is subject to the terms and conditions of the GNU General Public | ||
15 | * License. See the file "COPYING" in the main directory of this archive | ||
16 | * for more details. | ||
17 | */ | 14 | */ |
18 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
19 | #include <linux/rwsem.h> | 16 | #include <linux/rwsem.h> |
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index fcc9934fb97b..d9bf2b727b42 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S | |||
@@ -1,13 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * relocate_kernel.S - put the kernel image in place to boot | 3 | * relocate_kernel.S - put the kernel image in place to boot |
3 | * 2005.9.17 kogiidena@eggplant.ddo.jp | 4 | * 2005.9.17 kogiidena@eggplant.ddo.jp |
4 | * | 5 | * |
5 | * LANDISK/sh4 is supported. Maybe, SH archtecture works well. | 6 | * LANDISK/sh4 is supported. Maybe, SH archtecture works well. |
6 | * | 7 | * |
7 | * 2009-03-18 Magnus Damm - Added Kexec Jump support | 8 | * 2009-03-18 Magnus Damm - Added Kexec Jump support |
8 | * | ||
9 | * This source code is licensed under the GNU General Public License, | ||
10 | * Version 2. See the file COPYING for more details. | ||
11 | */ | 9 | */ |
12 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
13 | #include <asm/addrspace.h> | 11 | #include <asm/addrspace.h> |
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c index 5124aeb28c3f..8838094c9ff9 100644 --- a/arch/sh/kernel/return_address.c +++ b/arch/sh/kernel/return_address.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/return_address.c | 3 | * arch/sh/kernel/return_address.c |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Matt Fleming | 5 | * Copyright (C) 2009 Matt Fleming |
5 | * Copyright (C) 2009 Paul Mundt | 6 | * Copyright (C) 2009 Paul Mundt |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 9 | #include <linux/module.h> |
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c index fe584e516964..250dbdf3fa74 100644 --- a/arch/sh/kernel/sh_bios.c +++ b/arch/sh/kernel/sh_bios.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * C interface for trapping into the standard LinuxSH BIOS. | 3 | * C interface for trapping into the standard LinuxSH BIOS. |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 1999, 2000 Niibe Yutaka | 6 | * Copyright (C) 1999, 2000 Niibe Yutaka |
6 | * Copyright (C) 2002 M. R. Brown | 7 | * Copyright (C) 2002 M. R. Brown |
7 | * Copyright (C) 2004 - 2010 Paul Mundt | 8 | * Copyright (C) 2004 - 2010 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/module.h> | 10 | #include <linux/module.h> |
14 | #include <linux/console.h> | 11 | #include <linux/console.h> |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 6ee3740e009e..9de17065afb4 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/sh_ksyms_64.c | 3 | * arch/sh/kernel/sh_ksyms_64.c |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/rwsem.h> | 7 | #include <linux/rwsem.h> |
11 | #include <linux/module.h> | 8 | #include <linux/module.h> |
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 7b77f1812434..76661dee3c65 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/signal_64.c | 3 | * arch/sh/kernel/signal_64.c |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2003 - 2008 Paul Mundt | 6 | * Copyright (C) 2003 - 2008 Paul Mundt |
6 | * Copyright (C) 2004 Richard Curnow | 7 | * Copyright (C) 2004 Richard Curnow |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/rwsem.h> | 9 | #include <linux/rwsem.h> |
13 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index c483422ea4d0..372acdc9033e 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/smp.c | 3 | * arch/sh/kernel/smp.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * Copyright (C) 2002 - 2010 Paul Mundt | 7 | * Copyright (C) 2002 - 2010 Paul Mundt |
7 | * Copyright (C) 2006 - 2007 Akio Idehara | 8 | * Copyright (C) 2006 - 2007 Akio Idehara |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/err.h> | 10 | #include <linux/err.h> |
14 | #include <linux/cache.h> | 11 | #include <linux/cache.h> |
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 7a73d2763e1b..f3cb2cccb262 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/stacktrace.c | 3 | * arch/sh/kernel/stacktrace.c |
3 | * | 4 | * |
4 | * Stack trace management functions | 5 | * Stack trace management functions |
5 | * | 6 | * |
6 | * Copyright (C) 2006 - 2008 Paul Mundt | 7 | * Copyright (C) 2006 - 2008 Paul Mundt |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
13 | #include <linux/sched/debug.h> | 10 | #include <linux/sched/debug.h> |
diff --git a/arch/sh/kernel/swsusp.c b/arch/sh/kernel/swsusp.c index 12b64a0f2f01..0b772d6d714f 100644 --- a/arch/sh/kernel/swsusp.c +++ b/arch/sh/kernel/swsusp.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * swsusp.c - SuperH hibernation support | 3 | * swsusp.c - SuperH hibernation support |
3 | * | 4 | * |
4 | * Copyright (C) 2009 Magnus Damm | 5 | * Copyright (C) 2009 Magnus Damm |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 54978e01bf94..96e9c54a07f5 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S | |||
@@ -1,15 +1,11 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/syscalls.S | 3 | * arch/sh/kernel/syscalls.S |
3 | * | 4 | * |
4 | * System call table for SuperH | 5 | * System call table for SuperH |
5 | * | 6 | * |
6 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 7 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
7 | * Copyright (C) 2003 Paul Mundt | 8 | * Copyright (C) 2003 Paul Mundt |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | * | ||
13 | */ | 9 | */ |
14 | #include <linux/sys.h> | 10 | #include <linux/sys.h> |
15 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index d6a27f7a4c54..1bcb86f0b728 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S | |||
@@ -1,13 +1,10 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/kernel/syscalls_64.S | 3 | * arch/sh/kernel/syscalls_64.S |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2004 - 2007 Paul Mundt | 6 | * Copyright (C) 2004 - 2007 Paul Mundt |
6 | * Copyright (C) 2003, 2004 Richard Curnow | 7 | * Copyright (C) 2003, 2004 Richard Curnow |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/sys.h> | 10 | #include <linux/sys.h> |
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 8a1c6c8ab4ec..e16b2cd269a3 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/time.c | 3 | * arch/sh/kernel/time.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | 6 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> |
6 | * Copyright (C) 2002 - 2009 Paul Mundt | 7 | * Copyright (C) 2002 - 2009 Paul Mundt |
7 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | 8 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | 11 | #include <linux/init.h> |
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c index c82912a61d74..7a989eed3b18 100644 --- a/arch/sh/kernel/topology.c +++ b/arch/sh/kernel/topology.c | |||
@@ -1,11 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/topology.c | 3 | * arch/sh/kernel/topology.c |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Paul Mundt | 5 | * Copyright (C) 2007 Paul Mundt |
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | 6 | */ |
10 | #include <linux/cpu.h> | 7 | #include <linux/cpu.h> |
11 | #include <linux/cpumask.h> | 8 | #include <linux/cpumask.h> |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 60709ad17fc7..f2a18b5fafd8 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * 'traps.c' handles hardware traps and faults after we have saved some | 3 | * 'traps.c' handles hardware traps and faults after we have saved some |
3 | * state in 'entry.S'. | 4 | * state in 'entry.S'. |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2000 Philipp Rumpf | 7 | * Copyright (C) 2000 Philipp Rumpf |
7 | * Copyright (C) 2000 David Howells | 8 | * Copyright (C) 2000 David Howells |
8 | * Copyright (C) 2002 - 2010 Paul Mundt | 9 | * Copyright (C) 2002 - 2010 Paul Mundt |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
15 | #include <linux/ptrace.h> | 12 | #include <linux/ptrace.h> |
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 014fb08cf133..c52bda4d2574 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c | |||
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/traps_64.c | 3 | * arch/sh/kernel/traps_64.c |
3 | * | 4 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 5 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2003, 2004 Paul Mundt | 6 | * Copyright (C) 2003, 2004 Paul Mundt |
6 | * Copyright (C) 2003, 2004 Richard Curnow | 7 | * Copyright (C) 2003, 2004 Richard Curnow |
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | 8 | */ |
12 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
13 | #include <linux/sched/debug.h> | 10 | #include <linux/sched/debug.h> |
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c index 521b5432471f..7a54b72dd923 100644 --- a/arch/sh/kernel/unwinder.c +++ b/arch/sh/kernel/unwinder.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright (C) 2009 Matt Fleming | 3 | * Copyright (C) 2009 Matt Fleming |
3 | * | 4 | * |
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index cc0cc5b4ff18..98494480f048 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/kernel/vsyscall/vsyscall.c | 3 | * arch/sh/kernel/vsyscall/vsyscall.c |
3 | * | 4 | * |
@@ -5,10 +6,6 @@ | |||
5 | * | 6 | * |
6 | * vDSO randomization | 7 | * vDSO randomization |
7 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar | 8 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | 9 | */ |
13 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
diff --git a/arch/sh/lib/ashiftrt.S b/arch/sh/lib/ashiftrt.S index 45ce86558f46..0f7145e3c51e 100644 --- a/arch/sh/lib/ashiftrt.S +++ b/arch/sh/lib/ashiftrt.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/ashlsi3.S b/arch/sh/lib/ashlsi3.S index 70a6434945ab..4df4401cdf31 100644 --- a/arch/sh/lib/ashlsi3.S +++ b/arch/sh/lib/ashlsi3.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/ashrsi3.S b/arch/sh/lib/ashrsi3.S index 602599d80209..bf3c4e03e6ff 100644 --- a/arch/sh/lib/ashrsi3.S +++ b/arch/sh/lib/ashrsi3.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S index 356c8ec92893..97b5c2d9fec4 100644 --- a/arch/sh/lib/checksum.S +++ b/arch/sh/lib/checksum.S | |||
@@ -1,4 +1,6 @@ | |||
1 | /* $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $ | 1 | /* SPDX-License-Identifier: GPL-2.0+ |
2 | * | ||
3 | * $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $ | ||
2 | * | 4 | * |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX | 5 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
4 | * operating system. INET is implemented using the BSD Socket | 6 | * operating system. INET is implemented using the BSD Socket |
@@ -21,11 +23,6 @@ | |||
21 | * converted to pure assembler | 23 | * converted to pure assembler |
22 | * | 24 | * |
23 | * SuperH version: Copyright (C) 1999 Niibe Yutaka | 25 | * SuperH version: Copyright (C) 1999 Niibe Yutaka |
24 | * | ||
25 | * This program is free software; you can redistribute it and/or | ||
26 | * modify it under the terms of the GNU General Public License | ||
27 | * as published by the Free Software Foundation; either version | ||
28 | * 2 of the License, or (at your option) any later version. | ||
29 | */ | 26 | */ |
30 | 27 | ||
31 | #include <asm/errno.h> | 28 | #include <asm/errno.h> |
diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c index 88dfe6e396bc..ebcf7c0a7335 100644 --- a/arch/sh/lib/io.c +++ b/arch/sh/lib/io.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * arch/sh/lib/io.c - SH32 optimized I/O routines | 3 | * arch/sh/lib/io.c - SH32 optimized I/O routines |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Provide real functions which expand to whatever the header file defined. | 8 | * Provide real functions which expand to whatever the header file defined. |
8 | * Also definitions of machine independent IO functions. | 9 | * Also definitions of machine independent IO functions. |
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | 10 | */ |
14 | #include <linux/module.h> | 11 | #include <linux/module.h> |
15 | #include <linux/io.h> | 12 | #include <linux/io.h> |
diff --git a/arch/sh/lib/libgcc.h b/arch/sh/lib/libgcc.h index 05909d58e2fe..58ada9e8f1c2 100644 --- a/arch/sh/lib/libgcc.h +++ b/arch/sh/lib/libgcc.h | |||
@@ -1,3 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
1 | #ifndef __ASM_LIBGCC_H | 3 | #ifndef __ASM_LIBGCC_H |
2 | #define __ASM_LIBGCC_H | 4 | #define __ASM_LIBGCC_H |
3 | 5 | ||
diff --git a/arch/sh/lib/lshrsi3.S b/arch/sh/lib/lshrsi3.S index f2a6959f526d..b79b8170061f 100644 --- a/arch/sh/lib/lshrsi3.S +++ b/arch/sh/lib/lshrsi3.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 7a8572f9d58b..c6ca90cc9606 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | ||
2 | * arch/sh/lib/mcount.S | 3 | * arch/sh/lib/mcount.S |
3 | * | 4 | * |
4 | * Copyright (C) 2008, 2009 Paul Mundt | 5 | * Copyright (C) 2008, 2009 Paul Mundt |
5 | * Copyright (C) 2008, 2009 Matt Fleming | 6 | * Copyright (C) 2008, 2009 Matt Fleming |
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | 7 | */ |
11 | #include <asm/ftrace.h> | 8 | #include <asm/ftrace.h> |
12 | #include <asm/thread_info.h> | 9 | #include <asm/thread_info.h> |
diff --git a/arch/sh/lib/movmem.S b/arch/sh/lib/movmem.S index 62075f6bc67c..8ac54d6b38a1 100644 --- a/arch/sh/lib/movmem.S +++ b/arch/sh/lib/movmem.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/udiv_qrnnd.S b/arch/sh/lib/udiv_qrnnd.S index 32b9a36de943..28938daccd6b 100644 --- a/arch/sh/lib/udiv_qrnnd.S +++ b/arch/sh/lib/udiv_qrnnd.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/udivsi3.S b/arch/sh/lib/udivsi3.S index 72157ab5c314..09ed1f9deb2e 100644 --- a/arch/sh/lib/udivsi3.S +++ b/arch/sh/lib/udivsi3.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005 | 4 | 2004, 2005 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/lib/udivsi3_i4i-Os.S b/arch/sh/lib/udivsi3_i4i-Os.S index 4835553e1ea9..fa4e4dff3da1 100644 --- a/arch/sh/lib/udivsi3_i4i-Os.S +++ b/arch/sh/lib/udivsi3_i4i-Os.S | |||
@@ -1,28 +1,7 @@ | |||
1 | /* Copyright (C) 2006 Free Software Foundation, Inc. | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | 2 | * | |
3 | This file is free software; you can redistribute it and/or modify it | 3 | * Copyright (C) 2006 Free Software Foundation, Inc. |
4 | under the terms of the GNU General Public License as published by the | 4 | */ |
5 | Free Software Foundation; either version 2, or (at your option) any | ||
6 | later version. | ||
7 | |||
8 | In addition to the permissions in the GNU General Public License, the | ||
9 | Free Software Foundation gives you unlimited permission to link the | ||
10 | compiled version of this file into combinations with other programs, | ||
11 | and to distribute those combinations without any restriction coming | ||
12 | from the use of this file. (The General Public License restrictions | ||
13 | do apply in other respects; for example, they cover modification of | ||
14 | the file, and distribution when not linked into a combine | ||
15 | executable.) | ||
16 | |||
17 | This file is distributed in the hope that it will be useful, but | ||
18 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
20 | General Public License for more details. | ||
21 | |||
22 | You should have received a copy of the GNU General Public License | ||
23 | along with this program; see the file COPYING. If not, write to | ||
24 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
25 | Boston, MA 02110-1301, USA. */ | ||
26 | 5 | ||
27 | /* Moderately Space-optimized libgcc routines for the Renesas SH / | 6 | /* Moderately Space-optimized libgcc routines for the Renesas SH / |
28 | STMicroelectronics ST40 CPUs. | 7 | STMicroelectronics ST40 CPUs. |
diff --git a/arch/sh/lib/udivsi3_i4i.S b/arch/sh/lib/udivsi3_i4i.S index f1a79d9c5015..6944eb6b4a75 100644 --- a/arch/sh/lib/udivsi3_i4i.S +++ b/arch/sh/lib/udivsi3_i4i.S | |||
@@ -1,30 +1,9 @@ | |||
1 | /* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | 1 | /* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 |
2 | |||
3 | Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, | ||
2 | 2004, 2005, 2006 | 4 | 2004, 2005, 2006 |
3 | Free Software Foundation, Inc. | 5 | Free Software Foundation, Inc. |
4 | 6 | */ | |
5 | This file is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the | ||
7 | Free Software Foundation; either version 2, or (at your option) any | ||
8 | later version. | ||
9 | |||
10 | In addition to the permissions in the GNU General Public License, the | ||
11 | Free Software Foundation gives you unlimited permission to link the | ||
12 | compiled version of this file into combinations with other programs, | ||
13 | and to distribute those combinations without any restriction coming | ||
14 | from the use of this file. (The General Public License restrictions | ||
15 | do apply in other respects; for example, they cover modification of | ||
16 | the file, and distribution when not linked into a combine | ||
17 | executable.) | ||
18 | |||
19 | This file is distributed in the hope that it will be useful, but | ||
20 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; see the file COPYING. If not, write to | ||
26 | the Free Software Foundation, 51 Franklin Street, Fifth Floor, | ||
27 | Boston, MA 02110-1301, USA. */ | ||
28 | 7 | ||
29 | !! libgcc routines for the Renesas / SuperH SH CPUs. | 8 | !! libgcc routines for the Renesas / SuperH SH CPUs. |
30 | !! Contributed by Steve Chamberlain. | 9 | !! Contributed by Steve Chamberlain. |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index c8c13c777162..a8e5c0e00fca 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | |||
443 | #endif | 443 | #endif |
444 | 444 | ||
445 | #ifdef CONFIG_MEMORY_HOTREMOVE | 445 | #ifdef CONFIG_MEMORY_HOTREMOVE |
446 | int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 446 | int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) |
447 | { | 447 | { |
448 | unsigned long start_pfn = PFN_DOWN(start); | 448 | unsigned long start_pfn = PFN_DOWN(start); |
449 | unsigned long nr_pages = size >> PAGE_SHIFT; | 449 | unsigned long nr_pages = size >> PAGE_SHIFT; |
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 1067469ba2ea..8d21a83dd289 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c | |||
@@ -51,8 +51,8 @@ void __init mem_init(void) | |||
51 | 51 | ||
52 | /* this will put all low memory onto the freelists */ | 52 | /* this will put all low memory onto the freelists */ |
53 | memblock_free_all(); | 53 | memblock_free_all(); |
54 | max_low_pfn = totalram_pages; | 54 | max_low_pfn = totalram_pages(); |
55 | max_pfn = totalram_pages; | 55 | max_pfn = max_low_pfn; |
56 | mem_init_print_info(NULL); | 56 | mem_init_print_info(NULL); |
57 | kmalloc_ok = 1; | 57 | kmalloc_ok = 1; |
58 | } | 58 | } |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 071b2a6fff85..33051436c864 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -967,7 +967,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) | |||
967 | } | 967 | } |
968 | 968 | ||
969 | extern unsigned long arch_align_stack(unsigned long sp); | 969 | extern unsigned long arch_align_stack(unsigned long sp); |
970 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | 970 | void free_init_pages(const char *what, unsigned long begin, unsigned long end); |
971 | extern void free_kernel_image_pages(void *begin, void *end); | 971 | extern void free_kernel_image_pages(void *begin, void *end); |
972 | 972 | ||
973 | void default_idle(void); | 973 | void default_idle(void); |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 2637ff09d6a0..97f9ada9ceda 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -434,9 +434,10 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, | |||
434 | size_t len, loff_t *ppos) | 434 | size_t len, loff_t *ppos) |
435 | { | 435 | { |
436 | ssize_t ret = -EINVAL; | 436 | ssize_t ret = -EINVAL; |
437 | unsigned long nr_pages = totalram_pages(); | ||
437 | 438 | ||
438 | if ((len >> PAGE_SHIFT) > totalram_pages) { | 439 | if ((len >> PAGE_SHIFT) > nr_pages) { |
439 | pr_err("too much data (max %ld pages)\n", totalram_pages); | 440 | pr_err("too much data (max %ld pages)\n", nr_pages); |
440 | return ret; | 441 | return ret; |
441 | } | 442 | } |
442 | 443 | ||
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index abcb8d00b014..e3cdc85ce5b6 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, | |||
377 | 377 | ||
378 | /* | 378 | /* |
379 | * This is an optimization for KASAN=y case. Since all kasan page tables | 379 | * This is an optimization for KASAN=y case. Since all kasan page tables |
380 | * eventually point to the kasan_zero_page we could call note_page() | 380 | * eventually point to the kasan_early_shadow_page we could call note_page() |
381 | * right away without walking through lower level page tables. This saves | 381 | * right away without walking through lower level page tables. This saves |
382 | * us dozens of seconds (minutes for 5-level config) while checking for | 382 | * us dozens of seconds (minutes for 5-level config) while checking for |
383 | * W+X mapping or reading kernel_page_tables debugfs file. | 383 | * W+X mapping or reading kernel_page_tables debugfs file. |
@@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, | |||
385 | static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, | 385 | static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, |
386 | void *pt) | 386 | void *pt) |
387 | { | 387 | { |
388 | if (__pa(pt) == __pa(kasan_zero_pmd) || | 388 | if (__pa(pt) == __pa(kasan_early_shadow_pmd) || |
389 | (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) || | 389 | (pgtable_l5_enabled() && |
390 | __pa(pt) == __pa(kasan_zero_pud)) { | 390 | __pa(pt) == __pa(kasan_early_shadow_p4d)) || |
391 | pgprotval_t prot = pte_flags(kasan_zero_pte[0]); | 391 | __pa(pt) == __pa(kasan_early_shadow_pud)) { |
392 | pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]); | ||
392 | note_page(m, st, __pgprot(prot), 0, 5); | 393 | note_page(m, st, __pgprot(prot), 0, 5); |
393 | return true; | 394 | return true; |
394 | } | 395 | } |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 427a955a2cf2..f905a2371080 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -742,7 +742,7 @@ int devmem_is_allowed(unsigned long pagenr) | |||
742 | return 1; | 742 | return 1; |
743 | } | 743 | } |
744 | 744 | ||
745 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | 745 | void free_init_pages(const char *what, unsigned long begin, unsigned long end) |
746 | { | 746 | { |
747 | unsigned long begin_aligned, end_aligned; | 747 | unsigned long begin_aligned, end_aligned; |
748 | 748 | ||
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 49ecf5ecf6d3..85c94f9a87f8 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -860,7 +860,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, | |||
860 | } | 860 | } |
861 | 861 | ||
862 | #ifdef CONFIG_MEMORY_HOTREMOVE | 862 | #ifdef CONFIG_MEMORY_HOTREMOVE |
863 | int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 863 | int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) |
864 | { | 864 | { |
865 | unsigned long start_pfn = start >> PAGE_SHIFT; | 865 | unsigned long start_pfn = start >> PAGE_SHIFT; |
866 | unsigned long nr_pages = size >> PAGE_SHIFT; | 866 | unsigned long nr_pages = size >> PAGE_SHIFT; |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 484c1b92f078..bccff68e3267 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -1141,7 +1141,8 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end) | |||
1141 | remove_pagetable(start, end, true, NULL); | 1141 | remove_pagetable(start, end, true, NULL); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) | 1144 | int __ref arch_remove_memory(int nid, u64 start, u64 size, |
1145 | struct vmem_altmap *altmap) | ||
1145 | { | 1146 | { |
1146 | unsigned long start_pfn = start >> PAGE_SHIFT; | 1147 | unsigned long start_pfn = start >> PAGE_SHIFT; |
1147 | unsigned long nr_pages = size >> PAGE_SHIFT; | 1148 | unsigned long nr_pages = size >> PAGE_SHIFT; |
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 04a9cf6b034f..462fde83b515 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd, | |||
211 | unsigned long next; | 211 | unsigned long next; |
212 | 212 | ||
213 | if (pgd_none(*pgd)) { | 213 | if (pgd_none(*pgd)) { |
214 | pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d)); | 214 | pgd_entry = __pgd(_KERNPG_TABLE | |
215 | __pa_nodebug(kasan_early_shadow_p4d)); | ||
215 | set_pgd(pgd, pgd_entry); | 216 | set_pgd(pgd, pgd_entry); |
216 | } | 217 | } |
217 | 218 | ||
@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd, | |||
222 | if (!p4d_none(*p4d)) | 223 | if (!p4d_none(*p4d)) |
223 | continue; | 224 | continue; |
224 | 225 | ||
225 | p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud)); | 226 | p4d_entry = __p4d(_KERNPG_TABLE | |
227 | __pa_nodebug(kasan_early_shadow_pud)); | ||
226 | set_p4d(p4d, p4d_entry); | 228 | set_p4d(p4d, p4d_entry); |
227 | } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); | 229 | } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); |
228 | } | 230 | } |
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = { | |||
261 | void __init kasan_early_init(void) | 263 | void __init kasan_early_init(void) |
262 | { | 264 | { |
263 | int i; | 265 | int i; |
264 | pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC; | 266 | pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) | |
265 | pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE; | 267 | __PAGE_KERNEL | _PAGE_ENC; |
266 | pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE; | 268 | pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE; |
267 | p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE; | 269 | pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE; |
270 | p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE; | ||
268 | 271 | ||
269 | /* Mask out unsupported __PAGE_KERNEL bits: */ | 272 | /* Mask out unsupported __PAGE_KERNEL bits: */ |
270 | pte_val &= __default_kernel_pte_mask; | 273 | pte_val &= __default_kernel_pte_mask; |
@@ -273,16 +276,16 @@ void __init kasan_early_init(void) | |||
273 | p4d_val &= __default_kernel_pte_mask; | 276 | p4d_val &= __default_kernel_pte_mask; |
274 | 277 | ||
275 | for (i = 0; i < PTRS_PER_PTE; i++) | 278 | for (i = 0; i < PTRS_PER_PTE; i++) |
276 | kasan_zero_pte[i] = __pte(pte_val); | 279 | kasan_early_shadow_pte[i] = __pte(pte_val); |
277 | 280 | ||
278 | for (i = 0; i < PTRS_PER_PMD; i++) | 281 | for (i = 0; i < PTRS_PER_PMD; i++) |
279 | kasan_zero_pmd[i] = __pmd(pmd_val); | 282 | kasan_early_shadow_pmd[i] = __pmd(pmd_val); |
280 | 283 | ||
281 | for (i = 0; i < PTRS_PER_PUD; i++) | 284 | for (i = 0; i < PTRS_PER_PUD; i++) |
282 | kasan_zero_pud[i] = __pud(pud_val); | 285 | kasan_early_shadow_pud[i] = __pud(pud_val); |
283 | 286 | ||
284 | for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++) | 287 | for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++) |
285 | kasan_zero_p4d[i] = __p4d(p4d_val); | 288 | kasan_early_shadow_p4d[i] = __p4d(p4d_val); |
286 | 289 | ||
287 | kasan_map_early_shadow(early_top_pgt); | 290 | kasan_map_early_shadow(early_top_pgt); |
288 | kasan_map_early_shadow(init_top_pgt); | 291 | kasan_map_early_shadow(init_top_pgt); |
@@ -326,7 +329,7 @@ void __init kasan_init(void) | |||
326 | 329 | ||
327 | clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); | 330 | clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); |
328 | 331 | ||
329 | kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), | 332 | kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), |
330 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); | 333 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); |
331 | 334 | ||
332 | for (i = 0; i < E820_MAX_ENTRIES; i++) { | 335 | for (i = 0; i < E820_MAX_ENTRIES; i++) { |
@@ -338,41 +341,41 @@ void __init kasan_init(void) | |||
338 | 341 | ||
339 | shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; | 342 | shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; |
340 | shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); | 343 | shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); |
341 | shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, | 344 | shadow_cpu_entry_begin = (void *)round_down( |
342 | PAGE_SIZE); | 345 | (unsigned long)shadow_cpu_entry_begin, PAGE_SIZE); |
343 | 346 | ||
344 | shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + | 347 | shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + |
345 | CPU_ENTRY_AREA_MAP_SIZE); | 348 | CPU_ENTRY_AREA_MAP_SIZE); |
346 | shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); | 349 | shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); |
347 | shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, | 350 | shadow_cpu_entry_end = (void *)round_up( |
348 | PAGE_SIZE); | 351 | (unsigned long)shadow_cpu_entry_end, PAGE_SIZE); |
349 | 352 | ||
350 | kasan_populate_zero_shadow( | 353 | kasan_populate_early_shadow( |
351 | kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), | 354 | kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), |
352 | shadow_cpu_entry_begin); | 355 | shadow_cpu_entry_begin); |
353 | 356 | ||
354 | kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, | 357 | kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, |
355 | (unsigned long)shadow_cpu_entry_end, 0); | 358 | (unsigned long)shadow_cpu_entry_end, 0); |
356 | 359 | ||
357 | kasan_populate_zero_shadow(shadow_cpu_entry_end, | 360 | kasan_populate_early_shadow(shadow_cpu_entry_end, |
358 | kasan_mem_to_shadow((void *)__START_KERNEL_map)); | 361 | kasan_mem_to_shadow((void *)__START_KERNEL_map)); |
359 | 362 | ||
360 | kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), | 363 | kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), |
361 | (unsigned long)kasan_mem_to_shadow(_end), | 364 | (unsigned long)kasan_mem_to_shadow(_end), |
362 | early_pfn_to_nid(__pa(_stext))); | 365 | early_pfn_to_nid(__pa(_stext))); |
363 | 366 | ||
364 | kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | 367 | kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END), |
365 | (void *)KASAN_SHADOW_END); | 368 | (void *)KASAN_SHADOW_END); |
366 | 369 | ||
367 | load_cr3(init_top_pgt); | 370 | load_cr3(init_top_pgt); |
368 | __flush_tlb_all(); | 371 | __flush_tlb_all(); |
369 | 372 | ||
370 | /* | 373 | /* |
371 | * kasan_zero_page has been used as early shadow memory, thus it may | 374 | * kasan_early_shadow_page has been used as early shadow memory, thus |
372 | * contain some garbage. Now we can clear and write protect it, since | 375 | * it may contain some garbage. Now we can clear and write protect it, |
373 | * after the TLB flush no one should write to it. | 376 | * since after the TLB flush no one should write to it. |
374 | */ | 377 | */ |
375 | memset(kasan_zero_page, 0, PAGE_SIZE); | 378 | memset(kasan_early_shadow_page, 0, PAGE_SIZE); |
376 | for (i = 0; i < PTRS_PER_PTE; i++) { | 379 | for (i = 0; i < PTRS_PER_PTE; i++) { |
377 | pte_t pte; | 380 | pte_t pte; |
378 | pgprot_t prot; | 381 | pgprot_t prot; |
@@ -380,8 +383,8 @@ void __init kasan_init(void) | |||
380 | prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC); | 383 | prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC); |
381 | pgprot_val(prot) &= __default_kernel_pte_mask; | 384 | pgprot_val(prot) &= __default_kernel_pte_mask; |
382 | 385 | ||
383 | pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot)); | 386 | pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot)); |
384 | set_pte(&kasan_zero_pte[i], pte); | 387 | set_pte(&kasan_early_shadow_pte[i], pte); |
385 | } | 388 | } |
386 | /* Flush TLBs again to be sure that write protection applied. */ | 389 | /* Flush TLBs again to be sure that write protection applied. */ |
387 | __flush_tlb_all(); | 390 | __flush_tlb_all(); |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 59274e2c1ac4..b0284eab14dc 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -794,6 +794,14 @@ int pmd_clear_huge(pmd_t *pmd) | |||
794 | return 0; | 794 | return 0; |
795 | } | 795 | } |
796 | 796 | ||
797 | /* | ||
798 | * Until we support 512GB pages, skip them in the vmap area. | ||
799 | */ | ||
800 | int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) | ||
801 | { | ||
802 | return 0; | ||
803 | } | ||
804 | |||
797 | #ifdef CONFIG_X86_64 | 805 | #ifdef CONFIG_X86_64 |
798 | /** | 806 | /** |
799 | * pud_free_pmd_page - Clear pud entry and free pmd page. | 807 | * pud_free_pmd_page - Clear pud entry and free pmd page. |
@@ -811,9 +819,6 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) | |||
811 | pte_t *pte; | 819 | pte_t *pte; |
812 | int i; | 820 | int i; |
813 | 821 | ||
814 | if (pud_none(*pud)) | ||
815 | return 1; | ||
816 | |||
817 | pmd = (pmd_t *)pud_page_vaddr(*pud); | 822 | pmd = (pmd_t *)pud_page_vaddr(*pud); |
818 | pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); | 823 | pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); |
819 | if (!pmd_sv) | 824 | if (!pmd_sv) |
@@ -855,9 +860,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) | |||
855 | { | 860 | { |
856 | pte_t *pte; | 861 | pte_t *pte; |
857 | 862 | ||
858 | if (pmd_none(*pmd)) | ||
859 | return 1; | ||
860 | |||
861 | pte = (pte_t *)pmd_page_vaddr(*pmd); | 863 | pte = (pte_t *)pmd_page_vaddr(*pmd); |
862 | pmd_clear(pmd); | 864 | pmd_clear(pmd); |
863 | 865 | ||
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index 6b95ca43aec0..1734cda6bc4a 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c | |||
@@ -24,12 +24,13 @@ void __init kasan_early_init(void) | |||
24 | int i; | 24 | int i; |
25 | 25 | ||
26 | for (i = 0; i < PTRS_PER_PTE; ++i) | 26 | for (i = 0; i < PTRS_PER_PTE; ++i) |
27 | set_pte(kasan_zero_pte + i, | 27 | set_pte(kasan_early_shadow_pte + i, |
28 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL)); | 28 | mk_pte(virt_to_page(kasan_early_shadow_page), |
29 | PAGE_KERNEL)); | ||
29 | 30 | ||
30 | for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { | 31 | for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { |
31 | BUG_ON(!pmd_none(*pmd)); | 32 | BUG_ON(!pmd_none(*pmd)); |
32 | set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte)); | 33 | set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte)); |
33 | } | 34 | } |
34 | early_trap_init(); | 35 | early_trap_init(); |
35 | } | 36 | } |
@@ -80,13 +81,16 @@ void __init kasan_init(void) | |||
80 | populate(kasan_mem_to_shadow((void *)VMALLOC_START), | 81 | populate(kasan_mem_to_shadow((void *)VMALLOC_START), |
81 | kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); | 82 | kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); |
82 | 83 | ||
83 | /* Write protect kasan_zero_page and zero-initialize it again. */ | 84 | /* |
85 | * Write protect kasan_early_shadow_page and zero-initialize it again. | ||
86 | */ | ||
84 | for (i = 0; i < PTRS_PER_PTE; ++i) | 87 | for (i = 0; i < PTRS_PER_PTE; ++i) |
85 | set_pte(kasan_zero_pte + i, | 88 | set_pte(kasan_early_shadow_pte + i, |
86 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO)); | 89 | mk_pte(virt_to_page(kasan_early_shadow_page), |
90 | PAGE_KERNEL_RO)); | ||
87 | 91 | ||
88 | local_flush_tlb_all(); | 92 | local_flush_tlb_all(); |
89 | memset(kasan_zero_page, 0, PAGE_SIZE); | 93 | memset(kasan_early_shadow_page, 0, PAGE_SIZE); |
90 | 94 | ||
91 | /* At this point kasan is fully initialized. Enable error messages. */ | 95 | /* At this point kasan is fully initialized. Enable error messages. */ |
92 | current->kasan_depth = 0; | 96 | current->kasan_depth = 0; |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 0e5985682642..fb75a6fd4bd9 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -207,15 +207,15 @@ static bool pages_correctly_probed(unsigned long start_pfn) | |||
207 | return false; | 207 | return false; |
208 | 208 | ||
209 | if (!present_section_nr(section_nr)) { | 209 | if (!present_section_nr(section_nr)) { |
210 | pr_warn("section %ld pfn[%lx, %lx) not present", | 210 | pr_warn("section %ld pfn[%lx, %lx) not present\n", |
211 | section_nr, pfn, pfn + PAGES_PER_SECTION); | 211 | section_nr, pfn, pfn + PAGES_PER_SECTION); |
212 | return false; | 212 | return false; |
213 | } else if (!valid_section_nr(section_nr)) { | 213 | } else if (!valid_section_nr(section_nr)) { |
214 | pr_warn("section %ld pfn[%lx, %lx) no valid memmap", | 214 | pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n", |
215 | section_nr, pfn, pfn + PAGES_PER_SECTION); | 215 | section_nr, pfn, pfn + PAGES_PER_SECTION); |
216 | return false; | 216 | return false; |
217 | } else if (online_section_nr(section_nr)) { | 217 | } else if (online_section_nr(section_nr)) { |
218 | pr_warn("section %ld pfn[%lx, %lx) is already online", | 218 | pr_warn("section %ld pfn[%lx, %lx) is already online\n", |
219 | section_nr, pfn, pfn + PAGES_PER_SECTION); | 219 | section_nr, pfn, pfn + PAGES_PER_SECTION); |
220 | return false; | 220 | return false; |
221 | } | 221 | } |
@@ -688,7 +688,7 @@ static int add_memory_block(int base_section_nr) | |||
688 | int i, ret, section_count = 0, section_nr; | 688 | int i, ret, section_count = 0, section_nr; |
689 | 689 | ||
690 | for (i = base_section_nr; | 690 | for (i = base_section_nr; |
691 | (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS; | 691 | i < base_section_nr + sections_per_block; |
692 | i++) { | 692 | i++) { |
693 | if (!present_section_nr(i)) | 693 | if (!present_section_nr(i)) |
694 | continue; | 694 | continue; |
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index fcd055457364..1ffc64770643 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig | |||
@@ -15,7 +15,7 @@ config ZRAM | |||
15 | See Documentation/blockdev/zram.txt for more information. | 15 | See Documentation/blockdev/zram.txt for more information. |
16 | 16 | ||
17 | config ZRAM_WRITEBACK | 17 | config ZRAM_WRITEBACK |
18 | bool "Write back incompressible page to backing device" | 18 | bool "Write back incompressible or idle page to backing device" |
19 | depends on ZRAM | 19 | depends on ZRAM |
20 | help | 20 | help |
21 | With incompressible page, there is no memory saving to keep it | 21 | With incompressible page, there is no memory saving to keep it |
@@ -23,6 +23,9 @@ config ZRAM_WRITEBACK | |||
23 | For this feature, admin should set up backing device via | 23 | For this feature, admin should set up backing device via |
24 | /sys/block/zramX/backing_dev. | 24 | /sys/block/zramX/backing_dev. |
25 | 25 | ||
26 | With /sys/block/zramX/{idle,writeback}, application could ask | ||
27 | idle page's writeback to the backing device to save in memory. | ||
28 | |||
26 | See Documentation/blockdev/zram.txt for more information. | 29 | See Documentation/blockdev/zram.txt for more information. |
27 | 30 | ||
28 | config ZRAM_MEMORY_TRACKING | 31 | config ZRAM_MEMORY_TRACKING |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 4879595200e1..33c5cc879f24 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -52,15 +52,23 @@ static unsigned int num_devices = 1; | |||
52 | static size_t huge_class_size; | 52 | static size_t huge_class_size; |
53 | 53 | ||
54 | static void zram_free_page(struct zram *zram, size_t index); | 54 | static void zram_free_page(struct zram *zram, size_t index); |
55 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, | ||
56 | u32 index, int offset, struct bio *bio); | ||
57 | |||
58 | |||
59 | static int zram_slot_trylock(struct zram *zram, u32 index) | ||
60 | { | ||
61 | return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); | ||
62 | } | ||
55 | 63 | ||
56 | static void zram_slot_lock(struct zram *zram, u32 index) | 64 | static void zram_slot_lock(struct zram *zram, u32 index) |
57 | { | 65 | { |
58 | bit_spin_lock(ZRAM_LOCK, &zram->table[index].value); | 66 | bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); |
59 | } | 67 | } |
60 | 68 | ||
61 | static void zram_slot_unlock(struct zram *zram, u32 index) | 69 | static void zram_slot_unlock(struct zram *zram, u32 index) |
62 | { | 70 | { |
63 | bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value); | 71 | bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); |
64 | } | 72 | } |
65 | 73 | ||
66 | static inline bool init_done(struct zram *zram) | 74 | static inline bool init_done(struct zram *zram) |
@@ -68,13 +76,6 @@ static inline bool init_done(struct zram *zram) | |||
68 | return zram->disksize; | 76 | return zram->disksize; |
69 | } | 77 | } |
70 | 78 | ||
71 | static inline bool zram_allocated(struct zram *zram, u32 index) | ||
72 | { | ||
73 | |||
74 | return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) || | ||
75 | zram->table[index].handle; | ||
76 | } | ||
77 | |||
78 | static inline struct zram *dev_to_zram(struct device *dev) | 79 | static inline struct zram *dev_to_zram(struct device *dev) |
79 | { | 80 | { |
80 | return (struct zram *)dev_to_disk(dev)->private_data; | 81 | return (struct zram *)dev_to_disk(dev)->private_data; |
@@ -94,19 +95,19 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) | |||
94 | static bool zram_test_flag(struct zram *zram, u32 index, | 95 | static bool zram_test_flag(struct zram *zram, u32 index, |
95 | enum zram_pageflags flag) | 96 | enum zram_pageflags flag) |
96 | { | 97 | { |
97 | return zram->table[index].value & BIT(flag); | 98 | return zram->table[index].flags & BIT(flag); |
98 | } | 99 | } |
99 | 100 | ||
100 | static void zram_set_flag(struct zram *zram, u32 index, | 101 | static void zram_set_flag(struct zram *zram, u32 index, |
101 | enum zram_pageflags flag) | 102 | enum zram_pageflags flag) |
102 | { | 103 | { |
103 | zram->table[index].value |= BIT(flag); | 104 | zram->table[index].flags |= BIT(flag); |
104 | } | 105 | } |
105 | 106 | ||
106 | static void zram_clear_flag(struct zram *zram, u32 index, | 107 | static void zram_clear_flag(struct zram *zram, u32 index, |
107 | enum zram_pageflags flag) | 108 | enum zram_pageflags flag) |
108 | { | 109 | { |
109 | zram->table[index].value &= ~BIT(flag); | 110 | zram->table[index].flags &= ~BIT(flag); |
110 | } | 111 | } |
111 | 112 | ||
112 | static inline void zram_set_element(struct zram *zram, u32 index, | 113 | static inline void zram_set_element(struct zram *zram, u32 index, |
@@ -122,15 +123,22 @@ static unsigned long zram_get_element(struct zram *zram, u32 index) | |||
122 | 123 | ||
123 | static size_t zram_get_obj_size(struct zram *zram, u32 index) | 124 | static size_t zram_get_obj_size(struct zram *zram, u32 index) |
124 | { | 125 | { |
125 | return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); | 126 | return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); |
126 | } | 127 | } |
127 | 128 | ||
128 | static void zram_set_obj_size(struct zram *zram, | 129 | static void zram_set_obj_size(struct zram *zram, |
129 | u32 index, size_t size) | 130 | u32 index, size_t size) |
130 | { | 131 | { |
131 | unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT; | 132 | unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; |
132 | 133 | ||
133 | zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; | 134 | zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; |
135 | } | ||
136 | |||
137 | static inline bool zram_allocated(struct zram *zram, u32 index) | ||
138 | { | ||
139 | return zram_get_obj_size(zram, index) || | ||
140 | zram_test_flag(zram, index, ZRAM_SAME) || | ||
141 | zram_test_flag(zram, index, ZRAM_WB); | ||
134 | } | 142 | } |
135 | 143 | ||
136 | #if PAGE_SIZE != 4096 | 144 | #if PAGE_SIZE != 4096 |
@@ -276,17 +284,90 @@ static ssize_t mem_used_max_store(struct device *dev, | |||
276 | return len; | 284 | return len; |
277 | } | 285 | } |
278 | 286 | ||
287 | static ssize_t idle_store(struct device *dev, | ||
288 | struct device_attribute *attr, const char *buf, size_t len) | ||
289 | { | ||
290 | struct zram *zram = dev_to_zram(dev); | ||
291 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; | ||
292 | int index; | ||
293 | char mode_buf[8]; | ||
294 | ssize_t sz; | ||
295 | |||
296 | sz = strscpy(mode_buf, buf, sizeof(mode_buf)); | ||
297 | if (sz <= 0) | ||
298 | return -EINVAL; | ||
299 | |||
300 | /* ignore trailing new line */ | ||
301 | if (mode_buf[sz - 1] == '\n') | ||
302 | mode_buf[sz - 1] = 0x00; | ||
303 | |||
304 | if (strcmp(mode_buf, "all")) | ||
305 | return -EINVAL; | ||
306 | |||
307 | down_read(&zram->init_lock); | ||
308 | if (!init_done(zram)) { | ||
309 | up_read(&zram->init_lock); | ||
310 | return -EINVAL; | ||
311 | } | ||
312 | |||
313 | for (index = 0; index < nr_pages; index++) { | ||
314 | /* | ||
315 | * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race. | ||
316 | * See the comment in writeback_store. | ||
317 | */ | ||
318 | zram_slot_lock(zram, index); | ||
319 | if (!zram_allocated(zram, index) || | ||
320 | zram_test_flag(zram, index, ZRAM_UNDER_WB)) | ||
321 | goto next; | ||
322 | zram_set_flag(zram, index, ZRAM_IDLE); | ||
323 | next: | ||
324 | zram_slot_unlock(zram, index); | ||
325 | } | ||
326 | |||
327 | up_read(&zram->init_lock); | ||
328 | |||
329 | return len; | ||
330 | } | ||
331 | |||
279 | #ifdef CONFIG_ZRAM_WRITEBACK | 332 | #ifdef CONFIG_ZRAM_WRITEBACK |
280 | static bool zram_wb_enabled(struct zram *zram) | 333 | static ssize_t writeback_limit_store(struct device *dev, |
334 | struct device_attribute *attr, const char *buf, size_t len) | ||
335 | { | ||
336 | struct zram *zram = dev_to_zram(dev); | ||
337 | u64 val; | ||
338 | ssize_t ret = -EINVAL; | ||
339 | |||
340 | if (kstrtoull(buf, 10, &val)) | ||
341 | return ret; | ||
342 | |||
343 | down_read(&zram->init_lock); | ||
344 | atomic64_set(&zram->stats.bd_wb_limit, val); | ||
345 | if (val == 0) | ||
346 | zram->stop_writeback = false; | ||
347 | up_read(&zram->init_lock); | ||
348 | ret = len; | ||
349 | |||
350 | return ret; | ||
351 | } | ||
352 | |||
353 | static ssize_t writeback_limit_show(struct device *dev, | ||
354 | struct device_attribute *attr, char *buf) | ||
281 | { | 355 | { |
282 | return zram->backing_dev; | 356 | u64 val; |
357 | struct zram *zram = dev_to_zram(dev); | ||
358 | |||
359 | down_read(&zram->init_lock); | ||
360 | val = atomic64_read(&zram->stats.bd_wb_limit); | ||
361 | up_read(&zram->init_lock); | ||
362 | |||
363 | return scnprintf(buf, PAGE_SIZE, "%llu\n", val); | ||
283 | } | 364 | } |
284 | 365 | ||
285 | static void reset_bdev(struct zram *zram) | 366 | static void reset_bdev(struct zram *zram) |
286 | { | 367 | { |
287 | struct block_device *bdev; | 368 | struct block_device *bdev; |
288 | 369 | ||
289 | if (!zram_wb_enabled(zram)) | 370 | if (!zram->backing_dev) |
290 | return; | 371 | return; |
291 | 372 | ||
292 | bdev = zram->bdev; | 373 | bdev = zram->bdev; |
@@ -313,7 +394,7 @@ static ssize_t backing_dev_show(struct device *dev, | |||
313 | ssize_t ret; | 394 | ssize_t ret; |
314 | 395 | ||
315 | down_read(&zram->init_lock); | 396 | down_read(&zram->init_lock); |
316 | if (!zram_wb_enabled(zram)) { | 397 | if (!zram->backing_dev) { |
317 | memcpy(buf, "none\n", 5); | 398 | memcpy(buf, "none\n", 5); |
318 | up_read(&zram->init_lock); | 399 | up_read(&zram->init_lock); |
319 | return 5; | 400 | return 5; |
@@ -382,8 +463,10 @@ static ssize_t backing_dev_store(struct device *dev, | |||
382 | 463 | ||
383 | bdev = bdgrab(I_BDEV(inode)); | 464 | bdev = bdgrab(I_BDEV(inode)); |
384 | err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); | 465 | err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); |
385 | if (err < 0) | 466 | if (err < 0) { |
467 | bdev = NULL; | ||
386 | goto out; | 468 | goto out; |
469 | } | ||
387 | 470 | ||
388 | nr_pages = i_size_read(inode) >> PAGE_SHIFT; | 471 | nr_pages = i_size_read(inode) >> PAGE_SHIFT; |
389 | bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); | 472 | bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); |
@@ -399,7 +482,6 @@ static ssize_t backing_dev_store(struct device *dev, | |||
399 | goto out; | 482 | goto out; |
400 | 483 | ||
401 | reset_bdev(zram); | 484 | reset_bdev(zram); |
402 | spin_lock_init(&zram->bitmap_lock); | ||
403 | 485 | ||
404 | zram->old_block_size = old_block_size; | 486 | zram->old_block_size = old_block_size; |
405 | zram->bdev = bdev; | 487 | zram->bdev = bdev; |
@@ -441,32 +523,29 @@ out: | |||
441 | return err; | 523 | return err; |
442 | } | 524 | } |
443 | 525 | ||
444 | static unsigned long get_entry_bdev(struct zram *zram) | 526 | static unsigned long alloc_block_bdev(struct zram *zram) |
445 | { | 527 | { |
446 | unsigned long entry; | 528 | unsigned long blk_idx = 1; |
447 | 529 | retry: | |
448 | spin_lock(&zram->bitmap_lock); | ||
449 | /* skip 0 bit to confuse zram.handle = 0 */ | 530 | /* skip 0 bit to confuse zram.handle = 0 */ |
450 | entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1); | 531 | blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx); |
451 | if (entry == zram->nr_pages) { | 532 | if (blk_idx == zram->nr_pages) |
452 | spin_unlock(&zram->bitmap_lock); | ||
453 | return 0; | 533 | return 0; |
454 | } | ||
455 | 534 | ||
456 | set_bit(entry, zram->bitmap); | 535 | if (test_and_set_bit(blk_idx, zram->bitmap)) |
457 | spin_unlock(&zram->bitmap_lock); | 536 | goto retry; |
458 | 537 | ||
459 | return entry; | 538 | atomic64_inc(&zram->stats.bd_count); |
539 | return blk_idx; | ||
460 | } | 540 | } |
461 | 541 | ||
462 | static void put_entry_bdev(struct zram *zram, unsigned long entry) | 542 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) |
463 | { | 543 | { |
464 | int was_set; | 544 | int was_set; |
465 | 545 | ||
466 | spin_lock(&zram->bitmap_lock); | 546 | was_set = test_and_clear_bit(blk_idx, zram->bitmap); |
467 | was_set = test_and_clear_bit(entry, zram->bitmap); | ||
468 | spin_unlock(&zram->bitmap_lock); | ||
469 | WARN_ON_ONCE(!was_set); | 547 | WARN_ON_ONCE(!was_set); |
548 | atomic64_dec(&zram->stats.bd_count); | ||
470 | } | 549 | } |
471 | 550 | ||
472 | static void zram_page_end_io(struct bio *bio) | 551 | static void zram_page_end_io(struct bio *bio) |
@@ -509,6 +588,169 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, | |||
509 | return 1; | 588 | return 1; |
510 | } | 589 | } |
511 | 590 | ||
591 | #define HUGE_WRITEBACK 0x1 | ||
592 | #define IDLE_WRITEBACK 0x2 | ||
593 | |||
594 | static ssize_t writeback_store(struct device *dev, | ||
595 | struct device_attribute *attr, const char *buf, size_t len) | ||
596 | { | ||
597 | struct zram *zram = dev_to_zram(dev); | ||
598 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; | ||
599 | unsigned long index; | ||
600 | struct bio bio; | ||
601 | struct bio_vec bio_vec; | ||
602 | struct page *page; | ||
603 | ssize_t ret, sz; | ||
604 | char mode_buf[8]; | ||
605 | unsigned long mode = -1UL; | ||
606 | unsigned long blk_idx = 0; | ||
607 | |||
608 | sz = strscpy(mode_buf, buf, sizeof(mode_buf)); | ||
609 | if (sz <= 0) | ||
610 | return -EINVAL; | ||
611 | |||
612 | /* ignore trailing newline */ | ||
613 | if (mode_buf[sz - 1] == '\n') | ||
614 | mode_buf[sz - 1] = 0x00; | ||
615 | |||
616 | if (!strcmp(mode_buf, "idle")) | ||
617 | mode = IDLE_WRITEBACK; | ||
618 | else if (!strcmp(mode_buf, "huge")) | ||
619 | mode = HUGE_WRITEBACK; | ||
620 | |||
621 | if (mode == -1UL) | ||
622 | return -EINVAL; | ||
623 | |||
624 | down_read(&zram->init_lock); | ||
625 | if (!init_done(zram)) { | ||
626 | ret = -EINVAL; | ||
627 | goto release_init_lock; | ||
628 | } | ||
629 | |||
630 | if (!zram->backing_dev) { | ||
631 | ret = -ENODEV; | ||
632 | goto release_init_lock; | ||
633 | } | ||
634 | |||
635 | page = alloc_page(GFP_KERNEL); | ||
636 | if (!page) { | ||
637 | ret = -ENOMEM; | ||
638 | goto release_init_lock; | ||
639 | } | ||
640 | |||
641 | for (index = 0; index < nr_pages; index++) { | ||
642 | struct bio_vec bvec; | ||
643 | |||
644 | bvec.bv_page = page; | ||
645 | bvec.bv_len = PAGE_SIZE; | ||
646 | bvec.bv_offset = 0; | ||
647 | |||
648 | if (zram->stop_writeback) { | ||
649 | ret = -EIO; | ||
650 | break; | ||
651 | } | ||
652 | |||
653 | if (!blk_idx) { | ||
654 | blk_idx = alloc_block_bdev(zram); | ||
655 | if (!blk_idx) { | ||
656 | ret = -ENOSPC; | ||
657 | break; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | zram_slot_lock(zram, index); | ||
662 | if (!zram_allocated(zram, index)) | ||
663 | goto next; | ||
664 | |||
665 | if (zram_test_flag(zram, index, ZRAM_WB) || | ||
666 | zram_test_flag(zram, index, ZRAM_SAME) || | ||
667 | zram_test_flag(zram, index, ZRAM_UNDER_WB)) | ||
668 | goto next; | ||
669 | |||
670 | if ((mode & IDLE_WRITEBACK && | ||
671 | !zram_test_flag(zram, index, ZRAM_IDLE)) && | ||
672 | (mode & HUGE_WRITEBACK && | ||
673 | !zram_test_flag(zram, index, ZRAM_HUGE))) | ||
674 | goto next; | ||
675 | /* | ||
676 | * Clearing ZRAM_UNDER_WB is duty of caller. | ||
677 | * IOW, zram_free_page never clear it. | ||
678 | */ | ||
679 | zram_set_flag(zram, index, ZRAM_UNDER_WB); | ||
680 | /* Need for hugepage writeback racing */ | ||
681 | zram_set_flag(zram, index, ZRAM_IDLE); | ||
682 | zram_slot_unlock(zram, index); | ||
683 | if (zram_bvec_read(zram, &bvec, index, 0, NULL)) { | ||
684 | zram_slot_lock(zram, index); | ||
685 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | ||
686 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
687 | zram_slot_unlock(zram, index); | ||
688 | continue; | ||
689 | } | ||
690 | |||
691 | bio_init(&bio, &bio_vec, 1); | ||
692 | bio_set_dev(&bio, zram->bdev); | ||
693 | bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); | ||
694 | bio.bi_opf = REQ_OP_WRITE | REQ_SYNC; | ||
695 | |||
696 | bio_add_page(&bio, bvec.bv_page, bvec.bv_len, | ||
697 | bvec.bv_offset); | ||
698 | /* | ||
699 | * XXX: A single page IO would be inefficient for write | ||
700 | * but it would be not bad as starter. | ||
701 | */ | ||
702 | ret = submit_bio_wait(&bio); | ||
703 | if (ret) { | ||
704 | zram_slot_lock(zram, index); | ||
705 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | ||
706 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
707 | zram_slot_unlock(zram, index); | ||
708 | continue; | ||
709 | } | ||
710 | |||
711 | atomic64_inc(&zram->stats.bd_writes); | ||
712 | /* | ||
713 | * We released zram_slot_lock so need to check if the slot was | ||
714 | * changed. If there is freeing for the slot, we can catch it | ||
715 | * easily by zram_allocated. | ||
716 | * A subtle case is the slot is freed/reallocated/marked as | ||
717 | * ZRAM_IDLE again. To close the race, idle_store doesn't | ||
718 | * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB. | ||
719 | * Thus, we could close the race by checking ZRAM_IDLE bit. | ||
720 | */ | ||
721 | zram_slot_lock(zram, index); | ||
722 | if (!zram_allocated(zram, index) || | ||
723 | !zram_test_flag(zram, index, ZRAM_IDLE)) { | ||
724 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | ||
725 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
726 | goto next; | ||
727 | } | ||
728 | |||
729 | zram_free_page(zram, index); | ||
730 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | ||
731 | zram_set_flag(zram, index, ZRAM_WB); | ||
732 | zram_set_element(zram, index, blk_idx); | ||
733 | blk_idx = 0; | ||
734 | atomic64_inc(&zram->stats.pages_stored); | ||
735 | if (atomic64_add_unless(&zram->stats.bd_wb_limit, | ||
736 | -1 << (PAGE_SHIFT - 12), 0)) { | ||
737 | if (atomic64_read(&zram->stats.bd_wb_limit) == 0) | ||
738 | zram->stop_writeback = true; | ||
739 | } | ||
740 | next: | ||
741 | zram_slot_unlock(zram, index); | ||
742 | } | ||
743 | |||
744 | if (blk_idx) | ||
745 | free_block_bdev(zram, blk_idx); | ||
746 | ret = len; | ||
747 | __free_page(page); | ||
748 | release_init_lock: | ||
749 | up_read(&zram->init_lock); | ||
750 | |||
751 | return ret; | ||
752 | } | ||
753 | |||
512 | struct zram_work { | 754 | struct zram_work { |
513 | struct work_struct work; | 755 | struct work_struct work; |
514 | struct zram *zram; | 756 | struct zram *zram; |
@@ -561,79 +803,21 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, | |||
561 | static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, | 803 | static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, |
562 | unsigned long entry, struct bio *parent, bool sync) | 804 | unsigned long entry, struct bio *parent, bool sync) |
563 | { | 805 | { |
806 | atomic64_inc(&zram->stats.bd_reads); | ||
564 | if (sync) | 807 | if (sync) |
565 | return read_from_bdev_sync(zram, bvec, entry, parent); | 808 | return read_from_bdev_sync(zram, bvec, entry, parent); |
566 | else | 809 | else |
567 | return read_from_bdev_async(zram, bvec, entry, parent); | 810 | return read_from_bdev_async(zram, bvec, entry, parent); |
568 | } | 811 | } |
569 | |||
570 | static int write_to_bdev(struct zram *zram, struct bio_vec *bvec, | ||
571 | u32 index, struct bio *parent, | ||
572 | unsigned long *pentry) | ||
573 | { | ||
574 | struct bio *bio; | ||
575 | unsigned long entry; | ||
576 | |||
577 | bio = bio_alloc(GFP_ATOMIC, 1); | ||
578 | if (!bio) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | entry = get_entry_bdev(zram); | ||
582 | if (!entry) { | ||
583 | bio_put(bio); | ||
584 | return -ENOSPC; | ||
585 | } | ||
586 | |||
587 | bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); | ||
588 | bio_set_dev(bio, zram->bdev); | ||
589 | if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, | ||
590 | bvec->bv_offset)) { | ||
591 | bio_put(bio); | ||
592 | put_entry_bdev(zram, entry); | ||
593 | return -EIO; | ||
594 | } | ||
595 | |||
596 | if (!parent) { | ||
597 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; | ||
598 | bio->bi_end_io = zram_page_end_io; | ||
599 | } else { | ||
600 | bio->bi_opf = parent->bi_opf; | ||
601 | bio_chain(bio, parent); | ||
602 | } | ||
603 | |||
604 | submit_bio(bio); | ||
605 | *pentry = entry; | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static void zram_wb_clear(struct zram *zram, u32 index) | ||
611 | { | ||
612 | unsigned long entry; | ||
613 | |||
614 | zram_clear_flag(zram, index, ZRAM_WB); | ||
615 | entry = zram_get_element(zram, index); | ||
616 | zram_set_element(zram, index, 0); | ||
617 | put_entry_bdev(zram, entry); | ||
618 | } | ||
619 | |||
620 | #else | 812 | #else |
621 | static bool zram_wb_enabled(struct zram *zram) { return false; } | ||
622 | static inline void reset_bdev(struct zram *zram) {}; | 813 | static inline void reset_bdev(struct zram *zram) {}; |
623 | static int write_to_bdev(struct zram *zram, struct bio_vec *bvec, | ||
624 | u32 index, struct bio *parent, | ||
625 | unsigned long *pentry) | ||
626 | |||
627 | { | ||
628 | return -EIO; | ||
629 | } | ||
630 | |||
631 | static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, | 814 | static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, |
632 | unsigned long entry, struct bio *parent, bool sync) | 815 | unsigned long entry, struct bio *parent, bool sync) |
633 | { | 816 | { |
634 | return -EIO; | 817 | return -EIO; |
635 | } | 818 | } |
636 | static void zram_wb_clear(struct zram *zram, u32 index) {} | 819 | |
820 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}; | ||
637 | #endif | 821 | #endif |
638 | 822 | ||
639 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING | 823 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
@@ -652,14 +836,10 @@ static void zram_debugfs_destroy(void) | |||
652 | 836 | ||
653 | static void zram_accessed(struct zram *zram, u32 index) | 837 | static void zram_accessed(struct zram *zram, u32 index) |
654 | { | 838 | { |
839 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
655 | zram->table[index].ac_time = ktime_get_boottime(); | 840 | zram->table[index].ac_time = ktime_get_boottime(); |
656 | } | 841 | } |
657 | 842 | ||
658 | static void zram_reset_access(struct zram *zram, u32 index) | ||
659 | { | ||
660 | zram->table[index].ac_time = 0; | ||
661 | } | ||
662 | |||
663 | static ssize_t read_block_state(struct file *file, char __user *buf, | 843 | static ssize_t read_block_state(struct file *file, char __user *buf, |
664 | size_t count, loff_t *ppos) | 844 | size_t count, loff_t *ppos) |
665 | { | 845 | { |
@@ -689,12 +869,13 @@ static ssize_t read_block_state(struct file *file, char __user *buf, | |||
689 | 869 | ||
690 | ts = ktime_to_timespec64(zram->table[index].ac_time); | 870 | ts = ktime_to_timespec64(zram->table[index].ac_time); |
691 | copied = snprintf(kbuf + written, count, | 871 | copied = snprintf(kbuf + written, count, |
692 | "%12zd %12lld.%06lu %c%c%c\n", | 872 | "%12zd %12lld.%06lu %c%c%c%c\n", |
693 | index, (s64)ts.tv_sec, | 873 | index, (s64)ts.tv_sec, |
694 | ts.tv_nsec / NSEC_PER_USEC, | 874 | ts.tv_nsec / NSEC_PER_USEC, |
695 | zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', | 875 | zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', |
696 | zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', | 876 | zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', |
697 | zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.'); | 877 | zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', |
878 | zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.'); | ||
698 | 879 | ||
699 | if (count < copied) { | 880 | if (count < copied) { |
700 | zram_slot_unlock(zram, index); | 881 | zram_slot_unlock(zram, index); |
@@ -739,8 +920,10 @@ static void zram_debugfs_unregister(struct zram *zram) | |||
739 | #else | 920 | #else |
740 | static void zram_debugfs_create(void) {}; | 921 | static void zram_debugfs_create(void) {}; |
741 | static void zram_debugfs_destroy(void) {}; | 922 | static void zram_debugfs_destroy(void) {}; |
742 | static void zram_accessed(struct zram *zram, u32 index) {}; | 923 | static void zram_accessed(struct zram *zram, u32 index) |
743 | static void zram_reset_access(struct zram *zram, u32 index) {}; | 924 | { |
925 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
926 | }; | ||
744 | static void zram_debugfs_register(struct zram *zram) {}; | 927 | static void zram_debugfs_register(struct zram *zram) {}; |
745 | static void zram_debugfs_unregister(struct zram *zram) {}; | 928 | static void zram_debugfs_unregister(struct zram *zram) {}; |
746 | #endif | 929 | #endif |
@@ -877,6 +1060,26 @@ static ssize_t mm_stat_show(struct device *dev, | |||
877 | return ret; | 1060 | return ret; |
878 | } | 1061 | } |
879 | 1062 | ||
1063 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
1064 | #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12))) | ||
1065 | static ssize_t bd_stat_show(struct device *dev, | ||
1066 | struct device_attribute *attr, char *buf) | ||
1067 | { | ||
1068 | struct zram *zram = dev_to_zram(dev); | ||
1069 | ssize_t ret; | ||
1070 | |||
1071 | down_read(&zram->init_lock); | ||
1072 | ret = scnprintf(buf, PAGE_SIZE, | ||
1073 | "%8llu %8llu %8llu\n", | ||
1074 | FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), | ||
1075 | FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), | ||
1076 | FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); | ||
1077 | up_read(&zram->init_lock); | ||
1078 | |||
1079 | return ret; | ||
1080 | } | ||
1081 | #endif | ||
1082 | |||
880 | static ssize_t debug_stat_show(struct device *dev, | 1083 | static ssize_t debug_stat_show(struct device *dev, |
881 | struct device_attribute *attr, char *buf) | 1084 | struct device_attribute *attr, char *buf) |
882 | { | 1085 | { |
@@ -886,9 +1089,10 @@ static ssize_t debug_stat_show(struct device *dev, | |||
886 | 1089 | ||
887 | down_read(&zram->init_lock); | 1090 | down_read(&zram->init_lock); |
888 | ret = scnprintf(buf, PAGE_SIZE, | 1091 | ret = scnprintf(buf, PAGE_SIZE, |
889 | "version: %d\n%8llu\n", | 1092 | "version: %d\n%8llu %8llu\n", |
890 | version, | 1093 | version, |
891 | (u64)atomic64_read(&zram->stats.writestall)); | 1094 | (u64)atomic64_read(&zram->stats.writestall), |
1095 | (u64)atomic64_read(&zram->stats.miss_free)); | ||
892 | up_read(&zram->init_lock); | 1096 | up_read(&zram->init_lock); |
893 | 1097 | ||
894 | return ret; | 1098 | return ret; |
@@ -896,6 +1100,9 @@ static ssize_t debug_stat_show(struct device *dev, | |||
896 | 1100 | ||
897 | static DEVICE_ATTR_RO(io_stat); | 1101 | static DEVICE_ATTR_RO(io_stat); |
898 | static DEVICE_ATTR_RO(mm_stat); | 1102 | static DEVICE_ATTR_RO(mm_stat); |
1103 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
1104 | static DEVICE_ATTR_RO(bd_stat); | ||
1105 | #endif | ||
899 | static DEVICE_ATTR_RO(debug_stat); | 1106 | static DEVICE_ATTR_RO(debug_stat); |
900 | 1107 | ||
901 | static void zram_meta_free(struct zram *zram, u64 disksize) | 1108 | static void zram_meta_free(struct zram *zram, u64 disksize) |
@@ -940,17 +1147,21 @@ static void zram_free_page(struct zram *zram, size_t index) | |||
940 | { | 1147 | { |
941 | unsigned long handle; | 1148 | unsigned long handle; |
942 | 1149 | ||
943 | zram_reset_access(zram, index); | 1150 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
1151 | zram->table[index].ac_time = 0; | ||
1152 | #endif | ||
1153 | if (zram_test_flag(zram, index, ZRAM_IDLE)) | ||
1154 | zram_clear_flag(zram, index, ZRAM_IDLE); | ||
944 | 1155 | ||
945 | if (zram_test_flag(zram, index, ZRAM_HUGE)) { | 1156 | if (zram_test_flag(zram, index, ZRAM_HUGE)) { |
946 | zram_clear_flag(zram, index, ZRAM_HUGE); | 1157 | zram_clear_flag(zram, index, ZRAM_HUGE); |
947 | atomic64_dec(&zram->stats.huge_pages); | 1158 | atomic64_dec(&zram->stats.huge_pages); |
948 | } | 1159 | } |
949 | 1160 | ||
950 | if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) { | 1161 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
951 | zram_wb_clear(zram, index); | 1162 | zram_clear_flag(zram, index, ZRAM_WB); |
952 | atomic64_dec(&zram->stats.pages_stored); | 1163 | free_block_bdev(zram, zram_get_element(zram, index)); |
953 | return; | 1164 | goto out; |
954 | } | 1165 | } |
955 | 1166 | ||
956 | /* | 1167 | /* |
@@ -959,10 +1170,8 @@ static void zram_free_page(struct zram *zram, size_t index) | |||
959 | */ | 1170 | */ |
960 | if (zram_test_flag(zram, index, ZRAM_SAME)) { | 1171 | if (zram_test_flag(zram, index, ZRAM_SAME)) { |
961 | zram_clear_flag(zram, index, ZRAM_SAME); | 1172 | zram_clear_flag(zram, index, ZRAM_SAME); |
962 | zram_set_element(zram, index, 0); | ||
963 | atomic64_dec(&zram->stats.same_pages); | 1173 | atomic64_dec(&zram->stats.same_pages); |
964 | atomic64_dec(&zram->stats.pages_stored); | 1174 | goto out; |
965 | return; | ||
966 | } | 1175 | } |
967 | 1176 | ||
968 | handle = zram_get_handle(zram, index); | 1177 | handle = zram_get_handle(zram, index); |
@@ -973,10 +1182,12 @@ static void zram_free_page(struct zram *zram, size_t index) | |||
973 | 1182 | ||
974 | atomic64_sub(zram_get_obj_size(zram, index), | 1183 | atomic64_sub(zram_get_obj_size(zram, index), |
975 | &zram->stats.compr_data_size); | 1184 | &zram->stats.compr_data_size); |
1185 | out: | ||
976 | atomic64_dec(&zram->stats.pages_stored); | 1186 | atomic64_dec(&zram->stats.pages_stored); |
977 | |||
978 | zram_set_handle(zram, index, 0); | 1187 | zram_set_handle(zram, index, 0); |
979 | zram_set_obj_size(zram, index, 0); | 1188 | zram_set_obj_size(zram, index, 0); |
1189 | WARN_ON_ONCE(zram->table[index].flags & | ||
1190 | ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); | ||
980 | } | 1191 | } |
981 | 1192 | ||
982 | static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, | 1193 | static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, |
@@ -987,24 +1198,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, | |||
987 | unsigned int size; | 1198 | unsigned int size; |
988 | void *src, *dst; | 1199 | void *src, *dst; |
989 | 1200 | ||
990 | if (zram_wb_enabled(zram)) { | 1201 | zram_slot_lock(zram, index); |
991 | zram_slot_lock(zram, index); | 1202 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
992 | if (zram_test_flag(zram, index, ZRAM_WB)) { | 1203 | struct bio_vec bvec; |
993 | struct bio_vec bvec; | ||
994 | |||
995 | zram_slot_unlock(zram, index); | ||
996 | 1204 | ||
997 | bvec.bv_page = page; | ||
998 | bvec.bv_len = PAGE_SIZE; | ||
999 | bvec.bv_offset = 0; | ||
1000 | return read_from_bdev(zram, &bvec, | ||
1001 | zram_get_element(zram, index), | ||
1002 | bio, partial_io); | ||
1003 | } | ||
1004 | zram_slot_unlock(zram, index); | 1205 | zram_slot_unlock(zram, index); |
1206 | |||
1207 | bvec.bv_page = page; | ||
1208 | bvec.bv_len = PAGE_SIZE; | ||
1209 | bvec.bv_offset = 0; | ||
1210 | return read_from_bdev(zram, &bvec, | ||
1211 | zram_get_element(zram, index), | ||
1212 | bio, partial_io); | ||
1005 | } | 1213 | } |
1006 | 1214 | ||
1007 | zram_slot_lock(zram, index); | ||
1008 | handle = zram_get_handle(zram, index); | 1215 | handle = zram_get_handle(zram, index); |
1009 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { | 1216 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { |
1010 | unsigned long value; | 1217 | unsigned long value; |
@@ -1089,7 +1296,6 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, | |||
1089 | struct page *page = bvec->bv_page; | 1296 | struct page *page = bvec->bv_page; |
1090 | unsigned long element = 0; | 1297 | unsigned long element = 0; |
1091 | enum zram_pageflags flags = 0; | 1298 | enum zram_pageflags flags = 0; |
1092 | bool allow_wb = true; | ||
1093 | 1299 | ||
1094 | mem = kmap_atomic(page); | 1300 | mem = kmap_atomic(page); |
1095 | if (page_same_filled(mem, &element)) { | 1301 | if (page_same_filled(mem, &element)) { |
@@ -1114,21 +1320,8 @@ compress_again: | |||
1114 | return ret; | 1320 | return ret; |
1115 | } | 1321 | } |
1116 | 1322 | ||
1117 | if (unlikely(comp_len >= huge_class_size)) { | 1323 | if (comp_len >= huge_class_size) |
1118 | comp_len = PAGE_SIZE; | 1324 | comp_len = PAGE_SIZE; |
1119 | if (zram_wb_enabled(zram) && allow_wb) { | ||
1120 | zcomp_stream_put(zram->comp); | ||
1121 | ret = write_to_bdev(zram, bvec, index, bio, &element); | ||
1122 | if (!ret) { | ||
1123 | flags = ZRAM_WB; | ||
1124 | ret = 1; | ||
1125 | goto out; | ||
1126 | } | ||
1127 | allow_wb = false; | ||
1128 | goto compress_again; | ||
1129 | } | ||
1130 | } | ||
1131 | |||
1132 | /* | 1325 | /* |
1133 | * handle allocation has 2 paths: | 1326 | * handle allocation has 2 paths: |
1134 | * a) fast path is executed with preemption disabled (for | 1327 | * a) fast path is executed with preemption disabled (for |
@@ -1400,10 +1593,14 @@ static void zram_slot_free_notify(struct block_device *bdev, | |||
1400 | 1593 | ||
1401 | zram = bdev->bd_disk->private_data; | 1594 | zram = bdev->bd_disk->private_data; |
1402 | 1595 | ||
1403 | zram_slot_lock(zram, index); | 1596 | atomic64_inc(&zram->stats.notify_free); |
1597 | if (!zram_slot_trylock(zram, index)) { | ||
1598 | atomic64_inc(&zram->stats.miss_free); | ||
1599 | return; | ||
1600 | } | ||
1601 | |||
1404 | zram_free_page(zram, index); | 1602 | zram_free_page(zram, index); |
1405 | zram_slot_unlock(zram, index); | 1603 | zram_slot_unlock(zram, index); |
1406 | atomic64_inc(&zram->stats.notify_free); | ||
1407 | } | 1604 | } |
1408 | 1605 | ||
1409 | static int zram_rw_page(struct block_device *bdev, sector_t sector, | 1606 | static int zram_rw_page(struct block_device *bdev, sector_t sector, |
@@ -1608,10 +1805,13 @@ static DEVICE_ATTR_RO(initstate); | |||
1608 | static DEVICE_ATTR_WO(reset); | 1805 | static DEVICE_ATTR_WO(reset); |
1609 | static DEVICE_ATTR_WO(mem_limit); | 1806 | static DEVICE_ATTR_WO(mem_limit); |
1610 | static DEVICE_ATTR_WO(mem_used_max); | 1807 | static DEVICE_ATTR_WO(mem_used_max); |
1808 | static DEVICE_ATTR_WO(idle); | ||
1611 | static DEVICE_ATTR_RW(max_comp_streams); | 1809 | static DEVICE_ATTR_RW(max_comp_streams); |
1612 | static DEVICE_ATTR_RW(comp_algorithm); | 1810 | static DEVICE_ATTR_RW(comp_algorithm); |
1613 | #ifdef CONFIG_ZRAM_WRITEBACK | 1811 | #ifdef CONFIG_ZRAM_WRITEBACK |
1614 | static DEVICE_ATTR_RW(backing_dev); | 1812 | static DEVICE_ATTR_RW(backing_dev); |
1813 | static DEVICE_ATTR_WO(writeback); | ||
1814 | static DEVICE_ATTR_RW(writeback_limit); | ||
1615 | #endif | 1815 | #endif |
1616 | 1816 | ||
1617 | static struct attribute *zram_disk_attrs[] = { | 1817 | static struct attribute *zram_disk_attrs[] = { |
@@ -1621,13 +1821,19 @@ static struct attribute *zram_disk_attrs[] = { | |||
1621 | &dev_attr_compact.attr, | 1821 | &dev_attr_compact.attr, |
1622 | &dev_attr_mem_limit.attr, | 1822 | &dev_attr_mem_limit.attr, |
1623 | &dev_attr_mem_used_max.attr, | 1823 | &dev_attr_mem_used_max.attr, |
1824 | &dev_attr_idle.attr, | ||
1624 | &dev_attr_max_comp_streams.attr, | 1825 | &dev_attr_max_comp_streams.attr, |
1625 | &dev_attr_comp_algorithm.attr, | 1826 | &dev_attr_comp_algorithm.attr, |
1626 | #ifdef CONFIG_ZRAM_WRITEBACK | 1827 | #ifdef CONFIG_ZRAM_WRITEBACK |
1627 | &dev_attr_backing_dev.attr, | 1828 | &dev_attr_backing_dev.attr, |
1829 | &dev_attr_writeback.attr, | ||
1830 | &dev_attr_writeback_limit.attr, | ||
1628 | #endif | 1831 | #endif |
1629 | &dev_attr_io_stat.attr, | 1832 | &dev_attr_io_stat.attr, |
1630 | &dev_attr_mm_stat.attr, | 1833 | &dev_attr_mm_stat.attr, |
1834 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
1835 | &dev_attr_bd_stat.attr, | ||
1836 | #endif | ||
1631 | &dev_attr_debug_stat.attr, | 1837 | &dev_attr_debug_stat.attr, |
1632 | NULL, | 1838 | NULL, |
1633 | }; | 1839 | }; |
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 72c8584b6dff..4bd3afd15e83 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | 31 | ||
32 | /* | 32 | /* |
33 | * The lower ZRAM_FLAG_SHIFT bits of table.value is for | 33 | * The lower ZRAM_FLAG_SHIFT bits of table.flags is for |
34 | * object size (excluding header), the higher bits is for | 34 | * object size (excluding header), the higher bits is for |
35 | * zram_pageflags. | 35 | * zram_pageflags. |
36 | * | 36 | * |
@@ -41,13 +41,15 @@ | |||
41 | */ | 41 | */ |
42 | #define ZRAM_FLAG_SHIFT 24 | 42 | #define ZRAM_FLAG_SHIFT 24 |
43 | 43 | ||
44 | /* Flags for zram pages (table[page_no].value) */ | 44 | /* Flags for zram pages (table[page_no].flags) */ |
45 | enum zram_pageflags { | 45 | enum zram_pageflags { |
46 | /* zram slot is locked */ | 46 | /* zram slot is locked */ |
47 | ZRAM_LOCK = ZRAM_FLAG_SHIFT, | 47 | ZRAM_LOCK = ZRAM_FLAG_SHIFT, |
48 | ZRAM_SAME, /* Page consists the same element */ | 48 | ZRAM_SAME, /* Page consists the same element */ |
49 | ZRAM_WB, /* page is stored on backing_device */ | 49 | ZRAM_WB, /* page is stored on backing_device */ |
50 | ZRAM_UNDER_WB, /* page is under writeback */ | ||
50 | ZRAM_HUGE, /* Incompressible page */ | 51 | ZRAM_HUGE, /* Incompressible page */ |
52 | ZRAM_IDLE, /* not accessed page since last idle marking */ | ||
51 | 53 | ||
52 | __NR_ZRAM_PAGEFLAGS, | 54 | __NR_ZRAM_PAGEFLAGS, |
53 | }; | 55 | }; |
@@ -60,7 +62,7 @@ struct zram_table_entry { | |||
60 | unsigned long handle; | 62 | unsigned long handle; |
61 | unsigned long element; | 63 | unsigned long element; |
62 | }; | 64 | }; |
63 | unsigned long value; | 65 | unsigned long flags; |
64 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING | 66 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
65 | ktime_t ac_time; | 67 | ktime_t ac_time; |
66 | #endif | 68 | #endif |
@@ -79,6 +81,13 @@ struct zram_stats { | |||
79 | atomic64_t pages_stored; /* no. of pages currently stored */ | 81 | atomic64_t pages_stored; /* no. of pages currently stored */ |
80 | atomic_long_t max_used_pages; /* no. of maximum pages stored */ | 82 | atomic_long_t max_used_pages; /* no. of maximum pages stored */ |
81 | atomic64_t writestall; /* no. of write slow paths */ | 83 | atomic64_t writestall; /* no. of write slow paths */ |
84 | atomic64_t miss_free; /* no. of missed free */ | ||
85 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
86 | atomic64_t bd_count; /* no. of pages in backing device */ | ||
87 | atomic64_t bd_reads; /* no. of reads from backing device */ | ||
88 | atomic64_t bd_writes; /* no. of writes from backing device */ | ||
89 | atomic64_t bd_wb_limit; /* writeback limit of backing device */ | ||
90 | #endif | ||
82 | }; | 91 | }; |
83 | 92 | ||
84 | struct zram { | 93 | struct zram { |
@@ -104,13 +113,13 @@ struct zram { | |||
104 | * zram is claimed so open request will be failed | 113 | * zram is claimed so open request will be failed |
105 | */ | 114 | */ |
106 | bool claim; /* Protected by bdev->bd_mutex */ | 115 | bool claim; /* Protected by bdev->bd_mutex */ |
107 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
108 | struct file *backing_dev; | 116 | struct file *backing_dev; |
117 | bool stop_writeback; | ||
118 | #ifdef CONFIG_ZRAM_WRITEBACK | ||
109 | struct block_device *bdev; | 119 | struct block_device *bdev; |
110 | unsigned int old_block_size; | 120 | unsigned int old_block_size; |
111 | unsigned long *bitmap; | 121 | unsigned long *bitmap; |
112 | unsigned long nr_pages; | 122 | unsigned long nr_pages; |
113 | spinlock_t bitmap_lock; | ||
114 | #endif | 123 | #endif |
115 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING | 124 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
116 | struct dentry *debugfs_dir; | 125 | struct dentry *debugfs_dir; |
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 38ffb281df97..004a3ce8ba72 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c | |||
@@ -115,9 +115,9 @@ static int agp_find_max(void) | |||
115 | long memory, index, result; | 115 | long memory, index, result; |
116 | 116 | ||
117 | #if PAGE_SHIFT < 20 | 117 | #if PAGE_SHIFT < 20 |
118 | memory = totalram_pages >> (20 - PAGE_SHIFT); | 118 | memory = totalram_pages() >> (20 - PAGE_SHIFT); |
119 | #else | 119 | #else |
120 | memory = totalram_pages << (PAGE_SHIFT - 20); | 120 | memory = totalram_pages() << (PAGE_SHIFT - 20); |
121 | #endif | 121 | #endif |
122 | index = 1; | 122 | index = 1; |
123 | 123 | ||
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 99e2aace8078..2c1f459c0c63 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c | |||
@@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data) | |||
48 | percpu_ref_exit(ref); | 48 | percpu_ref_exit(ref); |
49 | } | 49 | } |
50 | 50 | ||
51 | static void dax_pmem_percpu_kill(void *data) | 51 | static void dax_pmem_percpu_kill(struct percpu_ref *ref) |
52 | { | 52 | { |
53 | struct percpu_ref *ref = data; | ||
54 | struct dax_pmem *dax_pmem = to_dax_pmem(ref); | 53 | struct dax_pmem *dax_pmem = to_dax_pmem(ref); |
55 | 54 | ||
56 | dev_dbg(dax_pmem->dev, "trace\n"); | 55 | dev_dbg(dax_pmem->dev, "trace\n"); |
@@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev) | |||
112 | } | 111 | } |
113 | 112 | ||
114 | dax_pmem->pgmap.ref = &dax_pmem->ref; | 113 | dax_pmem->pgmap.ref = &dax_pmem->ref; |
114 | dax_pmem->pgmap.kill = dax_pmem_percpu_kill; | ||
115 | addr = devm_memremap_pages(dev, &dax_pmem->pgmap); | 115 | addr = devm_memremap_pages(dev, &dax_pmem->pgmap); |
116 | if (IS_ERR(addr)) { | 116 | if (IS_ERR(addr)) |
117 | devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); | ||
118 | percpu_ref_exit(&dax_pmem->ref); | ||
119 | return PTR_ERR(addr); | 117 | return PTR_ERR(addr); |
120 | } | ||
121 | |||
122 | rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, | ||
123 | &dax_pmem->ref); | ||
124 | if (rc) | ||
125 | return rc; | ||
126 | 118 | ||
127 | /* adjust the dax_region resource to the start of data */ | 119 | /* adjust the dax_region resource to the start of data */ |
128 | memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); | 120 | memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index e55508b39496..3e6823fdd939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, | |||
238 | * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change | 238 | * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change |
239 | * | 239 | * |
240 | * @mn: our notifier | 240 | * @mn: our notifier |
241 | * @mm: the mm this callback is about | 241 | * @range: mmu notifier context |
242 | * @start: start of updated range | ||
243 | * @end: end of updated range | ||
244 | * | 242 | * |
245 | * Block for operations on BOs to finish and mark pages as accessed and | 243 | * Block for operations on BOs to finish and mark pages as accessed and |
246 | * potentially dirty. | 244 | * potentially dirty. |
247 | */ | 245 | */ |
248 | static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, | 246 | static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, |
249 | struct mm_struct *mm, | 247 | const struct mmu_notifier_range *range) |
250 | unsigned long start, | ||
251 | unsigned long end, | ||
252 | bool blockable) | ||
253 | { | 248 | { |
254 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); | 249 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); |
255 | struct interval_tree_node *it; | 250 | struct interval_tree_node *it; |
251 | unsigned long end; | ||
256 | 252 | ||
257 | /* notification is exclusive, but interval is inclusive */ | 253 | /* notification is exclusive, but interval is inclusive */ |
258 | end -= 1; | 254 | end = range->end - 1; |
259 | 255 | ||
260 | /* TODO we should be able to split locking for interval tree and | 256 | /* TODO we should be able to split locking for interval tree and |
261 | * amdgpu_mn_invalidate_node | 257 | * amdgpu_mn_invalidate_node |
262 | */ | 258 | */ |
263 | if (amdgpu_mn_read_lock(amn, blockable)) | 259 | if (amdgpu_mn_read_lock(amn, range->blockable)) |
264 | return -EAGAIN; | 260 | return -EAGAIN; |
265 | 261 | ||
266 | it = interval_tree_iter_first(&amn->objects, start, end); | 262 | it = interval_tree_iter_first(&amn->objects, range->start, end); |
267 | while (it) { | 263 | while (it) { |
268 | struct amdgpu_mn_node *node; | 264 | struct amdgpu_mn_node *node; |
269 | 265 | ||
270 | if (!blockable) { | 266 | if (!range->blockable) { |
271 | amdgpu_mn_read_unlock(amn); | 267 | amdgpu_mn_read_unlock(amn); |
272 | return -EAGAIN; | 268 | return -EAGAIN; |
273 | } | 269 | } |
274 | 270 | ||
275 | node = container_of(it, struct amdgpu_mn_node, it); | 271 | node = container_of(it, struct amdgpu_mn_node, it); |
276 | it = interval_tree_iter_next(it, start, end); | 272 | it = interval_tree_iter_next(it, range->start, end); |
277 | 273 | ||
278 | amdgpu_mn_invalidate_node(node, start, end); | 274 | amdgpu_mn_invalidate_node(node, range->start, end); |
279 | } | 275 | } |
280 | 276 | ||
281 | return 0; | 277 | return 0; |
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, | |||
294 | * are restorted in amdgpu_mn_invalidate_range_end_hsa. | 290 | * are restorted in amdgpu_mn_invalidate_range_end_hsa. |
295 | */ | 291 | */ |
296 | static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, | 292 | static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, |
297 | struct mm_struct *mm, | 293 | const struct mmu_notifier_range *range) |
298 | unsigned long start, | ||
299 | unsigned long end, | ||
300 | bool blockable) | ||
301 | { | 294 | { |
302 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); | 295 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); |
303 | struct interval_tree_node *it; | 296 | struct interval_tree_node *it; |
297 | unsigned long end; | ||
304 | 298 | ||
305 | /* notification is exclusive, but interval is inclusive */ | 299 | /* notification is exclusive, but interval is inclusive */ |
306 | end -= 1; | 300 | end = range->end - 1; |
307 | 301 | ||
308 | if (amdgpu_mn_read_lock(amn, blockable)) | 302 | if (amdgpu_mn_read_lock(amn, range->blockable)) |
309 | return -EAGAIN; | 303 | return -EAGAIN; |
310 | 304 | ||
311 | it = interval_tree_iter_first(&amn->objects, start, end); | 305 | it = interval_tree_iter_first(&amn->objects, range->start, end); |
312 | while (it) { | 306 | while (it) { |
313 | struct amdgpu_mn_node *node; | 307 | struct amdgpu_mn_node *node; |
314 | struct amdgpu_bo *bo; | 308 | struct amdgpu_bo *bo; |
315 | 309 | ||
316 | if (!blockable) { | 310 | if (!range->blockable) { |
317 | amdgpu_mn_read_unlock(amn); | 311 | amdgpu_mn_read_unlock(amn); |
318 | return -EAGAIN; | 312 | return -EAGAIN; |
319 | } | 313 | } |
320 | 314 | ||
321 | node = container_of(it, struct amdgpu_mn_node, it); | 315 | node = container_of(it, struct amdgpu_mn_node, it); |
322 | it = interval_tree_iter_next(it, start, end); | 316 | it = interval_tree_iter_next(it, range->start, end); |
323 | 317 | ||
324 | list_for_each_entry(bo, &node->bos, mn_list) { | 318 | list_for_each_entry(bo, &node->bos, mn_list) { |
325 | struct kgd_mem *mem = bo->kfd_bo; | 319 | struct kgd_mem *mem = bo->kfd_bo; |
326 | 320 | ||
327 | if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, | 321 | if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, |
328 | start, end)) | 322 | range->start, |
329 | amdgpu_amdkfd_evict_userptr(mem, mm); | 323 | end)) |
324 | amdgpu_amdkfd_evict_userptr(mem, range->mm); | ||
330 | } | 325 | } |
331 | } | 326 | } |
332 | 327 | ||
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, | |||
344 | * Release the lock again to allow new command submissions. | 339 | * Release the lock again to allow new command submissions. |
345 | */ | 340 | */ |
346 | static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, | 341 | static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, |
347 | struct mm_struct *mm, | 342 | const struct mmu_notifier_range *range) |
348 | unsigned long start, | ||
349 | unsigned long end) | ||
350 | { | 343 | { |
351 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); | 344 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); |
352 | 345 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index c02adbbeef2a..b7bc7d7d048f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -853,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
853 | */ | 853 | */ |
854 | pgdat = NODE_DATA(numa_node_id); | 854 | pgdat = NODE_DATA(numa_node_id); |
855 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) | 855 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) |
856 | mem_in_bytes += pgdat->node_zones[zone_type].managed_pages; | 856 | mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]); |
857 | mem_in_bytes <<= PAGE_SHIFT; | 857 | mem_in_bytes <<= PAGE_SHIFT; |
858 | 858 | ||
859 | sub_type_hdr->length_low = lower_32_bits(mem_in_bytes); | 859 | sub_type_hdr->length_low = lower_32_bits(mem_in_bytes); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d36a9755ad91..a9de07bb72c8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2559,7 +2559,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
2559 | * If there's no chance of allocating enough pages for the whole | 2559 | * If there's no chance of allocating enough pages for the whole |
2560 | * object, bail early. | 2560 | * object, bail early. |
2561 | */ | 2561 | */ |
2562 | if (page_count > totalram_pages) | 2562 | if (page_count > totalram_pages()) |
2563 | return -ENOMEM; | 2563 | return -ENOMEM; |
2564 | 2564 | ||
2565 | st = kmalloc(sizeof(*st), GFP_KERNEL); | 2565 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 2c9b284036d1..3df77020aada 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo) | |||
113 | } | 113 | } |
114 | 114 | ||
115 | static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, | 115 | static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, |
116 | struct mm_struct *mm, | 116 | const struct mmu_notifier_range *range) |
117 | unsigned long start, | ||
118 | unsigned long end, | ||
119 | bool blockable) | ||
120 | { | 117 | { |
121 | struct i915_mmu_notifier *mn = | 118 | struct i915_mmu_notifier *mn = |
122 | container_of(_mn, struct i915_mmu_notifier, mn); | 119 | container_of(_mn, struct i915_mmu_notifier, mn); |
123 | struct i915_mmu_object *mo; | 120 | struct i915_mmu_object *mo; |
124 | struct interval_tree_node *it; | 121 | struct interval_tree_node *it; |
125 | LIST_HEAD(cancelled); | 122 | LIST_HEAD(cancelled); |
123 | unsigned long end; | ||
126 | 124 | ||
127 | if (RB_EMPTY_ROOT(&mn->objects.rb_root)) | 125 | if (RB_EMPTY_ROOT(&mn->objects.rb_root)) |
128 | return 0; | 126 | return 0; |
129 | 127 | ||
130 | /* interval ranges are inclusive, but invalidate range is exclusive */ | 128 | /* interval ranges are inclusive, but invalidate range is exclusive */ |
131 | end--; | 129 | end = range->end - 1; |
132 | 130 | ||
133 | spin_lock(&mn->lock); | 131 | spin_lock(&mn->lock); |
134 | it = interval_tree_iter_first(&mn->objects, start, end); | 132 | it = interval_tree_iter_first(&mn->objects, range->start, end); |
135 | while (it) { | 133 | while (it) { |
136 | if (!blockable) { | 134 | if (!range->blockable) { |
137 | spin_unlock(&mn->lock); | 135 | spin_unlock(&mn->lock); |
138 | return -EAGAIN; | 136 | return -EAGAIN; |
139 | } | 137 | } |
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, | |||
151 | queue_work(mn->wq, &mo->work); | 149 | queue_work(mn->wq, &mo->work); |
152 | 150 | ||
153 | list_add(&mo->link, &cancelled); | 151 | list_add(&mo->link, &cancelled); |
154 | it = interval_tree_iter_next(it, start, end); | 152 | it = interval_tree_iter_next(it, range->start, end); |
155 | } | 153 | } |
156 | list_for_each_entry(mo, &cancelled, link) | 154 | list_for_each_entry(mo, &cancelled, link) |
157 | del_object(mo); | 155 | del_object(mo); |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 69fe86b30fbb..a9ed0ecc94e2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | |||
@@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg) | |||
170 | * This should ensure that we do not run into the oomkiller during | 170 | * This should ensure that we do not run into the oomkiller during |
171 | * the test and take down the machine wilfully. | 171 | * the test and take down the machine wilfully. |
172 | */ | 172 | */ |
173 | limit = totalram_pages << PAGE_SHIFT; | 173 | limit = totalram_pages() << PAGE_SHIFT; |
174 | limit = min(ppgtt->vm.total, limit); | 174 | limit = min(ppgtt->vm.total, limit); |
175 | 175 | ||
176 | /* Check we can allocate the entire range */ | 176 | /* Check we can allocate the entire range */ |
@@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915, | |||
1244 | u64 hole_start, u64 hole_end, | 1244 | u64 hole_start, u64 hole_end, |
1245 | unsigned long end_time)) | 1245 | unsigned long end_time)) |
1246 | { | 1246 | { |
1247 | const u64 limit = totalram_pages << PAGE_SHIFT; | 1247 | const u64 limit = totalram_pages() << PAGE_SHIFT; |
1248 | struct i915_gem_context *ctx; | 1248 | struct i915_gem_context *ctx; |
1249 | struct i915_hw_ppgtt *ppgtt; | 1249 | struct i915_hw_ppgtt *ppgtt; |
1250 | IGT_TIMEOUT(end_time); | 1250 | IGT_TIMEOUT(end_time); |
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index f8b35df44c60..b3019505065a 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn, | |||
119 | * unmap them by move them into system domain again. | 119 | * unmap them by move them into system domain again. |
120 | */ | 120 | */ |
121 | static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | 121 | static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn, |
122 | struct mm_struct *mm, | 122 | const struct mmu_notifier_range *range) |
123 | unsigned long start, | ||
124 | unsigned long end, | ||
125 | bool blockable) | ||
126 | { | 123 | { |
127 | struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); | 124 | struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); |
128 | struct ttm_operation_ctx ctx = { false, false }; | 125 | struct ttm_operation_ctx ctx = { false, false }; |
129 | struct interval_tree_node *it; | 126 | struct interval_tree_node *it; |
127 | unsigned long end; | ||
130 | int ret = 0; | 128 | int ret = 0; |
131 | 129 | ||
132 | /* notification is exclusive, but interval is inclusive */ | 130 | /* notification is exclusive, but interval is inclusive */ |
133 | end -= 1; | 131 | end = range->end - 1; |
134 | 132 | ||
135 | /* TODO we should be able to split locking for interval tree and | 133 | /* TODO we should be able to split locking for interval tree and |
136 | * the tear down. | 134 | * the tear down. |
137 | */ | 135 | */ |
138 | if (blockable) | 136 | if (range->blockable) |
139 | mutex_lock(&rmn->lock); | 137 | mutex_lock(&rmn->lock); |
140 | else if (!mutex_trylock(&rmn->lock)) | 138 | else if (!mutex_trylock(&rmn->lock)) |
141 | return -EAGAIN; | 139 | return -EAGAIN; |
142 | 140 | ||
143 | it = interval_tree_iter_first(&rmn->objects, start, end); | 141 | it = interval_tree_iter_first(&rmn->objects, range->start, end); |
144 | while (it) { | 142 | while (it) { |
145 | struct radeon_mn_node *node; | 143 | struct radeon_mn_node *node; |
146 | struct radeon_bo *bo; | 144 | struct radeon_bo *bo; |
147 | long r; | 145 | long r; |
148 | 146 | ||
149 | if (!blockable) { | 147 | if (!range->blockable) { |
150 | ret = -EAGAIN; | 148 | ret = -EAGAIN; |
151 | goto out_unlock; | 149 | goto out_unlock; |
152 | } | 150 | } |
153 | 151 | ||
154 | node = container_of(it, struct radeon_mn_node, it); | 152 | node = container_of(it, struct radeon_mn_node, it); |
155 | it = interval_tree_iter_next(it, start, end); | 153 | it = interval_tree_iter_next(it, range->start, end); |
156 | 154 | ||
157 | list_for_each_entry(bo, &node->bos, mn_list) { | 155 | list_for_each_entry(bo, &node->bos, mn_list) { |
158 | 156 | ||
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 41631512ae97..5301fef16c31 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c | |||
@@ -1090,6 +1090,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) | |||
1090 | static unsigned long compute_balloon_floor(void) | 1090 | static unsigned long compute_balloon_floor(void) |
1091 | { | 1091 | { |
1092 | unsigned long min_pages; | 1092 | unsigned long min_pages; |
1093 | unsigned long nr_pages = totalram_pages(); | ||
1093 | #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) | 1094 | #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) |
1094 | /* Simple continuous piecewiese linear function: | 1095 | /* Simple continuous piecewiese linear function: |
1095 | * max MiB -> min MiB gradient | 1096 | * max MiB -> min MiB gradient |
@@ -1102,16 +1103,16 @@ static unsigned long compute_balloon_floor(void) | |||
1102 | * 8192 744 (1/16) | 1103 | * 8192 744 (1/16) |
1103 | * 32768 1512 (1/32) | 1104 | * 32768 1512 (1/32) |
1104 | */ | 1105 | */ |
1105 | if (totalram_pages < MB2PAGES(128)) | 1106 | if (nr_pages < MB2PAGES(128)) |
1106 | min_pages = MB2PAGES(8) + (totalram_pages >> 1); | 1107 | min_pages = MB2PAGES(8) + (nr_pages >> 1); |
1107 | else if (totalram_pages < MB2PAGES(512)) | 1108 | else if (nr_pages < MB2PAGES(512)) |
1108 | min_pages = MB2PAGES(40) + (totalram_pages >> 2); | 1109 | min_pages = MB2PAGES(40) + (nr_pages >> 2); |
1109 | else if (totalram_pages < MB2PAGES(2048)) | 1110 | else if (nr_pages < MB2PAGES(2048)) |
1110 | min_pages = MB2PAGES(104) + (totalram_pages >> 3); | 1111 | min_pages = MB2PAGES(104) + (nr_pages >> 3); |
1111 | else if (totalram_pages < MB2PAGES(8192)) | 1112 | else if (nr_pages < MB2PAGES(8192)) |
1112 | min_pages = MB2PAGES(232) + (totalram_pages >> 4); | 1113 | min_pages = MB2PAGES(232) + (nr_pages >> 4); |
1113 | else | 1114 | else |
1114 | min_pages = MB2PAGES(488) + (totalram_pages >> 5); | 1115 | min_pages = MB2PAGES(488) + (nr_pages >> 5); |
1115 | #undef MB2PAGES | 1116 | #undef MB2PAGES |
1116 | return min_pages; | 1117 | return min_pages; |
1117 | } | 1118 | } |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 9608681224e6..a4ec43093cb3 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
@@ -146,15 +146,12 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item, | |||
146 | } | 146 | } |
147 | 147 | ||
148 | static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn, | 148 | static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn, |
149 | struct mm_struct *mm, | 149 | const struct mmu_notifier_range *range) |
150 | unsigned long start, | ||
151 | unsigned long end, | ||
152 | bool blockable) | ||
153 | { | 150 | { |
154 | struct ib_ucontext_per_mm *per_mm = | 151 | struct ib_ucontext_per_mm *per_mm = |
155 | container_of(mn, struct ib_ucontext_per_mm, mn); | 152 | container_of(mn, struct ib_ucontext_per_mm, mn); |
156 | 153 | ||
157 | if (blockable) | 154 | if (range->blockable) |
158 | down_read(&per_mm->umem_rwsem); | 155 | down_read(&per_mm->umem_rwsem); |
159 | else if (!down_read_trylock(&per_mm->umem_rwsem)) | 156 | else if (!down_read_trylock(&per_mm->umem_rwsem)) |
160 | return -EAGAIN; | 157 | return -EAGAIN; |
@@ -169,9 +166,10 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn, | |||
169 | return 0; | 166 | return 0; |
170 | } | 167 | } |
171 | 168 | ||
172 | return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end, | 169 | return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start, |
170 | range->end, | ||
173 | invalidate_range_start_trampoline, | 171 | invalidate_range_start_trampoline, |
174 | blockable, NULL); | 172 | range->blockable, NULL); |
175 | } | 173 | } |
176 | 174 | ||
177 | static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start, | 175 | static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start, |
@@ -182,9 +180,7 @@ static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start, | |||
182 | } | 180 | } |
183 | 181 | ||
184 | static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn, | 182 | static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn, |
185 | struct mm_struct *mm, | 183 | const struct mmu_notifier_range *range) |
186 | unsigned long start, | ||
187 | unsigned long end) | ||
188 | { | 184 | { |
189 | struct ib_ucontext_per_mm *per_mm = | 185 | struct ib_ucontext_per_mm *per_mm = |
190 | container_of(mn, struct ib_ucontext_per_mm, mn); | 186 | container_of(mn, struct ib_ucontext_per_mm, mn); |
@@ -192,8 +188,8 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn, | |||
192 | if (unlikely(!per_mm->active)) | 188 | if (unlikely(!per_mm->active)) |
193 | return; | 189 | return; |
194 | 190 | ||
195 | rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, | 191 | rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start, |
196 | end, | 192 | range->end, |
197 | invalidate_range_end_trampoline, true, NULL); | 193 | invalidate_range_end_trampoline, true, NULL); |
198 | up_read(&per_mm->umem_rwsem); | 194 | up_read(&per_mm->umem_rwsem); |
199 | } | 195 | } |
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 475b769e120c..14d2a90964c3 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c | |||
@@ -68,8 +68,7 @@ struct mmu_rb_handler { | |||
68 | static unsigned long mmu_node_start(struct mmu_rb_node *); | 68 | static unsigned long mmu_node_start(struct mmu_rb_node *); |
69 | static unsigned long mmu_node_last(struct mmu_rb_node *); | 69 | static unsigned long mmu_node_last(struct mmu_rb_node *); |
70 | static int mmu_notifier_range_start(struct mmu_notifier *, | 70 | static int mmu_notifier_range_start(struct mmu_notifier *, |
71 | struct mm_struct *, | 71 | const struct mmu_notifier_range *); |
72 | unsigned long, unsigned long, bool); | ||
73 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, | 72 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, |
74 | unsigned long, unsigned long); | 73 | unsigned long, unsigned long); |
75 | static void do_remove(struct mmu_rb_handler *handler, | 74 | static void do_remove(struct mmu_rb_handler *handler, |
@@ -284,10 +283,7 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, | |||
284 | } | 283 | } |
285 | 284 | ||
286 | static int mmu_notifier_range_start(struct mmu_notifier *mn, | 285 | static int mmu_notifier_range_start(struct mmu_notifier *mn, |
287 | struct mm_struct *mm, | 286 | const struct mmu_notifier_range *range) |
288 | unsigned long start, | ||
289 | unsigned long end, | ||
290 | bool blockable) | ||
291 | { | 287 | { |
292 | struct mmu_rb_handler *handler = | 288 | struct mmu_rb_handler *handler = |
293 | container_of(mn, struct mmu_rb_handler, mn); | 289 | container_of(mn, struct mmu_rb_handler, mn); |
@@ -297,10 +293,11 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn, | |||
297 | bool added = false; | 293 | bool added = false; |
298 | 294 | ||
299 | spin_lock_irqsave(&handler->lock, flags); | 295 | spin_lock_irqsave(&handler->lock, flags); |
300 | for (node = __mmu_int_rb_iter_first(root, start, end - 1); | 296 | for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1); |
301 | node; node = ptr) { | 297 | node; node = ptr) { |
302 | /* Guard against node removal. */ | 298 | /* Guard against node removal. */ |
303 | ptr = __mmu_int_rb_iter_next(node, start, end - 1); | 299 | ptr = __mmu_int_rb_iter_next(node, range->start, |
300 | range->end - 1); | ||
304 | trace_hfi1_mmu_mem_invalidate(node->addr, node->len); | 301 | trace_hfi1_mmu_mem_invalidate(node->addr, node->len); |
305 | if (handler->ops->invalidate(handler->ops_arg, node)) { | 302 | if (handler->ops->invalidate(handler->ops_arg, node)) { |
306 | __mmu_int_rb_remove(node, root); | 303 | __mmu_int_rb_remove(node, root); |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 0e9fcceaefd2..1ecef76225a1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1887,7 +1887,7 @@ static int __init dm_bufio_init(void) | |||
1887 | dm_bufio_allocated_vmalloc = 0; | 1887 | dm_bufio_allocated_vmalloc = 0; |
1888 | dm_bufio_current_allocated = 0; | 1888 | dm_bufio_current_allocated = 0; |
1889 | 1889 | ||
1890 | mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, | 1890 | mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), |
1891 | DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; | 1891 | DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; |
1892 | 1892 | ||
1893 | if (mem > ULONG_MAX) | 1893 | if (mem > ULONG_MAX) |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1ea73ace9b9e..0ff22159a0ca 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -2167,7 +2167,7 @@ static int crypt_wipe_key(struct crypt_config *cc) | |||
2167 | 2167 | ||
2168 | static void crypt_calculate_pages_per_client(void) | 2168 | static void crypt_calculate_pages_per_client(void) |
2169 | { | 2169 | { |
2170 | unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100; | 2170 | unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; |
2171 | 2171 | ||
2172 | if (!dm_crypt_clients_n) | 2172 | if (!dm_crypt_clients_n) |
2173 | return; | 2173 | return; |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 2b27abfa428d..457200ca6287 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -2843,7 +2843,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) | |||
2843 | journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, | 2843 | journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, |
2844 | PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); | 2844 | PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); |
2845 | journal_desc_size = journal_pages * sizeof(struct page_list); | 2845 | journal_desc_size = journal_pages * sizeof(struct page_list); |
2846 | if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) { | 2846 | if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) { |
2847 | *error = "Journal doesn't fit into memory"; | 2847 | *error = "Journal doesn't fit into memory"; |
2848 | r = -ENOMEM; | 2848 | r = -ENOMEM; |
2849 | goto bad; | 2849 | goto bad; |
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 21de30b4e2a1..45b92a3d9d8e 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -85,7 +85,7 @@ static bool __check_shared_memory(size_t alloc_size) | |||
85 | a = shared_memory_amount + alloc_size; | 85 | a = shared_memory_amount + alloc_size; |
86 | if (a < shared_memory_amount) | 86 | if (a < shared_memory_amount) |
87 | return false; | 87 | return false; |
88 | if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR) | 88 | if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR) |
89 | return false; | 89 | return false; |
90 | #ifdef CONFIG_MMU | 90 | #ifdef CONFIG_MMU |
91 | if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR) | 91 | if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR) |
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index 616f78b24a79..b6602490a247 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c | |||
@@ -855,7 +855,7 @@ static int mtk_vpu_probe(struct platform_device *pdev) | |||
855 | /* Set PTCM to 96K and DTCM to 32K */ | 855 | /* Set PTCM to 96K and DTCM to 32K */ |
856 | vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG); | 856 | vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG); |
857 | 857 | ||
858 | vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT)); | 858 | vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT)); |
859 | dev_info(dev, "4GB mode %u\n", vpu->enable_4GB); | 859 | dev_info(dev, "4GB mode %u\n", vpu->enable_4GB); |
860 | 860 | ||
861 | if (vpu->enable_4GB) { | 861 | if (vpu->enable_4GB) { |
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c index 18b8ed57c4ac..e0d97044d0e9 100644 --- a/drivers/misc/mic/scif/scif_dma.c +++ b/drivers/misc/mic/scif/scif_dma.c | |||
@@ -201,23 +201,18 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn, | |||
201 | } | 201 | } |
202 | 202 | ||
203 | static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 203 | static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
204 | struct mm_struct *mm, | 204 | const struct mmu_notifier_range *range) |
205 | unsigned long start, | ||
206 | unsigned long end, | ||
207 | bool blockable) | ||
208 | { | 205 | { |
209 | struct scif_mmu_notif *mmn; | 206 | struct scif_mmu_notif *mmn; |
210 | 207 | ||
211 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); | 208 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); |
212 | scif_rma_destroy_tcw(mmn, start, end - start); | 209 | scif_rma_destroy_tcw(mmn, range->start, range->end - range->start); |
213 | 210 | ||
214 | return 0; | 211 | return 0; |
215 | } | 212 | } |
216 | 213 | ||
217 | static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, | 214 | static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, |
218 | struct mm_struct *mm, | 215 | const struct mmu_notifier_range *range) |
219 | unsigned long start, | ||
220 | unsigned long end) | ||
221 | { | 216 | { |
222 | /* | 217 | /* |
223 | * Nothing to do here, everything needed was done in | 218 | * Nothing to do here, everything needed was done in |
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index 03b49d52092e..ca2032afe035 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c | |||
@@ -220,9 +220,7 @@ void gru_flush_all_tlb(struct gru_state *gru) | |||
220 | * MMUOPS notifier callout functions | 220 | * MMUOPS notifier callout functions |
221 | */ | 221 | */ |
222 | static int gru_invalidate_range_start(struct mmu_notifier *mn, | 222 | static int gru_invalidate_range_start(struct mmu_notifier *mn, |
223 | struct mm_struct *mm, | 223 | const struct mmu_notifier_range *range) |
224 | unsigned long start, unsigned long end, | ||
225 | bool blockable) | ||
226 | { | 224 | { |
227 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, | 225 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, |
228 | ms_notifier); | 226 | ms_notifier); |
@@ -230,15 +228,14 @@ static int gru_invalidate_range_start(struct mmu_notifier *mn, | |||
230 | STAT(mmu_invalidate_range); | 228 | STAT(mmu_invalidate_range); |
231 | atomic_inc(&gms->ms_range_active); | 229 | atomic_inc(&gms->ms_range_active); |
232 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms, | 230 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms, |
233 | start, end, atomic_read(&gms->ms_range_active)); | 231 | range->start, range->end, atomic_read(&gms->ms_range_active)); |
234 | gru_flush_tlb_range(gms, start, end - start); | 232 | gru_flush_tlb_range(gms, range->start, range->end - range->start); |
235 | 233 | ||
236 | return 0; | 234 | return 0; |
237 | } | 235 | } |
238 | 236 | ||
239 | static void gru_invalidate_range_end(struct mmu_notifier *mn, | 237 | static void gru_invalidate_range_end(struct mmu_notifier *mn, |
240 | struct mm_struct *mm, unsigned long start, | 238 | const struct mmu_notifier_range *range) |
241 | unsigned long end) | ||
242 | { | 239 | { |
243 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, | 240 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, |
244 | ms_notifier); | 241 | ms_notifier); |
@@ -247,7 +244,8 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn, | |||
247 | (void)atomic_dec_and_test(&gms->ms_range_active); | 244 | (void)atomic_dec_and_test(&gms->ms_range_active); |
248 | 245 | ||
249 | wake_up_all(&gms->ms_wait_queue); | 246 | wake_up_all(&gms->ms_wait_queue); |
250 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end); | 247 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", |
248 | gms, range->start, range->end); | ||
251 | } | 249 | } |
252 | 250 | ||
253 | static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) | 251 | static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) |
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 9b0b3fa4f836..e6126a4b95d3 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
@@ -570,7 +570,7 @@ static int vmballoon_send_get_target(struct vmballoon *b) | |||
570 | unsigned long status; | 570 | unsigned long status; |
571 | unsigned long limit; | 571 | unsigned long limit; |
572 | 572 | ||
573 | limit = totalram_pages; | 573 | limit = totalram_pages(); |
574 | 574 | ||
575 | /* Ensure limit fits in 32-bits */ | 575 | /* Ensure limit fits in 32-bits */ |
576 | if (limit != (u32)limit) | 576 | if (limit != (u32)limit) |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index f7019294740c..bc2f700feef8 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -309,8 +309,11 @@ static void pmem_release_queue(void *q) | |||
309 | blk_cleanup_queue(q); | 309 | blk_cleanup_queue(q); |
310 | } | 310 | } |
311 | 311 | ||
312 | static void pmem_freeze_queue(void *q) | 312 | static void pmem_freeze_queue(struct percpu_ref *ref) |
313 | { | 313 | { |
314 | struct request_queue *q; | ||
315 | |||
316 | q = container_of(ref, typeof(*q), q_usage_counter); | ||
314 | blk_freeze_queue_start(q); | 317 | blk_freeze_queue_start(q); |
315 | } | 318 | } |
316 | 319 | ||
@@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev, | |||
402 | 405 | ||
403 | pmem->pfn_flags = PFN_DEV; | 406 | pmem->pfn_flags = PFN_DEV; |
404 | pmem->pgmap.ref = &q->q_usage_counter; | 407 | pmem->pgmap.ref = &q->q_usage_counter; |
408 | pmem->pgmap.kill = pmem_freeze_queue; | ||
405 | if (is_nd_pfn(dev)) { | 409 | if (is_nd_pfn(dev)) { |
406 | if (setup_pagemap_fsdax(dev, &pmem->pgmap)) | 410 | if (setup_pagemap_fsdax(dev, &pmem->pgmap)) |
407 | return -ENOMEM; | 411 | return -ENOMEM; |
@@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev, | |||
427 | memcpy(&bb_res, &nsio->res, sizeof(bb_res)); | 431 | memcpy(&bb_res, &nsio->res, sizeof(bb_res)); |
428 | } | 432 | } |
429 | 433 | ||
430 | /* | ||
431 | * At release time the queue must be frozen before | ||
432 | * devm_memremap_pages is unwound | ||
433 | */ | ||
434 | if (devm_add_action_or_reset(dev, pmem_freeze_queue, q)) | ||
435 | return -ENOMEM; | ||
436 | |||
437 | if (IS_ERR(addr)) | 434 | if (IS_ERR(addr)) |
438 | return PTR_ERR(addr); | 435 | return PTR_ERR(addr); |
439 | pmem->virt_addr = addr; | 436 | pmem->virt_addr = addr; |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 714aac72df0e..8d2fc84119c6 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -1243,7 +1243,7 @@ ccio_ioc_init(struct ioc *ioc) | |||
1243 | ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). | 1243 | ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). |
1244 | */ | 1244 | */ |
1245 | 1245 | ||
1246 | iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver)); | 1246 | iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver)); |
1247 | 1247 | ||
1248 | /* limit IOVA space size to 1MB-1GB */ | 1248 | /* limit IOVA space size to 1MB-1GB */ |
1249 | 1249 | ||
@@ -1282,7 +1282,7 @@ ccio_ioc_init(struct ioc *ioc) | |||
1282 | 1282 | ||
1283 | DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", | 1283 | DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", |
1284 | __func__, ioc->ioc_regs, | 1284 | __func__, ioc->ioc_regs, |
1285 | (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), | 1285 | (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), |
1286 | iova_space_size>>20, | 1286 | iova_space_size>>20, |
1287 | iov_order + PAGE_SHIFT); | 1287 | iov_order + PAGE_SHIFT); |
1288 | 1288 | ||
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 452d306ce5cb..42172eb32235 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -1406,7 +1406,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) | |||
1406 | ** for DMA hints - ergo only 30 bits max. | 1406 | ** for DMA hints - ergo only 30 bits max. |
1407 | */ | 1407 | */ |
1408 | 1408 | ||
1409 | iova_space_size = (u32) (totalram_pages/global_ioc_cnt); | 1409 | iova_space_size = (u32) (totalram_pages()/global_ioc_cnt); |
1410 | 1410 | ||
1411 | /* limit IOVA space size to 1MB-1GB */ | 1411 | /* limit IOVA space size to 1MB-1GB */ |
1412 | if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { | 1412 | if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { |
@@ -1431,7 +1431,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) | |||
1431 | DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", | 1431 | DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", |
1432 | __func__, | 1432 | __func__, |
1433 | ioc->ioc_hpa, | 1433 | ioc->ioc_hpa, |
1434 | (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), | 1434 | (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), |
1435 | iova_space_size>>20, | 1435 | iova_space_size>>20, |
1436 | iov_order + PAGE_SHIFT); | 1436 | iov_order + PAGE_SHIFT); |
1437 | 1437 | ||
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index ae3c5b25dcc7..a2eb25271c96 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c | |||
@@ -82,10 +82,8 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref) | |||
82 | complete_all(&p2p->devmap_ref_done); | 82 | complete_all(&p2p->devmap_ref_done); |
83 | } | 83 | } |
84 | 84 | ||
85 | static void pci_p2pdma_percpu_kill(void *data) | 85 | static void pci_p2pdma_percpu_kill(struct percpu_ref *ref) |
86 | { | 86 | { |
87 | struct percpu_ref *ref = data; | ||
88 | |||
89 | /* | 87 | /* |
90 | * pci_p2pdma_add_resource() may be called multiple times | 88 | * pci_p2pdma_add_resource() may be called multiple times |
91 | * by a driver and may register the percpu_kill devm action multiple | 89 | * by a driver and may register the percpu_kill devm action multiple |
@@ -198,6 +196,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, | |||
198 | pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; | 196 | pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; |
199 | pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - | 197 | pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - |
200 | pci_resource_start(pdev, bar); | 198 | pci_resource_start(pdev, bar); |
199 | pgmap->kill = pci_p2pdma_percpu_kill; | ||
201 | 200 | ||
202 | addr = devm_memremap_pages(&pdev->dev, pgmap); | 201 | addr = devm_memremap_pages(&pdev->dev, pgmap); |
203 | if (IS_ERR(addr)) { | 202 | if (IS_ERR(addr)) { |
@@ -211,11 +210,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, | |||
211 | if (error) | 210 | if (error) |
212 | goto pgmap_free; | 211 | goto pgmap_free; |
213 | 212 | ||
214 | error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill, | ||
215 | &pdev->p2pdma->devmap_ref); | ||
216 | if (error) | ||
217 | goto pgmap_free; | ||
218 | |||
219 | pci_info(pdev, "added peer-to-peer DMA memory %pR\n", | 213 | pci_info(pdev, "added peer-to-peer DMA memory %pR\n", |
220 | &pgmap->res); | 214 | &pgmap->res); |
221 | 215 | ||
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 548bb02c0ca6..6cb0eebdff89 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c | |||
@@ -110,7 +110,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, | |||
110 | unsigned long size_remaining = PAGE_ALIGN(size); | 110 | unsigned long size_remaining = PAGE_ALIGN(size); |
111 | unsigned int max_order = orders[0]; | 111 | unsigned int max_order = orders[0]; |
112 | 112 | ||
113 | if (size / PAGE_SIZE > totalram_pages / 2) | 113 | if (size / PAGE_SIZE > totalram_pages() / 2) |
114 | return -ENOMEM; | 114 | return -ENOMEM; |
115 | 115 | ||
116 | INIT_LIST_HEAD(&pages); | 116 | INIT_LIST_HEAD(&pages); |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 221b7333d067..ceb5048de9a7 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -352,7 +352,7 @@ static enum bp_state reserve_additional_memory(void) | |||
352 | mutex_unlock(&balloon_mutex); | 352 | mutex_unlock(&balloon_mutex); |
353 | /* add_memory_resource() requires the device_hotplug lock */ | 353 | /* add_memory_resource() requires the device_hotplug lock */ |
354 | lock_device_hotplug(); | 354 | lock_device_hotplug(); |
355 | rc = add_memory_resource(nid, resource, memhp_auto_online); | 355 | rc = add_memory_resource(nid, resource); |
356 | unlock_device_hotplug(); | 356 | unlock_device_hotplug(); |
357 | mutex_lock(&balloon_mutex); | 357 | mutex_lock(&balloon_mutex); |
358 | 358 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b0b02a501167..5efc5eee9544 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -520,26 +520,26 @@ static int unmap_if_in_range(struct gntdev_grant_map *map, | |||
520 | } | 520 | } |
521 | 521 | ||
522 | static int mn_invl_range_start(struct mmu_notifier *mn, | 522 | static int mn_invl_range_start(struct mmu_notifier *mn, |
523 | struct mm_struct *mm, | 523 | const struct mmu_notifier_range *range) |
524 | unsigned long start, unsigned long end, | ||
525 | bool blockable) | ||
526 | { | 524 | { |
527 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); | 525 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); |
528 | struct gntdev_grant_map *map; | 526 | struct gntdev_grant_map *map; |
529 | int ret = 0; | 527 | int ret = 0; |
530 | 528 | ||
531 | if (blockable) | 529 | if (range->blockable) |
532 | mutex_lock(&priv->lock); | 530 | mutex_lock(&priv->lock); |
533 | else if (!mutex_trylock(&priv->lock)) | 531 | else if (!mutex_trylock(&priv->lock)) |
534 | return -EAGAIN; | 532 | return -EAGAIN; |
535 | 533 | ||
536 | list_for_each_entry(map, &priv->maps, next) { | 534 | list_for_each_entry(map, &priv->maps, next) { |
537 | ret = unmap_if_in_range(map, start, end, blockable); | 535 | ret = unmap_if_in_range(map, range->start, range->end, |
536 | range->blockable); | ||
538 | if (ret) | 537 | if (ret) |
539 | goto out_unlock; | 538 | goto out_unlock; |
540 | } | 539 | } |
541 | list_for_each_entry(map, &priv->freeable_maps, next) { | 540 | list_for_each_entry(map, &priv->freeable_maps, next) { |
542 | ret = unmap_if_in_range(map, start, end, blockable); | 541 | ret = unmap_if_in_range(map, range->start, range->end, |
542 | range->blockable); | ||
543 | if (ret) | 543 | if (ret) |
544 | goto out_unlock; | 544 | goto out_unlock; |
545 | } | 545 | } |
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 5165aa82bf7d..246f6122c9ee 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -189,7 +189,7 @@ static void selfballoon_process(struct work_struct *work) | |||
189 | bool reset_timer = false; | 189 | bool reset_timer = false; |
190 | 190 | ||
191 | if (xen_selfballooning_enabled) { | 191 | if (xen_selfballooning_enabled) { |
192 | cur_pages = totalram_pages; | 192 | cur_pages = totalram_pages(); |
193 | tgt_pages = cur_pages; /* default is no change */ | 193 | tgt_pages = cur_pages; /* default is no change */ |
194 | goal_pages = vm_memory_committed() + | 194 | goal_pages = vm_memory_committed() + |
195 | totalreserve_pages + | 195 | totalreserve_pages + |
@@ -227,7 +227,7 @@ static void selfballoon_process(struct work_struct *work) | |||
227 | if (tgt_pages < floor_pages) | 227 | if (tgt_pages < floor_pages) |
228 | tgt_pages = floor_pages; | 228 | tgt_pages = floor_pages; |
229 | balloon_set_new_target(tgt_pages + | 229 | balloon_set_new_target(tgt_pages + |
230 | balloon_stats.current_pages - totalram_pages); | 230 | balloon_stats.current_pages - totalram_pages()); |
231 | reset_timer = true; | 231 | reset_timer = true; |
232 | } | 232 | } |
233 | #ifdef CONFIG_FRONTSWAP | 233 | #ifdef CONFIG_FRONTSWAP |
@@ -569,7 +569,7 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink) | |||
569 | * much more reliably and response faster in some cases. | 569 | * much more reliably and response faster in some cases. |
570 | */ | 570 | */ |
571 | if (!selfballoon_reserved_mb) { | 571 | if (!selfballoon_reserved_mb) { |
572 | reserve_pages = totalram_pages / 10; | 572 | reserve_pages = totalram_pages() / 10; |
573 | selfballoon_reserved_mb = PAGES2MB(reserve_pages); | 573 | selfballoon_reserved_mb = PAGES2MB(reserve_pages); |
574 | } | 574 | } |
575 | schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ); | 575 | schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ); |
@@ -415,7 +415,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, | |||
415 | BUG_ON(PageWriteback(old)); | 415 | BUG_ON(PageWriteback(old)); |
416 | get_page(new); | 416 | get_page(new); |
417 | 417 | ||
418 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); | 418 | rc = migrate_page_move_mapping(mapping, new, old, mode, 1); |
419 | if (rc != MIGRATEPAGE_SUCCESS) { | 419 | if (rc != MIGRATEPAGE_SUCCESS) { |
420 | put_page(new); | 420 | put_page(new); |
421 | goto out_unlock; | 421 | goto out_unlock; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index e1886cc7048f..450be88cffef 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1992,6 +1992,7 @@ static const struct address_space_operations def_blk_aops = { | |||
1992 | .writepages = blkdev_writepages, | 1992 | .writepages = blkdev_writepages, |
1993 | .releasepage = blkdev_releasepage, | 1993 | .releasepage = blkdev_releasepage, |
1994 | .direct_IO = blkdev_direct_IO, | 1994 | .direct_IO = blkdev_direct_IO, |
1995 | .migratepage = buffer_migrate_page_norefs, | ||
1995 | .is_dirty_writeback = buffer_check_dirty_writeback, | 1996 | .is_dirty_writeback = buffer_check_dirty_writeback, |
1996 | }; | 1997 | }; |
1997 | 1998 | ||
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 79a265ba9200..dfb64a5211b6 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -810,7 +810,7 @@ static inline int default_congestion_kb(void) | |||
810 | * This allows larger machines to have larger/more transfers. | 810 | * This allows larger machines to have larger/more transfers. |
811 | * Limit the default to 256M | 811 | * Limit the default to 256M |
812 | */ | 812 | */ |
813 | congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); | 813 | congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); |
814 | if (congestion_kb > 256*1024) | 814 | if (congestion_kb > 256*1024) |
815 | congestion_kb = 256*1024; | 815 | congestion_kb = 256*1024; |
816 | 816 | ||
@@ -779,7 +779,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, | |||
779 | 779 | ||
780 | i_mmap_lock_read(mapping); | 780 | i_mmap_lock_read(mapping); |
781 | vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { | 781 | vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { |
782 | unsigned long address, start, end; | 782 | struct mmu_notifier_range range; |
783 | unsigned long address; | ||
783 | 784 | ||
784 | cond_resched(); | 785 | cond_resched(); |
785 | 786 | ||
@@ -793,7 +794,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, | |||
793 | * call mmu_notifier_invalidate_range_start() on our behalf | 794 | * call mmu_notifier_invalidate_range_start() on our behalf |
794 | * before taking any lock. | 795 | * before taking any lock. |
795 | */ | 796 | */ |
796 | if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) | 797 | if (follow_pte_pmd(vma->vm_mm, address, &range, |
798 | &ptep, &pmdp, &ptl)) | ||
797 | continue; | 799 | continue; |
798 | 800 | ||
799 | /* | 801 | /* |
@@ -835,7 +837,7 @@ unlock_pte: | |||
835 | pte_unmap_unlock(ptep, ptl); | 837 | pte_unmap_unlock(ptep, ptl); |
836 | } | 838 | } |
837 | 839 | ||
838 | mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); | 840 | mmu_notifier_invalidate_range_end(&range); |
839 | } | 841 | } |
840 | i_mmap_unlock_read(mapping); | 842 | i_mmap_unlock_read(mapping); |
841 | } | 843 | } |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b293cb3e27a2..008b74eff00d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -2738,7 +2738,7 @@ int f2fs_migrate_page(struct address_space *mapping, | |||
2738 | */ | 2738 | */ |
2739 | extra_count = (atomic_written ? 1 : 0) - page_has_private(page); | 2739 | extra_count = (atomic_written ? 1 : 0) - page_has_private(page); |
2740 | rc = migrate_page_move_mapping(mapping, newpage, | 2740 | rc = migrate_page_move_mapping(mapping, newpage, |
2741 | page, NULL, mode, extra_count); | 2741 | page, mode, extra_count); |
2742 | if (rc != MIGRATEPAGE_SUCCESS) { | 2742 | if (rc != MIGRATEPAGE_SUCCESS) { |
2743 | if (atomic_written) | 2743 | if (atomic_written) |
2744 | mutex_unlock(&fi->inmem_lock); | 2744 | mutex_unlock(&fi->inmem_lock); |
diff --git a/fs/file_table.c b/fs/file_table.c index e49af4caf15d..5679e7fcb6b0 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -380,10 +380,11 @@ void __init files_init(void) | |||
380 | void __init files_maxfiles_init(void) | 380 | void __init files_maxfiles_init(void) |
381 | { | 381 | { |
382 | unsigned long n; | 382 | unsigned long n; |
383 | unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2; | 383 | unsigned long nr_pages = totalram_pages(); |
384 | unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2; | ||
384 | 385 | ||
385 | memreserve = min(memreserve, totalram_pages - 1); | 386 | memreserve = min(memreserve, nr_pages - 1); |
386 | n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; | 387 | n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; |
387 | 388 | ||
388 | files_stat.max_files = max_t(unsigned long, n, NR_FILE); | 389 | files_stat.max_files = max_t(unsigned long, n, NR_FILE); |
389 | } | 390 | } |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 568abed20eb2..76baaa6be393 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -824,7 +824,7 @@ static const struct super_operations fuse_super_operations = { | |||
824 | static void sanitize_global_limit(unsigned *limit) | 824 | static void sanitize_global_limit(unsigned *limit) |
825 | { | 825 | { |
826 | if (*limit == 0) | 826 | if (*limit == 0) |
827 | *limit = ((totalram_pages << PAGE_SHIFT) >> 13) / | 827 | *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / |
828 | sizeof(struct fuse_req); | 828 | sizeof(struct fuse_req); |
829 | 829 | ||
830 | if (*limit >= 1 << 16) | 830 | if (*limit >= 1 << 16) |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 32920a10100e..a2fcea5f8225 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -383,17 +383,16 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end) | |||
383 | * truncation is indicated by end of range being LLONG_MAX | 383 | * truncation is indicated by end of range being LLONG_MAX |
384 | * In this case, we first scan the range and release found pages. | 384 | * In this case, we first scan the range and release found pages. |
385 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv | 385 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv |
386 | * maps and global counts. Page faults can not race with truncation | 386 | * maps and global counts. |
387 | * in this routine. hugetlb_no_page() prevents page faults in the | ||
388 | * truncated range. It checks i_size before allocation, and again after | ||
389 | * with the page table lock for the page held. The same lock must be | ||
390 | * acquired to unmap a page. | ||
391 | * hole punch is indicated if end is not LLONG_MAX | 387 | * hole punch is indicated if end is not LLONG_MAX |
392 | * In the hole punch case we scan the range and release found pages. | 388 | * In the hole punch case we scan the range and release found pages. |
393 | * Only when releasing a page is the associated region/reserv map | 389 | * Only when releasing a page is the associated region/reserv map |
394 | * deleted. The region/reserv map for ranges without associated | 390 | * deleted. The region/reserv map for ranges without associated |
395 | * pages are not modified. Page faults can race with hole punch. | 391 | * pages are not modified. |
396 | * This is indicated if we find a mapped page. | 392 | * |
393 | * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent | ||
394 | * races with page faults. | ||
395 | * | ||
397 | * Note: If the passed end of range value is beyond the end of file, but | 396 | * Note: If the passed end of range value is beyond the end of file, but |
398 | * not LLONG_MAX this routine still performs a hole punch operation. | 397 | * not LLONG_MAX this routine still performs a hole punch operation. |
399 | */ | 398 | */ |
@@ -423,32 +422,14 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
423 | 422 | ||
424 | for (i = 0; i < pagevec_count(&pvec); ++i) { | 423 | for (i = 0; i < pagevec_count(&pvec); ++i) { |
425 | struct page *page = pvec.pages[i]; | 424 | struct page *page = pvec.pages[i]; |
426 | u32 hash; | ||
427 | 425 | ||
428 | index = page->index; | 426 | index = page->index; |
429 | hash = hugetlb_fault_mutex_hash(h, current->mm, | ||
430 | &pseudo_vma, | ||
431 | mapping, index, 0); | ||
432 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | ||
433 | |||
434 | /* | 427 | /* |
435 | * If page is mapped, it was faulted in after being | 428 | * A mapped page is impossible as callers should unmap |
436 | * unmapped in caller. Unmap (again) now after taking | 429 | * all references before calling. And, i_mmap_rwsem |
437 | * the fault mutex. The mutex will prevent faults | 430 | * prevents the creation of additional mappings. |
438 | * until we finish removing the page. | ||
439 | * | ||
440 | * This race can only happen in the hole punch case. | ||
441 | * Getting here in a truncate operation is a bug. | ||
442 | */ | 431 | */ |
443 | if (unlikely(page_mapped(page))) { | 432 | VM_BUG_ON(page_mapped(page)); |
444 | BUG_ON(truncate_op); | ||
445 | |||
446 | i_mmap_lock_write(mapping); | ||
447 | hugetlb_vmdelete_list(&mapping->i_mmap, | ||
448 | index * pages_per_huge_page(h), | ||
449 | (index + 1) * pages_per_huge_page(h)); | ||
450 | i_mmap_unlock_write(mapping); | ||
451 | } | ||
452 | 433 | ||
453 | lock_page(page); | 434 | lock_page(page); |
454 | /* | 435 | /* |
@@ -470,7 +451,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
470 | } | 451 | } |
471 | 452 | ||
472 | unlock_page(page); | 453 | unlock_page(page); |
473 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | ||
474 | } | 454 | } |
475 | huge_pagevec_release(&pvec); | 455 | huge_pagevec_release(&pvec); |
476 | cond_resched(); | 456 | cond_resched(); |
@@ -482,9 +462,20 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
482 | 462 | ||
483 | static void hugetlbfs_evict_inode(struct inode *inode) | 463 | static void hugetlbfs_evict_inode(struct inode *inode) |
484 | { | 464 | { |
465 | struct address_space *mapping = inode->i_mapping; | ||
485 | struct resv_map *resv_map; | 466 | struct resv_map *resv_map; |
486 | 467 | ||
468 | /* | ||
469 | * The vfs layer guarantees that there are no other users of this | ||
470 | * inode. Therefore, it would be safe to call remove_inode_hugepages | ||
471 | * without holding i_mmap_rwsem. We acquire and hold here to be | ||
472 | * consistent with other callers. Since there will be no contention | ||
473 | * on the semaphore, overhead is negligible. | ||
474 | */ | ||
475 | i_mmap_lock_write(mapping); | ||
487 | remove_inode_hugepages(inode, 0, LLONG_MAX); | 476 | remove_inode_hugepages(inode, 0, LLONG_MAX); |
477 | i_mmap_unlock_write(mapping); | ||
478 | |||
488 | resv_map = (struct resv_map *)inode->i_mapping->private_data; | 479 | resv_map = (struct resv_map *)inode->i_mapping->private_data; |
489 | /* root inode doesn't have the resv_map, so we should check it */ | 480 | /* root inode doesn't have the resv_map, so we should check it */ |
490 | if (resv_map) | 481 | if (resv_map) |
@@ -505,8 +496,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) | |||
505 | i_mmap_lock_write(mapping); | 496 | i_mmap_lock_write(mapping); |
506 | if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) | 497 | if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) |
507 | hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); | 498 | hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); |
508 | i_mmap_unlock_write(mapping); | ||
509 | remove_inode_hugepages(inode, offset, LLONG_MAX); | 499 | remove_inode_hugepages(inode, offset, LLONG_MAX); |
500 | i_mmap_unlock_write(mapping); | ||
510 | return 0; | 501 | return 0; |
511 | } | 502 | } |
512 | 503 | ||
@@ -540,8 +531,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
540 | hugetlb_vmdelete_list(&mapping->i_mmap, | 531 | hugetlb_vmdelete_list(&mapping->i_mmap, |
541 | hole_start >> PAGE_SHIFT, | 532 | hole_start >> PAGE_SHIFT, |
542 | hole_end >> PAGE_SHIFT); | 533 | hole_end >> PAGE_SHIFT); |
543 | i_mmap_unlock_write(mapping); | ||
544 | remove_inode_hugepages(inode, hole_start, hole_end); | 534 | remove_inode_hugepages(inode, hole_start, hole_end); |
535 | i_mmap_unlock_write(mapping); | ||
545 | inode_unlock(inode); | 536 | inode_unlock(inode); |
546 | } | 537 | } |
547 | 538 | ||
@@ -624,7 +615,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, | |||
624 | /* addr is the offset within the file (zero based) */ | 615 | /* addr is the offset within the file (zero based) */ |
625 | addr = index * hpage_size; | 616 | addr = index * hpage_size; |
626 | 617 | ||
627 | /* mutex taken here, fault path and hole punch */ | 618 | /* |
619 | * fault mutex taken here, protects against fault path | ||
620 | * and hole punch. inode_lock previously taken protects | ||
621 | * against truncation. | ||
622 | */ | ||
628 | hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, | 623 | hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, |
629 | index, addr); | 624 | index, addr); |
630 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 625 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
diff --git a/fs/iomap.c b/fs/iomap.c index e87c288cd5ef..3a0cd557b4cf 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -563,7 +563,7 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage, | |||
563 | { | 563 | { |
564 | int ret; | 564 | int ret; |
565 | 565 | ||
566 | ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); | 566 | ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
567 | if (ret != MIGRATEPAGE_SUCCESS) | 567 | if (ret != MIGRATEPAGE_SUCCESS) |
568 | return ret; | 568 | return ret; |
569 | 569 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 586726a590d8..4f15665f0ad1 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -2121,7 +2121,7 @@ int __init nfs_init_writepagecache(void) | |||
2121 | * This allows larger machines to have larger/more transfers. | 2121 | * This allows larger machines to have larger/more transfers. |
2122 | * Limit the default to 256M | 2122 | * Limit the default to 256M |
2123 | */ | 2123 | */ |
2124 | nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); | 2124 | nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); |
2125 | if (nfs_congestion_kb > 256*1024) | 2125 | if (nfs_congestion_kb > 256*1024) |
2126 | nfs_congestion_kb = 256*1024; | 2126 | nfs_congestion_kb = 256*1024; |
2127 | 2127 | ||
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index e2fe0e9ce0df..da52b594362a 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -99,7 +99,7 @@ static unsigned int | |||
99 | nfsd_cache_size_limit(void) | 99 | nfsd_cache_size_limit(void) |
100 | { | 100 | { |
101 | unsigned int limit; | 101 | unsigned int limit; |
102 | unsigned long low_pages = totalram_pages - totalhigh_pages; | 102 | unsigned long low_pages = totalram_pages() - totalhigh_pages(); |
103 | 103 | ||
104 | limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); | 104 | limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); |
105 | return min_t(unsigned int, limit, 256*1024); | 105 | return min_t(unsigned int, limit, 256*1024); |
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index ab172e5f51d9..5becc8acc8f4 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h | |||
@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) | |||
47 | return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); | 47 | return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); |
48 | /* return (void *)__get_free_page(gfp_mask); */ | 48 | /* return (void *)__get_free_page(gfp_mask); */ |
49 | } | 49 | } |
50 | if (likely((size >> PAGE_SHIFT) < totalram_pages)) | 50 | if (likely((size >> PAGE_SHIFT) < totalram_pages())) |
51 | return __vmalloc(size, gfp_mask, PAGE_KERNEL); | 51 | return __vmalloc(size, gfp_mask, PAGE_KERNEL); |
52 | return NULL; | 52 | return NULL; |
53 | } | 53 | } |
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 99ee093182cb..cc9b32b9db7c 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | ccflags-y := -Ifs/ocfs2 | 2 | ccflags-y := -I$(src) |
3 | 3 | ||
4 | obj-$(CONFIG_OCFS2_FS) += \ | 4 | obj-$(CONFIG_OCFS2_FS) += \ |
5 | ocfs2.o \ | 5 | ocfs2.o \ |
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 4ebbd57cbf84..f9b84f7a3e4b 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
@@ -161,7 +161,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, | |||
161 | #endif | 161 | #endif |
162 | } | 162 | } |
163 | 163 | ||
164 | clear_buffer_uptodate(bh); | ||
165 | get_bh(bh); /* for end_buffer_read_sync() */ | 164 | get_bh(bh); /* for end_buffer_read_sync() */ |
166 | bh->b_end_io = end_buffer_read_sync; | 165 | bh->b_end_io = end_buffer_read_sync; |
167 | submit_bh(REQ_OP_READ, 0, bh); | 166 | submit_bh(REQ_OP_READ, 0, bh); |
@@ -341,7 +340,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
341 | continue; | 340 | continue; |
342 | } | 341 | } |
343 | 342 | ||
344 | clear_buffer_uptodate(bh); | ||
345 | get_bh(bh); /* for end_buffer_read_sync() */ | 343 | get_bh(bh); /* for end_buffer_read_sync() */ |
346 | if (validate) | 344 | if (validate) |
347 | set_buffer_needs_validate(bh); | 345 | set_buffer_needs_validate(bh); |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9b2ed62dd638..f3c20b279eb2 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -582,9 +582,10 @@ bail: | |||
582 | } | 582 | } |
583 | 583 | ||
584 | static int o2hb_read_slots(struct o2hb_region *reg, | 584 | static int o2hb_read_slots(struct o2hb_region *reg, |
585 | unsigned int begin_slot, | ||
585 | unsigned int max_slots) | 586 | unsigned int max_slots) |
586 | { | 587 | { |
587 | unsigned int current_slot=0; | 588 | unsigned int current_slot = begin_slot; |
588 | int status; | 589 | int status; |
589 | struct o2hb_bio_wait_ctxt wc; | 590 | struct o2hb_bio_wait_ctxt wc; |
590 | struct bio *bio; | 591 | struct bio *bio; |
@@ -1093,9 +1094,14 @@ static int o2hb_highest_node(unsigned long *nodes, int numbits) | |||
1093 | return find_last_bit(nodes, numbits); | 1094 | return find_last_bit(nodes, numbits); |
1094 | } | 1095 | } |
1095 | 1096 | ||
1097 | static int o2hb_lowest_node(unsigned long *nodes, int numbits) | ||
1098 | { | ||
1099 | return find_first_bit(nodes, numbits); | ||
1100 | } | ||
1101 | |||
1096 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | 1102 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) |
1097 | { | 1103 | { |
1098 | int i, ret, highest_node; | 1104 | int i, ret, highest_node, lowest_node; |
1099 | int membership_change = 0, own_slot_ok = 0; | 1105 | int membership_change = 0, own_slot_ok = 0; |
1100 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 1106 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
1101 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 1107 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
@@ -1120,7 +1126,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
1120 | } | 1126 | } |
1121 | 1127 | ||
1122 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); | 1128 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); |
1123 | if (highest_node >= O2NM_MAX_NODES) { | 1129 | lowest_node = o2hb_lowest_node(configured_nodes, O2NM_MAX_NODES); |
1130 | if (highest_node >= O2NM_MAX_NODES || lowest_node >= O2NM_MAX_NODES) { | ||
1124 | mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); | 1131 | mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); |
1125 | ret = -EINVAL; | 1132 | ret = -EINVAL; |
1126 | goto bail; | 1133 | goto bail; |
@@ -1130,7 +1137,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
1130 | * yet. Of course, if the node definitions have holes in them | 1137 | * yet. Of course, if the node definitions have holes in them |
1131 | * then we're reading an empty slot anyway... Consider this | 1138 | * then we're reading an empty slot anyway... Consider this |
1132 | * best-effort. */ | 1139 | * best-effort. */ |
1133 | ret = o2hb_read_slots(reg, highest_node + 1); | 1140 | ret = o2hb_read_slots(reg, lowest_node, highest_node + 1); |
1134 | if (ret < 0) { | 1141 | if (ret < 0) { |
1135 | mlog_errno(ret); | 1142 | mlog_errno(ret); |
1136 | goto bail; | 1143 | goto bail; |
@@ -1801,7 +1808,7 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg) | |||
1801 | struct o2hb_disk_slot *slot; | 1808 | struct o2hb_disk_slot *slot; |
1802 | struct o2hb_disk_heartbeat_block *hb_block; | 1809 | struct o2hb_disk_heartbeat_block *hb_block; |
1803 | 1810 | ||
1804 | ret = o2hb_read_slots(reg, reg->hr_blocks); | 1811 | ret = o2hb_read_slots(reg, 0, reg->hr_blocks); |
1805 | if (ret) | 1812 | if (ret) |
1806 | goto out; | 1813 | goto out; |
1807 | 1814 | ||
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile index bd1aab1f49a4..ef2854422a6e 100644 --- a/fs/ocfs2/dlm/Makefile +++ b/fs/ocfs2/dlm/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | ccflags-y := -Ifs/ocfs2 | 1 | ccflags-y := -I$(src)/.. |
2 | 2 | ||
3 | obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o | 3 | obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o |
4 | 4 | ||
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile index eed3db8c5b49..33431a0296a3 100644 --- a/fs/ocfs2/dlmfs/Makefile +++ b/fs/ocfs2/dlmfs/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | ccflags-y := -Ifs/ocfs2 | 1 | ccflags-y := -I$(src)/.. |
2 | 2 | ||
3 | obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o | 3 | obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o |
4 | 4 | ||
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 602c71f32740..b8fa1487cd85 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c | |||
@@ -179,7 +179,7 @@ bail: | |||
179 | static int dlmfs_file_release(struct inode *inode, | 179 | static int dlmfs_file_release(struct inode *inode, |
180 | struct file *file) | 180 | struct file *file) |
181 | { | 181 | { |
182 | int level, status; | 182 | int level; |
183 | struct dlmfs_inode_private *ip = DLMFS_I(inode); | 183 | struct dlmfs_inode_private *ip = DLMFS_I(inode); |
184 | struct dlmfs_filp_private *fp = file->private_data; | 184 | struct dlmfs_filp_private *fp = file->private_data; |
185 | 185 | ||
@@ -188,7 +188,6 @@ static int dlmfs_file_release(struct inode *inode, | |||
188 | 188 | ||
189 | mlog(0, "close called on inode %lu\n", inode->i_ino); | 189 | mlog(0, "close called on inode %lu\n", inode->i_ino); |
190 | 190 | ||
191 | status = 0; | ||
192 | if (fp) { | 191 | if (fp) { |
193 | level = fp->fp_lock_level; | 192 | level = fp->fp_lock_level; |
194 | if (level != DLM_LOCK_IV) | 193 | if (level != DLM_LOCK_IV) |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index b63c97f4318e..46fd3ef2cf21 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1017,7 +1017,8 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
1017 | mlog_errno(status); | 1017 | mlog_errno(status); |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | if (status == 0) { | 1020 | /* Shutdown the kernel journal system */ |
1021 | if (!jbd2_journal_destroy(journal->j_journal) && !status) { | ||
1021 | /* | 1022 | /* |
1022 | * Do not toggle if flush was unsuccessful otherwise | 1023 | * Do not toggle if flush was unsuccessful otherwise |
1023 | * will leave dirty metadata in a "clean" journal | 1024 | * will leave dirty metadata in a "clean" journal |
@@ -1026,9 +1027,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
1026 | if (status < 0) | 1027 | if (status < 0) |
1027 | mlog_errno(status); | 1028 | mlog_errno(status); |
1028 | } | 1029 | } |
1029 | |||
1030 | /* Shutdown the kernel journal system */ | ||
1031 | jbd2_journal_destroy(journal->j_journal); | ||
1032 | journal->j_journal = NULL; | 1030 | journal->j_journal = NULL; |
1033 | 1031 | ||
1034 | OCFS2_I(inode)->ip_open_count--; | 1032 | OCFS2_I(inode)->ip_open_count--; |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 7642b6712c39..58973e4d2471 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb) | |||
345 | if (num_used | 345 | if (num_used |
346 | || alloc->id1.bitmap1.i_used | 346 | || alloc->id1.bitmap1.i_used |
347 | || alloc->id1.bitmap1.i_total | 347 | || alloc->id1.bitmap1.i_total |
348 | || la->la_bm_off) | 348 | || la->la_bm_off) { |
349 | mlog(ML_ERROR, "Local alloc hasn't been recovered!\n" | 349 | mlog(ML_ERROR, "inconsistent detected, clean journal with" |
350 | " unrecovered local alloc, please run fsck.ocfs2!\n" | ||
350 | "found = %u, set = %u, taken = %u, off = %u\n", | 351 | "found = %u, set = %u, taken = %u, off = %u\n", |
351 | num_used, le32_to_cpu(alloc->id1.bitmap1.i_used), | 352 | num_used, le32_to_cpu(alloc->id1.bitmap1.i_used), |
352 | le32_to_cpu(alloc->id1.bitmap1.i_total), | 353 | le32_to_cpu(alloc->id1.bitmap1.i_total), |
353 | OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); | 354 | OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); |
354 | 355 | ||
356 | status = -EINVAL; | ||
357 | goto bail; | ||
358 | } | ||
359 | |||
355 | osb->local_alloc_bh = alloc_bh; | 360 | osb->local_alloc_bh = alloc_bh; |
356 | osb->local_alloc_state = OCFS2_LA_ENABLED; | 361 | osb->local_alloc_state = OCFS2_LA_ENABLED; |
357 | 362 | ||
@@ -835,7 +840,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, | |||
835 | u32 *numbits, | 840 | u32 *numbits, |
836 | struct ocfs2_alloc_reservation *resv) | 841 | struct ocfs2_alloc_reservation *resv) |
837 | { | 842 | { |
838 | int numfound = 0, bitoff, left, startoff, lastzero; | 843 | int numfound = 0, bitoff, left, startoff; |
839 | int local_resv = 0; | 844 | int local_resv = 0; |
840 | struct ocfs2_alloc_reservation r; | 845 | struct ocfs2_alloc_reservation r; |
841 | void *bitmap = NULL; | 846 | void *bitmap = NULL; |
@@ -873,7 +878,6 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, | |||
873 | bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap; | 878 | bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap; |
874 | 879 | ||
875 | numfound = bitoff = startoff = 0; | 880 | numfound = bitoff = startoff = 0; |
876 | lastzero = -1; | ||
877 | left = le32_to_cpu(alloc->id1.bitmap1.i_total); | 881 | left = le32_to_cpu(alloc->id1.bitmap1.i_total); |
878 | while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) { | 882 | while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) { |
879 | if (bitoff == left) { | 883 | if (bitoff == left) { |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 0ceb3b6b37e7..9d428d5a0ac8 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -392,6 +392,15 @@ static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) | |||
392 | seq_putc(m, '\n'); | 392 | seq_putc(m, '\n'); |
393 | } | 393 | } |
394 | 394 | ||
395 | static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm) | ||
396 | { | ||
397 | bool thp_enabled = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE); | ||
398 | |||
399 | if (thp_enabled) | ||
400 | thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags); | ||
401 | seq_printf(m, "THP_enabled:\t%d\n", thp_enabled); | ||
402 | } | ||
403 | |||
395 | int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | 404 | int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, |
396 | struct pid *pid, struct task_struct *task) | 405 | struct pid *pid, struct task_struct *task) |
397 | { | 406 | { |
@@ -406,6 +415,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | |||
406 | if (mm) { | 415 | if (mm) { |
407 | task_mem(m, mm); | 416 | task_mem(m, mm); |
408 | task_core_dumping(m, mm); | 417 | task_core_dumping(m, mm); |
418 | task_thp_status(m, mm); | ||
409 | mmput(mm); | 419 | mmput(mm); |
410 | } | 420 | } |
411 | task_sig(m, task); | 421 | task_sig(m, task); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index ce3465479447..d7fd1ca807d2 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -530,7 +530,7 @@ static const struct file_operations proc_lstats_operations = { | |||
530 | static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, | 530 | static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, |
531 | struct pid *pid, struct task_struct *task) | 531 | struct pid *pid, struct task_struct *task) |
532 | { | 532 | { |
533 | unsigned long totalpages = totalram_pages + total_swap_pages; | 533 | unsigned long totalpages = totalram_pages() + total_swap_pages; |
534 | unsigned long points = 0; | 534 | unsigned long points = 0; |
535 | 535 | ||
536 | points = oom_badness(task, NULL, NULL, totalpages) * | 536 | points = oom_badness(task, NULL, NULL, totalpages) * |
diff --git a/fs/proc/page.c b/fs/proc/page.c index 6c517b11acf8..40b05e0d4274 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -46,7 +46,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, | |||
46 | ppage = pfn_to_page(pfn); | 46 | ppage = pfn_to_page(pfn); |
47 | else | 47 | else |
48 | ppage = NULL; | 48 | ppage = NULL; |
49 | if (!ppage || PageSlab(ppage)) | 49 | if (!ppage || PageSlab(ppage) || page_has_type(ppage)) |
50 | pcount = 0; | 50 | pcount = 0; |
51 | else | 51 | else |
52 | pcount = page_mapcount(ppage); | 52 | pcount = page_mapcount(ppage); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 47c3764c469b..f0ec9edab2f3 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -790,6 +790,8 @@ static int show_smap(struct seq_file *m, void *v) | |||
790 | 790 | ||
791 | __show_smap(m, &mss); | 791 | __show_smap(m, &mss); |
792 | 792 | ||
793 | seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma)); | ||
794 | |||
793 | if (arch_pkeys_enabled()) | 795 | if (arch_pkeys_enabled()) |
794 | seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); | 796 | seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); |
795 | show_smap_vma_flags(m, vma); | 797 | show_smap_vma_flags(m, vma); |
@@ -1096,6 +1098,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
1096 | return -ESRCH; | 1098 | return -ESRCH; |
1097 | mm = get_task_mm(task); | 1099 | mm = get_task_mm(task); |
1098 | if (mm) { | 1100 | if (mm) { |
1101 | struct mmu_notifier_range range; | ||
1099 | struct clear_refs_private cp = { | 1102 | struct clear_refs_private cp = { |
1100 | .type = type, | 1103 | .type = type, |
1101 | }; | 1104 | }; |
@@ -1139,11 +1142,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
1139 | downgrade_write(&mm->mmap_sem); | 1142 | downgrade_write(&mm->mmap_sem); |
1140 | break; | 1143 | break; |
1141 | } | 1144 | } |
1142 | mmu_notifier_invalidate_range_start(mm, 0, -1); | 1145 | |
1146 | mmu_notifier_range_init(&range, mm, 0, -1UL); | ||
1147 | mmu_notifier_invalidate_range_start(&range); | ||
1143 | } | 1148 | } |
1144 | walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); | 1149 | walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); |
1145 | if (type == CLEAR_REFS_SOFT_DIRTY) | 1150 | if (type == CLEAR_REFS_SOFT_DIRTY) |
1146 | mmu_notifier_invalidate_range_end(mm, 0, -1); | 1151 | mmu_notifier_invalidate_range_end(&range); |
1147 | tlb_finish_mmu(&tlb, 0, -1); | 1152 | tlb_finish_mmu(&tlb, 0, -1); |
1148 | up_read(&mm->mmap_sem); | 1153 | up_read(&mm->mmap_sem); |
1149 | out_mm: | 1154 | out_mm: |
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 1b78f2e09218..5d2ffb1a45fc 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
@@ -1481,7 +1481,7 @@ static int ubifs_migrate_page(struct address_space *mapping, | |||
1481 | { | 1481 | { |
1482 | int rc; | 1482 | int rc; |
1483 | 1483 | ||
1484 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); | 1484 | rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
1485 | if (rc != MIGRATEPAGE_SUCCESS) | 1485 | if (rc != MIGRATEPAGE_SUCCESS) |
1486 | return rc; | 1486 | return rc; |
1487 | 1487 | ||
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 59dc28047030..89800fc7dc9d 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -53,7 +53,7 @@ struct userfaultfd_ctx { | |||
53 | /* a refile sequence protected by fault_pending_wqh lock */ | 53 | /* a refile sequence protected by fault_pending_wqh lock */ |
54 | struct seqcount refile_seq; | 54 | struct seqcount refile_seq; |
55 | /* pseudo fd refcounting */ | 55 | /* pseudo fd refcounting */ |
56 | atomic_t refcount; | 56 | refcount_t refcount; |
57 | /* userfaultfd syscall flags */ | 57 | /* userfaultfd syscall flags */ |
58 | unsigned int flags; | 58 | unsigned int flags; |
59 | /* features requested from the userspace */ | 59 | /* features requested from the userspace */ |
@@ -140,8 +140,7 @@ out: | |||
140 | */ | 140 | */ |
141 | static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) | 141 | static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) |
142 | { | 142 | { |
143 | if (!atomic_inc_not_zero(&ctx->refcount)) | 143 | refcount_inc(&ctx->refcount); |
144 | BUG(); | ||
145 | } | 144 | } |
146 | 145 | ||
147 | /** | 146 | /** |
@@ -154,7 +153,7 @@ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) | |||
154 | */ | 153 | */ |
155 | static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) | 154 | static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) |
156 | { | 155 | { |
157 | if (atomic_dec_and_test(&ctx->refcount)) { | 156 | if (refcount_dec_and_test(&ctx->refcount)) { |
158 | VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); | 157 | VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); |
159 | VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); | 158 | VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); |
160 | VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); | 159 | VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); |
@@ -686,7 +685,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) | |||
686 | return -ENOMEM; | 685 | return -ENOMEM; |
687 | } | 686 | } |
688 | 687 | ||
689 | atomic_set(&ctx->refcount, 1); | 688 | refcount_set(&ctx->refcount, 1); |
690 | ctx->flags = octx->flags; | 689 | ctx->flags = octx->flags; |
691 | ctx->state = UFFD_STATE_RUNNING; | 690 | ctx->state = UFFD_STATE_RUNNING; |
692 | ctx->features = octx->features; | 691 | ctx->features = octx->features; |
@@ -736,10 +735,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma, | |||
736 | struct userfaultfd_ctx *ctx; | 735 | struct userfaultfd_ctx *ctx; |
737 | 736 | ||
738 | ctx = vma->vm_userfaultfd_ctx.ctx; | 737 | ctx = vma->vm_userfaultfd_ctx.ctx; |
739 | if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) { | 738 | |
739 | if (!ctx) | ||
740 | return; | ||
741 | |||
742 | if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { | ||
740 | vm_ctx->ctx = ctx; | 743 | vm_ctx->ctx = ctx; |
741 | userfaultfd_ctx_get(ctx); | 744 | userfaultfd_ctx_get(ctx); |
742 | WRITE_ONCE(ctx->mmap_changing, true); | 745 | WRITE_ONCE(ctx->mmap_changing, true); |
746 | } else { | ||
747 | /* Drop uffd context if remap feature not enabled */ | ||
748 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; | ||
749 | vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); | ||
743 | } | 750 | } |
744 | } | 751 | } |
745 | 752 | ||
@@ -1927,7 +1934,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) | |||
1927 | if (!ctx) | 1934 | if (!ctx) |
1928 | return -ENOMEM; | 1935 | return -ENOMEM; |
1929 | 1936 | ||
1930 | atomic_set(&ctx->refcount, 1); | 1937 | refcount_set(&ctx->refcount, 1); |
1931 | ctx->flags = flags; | 1938 | ctx->flags = flags; |
1932 | ctx->features = 0; | 1939 | ctx->features = 0; |
1933 | ctx->state = UFFD_STATE_WAIT_API; | 1940 | ctx->state = UFFD_STATE_WAIT_API; |
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h index 296c65442f00..95a159a4137f 100644 --- a/include/asm-generic/error-injection.h +++ b/include/asm-generic/error-injection.h | |||
@@ -8,6 +8,7 @@ enum { | |||
8 | EI_ETYPE_NULL, /* Return NULL if failure */ | 8 | EI_ETYPE_NULL, /* Return NULL if failure */ |
9 | EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ | 9 | EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ |
10 | EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ | 10 | EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ |
11 | EI_ETYPE_TRUE, /* Return true if failure */ | ||
11 | }; | 12 | }; |
12 | 13 | ||
13 | struct error_injection_entry { | 14 | struct error_injection_entry { |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index a9cac82e9a7a..05e61e6c843f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -1057,6 +1057,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); | |||
1057 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); | 1057 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); |
1058 | int pud_clear_huge(pud_t *pud); | 1058 | int pud_clear_huge(pud_t *pud); |
1059 | int pmd_clear_huge(pmd_t *pmd); | 1059 | int pmd_clear_huge(pmd_t *pmd); |
1060 | int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); | ||
1060 | int pud_free_pmd_page(pud_t *pud, unsigned long addr); | 1061 | int pud_free_pmd_page(pud_t *pud, unsigned long addr); |
1061 | int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); | 1062 | int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); |
1062 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ | 1063 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ |
@@ -1084,6 +1085,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) | |||
1084 | { | 1085 | { |
1085 | return 0; | 1086 | return 0; |
1086 | } | 1087 | } |
1088 | static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) | ||
1089 | { | ||
1090 | return 0; | ||
1091 | } | ||
1087 | static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) | 1092 | static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) |
1088 | { | 1093 | { |
1089 | return 0; | 1094 | return 0; |
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 9a6bc0951cfa..c31157135598 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
@@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb) | |||
258 | */ | 258 | */ |
259 | static inline void wb_put(struct bdi_writeback *wb) | 259 | static inline void wb_put(struct bdi_writeback *wb) |
260 | { | 260 | { |
261 | if (WARN_ON_ONCE(!wb->bdi)) { | ||
262 | /* | ||
263 | * A driver bug might cause a file to be removed before bdi was | ||
264 | * initialized. | ||
265 | */ | ||
266 | return; | ||
267 | } | ||
268 | |||
261 | if (wb != &wb->bdi->wb) | 269 | if (wb != &wb->bdi->wb) |
262 | percpu_ref_put(&wb->refcnt); | 270 | percpu_ref_put(&wb->refcnt); |
263 | } | 271 | } |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3e7dafb3ea80..39f668d5066b 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -16,9 +16,13 @@ | |||
16 | /* all clang versions usable with the kernel support KASAN ABI version 5 */ | 16 | /* all clang versions usable with the kernel support KASAN ABI version 5 */ |
17 | #define KASAN_ABI_VERSION 5 | 17 | #define KASAN_ABI_VERSION 5 |
18 | 18 | ||
19 | #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) | ||
19 | /* emulate gcc's __SANITIZE_ADDRESS__ flag */ | 20 | /* emulate gcc's __SANITIZE_ADDRESS__ flag */ |
20 | #if __has_feature(address_sanitizer) | ||
21 | #define __SANITIZE_ADDRESS__ | 21 | #define __SANITIZE_ADDRESS__ |
22 | #define __no_sanitize_address \ | ||
23 | __attribute__((no_sanitize("address", "hwaddress"))) | ||
24 | #else | ||
25 | #define __no_sanitize_address | ||
22 | #endif | 26 | #endif |
23 | 27 | ||
24 | /* | 28 | /* |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 2010493e1040..5776da43da97 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -143,6 +143,12 @@ | |||
143 | #define KASAN_ABI_VERSION 3 | 143 | #define KASAN_ABI_VERSION 3 |
144 | #endif | 144 | #endif |
145 | 145 | ||
146 | #if __has_attribute(__no_sanitize_address__) | ||
147 | #define __no_sanitize_address __attribute__((no_sanitize_address)) | ||
148 | #else | ||
149 | #define __no_sanitize_address | ||
150 | #endif | ||
151 | |||
146 | #if GCC_VERSION >= 50100 | 152 | #if GCC_VERSION >= 50100 |
147 | #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 | 153 | #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 |
148 | #endif | 154 | #endif |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index fe07b680dd4a..19f32b0c29af 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h | |||
@@ -200,19 +200,6 @@ | |||
200 | #define __noreturn __attribute__((__noreturn__)) | 200 | #define __noreturn __attribute__((__noreturn__)) |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * Optional: only supported since gcc >= 4.8 | ||
204 | * Optional: not supported by icc | ||
205 | * | ||
206 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute | ||
207 | * clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis | ||
208 | */ | ||
209 | #if __has_attribute(__no_sanitize_address__) | ||
210 | # define __no_sanitize_address __attribute__((__no_sanitize_address__)) | ||
211 | #else | ||
212 | # define __no_sanitize_address | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute | 203 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute |
217 | * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute | 204 | * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute |
218 | */ | 205 | */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 6d52ce6af4ff..811c77743dad 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -3269,8 +3269,12 @@ extern int generic_check_addressable(unsigned, u64); | |||
3269 | extern int buffer_migrate_page(struct address_space *, | 3269 | extern int buffer_migrate_page(struct address_space *, |
3270 | struct page *, struct page *, | 3270 | struct page *, struct page *, |
3271 | enum migrate_mode); | 3271 | enum migrate_mode); |
3272 | extern int buffer_migrate_page_norefs(struct address_space *, | ||
3273 | struct page *, struct page *, | ||
3274 | enum migrate_mode); | ||
3272 | #else | 3275 | #else |
3273 | #define buffer_migrate_page NULL | 3276 | #define buffer_migrate_page NULL |
3277 | #define buffer_migrate_page_norefs NULL | ||
3274 | #endif | 3278 | #endif |
3275 | 3279 | ||
3276 | extern int setattr_prepare(struct dentry *, struct iattr *); | 3280 | extern int setattr_prepare(struct dentry *, struct iattr *); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0705164f928c..5f5e25fd6149 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -81,7 +81,7 @@ struct vm_area_struct; | |||
81 | * | 81 | * |
82 | * %__GFP_HARDWALL enforces the cpuset memory allocation policy. | 82 | * %__GFP_HARDWALL enforces the cpuset memory allocation policy. |
83 | * | 83 | * |
84 | * %__GFP_THISNODE forces the allocation to be satisified from the requested | 84 | * %__GFP_THISNODE forces the allocation to be satisfied from the requested |
85 | * node with no fallbacks or placement policy enforcements. | 85 | * node with no fallbacks or placement policy enforcements. |
86 | * | 86 | * |
87 | * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. | 87 | * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 0690679832d4..ea5cdbd8c2c3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -36,7 +36,31 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) | |||
36 | 36 | ||
37 | /* declarations for linux/mm/highmem.c */ | 37 | /* declarations for linux/mm/highmem.c */ |
38 | unsigned int nr_free_highpages(void); | 38 | unsigned int nr_free_highpages(void); |
39 | extern unsigned long totalhigh_pages; | 39 | extern atomic_long_t _totalhigh_pages; |
40 | static inline unsigned long totalhigh_pages(void) | ||
41 | { | ||
42 | return (unsigned long)atomic_long_read(&_totalhigh_pages); | ||
43 | } | ||
44 | |||
45 | static inline void totalhigh_pages_inc(void) | ||
46 | { | ||
47 | atomic_long_inc(&_totalhigh_pages); | ||
48 | } | ||
49 | |||
50 | static inline void totalhigh_pages_dec(void) | ||
51 | { | ||
52 | atomic_long_dec(&_totalhigh_pages); | ||
53 | } | ||
54 | |||
55 | static inline void totalhigh_pages_add(long count) | ||
56 | { | ||
57 | atomic_long_add(count, &_totalhigh_pages); | ||
58 | } | ||
59 | |||
60 | static inline void totalhigh_pages_set(long val) | ||
61 | { | ||
62 | atomic_long_set(&_totalhigh_pages, val); | ||
63 | } | ||
40 | 64 | ||
41 | void kmap_flush_unused(void); | 65 | void kmap_flush_unused(void); |
42 | 66 | ||
@@ -51,7 +75,7 @@ static inline struct page *kmap_to_page(void *addr) | |||
51 | return virt_to_page(addr); | 75 | return virt_to_page(addr); |
52 | } | 76 | } |
53 | 77 | ||
54 | #define totalhigh_pages 0UL | 78 | static inline unsigned long totalhigh_pages(void) { return 0UL; } |
55 | 79 | ||
56 | #ifndef ARCH_HAS_KMAP | 80 | #ifndef ARCH_HAS_KMAP |
57 | static inline void *kmap(struct page *page) | 81 | static inline void *kmap(struct page *page) |
diff --git a/include/linux/hmm.h b/include/linux/hmm.h index c6fb869a81c0..66f9ebbb1df3 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h | |||
@@ -69,6 +69,7 @@ | |||
69 | #define LINUX_HMM_H | 69 | #define LINUX_HMM_H |
70 | 70 | ||
71 | #include <linux/kconfig.h> | 71 | #include <linux/kconfig.h> |
72 | #include <asm/pgtable.h> | ||
72 | 73 | ||
73 | #if IS_ENABLED(CONFIG_HMM) | 74 | #if IS_ENABLED(CONFIG_HMM) |
74 | 75 | ||
@@ -486,6 +487,7 @@ struct hmm_devmem_ops { | |||
486 | * @device: device to bind resource to | 487 | * @device: device to bind resource to |
487 | * @ops: memory operations callback | 488 | * @ops: memory operations callback |
488 | * @ref: per CPU refcount | 489 | * @ref: per CPU refcount |
490 | * @page_fault: callback when CPU fault on an unaddressable device page | ||
489 | * | 491 | * |
490 | * This is a helper structure for device drivers that do not wish to implement | 492 |
491 | * the gory details related to hotplugging new memory and allocating struct | 493 |
@@ -493,7 +495,28 @@ struct hmm_devmem_ops { | |||
493 | * | 495 | * |
494 | * Device drivers can directly use ZONE_DEVICE memory on their own if they | 496 | * Device drivers can directly use ZONE_DEVICE memory on their own if they |
495 | * wish to do so. | 497 | * wish to do so. |
498 | * | ||
499 | * The page_fault() callback must migrate page back, from device memory to | ||
500 | * system memory, so that the CPU can access it. This might fail for various | ||
501 | * reasons (device issues, device has been unplugged, ...). When such error | ||
502 | * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and | ||
503 | * set the CPU page table entry to "poisoned". | ||
504 | * | ||
505 | * Note that because memory cgroup charges are transferred to the device memory, | ||
506 | * this should never fail due to memory restrictions. However, allocation | ||
507 | * of a regular system page might still fail because we are out of memory. If | ||
508 | * that happens, the page_fault() callback must return VM_FAULT_OOM. | ||
509 | * | ||
510 | * The page_fault() callback can also try to migrate back multiple pages in one | ||
511 | * chunk, as an optimization. It must, however, prioritize the faulting address | ||
512 | * over all the others. | ||
496 | */ | 513 | */ |
514 | typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, | ||
515 | unsigned long addr, | ||
516 | const struct page *page, | ||
517 | unsigned int flags, | ||
518 | pmd_t *pmdp); | ||
519 | |||
497 | struct hmm_devmem { | 520 | struct hmm_devmem { |
498 | struct completion completion; | 521 | struct completion completion; |
499 | unsigned long pfn_first; | 522 | unsigned long pfn_first; |
@@ -503,6 +526,7 @@ struct hmm_devmem { | |||
503 | struct dev_pagemap pagemap; | 526 | struct dev_pagemap pagemap; |
504 | const struct hmm_devmem_ops *ops; | 527 | const struct hmm_devmem_ops *ops; |
505 | struct percpu_ref ref; | 528 | struct percpu_ref ref; |
529 | dev_page_fault_t page_fault; | ||
506 | }; | 530 | }; |
507 | 531 | ||
508 | /* | 532 | /* |
@@ -512,8 +536,7 @@ struct hmm_devmem { | |||
512 | * enough and allocate struct page for it. | 536 | * enough and allocate struct page for it. |
513 | * | 537 | * |
514 | * The device driver can wrap the hmm_devmem struct inside a private device | 538 | * The device driver can wrap the hmm_devmem struct inside a private device |
515 | * driver struct. The device driver must call hmm_devmem_remove() before the | 539 | * driver struct. |
516 | * device goes away and before freeing the hmm_devmem struct memory. | ||
517 | */ | 540 | */ |
518 | struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | 541 | struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
519 | struct device *device, | 542 | struct device *device, |
@@ -521,7 +544,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | |||
521 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, | 544 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
522 | struct device *device, | 545 | struct device *device, |
523 | struct resource *res); | 546 | struct resource *res); |
524 | void hmm_devmem_remove(struct hmm_devmem *devmem); | ||
525 | 547 | ||
526 | /* | 548 | /* |
527 | * hmm_devmem_page_set_drvdata - set per-page driver data field | 549 | * hmm_devmem_page_set_drvdata - set per-page driver data field |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4663ee96cf59..381e872bfde0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -93,7 +93,11 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); | |||
93 | 93 | ||
94 | extern unsigned long transparent_hugepage_flags; | 94 | extern unsigned long transparent_hugepage_flags; |
95 | 95 | ||
96 | static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) | 96 | /* |
97 | * to be used on vmas which are known to support THP. | ||
98 | * Use transparent_hugepage_enabled otherwise | ||
99 | */ | ||
100 | static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) | ||
97 | { | 101 | { |
98 | if (vma->vm_flags & VM_NOHUGEPAGE) | 102 | if (vma->vm_flags & VM_NOHUGEPAGE) |
99 | return false; | 103 | return false; |
@@ -117,6 +121,8 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) | |||
117 | return false; | 121 | return false; |
118 | } | 122 | } |
119 | 123 | ||
124 | bool transparent_hugepage_enabled(struct vm_area_struct *vma); | ||
125 | |||
120 | #define transparent_hugepage_use_zero_page() \ | 126 | #define transparent_hugepage_use_zero_page() \ |
121 | (transparent_hugepage_flags & \ | 127 | (transparent_hugepage_flags & \ |
122 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) | 128 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) |
@@ -257,6 +263,11 @@ static inline bool thp_migration_supported(void) | |||
257 | 263 | ||
258 | #define hpage_nr_pages(x) 1 | 264 | #define hpage_nr_pages(x) 1 |
259 | 265 | ||
266 | static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) | ||
267 | { | ||
268 | return false; | ||
269 | } | ||
270 | |||
260 | static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) | 271 | static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) |
261 | { | 272 | { |
262 | return false; | 273 | return false; |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 46aae129917c..b40ea104dd36 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -14,13 +14,13 @@ struct task_struct; | |||
14 | #include <asm/kasan.h> | 14 | #include <asm/kasan.h> |
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | 16 | ||
17 | extern unsigned char kasan_zero_page[PAGE_SIZE]; | 17 | extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; |
18 | extern pte_t kasan_zero_pte[PTRS_PER_PTE]; | 18 | extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; |
19 | extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; | 19 | extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; |
20 | extern pud_t kasan_zero_pud[PTRS_PER_PUD]; | 20 | extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; |
21 | extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D]; | 21 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; |
22 | 22 | ||
23 | int kasan_populate_zero_shadow(const void *shadow_start, | 23 | int kasan_populate_early_shadow(const void *shadow_start, |
24 | const void *shadow_end); | 24 | const void *shadow_end); |
25 | 25 | ||
26 | static inline void *kasan_mem_to_shadow(const void *addr) | 26 | static inline void *kasan_mem_to_shadow(const void *addr) |
@@ -45,22 +45,24 @@ void kasan_free_pages(struct page *page, unsigned int order); | |||
45 | 45 | ||
46 | void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, | 46 | void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, |
47 | slab_flags_t *flags); | 47 | slab_flags_t *flags); |
48 | void kasan_cache_shrink(struct kmem_cache *cache); | ||
49 | void kasan_cache_shutdown(struct kmem_cache *cache); | ||
50 | 48 | ||
51 | void kasan_poison_slab(struct page *page); | 49 | void kasan_poison_slab(struct page *page); |
52 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); | 50 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); |
53 | void kasan_poison_object_data(struct kmem_cache *cache, void *object); | 51 | void kasan_poison_object_data(struct kmem_cache *cache, void *object); |
54 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); | 52 | void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, |
53 | const void *object); | ||
55 | 54 | ||
56 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); | 55 | void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, |
56 | gfp_t flags); | ||
57 | void kasan_kfree_large(void *ptr, unsigned long ip); | 57 | void kasan_kfree_large(void *ptr, unsigned long ip); |
58 | void kasan_poison_kfree(void *ptr, unsigned long ip); | 58 | void kasan_poison_kfree(void *ptr, unsigned long ip); |
59 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, | 59 | void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, |
60 | gfp_t flags); | 60 | size_t size, gfp_t flags); |
61 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); | 61 | void * __must_check kasan_krealloc(const void *object, size_t new_size, |
62 | gfp_t flags); | ||
62 | 63 | ||
63 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); | 64 | void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, |
65 | gfp_t flags); | ||
64 | bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); | 66 | bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); |
65 | 67 | ||
66 | struct kasan_cache { | 68 | struct kasan_cache { |
@@ -97,27 +99,40 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} | |||
97 | static inline void kasan_cache_create(struct kmem_cache *cache, | 99 | static inline void kasan_cache_create(struct kmem_cache *cache, |
98 | unsigned int *size, | 100 | unsigned int *size, |
99 | slab_flags_t *flags) {} | 101 | slab_flags_t *flags) {} |
100 | static inline void kasan_cache_shrink(struct kmem_cache *cache) {} | ||
101 | static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} | ||
102 | 102 | ||
103 | static inline void kasan_poison_slab(struct page *page) {} | 103 | static inline void kasan_poison_slab(struct page *page) {} |
104 | static inline void kasan_unpoison_object_data(struct kmem_cache *cache, | 104 | static inline void kasan_unpoison_object_data(struct kmem_cache *cache, |
105 | void *object) {} | 105 | void *object) {} |
106 | static inline void kasan_poison_object_data(struct kmem_cache *cache, | 106 | static inline void kasan_poison_object_data(struct kmem_cache *cache, |
107 | void *object) {} | 107 | void *object) {} |
108 | static inline void kasan_init_slab_obj(struct kmem_cache *cache, | 108 | static inline void *kasan_init_slab_obj(struct kmem_cache *cache, |
109 | const void *object) {} | 109 | const void *object) |
110 | { | ||
111 | return (void *)object; | ||
112 | } | ||
110 | 113 | ||
111 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} | 114 | static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) |
115 | { | ||
116 | return ptr; | ||
117 | } | ||
112 | static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} | 118 | static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} |
113 | static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} | 119 | static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} |
114 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, | 120 | static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, |
115 | size_t size, gfp_t flags) {} | 121 | size_t size, gfp_t flags) |
116 | static inline void kasan_krealloc(const void *object, size_t new_size, | 122 | { |
117 | gfp_t flags) {} | 123 | return (void *)object; |
124 | } | ||
125 | static inline void *kasan_krealloc(const void *object, size_t new_size, | ||
126 | gfp_t flags) | ||
127 | { | ||
128 | return (void *)object; | ||
129 | } | ||
118 | 130 | ||
119 | static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, | 131 | static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, |
120 | gfp_t flags) {} | 132 | gfp_t flags) |
133 | { | ||
134 | return object; | ||
135 | } | ||
121 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object, | 136 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object, |
122 | unsigned long ip) | 137 | unsigned long ip) |
123 | { | 138 | { |
@@ -140,4 +155,40 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } | |||
140 | 155 | ||
141 | #endif /* CONFIG_KASAN */ | 156 | #endif /* CONFIG_KASAN */ |
142 | 157 | ||
158 | #ifdef CONFIG_KASAN_GENERIC | ||
159 | |||
160 | #define KASAN_SHADOW_INIT 0 | ||
161 | |||
162 | void kasan_cache_shrink(struct kmem_cache *cache); | ||
163 | void kasan_cache_shutdown(struct kmem_cache *cache); | ||
164 | |||
165 | #else /* CONFIG_KASAN_GENERIC */ | ||
166 | |||
167 | static inline void kasan_cache_shrink(struct kmem_cache *cache) {} | ||
168 | static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} | ||
169 | |||
170 | #endif /* CONFIG_KASAN_GENERIC */ | ||
171 | |||
172 | #ifdef CONFIG_KASAN_SW_TAGS | ||
173 | |||
174 | #define KASAN_SHADOW_INIT 0xFF | ||
175 | |||
176 | void kasan_init_tags(void); | ||
177 | |||
178 | void *kasan_reset_tag(const void *addr); | ||
179 | |||
180 | void kasan_report(unsigned long addr, size_t size, | ||
181 | bool is_write, unsigned long ip); | ||
182 | |||
183 | #else /* CONFIG_KASAN_SW_TAGS */ | ||
184 | |||
185 | static inline void kasan_init_tags(void) { } | ||
186 | |||
187 | static inline void *kasan_reset_tag(const void *addr) | ||
188 | { | ||
189 | return (void *)addr; | ||
190 | } | ||
191 | |||
192 | #endif /* CONFIG_KASAN_SW_TAGS */ | ||
193 | |||
143 | #endif /* LINUX_KASAN_H */ | 194 | #endif /* LINUX_KASAN_H */ |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index aee299a6aa76..64c41cf45590 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -154,7 +154,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, | |||
154 | void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, | 154 | void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, |
155 | phys_addr_t *out_end); | 155 | phys_addr_t *out_end); |
156 | 156 | ||
157 | void __memblock_free_early(phys_addr_t base, phys_addr_t size); | ||
158 | void __memblock_free_late(phys_addr_t base, phys_addr_t size); | 157 | void __memblock_free_late(phys_addr_t base, phys_addr_t size); |
159 | 158 | ||
160 | /** | 159 | /** |
@@ -320,6 +319,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) | |||
320 | /* Flags for memblock allocation APIs */ | 319 | /* Flags for memblock allocation APIs */ |
321 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) | 320 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
322 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 | 321 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
322 | #define MEMBLOCK_ALLOC_KASAN 1 | ||
323 | 323 | ||
324 | /* We are using top down, so it is safe to use 0 here */ | 324 | /* We are using top down, so it is safe to use 0 here */ |
325 | #define MEMBLOCK_LOW_LIMIT 0 | 325 | #define MEMBLOCK_LOW_LIMIT 0 |
@@ -414,13 +414,13 @@ static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, | |||
414 | static inline void __init memblock_free_early(phys_addr_t base, | 414 | static inline void __init memblock_free_early(phys_addr_t base, |
415 | phys_addr_t size) | 415 | phys_addr_t size) |
416 | { | 416 | { |
417 | __memblock_free_early(base, size); | 417 | memblock_free(base, size); |
418 | } | 418 | } |
419 | 419 | ||
420 | static inline void __init memblock_free_early_nid(phys_addr_t base, | 420 | static inline void __init memblock_free_early_nid(phys_addr_t base, |
421 | phys_addr_t size, int nid) | 421 | phys_addr_t size, int nid) |
422 | { | 422 | { |
423 | __memblock_free_early(base, size); | 423 | memblock_free(base, size); |
424 | } | 424 | } |
425 | 425 | ||
426 | static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) | 426 | static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7ab2120155a4..83ae11cbd12c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -526,9 +526,11 @@ void mem_cgroup_handle_over_high(void); | |||
526 | 526 | ||
527 | unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); | 527 | unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); |
528 | 528 | ||
529 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | 529 | void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, |
530 | struct task_struct *p); | 530 | struct task_struct *p); |
531 | 531 | ||
532 | void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); | ||
533 | |||
532 | static inline void mem_cgroup_enter_user_fault(void) | 534 | static inline void mem_cgroup_enter_user_fault(void) |
533 | { | 535 | { |
534 | WARN_ON(current->in_user_fault); | 536 | WARN_ON(current->in_user_fault); |
@@ -970,7 +972,12 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) | |||
970 | } | 972 | } |
971 | 973 | ||
972 | static inline void | 974 | static inline void |
973 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | 975 | mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) |
976 | { | ||
977 | } | ||
978 | |||
979 | static inline void | ||
980 | mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) | ||
974 | { | 981 | { |
975 | } | 982 | } |
976 | 983 | ||
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ffd9cd10fcf3..07da5c6c5ba0 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -107,8 +107,8 @@ static inline bool movable_node_is_enabled(void) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | #ifdef CONFIG_MEMORY_HOTREMOVE | 109 | #ifdef CONFIG_MEMORY_HOTREMOVE |
110 | extern int arch_remove_memory(u64 start, u64 size, | 110 | extern int arch_remove_memory(int nid, u64 start, u64 size, |
111 | struct vmem_altmap *altmap); | 111 | struct vmem_altmap *altmap); |
112 | extern int __remove_pages(struct zone *zone, unsigned long start_pfn, | 112 | extern int __remove_pages(struct zone *zone, unsigned long start_pfn, |
113 | unsigned long nr_pages, struct vmem_altmap *altmap); | 113 | unsigned long nr_pages, struct vmem_altmap *altmap); |
114 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 114 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
@@ -326,15 +326,14 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | |||
326 | void *arg, int (*func)(struct memory_block *, void *)); | 326 | void *arg, int (*func)(struct memory_block *, void *)); |
327 | extern int __add_memory(int nid, u64 start, u64 size); | 327 | extern int __add_memory(int nid, u64 start, u64 size); |
328 | extern int add_memory(int nid, u64 start, u64 size); | 328 | extern int add_memory(int nid, u64 start, u64 size); |
329 | extern int add_memory_resource(int nid, struct resource *resource, bool online); | 329 | extern int add_memory_resource(int nid, struct resource *resource); |
330 | extern int arch_add_memory(int nid, u64 start, u64 size, | 330 | extern int arch_add_memory(int nid, u64 start, u64 size, |
331 | struct vmem_altmap *altmap, bool want_memblock); | 331 | struct vmem_altmap *altmap, bool want_memblock); |
332 | extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, | 332 | extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, |
333 | unsigned long nr_pages, struct vmem_altmap *altmap); | 333 | unsigned long nr_pages, struct vmem_altmap *altmap); |
334 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); | ||
335 | extern bool is_memblock_offlined(struct memory_block *mem); | 334 | extern bool is_memblock_offlined(struct memory_block *mem); |
336 | extern int sparse_add_one_section(struct pglist_data *pgdat, | 335 | extern int sparse_add_one_section(int nid, unsigned long start_pfn, |
337 | unsigned long start_pfn, struct vmem_altmap *altmap); | 336 | struct vmem_altmap *altmap); |
338 | extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | 337 | extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, |
339 | unsigned long map_offset, struct vmem_altmap *altmap); | 338 | unsigned long map_offset, struct vmem_altmap *altmap); |
340 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, | 339 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, |
diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 0ac69ddf5fc4..f0628660d541 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/ioport.h> | 4 | #include <linux/ioport.h> |
5 | #include <linux/percpu-refcount.h> | 5 | #include <linux/percpu-refcount.h> |
6 | 6 | ||
7 | #include <asm/pgtable.h> | ||
8 | |||
9 | struct resource; | 7 | struct resource; |
10 | struct device; | 8 | struct device; |
11 | 9 | ||
@@ -66,62 +64,34 @@ enum memory_type { | |||
66 | }; | 64 | }; |
67 | 65 | ||
68 | /* | 66 | /* |
69 | * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two | ||
70 | * callbacks: | ||
71 | * page_fault() | ||
72 | * page_free() | ||
73 | * | ||
74 | * Additional notes about MEMORY_DEVICE_PRIVATE may be found in | 67 | * Additional notes about MEMORY_DEVICE_PRIVATE may be found in |
75 | * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief | 68 | * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief |
76 | * explanation in include/linux/memory_hotplug.h. | 69 | * explanation in include/linux/memory_hotplug.h. |
77 | * | 70 | * |
78 | * The page_fault() callback must migrate page back, from device memory to | ||
79 | * system memory, so that the CPU can access it. This might fail for various | ||
80 | * reasons (device issues, device have been unplugged, ...). When such error | ||
81 | * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and | ||
82 | * set the CPU page table entry to "poisoned". | ||
83 | * | ||
84 | * Note that because memory cgroup charges are transferred to the device memory, | ||
85 | * this should never fail due to memory restrictions. However, allocation | ||
86 | * of a regular system page might still fail because we are out of memory. If | ||
87 | * that happens, the page_fault() callback must return VM_FAULT_OOM. | ||
88 | * | ||
89 | * The page_fault() callback can also try to migrate back multiple pages in one | ||
90 | * chunk, as an optimization. It must, however, prioritize the faulting address | ||
91 | * over all the others. | ||
92 | * | ||
93 | * | ||
94 | * The page_free() callback is called once the page refcount reaches 1 | 71 | * The page_free() callback is called once the page refcount reaches 1 |
95 | * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. | 72 | * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. |
96 | * This allows the device driver to implement its own memory management.) | 73 | * This allows the device driver to implement its own memory management.) |
97 | * | ||
98 | * For MEMORY_DEVICE_PUBLIC only the page_free() callback matter. | ||
99 | */ | 74 | */ |
100 | typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, | ||
101 | unsigned long addr, | ||
102 | const struct page *page, | ||
103 | unsigned int flags, | ||
104 | pmd_t *pmdp); | ||
105 | typedef void (*dev_page_free_t)(struct page *page, void *data); | 75 | typedef void (*dev_page_free_t)(struct page *page, void *data); |
106 | 76 | ||
107 | /** | 77 | /** |
108 | * struct dev_pagemap - metadata for ZONE_DEVICE mappings | 78 | * struct dev_pagemap - metadata for ZONE_DEVICE mappings |
109 | * @page_fault: callback when CPU fault on an unaddressable device page | ||
110 | * @page_free: free page callback when page refcount reaches 1 | 79 | * @page_free: free page callback when page refcount reaches 1 |
111 | * @altmap: pre-allocated/reserved memory for vmemmap allocations | 80 | * @altmap: pre-allocated/reserved memory for vmemmap allocations |
112 | * @res: physical address range covered by @ref | 81 | * @res: physical address range covered by @ref |
113 | * @ref: reference count that pins the devm_memremap_pages() mapping | 82 | * @ref: reference count that pins the devm_memremap_pages() mapping |
83 | * @kill: callback to transition @ref to the dead state | ||
114 | * @dev: host device of the mapping for debug | 84 | * @dev: host device of the mapping for debug |
115 | * @data: private data pointer for page_free() | 85 | * @data: private data pointer for page_free() |
116 | * @type: memory type: see MEMORY_* in memory_hotplug.h | 86 | * @type: memory type: see MEMORY_* in memory_hotplug.h |
117 | */ | 87 | */ |
118 | struct dev_pagemap { | 88 | struct dev_pagemap { |
119 | dev_page_fault_t page_fault; | ||
120 | dev_page_free_t page_free; | 89 | dev_page_free_t page_free; |
121 | struct vmem_altmap altmap; | 90 | struct vmem_altmap altmap; |
122 | bool altmap_valid; | 91 | bool altmap_valid; |
123 | struct resource res; | 92 | struct resource res; |
124 | struct percpu_ref *ref; | 93 | struct percpu_ref *ref; |
94 | void (*kill)(struct percpu_ref *ref); | ||
125 | struct device *dev; | 95 | struct device *dev; |
126 | void *data; | 96 | void *data; |
127 | enum memory_type type; | 97 | enum memory_type type; |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f2b4abbca55e..e13d9bf2f9a5 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -29,7 +29,7 @@ enum migrate_reason { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ | 31 | /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ |
32 | extern char *migrate_reason_names[MR_TYPES]; | 32 | extern const char *migrate_reason_names[MR_TYPES]; |
33 | 33 | ||
34 | static inline struct page *new_page_nodemask(struct page *page, | 34 | static inline struct page *new_page_nodemask(struct page *page, |
35 | int preferred_nid, nodemask_t *nodemask) | 35 | int preferred_nid, nodemask_t *nodemask) |
@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page); | |||
77 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, | 77 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, |
78 | struct page *newpage, struct page *page); | 78 | struct page *newpage, struct page *page); |
79 | extern int migrate_page_move_mapping(struct address_space *mapping, | 79 | extern int migrate_page_move_mapping(struct address_space *mapping, |
80 | struct page *newpage, struct page *page, | 80 | struct page *newpage, struct page *page, enum migrate_mode mode, |
81 | struct buffer_head *head, enum migrate_mode mode, | ||
82 | int extra_count); | 81 | int extra_count); |
83 | #else | 82 | #else |
84 | 83 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 5411de93a363..ea1f12d15365 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -48,7 +48,32 @@ static inline void set_max_mapnr(unsigned long limit) | |||
48 | static inline void set_max_mapnr(unsigned long limit) { } | 48 | static inline void set_max_mapnr(unsigned long limit) { } |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | extern unsigned long totalram_pages; | 51 | extern atomic_long_t _totalram_pages; |
52 | static inline unsigned long totalram_pages(void) | ||
53 | { | ||
54 | return (unsigned long)atomic_long_read(&_totalram_pages); | ||
55 | } | ||
56 | |||
57 | static inline void totalram_pages_inc(void) | ||
58 | { | ||
59 | atomic_long_inc(&_totalram_pages); | ||
60 | } | ||
61 | |||
62 | static inline void totalram_pages_dec(void) | ||
63 | { | ||
64 | atomic_long_dec(&_totalram_pages); | ||
65 | } | ||
66 | |||
67 | static inline void totalram_pages_add(long count) | ||
68 | { | ||
69 | atomic_long_add(count, &_totalram_pages); | ||
70 | } | ||
71 | |||
72 | static inline void totalram_pages_set(long val) | ||
73 | { | ||
74 | atomic_long_set(&_totalram_pages, val); | ||
75 | } | ||
76 | |||
52 | extern void * high_memory; | 77 | extern void * high_memory; |
53 | extern int page_cluster; | 78 | extern int page_cluster; |
54 | 79 | ||
@@ -804,6 +829,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); | |||
804 | #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) | 829 | #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) |
805 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) | 830 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) |
806 | #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) | 831 | #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) |
832 | #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) | ||
807 | 833 | ||
808 | /* | 834 | /* |
809 | * Define the bit shifts to access each section. For non-existent | 835 | * Define the bit shifts to access each section. For non-existent |
@@ -814,6 +840,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); | |||
814 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) | 840 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) |
815 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) | 841 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) |
816 | #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) | 842 | #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) |
843 | #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) | ||
817 | 844 | ||
818 | /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ | 845 | /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ |
819 | #ifdef NODE_NOT_IN_PAGE_FLAGS | 846 | #ifdef NODE_NOT_IN_PAGE_FLAGS |
@@ -836,6 +863,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); | |||
836 | #define NODES_MASK ((1UL << NODES_WIDTH) - 1) | 863 | #define NODES_MASK ((1UL << NODES_WIDTH) - 1) |
837 | #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) | 864 | #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) |
838 | #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) | 865 | #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) |
866 | #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) | ||
839 | #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) | 867 | #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) |
840 | 868 | ||
841 | static inline enum zone_type page_zonenum(const struct page *page) | 869 | static inline enum zone_type page_zonenum(const struct page *page) |
@@ -1101,6 +1129,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) | |||
1101 | } | 1129 | } |
1102 | #endif /* CONFIG_NUMA_BALANCING */ | 1130 | #endif /* CONFIG_NUMA_BALANCING */ |
1103 | 1131 | ||
1132 | #ifdef CONFIG_KASAN_SW_TAGS | ||
1133 | static inline u8 page_kasan_tag(const struct page *page) | ||
1134 | { | ||
1135 | return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; | ||
1136 | } | ||
1137 | |||
1138 | static inline void page_kasan_tag_set(struct page *page, u8 tag) | ||
1139 | { | ||
1140 | page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); | ||
1141 | page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; | ||
1142 | } | ||
1143 | |||
1144 | static inline void page_kasan_tag_reset(struct page *page) | ||
1145 | { | ||
1146 | page_kasan_tag_set(page, 0xff); | ||
1147 | } | ||
1148 | #else | ||
1149 | static inline u8 page_kasan_tag(const struct page *page) | ||
1150 | { | ||
1151 | return 0xff; | ||
1152 | } | ||
1153 | |||
1154 | static inline void page_kasan_tag_set(struct page *page, u8 tag) { } | ||
1155 | static inline void page_kasan_tag_reset(struct page *page) { } | ||
1156 | #endif | ||
1157 | |||
1104 | static inline struct zone *page_zone(const struct page *page) | 1158 | static inline struct zone *page_zone(const struct page *page) |
1105 | { | 1159 | { |
1106 | return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; | 1160 | return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; |
@@ -1397,6 +1451,8 @@ struct mm_walk { | |||
1397 | void *private; | 1451 | void *private; |
1398 | }; | 1452 | }; |
1399 | 1453 | ||
1454 | struct mmu_notifier_range; | ||
1455 | |||
1400 | int walk_page_range(unsigned long addr, unsigned long end, | 1456 | int walk_page_range(unsigned long addr, unsigned long end, |
1401 | struct mm_walk *walk); | 1457 | struct mm_walk *walk); |
1402 | int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); | 1458 | int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); |
@@ -1405,8 +1461,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, | |||
1405 | int copy_page_range(struct mm_struct *dst, struct mm_struct *src, | 1461 | int copy_page_range(struct mm_struct *dst, struct mm_struct *src, |
1406 | struct vm_area_struct *vma); | 1462 | struct vm_area_struct *vma); |
1407 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, | 1463 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, |
1408 | unsigned long *start, unsigned long *end, | 1464 | struct mmu_notifier_range *range, |
1409 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); | 1465 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); |
1410 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, | 1466 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
1411 | unsigned long *pfn); | 1467 | unsigned long *pfn); |
1412 | int follow_phys(struct vm_area_struct *vma, unsigned long address, | 1468 | int follow_phys(struct vm_area_struct *vma, unsigned long address, |
@@ -1900,13 +1956,6 @@ static inline bool ptlock_init(struct page *page) | |||
1900 | return true; | 1956 | return true; |
1901 | } | 1957 | } |
1902 | 1958 | ||
1903 | /* Reset page->mapping so free_pages_check won't complain. */ | ||
1904 | static inline void pte_lock_deinit(struct page *page) | ||
1905 | { | ||
1906 | page->mapping = NULL; | ||
1907 | ptlock_free(page); | ||
1908 | } | ||
1909 | |||
1910 | #else /* !USE_SPLIT_PTE_PTLOCKS */ | 1959 | #else /* !USE_SPLIT_PTE_PTLOCKS */ |
1911 | /* | 1960 | /* |
1912 | * We use mm->page_table_lock to guard all pagetable pages of the mm. | 1961 | * We use mm->page_table_lock to guard all pagetable pages of the mm. |
@@ -1917,7 +1966,7 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) | |||
1917 | } | 1966 | } |
1918 | static inline void ptlock_cache_init(void) {} | 1967 | static inline void ptlock_cache_init(void) {} |
1919 | static inline bool ptlock_init(struct page *page) { return true; } | 1968 | static inline bool ptlock_init(struct page *page) { return true; } |
1920 | static inline void pte_lock_deinit(struct page *page) {} | 1969 | static inline void ptlock_free(struct page *page) {} |
1921 | #endif /* USE_SPLIT_PTE_PTLOCKS */ | 1970 | #endif /* USE_SPLIT_PTE_PTLOCKS */ |
1922 | 1971 | ||
1923 | static inline void pgtable_init(void) | 1972 | static inline void pgtable_init(void) |
@@ -1937,7 +1986,7 @@ static inline bool pgtable_page_ctor(struct page *page) | |||
1937 | 1986 | ||
1938 | static inline void pgtable_page_dtor(struct page *page) | 1987 | static inline void pgtable_page_dtor(struct page *page) |
1939 | { | 1988 | { |
1940 | pte_lock_deinit(page); | 1989 | ptlock_free(page); |
1941 | __ClearPageTable(page); | 1990 | __ClearPageTable(page); |
1942 | dec_zone_page_state(page, NR_PAGETABLE); | 1991 | dec_zone_page_state(page, NR_PAGETABLE); |
1943 | } | 1992 | } |
@@ -2054,7 +2103,7 @@ extern void free_initmem(void); | |||
2054 | * Return pages freed into the buddy system. | 2103 | * Return pages freed into the buddy system. |
2055 | */ | 2104 | */ |
2056 | extern unsigned long free_reserved_area(void *start, void *end, | 2105 | extern unsigned long free_reserved_area(void *start, void *end, |
2057 | int poison, char *s); | 2106 | int poison, const char *s); |
2058 | 2107 | ||
2059 | #ifdef CONFIG_HIGHMEM | 2108 | #ifdef CONFIG_HIGHMEM |
2060 | /* | 2109 | /* |
@@ -2202,6 +2251,7 @@ extern void zone_pcp_reset(struct zone *zone); | |||
2202 | 2251 | ||
2203 | /* page_alloc.c */ | 2252 | /* page_alloc.c */ |
2204 | extern int min_free_kbytes; | 2253 | extern int min_free_kbytes; |
2254 | extern int watermark_boost_factor; | ||
2205 | extern int watermark_scale_factor; | 2255 | extern int watermark_scale_factor; |
2206 | 2256 | ||
2207 | /* nommu.c */ | 2257 | /* nommu.c */ |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 9893a6432adf..4050ec1c3b45 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -25,6 +25,13 @@ struct mmu_notifier_mm { | |||
25 | spinlock_t lock; | 25 | spinlock_t lock; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | struct mmu_notifier_range { | ||
29 | struct mm_struct *mm; | ||
30 | unsigned long start; | ||
31 | unsigned long end; | ||
32 | bool blockable; | ||
33 | }; | ||
34 | |||
28 | struct mmu_notifier_ops { | 35 | struct mmu_notifier_ops { |
29 | /* | 36 | /* |
30 | * Called either by mmu_notifier_unregister or when the mm is | 37 | * Called either by mmu_notifier_unregister or when the mm is |
@@ -146,12 +153,9 @@ struct mmu_notifier_ops { | |||
146 | * | 153 | * |
147 | */ | 154 | */ |
148 | int (*invalidate_range_start)(struct mmu_notifier *mn, | 155 | int (*invalidate_range_start)(struct mmu_notifier *mn, |
149 | struct mm_struct *mm, | 156 | const struct mmu_notifier_range *range); |
150 | unsigned long start, unsigned long end, | ||
151 | bool blockable); | ||
152 | void (*invalidate_range_end)(struct mmu_notifier *mn, | 157 | void (*invalidate_range_end)(struct mmu_notifier *mn, |
153 | struct mm_struct *mm, | 158 | const struct mmu_notifier_range *range); |
154 | unsigned long start, unsigned long end); | ||
155 | 159 | ||
156 | /* | 160 | /* |
157 | * invalidate_range() is either called between | 161 | * invalidate_range() is either called between |
@@ -216,11 +220,8 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, | |||
216 | unsigned long address); | 220 | unsigned long address); |
217 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, | 221 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, |
218 | unsigned long address, pte_t pte); | 222 | unsigned long address, pte_t pte); |
219 | extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm, | 223 | extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); |
220 | unsigned long start, unsigned long end, | 224 | extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r, |
221 | bool blockable); | ||
222 | extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | ||
223 | unsigned long start, unsigned long end, | ||
224 | bool only_end); | 225 | bool only_end); |
225 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, | 226 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, |
226 | unsigned long start, unsigned long end); | 227 | unsigned long start, unsigned long end); |
@@ -264,33 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, | |||
264 | __mmu_notifier_change_pte(mm, address, pte); | 265 | __mmu_notifier_change_pte(mm, address, pte); |
265 | } | 266 | } |
266 | 267 | ||
267 | static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, | 268 | static inline void |
268 | unsigned long start, unsigned long end) | 269 | mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) |
269 | { | 270 | { |
270 | if (mm_has_notifiers(mm)) | 271 | if (mm_has_notifiers(range->mm)) { |
271 | __mmu_notifier_invalidate_range_start(mm, start, end, true); | 272 | range->blockable = true; |
273 | __mmu_notifier_invalidate_range_start(range); | ||
274 | } | ||
272 | } | 275 | } |
273 | 276 | ||
274 | static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, | 277 | static inline int |
275 | unsigned long start, unsigned long end) | 278 | mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) |
276 | { | 279 | { |
277 | if (mm_has_notifiers(mm)) | 280 | if (mm_has_notifiers(range->mm)) { |
278 | return __mmu_notifier_invalidate_range_start(mm, start, end, false); | 281 | range->blockable = false; |
282 | return __mmu_notifier_invalidate_range_start(range); | ||
283 | } | ||
279 | return 0; | 284 | return 0; |
280 | } | 285 | } |
281 | 286 | ||
282 | static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | 287 | static inline void |
283 | unsigned long start, unsigned long end) | 288 | mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) |
284 | { | 289 | { |
285 | if (mm_has_notifiers(mm)) | 290 | if (mm_has_notifiers(range->mm)) |
286 | __mmu_notifier_invalidate_range_end(mm, start, end, false); | 291 | __mmu_notifier_invalidate_range_end(range, false); |
287 | } | 292 | } |
288 | 293 | ||
289 | static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, | 294 | static inline void |
290 | unsigned long start, unsigned long end) | 295 | mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) |
291 | { | 296 | { |
292 | if (mm_has_notifiers(mm)) | 297 | if (mm_has_notifiers(range->mm)) |
293 | __mmu_notifier_invalidate_range_end(mm, start, end, true); | 298 | __mmu_notifier_invalidate_range_end(range, true); |
294 | } | 299 | } |
295 | 300 | ||
296 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | 301 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, |
@@ -311,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
311 | __mmu_notifier_mm_destroy(mm); | 316 | __mmu_notifier_mm_destroy(mm); |
312 | } | 317 | } |
313 | 318 | ||
319 | |||
320 | static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, | ||
321 | struct mm_struct *mm, | ||
322 | unsigned long start, | ||
323 | unsigned long end) | ||
324 | { | ||
325 | range->mm = mm; | ||
326 | range->start = start; | ||
327 | range->end = end; | ||
328 | } | ||
329 | |||
314 | #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ | 330 | #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ |
315 | ({ \ | 331 | ({ \ |
316 | int __young; \ | 332 | int __young; \ |
@@ -420,10 +436,26 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
420 | 436 | ||
421 | extern void mmu_notifier_call_srcu(struct rcu_head *rcu, | 437 | extern void mmu_notifier_call_srcu(struct rcu_head *rcu, |
422 | void (*func)(struct rcu_head *rcu)); | 438 | void (*func)(struct rcu_head *rcu)); |
423 | extern void mmu_notifier_synchronize(void); | ||
424 | 439 | ||
425 | #else /* CONFIG_MMU_NOTIFIER */ | 440 | #else /* CONFIG_MMU_NOTIFIER */ |
426 | 441 | ||
442 | struct mmu_notifier_range { | ||
443 | unsigned long start; | ||
444 | unsigned long end; | ||
445 | }; | ||
446 | |||
447 | static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, | ||
448 | unsigned long start, | ||
449 | unsigned long end) | ||
450 | { | ||
451 | range->start = start; | ||
452 | range->end = end; | ||
453 | } | ||
454 | |||
455 | #define mmu_notifier_range_init(range, mm, start, end) \ | ||
456 | _mmu_notifier_range_init(range, start, end) | ||
457 | |||
458 | |||
427 | static inline int mm_has_notifiers(struct mm_struct *mm) | 459 | static inline int mm_has_notifiers(struct mm_struct *mm) |
428 | { | 460 | { |
429 | return 0; | 461 | return 0; |
@@ -451,24 +483,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, | |||
451 | { | 483 | { |
452 | } | 484 | } |
453 | 485 | ||
454 | static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, | 486 | static inline void |
455 | unsigned long start, unsigned long end) | 487 | mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) |
456 | { | 488 | { |
457 | } | 489 | } |
458 | 490 | ||
459 | static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, | 491 | static inline int |
460 | unsigned long start, unsigned long end) | 492 | mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) |
461 | { | 493 | { |
462 | return 0; | 494 | return 0; |
463 | } | 495 | } |
464 | 496 | ||
465 | static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | 497 | static inline |
466 | unsigned long start, unsigned long end) | 498 | void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) |
467 | { | 499 | { |
468 | } | 500 | } |
469 | 501 | ||
470 | static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, | 502 | static inline void |
471 | unsigned long start, unsigned long end) | 503 | mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) |
472 | { | 504 | { |
473 | } | 505 | } |
474 | 506 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 077d797d1f60..cc4a507d7ca4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -65,7 +65,7 @@ enum migratetype { | |||
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ | 67 | /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ |
68 | extern char * const migratetype_names[MIGRATE_TYPES]; | 68 | extern const char * const migratetype_names[MIGRATE_TYPES]; |
69 | 69 | ||
70 | #ifdef CONFIG_CMA | 70 | #ifdef CONFIG_CMA |
71 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) | 71 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) |
@@ -269,9 +269,10 @@ enum zone_watermarks { | |||
269 | NR_WMARK | 269 | NR_WMARK |
270 | }; | 270 | }; |
271 | 271 | ||
272 | #define min_wmark_pages(z) (z->watermark[WMARK_MIN]) | 272 | #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) |
273 | #define low_wmark_pages(z) (z->watermark[WMARK_LOW]) | 273 | #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) |
274 | #define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) | 274 | #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) |
275 | #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) | ||
275 | 276 | ||
276 | struct per_cpu_pages { | 277 | struct per_cpu_pages { |
277 | int count; /* number of pages in the list */ | 278 | int count; /* number of pages in the list */ |
@@ -362,7 +363,8 @@ struct zone { | |||
362 | /* Read-mostly fields */ | 363 | /* Read-mostly fields */ |
363 | 364 | ||
364 | /* zone watermarks, access with *_wmark_pages(zone) macros */ | 365 | /* zone watermarks, access with *_wmark_pages(zone) macros */ |
365 | unsigned long watermark[NR_WMARK]; | 366 | unsigned long _watermark[NR_WMARK]; |
367 | unsigned long watermark_boost; | ||
366 | 368 | ||
367 | unsigned long nr_reserved_highatomic; | 369 | unsigned long nr_reserved_highatomic; |
368 | 370 | ||
@@ -428,14 +430,8 @@ struct zone { | |||
428 | * Write access to present_pages at runtime should be protected by | 430 | * Write access to present_pages at runtime should be protected by |
429 | * mem_hotplug_begin/end(). Any reader who can't tolerant drift of | 431 | * mem_hotplug_begin/end(). Any reader who can't tolerant drift of |
430 | * present_pages should get_online_mems() to get a stable value. | 432 | * present_pages should get_online_mems() to get a stable value. |
431 | * | ||
432 | * Read access to managed_pages should be safe because it's unsigned | ||
433 | * long. Write access to zone->managed_pages and totalram_pages are | ||
434 | * protected by managed_page_count_lock at runtime. Idealy only | ||
435 | * adjust_managed_page_count() should be used instead of directly | ||
436 | * touching zone->managed_pages and totalram_pages. | ||
437 | */ | 433 | */ |
438 | unsigned long managed_pages; | 434 | atomic_long_t managed_pages; |
439 | unsigned long spanned_pages; | 435 | unsigned long spanned_pages; |
440 | unsigned long present_pages; | 436 | unsigned long present_pages; |
441 | 437 | ||
@@ -524,6 +520,11 @@ enum pgdat_flags { | |||
524 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ | 520 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
525 | }; | 521 | }; |
526 | 522 | ||
523 | static inline unsigned long zone_managed_pages(struct zone *zone) | ||
524 | { | ||
525 | return (unsigned long)atomic_long_read(&zone->managed_pages); | ||
526 | } | ||
527 | |||
527 | static inline unsigned long zone_end_pfn(const struct zone *zone) | 528 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
528 | { | 529 | { |
529 | return zone->zone_start_pfn + zone->spanned_pages; | 530 | return zone->zone_start_pfn + zone->spanned_pages; |
@@ -635,9 +636,8 @@ typedef struct pglist_data { | |||
635 | #endif | 636 | #endif |
636 | #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) | 637 | #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) |
637 | /* | 638 | /* |
638 | * Must be held any time you expect node_start_pfn, node_present_pages | 639 | * Must be held any time you expect node_start_pfn, |
639 | * or node_spanned_pages stay constant. Holding this will also | 640 | * node_present_pages, node_spanned_pages or nr_zones to stay constant. |
640 | * guarantee that any pfn_valid() stays that way. | ||
641 | * | 641 | * |
642 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to | 642 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
643 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG | 643 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG |
@@ -691,8 +691,6 @@ typedef struct pglist_data { | |||
691 | * is the first PFN that needs to be initialised. | 691 | * is the first PFN that needs to be initialised. |
692 | */ | 692 | */ |
693 | unsigned long first_deferred_pfn; | 693 | unsigned long first_deferred_pfn; |
694 | /* Number of non-deferred pages */ | ||
695 | unsigned long static_init_pgcnt; | ||
696 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ | 694 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
697 | 695 | ||
698 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 696 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -820,7 +818,7 @@ static inline bool is_dev_zone(const struct zone *zone) | |||
820 | */ | 818 | */ |
821 | static inline bool managed_zone(struct zone *zone) | 819 | static inline bool managed_zone(struct zone *zone) |
822 | { | 820 | { |
823 | return zone->managed_pages; | 821 | return zone_managed_pages(zone); |
824 | } | 822 | } |
825 | 823 | ||
826 | /* Returns true if a zone has memory */ | 824 | /* Returns true if a zone has memory */ |
@@ -890,6 +888,8 @@ static inline int is_highmem(struct zone *zone) | |||
890 | struct ctl_table; | 888 | struct ctl_table; |
891 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | 889 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, |
892 | void __user *, size_t *, loff_t *); | 890 | void __user *, size_t *, loff_t *); |
891 | int watermark_boost_factor_sysctl_handler(struct ctl_table *, int, | ||
892 | void __user *, size_t *, loff_t *); | ||
893 | int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, | 893 | int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, |
894 | void __user *, size_t *, loff_t *); | 894 | void __user *, size_t *, loff_t *); |
895 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; | 895 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 69864a547663..d07992009265 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -15,6 +15,13 @@ struct notifier_block; | |||
15 | struct mem_cgroup; | 15 | struct mem_cgroup; |
16 | struct task_struct; | 16 | struct task_struct; |
17 | 17 | ||
18 | enum oom_constraint { | ||
19 | CONSTRAINT_NONE, | ||
20 | CONSTRAINT_CPUSET, | ||
21 | CONSTRAINT_MEMORY_POLICY, | ||
22 | CONSTRAINT_MEMCG, | ||
23 | }; | ||
24 | |||
18 | /* | 25 | /* |
19 | * Details of the page allocation that triggered the oom killer that are used to | 26 | * Details of the page allocation that triggered the oom killer that are used to |
20 | * determine what should be killed. | 27 | * determine what should be killed. |
@@ -42,6 +49,9 @@ struct oom_control { | |||
42 | unsigned long totalpages; | 49 | unsigned long totalpages; |
43 | struct task_struct *chosen; | 50 | struct task_struct *chosen; |
44 | unsigned long chosen_points; | 51 | unsigned long chosen_points; |
52 | |||
53 | /* Used to print the constraint info. */ | ||
54 | enum oom_constraint constraint; | ||
45 | }; | 55 | }; |
46 | 56 | ||
47 | extern struct mutex oom_lock; | 57 | extern struct mutex oom_lock; |
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7ec86bf31ce4..1dda31825ec4 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h | |||
@@ -82,6 +82,16 @@ | |||
82 | #define LAST_CPUPID_WIDTH 0 | 82 | #define LAST_CPUPID_WIDTH 0 |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #ifdef CONFIG_KASAN_SW_TAGS | ||
86 | #define KASAN_TAG_WIDTH 8 | ||
87 | #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ | ||
88 | > BITS_PER_LONG - NR_PAGEFLAGS | ||
89 | #error "KASAN: not enough bits in page flags for tag" | ||
90 | #endif | ||
91 | #else | ||
92 | #define KASAN_TAG_WIDTH 0 | ||
93 | #endif | ||
94 | |||
85 | /* | 95 | /* |
86 | * We are going to use the flags for the page to node mapping if its in | 96 | * We are going to use the flags for the page to node mapping if its in |
87 | * there. This includes the case where there is no node, so it is implicit. | 97 | * there. This includes the case where there is no node, so it is implicit. |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 50ce1bddaf56..39b4494e29f1 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -669,6 +669,7 @@ PAGEFLAG_FALSE(DoubleMap) | |||
669 | 669 | ||
670 | #define PAGE_TYPE_BASE 0xf0000000 | 670 | #define PAGE_TYPE_BASE 0xf0000000 |
671 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ | 671 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ |
672 | #define PAGE_MAPCOUNT_RESERVE -128 | ||
672 | #define PG_buddy 0x00000080 | 673 | #define PG_buddy 0x00000080 |
673 | #define PG_balloon 0x00000100 | 674 | #define PG_balloon 0x00000100 |
674 | #define PG_kmemcg 0x00000200 | 675 | #define PG_kmemcg 0x00000200 |
@@ -677,6 +678,11 @@ PAGEFLAG_FALSE(DoubleMap) | |||
677 | #define PageType(page, flag) \ | 678 | #define PageType(page, flag) \ |
678 | ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) | 679 | ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) |
679 | 680 | ||
681 | static inline int page_has_type(struct page *page) | ||
682 | { | ||
683 | return (int)page->page_type < PAGE_MAPCOUNT_RESERVE; | ||
684 | } | ||
685 | |||
680 | #define PAGE_TYPE_OPS(uname, lname) \ | 686 | #define PAGE_TYPE_OPS(uname, lname) \ |
681 | static __always_inline int Page##uname(struct page *page) \ | 687 | static __always_inline int Page##uname(struct page *page) \ |
682 | { \ | 688 | { \ |
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4ae347cbc36d..4eb26d278046 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h | |||
@@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype) | |||
30 | } | 30 | } |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #define SKIP_HWPOISON 0x1 | ||
34 | #define REPORT_FAILURE 0x2 | ||
35 | |||
33 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | 36 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, |
34 | int migratetype, bool skip_hwpoisoned_pages); | 37 | int migratetype, int flags); |
35 | void set_pageblock_migratetype(struct page *page, int migratetype); | 38 | void set_pageblock_migratetype(struct page *page, int migratetype); |
36 | int move_freepages_block(struct zone *zone, struct page *page, | 39 | int move_freepages_block(struct zone *zone, struct page *page, |
37 | int migratetype, int *num_movable); | 40 | int migratetype, int *num_movable); |
@@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page, | |||
44 | * For isolating all pages in the range finally, the caller have to | 47 | * For isolating all pages in the range finally, the caller have to |
45 | * free all pages in the range. test_page_isolated() can be used for | 48 | * free all pages in the range. test_page_isolated() can be used for |
46 | * test it. | 49 | * test it. |
50 | * | ||
51 | * The following flags are allowed (they can be combined in a bit mask) | ||
52 | * SKIP_HWPOISON - ignore hwpoison pages | ||
53 | * REPORT_FAILURE - report details about the failure to isolate the range | ||
47 | */ | 54 | */ |
48 | int | 55 | int |
49 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 56 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
50 | unsigned migratetype, bool skip_hwpoisoned_pages); | 57 | unsigned migratetype, int flags); |
51 | 58 | ||
52 | /* | 59 | /* |
53 | * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. | 60 | * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. |
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 9132c5cb41f1..06a66327333d 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h | |||
@@ -25,10 +25,11 @@ | |||
25 | 25 | ||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | 27 | ||
28 | #define PB_migratetype_bits 3 | ||
28 | /* Bit indices that affect a whole block of pages */ | 29 | /* Bit indices that affect a whole block of pages */ |
29 | enum pageblock_bits { | 30 | enum pageblock_bits { |
30 | PB_migrate, | 31 | PB_migrate, |
31 | PB_migrate_end = PB_migrate + 3 - 1, | 32 | PB_migrate_end = PB_migrate + PB_migratetype_bits - 1, |
32 | /* 3 bits required for migrate types */ | 33 | /* 3 bits required for migrate types */ |
33 | PB_migrate_skip,/* If set the block is skipped by compaction */ | 34 | PB_migrate_skip,/* If set the block is skipped by compaction */ |
34 | 35 | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 226f96f0dee0..e2d7039af6a3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -537,6 +537,8 @@ static inline int wait_on_page_locked_killable(struct page *page) | |||
537 | return wait_on_page_bit_killable(compound_head(page), PG_locked); | 537 | return wait_on_page_bit_killable(compound_head(page), PG_locked); |
538 | } | 538 | } |
539 | 539 | ||
540 | extern void put_and_wait_on_page_locked(struct page *page); | ||
541 | |||
540 | /* | 542 | /* |
541 | * Wait for a page to complete writeback | 543 | * Wait for a page to complete writeback |
542 | */ | 544 | */ |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 918f374e7156..6d9bd6fc0c57 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -314,22 +314,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; | |||
314 | 314 | ||
315 | static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) | 315 | static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) |
316 | { | 316 | { |
317 | int is_dma = 0; | ||
318 | int type_dma = 0; | ||
319 | int is_reclaimable; | ||
320 | |||
321 | #ifdef CONFIG_ZONE_DMA | 317 | #ifdef CONFIG_ZONE_DMA |
322 | is_dma = !!(flags & __GFP_DMA); | 318 | /* |
323 | type_dma = is_dma * KMALLOC_DMA; | 319 | * The most common case is KMALLOC_NORMAL, so test for it |
324 | #endif | 320 | * with a single branch for both flags. |
325 | 321 | */ | |
326 | is_reclaimable = !!(flags & __GFP_RECLAIMABLE); | 322 | if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0)) |
323 | return KMALLOC_NORMAL; | ||
327 | 324 | ||
328 | /* | 325 | /* |
329 | * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return | 326 | * At least one of the flags has to be set. If both are, __GFP_DMA |
330 | * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE | 327 | * is more important. |
331 | */ | 328 | */ |
332 | return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM; | 329 | return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM; |
330 | #else | ||
331 | return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL; | ||
332 | #endif | ||
333 | } | 333 | } |
334 | 334 | ||
335 | /* | 335 | /* |
@@ -444,7 +444,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, | |||
444 | { | 444 | { |
445 | void *ret = kmem_cache_alloc(s, flags); | 445 | void *ret = kmem_cache_alloc(s, flags); |
446 | 446 | ||
447 | kasan_kmalloc(s, ret, size, flags); | 447 | ret = kasan_kmalloc(s, ret, size, flags); |
448 | return ret; | 448 | return ret; |
449 | } | 449 | } |
450 | 450 | ||
@@ -455,7 +455,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
455 | { | 455 | { |
456 | void *ret = kmem_cache_alloc_node(s, gfpflags, node); | 456 | void *ret = kmem_cache_alloc_node(s, gfpflags, node); |
457 | 457 | ||
458 | kasan_kmalloc(s, ret, size, gfpflags); | 458 | ret = kasan_kmalloc(s, ret, size, gfpflags); |
459 | return ret; | 459 | return ret; |
460 | } | 460 | } |
461 | #endif /* CONFIG_TRACING */ | 461 | #endif /* CONFIG_TRACING */ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 3485c58cfd1c..9a5eafb7145b 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -104,4 +104,17 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, | |||
104 | return object; | 104 | return object; |
105 | } | 105 | } |
106 | 106 | ||
107 | /* | ||
108 | * We want to avoid an expensive divide : (offset / cache->size) | ||
109 | * Using the fact that size is a constant for a particular cache, | ||
110 | * we can replace (offset / cache->size) by | ||
111 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) | ||
112 | */ | ||
113 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, | ||
114 | const struct page *page, void *obj) | ||
115 | { | ||
116 | u32 offset = (obj - page->s_mem); | ||
117 | return reciprocal_divide(offset, cache->reciprocal_buffer_size); | ||
118 | } | ||
119 | |||
107 | #endif /* _LINUX_SLAB_DEF_H */ | 120 | #endif /* _LINUX_SLAB_DEF_H */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index a8f6d5d89524..622025ac1461 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -235,7 +235,6 @@ struct swap_info_struct { | |||
235 | unsigned long flags; /* SWP_USED etc: see above */ | 235 | unsigned long flags; /* SWP_USED etc: see above */ |
236 | signed short prio; /* swap priority of this type */ | 236 | signed short prio; /* swap priority of this type */ |
237 | struct plist_node list; /* entry in swap_active_head */ | 237 | struct plist_node list; /* entry in swap_active_head */ |
238 | struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */ | ||
239 | signed char type; /* strange name for an index */ | 238 | signed char type; /* strange name for an index */ |
240 | unsigned int max; /* extent of the swap_map */ | 239 | unsigned int max; /* extent of the swap_map */ |
241 | unsigned char *swap_map; /* vmalloc'ed array of usage counts */ | 240 | unsigned char *swap_map; /* vmalloc'ed array of usage counts */ |
@@ -276,6 +275,16 @@ struct swap_info_struct { | |||
276 | */ | 275 | */ |
277 | struct work_struct discard_work; /* discard worker */ | 276 | struct work_struct discard_work; /* discard worker */ |
278 | struct swap_cluster_list discard_clusters; /* discard clusters list */ | 277 | struct swap_cluster_list discard_clusters; /* discard clusters list */ |
278 | struct plist_node avail_lists[0]; /* | ||
279 | * entries in swap_avail_heads, one | ||
280 | * entry per node. | ||
281 | * Must be last as the number of the | ||
282 | * array is nr_node_ids, which is not | ||
283 | * a fixed value so have to allocate | ||
284 | * dynamically. | ||
285 | * And it has to be an array so that | ||
286 | * plist_for_each_* can work. | ||
287 | */ | ||
279 | }; | 288 | }; |
280 | 289 | ||
281 | #ifdef CONFIG_64BIT | 290 | #ifdef CONFIG_64BIT |
@@ -310,7 +319,6 @@ void workingset_update_node(struct xa_node *node); | |||
310 | } while (0) | 319 | } while (0) |
311 | 320 | ||
312 | /* linux/mm/page_alloc.c */ | 321 | /* linux/mm/page_alloc.c */ |
313 | extern unsigned long totalram_pages; | ||
314 | extern unsigned long totalreserve_pages; | 322 | extern unsigned long totalreserve_pages; |
315 | extern unsigned long nr_free_buffer_pages(void); | 323 | extern unsigned long nr_free_buffer_pages(void); |
316 | extern unsigned long nr_free_pagecache_pages(void); | 324 | extern unsigned long nr_free_pagecache_pages(void); |
@@ -360,14 +368,8 @@ extern unsigned long vm_total_pages; | |||
360 | extern int node_reclaim_mode; | 368 | extern int node_reclaim_mode; |
361 | extern int sysctl_min_unmapped_ratio; | 369 | extern int sysctl_min_unmapped_ratio; |
362 | extern int sysctl_min_slab_ratio; | 370 | extern int sysctl_min_slab_ratio; |
363 | extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); | ||
364 | #else | 371 | #else |
365 | #define node_reclaim_mode 0 | 372 | #define node_reclaim_mode 0 |
366 | static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, | ||
367 | unsigned int order) | ||
368 | { | ||
369 | return 0; | ||
370 | } | ||
371 | #endif | 373 | #endif |
372 | 374 | ||
373 | extern int page_evictable(struct page *page); | 375 | extern int page_evictable(struct page *page); |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index f25cef84b41d..2db8d60981fe 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -239,11 +239,6 @@ extern unsigned long node_page_state(struct pglist_data *pgdat, | |||
239 | #define node_page_state(node, item) global_node_page_state(item) | 239 | #define node_page_state(node, item) global_node_page_state(item) |
240 | #endif /* CONFIG_NUMA */ | 240 | #endif /* CONFIG_NUMA */ |
241 | 241 | ||
242 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) | ||
243 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) | ||
244 | #define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d) | ||
245 | #define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d)) | ||
246 | |||
247 | #ifdef CONFIG_SMP | 242 | #ifdef CONFIG_SMP |
248 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); | 243 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); |
249 | void __inc_zone_page_state(struct page *, enum zone_stat_item); | 244 | void __inc_zone_page_state(struct page *, enum zone_stat_item); |
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 9e1f42cb57e9..52b073fea17f 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h | |||
@@ -107,6 +107,29 @@ uint32_t xxh32(const void *input, size_t length, uint32_t seed); | |||
107 | */ | 107 | */ |
108 | uint64_t xxh64(const void *input, size_t length, uint64_t seed); | 108 | uint64_t xxh64(const void *input, size_t length, uint64_t seed); |
109 | 109 | ||
110 | /** | ||
111 | * xxhash() - calculate wordsize hash of the input with a given seed | ||
112 | * @input: The data to hash. | ||
113 | * @length: The length of the data to hash. | ||
114 | * @seed: The seed can be used to alter the result predictably. | ||
115 | * | ||
116 | * If the hash does not need to be comparable between machines with | ||
117 | * different word sizes, this function will call whichever of xxh32() | ||
118 | * or xxh64() is faster. | ||
119 | * | ||
120 | * Return: wordsize hash of the data. | ||
121 | */ | ||
122 | |||
123 | static inline unsigned long xxhash(const void *input, size_t length, | ||
124 | uint64_t seed) | ||
125 | { | ||
126 | #if BITS_PER_LONG == 64 | ||
127 | return xxh64(input, length, seed); | ||
128 | #else | ||
129 | return xxh32(input, length, seed); | ||
130 | #endif | ||
131 | } | ||
132 | |||
110 | /*-**************************** | 133 | /*-**************************** |
111 | * Streaming Hash Functions | 134 | * Streaming Hash Functions |
112 | *****************************/ | 135 | *****************************/ |
diff --git a/init/main.c b/init/main.c index 0f8cc626e634..86d894852bef 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -521,6 +521,7 @@ static void __init mm_init(void) | |||
521 | mem_init(); | 521 | mem_init(); |
522 | kmem_cache_init(); | 522 | kmem_cache_init(); |
523 | pgtable_init(); | 523 | pgtable_init(); |
524 | debug_objects_mem_init(); | ||
524 | vmalloc_init(); | 525 | vmalloc_init(); |
525 | ioremap_huge_init(); | 526 | ioremap_huge_init(); |
526 | /* Should be run before the first non-init thread is created */ | 527 | /* Should be run before the first non-init thread is created */ |
@@ -697,7 +698,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
697 | #endif | 698 | #endif |
698 | page_ext_init(); | 699 | page_ext_init(); |
699 | kmemleak_init(); | 700 | kmemleak_init(); |
700 | debug_objects_mem_init(); | ||
701 | setup_per_cpu_pageset(); | 701 | setup_per_cpu_pageset(); |
702 | numa_policy_init(); | 702 | numa_policy_init(); |
703 | acpi_early_init(); | 703 | acpi_early_init(); |
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 266f10cb7222..9510a5b32eaf 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c | |||
@@ -2666,9 +2666,9 @@ void cpuset_print_current_mems_allowed(void) | |||
2666 | rcu_read_lock(); | 2666 | rcu_read_lock(); |
2667 | 2667 | ||
2668 | cgrp = task_cs(current)->css.cgroup; | 2668 | cgrp = task_cs(current)->css.cgroup; |
2669 | pr_info("%s cpuset=", current->comm); | 2669 | pr_cont(",cpuset="); |
2670 | pr_cont_cgroup_name(cgrp); | 2670 | pr_cont_cgroup_name(cgrp); |
2671 | pr_cont(" mems_allowed=%*pbl\n", | 2671 | pr_cont(",mems_allowed=%*pbl", |
2672 | nodemask_pr_args(¤t->mems_allowed)); | 2672 | nodemask_pr_args(¤t->mems_allowed)); |
2673 | 2673 | ||
2674 | rcu_read_unlock(); | 2674 | rcu_read_unlock(); |
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index abbd8da9ac21..8aef47ee7bfa 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -171,11 +171,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
171 | .address = addr, | 171 | .address = addr, |
172 | }; | 172 | }; |
173 | int err; | 173 | int err; |
174 | /* For mmu_notifiers */ | 174 | struct mmu_notifier_range range; |
175 | const unsigned long mmun_start = addr; | ||
176 | const unsigned long mmun_end = addr + PAGE_SIZE; | ||
177 | struct mem_cgroup *memcg; | 175 | struct mem_cgroup *memcg; |
178 | 176 | ||
177 | mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE); | ||
178 | |||
179 | VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page); | 179 | VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page); |
180 | 180 | ||
181 | err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg, | 181 | err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg, |
@@ -186,7 +186,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
186 | /* For try_to_free_swap() and munlock_vma_page() below */ | 186 | /* For try_to_free_swap() and munlock_vma_page() below */ |
187 | lock_page(old_page); | 187 | lock_page(old_page); |
188 | 188 | ||
189 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 189 | mmu_notifier_invalidate_range_start(&range); |
190 | err = -EAGAIN; | 190 | err = -EAGAIN; |
191 | if (!page_vma_mapped_walk(&pvmw)) { | 191 | if (!page_vma_mapped_walk(&pvmw)) { |
192 | mem_cgroup_cancel_charge(new_page, memcg, false); | 192 | mem_cgroup_cancel_charge(new_page, memcg, false); |
@@ -220,7 +220,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
220 | 220 | ||
221 | err = 0; | 221 | err = 0; |
222 | unlock: | 222 | unlock: |
223 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 223 | mmu_notifier_invalidate_range_end(&range); |
224 | unlock_page(old_page); | 224 | unlock_page(old_page); |
225 | return err; | 225 | return err; |
226 | } | 226 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index e2a5156bc9c3..d439c48ecf18 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -744,15 +744,16 @@ void __init __weak arch_task_cache_init(void) { } | |||
744 | static void set_max_threads(unsigned int max_threads_suggested) | 744 | static void set_max_threads(unsigned int max_threads_suggested) |
745 | { | 745 | { |
746 | u64 threads; | 746 | u64 threads; |
747 | unsigned long nr_pages = totalram_pages(); | ||
747 | 748 | ||
748 | /* | 749 | /* |
749 | * The number of threads shall be limited such that the thread | 750 | * The number of threads shall be limited such that the thread |
750 | * structures may only consume a small part of the available memory. | 751 | * structures may only consume a small part of the available memory. |
751 | */ | 752 | */ |
752 | if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) | 753 | if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) |
753 | threads = MAX_THREADS; | 754 | threads = MAX_THREADS; |
754 | else | 755 | else |
755 | threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, | 756 | threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, |
756 | (u64) THREAD_SIZE * 8UL); | 757 | (u64) THREAD_SIZE * 8UL); |
757 | 758 | ||
758 | if (threads > max_threads_suggested) | 759 | if (threads > max_threads_suggested) |
@@ -840,7 +841,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) | |||
840 | { | 841 | { |
841 | struct task_struct *tsk; | 842 | struct task_struct *tsk; |
842 | unsigned long *stack; | 843 | unsigned long *stack; |
843 | struct vm_struct *stack_vm_area; | 844 | struct vm_struct *stack_vm_area __maybe_unused; |
844 | int err; | 845 | int err; |
845 | 846 | ||
846 | if (node == NUMA_NO_NODE) | 847 | if (node == NUMA_NO_NODE) |
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 86ef06d3dbe3..d7140447be75 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -152,6 +152,7 @@ int sanity_check_segment_list(struct kimage *image) | |||
152 | int i; | 152 | int i; |
153 | unsigned long nr_segments = image->nr_segments; | 153 | unsigned long nr_segments = image->nr_segments; |
154 | unsigned long total_pages = 0; | 154 | unsigned long total_pages = 0; |
155 | unsigned long nr_pages = totalram_pages(); | ||
155 | 156 | ||
156 | /* | 157 | /* |
157 | * Verify we have good destination addresses. The caller is | 158 | * Verify we have good destination addresses. The caller is |
@@ -217,13 +218,13 @@ int sanity_check_segment_list(struct kimage *image) | |||
217 | * wasted allocating pages, which can cause a soft lockup. | 218 | * wasted allocating pages, which can cause a soft lockup. |
218 | */ | 219 | */ |
219 | for (i = 0; i < nr_segments; i++) { | 220 | for (i = 0; i < nr_segments; i++) { |
220 | if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2) | 221 | if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2) |
221 | return -EINVAL; | 222 | return -EINVAL; |
222 | 223 | ||
223 | total_pages += PAGE_COUNT(image->segment[i].memsz); | 224 | total_pages += PAGE_COUNT(image->segment[i].memsz); |
224 | } | 225 | } |
225 | 226 | ||
226 | if (total_pages > totalram_pages / 2) | 227 | if (total_pages > nr_pages / 2) |
227 | return -EINVAL; | 228 | return -EINVAL; |
228 | 229 | ||
229 | /* | 230 | /* |
diff --git a/kernel/memremap.c b/kernel/memremap.c index 9eced2cc9f94..a856cb5ff192 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/wait_bit.h> | 12 | #include <linux/wait_bit.h> |
13 | #include <linux/xarray.h> | 13 | #include <linux/xarray.h> |
14 | #include <linux/hmm.h> | ||
14 | 15 | ||
15 | static DEFINE_XARRAY(pgmap_array); | 16 | static DEFINE_XARRAY(pgmap_array); |
16 | #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1) | 17 | #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1) |
@@ -24,6 +25,9 @@ vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, | |||
24 | pmd_t *pmdp) | 25 | pmd_t *pmdp) |
25 | { | 26 | { |
26 | struct page *page = device_private_entry_to_page(entry); | 27 | struct page *page = device_private_entry_to_page(entry); |
28 | struct hmm_devmem *devmem; | ||
29 | |||
30 | devmem = container_of(page->pgmap, typeof(*devmem), pagemap); | ||
27 | 31 | ||
28 | /* | 32 | /* |
29 | * The page_fault() callback must migrate page back to system memory | 33 | * The page_fault() callback must migrate page back to system memory |
@@ -39,7 +43,7 @@ vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, | |||
39 | * There is a more in-depth description of what that callback can and | 43 | * There is a more in-depth description of what that callback can and |
40 | * cannot do, in include/linux/memremap.h | 44 | * cannot do, in include/linux/memremap.h |
41 | */ | 45 | */ |
42 | return page->pgmap->page_fault(vma, addr, page, flags, pmdp); | 46 | return devmem->page_fault(vma, addr, page, flags, pmdp); |
43 | } | 47 | } |
44 | EXPORT_SYMBOL(device_private_entry_fault); | 48 | EXPORT_SYMBOL(device_private_entry_fault); |
45 | #endif /* CONFIG_DEVICE_PRIVATE */ | 49 | #endif /* CONFIG_DEVICE_PRIVATE */ |
@@ -87,24 +91,29 @@ static void devm_memremap_pages_release(void *data) | |||
87 | struct resource *res = &pgmap->res; | 91 | struct resource *res = &pgmap->res; |
88 | resource_size_t align_start, align_size; | 92 | resource_size_t align_start, align_size; |
89 | unsigned long pfn; | 93 | unsigned long pfn; |
94 | int nid; | ||
90 | 95 | ||
96 | pgmap->kill(pgmap->ref); | ||
91 | for_each_device_pfn(pfn, pgmap) | 97 | for_each_device_pfn(pfn, pgmap) |
92 | put_page(pfn_to_page(pfn)); | 98 | put_page(pfn_to_page(pfn)); |
93 | 99 | ||
94 | if (percpu_ref_tryget_live(pgmap->ref)) { | ||
95 | dev_WARN(dev, "%s: page mapping is still live!\n", __func__); | ||
96 | percpu_ref_put(pgmap->ref); | ||
97 | } | ||
98 | |||
99 | /* pages are dead and unused, undo the arch mapping */ | 100 | /* pages are dead and unused, undo the arch mapping */ |
100 | align_start = res->start & ~(SECTION_SIZE - 1); | 101 | align_start = res->start & ~(SECTION_SIZE - 1); |
101 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) | 102 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
102 | - align_start; | 103 | - align_start; |
103 | 104 | ||
105 | nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT)); | ||
106 | |||
104 | mem_hotplug_begin(); | 107 | mem_hotplug_begin(); |
105 | arch_remove_memory(align_start, align_size, pgmap->altmap_valid ? | 108 | if (pgmap->type == MEMORY_DEVICE_PRIVATE) { |
106 | &pgmap->altmap : NULL); | 109 | pfn = align_start >> PAGE_SHIFT; |
107 | kasan_remove_zero_shadow(__va(align_start), align_size); | 110 | __remove_pages(page_zone(pfn_to_page(pfn)), pfn, |
111 | align_size >> PAGE_SHIFT, NULL); | ||
112 | } else { | ||
113 | arch_remove_memory(nid, align_start, align_size, | ||
114 | pgmap->altmap_valid ? &pgmap->altmap : NULL); | ||
115 | kasan_remove_zero_shadow(__va(align_start), align_size); | ||
116 | } | ||
108 | mem_hotplug_done(); | 117 | mem_hotplug_done(); |
109 | 118 | ||
110 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); | 119 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); |
@@ -116,7 +125,7 @@ static void devm_memremap_pages_release(void *data) | |||
116 | /** | 125 | /** |
117 | * devm_memremap_pages - remap and provide memmap backing for the given resource | 126 | * devm_memremap_pages - remap and provide memmap backing for the given resource |
118 | * @dev: hosting device for @res | 127 | * @dev: hosting device for @res |
119 | * @pgmap: pointer to a struct dev_pgmap | 128 | * @pgmap: pointer to a struct dev_pagemap |
120 | * | 129 | * |
121 | * Notes: | 130 | * Notes: |
122 | * 1/ At a minimum the res, ref and type members of @pgmap must be initialized | 131 | * 1/ At a minimum the res, ref and type members of @pgmap must be initialized |
@@ -125,11 +134,8 @@ static void devm_memremap_pages_release(void *data) | |||
125 | * 2/ The altmap field may optionally be initialized, in which case altmap_valid | 134 | * 2/ The altmap field may optionally be initialized, in which case altmap_valid |
126 | * must be set to true | 135 | * must be set to true |
127 | * | 136 | * |
128 | * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages() | 137 | * 3/ pgmap->ref must be 'live' on entry and will be killed at |
129 | * time (or devm release event). The expected order of events is that ref has | 138 | * devm_memremap_pages_release() time, or if this routine fails. |
130 | * been through percpu_ref_kill() before devm_memremap_pages_release(). The | ||
131 | * wait for the completion of all references being dropped and | ||
132 | * percpu_ref_exit() must occur after devm_memremap_pages_release(). | ||
133 | * | 139 | * |
134 | * 4/ res is expected to be a host memory range that could feasibly be | 140 | * 4/ res is expected to be a host memory range that could feasibly be |
135 | * treated as a "System RAM" range, i.e. not a device mmio range, but | 141 | * treated as a "System RAM" range, i.e. not a device mmio range, but |
@@ -145,6 +151,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
145 | pgprot_t pgprot = PAGE_KERNEL; | 151 | pgprot_t pgprot = PAGE_KERNEL; |
146 | int error, nid, is_ram; | 152 | int error, nid, is_ram; |
147 | 153 | ||
154 | if (!pgmap->ref || !pgmap->kill) | ||
155 | return ERR_PTR(-EINVAL); | ||
156 | |||
148 | align_start = res->start & ~(SECTION_SIZE - 1); | 157 | align_start = res->start & ~(SECTION_SIZE - 1); |
149 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) | 158 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
150 | - align_start; | 159 | - align_start; |
@@ -167,18 +176,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
167 | is_ram = region_intersects(align_start, align_size, | 176 | is_ram = region_intersects(align_start, align_size, |
168 | IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); | 177 | IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); |
169 | 178 | ||
170 | if (is_ram == REGION_MIXED) { | 179 | if (is_ram != REGION_DISJOINT) { |
171 | WARN_ONCE(1, "%s attempted on mixed region %pr\n", | 180 | WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__, |
172 | __func__, res); | 181 | is_ram == REGION_MIXED ? "mixed" : "ram", res); |
173 | return ERR_PTR(-ENXIO); | 182 | error = -ENXIO; |
183 | goto err_array; | ||
174 | } | 184 | } |
175 | 185 | ||
176 | if (is_ram == REGION_INTERSECTS) | ||
177 | return __va(res->start); | ||
178 | |||
179 | if (!pgmap->ref) | ||
180 | return ERR_PTR(-EINVAL); | ||
181 | |||
182 | pgmap->dev = dev; | 186 | pgmap->dev = dev; |
183 | 187 | ||
184 | error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), | 188 | error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), |
@@ -196,17 +200,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
196 | goto err_pfn_remap; | 200 | goto err_pfn_remap; |
197 | 201 | ||
198 | mem_hotplug_begin(); | 202 | mem_hotplug_begin(); |
199 | error = kasan_add_zero_shadow(__va(align_start), align_size); | 203 | |
200 | if (error) { | 204 | /* |
201 | mem_hotplug_done(); | 205 | * For device private memory we call add_pages() as we only need to |
202 | goto err_kasan; | 206 | * allocate and initialize struct page for the device memory. More- |
207 | * over the device memory is un-accessible thus we do not want to | ||
208 | * create a linear mapping for the memory like arch_add_memory() | ||
209 | * would do. | ||
210 | * | ||
211 | * For all other device memory types, which are accessible by | ||
212 | * the CPU, we do want the linear mapping and thus use | ||
213 | * arch_add_memory(). | ||
214 | */ | ||
215 | if (pgmap->type == MEMORY_DEVICE_PRIVATE) { | ||
216 | error = add_pages(nid, align_start >> PAGE_SHIFT, | ||
217 | align_size >> PAGE_SHIFT, NULL, false); | ||
218 | } else { | ||
219 | error = kasan_add_zero_shadow(__va(align_start), align_size); | ||
220 | if (error) { | ||
221 | mem_hotplug_done(); | ||
222 | goto err_kasan; | ||
223 | } | ||
224 | |||
225 | error = arch_add_memory(nid, align_start, align_size, altmap, | ||
226 | false); | ||
227 | } | ||
228 | |||
229 | if (!error) { | ||
230 | struct zone *zone; | ||
231 | |||
232 | zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; | ||
233 | move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT, | ||
234 | align_size >> PAGE_SHIFT, altmap); | ||
203 | } | 235 | } |
204 | 236 | ||
205 | error = arch_add_memory(nid, align_start, align_size, altmap, false); | ||
206 | if (!error) | ||
207 | move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], | ||
208 | align_start >> PAGE_SHIFT, | ||
209 | align_size >> PAGE_SHIFT, altmap); | ||
210 | mem_hotplug_done(); | 237 | mem_hotplug_done(); |
211 | if (error) | 238 | if (error) |
212 | goto err_add_memory; | 239 | goto err_add_memory; |
@@ -220,7 +247,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
220 | align_size >> PAGE_SHIFT, pgmap); | 247 | align_size >> PAGE_SHIFT, pgmap); |
221 | percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap)); | 248 | percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap)); |
222 | 249 | ||
223 | devm_add_action(dev, devm_memremap_pages_release, pgmap); | 250 | error = devm_add_action_or_reset(dev, devm_memremap_pages_release, |
251 | pgmap); | ||
252 | if (error) | ||
253 | return ERR_PTR(error); | ||
224 | 254 | ||
225 | return __va(res->start); | 255 | return __va(res->start); |
226 | 256 | ||
@@ -231,9 +261,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
231 | err_pfn_remap: | 261 | err_pfn_remap: |
232 | pgmap_array_delete(res); | 262 | pgmap_array_delete(res); |
233 | err_array: | 263 | err_array: |
264 | pgmap->kill(pgmap->ref); | ||
234 | return ERR_PTR(error); | 265 | return ERR_PTR(error); |
235 | } | 266 | } |
236 | EXPORT_SYMBOL(devm_memremap_pages); | 267 | EXPORT_SYMBOL_GPL(devm_memremap_pages); |
237 | 268 | ||
238 | unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) | 269 | unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) |
239 | { | 270 | { |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index b0308a2c6000..640b2034edd6 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -105,7 +105,7 @@ unsigned long image_size; | |||
105 | 105 | ||
106 | void __init hibernate_image_size_init(void) | 106 | void __init hibernate_image_size_init(void) |
107 | { | 107 | { |
108 | image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; | 108 | image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE; |
109 | } | 109 | } |
110 | 110 | ||
111 | /* | 111 | /* |
diff --git a/kernel/resource.c b/kernel/resource.c index b0fbf685c77a..915c02e8e5dd 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -1256,6 +1256,21 @@ int release_mem_region_adjustable(struct resource *parent, | |||
1256 | continue; | 1256 | continue; |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | /* | ||
1260 | * All memory regions added from memory-hotplug path have the | ||
1261 | * flag IORESOURCE_SYSTEM_RAM. If the resource does not have | ||
1262 | * this flag, we know that we are dealing with a resource coming | ||
1263 | * from HMM/devm. HMM/devm use another mechanism to add/release | ||
1264 | * a resource. This goes via devm_request_mem_region and | ||
1265 | * devm_release_mem_region. | ||
1266 | * HMM/devm take care to release their resources when they want, | ||
1267 | * so if we are dealing with them, let us just back off here. | ||
1268 | */ | ||
1269 | if (!(res->flags & IORESOURCE_SYSRAM)) { | ||
1270 | ret = 0; | ||
1271 | break; | ||
1272 | } | ||
1273 | |||
1259 | if (!(res->flags & IORESOURCE_MEM)) | 1274 | if (!(res->flags & IORESOURCE_MEM)) |
1260 | break; | 1275 | break; |
1261 | 1276 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 5fc724e4e454..1825f712e73b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -1463,6 +1463,14 @@ static struct ctl_table vm_table[] = { | |||
1463 | .extra1 = &zero, | 1463 | .extra1 = &zero, |
1464 | }, | 1464 | }, |
1465 | { | 1465 | { |
1466 | .procname = "watermark_boost_factor", | ||
1467 | .data = &watermark_boost_factor, | ||
1468 | .maxlen = sizeof(watermark_boost_factor), | ||
1469 | .mode = 0644, | ||
1470 | .proc_handler = watermark_boost_factor_sysctl_handler, | ||
1471 | .extra1 = &zero, | ||
1472 | }, | ||
1473 | { | ||
1466 | .procname = "watermark_scale_factor", | 1474 | .procname = "watermark_scale_factor", |
1467 | .data = &watermark_scale_factor, | 1475 | .data = &watermark_scale_factor, |
1468 | .maxlen = sizeof(watermark_scale_factor), | 1476 | .maxlen = sizeof(watermark_scale_factor), |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b3c91b9e32f8..2b5a4256e88b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -593,6 +593,21 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF | |||
593 | Say Y here to disable kmemleak by default. It can then be enabled | 593 | Say Y here to disable kmemleak by default. It can then be enabled |
594 | on the command line via kmemleak=on. | 594 | on the command line via kmemleak=on. |
595 | 595 | ||
596 | config DEBUG_KMEMLEAK_AUTO_SCAN | ||
597 | bool "Enable kmemleak auto scan thread on boot up" | ||
598 | default y | ||
599 | depends on DEBUG_KMEMLEAK | ||
600 | help | ||
601 | Depending on the cpu, kmemleak scan may be cpu intensive and can | ||
602 | stall user tasks at times. This option enables/disables automatic | ||
603 | kmemleak scan at boot up. | ||
604 | |||
605 | Say N here to disable kmemleak auto scan thread to stop automatic | ||
606 | scanning. Disabling this option disables automatic reporting of | ||
607 | memory leaks. | ||
608 | |||
609 | If unsure, say Y. | ||
610 | |||
596 | config DEBUG_STACK_USAGE | 611 | config DEBUG_STACK_USAGE |
597 | bool "Stack utilization instrumentation" | 612 | bool "Stack utilization instrumentation" |
598 | depends on DEBUG_KERNEL && !IA64 | 613 | depends on DEBUG_KERNEL && !IA64 |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index d0bad1bd9a2b..d8c474b6691e 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan | |||
@@ -1,36 +1,92 @@ | |||
1 | # This config refers to the generic KASAN mode. | ||
1 | config HAVE_ARCH_KASAN | 2 | config HAVE_ARCH_KASAN |
2 | bool | 3 | bool |
3 | 4 | ||
4 | if HAVE_ARCH_KASAN | 5 | config HAVE_ARCH_KASAN_SW_TAGS |
6 | bool | ||
7 | |||
8 | config CC_HAS_KASAN_GENERIC | ||
9 | def_bool $(cc-option, -fsanitize=kernel-address) | ||
10 | |||
11 | config CC_HAS_KASAN_SW_TAGS | ||
12 | def_bool $(cc-option, -fsanitize=kernel-hwaddress) | ||
5 | 13 | ||
6 | config KASAN | 14 | config KASAN |
7 | bool "KASan: runtime memory debugger" | 15 | bool "KASAN: runtime memory debugger" |
16 | depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ | ||
17 | (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS) | ||
18 | depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) | ||
19 | help | ||
20 | Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, | ||
21 | designed to find out-of-bounds accesses and use-after-free bugs. | ||
22 | See Documentation/dev-tools/kasan.rst for details. | ||
23 | |||
24 | choice | ||
25 | prompt "KASAN mode" | ||
26 | depends on KASAN | ||
27 | default KASAN_GENERIC | ||
28 | help | ||
29 | KASAN has two modes: generic KASAN (similar to userspace ASan, | ||
30 | x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and | ||
31 | software tag-based KASAN (a version based on software memory | ||
32 | tagging, arm64 only, similar to userspace HWASan, enabled with | ||
33 | CONFIG_KASAN_SW_TAGS). | ||
34 | Both generic and tag-based KASAN are strictly debugging features. | ||
35 | |||
36 | config KASAN_GENERIC | ||
37 | bool "Generic mode" | ||
38 | depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC | ||
8 | depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) | 39 | depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) |
9 | select SLUB_DEBUG if SLUB | 40 | select SLUB_DEBUG if SLUB |
10 | select CONSTRUCTORS | 41 | select CONSTRUCTORS |
11 | select STACKDEPOT | 42 | select STACKDEPOT |
12 | help | 43 | help |
13 | Enables kernel address sanitizer - runtime memory debugger, | 44 | Enables generic KASAN mode. |
14 | designed to find out-of-bounds accesses and use-after-free bugs. | 45 | Supported in both GCC and Clang. With GCC it requires version 4.9.2 |
15 | This is strictly a debugging feature and it requires a gcc version | 46 | or later for basic support and version 5.0 or later for detection of |
16 | of 4.9.2 or later. Detection of out of bounds accesses to stack or | 47 | out-of-bounds accesses for stack and global variables and for inline |
17 | global variables requires gcc 5.0 or later. | 48 | instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires |
18 | This feature consumes about 1/8 of available memory and brings about | 49 | version 3.7.0 or later and it doesn't support detection of |
19 | ~x3 performance slowdown. | 50 | out-of-bounds accesses for global variables yet. |
51 | This mode consumes about 1/8th of available memory at kernel start | ||
52 | and introduces an overhead of ~x1.5 for the rest of the allocations. | ||
53 | The performance slowdown is ~x3. | ||
20 | For better error detection enable CONFIG_STACKTRACE. | 54 | For better error detection enable CONFIG_STACKTRACE. |
21 | Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB | 55 | Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB |
22 | (the resulting kernel does not boot). | 56 | (the resulting kernel does not boot). |
23 | 57 | ||
58 | config KASAN_SW_TAGS | ||
59 | bool "Software tag-based mode" | ||
60 | depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS | ||
61 | depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) | ||
62 | select SLUB_DEBUG if SLUB | ||
63 | select CONSTRUCTORS | ||
64 | select STACKDEPOT | ||
65 | help | ||
66 | Enables software tag-based KASAN mode. | ||
67 | This mode requires Top Byte Ignore support by the CPU and therefore | ||
68 | is only supported for arm64. | ||
69 | This mode requires Clang version 7.0.0 or later. | ||
70 | This mode consumes about 1/16th of available memory at kernel start | ||
71 | and introduces an overhead of ~20% for the rest of the allocations. | ||
72 | This mode may potentially introduce problems relating to pointer | ||
73 | casting and comparison, as it embeds tags into the top byte of each | ||
74 | pointer. | ||
75 | For better error detection enable CONFIG_STACKTRACE. | ||
76 | Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB | ||
77 | (the resulting kernel does not boot). | ||
78 | |||
79 | endchoice | ||
80 | |||
24 | config KASAN_EXTRA | 81 | config KASAN_EXTRA |
25 | bool "KAsan: extra checks" | 82 | bool "KASAN: extra checks" |
26 | depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST | 83 | depends on KASAN_GENERIC && DEBUG_KERNEL && !COMPILE_TEST |
27 | help | 84 | help |
28 | This enables further checks in the kernel address sanitizer, for now | 85 | This enables further checks in generic KASAN, for now it only |
29 | it only includes the address-use-after-scope check that can lead | 86 | includes the address-use-after-scope check that can lead to |
30 | to excessive kernel stack usage, frame size warnings and longer | 87 | excessive kernel stack usage, frame size warnings and longer |
31 | compile time. | 88 | compile time. |
32 | https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more | 89 | See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 |
33 | |||
34 | 90 | ||
35 | choice | 91 | choice |
36 | prompt "Instrumentation type" | 92 | prompt "Instrumentation type" |
@@ -53,7 +109,7 @@ config KASAN_INLINE | |||
53 | memory accesses. This is faster than outline (in some workloads | 109 | memory accesses. This is faster than outline (in some workloads |
54 | it gives about x2 boost over outline instrumentation), but | 110 | it gives about x2 boost over outline instrumentation), but |
55 | make kernel's .text size much bigger. | 111 | make kernel's .text size much bigger. |
56 | This requires a gcc version of 5.0 or later. | 112 | For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later. |
57 | 113 | ||
58 | endchoice | 114 | endchoice |
59 | 115 | ||
@@ -67,11 +123,9 @@ config KASAN_S390_4_LEVEL_PAGING | |||
67 | 4-level paging instead. | 123 | 4-level paging instead. |
68 | 124 | ||
69 | config TEST_KASAN | 125 | config TEST_KASAN |
70 | tristate "Module for testing kasan for bug detection" | 126 | tristate "Module for testing KASAN for bug detection" |
71 | depends on m && KASAN | 127 | depends on m && KASAN |
72 | help | 128 | help |
73 | This is a test module doing various nasty things like | 129 | This is a test module doing various nasty things like |
74 | out of bounds accesses, use after free. It is useful for testing | 130 | out of bounds accesses, use after free. It is useful for testing |
75 | kernel debugging features like kernel address sanitizer. | 131 | kernel debugging features like KASAN. |
76 | |||
77 | endif | ||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 14afeeb7d6ef..55437fd5128b 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -1131,11 +1131,10 @@ static int __init debug_objects_replace_static_objects(void) | |||
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | /* | 1133 | /* |
1134 | * When debug_objects_mem_init() is called we know that only | 1134 | * debug_objects_mem_init() is now called early that only one CPU is up |
1135 | * one CPU is up, so disabling interrupts is enough | 1135 | * and interrupts have been disabled, so it is safe to replace the |
1136 | * protection. This avoids the lockdep hell of lock ordering. | 1136 | * active object references. |
1137 | */ | 1137 | */ |
1138 | local_irq_disable(); | ||
1139 | 1138 | ||
1140 | /* Remove the statically allocated objects from the pool */ | 1139 | /* Remove the statically allocated objects from the pool */ |
1141 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) | 1140 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
@@ -1156,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void) | |||
1156 | cnt++; | 1155 | cnt++; |
1157 | } | 1156 | } |
1158 | } | 1157 | } |
1159 | local_irq_enable(); | ||
1160 | 1158 | ||
1161 | pr_debug("%d of %d active objects replaced\n", | 1159 | pr_debug("%d of %d active objects replaced\n", |
1162 | cnt, obj_pool_used); | 1160 | cnt, obj_pool_used); |
diff --git a/lib/ioremap.c b/lib/ioremap.c index 517f5853ffed..063213685563 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
@@ -76,83 +76,123 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, | |||
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
79 | static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, | ||
80 | unsigned long end, phys_addr_t phys_addr, | ||
81 | pgprot_t prot) | ||
82 | { | ||
83 | if (!ioremap_pmd_enabled()) | ||
84 | return 0; | ||
85 | |||
86 | if ((end - addr) != PMD_SIZE) | ||
87 | return 0; | ||
88 | |||
89 | if (!IS_ALIGNED(phys_addr, PMD_SIZE)) | ||
90 | return 0; | ||
91 | |||
92 | if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) | ||
93 | return 0; | ||
94 | |||
95 | return pmd_set_huge(pmd, phys_addr, prot); | ||
96 | } | ||
97 | |||
79 | static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, | 98 | static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, |
80 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | 99 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
81 | { | 100 | { |
82 | pmd_t *pmd; | 101 | pmd_t *pmd; |
83 | unsigned long next; | 102 | unsigned long next; |
84 | 103 | ||
85 | phys_addr -= addr; | ||
86 | pmd = pmd_alloc(&init_mm, pud, addr); | 104 | pmd = pmd_alloc(&init_mm, pud, addr); |
87 | if (!pmd) | 105 | if (!pmd) |
88 | return -ENOMEM; | 106 | return -ENOMEM; |
89 | do { | 107 | do { |
90 | next = pmd_addr_end(addr, end); | 108 | next = pmd_addr_end(addr, end); |
91 | 109 | ||
92 | if (ioremap_pmd_enabled() && | 110 | if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) |
93 | ((next - addr) == PMD_SIZE) && | 111 | continue; |
94 | IS_ALIGNED(phys_addr + addr, PMD_SIZE) && | ||
95 | pmd_free_pte_page(pmd, addr)) { | ||
96 | if (pmd_set_huge(pmd, phys_addr + addr, prot)) | ||
97 | continue; | ||
98 | } | ||
99 | 112 | ||
100 | if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot)) | 113 | if (ioremap_pte_range(pmd, addr, next, phys_addr, prot)) |
101 | return -ENOMEM; | 114 | return -ENOMEM; |
102 | } while (pmd++, addr = next, addr != end); | 115 | } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); |
103 | return 0; | 116 | return 0; |
104 | } | 117 | } |
105 | 118 | ||
119 | static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, | ||
120 | unsigned long end, phys_addr_t phys_addr, | ||
121 | pgprot_t prot) | ||
122 | { | ||
123 | if (!ioremap_pud_enabled()) | ||
124 | return 0; | ||
125 | |||
126 | if ((end - addr) != PUD_SIZE) | ||
127 | return 0; | ||
128 | |||
129 | if (!IS_ALIGNED(phys_addr, PUD_SIZE)) | ||
130 | return 0; | ||
131 | |||
132 | if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) | ||
133 | return 0; | ||
134 | |||
135 | return pud_set_huge(pud, phys_addr, prot); | ||
136 | } | ||
137 | |||
106 | static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, | 138 | static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, |
107 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | 139 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
108 | { | 140 | { |
109 | pud_t *pud; | 141 | pud_t *pud; |
110 | unsigned long next; | 142 | unsigned long next; |
111 | 143 | ||
112 | phys_addr -= addr; | ||
113 | pud = pud_alloc(&init_mm, p4d, addr); | 144 | pud = pud_alloc(&init_mm, p4d, addr); |
114 | if (!pud) | 145 | if (!pud) |
115 | return -ENOMEM; | 146 | return -ENOMEM; |
116 | do { | 147 | do { |
117 | next = pud_addr_end(addr, end); | 148 | next = pud_addr_end(addr, end); |
118 | 149 | ||
119 | if (ioremap_pud_enabled() && | 150 | if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) |
120 | ((next - addr) == PUD_SIZE) && | 151 | continue; |
121 | IS_ALIGNED(phys_addr + addr, PUD_SIZE) && | ||
122 | pud_free_pmd_page(pud, addr)) { | ||
123 | if (pud_set_huge(pud, phys_addr + addr, prot)) | ||
124 | continue; | ||
125 | } | ||
126 | 152 | ||
127 | if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot)) | 153 | if (ioremap_pmd_range(pud, addr, next, phys_addr, prot)) |
128 | return -ENOMEM; | 154 | return -ENOMEM; |
129 | } while (pud++, addr = next, addr != end); | 155 | } while (pud++, phys_addr += (next - addr), addr = next, addr != end); |
130 | return 0; | 156 | return 0; |
131 | } | 157 | } |
132 | 158 | ||
159 | static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, | ||
160 | unsigned long end, phys_addr_t phys_addr, | ||
161 | pgprot_t prot) | ||
162 | { | ||
163 | if (!ioremap_p4d_enabled()) | ||
164 | return 0; | ||
165 | |||
166 | if ((end - addr) != P4D_SIZE) | ||
167 | return 0; | ||
168 | |||
169 | if (!IS_ALIGNED(phys_addr, P4D_SIZE)) | ||
170 | return 0; | ||
171 | |||
172 | if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) | ||
173 | return 0; | ||
174 | |||
175 | return p4d_set_huge(p4d, phys_addr, prot); | ||
176 | } | ||
177 | |||
133 | static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, | 178 | static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, |
134 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | 179 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
135 | { | 180 | { |
136 | p4d_t *p4d; | 181 | p4d_t *p4d; |
137 | unsigned long next; | 182 | unsigned long next; |
138 | 183 | ||
139 | phys_addr -= addr; | ||
140 | p4d = p4d_alloc(&init_mm, pgd, addr); | 184 | p4d = p4d_alloc(&init_mm, pgd, addr); |
141 | if (!p4d) | 185 | if (!p4d) |
142 | return -ENOMEM; | 186 | return -ENOMEM; |
143 | do { | 187 | do { |
144 | next = p4d_addr_end(addr, end); | 188 | next = p4d_addr_end(addr, end); |
145 | 189 | ||
146 | if (ioremap_p4d_enabled() && | 190 | if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) |
147 | ((next - addr) == P4D_SIZE) && | 191 | continue; |
148 | IS_ALIGNED(phys_addr + addr, P4D_SIZE)) { | ||
149 | if (p4d_set_huge(p4d, phys_addr + addr, prot)) | ||
150 | continue; | ||
151 | } | ||
152 | 192 | ||
153 | if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot)) | 193 | if (ioremap_pud_range(p4d, addr, next, phys_addr, prot)) |
154 | return -ENOMEM; | 194 | return -ENOMEM; |
155 | } while (p4d++, addr = next, addr != end); | 195 | } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); |
156 | return 0; | 196 | return 0; |
157 | } | 197 | } |
158 | 198 | ||
@@ -168,14 +208,13 @@ int ioremap_page_range(unsigned long addr, | |||
168 | BUG_ON(addr >= end); | 208 | BUG_ON(addr >= end); |
169 | 209 | ||
170 | start = addr; | 210 | start = addr; |
171 | phys_addr -= addr; | ||
172 | pgd = pgd_offset_k(addr); | 211 | pgd = pgd_offset_k(addr); |
173 | do { | 212 | do { |
174 | next = pgd_addr_end(addr, end); | 213 | next = pgd_addr_end(addr, end); |
175 | err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot); | 214 | err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot); |
176 | if (err) | 215 | if (err) |
177 | break; | 216 | break; |
178 | } while (pgd++, addr = next, addr != end); | 217 | } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); |
179 | 218 | ||
180 | flush_cache_vmap(start, end); | 219 | flush_cache_vmap(start, end); |
181 | 220 | ||
diff --git a/lib/show_mem.c b/lib/show_mem.c index 0beaa1d899aa..6a042f53e7bb 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
@@ -18,22 +18,19 @@ void show_mem(unsigned int filter, nodemask_t *nodemask) | |||
18 | show_free_areas(filter, nodemask); | 18 | show_free_areas(filter, nodemask); |
19 | 19 | ||
20 | for_each_online_pgdat(pgdat) { | 20 | for_each_online_pgdat(pgdat) { |
21 | unsigned long flags; | ||
22 | int zoneid; | 21 | int zoneid; |
23 | 22 | ||
24 | pgdat_resize_lock(pgdat, &flags); | ||
25 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { | 23 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
26 | struct zone *zone = &pgdat->node_zones[zoneid]; | 24 | struct zone *zone = &pgdat->node_zones[zoneid]; |
27 | if (!populated_zone(zone)) | 25 | if (!populated_zone(zone)) |
28 | continue; | 26 | continue; |
29 | 27 | ||
30 | total += zone->present_pages; | 28 | total += zone->present_pages; |
31 | reserved += zone->present_pages - zone->managed_pages; | 29 | reserved += zone->present_pages - zone_managed_pages(zone); |
32 | 30 | ||
33 | if (is_highmem_idx(zoneid)) | 31 | if (is_highmem_idx(zoneid)) |
34 | highmem += zone->present_pages; | 32 | highmem += zone->present_pages; |
35 | } | 33 | } |
36 | pgdat_resize_unlock(pgdat, &flags); | ||
37 | } | 34 | } |
38 | 35 | ||
39 | printk("%lu pages RAM\n", total); | 36 | printk("%lu pages RAM\n", total); |
diff --git a/mm/Kconfig b/mm/Kconfig index d85e39da47ae..25c71eb8a7db 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -291,6 +291,7 @@ config MMU_NOTIFIER | |||
291 | config KSM | 291 | config KSM |
292 | bool "Enable KSM for page merging" | 292 | bool "Enable KSM for page merging" |
293 | depends on MMU | 293 | depends on MMU |
294 | select XXHASH | ||
294 | help | 295 | help |
295 | Enable Kernel Samepage Merging: KSM periodically scans those areas | 296 | Enable Kernel Samepage Merging: KSM periodically scans those areas |
296 | of an application's address space that an app has advised may be | 297 | of an application's address space that an app has advised may be |
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
407 | unsigned long pfn = -1; | 407 | unsigned long pfn = -1; |
408 | unsigned long start = 0; | 408 | unsigned long start = 0; |
409 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; | 409 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; |
410 | size_t i; | ||
410 | struct page *page = NULL; | 411 | struct page *page = NULL; |
411 | int ret = -ENOMEM; | 412 | int ret = -ENOMEM; |
412 | 413 | ||
@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
466 | 467 | ||
467 | trace_cma_alloc(pfn, page, count, align); | 468 | trace_cma_alloc(pfn, page, count, align); |
468 | 469 | ||
470 | /* | ||
471 | * CMA can allocate multiple page blocks, which results in different | ||
472 | * blocks being marked with different tags. Reset the tags to ignore | ||
473 | * those page blocks. | ||
474 | */ | ||
475 | if (page) { | ||
476 | for (i = 0; i < count; i++) | ||
477 | page_kasan_tag_reset(page + i); | ||
478 | } | ||
479 | |||
469 | if (ret && !no_warn) { | 480 | if (ret && !no_warn) { |
470 | pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n", | 481 | pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n", |
471 | __func__, count, ret); | 482 | __func__, count, ret); |
diff --git a/mm/compaction.c b/mm/compaction.c index 7c607479de4a..ef29490b0f46 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -1431,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, | |||
1431 | if (is_via_compact_memory(order)) | 1431 | if (is_via_compact_memory(order)) |
1432 | return COMPACT_CONTINUE; | 1432 | return COMPACT_CONTINUE; |
1433 | 1433 | ||
1434 | watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; | 1434 | watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); |
1435 | /* | 1435 | /* |
1436 | * If watermarks for high-order allocation are already met, there | 1436 | * If watermarks for high-order allocation are already met, there |
1437 | * should be no need for compaction at all. | 1437 | * should be no need for compaction at all. |
diff --git a/mm/debug.c b/mm/debug.c index cdacba12e09a..0abb987dad9b 100644 --- a/mm/debug.c +++ b/mm/debug.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | #include "internal.h" | 18 | #include "internal.h" |
19 | 19 | ||
20 | char *migrate_reason_names[MR_TYPES] = { | 20 | const char *migrate_reason_names[MR_TYPES] = { |
21 | "compaction", | 21 | "compaction", |
22 | "memory_failure", | 22 | "memory_failure", |
23 | "memory_hotplug", | 23 | "memory_hotplug", |
@@ -44,6 +44,7 @@ const struct trace_print_flags vmaflag_names[] = { | |||
44 | 44 | ||
45 | void __dump_page(struct page *page, const char *reason) | 45 | void __dump_page(struct page *page, const char *reason) |
46 | { | 46 | { |
47 | struct address_space *mapping = page_mapping(page); | ||
47 | bool page_poisoned = PagePoisoned(page); | 48 | bool page_poisoned = PagePoisoned(page); |
48 | int mapcount; | 49 | int mapcount; |
49 | 50 | ||
@@ -53,7 +54,7 @@ void __dump_page(struct page *page, const char *reason) | |||
53 | * dump_page() when detected. | 54 | * dump_page() when detected. |
54 | */ | 55 | */ |
55 | if (page_poisoned) { | 56 | if (page_poisoned) { |
56 | pr_emerg("page:%px is uninitialized and poisoned", page); | 57 | pr_warn("page:%px is uninitialized and poisoned", page); |
57 | goto hex_only; | 58 | goto hex_only; |
58 | } | 59 | } |
59 | 60 | ||
@@ -64,27 +65,39 @@ void __dump_page(struct page *page, const char *reason) | |||
64 | */ | 65 | */ |
65 | mapcount = PageSlab(page) ? 0 : page_mapcount(page); | 66 | mapcount = PageSlab(page) ? 0 : page_mapcount(page); |
66 | 67 | ||
67 | pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", | 68 | pr_warn("page:%px count:%d mapcount:%d mapping:%px index:%#lx", |
68 | page, page_ref_count(page), mapcount, | 69 | page, page_ref_count(page), mapcount, |
69 | page->mapping, page_to_pgoff(page)); | 70 | page->mapping, page_to_pgoff(page)); |
70 | if (PageCompound(page)) | 71 | if (PageCompound(page)) |
71 | pr_cont(" compound_mapcount: %d", compound_mapcount(page)); | 72 | pr_cont(" compound_mapcount: %d", compound_mapcount(page)); |
72 | pr_cont("\n"); | 73 | pr_cont("\n"); |
74 | if (PageAnon(page)) | ||
75 | pr_warn("anon "); | ||
76 | else if (PageKsm(page)) | ||
77 | pr_warn("ksm "); | ||
78 | else if (mapping) { | ||
79 | pr_warn("%ps ", mapping->a_ops); | ||
80 | if (mapping->host->i_dentry.first) { | ||
81 | struct dentry *dentry; | ||
82 | dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); | ||
83 | pr_warn("name:\"%pd\" ", dentry); | ||
84 | } | ||
85 | } | ||
73 | BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); | 86 | BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); |
74 | 87 | ||
75 | pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); | 88 | pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags); |
76 | 89 | ||
77 | hex_only: | 90 | hex_only: |
78 | print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, | 91 | print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, |
79 | sizeof(unsigned long), page, | 92 | sizeof(unsigned long), page, |
80 | sizeof(struct page), false); | 93 | sizeof(struct page), false); |
81 | 94 | ||
82 | if (reason) | 95 | if (reason) |
83 | pr_alert("page dumped because: %s\n", reason); | 96 | pr_warn("page dumped because: %s\n", reason); |
84 | 97 | ||
85 | #ifdef CONFIG_MEMCG | 98 | #ifdef CONFIG_MEMCG |
86 | if (!page_poisoned && page->mem_cgroup) | 99 | if (!page_poisoned && page->mem_cgroup) |
87 | pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); | 100 | pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup); |
88 | #endif | 101 | #endif |
89 | } | 102 | } |
90 | 103 | ||
diff --git a/mm/filemap.c b/mm/filemap.c index 81adec8ee02c..29655fb47a2c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -981,7 +981,14 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, | |||
981 | if (wait_page->bit_nr != key->bit_nr) | 981 | if (wait_page->bit_nr != key->bit_nr) |
982 | return 0; | 982 | return 0; |
983 | 983 | ||
984 | /* Stop walking if it's locked */ | 984 | /* |
985 | * Stop walking if it's locked. | ||
986 | * Is this safe if put_and_wait_on_page_locked() is in use? | ||
987 | * Yes: the waker must hold a reference to this page, and if PG_locked | ||
988 | * has now already been set by another task, that task must also hold | ||
989 | * a reference to the *same usage* of this page; so there is no need | ||
990 | * to walk on to wake even the put_and_wait_on_page_locked() callers. | ||
991 | */ | ||
985 | if (test_bit(key->bit_nr, &key->page->flags)) | 992 | if (test_bit(key->bit_nr, &key->page->flags)) |
986 | return -1; | 993 | return -1; |
987 | 994 | ||
@@ -1049,25 +1056,44 @@ static void wake_up_page(struct page *page, int bit) | |||
1049 | wake_up_page_bit(page, bit); | 1056 | wake_up_page_bit(page, bit); |
1050 | } | 1057 | } |
1051 | 1058 | ||
1059 | /* | ||
1060 | * A choice of three behaviors for wait_on_page_bit_common(): | ||
1061 | */ | ||
1062 | enum behavior { | ||
1063 | EXCLUSIVE, /* Hold ref to page and take the bit when woken, like | ||
1064 | * __lock_page() waiting on then setting PG_locked. | ||
1065 | */ | ||
1066 | SHARED, /* Hold ref to page and check the bit when woken, like | ||
1067 | * wait_on_page_writeback() waiting on PG_writeback. | ||
1068 | */ | ||
1069 | DROP, /* Drop ref to page before wait, no check when woken, | ||
1070 | * like put_and_wait_on_page_locked() on PG_locked. | ||
1071 | */ | ||
1072 | }; | ||
1073 | |||
1052 | static inline int wait_on_page_bit_common(wait_queue_head_t *q, | 1074 | static inline int wait_on_page_bit_common(wait_queue_head_t *q, |
1053 | struct page *page, int bit_nr, int state, bool lock) | 1075 | struct page *page, int bit_nr, int state, enum behavior behavior) |
1054 | { | 1076 | { |
1055 | struct wait_page_queue wait_page; | 1077 | struct wait_page_queue wait_page; |
1056 | wait_queue_entry_t *wait = &wait_page.wait; | 1078 | wait_queue_entry_t *wait = &wait_page.wait; |
1079 | bool bit_is_set; | ||
1057 | bool thrashing = false; | 1080 | bool thrashing = false; |
1081 | bool delayacct = false; | ||
1058 | unsigned long pflags; | 1082 | unsigned long pflags; |
1059 | int ret = 0; | 1083 | int ret = 0; |
1060 | 1084 | ||
1061 | if (bit_nr == PG_locked && | 1085 | if (bit_nr == PG_locked && |
1062 | !PageUptodate(page) && PageWorkingset(page)) { | 1086 | !PageUptodate(page) && PageWorkingset(page)) { |
1063 | if (!PageSwapBacked(page)) | 1087 | if (!PageSwapBacked(page)) { |
1064 | delayacct_thrashing_start(); | 1088 | delayacct_thrashing_start(); |
1089 | delayacct = true; | ||
1090 | } | ||
1065 | psi_memstall_enter(&pflags); | 1091 | psi_memstall_enter(&pflags); |
1066 | thrashing = true; | 1092 | thrashing = true; |
1067 | } | 1093 | } |
1068 | 1094 | ||
1069 | init_wait(wait); | 1095 | init_wait(wait); |
1070 | wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0; | 1096 | wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0; |
1071 | wait->func = wake_page_function; | 1097 | wait->func = wake_page_function; |
1072 | wait_page.page = page; | 1098 | wait_page.page = page; |
1073 | wait_page.bit_nr = bit_nr; | 1099 | wait_page.bit_nr = bit_nr; |
@@ -1084,14 +1110,17 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, | |||
1084 | 1110 | ||
1085 | spin_unlock_irq(&q->lock); | 1111 | spin_unlock_irq(&q->lock); |
1086 | 1112 | ||
1087 | if (likely(test_bit(bit_nr, &page->flags))) { | 1113 | bit_is_set = test_bit(bit_nr, &page->flags); |
1114 | if (behavior == DROP) | ||
1115 | put_page(page); | ||
1116 | |||
1117 | if (likely(bit_is_set)) | ||
1088 | io_schedule(); | 1118 | io_schedule(); |
1089 | } | ||
1090 | 1119 | ||
1091 | if (lock) { | 1120 | if (behavior == EXCLUSIVE) { |
1092 | if (!test_and_set_bit_lock(bit_nr, &page->flags)) | 1121 | if (!test_and_set_bit_lock(bit_nr, &page->flags)) |
1093 | break; | 1122 | break; |
1094 | } else { | 1123 | } else if (behavior == SHARED) { |
1095 | if (!test_bit(bit_nr, &page->flags)) | 1124 | if (!test_bit(bit_nr, &page->flags)) |
1096 | break; | 1125 | break; |
1097 | } | 1126 | } |
@@ -1100,12 +1129,23 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, | |||
1100 | ret = -EINTR; | 1129 | ret = -EINTR; |
1101 | break; | 1130 | break; |
1102 | } | 1131 | } |
1132 | |||
1133 | if (behavior == DROP) { | ||
1134 | /* | ||
1135 | * We can no longer safely access page->flags: | ||
1136 | * even if CONFIG_MEMORY_HOTREMOVE is not enabled, | ||
1137 | * there is a risk of waiting forever on a page reused | ||
1138 | * for something that keeps it locked indefinitely. | ||
1139 | * But best check for -EINTR above before breaking. | ||
1140 | */ | ||
1141 | break; | ||
1142 | } | ||
1103 | } | 1143 | } |
1104 | 1144 | ||
1105 | finish_wait(q, wait); | 1145 | finish_wait(q, wait); |
1106 | 1146 | ||
1107 | if (thrashing) { | 1147 | if (thrashing) { |
1108 | if (!PageSwapBacked(page)) | 1148 | if (delayacct) |
1109 | delayacct_thrashing_end(); | 1149 | delayacct_thrashing_end(); |
1110 | psi_memstall_leave(&pflags); | 1150 | psi_memstall_leave(&pflags); |
1111 | } | 1151 | } |
@@ -1124,18 +1164,37 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, | |||
1124 | void wait_on_page_bit(struct page *page, int bit_nr) | 1164 | void wait_on_page_bit(struct page *page, int bit_nr) |
1125 | { | 1165 | { |
1126 | wait_queue_head_t *q = page_waitqueue(page); | 1166 | wait_queue_head_t *q = page_waitqueue(page); |
1127 | wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false); | 1167 | wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); |
1128 | } | 1168 | } |
1129 | EXPORT_SYMBOL(wait_on_page_bit); | 1169 | EXPORT_SYMBOL(wait_on_page_bit); |
1130 | 1170 | ||
1131 | int wait_on_page_bit_killable(struct page *page, int bit_nr) | 1171 | int wait_on_page_bit_killable(struct page *page, int bit_nr) |
1132 | { | 1172 | { |
1133 | wait_queue_head_t *q = page_waitqueue(page); | 1173 | wait_queue_head_t *q = page_waitqueue(page); |
1134 | return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false); | 1174 | return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); |
1135 | } | 1175 | } |
1136 | EXPORT_SYMBOL(wait_on_page_bit_killable); | 1176 | EXPORT_SYMBOL(wait_on_page_bit_killable); |
1137 | 1177 | ||
1138 | /** | 1178 | /** |
1179 | * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked | ||
1180 | * @page: The page to wait for. | ||
1181 | * | ||
1182 | * The caller should hold a reference on @page. They expect the page to | ||
1183 | * become unlocked relatively soon, but do not wish to hold up migration | ||
1184 | * (for example) by holding the reference while waiting for the page to | ||
1185 | * come unlocked. After this function returns, the caller should not | ||
1186 | * dereference @page. | ||
1187 | */ | ||
1188 | void put_and_wait_on_page_locked(struct page *page) | ||
1189 | { | ||
1190 | wait_queue_head_t *q; | ||
1191 | |||
1192 | page = compound_head(page); | ||
1193 | q = page_waitqueue(page); | ||
1194 | wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); | ||
1195 | } | ||
1196 | |||
1197 | /** | ||
1139 | * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue | 1198 | * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue |
1140 | * @page: Page defining the wait queue of interest | 1199 | * @page: Page defining the wait queue of interest |
1141 | * @waiter: Waiter to add to the queue | 1200 | * @waiter: Waiter to add to the queue |
@@ -1264,7 +1323,8 @@ void __lock_page(struct page *__page) | |||
1264 | { | 1323 | { |
1265 | struct page *page = compound_head(__page); | 1324 | struct page *page = compound_head(__page); |
1266 | wait_queue_head_t *q = page_waitqueue(page); | 1325 | wait_queue_head_t *q = page_waitqueue(page); |
1267 | wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true); | 1326 | wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, |
1327 | EXCLUSIVE); | ||
1268 | } | 1328 | } |
1269 | EXPORT_SYMBOL(__lock_page); | 1329 | EXPORT_SYMBOL(__lock_page); |
1270 | 1330 | ||
@@ -1272,7 +1332,8 @@ int __lock_page_killable(struct page *__page) | |||
1272 | { | 1332 | { |
1273 | struct page *page = compound_head(__page); | 1333 | struct page *page = compound_head(__page); |
1274 | wait_queue_head_t *q = page_waitqueue(page); | 1334 | wait_queue_head_t *q = page_waitqueue(page); |
1275 | return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true); | 1335 | return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, |
1336 | EXCLUSIVE); | ||
1276 | } | 1337 | } |
1277 | EXPORT_SYMBOL_GPL(__lock_page_killable); | 1338 | EXPORT_SYMBOL_GPL(__lock_page_killable); |
1278 | 1339 | ||
@@ -1540,7 +1601,7 @@ repeat: | |||
1540 | VM_BUG_ON_PAGE(page->index != offset, page); | 1601 | VM_BUG_ON_PAGE(page->index != offset, page); |
1541 | } | 1602 | } |
1542 | 1603 | ||
1543 | if (page && (fgp_flags & FGP_ACCESSED)) | 1604 | if (fgp_flags & FGP_ACCESSED) |
1544 | mark_page_accessed(page); | 1605 | mark_page_accessed(page); |
1545 | 1606 | ||
1546 | no_page: | 1607 | no_page: |
@@ -2553,6 +2614,13 @@ void filemap_map_pages(struct vm_fault *vmf, | |||
2553 | goto next; | 2614 | goto next; |
2554 | 2615 | ||
2555 | head = compound_head(page); | 2616 | head = compound_head(page); |
2617 | |||
2618 | /* | ||
2619 | * Check for a locked page first, as a speculative | ||
2620 | * reference may adversely influence page migration. | ||
2621 | */ | ||
2622 | if (PageLocked(head)) | ||
2623 | goto next; | ||
2556 | if (!page_cache_get_speculative(head)) | 2624 | if (!page_cache_get_speculative(head)) |
2557 | goto next; | 2625 | goto next; |
2558 | 2626 | ||
diff --git a/mm/highmem.c b/mm/highmem.c index 59db3223a5d6..107b10f9878e 100644 --- a/mm/highmem.c +++ b/mm/highmem.c | |||
@@ -105,9 +105,8 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) | |||
105 | } | 105 | } |
106 | #endif | 106 | #endif |
107 | 107 | ||
108 | unsigned long totalhigh_pages __read_mostly; | 108 | atomic_long_t _totalhigh_pages __read_mostly; |
109 | EXPORT_SYMBOL(totalhigh_pages); | 109 | EXPORT_SYMBOL(_totalhigh_pages); |
110 | |||
111 | 110 | ||
112 | EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); | 111 | EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); |
113 | 112 | ||
@@ -189,35 +189,30 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
189 | } | 189 | } |
190 | 190 | ||
191 | static int hmm_invalidate_range_start(struct mmu_notifier *mn, | 191 | static int hmm_invalidate_range_start(struct mmu_notifier *mn, |
192 | struct mm_struct *mm, | 192 | const struct mmu_notifier_range *range) |
193 | unsigned long start, | ||
194 | unsigned long end, | ||
195 | bool blockable) | ||
196 | { | 193 | { |
197 | struct hmm_update update; | 194 | struct hmm_update update; |
198 | struct hmm *hmm = mm->hmm; | 195 | struct hmm *hmm = range->mm->hmm; |
199 | 196 | ||
200 | VM_BUG_ON(!hmm); | 197 | VM_BUG_ON(!hmm); |
201 | 198 | ||
202 | update.start = start; | 199 | update.start = range->start; |
203 | update.end = end; | 200 | update.end = range->end; |
204 | update.event = HMM_UPDATE_INVALIDATE; | 201 | update.event = HMM_UPDATE_INVALIDATE; |
205 | update.blockable = blockable; | 202 | update.blockable = range->blockable; |
206 | return hmm_invalidate_range(hmm, true, &update); | 203 | return hmm_invalidate_range(hmm, true, &update); |
207 | } | 204 | } |
208 | 205 | ||
209 | static void hmm_invalidate_range_end(struct mmu_notifier *mn, | 206 | static void hmm_invalidate_range_end(struct mmu_notifier *mn, |
210 | struct mm_struct *mm, | 207 | const struct mmu_notifier_range *range) |
211 | unsigned long start, | ||
212 | unsigned long end) | ||
213 | { | 208 | { |
214 | struct hmm_update update; | 209 | struct hmm_update update; |
215 | struct hmm *hmm = mm->hmm; | 210 | struct hmm *hmm = range->mm->hmm; |
216 | 211 | ||
217 | VM_BUG_ON(!hmm); | 212 | VM_BUG_ON(!hmm); |
218 | 213 | ||
219 | update.start = start; | 214 | update.start = range->start; |
220 | update.end = end; | 215 | update.end = range->end; |
221 | update.event = HMM_UPDATE_INVALIDATE; | 216 | update.event = HMM_UPDATE_INVALIDATE; |
222 | update.blockable = true; | 217 | update.blockable = true; |
223 | hmm_invalidate_range(hmm, false, &update); | 218 | hmm_invalidate_range(hmm, false, &update); |
@@ -986,19 +981,13 @@ static void hmm_devmem_ref_exit(void *data) | |||
986 | struct hmm_devmem *devmem; | 981 | struct hmm_devmem *devmem; |
987 | 982 | ||
988 | devmem = container_of(ref, struct hmm_devmem, ref); | 983 | devmem = container_of(ref, struct hmm_devmem, ref); |
984 | wait_for_completion(&devmem->completion); | ||
989 | percpu_ref_exit(ref); | 985 | percpu_ref_exit(ref); |
990 | devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data); | ||
991 | } | 986 | } |
992 | 987 | ||
993 | static void hmm_devmem_ref_kill(void *data) | 988 | static void hmm_devmem_ref_kill(struct percpu_ref *ref) |
994 | { | 989 | { |
995 | struct percpu_ref *ref = data; | ||
996 | struct hmm_devmem *devmem; | ||
997 | |||
998 | devmem = container_of(ref, struct hmm_devmem, ref); | ||
999 | percpu_ref_kill(ref); | 990 | percpu_ref_kill(ref); |
1000 | wait_for_completion(&devmem->completion); | ||
1001 | devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data); | ||
1002 | } | 991 | } |
1003 | 992 | ||
1004 | static int hmm_devmem_fault(struct vm_area_struct *vma, | 993 | static int hmm_devmem_fault(struct vm_area_struct *vma, |
@@ -1021,172 +1010,6 @@ static void hmm_devmem_free(struct page *page, void *data) | |||
1021 | devmem->ops->free(devmem, page); | 1010 | devmem->ops->free(devmem, page); |
1022 | } | 1011 | } |
1023 | 1012 | ||
1024 | static DEFINE_MUTEX(hmm_devmem_lock); | ||
1025 | static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL); | ||
1026 | |||
1027 | static void hmm_devmem_radix_release(struct resource *resource) | ||
1028 | { | ||
1029 | resource_size_t key; | ||
1030 | |||
1031 | mutex_lock(&hmm_devmem_lock); | ||
1032 | for (key = resource->start; | ||
1033 | key <= resource->end; | ||
1034 | key += PA_SECTION_SIZE) | ||
1035 | radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT); | ||
1036 | mutex_unlock(&hmm_devmem_lock); | ||
1037 | } | ||
1038 | |||
1039 | static void hmm_devmem_release(struct device *dev, void *data) | ||
1040 | { | ||
1041 | struct hmm_devmem *devmem = data; | ||
1042 | struct resource *resource = devmem->resource; | ||
1043 | unsigned long start_pfn, npages; | ||
1044 | struct zone *zone; | ||
1045 | struct page *page; | ||
1046 | |||
1047 | if (percpu_ref_tryget_live(&devmem->ref)) { | ||
1048 | dev_WARN(dev, "%s: page mapping is still live!\n", __func__); | ||
1049 | percpu_ref_put(&devmem->ref); | ||
1050 | } | ||
1051 | |||
1052 | /* pages are dead and unused, undo the arch mapping */ | ||
1053 | start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT; | ||
1054 | npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT; | ||
1055 | |||
1056 | page = pfn_to_page(start_pfn); | ||
1057 | zone = page_zone(page); | ||
1058 | |||
1059 | mem_hotplug_begin(); | ||
1060 | if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) | ||
1061 | __remove_pages(zone, start_pfn, npages, NULL); | ||
1062 | else | ||
1063 | arch_remove_memory(start_pfn << PAGE_SHIFT, | ||
1064 | npages << PAGE_SHIFT, NULL); | ||
1065 | mem_hotplug_done(); | ||
1066 | |||
1067 | hmm_devmem_radix_release(resource); | ||
1068 | } | ||
1069 | |||
1070 | static int hmm_devmem_pages_create(struct hmm_devmem *devmem) | ||
1071 | { | ||
1072 | resource_size_t key, align_start, align_size, align_end; | ||
1073 | struct device *device = devmem->device; | ||
1074 | int ret, nid, is_ram; | ||
1075 | |||
1076 | align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); | ||
1077 | align_size = ALIGN(devmem->resource->start + | ||
1078 | resource_size(devmem->resource), | ||
1079 | PA_SECTION_SIZE) - align_start; | ||
1080 | |||
1081 | is_ram = region_intersects(align_start, align_size, | ||
1082 | IORESOURCE_SYSTEM_RAM, | ||
1083 | IORES_DESC_NONE); | ||
1084 | if (is_ram == REGION_MIXED) { | ||
1085 | WARN_ONCE(1, "%s attempted on mixed region %pr\n", | ||
1086 | __func__, devmem->resource); | ||
1087 | return -ENXIO; | ||
1088 | } | ||
1089 | if (is_ram == REGION_INTERSECTS) | ||
1090 | return -ENXIO; | ||
1091 | |||
1092 | if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY) | ||
1093 | devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; | ||
1094 | else | ||
1095 | devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; | ||
1096 | |||
1097 | devmem->pagemap.res = *devmem->resource; | ||
1098 | devmem->pagemap.page_fault = hmm_devmem_fault; | ||
1099 | devmem->pagemap.page_free = hmm_devmem_free; | ||
1100 | devmem->pagemap.dev = devmem->device; | ||
1101 | devmem->pagemap.ref = &devmem->ref; | ||
1102 | devmem->pagemap.data = devmem; | ||
1103 | |||
1104 | mutex_lock(&hmm_devmem_lock); | ||
1105 | align_end = align_start + align_size - 1; | ||
1106 | for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) { | ||
1107 | struct hmm_devmem *dup; | ||
1108 | |||
1109 | dup = radix_tree_lookup(&hmm_devmem_radix, | ||
1110 | key >> PA_SECTION_SHIFT); | ||
1111 | if (dup) { | ||
1112 | dev_err(device, "%s: collides with mapping for %s\n", | ||
1113 | __func__, dev_name(dup->device)); | ||
1114 | mutex_unlock(&hmm_devmem_lock); | ||
1115 | ret = -EBUSY; | ||
1116 | goto error; | ||
1117 | } | ||
1118 | ret = radix_tree_insert(&hmm_devmem_radix, | ||
1119 | key >> PA_SECTION_SHIFT, | ||
1120 | devmem); | ||
1121 | if (ret) { | ||
1122 | dev_err(device, "%s: failed: %d\n", __func__, ret); | ||
1123 | mutex_unlock(&hmm_devmem_lock); | ||
1124 | goto error_radix; | ||
1125 | } | ||
1126 | } | ||
1127 | mutex_unlock(&hmm_devmem_lock); | ||
1128 | |||
1129 | nid = dev_to_node(device); | ||
1130 | if (nid < 0) | ||
1131 | nid = numa_mem_id(); | ||
1132 | |||
1133 | mem_hotplug_begin(); | ||
1134 | /* | ||
1135 | * For device private memory we call add_pages() as we only need to | ||
1136 | * allocate and initialize struct page for the device memory. More- | ||
1137 | * over the device memory is un-accessible thus we do not want to | ||
1138 | * create a linear mapping for the memory like arch_add_memory() | ||
1139 | * would do. | ||
1140 | * | ||
1141 | * For device public memory, which is accesible by the CPU, we do | ||
1142 | * want the linear mapping and thus use arch_add_memory(). | ||
1143 | */ | ||
1144 | if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC) | ||
1145 | ret = arch_add_memory(nid, align_start, align_size, NULL, | ||
1146 | false); | ||
1147 | else | ||
1148 | ret = add_pages(nid, align_start >> PAGE_SHIFT, | ||
1149 | align_size >> PAGE_SHIFT, NULL, false); | ||
1150 | if (ret) { | ||
1151 | mem_hotplug_done(); | ||
1152 | goto error_add_memory; | ||
1153 | } | ||
1154 | move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], | ||
1155 | align_start >> PAGE_SHIFT, | ||
1156 | align_size >> PAGE_SHIFT, NULL); | ||
1157 | mem_hotplug_done(); | ||
1158 | |||
1159 | /* | ||
1160 | * Initialization of the pages has been deferred until now in order | ||
1161 | * to allow us to do the work while not holding the hotplug lock. | ||
1162 | */ | ||
1163 | memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], | ||
1164 | align_start >> PAGE_SHIFT, | ||
1165 | align_size >> PAGE_SHIFT, &devmem->pagemap); | ||
1166 | |||
1167 | return 0; | ||
1168 | |||
1169 | error_add_memory: | ||
1170 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); | ||
1171 | error_radix: | ||
1172 | hmm_devmem_radix_release(devmem->resource); | ||
1173 | error: | ||
1174 | return ret; | ||
1175 | } | ||
1176 | |||
1177 | static int hmm_devmem_match(struct device *dev, void *data, void *match_data) | ||
1178 | { | ||
1179 | struct hmm_devmem *devmem = data; | ||
1180 | |||
1181 | return devmem->resource == match_data; | ||
1182 | } | ||
1183 | |||
1184 | static void hmm_devmem_pages_remove(struct hmm_devmem *devmem) | ||
1185 | { | ||
1186 | devres_release(devmem->device, &hmm_devmem_release, | ||
1187 | &hmm_devmem_match, devmem->resource); | ||
1188 | } | ||
1189 | |||
1190 | /* | 1013 | /* |
1191 | * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory | 1014 | * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory |
1192 | * | 1015 | * |
@@ -1210,12 +1033,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | |||
1210 | { | 1033 | { |
1211 | struct hmm_devmem *devmem; | 1034 | struct hmm_devmem *devmem; |
1212 | resource_size_t addr; | 1035 | resource_size_t addr; |
1036 | void *result; | ||
1213 | int ret; | 1037 | int ret; |
1214 | 1038 | ||
1215 | dev_pagemap_get_ops(); | 1039 | dev_pagemap_get_ops(); |
1216 | 1040 | ||
1217 | devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), | 1041 | devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); |
1218 | GFP_KERNEL, dev_to_node(device)); | ||
1219 | if (!devmem) | 1042 | if (!devmem) |
1220 | return ERR_PTR(-ENOMEM); | 1043 | return ERR_PTR(-ENOMEM); |
1221 | 1044 | ||
@@ -1229,11 +1052,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | |||
1229 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, | 1052 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, |
1230 | 0, GFP_KERNEL); | 1053 | 0, GFP_KERNEL); |
1231 | if (ret) | 1054 | if (ret) |
1232 | goto error_percpu_ref; | 1055 | return ERR_PTR(ret); |
1233 | 1056 | ||
1234 | ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); | 1057 | ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref); |
1235 | if (ret) | 1058 | if (ret) |
1236 | goto error_devm_add_action; | 1059 | return ERR_PTR(ret); |
1237 | 1060 | ||
1238 | size = ALIGN(size, PA_SECTION_SIZE); | 1061 | size = ALIGN(size, PA_SECTION_SIZE); |
1239 | addr = min((unsigned long)iomem_resource.end, | 1062 | addr = min((unsigned long)iomem_resource.end, |
@@ -1253,54 +1076,40 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | |||
1253 | 1076 | ||
1254 | devmem->resource = devm_request_mem_region(device, addr, size, | 1077 | devmem->resource = devm_request_mem_region(device, addr, size, |
1255 | dev_name(device)); | 1078 | dev_name(device)); |
1256 | if (!devmem->resource) { | 1079 | if (!devmem->resource) |
1257 | ret = -ENOMEM; | 1080 | return ERR_PTR(-ENOMEM); |
1258 | goto error_no_resource; | ||
1259 | } | ||
1260 | break; | 1081 | break; |
1261 | } | 1082 | } |
1262 | if (!devmem->resource) { | 1083 | if (!devmem->resource) |
1263 | ret = -ERANGE; | 1084 | return ERR_PTR(-ERANGE); |
1264 | goto error_no_resource; | ||
1265 | } | ||
1266 | 1085 | ||
1267 | devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; | 1086 | devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; |
1268 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; | 1087 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; |
1269 | devmem->pfn_last = devmem->pfn_first + | 1088 | devmem->pfn_last = devmem->pfn_first + |
1270 | (resource_size(devmem->resource) >> PAGE_SHIFT); | 1089 | (resource_size(devmem->resource) >> PAGE_SHIFT); |
1090 | devmem->page_fault = hmm_devmem_fault; | ||
1271 | 1091 | ||
1272 | ret = hmm_devmem_pages_create(devmem); | 1092 | devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; |
1273 | if (ret) | 1093 | devmem->pagemap.res = *devmem->resource; |
1274 | goto error_pages; | 1094 | devmem->pagemap.page_free = hmm_devmem_free; |
1275 | 1095 | devmem->pagemap.altmap_valid = false; | |
1276 | devres_add(device, devmem); | 1096 | devmem->pagemap.ref = &devmem->ref; |
1277 | 1097 | devmem->pagemap.data = devmem; | |
1278 | ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); | 1098 | devmem->pagemap.kill = hmm_devmem_ref_kill; |
1279 | if (ret) { | ||
1280 | hmm_devmem_remove(devmem); | ||
1281 | return ERR_PTR(ret); | ||
1282 | } | ||
1283 | 1099 | ||
1100 | result = devm_memremap_pages(devmem->device, &devmem->pagemap); | ||
1101 | if (IS_ERR(result)) | ||
1102 | return result; | ||
1284 | return devmem; | 1103 | return devmem; |
1285 | |||
1286 | error_pages: | ||
1287 | devm_release_mem_region(device, devmem->resource->start, | ||
1288 | resource_size(devmem->resource)); | ||
1289 | error_no_resource: | ||
1290 | error_devm_add_action: | ||
1291 | hmm_devmem_ref_kill(&devmem->ref); | ||
1292 | hmm_devmem_ref_exit(&devmem->ref); | ||
1293 | error_percpu_ref: | ||
1294 | devres_free(devmem); | ||
1295 | return ERR_PTR(ret); | ||
1296 | } | 1104 | } |
1297 | EXPORT_SYMBOL(hmm_devmem_add); | 1105 | EXPORT_SYMBOL_GPL(hmm_devmem_add); |
1298 | 1106 | ||
1299 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, | 1107 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
1300 | struct device *device, | 1108 | struct device *device, |
1301 | struct resource *res) | 1109 | struct resource *res) |
1302 | { | 1110 | { |
1303 | struct hmm_devmem *devmem; | 1111 | struct hmm_devmem *devmem; |
1112 | void *result; | ||
1304 | int ret; | 1113 | int ret; |
1305 | 1114 | ||
1306 | if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) | 1115 | if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) |
@@ -1308,8 +1117,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, | |||
1308 | 1117 | ||
1309 | dev_pagemap_get_ops(); | 1118 | dev_pagemap_get_ops(); |
1310 | 1119 | ||
1311 | devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), | 1120 | devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); |
1312 | GFP_KERNEL, dev_to_node(device)); | ||
1313 | if (!devmem) | 1121 | if (!devmem) |
1314 | return ERR_PTR(-ENOMEM); | 1122 | return ERR_PTR(-ENOMEM); |
1315 | 1123 | ||
@@ -1323,71 +1131,32 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, | |||
1323 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, | 1131 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, |
1324 | 0, GFP_KERNEL); | 1132 | 0, GFP_KERNEL); |
1325 | if (ret) | 1133 | if (ret) |
1326 | goto error_percpu_ref; | 1134 | return ERR_PTR(ret); |
1327 | 1135 | ||
1328 | ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); | 1136 | ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, |
1137 | &devmem->ref); | ||
1329 | if (ret) | 1138 | if (ret) |
1330 | goto error_devm_add_action; | 1139 | return ERR_PTR(ret); |
1331 | |||
1332 | 1140 | ||
1333 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; | 1141 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; |
1334 | devmem->pfn_last = devmem->pfn_first + | 1142 | devmem->pfn_last = devmem->pfn_first + |
1335 | (resource_size(devmem->resource) >> PAGE_SHIFT); | 1143 | (resource_size(devmem->resource) >> PAGE_SHIFT); |
1144 | devmem->page_fault = hmm_devmem_fault; | ||
1336 | 1145 | ||
1337 | ret = hmm_devmem_pages_create(devmem); | 1146 | devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; |
1338 | if (ret) | 1147 | devmem->pagemap.res = *devmem->resource; |
1339 | goto error_devm_add_action; | 1148 | devmem->pagemap.page_free = hmm_devmem_free; |
1340 | 1149 | devmem->pagemap.altmap_valid = false; | |
1341 | devres_add(device, devmem); | 1150 | devmem->pagemap.ref = &devmem->ref; |
1342 | 1151 | devmem->pagemap.data = devmem; | |
1343 | ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); | 1152 | devmem->pagemap.kill = hmm_devmem_ref_kill; |
1344 | if (ret) { | ||
1345 | hmm_devmem_remove(devmem); | ||
1346 | return ERR_PTR(ret); | ||
1347 | } | ||
1348 | 1153 | ||
1154 | result = devm_memremap_pages(devmem->device, &devmem->pagemap); | ||
1155 | if (IS_ERR(result)) | ||
1156 | return result; | ||
1349 | return devmem; | 1157 | return devmem; |
1350 | |||
1351 | error_devm_add_action: | ||
1352 | hmm_devmem_ref_kill(&devmem->ref); | ||
1353 | hmm_devmem_ref_exit(&devmem->ref); | ||
1354 | error_percpu_ref: | ||
1355 | devres_free(devmem); | ||
1356 | return ERR_PTR(ret); | ||
1357 | } | ||
1358 | EXPORT_SYMBOL(hmm_devmem_add_resource); | ||
1359 | |||
1360 | /* | ||
1361 | * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE) | ||
1362 | * | ||
1363 | * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory | ||
1364 | * | ||
1365 | * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf | ||
1366 | * of the device driver. It will free struct page and remove the resource that | ||
1367 | * reserved the physical address range for this device memory. | ||
1368 | */ | ||
1369 | void hmm_devmem_remove(struct hmm_devmem *devmem) | ||
1370 | { | ||
1371 | resource_size_t start, size; | ||
1372 | struct device *device; | ||
1373 | bool cdm = false; | ||
1374 | |||
1375 | if (!devmem) | ||
1376 | return; | ||
1377 | |||
1378 | device = devmem->device; | ||
1379 | start = devmem->resource->start; | ||
1380 | size = resource_size(devmem->resource); | ||
1381 | |||
1382 | cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY; | ||
1383 | hmm_devmem_ref_kill(&devmem->ref); | ||
1384 | hmm_devmem_ref_exit(&devmem->ref); | ||
1385 | hmm_devmem_pages_remove(devmem); | ||
1386 | |||
1387 | if (!cdm) | ||
1388 | devm_release_mem_region(device, start, size); | ||
1389 | } | 1158 | } |
1390 | EXPORT_SYMBOL(hmm_devmem_remove); | 1159 | EXPORT_SYMBOL_GPL(hmm_devmem_add_resource); |
1391 | 1160 | ||
1392 | /* | 1161 | /* |
1393 | * A device driver that wants to handle multiple devices memory through a | 1162 | * A device driver that wants to handle multiple devices memory through a |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e84a10b0d310..cbd977b1d60d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -62,6 +62,16 @@ static struct shrinker deferred_split_shrinker; | |||
62 | static atomic_t huge_zero_refcount; | 62 | static atomic_t huge_zero_refcount; |
63 | struct page *huge_zero_page __read_mostly; | 63 | struct page *huge_zero_page __read_mostly; |
64 | 64 | ||
65 | bool transparent_hugepage_enabled(struct vm_area_struct *vma) | ||
66 | { | ||
67 | if (vma_is_anonymous(vma)) | ||
68 | return __transparent_hugepage_enabled(vma); | ||
69 | if (vma_is_shmem(vma) && shmem_huge_enabled(vma)) | ||
70 | return __transparent_hugepage_enabled(vma); | ||
71 | |||
72 | return false; | ||
73 | } | ||
74 | |||
65 | static struct page *get_huge_zero_page(void) | 75 | static struct page *get_huge_zero_page(void) |
66 | { | 76 | { |
67 | struct page *zero_page; | 77 | struct page *zero_page; |
@@ -420,7 +430,7 @@ static int __init hugepage_init(void) | |||
420 | * where the extra memory used could hurt more than TLB overhead | 430 | * where the extra memory used could hurt more than TLB overhead |
421 | * is likely to save. The admin can still enable it through /sys. | 431 | * is likely to save. The admin can still enable it through /sys. |
422 | */ | 432 | */ |
423 | if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { | 433 | if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { |
424 | transparent_hugepage_flags = 0; | 434 | transparent_hugepage_flags = 0; |
425 | return 0; | 435 | return 0; |
426 | } | 436 | } |
@@ -1134,8 +1144,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, | |||
1134 | int i; | 1144 | int i; |
1135 | vm_fault_t ret = 0; | 1145 | vm_fault_t ret = 0; |
1136 | struct page **pages; | 1146 | struct page **pages; |
1137 | unsigned long mmun_start; /* For mmu_notifiers */ | 1147 | struct mmu_notifier_range range; |
1138 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
1139 | 1148 | ||
1140 | pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), | 1149 | pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), |
1141 | GFP_KERNEL); | 1150 | GFP_KERNEL); |
@@ -1173,9 +1182,9 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, | |||
1173 | cond_resched(); | 1182 | cond_resched(); |
1174 | } | 1183 | } |
1175 | 1184 | ||
1176 | mmun_start = haddr; | 1185 | mmu_notifier_range_init(&range, vma->vm_mm, haddr, |
1177 | mmun_end = haddr + HPAGE_PMD_SIZE; | 1186 | haddr + HPAGE_PMD_SIZE); |
1178 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); | 1187 | mmu_notifier_invalidate_range_start(&range); |
1179 | 1188 | ||
1180 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); | 1189 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
1181 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) | 1190 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
@@ -1220,8 +1229,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, | |||
1220 | * No need to double call mmu_notifier->invalidate_range() callback as | 1229 | * No need to double call mmu_notifier->invalidate_range() callback as |
1221 | * the above pmdp_huge_clear_flush_notify() did already call it. | 1230 | * the above pmdp_huge_clear_flush_notify() did already call it. |
1222 | */ | 1231 | */ |
1223 | mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, | 1232 | mmu_notifier_invalidate_range_only_end(&range); |
1224 | mmun_end); | ||
1225 | 1233 | ||
1226 | ret |= VM_FAULT_WRITE; | 1234 | ret |= VM_FAULT_WRITE; |
1227 | put_page(page); | 1235 | put_page(page); |
@@ -1231,7 +1239,7 @@ out: | |||
1231 | 1239 | ||
1232 | out_free_pages: | 1240 | out_free_pages: |
1233 | spin_unlock(vmf->ptl); | 1241 | spin_unlock(vmf->ptl); |
1234 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); | 1242 | mmu_notifier_invalidate_range_end(&range); |
1235 | for (i = 0; i < HPAGE_PMD_NR; i++) { | 1243 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
1236 | memcg = (void *)page_private(pages[i]); | 1244 | memcg = (void *)page_private(pages[i]); |
1237 | set_page_private(pages[i], 0); | 1245 | set_page_private(pages[i], 0); |
@@ -1248,8 +1256,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) | |||
1248 | struct page *page = NULL, *new_page; | 1256 | struct page *page = NULL, *new_page; |
1249 | struct mem_cgroup *memcg; | 1257 | struct mem_cgroup *memcg; |
1250 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; | 1258 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
1251 | unsigned long mmun_start; /* For mmu_notifiers */ | 1259 | struct mmu_notifier_range range; |
1252 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
1253 | gfp_t huge_gfp; /* for allocation and charge */ | 1260 | gfp_t huge_gfp; /* for allocation and charge */ |
1254 | vm_fault_t ret = 0; | 1261 | vm_fault_t ret = 0; |
1255 | 1262 | ||
@@ -1293,7 +1300,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) | |||
1293 | get_page(page); | 1300 | get_page(page); |
1294 | spin_unlock(vmf->ptl); | 1301 | spin_unlock(vmf->ptl); |
1295 | alloc: | 1302 | alloc: |
1296 | if (transparent_hugepage_enabled(vma) && | 1303 | if (__transparent_hugepage_enabled(vma) && |
1297 | !transparent_hugepage_debug_cow()) { | 1304 | !transparent_hugepage_debug_cow()) { |
1298 | huge_gfp = alloc_hugepage_direct_gfpmask(vma); | 1305 | huge_gfp = alloc_hugepage_direct_gfpmask(vma); |
1299 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); | 1306 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
@@ -1338,9 +1345,9 @@ alloc: | |||
1338 | vma, HPAGE_PMD_NR); | 1345 | vma, HPAGE_PMD_NR); |
1339 | __SetPageUptodate(new_page); | 1346 | __SetPageUptodate(new_page); |
1340 | 1347 | ||
1341 | mmun_start = haddr; | 1348 | mmu_notifier_range_init(&range, vma->vm_mm, haddr, |
1342 | mmun_end = haddr + HPAGE_PMD_SIZE; | 1349 | haddr + HPAGE_PMD_SIZE); |
1343 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); | 1350 | mmu_notifier_invalidate_range_start(&range); |
1344 | 1351 | ||
1345 | spin_lock(vmf->ptl); | 1352 | spin_lock(vmf->ptl); |
1346 | if (page) | 1353 | if (page) |
@@ -1375,8 +1382,7 @@ out_mn: | |||
1375 | * No need to double call mmu_notifier->invalidate_range() callback as | 1382 | * No need to double call mmu_notifier->invalidate_range() callback as |
1376 | * the above pmdp_huge_clear_flush_notify() did already call it. | 1383 | * the above pmdp_huge_clear_flush_notify() did already call it. |
1377 | */ | 1384 | */ |
1378 | mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, | 1385 | mmu_notifier_invalidate_range_only_end(&range); |
1379 | mmun_end); | ||
1380 | out: | 1386 | out: |
1381 | return ret; | 1387 | return ret; |
1382 | out_unlock: | 1388 | out_unlock: |
@@ -1490,8 +1496,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1490 | if (!get_page_unless_zero(page)) | 1496 | if (!get_page_unless_zero(page)) |
1491 | goto out_unlock; | 1497 | goto out_unlock; |
1492 | spin_unlock(vmf->ptl); | 1498 | spin_unlock(vmf->ptl); |
1493 | wait_on_page_locked(page); | 1499 | put_and_wait_on_page_locked(page); |
1494 | put_page(page); | ||
1495 | goto out; | 1500 | goto out; |
1496 | } | 1501 | } |
1497 | 1502 | ||
@@ -1527,8 +1532,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1527 | if (!get_page_unless_zero(page)) | 1532 | if (!get_page_unless_zero(page)) |
1528 | goto out_unlock; | 1533 | goto out_unlock; |
1529 | spin_unlock(vmf->ptl); | 1534 | spin_unlock(vmf->ptl); |
1530 | wait_on_page_locked(page); | 1535 | put_and_wait_on_page_locked(page); |
1531 | put_page(page); | ||
1532 | goto out; | 1536 | goto out; |
1533 | } | 1537 | } |
1534 | 1538 | ||
@@ -2017,14 +2021,15 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, | |||
2017 | unsigned long address) | 2021 | unsigned long address) |
2018 | { | 2022 | { |
2019 | spinlock_t *ptl; | 2023 | spinlock_t *ptl; |
2020 | struct mm_struct *mm = vma->vm_mm; | 2024 | struct mmu_notifier_range range; |
2021 | unsigned long haddr = address & HPAGE_PUD_MASK; | ||
2022 | 2025 | ||
2023 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); | 2026 | mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK, |
2024 | ptl = pud_lock(mm, pud); | 2027 | (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); |
2028 | mmu_notifier_invalidate_range_start(&range); | ||
2029 | ptl = pud_lock(vma->vm_mm, pud); | ||
2025 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) | 2030 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) |
2026 | goto out; | 2031 | goto out; |
2027 | __split_huge_pud_locked(vma, pud, haddr); | 2032 | __split_huge_pud_locked(vma, pud, range.start); |
2028 | 2033 | ||
2029 | out: | 2034 | out: |
2030 | spin_unlock(ptl); | 2035 | spin_unlock(ptl); |
@@ -2032,8 +2037,7 @@ out: | |||
2032 | * No need to double call mmu_notifier->invalidate_range() callback as | 2037 | * No need to double call mmu_notifier->invalidate_range() callback as |
2033 | * the above pudp_huge_clear_flush_notify() did already call it. | 2038 | * the above pudp_huge_clear_flush_notify() did already call it. |
2034 | */ | 2039 | */ |
2035 | mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + | 2040 | mmu_notifier_invalidate_range_only_end(&range); |
2036 | HPAGE_PUD_SIZE); | ||
2037 | } | 2041 | } |
2038 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | 2042 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
2039 | 2043 | ||
@@ -2235,11 +2239,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
2235 | unsigned long address, bool freeze, struct page *page) | 2239 | unsigned long address, bool freeze, struct page *page) |
2236 | { | 2240 | { |
2237 | spinlock_t *ptl; | 2241 | spinlock_t *ptl; |
2238 | struct mm_struct *mm = vma->vm_mm; | 2242 | struct mmu_notifier_range range; |
2239 | unsigned long haddr = address & HPAGE_PMD_MASK; | ||
2240 | 2243 | ||
2241 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); | 2244 | mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK, |
2242 | ptl = pmd_lock(mm, pmd); | 2245 | (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); |
2246 | mmu_notifier_invalidate_range_start(&range); | ||
2247 | ptl = pmd_lock(vma->vm_mm, pmd); | ||
2243 | 2248 | ||
2244 | /* | 2249 | /* |
2245 | * If caller asks to setup a migration entries, we need a page to check | 2250 | * If caller asks to setup a migration entries, we need a page to check |
@@ -2255,7 +2260,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
2255 | clear_page_mlock(page); | 2260 | clear_page_mlock(page); |
2256 | } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) | 2261 | } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) |
2257 | goto out; | 2262 | goto out; |
2258 | __split_huge_pmd_locked(vma, pmd, haddr, freeze); | 2263 | __split_huge_pmd_locked(vma, pmd, range.start, freeze); |
2259 | out: | 2264 | out: |
2260 | spin_unlock(ptl); | 2265 | spin_unlock(ptl); |
2261 | /* | 2266 | /* |
@@ -2271,8 +2276,7 @@ out: | |||
2271 | * any further changes to individual pte will notify. So no need | 2276 | * any further changes to individual pte will notify. So no need |
2272 | * to call mmu_notifier->invalidate_range() | 2277 | * to call mmu_notifier->invalidate_range() |
2273 | */ | 2278 | */ |
2274 | mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + | 2279 | mmu_notifier_invalidate_range_only_end(&range); |
2275 | HPAGE_PMD_SIZE); | ||
2276 | } | 2280 | } |
2277 | 2281 | ||
2278 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, | 2282 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a80832487981..e37efd5d8318 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -3238,24 +3238,35 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | |||
3238 | struct page *ptepage; | 3238 | struct page *ptepage; |
3239 | unsigned long addr; | 3239 | unsigned long addr; |
3240 | int cow; | 3240 | int cow; |
3241 | struct address_space *mapping = vma->vm_file->f_mapping; | ||
3241 | struct hstate *h = hstate_vma(vma); | 3242 | struct hstate *h = hstate_vma(vma); |
3242 | unsigned long sz = huge_page_size(h); | 3243 | unsigned long sz = huge_page_size(h); |
3243 | unsigned long mmun_start; /* For mmu_notifiers */ | 3244 | struct mmu_notifier_range range; |
3244 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
3245 | int ret = 0; | 3245 | int ret = 0; |
3246 | 3246 | ||
3247 | cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; | 3247 | cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; |
3248 | 3248 | ||
3249 | mmun_start = vma->vm_start; | 3249 | if (cow) { |
3250 | mmun_end = vma->vm_end; | 3250 | mmu_notifier_range_init(&range, src, vma->vm_start, |
3251 | if (cow) | 3251 | vma->vm_end); |
3252 | mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end); | 3252 | mmu_notifier_invalidate_range_start(&range); |
3253 | } else { | ||
3254 | /* | ||
3255 | * For shared mappings i_mmap_rwsem must be held to call | ||
3256 | * huge_pte_alloc, otherwise the returned ptep could go | ||
3257 | * away if part of a shared pmd and another thread calls | ||
3258 | * huge_pmd_unshare. | ||
3259 | */ | ||
3260 | i_mmap_lock_read(mapping); | ||
3261 | } | ||
3253 | 3262 | ||
3254 | for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { | 3263 | for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { |
3255 | spinlock_t *src_ptl, *dst_ptl; | 3264 | spinlock_t *src_ptl, *dst_ptl; |
3265 | |||
3256 | src_pte = huge_pte_offset(src, addr, sz); | 3266 | src_pte = huge_pte_offset(src, addr, sz); |
3257 | if (!src_pte) | 3267 | if (!src_pte) |
3258 | continue; | 3268 | continue; |
3269 | |||
3259 | dst_pte = huge_pte_alloc(dst, addr, sz); | 3270 | dst_pte = huge_pte_alloc(dst, addr, sz); |
3260 | if (!dst_pte) { | 3271 | if (!dst_pte) { |
3261 | ret = -ENOMEM; | 3272 | ret = -ENOMEM; |
@@ -3325,7 +3336,9 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | |||
3325 | } | 3336 | } |
3326 | 3337 | ||
3327 | if (cow) | 3338 | if (cow) |
3328 | mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); | 3339 | mmu_notifier_invalidate_range_end(&range); |
3340 | else | ||
3341 | i_mmap_unlock_read(mapping); | ||
3329 | 3342 | ||
3330 | return ret; | 3343 | return ret; |
3331 | } | 3344 | } |
@@ -3342,8 +3355,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
3342 | struct page *page; | 3355 | struct page *page; |
3343 | struct hstate *h = hstate_vma(vma); | 3356 | struct hstate *h = hstate_vma(vma); |
3344 | unsigned long sz = huge_page_size(h); | 3357 | unsigned long sz = huge_page_size(h); |
3345 | unsigned long mmun_start = start; /* For mmu_notifiers */ | 3358 | struct mmu_notifier_range range; |
3346 | unsigned long mmun_end = end; /* For mmu_notifiers */ | ||
3347 | 3359 | ||
3348 | WARN_ON(!is_vm_hugetlb_page(vma)); | 3360 | WARN_ON(!is_vm_hugetlb_page(vma)); |
3349 | BUG_ON(start & ~huge_page_mask(h)); | 3361 | BUG_ON(start & ~huge_page_mask(h)); |
@@ -3359,8 +3371,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
3359 | /* | 3371 | /* |
3360 | * If sharing possible, alert mmu notifiers of worst case. | 3372 | * If sharing possible, alert mmu notifiers of worst case. |
3361 | */ | 3373 | */ |
3362 | adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end); | 3374 | mmu_notifier_range_init(&range, mm, start, end); |
3363 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 3375 | adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); |
3376 | mmu_notifier_invalidate_range_start(&range); | ||
3364 | address = start; | 3377 | address = start; |
3365 | for (; address < end; address += sz) { | 3378 | for (; address < end; address += sz) { |
3366 | ptep = huge_pte_offset(mm, address, sz); | 3379 | ptep = huge_pte_offset(mm, address, sz); |
@@ -3428,7 +3441,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
3428 | if (ref_page) | 3441 | if (ref_page) |
3429 | break; | 3442 | break; |
3430 | } | 3443 | } |
3431 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 3444 | mmu_notifier_invalidate_range_end(&range); |
3432 | tlb_end_vma(tlb, vma); | 3445 | tlb_end_vma(tlb, vma); |
3433 | } | 3446 | } |
3434 | 3447 | ||
@@ -3546,9 +3559,8 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3546 | struct page *old_page, *new_page; | 3559 | struct page *old_page, *new_page; |
3547 | int outside_reserve = 0; | 3560 | int outside_reserve = 0; |
3548 | vm_fault_t ret = 0; | 3561 | vm_fault_t ret = 0; |
3549 | unsigned long mmun_start; /* For mmu_notifiers */ | ||
3550 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
3551 | unsigned long haddr = address & huge_page_mask(h); | 3562 | unsigned long haddr = address & huge_page_mask(h); |
3563 | struct mmu_notifier_range range; | ||
3552 | 3564 | ||
3553 | pte = huge_ptep_get(ptep); | 3565 | pte = huge_ptep_get(ptep); |
3554 | old_page = pte_page(pte); | 3566 | old_page = pte_page(pte); |
@@ -3627,9 +3639,8 @@ retry_avoidcopy: | |||
3627 | __SetPageUptodate(new_page); | 3639 | __SetPageUptodate(new_page); |
3628 | set_page_huge_active(new_page); | 3640 | set_page_huge_active(new_page); |
3629 | 3641 | ||
3630 | mmun_start = haddr; | 3642 | mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h)); |
3631 | mmun_end = mmun_start + huge_page_size(h); | 3643 | mmu_notifier_invalidate_range_start(&range); |
3632 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
3633 | 3644 | ||
3634 | /* | 3645 | /* |
3635 | * Retake the page table lock to check for racing updates | 3646 | * Retake the page table lock to check for racing updates |
@@ -3642,7 +3653,7 @@ retry_avoidcopy: | |||
3642 | 3653 | ||
3643 | /* Break COW */ | 3654 | /* Break COW */ |
3644 | huge_ptep_clear_flush(vma, haddr, ptep); | 3655 | huge_ptep_clear_flush(vma, haddr, ptep); |
3645 | mmu_notifier_invalidate_range(mm, mmun_start, mmun_end); | 3656 | mmu_notifier_invalidate_range(mm, range.start, range.end); |
3646 | set_huge_pte_at(mm, haddr, ptep, | 3657 | set_huge_pte_at(mm, haddr, ptep, |
3647 | make_huge_pte(vma, new_page, 1)); | 3658 | make_huge_pte(vma, new_page, 1)); |
3648 | page_remove_rmap(old_page, true); | 3659 | page_remove_rmap(old_page, true); |
@@ -3651,7 +3662,7 @@ retry_avoidcopy: | |||
3651 | new_page = old_page; | 3662 | new_page = old_page; |
3652 | } | 3663 | } |
3653 | spin_unlock(ptl); | 3664 | spin_unlock(ptl); |
3654 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 3665 | mmu_notifier_invalidate_range_end(&range); |
3655 | out_release_all: | 3666 | out_release_all: |
3656 | restore_reserve_on_error(h, vma, haddr, new_page); | 3667 | restore_reserve_on_error(h, vma, haddr, new_page); |
3657 | put_page(new_page); | 3668 | put_page(new_page); |
@@ -3744,16 +3755,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, | |||
3744 | } | 3755 | } |
3745 | 3756 | ||
3746 | /* | 3757 | /* |
3747 | * Use page lock to guard against racing truncation | 3758 | * We can not race with truncation due to holding i_mmap_rwsem. |
3748 | * before we get page_table_lock. | 3759 | * Check once here for faults beyond end of file. |
3749 | */ | 3760 | */ |
3761 | size = i_size_read(mapping->host) >> huge_page_shift(h); | ||
3762 | if (idx >= size) | ||
3763 | goto out; | ||
3764 | |||
3750 | retry: | 3765 | retry: |
3751 | page = find_lock_page(mapping, idx); | 3766 | page = find_lock_page(mapping, idx); |
3752 | if (!page) { | 3767 | if (!page) { |
3753 | size = i_size_read(mapping->host) >> huge_page_shift(h); | ||
3754 | if (idx >= size) | ||
3755 | goto out; | ||
3756 | |||
3757 | /* | 3768 | /* |
3758 | * Check for page in userfault range | 3769 | * Check for page in userfault range |
3759 | */ | 3770 | */ |
@@ -3773,14 +3784,18 @@ retry: | |||
3773 | }; | 3784 | }; |
3774 | 3785 | ||
3775 | /* | 3786 | /* |
3776 | * hugetlb_fault_mutex must be dropped before | 3787 | * hugetlb_fault_mutex and i_mmap_rwsem must be |
3777 | * handling userfault. Reacquire after handling | 3788 | * dropped before handling userfault. Reacquire |
3778 | * fault to make calling code simpler. | 3789 | * after handling fault to make calling code simpler. |
3779 | */ | 3790 | */ |
3780 | hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, | 3791 | hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, |
3781 | idx, haddr); | 3792 | idx, haddr); |
3782 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 3793 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
3794 | i_mmap_unlock_read(mapping); | ||
3795 | |||
3783 | ret = handle_userfault(&vmf, VM_UFFD_MISSING); | 3796 | ret = handle_userfault(&vmf, VM_UFFD_MISSING); |
3797 | |||
3798 | i_mmap_lock_read(mapping); | ||
3784 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 3799 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
3785 | goto out; | 3800 | goto out; |
3786 | } | 3801 | } |
@@ -3839,9 +3854,6 @@ retry: | |||
3839 | } | 3854 | } |
3840 | 3855 | ||
3841 | ptl = huge_pte_lock(h, mm, ptep); | 3856 | ptl = huge_pte_lock(h, mm, ptep); |
3842 | size = i_size_read(mapping->host) >> huge_page_shift(h); | ||
3843 | if (idx >= size) | ||
3844 | goto backout; | ||
3845 | 3857 | ||
3846 | ret = 0; | 3858 | ret = 0; |
3847 | if (!huge_pte_none(huge_ptep_get(ptep))) | 3859 | if (!huge_pte_none(huge_ptep_get(ptep))) |
@@ -3928,6 +3940,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3928 | 3940 | ||
3929 | ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); | 3941 | ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); |
3930 | if (ptep) { | 3942 | if (ptep) { |
3943 | /* | ||
3944 | * Since we hold no locks, ptep could be stale. That is | ||
3945 | * OK as we are only making decisions based on content and | ||
3946 | * not actually modifying content here. | ||
3947 | */ | ||
3931 | entry = huge_ptep_get(ptep); | 3948 | entry = huge_ptep_get(ptep); |
3932 | if (unlikely(is_hugetlb_entry_migration(entry))) { | 3949 | if (unlikely(is_hugetlb_entry_migration(entry))) { |
3933 | migration_entry_wait_huge(vma, mm, ptep); | 3950 | migration_entry_wait_huge(vma, mm, ptep); |
@@ -3935,20 +3952,33 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3935 | } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) | 3952 | } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) |
3936 | return VM_FAULT_HWPOISON_LARGE | | 3953 | return VM_FAULT_HWPOISON_LARGE | |
3937 | VM_FAULT_SET_HINDEX(hstate_index(h)); | 3954 | VM_FAULT_SET_HINDEX(hstate_index(h)); |
3938 | } else { | ||
3939 | ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); | ||
3940 | if (!ptep) | ||
3941 | return VM_FAULT_OOM; | ||
3942 | } | 3955 | } |
3943 | 3956 | ||
3957 | /* | ||
3958 | * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold | ||
3959 | * until finished with ptep. This serves two purposes: | ||
3960 | * 1) It prevents huge_pmd_unshare from being called elsewhere | ||
3961 | * and making the ptep no longer valid. | ||
3962 | * 2) It synchronizes us with file truncation. | ||
3963 | * | ||
3964 | * ptep could have already be assigned via huge_pte_offset. That | ||
3965 | * is OK, as huge_pte_alloc will return the same value unless | ||
3966 | * something changed. | ||
3967 | */ | ||
3944 | mapping = vma->vm_file->f_mapping; | 3968 | mapping = vma->vm_file->f_mapping; |
3945 | idx = vma_hugecache_offset(h, vma, haddr); | 3969 | i_mmap_lock_read(mapping); |
3970 | ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); | ||
3971 | if (!ptep) { | ||
3972 | i_mmap_unlock_read(mapping); | ||
3973 | return VM_FAULT_OOM; | ||
3974 | } | ||
3946 | 3975 | ||
3947 | /* | 3976 | /* |
3948 | * Serialize hugepage allocation and instantiation, so that we don't | 3977 | * Serialize hugepage allocation and instantiation, so that we don't |
3949 | * get spurious allocation failures if two CPUs race to instantiate | 3978 | * get spurious allocation failures if two CPUs race to instantiate |
3950 | * the same page in the page cache. | 3979 | * the same page in the page cache. |
3951 | */ | 3980 | */ |
3981 | idx = vma_hugecache_offset(h, vma, haddr); | ||
3952 | hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); | 3982 | hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); |
3953 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 3983 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
3954 | 3984 | ||
@@ -4036,6 +4066,7 @@ out_ptl: | |||
4036 | } | 4066 | } |
4037 | out_mutex: | 4067 | out_mutex: |
4038 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 4068 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
4069 | i_mmap_unlock_read(mapping); | ||
4039 | /* | 4070 | /* |
4040 | * Generally it's safe to hold refcount during waiting page lock. But | 4071 | * Generally it's safe to hold refcount during waiting page lock. But |
4041 | * here we just wait to defer the next page fault to avoid busy loop and | 4072 | * here we just wait to defer the next page fault to avoid busy loop and |
@@ -4340,21 +4371,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | |||
4340 | pte_t pte; | 4371 | pte_t pte; |
4341 | struct hstate *h = hstate_vma(vma); | 4372 | struct hstate *h = hstate_vma(vma); |
4342 | unsigned long pages = 0; | 4373 | unsigned long pages = 0; |
4343 | unsigned long f_start = start; | ||
4344 | unsigned long f_end = end; | ||
4345 | bool shared_pmd = false; | 4374 | bool shared_pmd = false; |
4375 | struct mmu_notifier_range range; | ||
4346 | 4376 | ||
4347 | /* | 4377 | /* |
4348 | * In the case of shared PMDs, the area to flush could be beyond | 4378 | * In the case of shared PMDs, the area to flush could be beyond |
4349 | * start/end. Set f_start/f_end to cover the maximum possible | 4379 | * start/end. Set range.start/range.end to cover the maximum possible |
4350 | * range if PMD sharing is possible. | 4380 | * range if PMD sharing is possible. |
4351 | */ | 4381 | */ |
4352 | adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end); | 4382 | mmu_notifier_range_init(&range, mm, start, end); |
4383 | adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); | ||
4353 | 4384 | ||
4354 | BUG_ON(address >= end); | 4385 | BUG_ON(address >= end); |
4355 | flush_cache_range(vma, f_start, f_end); | 4386 | flush_cache_range(vma, range.start, range.end); |
4356 | 4387 | ||
4357 | mmu_notifier_invalidate_range_start(mm, f_start, f_end); | 4388 | mmu_notifier_invalidate_range_start(&range); |
4358 | i_mmap_lock_write(vma->vm_file->f_mapping); | 4389 | i_mmap_lock_write(vma->vm_file->f_mapping); |
4359 | for (; address < end; address += huge_page_size(h)) { | 4390 | for (; address < end; address += huge_page_size(h)) { |
4360 | spinlock_t *ptl; | 4391 | spinlock_t *ptl; |
@@ -4405,7 +4436,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | |||
4405 | * did unshare a page of pmds, flush the range corresponding to the pud. | 4436 | * did unshare a page of pmds, flush the range corresponding to the pud. |
4406 | */ | 4437 | */ |
4407 | if (shared_pmd) | 4438 | if (shared_pmd) |
4408 | flush_hugetlb_tlb_range(vma, f_start, f_end); | 4439 | flush_hugetlb_tlb_range(vma, range.start, range.end); |
4409 | else | 4440 | else |
4410 | flush_hugetlb_tlb_range(vma, start, end); | 4441 | flush_hugetlb_tlb_range(vma, start, end); |
4411 | /* | 4442 | /* |
@@ -4415,7 +4446,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | |||
4415 | * See Documentation/vm/mmu_notifier.rst | 4446 | * See Documentation/vm/mmu_notifier.rst |
4416 | */ | 4447 | */ |
4417 | i_mmap_unlock_write(vma->vm_file->f_mapping); | 4448 | i_mmap_unlock_write(vma->vm_file->f_mapping); |
4418 | mmu_notifier_invalidate_range_end(mm, f_start, f_end); | 4449 | mmu_notifier_invalidate_range_end(&range); |
4419 | 4450 | ||
4420 | return pages << h->order; | 4451 | return pages << h->order; |
4421 | } | 4452 | } |
@@ -4640,10 +4671,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, | |||
4640 | * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() | 4671 | * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() |
4641 | * and returns the corresponding pte. While this is not necessary for the | 4672 | * and returns the corresponding pte. While this is not necessary for the |
4642 | * !shared pmd case because we can allocate the pmd later as well, it makes the | 4673 | * !shared pmd case because we can allocate the pmd later as well, it makes the |
4643 | * code much cleaner. pmd allocation is essential for the shared case because | 4674 | * code much cleaner. |
4644 | * pud has to be populated inside the same i_mmap_rwsem section - otherwise | 4675 | * |
4645 | * racing tasks could either miss the sharing (see huge_pte_offset) or select a | 4676 | * This routine must be called with i_mmap_rwsem held in at least read mode. |
4646 | * bad pmd for sharing. | 4677 | * For hugetlbfs, this prevents removal of any page table entries associated |
4678 | * with the address space. This is important as we are setting up sharing | ||
4679 | * based on existing page table entries (mappings). | ||
4647 | */ | 4680 | */ |
4648 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | 4681 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) |
4649 | { | 4682 | { |
@@ -4660,7 +4693,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | |||
4660 | if (!vma_shareable(vma, addr)) | 4693 | if (!vma_shareable(vma, addr)) |
4661 | return (pte_t *)pmd_alloc(mm, pud, addr); | 4694 | return (pte_t *)pmd_alloc(mm, pud, addr); |
4662 | 4695 | ||
4663 | i_mmap_lock_write(mapping); | ||
4664 | vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { | 4696 | vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { |
4665 | if (svma == vma) | 4697 | if (svma == vma) |
4666 | continue; | 4698 | continue; |
@@ -4690,7 +4722,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | |||
4690 | spin_unlock(ptl); | 4722 | spin_unlock(ptl); |
4691 | out: | 4723 | out: |
4692 | pte = (pte_t *)pmd_alloc(mm, pud, addr); | 4724 | pte = (pte_t *)pmd_alloc(mm, pud, addr); |
4693 | i_mmap_unlock_write(mapping); | ||
4694 | return pte; | 4725 | return pte; |
4695 | } | 4726 | } |
4696 | 4727 | ||
@@ -4701,7 +4732,7 @@ out: | |||
4701 | * indicated by page_count > 1, unmap is achieved by clearing pud and | 4732 | * indicated by page_count > 1, unmap is achieved by clearing pud and |
4702 | * decrementing the ref count. If count == 1, the pte page is not shared. | 4733 | * decrementing the ref count. If count == 1, the pte page is not shared. |
4703 | * | 4734 | * |
4704 | * called with page table lock held. | 4735 | * Called with page table lock held and i_mmap_rwsem held in write mode. |
4705 | * | 4736 | * |
4706 | * returns: 1 successfully unmapped a shared pte page | 4737 | * returns: 1 successfully unmapped a shared pte page |
4707 | * 0 the underlying pte page is not shared, or it is the last user | 4738 | * 0 the underlying pte page is not shared, or it is the last user |
diff --git a/mm/internal.h b/mm/internal.h index 291eb2b6d1d8..f4a7bb02decf 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -444,6 +444,16 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, | |||
444 | #define NODE_RECLAIM_SOME 0 | 444 | #define NODE_RECLAIM_SOME 0 |
445 | #define NODE_RECLAIM_SUCCESS 1 | 445 | #define NODE_RECLAIM_SUCCESS 1 |
446 | 446 | ||
447 | #ifdef CONFIG_NUMA | ||
448 | extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); | ||
449 | #else | ||
450 | static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, | ||
451 | unsigned int order) | ||
452 | { | ||
453 | return NODE_RECLAIM_NOSCAN; | ||
454 | } | ||
455 | #endif | ||
456 | |||
447 | extern int hwpoison_filter(struct page *p); | 457 | extern int hwpoison_filter(struct page *p); |
448 | 458 | ||
449 | extern u32 hwpoison_filter_dev_major; | 459 | extern u32 hwpoison_filter_dev_major; |
@@ -480,10 +490,16 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, | |||
480 | #define ALLOC_OOM ALLOC_NO_WATERMARKS | 490 | #define ALLOC_OOM ALLOC_NO_WATERMARKS |
481 | #endif | 491 | #endif |
482 | 492 | ||
483 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ | 493 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ |
484 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ | 494 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ |
485 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ | 495 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ |
486 | #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ | 496 | #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ |
497 | #ifdef CONFIG_ZONE_DMA32 | ||
498 | #define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */ | ||
499 | #else | ||
500 | #define ALLOC_NOFRAGMENT 0x0 | ||
501 | #endif | ||
502 | #define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */ | ||
487 | 503 | ||
488 | enum ttu_flags; | 504 | enum ttu_flags; |
489 | struct tlbflush_unmap_batch; | 505 | struct tlbflush_unmap_batch; |
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 3289db38bc87..0a14fcff70ed 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile | |||
@@ -1,11 +1,18 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | KASAN_SANITIZE := n | 2 | KASAN_SANITIZE := n |
3 | UBSAN_SANITIZE_kasan.o := n | 3 | UBSAN_SANITIZE_common.o := n |
4 | UBSAN_SANITIZE_generic.o := n | ||
5 | UBSAN_SANITIZE_tags.o := n | ||
4 | KCOV_INSTRUMENT := n | 6 | KCOV_INSTRUMENT := n |
5 | 7 | ||
6 | CFLAGS_REMOVE_kasan.o = -pg | 8 | CFLAGS_REMOVE_generic.o = -pg |
7 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 | 9 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 |
8 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 | 10 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 |
9 | CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) | ||
10 | 11 | ||
11 | obj-y := kasan.o report.o kasan_init.o quarantine.o | 12 | CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) |
13 | CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) | ||
14 | CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) | ||
15 | |||
16 | obj-$(CONFIG_KASAN) := common.o init.o report.o | ||
17 | obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o | ||
18 | obj-$(CONFIG_KASAN_SW_TAGS) += tags.o tags_report.o | ||
diff --git a/mm/kasan/kasan.c b/mm/kasan/common.c index c3bd5209da38..03d5d1374ca7 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/common.c | |||
@@ -1,5 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * This file contains shadow memory manipulation code. | 3 | * This file contains common generic and tag-based KASAN code. |
3 | * | 4 | * |
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | 5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. |
5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
@@ -13,9 +14,6 @@ | |||
13 | * | 14 | * |
14 | */ | 15 | */ |
15 | 16 | ||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | #define DISABLE_BRANCH_PROFILING | ||
18 | |||
19 | #include <linux/export.h> | 17 | #include <linux/export.h> |
20 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
21 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -40,6 +38,53 @@ | |||
40 | #include "kasan.h" | 38 | #include "kasan.h" |
41 | #include "../slab.h" | 39 | #include "../slab.h" |
42 | 40 | ||
41 | static inline int in_irqentry_text(unsigned long ptr) | ||
42 | { | ||
43 | return (ptr >= (unsigned long)&__irqentry_text_start && | ||
44 | ptr < (unsigned long)&__irqentry_text_end) || | ||
45 | (ptr >= (unsigned long)&__softirqentry_text_start && | ||
46 | ptr < (unsigned long)&__softirqentry_text_end); | ||
47 | } | ||
48 | |||
49 | static inline void filter_irq_stacks(struct stack_trace *trace) | ||
50 | { | ||
51 | int i; | ||
52 | |||
53 | if (!trace->nr_entries) | ||
54 | return; | ||
55 | for (i = 0; i < trace->nr_entries; i++) | ||
56 | if (in_irqentry_text(trace->entries[i])) { | ||
57 | /* Include the irqentry function into the stack. */ | ||
58 | trace->nr_entries = i + 1; | ||
59 | break; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static inline depot_stack_handle_t save_stack(gfp_t flags) | ||
64 | { | ||
65 | unsigned long entries[KASAN_STACK_DEPTH]; | ||
66 | struct stack_trace trace = { | ||
67 | .nr_entries = 0, | ||
68 | .entries = entries, | ||
69 | .max_entries = KASAN_STACK_DEPTH, | ||
70 | .skip = 0 | ||
71 | }; | ||
72 | |||
73 | save_stack_trace(&trace); | ||
74 | filter_irq_stacks(&trace); | ||
75 | if (trace.nr_entries != 0 && | ||
76 | trace.entries[trace.nr_entries-1] == ULONG_MAX) | ||
77 | trace.nr_entries--; | ||
78 | |||
79 | return depot_save_stack(&trace, flags); | ||
80 | } | ||
81 | |||
82 | static inline void set_track(struct kasan_track *track, gfp_t flags) | ||
83 | { | ||
84 | track->pid = current->pid; | ||
85 | track->stack = save_stack(flags); | ||
86 | } | ||
87 | |||
43 | void kasan_enable_current(void) | 88 | void kasan_enable_current(void) |
44 | { | 89 | { |
45 | current->kasan_depth++; | 90 | current->kasan_depth++; |
@@ -50,27 +95,85 @@ void kasan_disable_current(void) | |||
50 | current->kasan_depth--; | 95 | current->kasan_depth--; |
51 | } | 96 | } |
52 | 97 | ||
98 | void kasan_check_read(const volatile void *p, unsigned int size) | ||
99 | { | ||
100 | check_memory_region((unsigned long)p, size, false, _RET_IP_); | ||
101 | } | ||
102 | EXPORT_SYMBOL(kasan_check_read); | ||
103 | |||
104 | void kasan_check_write(const volatile void *p, unsigned int size) | ||
105 | { | ||
106 | check_memory_region((unsigned long)p, size, true, _RET_IP_); | ||
107 | } | ||
108 | EXPORT_SYMBOL(kasan_check_write); | ||
109 | |||
110 | #undef memset | ||
111 | void *memset(void *addr, int c, size_t len) | ||
112 | { | ||
113 | check_memory_region((unsigned long)addr, len, true, _RET_IP_); | ||
114 | |||
115 | return __memset(addr, c, len); | ||
116 | } | ||
117 | |||
118 | #undef memmove | ||
119 | void *memmove(void *dest, const void *src, size_t len) | ||
120 | { | ||
121 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
122 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
123 | |||
124 | return __memmove(dest, src, len); | ||
125 | } | ||
126 | |||
127 | #undef memcpy | ||
128 | void *memcpy(void *dest, const void *src, size_t len) | ||
129 | { | ||
130 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
131 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
132 | |||
133 | return __memcpy(dest, src, len); | ||
134 | } | ||
135 | |||
53 | /* | 136 | /* |
54 | * Poisons the shadow memory for 'size' bytes starting from 'addr'. | 137 | * Poisons the shadow memory for 'size' bytes starting from 'addr'. |
55 | * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. | 138 | * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. |
56 | */ | 139 | */ |
57 | static void kasan_poison_shadow(const void *address, size_t size, u8 value) | 140 | void kasan_poison_shadow(const void *address, size_t size, u8 value) |
58 | { | 141 | { |
59 | void *shadow_start, *shadow_end; | 142 | void *shadow_start, *shadow_end; |
60 | 143 | ||
144 | /* | ||
145 | * Perform shadow offset calculation based on untagged address, as | ||
146 | * some of the callers (e.g. kasan_poison_object_data) pass tagged | ||
147 | * addresses to this function. | ||
148 | */ | ||
149 | address = reset_tag(address); | ||
150 | |||
61 | shadow_start = kasan_mem_to_shadow(address); | 151 | shadow_start = kasan_mem_to_shadow(address); |
62 | shadow_end = kasan_mem_to_shadow(address + size); | 152 | shadow_end = kasan_mem_to_shadow(address + size); |
63 | 153 | ||
64 | memset(shadow_start, value, shadow_end - shadow_start); | 154 | __memset(shadow_start, value, shadow_end - shadow_start); |
65 | } | 155 | } |
66 | 156 | ||
67 | void kasan_unpoison_shadow(const void *address, size_t size) | 157 | void kasan_unpoison_shadow(const void *address, size_t size) |
68 | { | 158 | { |
69 | kasan_poison_shadow(address, size, 0); | 159 | u8 tag = get_tag(address); |
160 | |||
161 | /* | ||
162 | * Perform shadow offset calculation based on untagged address, as | ||
163 | * some of the callers (e.g. kasan_unpoison_object_data) pass tagged | ||
164 | * addresses to this function. | ||
165 | */ | ||
166 | address = reset_tag(address); | ||
167 | |||
168 | kasan_poison_shadow(address, size, tag); | ||
70 | 169 | ||
71 | if (size & KASAN_SHADOW_MASK) { | 170 | if (size & KASAN_SHADOW_MASK) { |
72 | u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); | 171 | u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); |
73 | *shadow = size & KASAN_SHADOW_MASK; | 172 | |
173 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
174 | *shadow = tag; | ||
175 | else | ||
176 | *shadow = size & KASAN_SHADOW_MASK; | ||
74 | } | 177 | } |
75 | } | 178 | } |
76 | 179 | ||
@@ -116,199 +219,18 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark) | |||
116 | kasan_unpoison_shadow(sp, size); | 219 | kasan_unpoison_shadow(sp, size); |
117 | } | 220 | } |
118 | 221 | ||
119 | /* | 222 | void kasan_alloc_pages(struct page *page, unsigned int order) |
120 | * All functions below always inlined so compiler could | ||
121 | * perform better optimizations in each of __asan_loadX/__assn_storeX | ||
122 | * depending on memory access size X. | ||
123 | */ | ||
124 | |||
125 | static __always_inline bool memory_is_poisoned_1(unsigned long addr) | ||
126 | { | ||
127 | s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); | ||
128 | |||
129 | if (unlikely(shadow_value)) { | ||
130 | s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; | ||
131 | return unlikely(last_accessible_byte >= shadow_value); | ||
132 | } | ||
133 | |||
134 | return false; | ||
135 | } | ||
136 | |||
137 | static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr, | ||
138 | unsigned long size) | ||
139 | { | ||
140 | u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); | ||
141 | |||
142 | /* | ||
143 | * Access crosses 8(shadow size)-byte boundary. Such access maps | ||
144 | * into 2 shadow bytes, so we need to check them both. | ||
145 | */ | ||
146 | if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1)) | ||
147 | return *shadow_addr || memory_is_poisoned_1(addr + size - 1); | ||
148 | |||
149 | return memory_is_poisoned_1(addr + size - 1); | ||
150 | } | ||
151 | |||
152 | static __always_inline bool memory_is_poisoned_16(unsigned long addr) | ||
153 | { | ||
154 | u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); | ||
155 | |||
156 | /* Unaligned 16-bytes access maps into 3 shadow bytes. */ | ||
157 | if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) | ||
158 | return *shadow_addr || memory_is_poisoned_1(addr + 15); | ||
159 | |||
160 | return *shadow_addr; | ||
161 | } | ||
162 | |||
163 | static __always_inline unsigned long bytes_is_nonzero(const u8 *start, | ||
164 | size_t size) | ||
165 | { | ||
166 | while (size) { | ||
167 | if (unlikely(*start)) | ||
168 | return (unsigned long)start; | ||
169 | start++; | ||
170 | size--; | ||
171 | } | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static __always_inline unsigned long memory_is_nonzero(const void *start, | ||
177 | const void *end) | ||
178 | { | ||
179 | unsigned int words; | ||
180 | unsigned long ret; | ||
181 | unsigned int prefix = (unsigned long)start % 8; | ||
182 | |||
183 | if (end - start <= 16) | ||
184 | return bytes_is_nonzero(start, end - start); | ||
185 | |||
186 | if (prefix) { | ||
187 | prefix = 8 - prefix; | ||
188 | ret = bytes_is_nonzero(start, prefix); | ||
189 | if (unlikely(ret)) | ||
190 | return ret; | ||
191 | start += prefix; | ||
192 | } | ||
193 | |||
194 | words = (end - start) / 8; | ||
195 | while (words) { | ||
196 | if (unlikely(*(u64 *)start)) | ||
197 | return bytes_is_nonzero(start, 8); | ||
198 | start += 8; | ||
199 | words--; | ||
200 | } | ||
201 | |||
202 | return bytes_is_nonzero(start, (end - start) % 8); | ||
203 | } | ||
204 | |||
205 | static __always_inline bool memory_is_poisoned_n(unsigned long addr, | ||
206 | size_t size) | ||
207 | { | ||
208 | unsigned long ret; | ||
209 | |||
210 | ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), | ||
211 | kasan_mem_to_shadow((void *)addr + size - 1) + 1); | ||
212 | |||
213 | if (unlikely(ret)) { | ||
214 | unsigned long last_byte = addr + size - 1; | ||
215 | s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); | ||
216 | |||
217 | if (unlikely(ret != (unsigned long)last_shadow || | ||
218 | ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) | ||
219 | return true; | ||
220 | } | ||
221 | return false; | ||
222 | } | ||
223 | |||
224 | static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) | ||
225 | { | ||
226 | if (__builtin_constant_p(size)) { | ||
227 | switch (size) { | ||
228 | case 1: | ||
229 | return memory_is_poisoned_1(addr); | ||
230 | case 2: | ||
231 | case 4: | ||
232 | case 8: | ||
233 | return memory_is_poisoned_2_4_8(addr, size); | ||
234 | case 16: | ||
235 | return memory_is_poisoned_16(addr); | ||
236 | default: | ||
237 | BUILD_BUG(); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | return memory_is_poisoned_n(addr, size); | ||
242 | } | ||
243 | |||
244 | static __always_inline void check_memory_region_inline(unsigned long addr, | ||
245 | size_t size, bool write, | ||
246 | unsigned long ret_ip) | ||
247 | { | 223 | { |
248 | if (unlikely(size == 0)) | 224 | u8 tag; |
249 | return; | 225 | unsigned long i; |
250 | 226 | ||
251 | if (unlikely((void *)addr < | 227 | if (unlikely(PageHighMem(page))) |
252 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { | ||
253 | kasan_report(addr, size, write, ret_ip); | ||
254 | return; | 228 | return; |
255 | } | ||
256 | 229 | ||
257 | if (likely(!memory_is_poisoned(addr, size))) | 230 | tag = random_tag(); |
258 | return; | 231 | for (i = 0; i < (1 << order); i++) |
259 | 232 | page_kasan_tag_set(page + i, tag); | |
260 | kasan_report(addr, size, write, ret_ip); | 233 | kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); |
261 | } | ||
262 | |||
263 | static void check_memory_region(unsigned long addr, | ||
264 | size_t size, bool write, | ||
265 | unsigned long ret_ip) | ||
266 | { | ||
267 | check_memory_region_inline(addr, size, write, ret_ip); | ||
268 | } | ||
269 | |||
270 | void kasan_check_read(const volatile void *p, unsigned int size) | ||
271 | { | ||
272 | check_memory_region((unsigned long)p, size, false, _RET_IP_); | ||
273 | } | ||
274 | EXPORT_SYMBOL(kasan_check_read); | ||
275 | |||
276 | void kasan_check_write(const volatile void *p, unsigned int size) | ||
277 | { | ||
278 | check_memory_region((unsigned long)p, size, true, _RET_IP_); | ||
279 | } | ||
280 | EXPORT_SYMBOL(kasan_check_write); | ||
281 | |||
282 | #undef memset | ||
283 | void *memset(void *addr, int c, size_t len) | ||
284 | { | ||
285 | check_memory_region((unsigned long)addr, len, true, _RET_IP_); | ||
286 | |||
287 | return __memset(addr, c, len); | ||
288 | } | ||
289 | |||
290 | #undef memmove | ||
291 | void *memmove(void *dest, const void *src, size_t len) | ||
292 | { | ||
293 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
294 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
295 | |||
296 | return __memmove(dest, src, len); | ||
297 | } | ||
298 | |||
299 | #undef memcpy | ||
300 | void *memcpy(void *dest, const void *src, size_t len) | ||
301 | { | ||
302 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
303 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
304 | |||
305 | return __memcpy(dest, src, len); | ||
306 | } | ||
307 | |||
308 | void kasan_alloc_pages(struct page *page, unsigned int order) | ||
309 | { | ||
310 | if (likely(!PageHighMem(page))) | ||
311 | kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); | ||
312 | } | 234 | } |
313 | 235 | ||
314 | void kasan_free_pages(struct page *page, unsigned int order) | 236 | void kasan_free_pages(struct page *page, unsigned int order) |
@@ -323,8 +245,11 @@ void kasan_free_pages(struct page *page, unsigned int order) | |||
323 | * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. | 245 | * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. |
324 | * For larger allocations larger redzones are used. | 246 | * For larger allocations larger redzones are used. |
325 | */ | 247 | */ |
326 | static unsigned int optimal_redzone(unsigned int object_size) | 248 | static inline unsigned int optimal_redzone(unsigned int object_size) |
327 | { | 249 | { |
250 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
251 | return 0; | ||
252 | |||
328 | return | 253 | return |
329 | object_size <= 64 - 16 ? 16 : | 254 | object_size <= 64 - 16 ? 16 : |
330 | object_size <= 128 - 32 ? 32 : | 255 | object_size <= 128 - 32 ? 32 : |
@@ -339,6 +264,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, | |||
339 | slab_flags_t *flags) | 264 | slab_flags_t *flags) |
340 | { | 265 | { |
341 | unsigned int orig_size = *size; | 266 | unsigned int orig_size = *size; |
267 | unsigned int redzone_size; | ||
342 | int redzone_adjust; | 268 | int redzone_adjust; |
343 | 269 | ||
344 | /* Add alloc meta. */ | 270 | /* Add alloc meta. */ |
@@ -346,20 +272,20 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, | |||
346 | *size += sizeof(struct kasan_alloc_meta); | 272 | *size += sizeof(struct kasan_alloc_meta); |
347 | 273 | ||
348 | /* Add free meta. */ | 274 | /* Add free meta. */ |
349 | if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || | 275 | if (IS_ENABLED(CONFIG_KASAN_GENERIC) && |
350 | cache->object_size < sizeof(struct kasan_free_meta)) { | 276 | (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || |
277 | cache->object_size < sizeof(struct kasan_free_meta))) { | ||
351 | cache->kasan_info.free_meta_offset = *size; | 278 | cache->kasan_info.free_meta_offset = *size; |
352 | *size += sizeof(struct kasan_free_meta); | 279 | *size += sizeof(struct kasan_free_meta); |
353 | } | 280 | } |
354 | redzone_adjust = optimal_redzone(cache->object_size) - | ||
355 | (*size - cache->object_size); | ||
356 | 281 | ||
282 | redzone_size = optimal_redzone(cache->object_size); | ||
283 | redzone_adjust = redzone_size - (*size - cache->object_size); | ||
357 | if (redzone_adjust > 0) | 284 | if (redzone_adjust > 0) |
358 | *size += redzone_adjust; | 285 | *size += redzone_adjust; |
359 | 286 | ||
360 | *size = min_t(unsigned int, KMALLOC_MAX_SIZE, | 287 | *size = min_t(unsigned int, KMALLOC_MAX_SIZE, |
361 | max(*size, cache->object_size + | 288 | max(*size, cache->object_size + redzone_size)); |
362 | optimal_redzone(cache->object_size))); | ||
363 | 289 | ||
364 | /* | 290 | /* |
365 | * If the metadata doesn't fit, don't enable KASAN at all. | 291 | * If the metadata doesn't fit, don't enable KASAN at all. |
@@ -372,30 +298,39 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, | |||
372 | return; | 298 | return; |
373 | } | 299 | } |
374 | 300 | ||
301 | cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE); | ||
302 | |||
375 | *flags |= SLAB_KASAN; | 303 | *flags |= SLAB_KASAN; |
376 | } | 304 | } |
377 | 305 | ||
378 | void kasan_cache_shrink(struct kmem_cache *cache) | 306 | size_t kasan_metadata_size(struct kmem_cache *cache) |
379 | { | 307 | { |
380 | quarantine_remove_cache(cache); | 308 | return (cache->kasan_info.alloc_meta_offset ? |
309 | sizeof(struct kasan_alloc_meta) : 0) + | ||
310 | (cache->kasan_info.free_meta_offset ? | ||
311 | sizeof(struct kasan_free_meta) : 0); | ||
381 | } | 312 | } |
382 | 313 | ||
383 | void kasan_cache_shutdown(struct kmem_cache *cache) | 314 | struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, |
315 | const void *object) | ||
384 | { | 316 | { |
385 | if (!__kmem_cache_empty(cache)) | 317 | BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32); |
386 | quarantine_remove_cache(cache); | 318 | return (void *)object + cache->kasan_info.alloc_meta_offset; |
387 | } | 319 | } |
388 | 320 | ||
389 | size_t kasan_metadata_size(struct kmem_cache *cache) | 321 | struct kasan_free_meta *get_free_info(struct kmem_cache *cache, |
322 | const void *object) | ||
390 | { | 323 | { |
391 | return (cache->kasan_info.alloc_meta_offset ? | 324 | BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); |
392 | sizeof(struct kasan_alloc_meta) : 0) + | 325 | return (void *)object + cache->kasan_info.free_meta_offset; |
393 | (cache->kasan_info.free_meta_offset ? | ||
394 | sizeof(struct kasan_free_meta) : 0); | ||
395 | } | 326 | } |
396 | 327 | ||
397 | void kasan_poison_slab(struct page *page) | 328 | void kasan_poison_slab(struct page *page) |
398 | { | 329 | { |
330 | unsigned long i; | ||
331 | |||
332 | for (i = 0; i < (1 << compound_order(page)); i++) | ||
333 | page_kasan_tag_reset(page + i); | ||
399 | kasan_poison_shadow(page_address(page), | 334 | kasan_poison_shadow(page_address(page), |
400 | PAGE_SIZE << compound_order(page), | 335 | PAGE_SIZE << compound_order(page), |
401 | KASAN_KMALLOC_REDZONE); | 336 | KASAN_KMALLOC_REDZONE); |
@@ -413,92 +348,79 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) | |||
413 | KASAN_KMALLOC_REDZONE); | 348 | KASAN_KMALLOC_REDZONE); |
414 | } | 349 | } |
415 | 350 | ||
416 | static inline int in_irqentry_text(unsigned long ptr) | 351 | /* |
417 | { | 352 | * Since it's desirable to only call object constructors once during slab |
418 | return (ptr >= (unsigned long)&__irqentry_text_start && | 353 | * allocation, we preassign tags to all such objects. Also preassign tags for |
419 | ptr < (unsigned long)&__irqentry_text_end) || | 354 | * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. |
420 | (ptr >= (unsigned long)&__softirqentry_text_start && | 355 | * For SLAB allocator we can't preassign tags randomly since the freelist is |
421 | ptr < (unsigned long)&__softirqentry_text_end); | 356 | * stored as an array of indexes instead of a linked list. Assign tags based |
422 | } | 357 | * on objects indexes, so that objects that are next to each other get |
423 | 358 | * different tags. | |
424 | static inline void filter_irq_stacks(struct stack_trace *trace) | 359 | * After a tag is assigned, the object always gets allocated with the same tag. |
360 | * The reason is that we can't change tags for objects with constructors on | ||
361 | * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor | ||
362 | * code can save the pointer to the object somewhere (e.g. in the object | ||
363 | * itself). Then if we retag it, the old saved pointer will become invalid. | ||
364 | */ | ||
365 | static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) | ||
425 | { | 366 | { |
426 | int i; | 367 | if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) |
368 | return new ? KASAN_TAG_KERNEL : random_tag(); | ||
427 | 369 | ||
428 | if (!trace->nr_entries) | 370 | #ifdef CONFIG_SLAB |
429 | return; | 371 | return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); |
430 | for (i = 0; i < trace->nr_entries; i++) | 372 | #else |
431 | if (in_irqentry_text(trace->entries[i])) { | 373 | return new ? random_tag() : get_tag(object); |
432 | /* Include the irqentry function into the stack. */ | 374 | #endif |
433 | trace->nr_entries = i + 1; | ||
434 | break; | ||
435 | } | ||
436 | } | 375 | } |
437 | 376 | ||
438 | static inline depot_stack_handle_t save_stack(gfp_t flags) | 377 | void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, |
378 | const void *object) | ||
439 | { | 379 | { |
440 | unsigned long entries[KASAN_STACK_DEPTH]; | 380 | struct kasan_alloc_meta *alloc_info; |
441 | struct stack_trace trace = { | ||
442 | .nr_entries = 0, | ||
443 | .entries = entries, | ||
444 | .max_entries = KASAN_STACK_DEPTH, | ||
445 | .skip = 0 | ||
446 | }; | ||
447 | |||
448 | save_stack_trace(&trace); | ||
449 | filter_irq_stacks(&trace); | ||
450 | if (trace.nr_entries != 0 && | ||
451 | trace.entries[trace.nr_entries-1] == ULONG_MAX) | ||
452 | trace.nr_entries--; | ||
453 | 381 | ||
454 | return depot_save_stack(&trace, flags); | 382 | if (!(cache->flags & SLAB_KASAN)) |
455 | } | 383 | return (void *)object; |
456 | 384 | ||
457 | static inline void set_track(struct kasan_track *track, gfp_t flags) | 385 | alloc_info = get_alloc_info(cache, object); |
458 | { | 386 | __memset(alloc_info, 0, sizeof(*alloc_info)); |
459 | track->pid = current->pid; | ||
460 | track->stack = save_stack(flags); | ||
461 | } | ||
462 | 387 | ||
463 | struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, | 388 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) |
464 | const void *object) | 389 | object = set_tag(object, assign_tag(cache, object, true)); |
465 | { | ||
466 | BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32); | ||
467 | return (void *)object + cache->kasan_info.alloc_meta_offset; | ||
468 | } | ||
469 | 390 | ||
470 | struct kasan_free_meta *get_free_info(struct kmem_cache *cache, | 391 | return (void *)object; |
471 | const void *object) | ||
472 | { | ||
473 | BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); | ||
474 | return (void *)object + cache->kasan_info.free_meta_offset; | ||
475 | } | 392 | } |
476 | 393 | ||
477 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object) | 394 | void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, |
395 | gfp_t flags) | ||
478 | { | 396 | { |
479 | struct kasan_alloc_meta *alloc_info; | 397 | return kasan_kmalloc(cache, object, cache->object_size, flags); |
480 | |||
481 | if (!(cache->flags & SLAB_KASAN)) | ||
482 | return; | ||
483 | |||
484 | alloc_info = get_alloc_info(cache, object); | ||
485 | __memset(alloc_info, 0, sizeof(*alloc_info)); | ||
486 | } | 398 | } |
487 | 399 | ||
488 | void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) | 400 | static inline bool shadow_invalid(u8 tag, s8 shadow_byte) |
489 | { | 401 | { |
490 | kasan_kmalloc(cache, object, cache->object_size, flags); | 402 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) |
403 | return shadow_byte < 0 || | ||
404 | shadow_byte >= KASAN_SHADOW_SCALE_SIZE; | ||
405 | else | ||
406 | return tag != (u8)shadow_byte; | ||
491 | } | 407 | } |
492 | 408 | ||
493 | static bool __kasan_slab_free(struct kmem_cache *cache, void *object, | 409 | static bool __kasan_slab_free(struct kmem_cache *cache, void *object, |
494 | unsigned long ip, bool quarantine) | 410 | unsigned long ip, bool quarantine) |
495 | { | 411 | { |
496 | s8 shadow_byte; | 412 | s8 shadow_byte; |
413 | u8 tag; | ||
414 | void *tagged_object; | ||
497 | unsigned long rounded_up_size; | 415 | unsigned long rounded_up_size; |
498 | 416 | ||
417 | tag = get_tag(object); | ||
418 | tagged_object = object; | ||
419 | object = reset_tag(object); | ||
420 | |||
499 | if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != | 421 | if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != |
500 | object)) { | 422 | object)) { |
501 | kasan_report_invalid_free(object, ip); | 423 | kasan_report_invalid_free(tagged_object, ip); |
502 | return true; | 424 | return true; |
503 | } | 425 | } |
504 | 426 | ||
@@ -507,20 +429,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object, | |||
507 | return false; | 429 | return false; |
508 | 430 | ||
509 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); | 431 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); |
510 | if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { | 432 | if (shadow_invalid(tag, shadow_byte)) { |
511 | kasan_report_invalid_free(object, ip); | 433 | kasan_report_invalid_free(tagged_object, ip); |
512 | return true; | 434 | return true; |
513 | } | 435 | } |
514 | 436 | ||
515 | rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); | 437 | rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); |
516 | kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); | 438 | kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); |
517 | 439 | ||
518 | if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN))) | 440 | if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) || |
441 | unlikely(!(cache->flags & SLAB_KASAN))) | ||
519 | return false; | 442 | return false; |
520 | 443 | ||
521 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); | 444 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); |
522 | quarantine_put(get_free_info(cache, object), cache); | 445 | quarantine_put(get_free_info(cache, object), cache); |
523 | return true; | 446 | |
447 | return IS_ENABLED(CONFIG_KASAN_GENERIC); | ||
524 | } | 448 | } |
525 | 449 | ||
526 | bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) | 450 | bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) |
@@ -528,33 +452,41 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) | |||
528 | return __kasan_slab_free(cache, object, ip, true); | 452 | return __kasan_slab_free(cache, object, ip, true); |
529 | } | 453 | } |
530 | 454 | ||
531 | void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, | 455 | void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, |
532 | gfp_t flags) | 456 | size_t size, gfp_t flags) |
533 | { | 457 | { |
534 | unsigned long redzone_start; | 458 | unsigned long redzone_start; |
535 | unsigned long redzone_end; | 459 | unsigned long redzone_end; |
460 | u8 tag; | ||
536 | 461 | ||
537 | if (gfpflags_allow_blocking(flags)) | 462 | if (gfpflags_allow_blocking(flags)) |
538 | quarantine_reduce(); | 463 | quarantine_reduce(); |
539 | 464 | ||
540 | if (unlikely(object == NULL)) | 465 | if (unlikely(object == NULL)) |
541 | return; | 466 | return NULL; |
542 | 467 | ||
543 | redzone_start = round_up((unsigned long)(object + size), | 468 | redzone_start = round_up((unsigned long)(object + size), |
544 | KASAN_SHADOW_SCALE_SIZE); | 469 | KASAN_SHADOW_SCALE_SIZE); |
545 | redzone_end = round_up((unsigned long)object + cache->object_size, | 470 | redzone_end = round_up((unsigned long)object + cache->object_size, |
546 | KASAN_SHADOW_SCALE_SIZE); | 471 | KASAN_SHADOW_SCALE_SIZE); |
547 | 472 | ||
548 | kasan_unpoison_shadow(object, size); | 473 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) |
474 | tag = assign_tag(cache, object, false); | ||
475 | |||
476 | /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ | ||
477 | kasan_unpoison_shadow(set_tag(object, tag), size); | ||
549 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | 478 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
550 | KASAN_KMALLOC_REDZONE); | 479 | KASAN_KMALLOC_REDZONE); |
551 | 480 | ||
552 | if (cache->flags & SLAB_KASAN) | 481 | if (cache->flags & SLAB_KASAN) |
553 | set_track(&get_alloc_info(cache, object)->alloc_track, flags); | 482 | set_track(&get_alloc_info(cache, object)->alloc_track, flags); |
483 | |||
484 | return set_tag(object, tag); | ||
554 | } | 485 | } |
555 | EXPORT_SYMBOL(kasan_kmalloc); | 486 | EXPORT_SYMBOL(kasan_kmalloc); |
556 | 487 | ||
557 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) | 488 | void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, |
489 | gfp_t flags) | ||
558 | { | 490 | { |
559 | struct page *page; | 491 | struct page *page; |
560 | unsigned long redzone_start; | 492 | unsigned long redzone_start; |
@@ -564,7 +496,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) | |||
564 | quarantine_reduce(); | 496 | quarantine_reduce(); |
565 | 497 | ||
566 | if (unlikely(ptr == NULL)) | 498 | if (unlikely(ptr == NULL)) |
567 | return; | 499 | return NULL; |
568 | 500 | ||
569 | page = virt_to_page(ptr); | 501 | page = virt_to_page(ptr); |
570 | redzone_start = round_up((unsigned long)(ptr + size), | 502 | redzone_start = round_up((unsigned long)(ptr + size), |
@@ -574,21 +506,23 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) | |||
574 | kasan_unpoison_shadow(ptr, size); | 506 | kasan_unpoison_shadow(ptr, size); |
575 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | 507 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
576 | KASAN_PAGE_REDZONE); | 508 | KASAN_PAGE_REDZONE); |
509 | |||
510 | return (void *)ptr; | ||
577 | } | 511 | } |
578 | 512 | ||
579 | void kasan_krealloc(const void *object, size_t size, gfp_t flags) | 513 | void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) |
580 | { | 514 | { |
581 | struct page *page; | 515 | struct page *page; |
582 | 516 | ||
583 | if (unlikely(object == ZERO_SIZE_PTR)) | 517 | if (unlikely(object == ZERO_SIZE_PTR)) |
584 | return; | 518 | return (void *)object; |
585 | 519 | ||
586 | page = virt_to_head_page(object); | 520 | page = virt_to_head_page(object); |
587 | 521 | ||
588 | if (unlikely(!PageSlab(page))) | 522 | if (unlikely(!PageSlab(page))) |
589 | kasan_kmalloc_large(object, size, flags); | 523 | return kasan_kmalloc_large(object, size, flags); |
590 | else | 524 | else |
591 | kasan_kmalloc(page->slab_cache, object, size, flags); | 525 | return kasan_kmalloc(page->slab_cache, object, size, flags); |
592 | } | 526 | } |
593 | 527 | ||
594 | void kasan_poison_kfree(void *ptr, unsigned long ip) | 528 | void kasan_poison_kfree(void *ptr, unsigned long ip) |
@@ -632,11 +566,12 @@ int kasan_module_alloc(void *addr, size_t size) | |||
632 | 566 | ||
633 | ret = __vmalloc_node_range(shadow_size, 1, shadow_start, | 567 | ret = __vmalloc_node_range(shadow_size, 1, shadow_start, |
634 | shadow_start + shadow_size, | 568 | shadow_start + shadow_size, |
635 | GFP_KERNEL | __GFP_ZERO, | 569 | GFP_KERNEL, |
636 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 570 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
637 | __builtin_return_address(0)); | 571 | __builtin_return_address(0)); |
638 | 572 | ||
639 | if (ret) { | 573 | if (ret) { |
574 | __memset(ret, KASAN_SHADOW_INIT, shadow_size); | ||
640 | find_vm_area(addr)->flags |= VM_KASAN; | 575 | find_vm_area(addr)->flags |= VM_KASAN; |
641 | kmemleak_ignore(ret); | 576 | kmemleak_ignore(ret); |
642 | return 0; | 577 | return 0; |
@@ -651,147 +586,6 @@ void kasan_free_shadow(const struct vm_struct *vm) | |||
651 | vfree(kasan_mem_to_shadow(vm->addr)); | 586 | vfree(kasan_mem_to_shadow(vm->addr)); |
652 | } | 587 | } |
653 | 588 | ||
654 | static void register_global(struct kasan_global *global) | ||
655 | { | ||
656 | size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); | ||
657 | |||
658 | kasan_unpoison_shadow(global->beg, global->size); | ||
659 | |||
660 | kasan_poison_shadow(global->beg + aligned_size, | ||
661 | global->size_with_redzone - aligned_size, | ||
662 | KASAN_GLOBAL_REDZONE); | ||
663 | } | ||
664 | |||
665 | void __asan_register_globals(struct kasan_global *globals, size_t size) | ||
666 | { | ||
667 | int i; | ||
668 | |||
669 | for (i = 0; i < size; i++) | ||
670 | register_global(&globals[i]); | ||
671 | } | ||
672 | EXPORT_SYMBOL(__asan_register_globals); | ||
673 | |||
674 | void __asan_unregister_globals(struct kasan_global *globals, size_t size) | ||
675 | { | ||
676 | } | ||
677 | EXPORT_SYMBOL(__asan_unregister_globals); | ||
678 | |||
679 | #define DEFINE_ASAN_LOAD_STORE(size) \ | ||
680 | void __asan_load##size(unsigned long addr) \ | ||
681 | { \ | ||
682 | check_memory_region_inline(addr, size, false, _RET_IP_);\ | ||
683 | } \ | ||
684 | EXPORT_SYMBOL(__asan_load##size); \ | ||
685 | __alias(__asan_load##size) \ | ||
686 | void __asan_load##size##_noabort(unsigned long); \ | ||
687 | EXPORT_SYMBOL(__asan_load##size##_noabort); \ | ||
688 | void __asan_store##size(unsigned long addr) \ | ||
689 | { \ | ||
690 | check_memory_region_inline(addr, size, true, _RET_IP_); \ | ||
691 | } \ | ||
692 | EXPORT_SYMBOL(__asan_store##size); \ | ||
693 | __alias(__asan_store##size) \ | ||
694 | void __asan_store##size##_noabort(unsigned long); \ | ||
695 | EXPORT_SYMBOL(__asan_store##size##_noabort) | ||
696 | |||
697 | DEFINE_ASAN_LOAD_STORE(1); | ||
698 | DEFINE_ASAN_LOAD_STORE(2); | ||
699 | DEFINE_ASAN_LOAD_STORE(4); | ||
700 | DEFINE_ASAN_LOAD_STORE(8); | ||
701 | DEFINE_ASAN_LOAD_STORE(16); | ||
702 | |||
703 | void __asan_loadN(unsigned long addr, size_t size) | ||
704 | { | ||
705 | check_memory_region(addr, size, false, _RET_IP_); | ||
706 | } | ||
707 | EXPORT_SYMBOL(__asan_loadN); | ||
708 | |||
709 | __alias(__asan_loadN) | ||
710 | void __asan_loadN_noabort(unsigned long, size_t); | ||
711 | EXPORT_SYMBOL(__asan_loadN_noabort); | ||
712 | |||
713 | void __asan_storeN(unsigned long addr, size_t size) | ||
714 | { | ||
715 | check_memory_region(addr, size, true, _RET_IP_); | ||
716 | } | ||
717 | EXPORT_SYMBOL(__asan_storeN); | ||
718 | |||
719 | __alias(__asan_storeN) | ||
720 | void __asan_storeN_noabort(unsigned long, size_t); | ||
721 | EXPORT_SYMBOL(__asan_storeN_noabort); | ||
722 | |||
723 | /* to shut up compiler complaints */ | ||
724 | void __asan_handle_no_return(void) {} | ||
725 | EXPORT_SYMBOL(__asan_handle_no_return); | ||
726 | |||
727 | /* Emitted by compiler to poison large objects when they go out of scope. */ | ||
728 | void __asan_poison_stack_memory(const void *addr, size_t size) | ||
729 | { | ||
730 | /* | ||
731 | * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded | ||
732 | * by redzones, so we simply round up size to simplify logic. | ||
733 | */ | ||
734 | kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), | ||
735 | KASAN_USE_AFTER_SCOPE); | ||
736 | } | ||
737 | EXPORT_SYMBOL(__asan_poison_stack_memory); | ||
738 | |||
739 | /* Emitted by compiler to unpoison large objects when they go into scope. */ | ||
740 | void __asan_unpoison_stack_memory(const void *addr, size_t size) | ||
741 | { | ||
742 | kasan_unpoison_shadow(addr, size); | ||
743 | } | ||
744 | EXPORT_SYMBOL(__asan_unpoison_stack_memory); | ||
745 | |||
746 | /* Emitted by compiler to poison alloca()ed objects. */ | ||
747 | void __asan_alloca_poison(unsigned long addr, size_t size) | ||
748 | { | ||
749 | size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); | ||
750 | size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - | ||
751 | rounded_up_size; | ||
752 | size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); | ||
753 | |||
754 | const void *left_redzone = (const void *)(addr - | ||
755 | KASAN_ALLOCA_REDZONE_SIZE); | ||
756 | const void *right_redzone = (const void *)(addr + rounded_up_size); | ||
757 | |||
758 | WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); | ||
759 | |||
760 | kasan_unpoison_shadow((const void *)(addr + rounded_down_size), | ||
761 | size - rounded_down_size); | ||
762 | kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, | ||
763 | KASAN_ALLOCA_LEFT); | ||
764 | kasan_poison_shadow(right_redzone, | ||
765 | padding_size + KASAN_ALLOCA_REDZONE_SIZE, | ||
766 | KASAN_ALLOCA_RIGHT); | ||
767 | } | ||
768 | EXPORT_SYMBOL(__asan_alloca_poison); | ||
769 | |||
770 | /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */ | ||
771 | void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) | ||
772 | { | ||
773 | if (unlikely(!stack_top || stack_top > stack_bottom)) | ||
774 | return; | ||
775 | |||
776 | kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); | ||
777 | } | ||
778 | EXPORT_SYMBOL(__asan_allocas_unpoison); | ||
779 | |||
780 | /* Emitted by the compiler to [un]poison local variables. */ | ||
781 | #define DEFINE_ASAN_SET_SHADOW(byte) \ | ||
782 | void __asan_set_shadow_##byte(const void *addr, size_t size) \ | ||
783 | { \ | ||
784 | __memset((void *)addr, 0x##byte, size); \ | ||
785 | } \ | ||
786 | EXPORT_SYMBOL(__asan_set_shadow_##byte) | ||
787 | |||
788 | DEFINE_ASAN_SET_SHADOW(00); | ||
789 | DEFINE_ASAN_SET_SHADOW(f1); | ||
790 | DEFINE_ASAN_SET_SHADOW(f2); | ||
791 | DEFINE_ASAN_SET_SHADOW(f3); | ||
792 | DEFINE_ASAN_SET_SHADOW(f5); | ||
793 | DEFINE_ASAN_SET_SHADOW(f8); | ||
794 | |||
795 | #ifdef CONFIG_MEMORY_HOTPLUG | 589 | #ifdef CONFIG_MEMORY_HOTPLUG |
796 | static bool shadow_mapped(unsigned long addr) | 590 | static bool shadow_mapped(unsigned long addr) |
797 | { | 591 | { |
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c new file mode 100644 index 000000000000..ccb6207276e3 --- /dev/null +++ b/mm/kasan/generic.c | |||
@@ -0,0 +1,344 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * This file contains core generic KASAN code. | ||
4 | * | ||
5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
7 | * | ||
8 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | ||
9 | * Andrey Konovalov <andreyknvl@gmail.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | #define DISABLE_BRANCH_PROFILING | ||
19 | |||
20 | #include <linux/export.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/kasan.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/kmemleak.h> | ||
26 | #include <linux/linkage.h> | ||
27 | #include <linux/memblock.h> | ||
28 | #include <linux/memory.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/printk.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/sched/task_stack.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/stacktrace.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/types.h> | ||
38 | #include <linux/vmalloc.h> | ||
39 | #include <linux/bug.h> | ||
40 | |||
41 | #include "kasan.h" | ||
42 | #include "../slab.h" | ||
43 | |||
44 | /* | ||
45 | * All functions below always inlined so compiler could | ||
46 | * perform better optimizations in each of __asan_loadX/__asan_storeX | ||
47 | * depending on memory access size X. | ||
48 | */ | ||
49 | |||
50 | static __always_inline bool memory_is_poisoned_1(unsigned long addr) | ||
51 | { | ||
52 | s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); | ||
53 | |||
54 | if (unlikely(shadow_value)) { | ||
55 | s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; | ||
56 | return unlikely(last_accessible_byte >= shadow_value); | ||
57 | } | ||
58 | |||
59 | return false; | ||
60 | } | ||
61 | |||
62 | static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr, | ||
63 | unsigned long size) | ||
64 | { | ||
65 | u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); | ||
66 | |||
67 | /* | ||
68 | * Access crosses 8(shadow size)-byte boundary. Such access maps | ||
69 | * into 2 shadow bytes, so we need to check them both. | ||
70 | */ | ||
71 | if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1)) | ||
72 | return *shadow_addr || memory_is_poisoned_1(addr + size - 1); | ||
73 | |||
74 | return memory_is_poisoned_1(addr + size - 1); | ||
75 | } | ||
76 | |||
77 | static __always_inline bool memory_is_poisoned_16(unsigned long addr) | ||
78 | { | ||
79 | u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); | ||
80 | |||
81 | /* Unaligned 16-bytes access maps into 3 shadow bytes. */ | ||
82 | if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) | ||
83 | return *shadow_addr || memory_is_poisoned_1(addr + 15); | ||
84 | |||
85 | return *shadow_addr; | ||
86 | } | ||
87 | |||
88 | static __always_inline unsigned long bytes_is_nonzero(const u8 *start, | ||
89 | size_t size) | ||
90 | { | ||
91 | while (size) { | ||
92 | if (unlikely(*start)) | ||
93 | return (unsigned long)start; | ||
94 | start++; | ||
95 | size--; | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static __always_inline unsigned long memory_is_nonzero(const void *start, | ||
102 | const void *end) | ||
103 | { | ||
104 | unsigned int words; | ||
105 | unsigned long ret; | ||
106 | unsigned int prefix = (unsigned long)start % 8; | ||
107 | |||
108 | if (end - start <= 16) | ||
109 | return bytes_is_nonzero(start, end - start); | ||
110 | |||
111 | if (prefix) { | ||
112 | prefix = 8 - prefix; | ||
113 | ret = bytes_is_nonzero(start, prefix); | ||
114 | if (unlikely(ret)) | ||
115 | return ret; | ||
116 | start += prefix; | ||
117 | } | ||
118 | |||
119 | words = (end - start) / 8; | ||
120 | while (words) { | ||
121 | if (unlikely(*(u64 *)start)) | ||
122 | return bytes_is_nonzero(start, 8); | ||
123 | start += 8; | ||
124 | words--; | ||
125 | } | ||
126 | |||
127 | return bytes_is_nonzero(start, (end - start) % 8); | ||
128 | } | ||
129 | |||
130 | static __always_inline bool memory_is_poisoned_n(unsigned long addr, | ||
131 | size_t size) | ||
132 | { | ||
133 | unsigned long ret; | ||
134 | |||
135 | ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), | ||
136 | kasan_mem_to_shadow((void *)addr + size - 1) + 1); | ||
137 | |||
138 | if (unlikely(ret)) { | ||
139 | unsigned long last_byte = addr + size - 1; | ||
140 | s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); | ||
141 | |||
142 | if (unlikely(ret != (unsigned long)last_shadow || | ||
143 | ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) | ||
144 | return true; | ||
145 | } | ||
146 | return false; | ||
147 | } | ||
148 | |||
149 | static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) | ||
150 | { | ||
151 | if (__builtin_constant_p(size)) { | ||
152 | switch (size) { | ||
153 | case 1: | ||
154 | return memory_is_poisoned_1(addr); | ||
155 | case 2: | ||
156 | case 4: | ||
157 | case 8: | ||
158 | return memory_is_poisoned_2_4_8(addr, size); | ||
159 | case 16: | ||
160 | return memory_is_poisoned_16(addr); | ||
161 | default: | ||
162 | BUILD_BUG(); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | return memory_is_poisoned_n(addr, size); | ||
167 | } | ||
168 | |||
169 | static __always_inline void check_memory_region_inline(unsigned long addr, | ||
170 | size_t size, bool write, | ||
171 | unsigned long ret_ip) | ||
172 | { | ||
173 | if (unlikely(size == 0)) | ||
174 | return; | ||
175 | |||
176 | if (unlikely((void *)addr < | ||
177 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { | ||
178 | kasan_report(addr, size, write, ret_ip); | ||
179 | return; | ||
180 | } | ||
181 | |||
182 | if (likely(!memory_is_poisoned(addr, size))) | ||
183 | return; | ||
184 | |||
185 | kasan_report(addr, size, write, ret_ip); | ||
186 | } | ||
187 | |||
188 | void check_memory_region(unsigned long addr, size_t size, bool write, | ||
189 | unsigned long ret_ip) | ||
190 | { | ||
191 | check_memory_region_inline(addr, size, write, ret_ip); | ||
192 | } | ||
193 | |||
194 | void kasan_cache_shrink(struct kmem_cache *cache) | ||
195 | { | ||
196 | quarantine_remove_cache(cache); | ||
197 | } | ||
198 | |||
199 | void kasan_cache_shutdown(struct kmem_cache *cache) | ||
200 | { | ||
201 | if (!__kmem_cache_empty(cache)) | ||
202 | quarantine_remove_cache(cache); | ||
203 | } | ||
204 | |||
205 | static void register_global(struct kasan_global *global) | ||
206 | { | ||
207 | size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); | ||
208 | |||
209 | kasan_unpoison_shadow(global->beg, global->size); | ||
210 | |||
211 | kasan_poison_shadow(global->beg + aligned_size, | ||
212 | global->size_with_redzone - aligned_size, | ||
213 | KASAN_GLOBAL_REDZONE); | ||
214 | } | ||
215 | |||
216 | void __asan_register_globals(struct kasan_global *globals, size_t size) | ||
217 | { | ||
218 | int i; | ||
219 | |||
220 | for (i = 0; i < size; i++) | ||
221 | register_global(&globals[i]); | ||
222 | } | ||
223 | EXPORT_SYMBOL(__asan_register_globals); | ||
224 | |||
225 | void __asan_unregister_globals(struct kasan_global *globals, size_t size) | ||
226 | { | ||
227 | } | ||
228 | EXPORT_SYMBOL(__asan_unregister_globals); | ||
229 | |||
230 | #define DEFINE_ASAN_LOAD_STORE(size) \ | ||
231 | void __asan_load##size(unsigned long addr) \ | ||
232 | { \ | ||
233 | check_memory_region_inline(addr, size, false, _RET_IP_);\ | ||
234 | } \ | ||
235 | EXPORT_SYMBOL(__asan_load##size); \ | ||
236 | __alias(__asan_load##size) \ | ||
237 | void __asan_load##size##_noabort(unsigned long); \ | ||
238 | EXPORT_SYMBOL(__asan_load##size##_noabort); \ | ||
239 | void __asan_store##size(unsigned long addr) \ | ||
240 | { \ | ||
241 | check_memory_region_inline(addr, size, true, _RET_IP_); \ | ||
242 | } \ | ||
243 | EXPORT_SYMBOL(__asan_store##size); \ | ||
244 | __alias(__asan_store##size) \ | ||
245 | void __asan_store##size##_noabort(unsigned long); \ | ||
246 | EXPORT_SYMBOL(__asan_store##size##_noabort) | ||
247 | |||
248 | DEFINE_ASAN_LOAD_STORE(1); | ||
249 | DEFINE_ASAN_LOAD_STORE(2); | ||
250 | DEFINE_ASAN_LOAD_STORE(4); | ||
251 | DEFINE_ASAN_LOAD_STORE(8); | ||
252 | DEFINE_ASAN_LOAD_STORE(16); | ||
253 | |||
254 | void __asan_loadN(unsigned long addr, size_t size) | ||
255 | { | ||
256 | check_memory_region(addr, size, false, _RET_IP_); | ||
257 | } | ||
258 | EXPORT_SYMBOL(__asan_loadN); | ||
259 | |||
260 | __alias(__asan_loadN) | ||
261 | void __asan_loadN_noabort(unsigned long, size_t); | ||
262 | EXPORT_SYMBOL(__asan_loadN_noabort); | ||
263 | |||
264 | void __asan_storeN(unsigned long addr, size_t size) | ||
265 | { | ||
266 | check_memory_region(addr, size, true, _RET_IP_); | ||
267 | } | ||
268 | EXPORT_SYMBOL(__asan_storeN); | ||
269 | |||
270 | __alias(__asan_storeN) | ||
271 | void __asan_storeN_noabort(unsigned long, size_t); | ||
272 | EXPORT_SYMBOL(__asan_storeN_noabort); | ||
273 | |||
274 | /* to shut up compiler complaints */ | ||
275 | void __asan_handle_no_return(void) {} | ||
276 | EXPORT_SYMBOL(__asan_handle_no_return); | ||
277 | |||
278 | /* Emitted by compiler to poison large objects when they go out of scope. */ | ||
279 | void __asan_poison_stack_memory(const void *addr, size_t size) | ||
280 | { | ||
281 | /* | ||
282 | * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded | ||
283 | * by redzones, so we simply round up size to simplify logic. | ||
284 | */ | ||
285 | kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), | ||
286 | KASAN_USE_AFTER_SCOPE); | ||
287 | } | ||
288 | EXPORT_SYMBOL(__asan_poison_stack_memory); | ||
289 | |||
290 | /* Emitted by compiler to unpoison large objects when they go into scope. */ | ||
291 | void __asan_unpoison_stack_memory(const void *addr, size_t size) | ||
292 | { | ||
293 | kasan_unpoison_shadow(addr, size); | ||
294 | } | ||
295 | EXPORT_SYMBOL(__asan_unpoison_stack_memory); | ||
296 | |||
297 | /* Emitted by compiler to poison alloca()ed objects. */ | ||
298 | void __asan_alloca_poison(unsigned long addr, size_t size) | ||
299 | { | ||
300 | size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); | ||
301 | size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - | ||
302 | rounded_up_size; | ||
303 | size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); | ||
304 | |||
305 | const void *left_redzone = (const void *)(addr - | ||
306 | KASAN_ALLOCA_REDZONE_SIZE); | ||
307 | const void *right_redzone = (const void *)(addr + rounded_up_size); | ||
308 | |||
309 | WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); | ||
310 | |||
311 | kasan_unpoison_shadow((const void *)(addr + rounded_down_size), | ||
312 | size - rounded_down_size); | ||
313 | kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, | ||
314 | KASAN_ALLOCA_LEFT); | ||
315 | kasan_poison_shadow(right_redzone, | ||
316 | padding_size + KASAN_ALLOCA_REDZONE_SIZE, | ||
317 | KASAN_ALLOCA_RIGHT); | ||
318 | } | ||
319 | EXPORT_SYMBOL(__asan_alloca_poison); | ||
320 | |||
321 | /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */ | ||
322 | void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) | ||
323 | { | ||
324 | if (unlikely(!stack_top || stack_top > stack_bottom)) | ||
325 | return; | ||
326 | |||
327 | kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); | ||
328 | } | ||
329 | EXPORT_SYMBOL(__asan_allocas_unpoison); | ||
330 | |||
331 | /* Emitted by the compiler to [un]poison local variables. */ | ||
332 | #define DEFINE_ASAN_SET_SHADOW(byte) \ | ||
333 | void __asan_set_shadow_##byte(const void *addr, size_t size) \ | ||
334 | { \ | ||
335 | __memset((void *)addr, 0x##byte, size); \ | ||
336 | } \ | ||
337 | EXPORT_SYMBOL(__asan_set_shadow_##byte) | ||
338 | |||
339 | DEFINE_ASAN_SET_SHADOW(00); | ||
340 | DEFINE_ASAN_SET_SHADOW(f1); | ||
341 | DEFINE_ASAN_SET_SHADOW(f2); | ||
342 | DEFINE_ASAN_SET_SHADOW(f3); | ||
343 | DEFINE_ASAN_SET_SHADOW(f5); | ||
344 | DEFINE_ASAN_SET_SHADOW(f8); | ||
diff --git a/mm/kasan/generic_report.c b/mm/kasan/generic_report.c new file mode 100644 index 000000000000..5e12035888f2 --- /dev/null +++ b/mm/kasan/generic_report.c | |||
@@ -0,0 +1,153 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * This file contains generic KASAN specific error reporting code. | ||
4 | * | ||
5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
7 | * | ||
8 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | ||
9 | * Andrey Konovalov <andreyknvl@gmail.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/bitops.h> | ||
18 | #include <linux/ftrace.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/printk.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/stackdepot.h> | ||
26 | #include <linux/stacktrace.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kasan.h> | ||
30 | #include <linux/module.h> | ||
31 | |||
32 | #include <asm/sections.h> | ||
33 | |||
34 | #include "kasan.h" | ||
35 | #include "../slab.h" | ||
36 | |||
37 | void *find_first_bad_addr(void *addr, size_t size) | ||
38 | { | ||
39 | void *p = addr; | ||
40 | |||
41 | while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p))) | ||
42 | p += KASAN_SHADOW_SCALE_SIZE; | ||
43 | return p; | ||
44 | } | ||
45 | |||
46 | static const char *get_shadow_bug_type(struct kasan_access_info *info) | ||
47 | { | ||
48 | const char *bug_type = "unknown-crash"; | ||
49 | u8 *shadow_addr; | ||
50 | |||
51 | shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr); | ||
52 | |||
53 | /* | ||
54 | * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look | ||
55 | * at the next shadow byte to determine the type of the bad access. | ||
56 | */ | ||
57 | if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1) | ||
58 | shadow_addr++; | ||
59 | |||
60 | switch (*shadow_addr) { | ||
61 | case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: | ||
62 | /* | ||
63 | * In theory it's still possible to see these shadow values | ||
64 | * due to a data race in the kernel code. | ||
65 | */ | ||
66 | bug_type = "out-of-bounds"; | ||
67 | break; | ||
68 | case KASAN_PAGE_REDZONE: | ||
69 | case KASAN_KMALLOC_REDZONE: | ||
70 | bug_type = "slab-out-of-bounds"; | ||
71 | break; | ||
72 | case KASAN_GLOBAL_REDZONE: | ||
73 | bug_type = "global-out-of-bounds"; | ||
74 | break; | ||
75 | case KASAN_STACK_LEFT: | ||
76 | case KASAN_STACK_MID: | ||
77 | case KASAN_STACK_RIGHT: | ||
78 | case KASAN_STACK_PARTIAL: | ||
79 | bug_type = "stack-out-of-bounds"; | ||
80 | break; | ||
81 | case KASAN_FREE_PAGE: | ||
82 | case KASAN_KMALLOC_FREE: | ||
83 | bug_type = "use-after-free"; | ||
84 | break; | ||
85 | case KASAN_USE_AFTER_SCOPE: | ||
86 | bug_type = "use-after-scope"; | ||
87 | break; | ||
88 | case KASAN_ALLOCA_LEFT: | ||
89 | case KASAN_ALLOCA_RIGHT: | ||
90 | bug_type = "alloca-out-of-bounds"; | ||
91 | break; | ||
92 | } | ||
93 | |||
94 | return bug_type; | ||
95 | } | ||
96 | |||
97 | static const char *get_wild_bug_type(struct kasan_access_info *info) | ||
98 | { | ||
99 | const char *bug_type = "unknown-crash"; | ||
100 | |||
101 | if ((unsigned long)info->access_addr < PAGE_SIZE) | ||
102 | bug_type = "null-ptr-deref"; | ||
103 | else if ((unsigned long)info->access_addr < TASK_SIZE) | ||
104 | bug_type = "user-memory-access"; | ||
105 | else | ||
106 | bug_type = "wild-memory-access"; | ||
107 | |||
108 | return bug_type; | ||
109 | } | ||
110 | |||
111 | const char *get_bug_type(struct kasan_access_info *info) | ||
112 | { | ||
113 | if (addr_has_shadow(info->access_addr)) | ||
114 | return get_shadow_bug_type(info); | ||
115 | return get_wild_bug_type(info); | ||
116 | } | ||
117 | |||
118 | #define DEFINE_ASAN_REPORT_LOAD(size) \ | ||
119 | void __asan_report_load##size##_noabort(unsigned long addr) \ | ||
120 | { \ | ||
121 | kasan_report(addr, size, false, _RET_IP_); \ | ||
122 | } \ | ||
123 | EXPORT_SYMBOL(__asan_report_load##size##_noabort) | ||
124 | |||
125 | #define DEFINE_ASAN_REPORT_STORE(size) \ | ||
126 | void __asan_report_store##size##_noabort(unsigned long addr) \ | ||
127 | { \ | ||
128 | kasan_report(addr, size, true, _RET_IP_); \ | ||
129 | } \ | ||
130 | EXPORT_SYMBOL(__asan_report_store##size##_noabort) | ||
131 | |||
132 | DEFINE_ASAN_REPORT_LOAD(1); | ||
133 | DEFINE_ASAN_REPORT_LOAD(2); | ||
134 | DEFINE_ASAN_REPORT_LOAD(4); | ||
135 | DEFINE_ASAN_REPORT_LOAD(8); | ||
136 | DEFINE_ASAN_REPORT_LOAD(16); | ||
137 | DEFINE_ASAN_REPORT_STORE(1); | ||
138 | DEFINE_ASAN_REPORT_STORE(2); | ||
139 | DEFINE_ASAN_REPORT_STORE(4); | ||
140 | DEFINE_ASAN_REPORT_STORE(8); | ||
141 | DEFINE_ASAN_REPORT_STORE(16); | ||
142 | |||
143 | void __asan_report_load_n_noabort(unsigned long addr, size_t size) | ||
144 | { | ||
145 | kasan_report(addr, size, false, _RET_IP_); | ||
146 | } | ||
147 | EXPORT_SYMBOL(__asan_report_load_n_noabort); | ||
148 | |||
149 | void __asan_report_store_n_noabort(unsigned long addr, size_t size) | ||
150 | { | ||
151 | kasan_report(addr, size, true, _RET_IP_); | ||
152 | } | ||
153 | EXPORT_SYMBOL(__asan_report_store_n_noabort); | ||
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/init.c index c7550eb65922..34afad56497b 100644 --- a/mm/kasan/kasan_init.c +++ b/mm/kasan/init.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * This file contains some kasan initialization code. | 3 | * This file contains some kasan initialization code. |
3 | * | 4 | * |
@@ -30,13 +31,13 @@ | |||
30 | * - Latter it reused it as zero shadow to cover large ranges of memory | 31 | * - Latter it reused it as zero shadow to cover large ranges of memory |
31 | * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). | 32 | * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). |
32 | */ | 33 | */ |
33 | unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; | 34 | unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss; |
34 | 35 | ||
35 | #if CONFIG_PGTABLE_LEVELS > 4 | 36 | #if CONFIG_PGTABLE_LEVELS > 4 |
36 | p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss; | 37 | p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss; |
37 | static inline bool kasan_p4d_table(pgd_t pgd) | 38 | static inline bool kasan_p4d_table(pgd_t pgd) |
38 | { | 39 | { |
39 | return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d)); | 40 | return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d)); |
40 | } | 41 | } |
41 | #else | 42 | #else |
42 | static inline bool kasan_p4d_table(pgd_t pgd) | 43 | static inline bool kasan_p4d_table(pgd_t pgd) |
@@ -45,10 +46,10 @@ static inline bool kasan_p4d_table(pgd_t pgd) | |||
45 | } | 46 | } |
46 | #endif | 47 | #endif |
47 | #if CONFIG_PGTABLE_LEVELS > 3 | 48 | #if CONFIG_PGTABLE_LEVELS > 3 |
48 | pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; | 49 | pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss; |
49 | static inline bool kasan_pud_table(p4d_t p4d) | 50 | static inline bool kasan_pud_table(p4d_t p4d) |
50 | { | 51 | { |
51 | return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud)); | 52 | return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud)); |
52 | } | 53 | } |
53 | #else | 54 | #else |
54 | static inline bool kasan_pud_table(p4d_t p4d) | 55 | static inline bool kasan_pud_table(p4d_t p4d) |
@@ -57,10 +58,10 @@ static inline bool kasan_pud_table(p4d_t p4d) | |||
57 | } | 58 | } |
58 | #endif | 59 | #endif |
59 | #if CONFIG_PGTABLE_LEVELS > 2 | 60 | #if CONFIG_PGTABLE_LEVELS > 2 |
60 | pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; | 61 | pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss; |
61 | static inline bool kasan_pmd_table(pud_t pud) | 62 | static inline bool kasan_pmd_table(pud_t pud) |
62 | { | 63 | { |
63 | return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd)); | 64 | return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd)); |
64 | } | 65 | } |
65 | #else | 66 | #else |
66 | static inline bool kasan_pmd_table(pud_t pud) | 67 | static inline bool kasan_pmd_table(pud_t pud) |
@@ -68,16 +69,16 @@ static inline bool kasan_pmd_table(pud_t pud) | |||
68 | return 0; | 69 | return 0; |
69 | } | 70 | } |
70 | #endif | 71 | #endif |
71 | pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; | 72 | pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss; |
72 | 73 | ||
73 | static inline bool kasan_pte_table(pmd_t pmd) | 74 | static inline bool kasan_pte_table(pmd_t pmd) |
74 | { | 75 | { |
75 | return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte)); | 76 | return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte)); |
76 | } | 77 | } |
77 | 78 | ||
78 | static inline bool kasan_zero_page_entry(pte_t pte) | 79 | static inline bool kasan_early_shadow_page_entry(pte_t pte) |
79 | { | 80 | { |
80 | return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page)); | 81 | return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page)); |
81 | } | 82 | } |
82 | 83 | ||
83 | static __init void *early_alloc(size_t size, int node) | 84 | static __init void *early_alloc(size_t size, int node) |
@@ -92,7 +93,8 @@ static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, | |||
92 | pte_t *pte = pte_offset_kernel(pmd, addr); | 93 | pte_t *pte = pte_offset_kernel(pmd, addr); |
93 | pte_t zero_pte; | 94 | pte_t zero_pte; |
94 | 95 | ||
95 | zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL); | 96 | zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)), |
97 | PAGE_KERNEL); | ||
96 | zero_pte = pte_wrprotect(zero_pte); | 98 | zero_pte = pte_wrprotect(zero_pte); |
97 | 99 | ||
98 | while (addr + PAGE_SIZE <= end) { | 100 | while (addr + PAGE_SIZE <= end) { |
@@ -112,7 +114,8 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, | |||
112 | next = pmd_addr_end(addr, end); | 114 | next = pmd_addr_end(addr, end); |
113 | 115 | ||
114 | if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { | 116 | if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { |
115 | pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 117 | pmd_populate_kernel(&init_mm, pmd, |
118 | lm_alias(kasan_early_shadow_pte)); | ||
116 | continue; | 119 | continue; |
117 | } | 120 | } |
118 | 121 | ||
@@ -145,9 +148,11 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr, | |||
145 | if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { | 148 | if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { |
146 | pmd_t *pmd; | 149 | pmd_t *pmd; |
147 | 150 | ||
148 | pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 151 | pud_populate(&init_mm, pud, |
152 | lm_alias(kasan_early_shadow_pmd)); | ||
149 | pmd = pmd_offset(pud, addr); | 153 | pmd = pmd_offset(pud, addr); |
150 | pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 154 | pmd_populate_kernel(&init_mm, pmd, |
155 | lm_alias(kasan_early_shadow_pte)); | ||
151 | continue; | 156 | continue; |
152 | } | 157 | } |
153 | 158 | ||
@@ -181,12 +186,14 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr, | |||
181 | pud_t *pud; | 186 | pud_t *pud; |
182 | pmd_t *pmd; | 187 | pmd_t *pmd; |
183 | 188 | ||
184 | p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); | 189 | p4d_populate(&init_mm, p4d, |
190 | lm_alias(kasan_early_shadow_pud)); | ||
185 | pud = pud_offset(p4d, addr); | 191 | pud = pud_offset(p4d, addr); |
186 | pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 192 | pud_populate(&init_mm, pud, |
193 | lm_alias(kasan_early_shadow_pmd)); | ||
187 | pmd = pmd_offset(pud, addr); | 194 | pmd = pmd_offset(pud, addr); |
188 | pmd_populate_kernel(&init_mm, pmd, | 195 | pmd_populate_kernel(&init_mm, pmd, |
189 | lm_alias(kasan_zero_pte)); | 196 | lm_alias(kasan_early_shadow_pte)); |
190 | continue; | 197 | continue; |
191 | } | 198 | } |
192 | 199 | ||
@@ -209,13 +216,13 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr, | |||
209 | } | 216 | } |
210 | 217 | ||
211 | /** | 218 | /** |
212 | * kasan_populate_zero_shadow - populate shadow memory region with | 219 | * kasan_populate_early_shadow - populate shadow memory region with |
213 | * kasan_zero_page | 220 | * kasan_early_shadow_page |
214 | * @shadow_start - start of the memory range to populate | 221 | * @shadow_start - start of the memory range to populate |
215 | * @shadow_end - end of the memory range to populate | 222 | * @shadow_end - end of the memory range to populate |
216 | */ | 223 | */ |
217 | int __ref kasan_populate_zero_shadow(const void *shadow_start, | 224 | int __ref kasan_populate_early_shadow(const void *shadow_start, |
218 | const void *shadow_end) | 225 | const void *shadow_end) |
219 | { | 226 | { |
220 | unsigned long addr = (unsigned long)shadow_start; | 227 | unsigned long addr = (unsigned long)shadow_start; |
221 | unsigned long end = (unsigned long)shadow_end; | 228 | unsigned long end = (unsigned long)shadow_end; |
@@ -231,7 +238,7 @@ int __ref kasan_populate_zero_shadow(const void *shadow_start, | |||
231 | pmd_t *pmd; | 238 | pmd_t *pmd; |
232 | 239 | ||
233 | /* | 240 | /* |
234 | * kasan_zero_pud should be populated with pmds | 241 | * kasan_early_shadow_pud should be populated with pmds |
235 | * at this moment. | 242 | * at this moment. |
236 | * [pud,pmd]_populate*() below needed only for | 243 | * [pud,pmd]_populate*() below needed only for |
237 | * 3,2 - level page tables where we don't have | 244 | * 3,2 - level page tables where we don't have |
@@ -241,21 +248,25 @@ int __ref kasan_populate_zero_shadow(const void *shadow_start, | |||
241 | * The ifndef is required to avoid build breakage. | 248 | * The ifndef is required to avoid build breakage. |
242 | * | 249 | * |
243 | * With 5level-fixup.h, pgd_populate() is not nop and | 250 | * With 5level-fixup.h, pgd_populate() is not nop and |
244 | * we reference kasan_zero_p4d. It's not defined | 251 | * we reference kasan_early_shadow_p4d. It's not defined |
245 | * unless 5-level paging enabled. | 252 | * unless 5-level paging enabled. |
246 | * | 253 | * |
247 | * The ifndef can be dropped once all KASAN-enabled | 254 | * The ifndef can be dropped once all KASAN-enabled |
248 | * architectures will switch to pgtable-nop4d.h. | 255 | * architectures will switch to pgtable-nop4d.h. |
249 | */ | 256 | */ |
250 | #ifndef __ARCH_HAS_5LEVEL_HACK | 257 | #ifndef __ARCH_HAS_5LEVEL_HACK |
251 | pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d)); | 258 | pgd_populate(&init_mm, pgd, |
259 | lm_alias(kasan_early_shadow_p4d)); | ||
252 | #endif | 260 | #endif |
253 | p4d = p4d_offset(pgd, addr); | 261 | p4d = p4d_offset(pgd, addr); |
254 | p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); | 262 | p4d_populate(&init_mm, p4d, |
263 | lm_alias(kasan_early_shadow_pud)); | ||
255 | pud = pud_offset(p4d, addr); | 264 | pud = pud_offset(p4d, addr); |
256 | pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 265 | pud_populate(&init_mm, pud, |
266 | lm_alias(kasan_early_shadow_pmd)); | ||
257 | pmd = pmd_offset(pud, addr); | 267 | pmd = pmd_offset(pud, addr); |
258 | pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 268 | pmd_populate_kernel(&init_mm, pmd, |
269 | lm_alias(kasan_early_shadow_pte)); | ||
259 | continue; | 270 | continue; |
260 | } | 271 | } |
261 | 272 | ||
@@ -350,7 +361,7 @@ static void kasan_remove_pte_table(pte_t *pte, unsigned long addr, | |||
350 | if (!pte_present(*pte)) | 361 | if (!pte_present(*pte)) |
351 | continue; | 362 | continue; |
352 | 363 | ||
353 | if (WARN_ON(!kasan_zero_page_entry(*pte))) | 364 | if (WARN_ON(!kasan_early_shadow_page_entry(*pte))) |
354 | continue; | 365 | continue; |
355 | pte_clear(&init_mm, addr, pte); | 366 | pte_clear(&init_mm, addr, pte); |
356 | } | 367 | } |
@@ -480,7 +491,7 @@ int kasan_add_zero_shadow(void *start, unsigned long size) | |||
480 | WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE))) | 491 | WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE))) |
481 | return -EINVAL; | 492 | return -EINVAL; |
482 | 493 | ||
483 | ret = kasan_populate_zero_shadow(shadow_start, shadow_end); | 494 | ret = kasan_populate_early_shadow(shadow_start, shadow_end); |
484 | if (ret) | 495 | if (ret) |
485 | kasan_remove_zero_shadow(shadow_start, | 496 | kasan_remove_zero_shadow(shadow_start, |
486 | size >> KASAN_SHADOW_SCALE_SHIFT); | 497 | size >> KASAN_SHADOW_SCALE_SHIFT); |
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index c12dcfde2ebd..ea51b2d898ec 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h | |||
@@ -8,10 +8,22 @@ | |||
8 | #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT) | 8 | #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT) |
9 | #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1) | 9 | #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1) |
10 | 10 | ||
11 | #define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ | ||
12 | #define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ | ||
13 | #define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ | ||
14 | |||
15 | #ifdef CONFIG_KASAN_GENERIC | ||
11 | #define KASAN_FREE_PAGE 0xFF /* page was freed */ | 16 | #define KASAN_FREE_PAGE 0xFF /* page was freed */ |
12 | #define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */ | 17 | #define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */ |
13 | #define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */ | 18 | #define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */ |
14 | #define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */ | 19 | #define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */ |
20 | #else | ||
21 | #define KASAN_FREE_PAGE KASAN_TAG_INVALID | ||
22 | #define KASAN_PAGE_REDZONE KASAN_TAG_INVALID | ||
23 | #define KASAN_KMALLOC_REDZONE KASAN_TAG_INVALID | ||
24 | #define KASAN_KMALLOC_FREE KASAN_TAG_INVALID | ||
25 | #endif | ||
26 | |||
15 | #define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */ | 27 | #define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */ |
16 | 28 | ||
17 | /* | 29 | /* |
@@ -105,11 +117,25 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) | |||
105 | << KASAN_SHADOW_SCALE_SHIFT); | 117 | << KASAN_SHADOW_SCALE_SHIFT); |
106 | } | 118 | } |
107 | 119 | ||
120 | static inline bool addr_has_shadow(const void *addr) | ||
121 | { | ||
122 | return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); | ||
123 | } | ||
124 | |||
125 | void kasan_poison_shadow(const void *address, size_t size, u8 value); | ||
126 | |||
127 | void check_memory_region(unsigned long addr, size_t size, bool write, | ||
128 | unsigned long ret_ip); | ||
129 | |||
130 | void *find_first_bad_addr(void *addr, size_t size); | ||
131 | const char *get_bug_type(struct kasan_access_info *info); | ||
132 | |||
108 | void kasan_report(unsigned long addr, size_t size, | 133 | void kasan_report(unsigned long addr, size_t size, |
109 | bool is_write, unsigned long ip); | 134 | bool is_write, unsigned long ip); |
110 | void kasan_report_invalid_free(void *object, unsigned long ip); | 135 | void kasan_report_invalid_free(void *object, unsigned long ip); |
111 | 136 | ||
112 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) | 137 | #if defined(CONFIG_KASAN_GENERIC) && \ |
138 | (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) | ||
113 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); | 139 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); |
114 | void quarantine_reduce(void); | 140 | void quarantine_reduce(void); |
115 | void quarantine_remove_cache(struct kmem_cache *cache); | 141 | void quarantine_remove_cache(struct kmem_cache *cache); |
@@ -120,6 +146,37 @@ static inline void quarantine_reduce(void) { } | |||
120 | static inline void quarantine_remove_cache(struct kmem_cache *cache) { } | 146 | static inline void quarantine_remove_cache(struct kmem_cache *cache) { } |
121 | #endif | 147 | #endif |
122 | 148 | ||
149 | #ifdef CONFIG_KASAN_SW_TAGS | ||
150 | |||
151 | void print_tags(u8 addr_tag, const void *addr); | ||
152 | |||
153 | u8 random_tag(void); | ||
154 | |||
155 | #else | ||
156 | |||
157 | static inline void print_tags(u8 addr_tag, const void *addr) { } | ||
158 | |||
159 | static inline u8 random_tag(void) | ||
160 | { | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | #endif | ||
165 | |||
166 | #ifndef arch_kasan_set_tag | ||
167 | #define arch_kasan_set_tag(addr, tag) ((void *)(addr)) | ||
168 | #endif | ||
169 | #ifndef arch_kasan_reset_tag | ||
170 | #define arch_kasan_reset_tag(addr) ((void *)(addr)) | ||
171 | #endif | ||
172 | #ifndef arch_kasan_get_tag | ||
173 | #define arch_kasan_get_tag(addr) 0 | ||
174 | #endif | ||
175 | |||
176 | #define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag))) | ||
177 | #define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr)) | ||
178 | #define get_tag(addr) arch_kasan_get_tag(addr) | ||
179 | |||
123 | /* | 180 | /* |
124 | * Exported functions for interfaces called from assembly or from generated | 181 | * Exported functions for interfaces called from assembly or from generated |
125 | * code. Declarations here to avoid warning about missing declarations. | 182 | * code. Declarations here to avoid warning about missing declarations. |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index b209dbaefde8..978bc4a3eb51 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * KASAN quarantine. | 3 | * KASAN quarantine. |
3 | * | 4 | * |
@@ -236,7 +237,7 @@ void quarantine_reduce(void) | |||
236 | * Update quarantine size in case of hotplug. Allocate a fraction of | 237 | * Update quarantine size in case of hotplug. Allocate a fraction of |
237 | * the installed memory to quarantine minus per-cpu queue limits. | 238 | * the installed memory to quarantine minus per-cpu queue limits. |
238 | */ | 239 | */ |
239 | total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / | 240 | total_size = (totalram_pages() << PAGE_SHIFT) / |
240 | QUARANTINE_FRACTION; | 241 | QUARANTINE_FRACTION; |
241 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); | 242 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); |
242 | new_quarantine_size = (total_size < percpu_quarantines) ? | 243 | new_quarantine_size = (total_size < percpu_quarantines) ? |
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 5c169aa688fd..ca9418fe9232 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c | |||
@@ -1,5 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * This file contains error reporting code. | 3 | * This file contains common generic and tag-based KASAN error reporting code. |
3 | * | 4 | * |
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | 5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. |
5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
@@ -39,129 +40,43 @@ | |||
39 | #define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK) | 40 | #define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK) |
40 | #define SHADOW_ROWS_AROUND_ADDR 2 | 41 | #define SHADOW_ROWS_AROUND_ADDR 2 |
41 | 42 | ||
42 | static const void *find_first_bad_addr(const void *addr, size_t size) | 43 | static unsigned long kasan_flags; |
43 | { | ||
44 | u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr); | ||
45 | const void *first_bad_addr = addr; | ||
46 | |||
47 | while (!shadow_val && first_bad_addr < addr + size) { | ||
48 | first_bad_addr += KASAN_SHADOW_SCALE_SIZE; | ||
49 | shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr); | ||
50 | } | ||
51 | return first_bad_addr; | ||
52 | } | ||
53 | 44 | ||
54 | static bool addr_has_shadow(struct kasan_access_info *info) | 45 | #define KASAN_BIT_REPORTED 0 |
55 | { | 46 | #define KASAN_BIT_MULTI_SHOT 1 |
56 | return (info->access_addr >= | ||
57 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); | ||
58 | } | ||
59 | 47 | ||
60 | static const char *get_shadow_bug_type(struct kasan_access_info *info) | 48 | bool kasan_save_enable_multi_shot(void) |
61 | { | 49 | { |
62 | const char *bug_type = "unknown-crash"; | 50 | return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); |
63 | u8 *shadow_addr; | ||
64 | |||
65 | info->first_bad_addr = find_first_bad_addr(info->access_addr, | ||
66 | info->access_size); | ||
67 | |||
68 | shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr); | ||
69 | |||
70 | /* | ||
71 | * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look | ||
72 | * at the next shadow byte to determine the type of the bad access. | ||
73 | */ | ||
74 | if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1) | ||
75 | shadow_addr++; | ||
76 | |||
77 | switch (*shadow_addr) { | ||
78 | case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: | ||
79 | /* | ||
80 | * In theory it's still possible to see these shadow values | ||
81 | * due to a data race in the kernel code. | ||
82 | */ | ||
83 | bug_type = "out-of-bounds"; | ||
84 | break; | ||
85 | case KASAN_PAGE_REDZONE: | ||
86 | case KASAN_KMALLOC_REDZONE: | ||
87 | bug_type = "slab-out-of-bounds"; | ||
88 | break; | ||
89 | case KASAN_GLOBAL_REDZONE: | ||
90 | bug_type = "global-out-of-bounds"; | ||
91 | break; | ||
92 | case KASAN_STACK_LEFT: | ||
93 | case KASAN_STACK_MID: | ||
94 | case KASAN_STACK_RIGHT: | ||
95 | case KASAN_STACK_PARTIAL: | ||
96 | bug_type = "stack-out-of-bounds"; | ||
97 | break; | ||
98 | case KASAN_FREE_PAGE: | ||
99 | case KASAN_KMALLOC_FREE: | ||
100 | bug_type = "use-after-free"; | ||
101 | break; | ||
102 | case KASAN_USE_AFTER_SCOPE: | ||
103 | bug_type = "use-after-scope"; | ||
104 | break; | ||
105 | case KASAN_ALLOCA_LEFT: | ||
106 | case KASAN_ALLOCA_RIGHT: | ||
107 | bug_type = "alloca-out-of-bounds"; | ||
108 | break; | ||
109 | } | ||
110 | |||
111 | return bug_type; | ||
112 | } | 51 | } |
52 | EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); | ||
113 | 53 | ||
114 | static const char *get_wild_bug_type(struct kasan_access_info *info) | 54 | void kasan_restore_multi_shot(bool enabled) |
115 | { | 55 | { |
116 | const char *bug_type = "unknown-crash"; | 56 | if (!enabled) |
117 | 57 | clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); | |
118 | if ((unsigned long)info->access_addr < PAGE_SIZE) | ||
119 | bug_type = "null-ptr-deref"; | ||
120 | else if ((unsigned long)info->access_addr < TASK_SIZE) | ||
121 | bug_type = "user-memory-access"; | ||
122 | else | ||
123 | bug_type = "wild-memory-access"; | ||
124 | |||
125 | return bug_type; | ||
126 | } | 58 | } |
59 | EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); | ||
127 | 60 | ||
128 | static const char *get_bug_type(struct kasan_access_info *info) | 61 | static int __init kasan_set_multi_shot(char *str) |
129 | { | 62 | { |
130 | if (addr_has_shadow(info)) | 63 | set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); |
131 | return get_shadow_bug_type(info); | 64 | return 1; |
132 | return get_wild_bug_type(info); | ||
133 | } | 65 | } |
66 | __setup("kasan_multi_shot", kasan_set_multi_shot); | ||
134 | 67 | ||
135 | static void print_error_description(struct kasan_access_info *info) | 68 | static void print_error_description(struct kasan_access_info *info) |
136 | { | 69 | { |
137 | const char *bug_type = get_bug_type(info); | ||
138 | |||
139 | pr_err("BUG: KASAN: %s in %pS\n", | 70 | pr_err("BUG: KASAN: %s in %pS\n", |
140 | bug_type, (void *)info->ip); | 71 | get_bug_type(info), (void *)info->ip); |
141 | pr_err("%s of size %zu at addr %px by task %s/%d\n", | 72 | pr_err("%s of size %zu at addr %px by task %s/%d\n", |
142 | info->is_write ? "Write" : "Read", info->access_size, | 73 | info->is_write ? "Write" : "Read", info->access_size, |
143 | info->access_addr, current->comm, task_pid_nr(current)); | 74 | info->access_addr, current->comm, task_pid_nr(current)); |
144 | } | 75 | } |
145 | 76 | ||
146 | static inline bool kernel_or_module_addr(const void *addr) | ||
147 | { | ||
148 | if (addr >= (void *)_stext && addr < (void *)_end) | ||
149 | return true; | ||
150 | if (is_module_address((unsigned long)addr)) | ||
151 | return true; | ||
152 | return false; | ||
153 | } | ||
154 | |||
155 | static inline bool init_task_stack_addr(const void *addr) | ||
156 | { | ||
157 | return addr >= (void *)&init_thread_union.stack && | ||
158 | (addr <= (void *)&init_thread_union.stack + | ||
159 | sizeof(init_thread_union.stack)); | ||
160 | } | ||
161 | |||
162 | static DEFINE_SPINLOCK(report_lock); | 77 | static DEFINE_SPINLOCK(report_lock); |
163 | 78 | ||
164 | static void kasan_start_report(unsigned long *flags) | 79 | static void start_report(unsigned long *flags) |
165 | { | 80 | { |
166 | /* | 81 | /* |
167 | * Make sure we don't end up in loop. | 82 | * Make sure we don't end up in loop. |
@@ -171,7 +86,7 @@ static void kasan_start_report(unsigned long *flags) | |||
171 | pr_err("==================================================================\n"); | 86 | pr_err("==================================================================\n"); |
172 | } | 87 | } |
173 | 88 | ||
174 | static void kasan_end_report(unsigned long *flags) | 89 | static void end_report(unsigned long *flags) |
175 | { | 90 | { |
176 | pr_err("==================================================================\n"); | 91 | pr_err("==================================================================\n"); |
177 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); | 92 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
@@ -249,6 +164,22 @@ static void describe_object(struct kmem_cache *cache, void *object, | |||
249 | describe_object_addr(cache, object, addr); | 164 | describe_object_addr(cache, object, addr); |
250 | } | 165 | } |
251 | 166 | ||
167 | static inline bool kernel_or_module_addr(const void *addr) | ||
168 | { | ||
169 | if (addr >= (void *)_stext && addr < (void *)_end) | ||
170 | return true; | ||
171 | if (is_module_address((unsigned long)addr)) | ||
172 | return true; | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | static inline bool init_task_stack_addr(const void *addr) | ||
177 | { | ||
178 | return addr >= (void *)&init_thread_union.stack && | ||
179 | (addr <= (void *)&init_thread_union.stack + | ||
180 | sizeof(init_thread_union.stack)); | ||
181 | } | ||
182 | |||
252 | static void print_address_description(void *addr) | 183 | static void print_address_description(void *addr) |
253 | { | 184 | { |
254 | struct page *page = addr_to_page(addr); | 185 | struct page *page = addr_to_page(addr); |
@@ -326,126 +257,69 @@ static void print_shadow_for_address(const void *addr) | |||
326 | } | 257 | } |
327 | } | 258 | } |
328 | 259 | ||
260 | static bool report_enabled(void) | ||
261 | { | ||
262 | if (current->kasan_depth) | ||
263 | return false; | ||
264 | if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) | ||
265 | return true; | ||
266 | return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); | ||
267 | } | ||
268 | |||
329 | void kasan_report_invalid_free(void *object, unsigned long ip) | 269 | void kasan_report_invalid_free(void *object, unsigned long ip) |
330 | { | 270 | { |
331 | unsigned long flags; | 271 | unsigned long flags; |
332 | 272 | ||
333 | kasan_start_report(&flags); | 273 | start_report(&flags); |
334 | pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip); | 274 | pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip); |
275 | print_tags(get_tag(object), reset_tag(object)); | ||
276 | object = reset_tag(object); | ||
335 | pr_err("\n"); | 277 | pr_err("\n"); |
336 | print_address_description(object); | 278 | print_address_description(object); |
337 | pr_err("\n"); | 279 | pr_err("\n"); |
338 | print_shadow_for_address(object); | 280 | print_shadow_for_address(object); |
339 | kasan_end_report(&flags); | 281 | end_report(&flags); |
340 | } | ||
341 | |||
342 | static void kasan_report_error(struct kasan_access_info *info) | ||
343 | { | ||
344 | unsigned long flags; | ||
345 | |||
346 | kasan_start_report(&flags); | ||
347 | |||
348 | print_error_description(info); | ||
349 | pr_err("\n"); | ||
350 | |||
351 | if (!addr_has_shadow(info)) { | ||
352 | dump_stack(); | ||
353 | } else { | ||
354 | print_address_description((void *)info->access_addr); | ||
355 | pr_err("\n"); | ||
356 | print_shadow_for_address(info->first_bad_addr); | ||
357 | } | ||
358 | |||
359 | kasan_end_report(&flags); | ||
360 | } | ||
361 | |||
362 | static unsigned long kasan_flags; | ||
363 | |||
364 | #define KASAN_BIT_REPORTED 0 | ||
365 | #define KASAN_BIT_MULTI_SHOT 1 | ||
366 | |||
367 | bool kasan_save_enable_multi_shot(void) | ||
368 | { | ||
369 | return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); | ||
370 | } | ||
371 | EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); | ||
372 | |||
373 | void kasan_restore_multi_shot(bool enabled) | ||
374 | { | ||
375 | if (!enabled) | ||
376 | clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); | ||
377 | } | ||
378 | EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); | ||
379 | |||
380 | static int __init kasan_set_multi_shot(char *str) | ||
381 | { | ||
382 | set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); | ||
383 | return 1; | ||
384 | } | ||
385 | __setup("kasan_multi_shot", kasan_set_multi_shot); | ||
386 | |||
387 | static inline bool kasan_report_enabled(void) | ||
388 | { | ||
389 | if (current->kasan_depth) | ||
390 | return false; | ||
391 | if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) | ||
392 | return true; | ||
393 | return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); | ||
394 | } | 282 | } |
395 | 283 | ||
396 | void kasan_report(unsigned long addr, size_t size, | 284 | void kasan_report(unsigned long addr, size_t size, |
397 | bool is_write, unsigned long ip) | 285 | bool is_write, unsigned long ip) |
398 | { | 286 | { |
399 | struct kasan_access_info info; | 287 | struct kasan_access_info info; |
288 | void *tagged_addr; | ||
289 | void *untagged_addr; | ||
290 | unsigned long flags; | ||
400 | 291 | ||
401 | if (likely(!kasan_report_enabled())) | 292 | if (likely(!report_enabled())) |
402 | return; | 293 | return; |
403 | 294 | ||
404 | disable_trace_on_warning(); | 295 | disable_trace_on_warning(); |
405 | 296 | ||
406 | info.access_addr = (void *)addr; | 297 | tagged_addr = (void *)addr; |
407 | info.first_bad_addr = (void *)addr; | 298 | untagged_addr = reset_tag(tagged_addr); |
299 | |||
300 | info.access_addr = tagged_addr; | ||
301 | if (addr_has_shadow(untagged_addr)) | ||
302 | info.first_bad_addr = find_first_bad_addr(tagged_addr, size); | ||
303 | else | ||
304 | info.first_bad_addr = untagged_addr; | ||
408 | info.access_size = size; | 305 | info.access_size = size; |
409 | info.is_write = is_write; | 306 | info.is_write = is_write; |
410 | info.ip = ip; | 307 | info.ip = ip; |
411 | 308 | ||
412 | kasan_report_error(&info); | 309 | start_report(&flags); |
413 | } | ||
414 | 310 | ||
311 | print_error_description(&info); | ||
312 | if (addr_has_shadow(untagged_addr)) | ||
313 | print_tags(get_tag(tagged_addr), info.first_bad_addr); | ||
314 | pr_err("\n"); | ||
415 | 315 | ||
416 | #define DEFINE_ASAN_REPORT_LOAD(size) \ | 316 | if (addr_has_shadow(untagged_addr)) { |
417 | void __asan_report_load##size##_noabort(unsigned long addr) \ | 317 | print_address_description(untagged_addr); |
418 | { \ | 318 | pr_err("\n"); |
419 | kasan_report(addr, size, false, _RET_IP_); \ | 319 | print_shadow_for_address(info.first_bad_addr); |
420 | } \ | 320 | } else { |
421 | EXPORT_SYMBOL(__asan_report_load##size##_noabort) | 321 | dump_stack(); |
422 | 322 | } | |
423 | #define DEFINE_ASAN_REPORT_STORE(size) \ | ||
424 | void __asan_report_store##size##_noabort(unsigned long addr) \ | ||
425 | { \ | ||
426 | kasan_report(addr, size, true, _RET_IP_); \ | ||
427 | } \ | ||
428 | EXPORT_SYMBOL(__asan_report_store##size##_noabort) | ||
429 | |||
430 | DEFINE_ASAN_REPORT_LOAD(1); | ||
431 | DEFINE_ASAN_REPORT_LOAD(2); | ||
432 | DEFINE_ASAN_REPORT_LOAD(4); | ||
433 | DEFINE_ASAN_REPORT_LOAD(8); | ||
434 | DEFINE_ASAN_REPORT_LOAD(16); | ||
435 | DEFINE_ASAN_REPORT_STORE(1); | ||
436 | DEFINE_ASAN_REPORT_STORE(2); | ||
437 | DEFINE_ASAN_REPORT_STORE(4); | ||
438 | DEFINE_ASAN_REPORT_STORE(8); | ||
439 | DEFINE_ASAN_REPORT_STORE(16); | ||
440 | |||
441 | void __asan_report_load_n_noabort(unsigned long addr, size_t size) | ||
442 | { | ||
443 | kasan_report(addr, size, false, _RET_IP_); | ||
444 | } | ||
445 | EXPORT_SYMBOL(__asan_report_load_n_noabort); | ||
446 | 323 | ||
447 | void __asan_report_store_n_noabort(unsigned long addr, size_t size) | 324 | end_report(&flags); |
448 | { | ||
449 | kasan_report(addr, size, true, _RET_IP_); | ||
450 | } | 325 | } |
451 | EXPORT_SYMBOL(__asan_report_store_n_noabort); | ||
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c new file mode 100644 index 000000000000..0777649e07c4 --- /dev/null +++ b/mm/kasan/tags.c | |||
@@ -0,0 +1,161 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * This file contains core tag-based KASAN code. | ||
4 | * | ||
5 | * Copyright (c) 2018 Google, Inc. | ||
6 | * Author: Andrey Konovalov <andreyknvl@google.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | #define DISABLE_BRANCH_PROFILING | ||
16 | |||
17 | #include <linux/export.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kasan.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/kmemleak.h> | ||
23 | #include <linux/linkage.h> | ||
24 | #include <linux/memblock.h> | ||
25 | #include <linux/memory.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/printk.h> | ||
29 | #include <linux/random.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/sched/task_stack.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/stacktrace.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | #include <linux/bug.h> | ||
38 | |||
39 | #include "kasan.h" | ||
40 | #include "../slab.h" | ||
41 | |||
42 | static DEFINE_PER_CPU(u32, prng_state); | ||
43 | |||
44 | void kasan_init_tags(void) | ||
45 | { | ||
46 | int cpu; | ||
47 | |||
48 | for_each_possible_cpu(cpu) | ||
49 | per_cpu(prng_state, cpu) = get_random_u32(); | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * If a preemption happens between this_cpu_read and this_cpu_write, the only | ||
54 | * side effect is that we'll give a few allocated in different contexts objects | ||
55 | * the same tag. Since tag-based KASAN is meant to be used a probabilistic | ||
56 | * bug-detection debug feature, this doesn't have significant negative impact. | ||
57 | * | ||
58 | * Ideally the tags use strong randomness to prevent any attempts to predict | ||
59 | * them during explicit exploit attempts. But strong randomness is expensive, | ||
60 | * and we did an intentional trade-off to use a PRNG. This non-atomic RMW | ||
61 | * sequence has in fact positive effect, since interrupts that randomly skew | ||
62 | * PRNG at unpredictable points do only good. | ||
63 | */ | ||
64 | u8 random_tag(void) | ||
65 | { | ||
66 | u32 state = this_cpu_read(prng_state); | ||
67 | |||
68 | state = 1664525 * state + 1013904223; | ||
69 | this_cpu_write(prng_state, state); | ||
70 | |||
71 | return (u8)(state % (KASAN_TAG_MAX + 1)); | ||
72 | } | ||
73 | |||
74 | void *kasan_reset_tag(const void *addr) | ||
75 | { | ||
76 | return reset_tag(addr); | ||
77 | } | ||
78 | |||
79 | void check_memory_region(unsigned long addr, size_t size, bool write, | ||
80 | unsigned long ret_ip) | ||
81 | { | ||
82 | u8 tag; | ||
83 | u8 *shadow_first, *shadow_last, *shadow; | ||
84 | void *untagged_addr; | ||
85 | |||
86 | if (unlikely(size == 0)) | ||
87 | return; | ||
88 | |||
89 | tag = get_tag((const void *)addr); | ||
90 | |||
91 | /* | ||
92 | * Ignore accesses for pointers tagged with 0xff (native kernel | ||
93 | * pointer tag) to suppress false positives caused by kmap. | ||
94 | * | ||
95 | * Some kernel code was written to account for archs that don't keep | ||
96 | * high memory mapped all the time, but rather map and unmap particular | ||
97 | * pages when needed. Instead of storing a pointer to the kernel memory, | ||
98 | * this code saves the address of the page structure and offset within | ||
99 | * that page for later use. Those pages are then mapped and unmapped | ||
100 | * with kmap/kunmap when necessary and virt_to_page is used to get the | ||
101 | * virtual address of the page. For arm64 (that keeps the high memory | ||
102 | * mapped all the time), kmap is turned into a page_address call. | ||
103 | |||
104 | * The issue is that with use of the page_address + virt_to_page | ||
105 | * sequence the top byte value of the original pointer gets lost (gets | ||
106 | * set to KASAN_TAG_KERNEL (0xFF)). | ||
107 | */ | ||
108 | if (tag == KASAN_TAG_KERNEL) | ||
109 | return; | ||
110 | |||
111 | untagged_addr = reset_tag((const void *)addr); | ||
112 | if (unlikely(untagged_addr < | ||
113 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { | ||
114 | kasan_report(addr, size, write, ret_ip); | ||
115 | return; | ||
116 | } | ||
117 | shadow_first = kasan_mem_to_shadow(untagged_addr); | ||
118 | shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1); | ||
119 | for (shadow = shadow_first; shadow <= shadow_last; shadow++) { | ||
120 | if (*shadow != tag) { | ||
121 | kasan_report(addr, size, write, ret_ip); | ||
122 | return; | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
127 | #define DEFINE_HWASAN_LOAD_STORE(size) \ | ||
128 | void __hwasan_load##size##_noabort(unsigned long addr) \ | ||
129 | { \ | ||
130 | check_memory_region(addr, size, false, _RET_IP_); \ | ||
131 | } \ | ||
132 | EXPORT_SYMBOL(__hwasan_load##size##_noabort); \ | ||
133 | void __hwasan_store##size##_noabort(unsigned long addr) \ | ||
134 | { \ | ||
135 | check_memory_region(addr, size, true, _RET_IP_); \ | ||
136 | } \ | ||
137 | EXPORT_SYMBOL(__hwasan_store##size##_noabort) | ||
138 | |||
139 | DEFINE_HWASAN_LOAD_STORE(1); | ||
140 | DEFINE_HWASAN_LOAD_STORE(2); | ||
141 | DEFINE_HWASAN_LOAD_STORE(4); | ||
142 | DEFINE_HWASAN_LOAD_STORE(8); | ||
143 | DEFINE_HWASAN_LOAD_STORE(16); | ||
144 | |||
145 | void __hwasan_loadN_noabort(unsigned long addr, unsigned long size) | ||
146 | { | ||
147 | check_memory_region(addr, size, false, _RET_IP_); | ||
148 | } | ||
149 | EXPORT_SYMBOL(__hwasan_loadN_noabort); | ||
150 | |||
151 | void __hwasan_storeN_noabort(unsigned long addr, unsigned long size) | ||
152 | { | ||
153 | check_memory_region(addr, size, true, _RET_IP_); | ||
154 | } | ||
155 | EXPORT_SYMBOL(__hwasan_storeN_noabort); | ||
156 | |||
157 | void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) | ||
158 | { | ||
159 | kasan_poison_shadow((void *)addr, size, tag); | ||
160 | } | ||
161 | EXPORT_SYMBOL(__hwasan_tag_memory); | ||
diff --git a/mm/kasan/tags_report.c b/mm/kasan/tags_report.c new file mode 100644 index 000000000000..8eaf5f722271 --- /dev/null +++ b/mm/kasan/tags_report.c | |||
@@ -0,0 +1,58 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * This file contains tag-based KASAN specific error reporting code. | ||
4 | * | ||
5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
7 | * | ||
8 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | ||
9 | * Andrey Konovalov <andreyknvl@gmail.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/bitops.h> | ||
18 | #include <linux/ftrace.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/printk.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/stackdepot.h> | ||
26 | #include <linux/stacktrace.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kasan.h> | ||
30 | #include <linux/module.h> | ||
31 | |||
32 | #include <asm/sections.h> | ||
33 | |||
34 | #include "kasan.h" | ||
35 | #include "../slab.h" | ||
36 | |||
37 | const char *get_bug_type(struct kasan_access_info *info) | ||
38 | { | ||
39 | return "invalid-access"; | ||
40 | } | ||
41 | |||
42 | void *find_first_bad_addr(void *addr, size_t size) | ||
43 | { | ||
44 | u8 tag = get_tag(addr); | ||
45 | void *p = reset_tag(addr); | ||
46 | void *end = p + size; | ||
47 | |||
48 | while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p)) | ||
49 | p += KASAN_SHADOW_SCALE_SIZE; | ||
50 | return p; | ||
51 | } | ||
52 | |||
53 | void print_tags(u8 addr_tag, const void *addr) | ||
54 | { | ||
55 | u8 *shadow = (u8 *)kasan_mem_to_shadow(addr); | ||
56 | |||
57 | pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag, *shadow); | ||
58 | } | ||
diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 43ce2f4d2551..4f017339ddb2 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c | |||
@@ -944,8 +944,7 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
944 | int isolated = 0, result = 0; | 944 | int isolated = 0, result = 0; |
945 | struct mem_cgroup *memcg; | 945 | struct mem_cgroup *memcg; |
946 | struct vm_area_struct *vma; | 946 | struct vm_area_struct *vma; |
947 | unsigned long mmun_start; /* For mmu_notifiers */ | 947 | struct mmu_notifier_range range; |
948 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
949 | gfp_t gfp; | 948 | gfp_t gfp; |
950 | 949 | ||
951 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | 950 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
@@ -1017,9 +1016,8 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1017 | pte = pte_offset_map(pmd, address); | 1016 | pte = pte_offset_map(pmd, address); |
1018 | pte_ptl = pte_lockptr(mm, pmd); | 1017 | pte_ptl = pte_lockptr(mm, pmd); |
1019 | 1018 | ||
1020 | mmun_start = address; | 1019 | mmu_notifier_range_init(&range, mm, address, address + HPAGE_PMD_SIZE); |
1021 | mmun_end = address + HPAGE_PMD_SIZE; | 1020 | mmu_notifier_invalidate_range_start(&range); |
1022 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
1023 | pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ | 1021 | pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ |
1024 | /* | 1022 | /* |
1025 | * After this gup_fast can't run anymore. This also removes | 1023 | * After this gup_fast can't run anymore. This also removes |
@@ -1029,7 +1027,7 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1029 | */ | 1027 | */ |
1030 | _pmd = pmdp_collapse_flush(vma, address, pmd); | 1028 | _pmd = pmdp_collapse_flush(vma, address, pmd); |
1031 | spin_unlock(pmd_ptl); | 1029 | spin_unlock(pmd_ptl); |
1032 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 1030 | mmu_notifier_invalidate_range_end(&range); |
1033 | 1031 | ||
1034 | spin_lock(pte_ptl); | 1032 | spin_lock(pte_ptl); |
1035 | isolated = __collapse_huge_page_isolate(vma, address, pte); | 1033 | isolated = __collapse_huge_page_isolate(vma, address, pte); |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 877de4fa0720..f9d9dc250428 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1547,11 +1547,14 @@ static void kmemleak_scan(void) | |||
1547 | unsigned long pfn; | 1547 | unsigned long pfn; |
1548 | 1548 | ||
1549 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { | 1549 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
1550 | struct page *page; | 1550 | struct page *page = pfn_to_online_page(pfn); |
1551 | 1551 | ||
1552 | if (!pfn_valid(pfn)) | 1552 | if (!page) |
1553 | continue; | ||
1554 | |||
1555 | /* only scan pages belonging to this node */ | ||
1556 | if (page_to_nid(page) != i) | ||
1553 | continue; | 1557 | continue; |
1554 | page = pfn_to_page(pfn); | ||
1555 | /* only scan if page is in use */ | 1558 | /* only scan if page is in use */ |
1556 | if (page_count(page) == 0) | 1559 | if (page_count(page) == 0) |
1557 | continue; | 1560 | continue; |
@@ -1647,7 +1650,7 @@ static void kmemleak_scan(void) | |||
1647 | */ | 1650 | */ |
1648 | static int kmemleak_scan_thread(void *arg) | 1651 | static int kmemleak_scan_thread(void *arg) |
1649 | { | 1652 | { |
1650 | static int first_run = 1; | 1653 | static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN); |
1651 | 1654 | ||
1652 | pr_info("Automatic memory scanning thread started\n"); | 1655 | pr_info("Automatic memory scanning thread started\n"); |
1653 | set_user_nice(current, 10); | 1656 | set_user_nice(current, 10); |
@@ -2141,9 +2144,11 @@ static int __init kmemleak_late_init(void) | |||
2141 | return -ENOMEM; | 2144 | return -ENOMEM; |
2142 | } | 2145 | } |
2143 | 2146 | ||
2144 | mutex_lock(&scan_mutex); | 2147 | if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) { |
2145 | start_scan_thread(); | 2148 | mutex_lock(&scan_mutex); |
2146 | mutex_unlock(&scan_mutex); | 2149 | start_scan_thread(); |
2150 | mutex_unlock(&scan_mutex); | ||
2151 | } | ||
2147 | 2152 | ||
2148 | pr_info("Kernel memory leak detector initialized\n"); | 2153 | pr_info("Kernel memory leak detector initialized\n"); |
2149 | 2154 | ||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/pagemap.h> | 25 | #include <linux/pagemap.h> |
26 | #include <linux/rmap.h> | 26 | #include <linux/rmap.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/jhash.h> | 28 | #include <linux/xxhash.h> |
29 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
30 | #include <linux/kthread.h> | 30 | #include <linux/kthread.h> |
31 | #include <linux/wait.h> | 31 | #include <linux/wait.h> |
@@ -296,6 +296,7 @@ static unsigned long ksm_run = KSM_RUN_STOP; | |||
296 | static void wait_while_offlining(void); | 296 | static void wait_while_offlining(void); |
297 | 297 | ||
298 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); | 298 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); |
299 | static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); | ||
299 | static DEFINE_MUTEX(ksm_thread_mutex); | 300 | static DEFINE_MUTEX(ksm_thread_mutex); |
300 | static DEFINE_SPINLOCK(ksm_mmlist_lock); | 301 | static DEFINE_SPINLOCK(ksm_mmlist_lock); |
301 | 302 | ||
@@ -1009,7 +1010,7 @@ static u32 calc_checksum(struct page *page) | |||
1009 | { | 1010 | { |
1010 | u32 checksum; | 1011 | u32 checksum; |
1011 | void *addr = kmap_atomic(page); | 1012 | void *addr = kmap_atomic(page); |
1012 | checksum = jhash2(addr, PAGE_SIZE / 4, 17); | 1013 | checksum = xxhash(addr, PAGE_SIZE, 0); |
1013 | kunmap_atomic(addr); | 1014 | kunmap_atomic(addr); |
1014 | return checksum; | 1015 | return checksum; |
1015 | } | 1016 | } |
@@ -1042,8 +1043,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
1042 | }; | 1043 | }; |
1043 | int swapped; | 1044 | int swapped; |
1044 | int err = -EFAULT; | 1045 | int err = -EFAULT; |
1045 | unsigned long mmun_start; /* For mmu_notifiers */ | 1046 | struct mmu_notifier_range range; |
1046 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
1047 | 1047 | ||
1048 | pvmw.address = page_address_in_vma(page, vma); | 1048 | pvmw.address = page_address_in_vma(page, vma); |
1049 | if (pvmw.address == -EFAULT) | 1049 | if (pvmw.address == -EFAULT) |
@@ -1051,9 +1051,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
1051 | 1051 | ||
1052 | BUG_ON(PageTransCompound(page)); | 1052 | BUG_ON(PageTransCompound(page)); |
1053 | 1053 | ||
1054 | mmun_start = pvmw.address; | 1054 | mmu_notifier_range_init(&range, mm, pvmw.address, |
1055 | mmun_end = pvmw.address + PAGE_SIZE; | 1055 | pvmw.address + PAGE_SIZE); |
1056 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 1056 | mmu_notifier_invalidate_range_start(&range); |
1057 | 1057 | ||
1058 | if (!page_vma_mapped_walk(&pvmw)) | 1058 | if (!page_vma_mapped_walk(&pvmw)) |
1059 | goto out_mn; | 1059 | goto out_mn; |
@@ -1105,7 +1105,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
1105 | out_unlock: | 1105 | out_unlock: |
1106 | page_vma_mapped_walk_done(&pvmw); | 1106 | page_vma_mapped_walk_done(&pvmw); |
1107 | out_mn: | 1107 | out_mn: |
1108 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 1108 | mmu_notifier_invalidate_range_end(&range); |
1109 | out: | 1109 | out: |
1110 | return err; | 1110 | return err; |
1111 | } | 1111 | } |
@@ -1129,8 +1129,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, | |||
1129 | spinlock_t *ptl; | 1129 | spinlock_t *ptl; |
1130 | unsigned long addr; | 1130 | unsigned long addr; |
1131 | int err = -EFAULT; | 1131 | int err = -EFAULT; |
1132 | unsigned long mmun_start; /* For mmu_notifiers */ | 1132 | struct mmu_notifier_range range; |
1133 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
1134 | 1133 | ||
1135 | addr = page_address_in_vma(page, vma); | 1134 | addr = page_address_in_vma(page, vma); |
1136 | if (addr == -EFAULT) | 1135 | if (addr == -EFAULT) |
@@ -1140,9 +1139,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, | |||
1140 | if (!pmd) | 1139 | if (!pmd) |
1141 | goto out; | 1140 | goto out; |
1142 | 1141 | ||
1143 | mmun_start = addr; | 1142 | mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE); |
1144 | mmun_end = addr + PAGE_SIZE; | 1143 | mmu_notifier_invalidate_range_start(&range); |
1145 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
1146 | 1144 | ||
1147 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); | 1145 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
1148 | if (!pte_same(*ptep, orig_pte)) { | 1146 | if (!pte_same(*ptep, orig_pte)) { |
@@ -1188,7 +1186,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, | |||
1188 | pte_unmap_unlock(ptep, ptl); | 1186 | pte_unmap_unlock(ptep, ptl); |
1189 | err = 0; | 1187 | err = 0; |
1190 | out_mn: | 1188 | out_mn: |
1191 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 1189 | mmu_notifier_invalidate_range_end(&range); |
1192 | out: | 1190 | out: |
1193 | return err; | 1191 | return err; |
1194 | } | 1192 | } |
@@ -2391,6 +2389,8 @@ static int ksmd_should_run(void) | |||
2391 | 2389 | ||
2392 | static int ksm_scan_thread(void *nothing) | 2390 | static int ksm_scan_thread(void *nothing) |
2393 | { | 2391 | { |
2392 | unsigned int sleep_ms; | ||
2393 | |||
2394 | set_freezable(); | 2394 | set_freezable(); |
2395 | set_user_nice(current, 5); | 2395 | set_user_nice(current, 5); |
2396 | 2396 | ||
@@ -2404,8 +2404,10 @@ static int ksm_scan_thread(void *nothing) | |||
2404 | try_to_freeze(); | 2404 | try_to_freeze(); |
2405 | 2405 | ||
2406 | if (ksmd_should_run()) { | 2406 | if (ksmd_should_run()) { |
2407 | schedule_timeout_interruptible( | 2407 | sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); |
2408 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); | 2408 | wait_event_interruptible_timeout(ksm_iter_wait, |
2409 | sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), | ||
2410 | msecs_to_jiffies(sleep_ms)); | ||
2409 | } else { | 2411 | } else { |
2410 | wait_event_freezable(ksm_thread_wait, | 2412 | wait_event_freezable(ksm_thread_wait, |
2411 | ksmd_should_run() || kthread_should_stop()); | 2413 | ksmd_should_run() || kthread_should_stop()); |
@@ -2824,6 +2826,7 @@ static ssize_t sleep_millisecs_store(struct kobject *kobj, | |||
2824 | return -EINVAL; | 2826 | return -EINVAL; |
2825 | 2827 | ||
2826 | ksm_thread_sleep_millisecs = msecs; | 2828 | ksm_thread_sleep_millisecs = msecs; |
2829 | wake_up_interruptible(&ksm_iter_wait); | ||
2827 | 2830 | ||
2828 | return count; | 2831 | return count; |
2829 | } | 2832 | } |
diff --git a/mm/madvise.c b/mm/madvise.c index 6cb1ca93e290..21a7881a2db4 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
@@ -458,29 +458,30 @@ static void madvise_free_page_range(struct mmu_gather *tlb, | |||
458 | static int madvise_free_single_vma(struct vm_area_struct *vma, | 458 | static int madvise_free_single_vma(struct vm_area_struct *vma, |
459 | unsigned long start_addr, unsigned long end_addr) | 459 | unsigned long start_addr, unsigned long end_addr) |
460 | { | 460 | { |
461 | unsigned long start, end; | ||
462 | struct mm_struct *mm = vma->vm_mm; | 461 | struct mm_struct *mm = vma->vm_mm; |
462 | struct mmu_notifier_range range; | ||
463 | struct mmu_gather tlb; | 463 | struct mmu_gather tlb; |
464 | 464 | ||
465 | /* MADV_FREE works for only anon vma at the moment */ | 465 | /* MADV_FREE works for only anon vma at the moment */ |
466 | if (!vma_is_anonymous(vma)) | 466 | if (!vma_is_anonymous(vma)) |
467 | return -EINVAL; | 467 | return -EINVAL; |
468 | 468 | ||
469 | start = max(vma->vm_start, start_addr); | 469 | range.start = max(vma->vm_start, start_addr); |
470 | if (start >= vma->vm_end) | 470 | if (range.start >= vma->vm_end) |
471 | return -EINVAL; | 471 | return -EINVAL; |
472 | end = min(vma->vm_end, end_addr); | 472 | range.end = min(vma->vm_end, end_addr); |
473 | if (end <= vma->vm_start) | 473 | if (range.end <= vma->vm_start) |
474 | return -EINVAL; | 474 | return -EINVAL; |
475 | mmu_notifier_range_init(&range, mm, range.start, range.end); | ||
475 | 476 | ||
476 | lru_add_drain(); | 477 | lru_add_drain(); |
477 | tlb_gather_mmu(&tlb, mm, start, end); | 478 | tlb_gather_mmu(&tlb, mm, range.start, range.end); |
478 | update_hiwater_rss(mm); | 479 | update_hiwater_rss(mm); |
479 | 480 | ||
480 | mmu_notifier_invalidate_range_start(mm, start, end); | 481 | mmu_notifier_invalidate_range_start(&range); |
481 | madvise_free_page_range(&tlb, vma, start, end); | 482 | madvise_free_page_range(&tlb, vma, range.start, range.end); |
482 | mmu_notifier_invalidate_range_end(mm, start, end); | 483 | mmu_notifier_invalidate_range_end(&range); |
483 | tlb_finish_mmu(&tlb, start, end); | 484 | tlb_finish_mmu(&tlb, range.start, range.end); |
484 | 485 | ||
485 | return 0; | 486 | return 0; |
486 | } | 487 | } |
diff --git a/mm/memblock.c b/mm/memblock.c index 81ae63ca78d0..022d4cbb3618 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -262,7 +262,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, | |||
262 | phys_addr_t kernel_end, ret; | 262 | phys_addr_t kernel_end, ret; |
263 | 263 | ||
264 | /* pump up @end */ | 264 | /* pump up @end */ |
265 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | 265 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE || |
266 | end == MEMBLOCK_ALLOC_KASAN) | ||
266 | end = memblock.current_limit; | 267 | end = memblock.current_limit; |
267 | 268 | ||
268 | /* avoid allocating the first page */ | 269 | /* avoid allocating the first page */ |
@@ -800,7 +801,14 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) | |||
800 | return memblock_remove_range(&memblock.memory, base, size); | 801 | return memblock_remove_range(&memblock.memory, base, size); |
801 | } | 802 | } |
802 | 803 | ||
803 | 804 | /** | |
805 | * memblock_free - free boot memory block | ||
806 | * @base: phys starting address of the boot memory block | ||
807 | * @size: size of the boot memory block in bytes | ||
808 | * | ||
809 | * Free boot memory block previously allocated by memblock_alloc_xx() API. | ||
810 | * The freeing memory will not be released to the buddy allocator. | ||
811 | */ | ||
804 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) | 812 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
805 | { | 813 | { |
806 | phys_addr_t end = base + size - 1; | 814 | phys_addr_t end = base + size - 1; |
@@ -1412,13 +1420,15 @@ again: | |||
1412 | done: | 1420 | done: |
1413 | ptr = phys_to_virt(alloc); | 1421 | ptr = phys_to_virt(alloc); |
1414 | 1422 | ||
1415 | /* | 1423 | /* Skip kmemleak for kasan_init() due to high volume. */ |
1416 | * The min_count is set to 0 so that bootmem allocated blocks | 1424 | if (max_addr != MEMBLOCK_ALLOC_KASAN) |
1417 | * are never reported as leaks. This is because many of these blocks | 1425 | /* |
1418 | * are only referred via the physical address which is not | 1426 | * The min_count is set to 0 so that bootmem allocated |
1419 | * looked up by kmemleak. | 1427 | * blocks are never reported as leaks. This is because many |
1420 | */ | 1428 | * of these blocks are only referred via the physical |
1421 | kmemleak_alloc(ptr, size, 0, 0); | 1429 | * address which is not looked up by kmemleak. |
1430 | */ | ||
1431 | kmemleak_alloc(ptr, size, 0, 0); | ||
1422 | 1432 | ||
1423 | return ptr; | 1433 | return ptr; |
1424 | } | 1434 | } |
@@ -1537,24 +1547,6 @@ void * __init memblock_alloc_try_nid( | |||
1537 | } | 1547 | } |
1538 | 1548 | ||
1539 | /** | 1549 | /** |
1540 | * __memblock_free_early - free boot memory block | ||
1541 | * @base: phys starting address of the boot memory block | ||
1542 | * @size: size of the boot memory block in bytes | ||
1543 | * | ||
1544 | * Free boot memory block previously allocated by memblock_alloc_xx() API. | ||
1545 | * The freeing memory will not be released to the buddy allocator. | ||
1546 | */ | ||
1547 | void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) | ||
1548 | { | ||
1549 | phys_addr_t end = base + size - 1; | ||
1550 | |||
1551 | memblock_dbg("%s: [%pa-%pa] %pF\n", | ||
1552 | __func__, &base, &end, (void *)_RET_IP_); | ||
1553 | kmemleak_free_part_phys(base, size); | ||
1554 | memblock_remove_range(&memblock.reserved, base, size); | ||
1555 | } | ||
1556 | |||
1557 | /** | ||
1558 | * __memblock_free_late - free bootmem block pages directly to buddy allocator | 1550 | * __memblock_free_late - free bootmem block pages directly to buddy allocator |
1559 | * @base: phys starting address of the boot memory block | 1551 | * @base: phys starting address of the boot memory block |
1560 | * @size: size of the boot memory block in bytes | 1552 | * @size: size of the boot memory block in bytes |
@@ -1576,7 +1568,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) | |||
1576 | 1568 | ||
1577 | for (; cursor < end; cursor++) { | 1569 | for (; cursor < end; cursor++) { |
1578 | memblock_free_pages(pfn_to_page(cursor), cursor, 0); | 1570 | memblock_free_pages(pfn_to_page(cursor), cursor, 0); |
1579 | totalram_pages++; | 1571 | totalram_pages_inc(); |
1580 | } | 1572 | } |
1581 | } | 1573 | } |
1582 | 1574 | ||
@@ -1950,7 +1942,7 @@ void reset_node_managed_pages(pg_data_t *pgdat) | |||
1950 | struct zone *z; | 1942 | struct zone *z; |
1951 | 1943 | ||
1952 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) | 1944 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
1953 | z->managed_pages = 0; | 1945 | atomic_long_set(&z->managed_pages, 0); |
1954 | } | 1946 | } |
1955 | 1947 | ||
1956 | void __init reset_all_zones_managed_pages(void) | 1948 | void __init reset_all_zones_managed_pages(void) |
@@ -1978,7 +1970,7 @@ unsigned long __init memblock_free_all(void) | |||
1978 | reset_all_zones_managed_pages(); | 1970 | reset_all_zones_managed_pages(); |
1979 | 1971 | ||
1980 | pages = free_low_memory_core_early(); | 1972 | pages = free_low_memory_core_early(); |
1981 | totalram_pages += pages; | 1973 | totalram_pages_add(pages); |
1982 | 1974 | ||
1983 | return pages; | 1975 | return pages; |
1984 | } | 1976 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6e1469b80cb7..af7f18b32389 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1293,32 +1293,39 @@ static const char *const memcg1_stat_names[] = { | |||
1293 | 1293 | ||
1294 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 1294 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
1295 | /** | 1295 | /** |
1296 | * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. | 1296 | * mem_cgroup_print_oom_context: Print OOM information relevant to |
1297 | * memory controller. | ||
1297 | * @memcg: The memory cgroup that went over limit | 1298 | * @memcg: The memory cgroup that went over limit |
1298 | * @p: Task that is going to be killed | 1299 | * @p: Task that is going to be killed |
1299 | * | 1300 | * |
1300 | * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is | 1301 | * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is |
1301 | * enabled | 1302 | * enabled |
1302 | */ | 1303 | */ |
1303 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | 1304 | void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) |
1304 | { | 1305 | { |
1305 | struct mem_cgroup *iter; | ||
1306 | unsigned int i; | ||
1307 | |||
1308 | rcu_read_lock(); | 1306 | rcu_read_lock(); |
1309 | 1307 | ||
1308 | if (memcg) { | ||
1309 | pr_cont(",oom_memcg="); | ||
1310 | pr_cont_cgroup_path(memcg->css.cgroup); | ||
1311 | } else | ||
1312 | pr_cont(",global_oom"); | ||
1310 | if (p) { | 1313 | if (p) { |
1311 | pr_info("Task in "); | 1314 | pr_cont(",task_memcg="); |
1312 | pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); | 1315 | pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); |
1313 | pr_cont(" killed as a result of limit of "); | ||
1314 | } else { | ||
1315 | pr_info("Memory limit reached of cgroup "); | ||
1316 | } | 1316 | } |
1317 | |||
1318 | pr_cont_cgroup_path(memcg->css.cgroup); | ||
1319 | pr_cont("\n"); | ||
1320 | |||
1321 | rcu_read_unlock(); | 1317 | rcu_read_unlock(); |
1318 | } | ||
1319 | |||
1320 | /** | ||
1321 | * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to | ||
1322 | * memory controller. | ||
1323 | * @memcg: The memory cgroup that went over limit | ||
1324 | */ | ||
1325 | void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) | ||
1326 | { | ||
1327 | struct mem_cgroup *iter; | ||
1328 | unsigned int i; | ||
1322 | 1329 | ||
1323 | pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", | 1330 | pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", |
1324 | K((u64)page_counter_read(&memcg->memory)), | 1331 | K((u64)page_counter_read(&memcg->memory)), |
@@ -1666,6 +1673,9 @@ enum oom_status { | |||
1666 | 1673 | ||
1667 | static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) | 1674 | static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) |
1668 | { | 1675 | { |
1676 | enum oom_status ret; | ||
1677 | bool locked; | ||
1678 | |||
1669 | if (order > PAGE_ALLOC_COSTLY_ORDER) | 1679 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
1670 | return OOM_SKIPPED; | 1680 | return OOM_SKIPPED; |
1671 | 1681 | ||
@@ -1700,10 +1710,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int | |||
1700 | return OOM_ASYNC; | 1710 | return OOM_ASYNC; |
1701 | } | 1711 | } |
1702 | 1712 | ||
1713 | mem_cgroup_mark_under_oom(memcg); | ||
1714 | |||
1715 | locked = mem_cgroup_oom_trylock(memcg); | ||
1716 | |||
1717 | if (locked) | ||
1718 | mem_cgroup_oom_notify(memcg); | ||
1719 | |||
1720 | mem_cgroup_unmark_under_oom(memcg); | ||
1703 | if (mem_cgroup_out_of_memory(memcg, mask, order)) | 1721 | if (mem_cgroup_out_of_memory(memcg, mask, order)) |
1704 | return OOM_SUCCESS; | 1722 | ret = OOM_SUCCESS; |
1723 | else | ||
1724 | ret = OOM_FAILED; | ||
1705 | 1725 | ||
1706 | return OOM_FAILED; | 1726 | if (locked) |
1727 | mem_cgroup_oom_unlock(memcg); | ||
1728 | |||
1729 | return ret; | ||
1707 | } | 1730 | } |
1708 | 1731 | ||
1709 | /** | 1732 | /** |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 7c72f2a95785..6379fff1a5ff 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
966 | enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; | 966 | enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; |
967 | struct address_space *mapping; | 967 | struct address_space *mapping; |
968 | LIST_HEAD(tokill); | 968 | LIST_HEAD(tokill); |
969 | bool unmap_success; | 969 | bool unmap_success = true; |
970 | int kill = 1, forcekill; | 970 | int kill = 1, forcekill; |
971 | struct page *hpage = *hpagep; | 971 | struct page *hpage = *hpagep; |
972 | bool mlocked = PageMlocked(hpage); | 972 | bool mlocked = PageMlocked(hpage); |
@@ -1028,7 +1028,19 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
1028 | if (kill) | 1028 | if (kill) |
1029 | collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); | 1029 | collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); |
1030 | 1030 | ||
1031 | unmap_success = try_to_unmap(hpage, ttu); | 1031 | if (!PageHuge(hpage)) { |
1032 | unmap_success = try_to_unmap(hpage, ttu); | ||
1033 | } else if (mapping) { | ||
1034 | /* | ||
1035 | * For hugetlb pages, try_to_unmap could potentially call | ||
1036 | * huge_pmd_unshare. Because of this, take semaphore in | ||
1037 | * write mode here and set TTU_RMAP_LOCKED to indicate we | ||
1038 | * have taken the lock at this higer level. | ||
1039 | */ | ||
1040 | i_mmap_lock_write(mapping); | ||
1041 | unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED); | ||
1042 | i_mmap_unlock_write(mapping); | ||
1043 | } | ||
1032 | if (!unmap_success) | 1044 | if (!unmap_success) |
1033 | pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", | 1045 | pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", |
1034 | pfn, page_mapcount(hpage)); | 1046 | pfn, page_mapcount(hpage)); |
diff --git a/mm/memory.c b/mm/memory.c index 4ad2d293ddc2..2dd2f9ab57f4 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -973,8 +973,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
973 | unsigned long next; | 973 | unsigned long next; |
974 | unsigned long addr = vma->vm_start; | 974 | unsigned long addr = vma->vm_start; |
975 | unsigned long end = vma->vm_end; | 975 | unsigned long end = vma->vm_end; |
976 | unsigned long mmun_start; /* For mmu_notifiers */ | 976 | struct mmu_notifier_range range; |
977 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
978 | bool is_cow; | 977 | bool is_cow; |
979 | int ret; | 978 | int ret; |
980 | 979 | ||
@@ -1008,11 +1007,11 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
1008 | * is_cow_mapping() returns true. | 1007 | * is_cow_mapping() returns true. |
1009 | */ | 1008 | */ |
1010 | is_cow = is_cow_mapping(vma->vm_flags); | 1009 | is_cow = is_cow_mapping(vma->vm_flags); |
1011 | mmun_start = addr; | 1010 | |
1012 | mmun_end = end; | 1011 | if (is_cow) { |
1013 | if (is_cow) | 1012 | mmu_notifier_range_init(&range, src_mm, addr, end); |
1014 | mmu_notifier_invalidate_range_start(src_mm, mmun_start, | 1013 | mmu_notifier_invalidate_range_start(&range); |
1015 | mmun_end); | 1014 | } |
1016 | 1015 | ||
1017 | ret = 0; | 1016 | ret = 0; |
1018 | dst_pgd = pgd_offset(dst_mm, addr); | 1017 | dst_pgd = pgd_offset(dst_mm, addr); |
@@ -1029,7 +1028,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
1029 | } while (dst_pgd++, src_pgd++, addr = next, addr != end); | 1028 | } while (dst_pgd++, src_pgd++, addr = next, addr != end); |
1030 | 1029 | ||
1031 | if (is_cow) | 1030 | if (is_cow) |
1032 | mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); | 1031 | mmu_notifier_invalidate_range_end(&range); |
1033 | return ret; | 1032 | return ret; |
1034 | } | 1033 | } |
1035 | 1034 | ||
@@ -1332,12 +1331,13 @@ void unmap_vmas(struct mmu_gather *tlb, | |||
1332 | struct vm_area_struct *vma, unsigned long start_addr, | 1331 | struct vm_area_struct *vma, unsigned long start_addr, |
1333 | unsigned long end_addr) | 1332 | unsigned long end_addr) |
1334 | { | 1333 | { |
1335 | struct mm_struct *mm = vma->vm_mm; | 1334 | struct mmu_notifier_range range; |
1336 | 1335 | ||
1337 | mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); | 1336 | mmu_notifier_range_init(&range, vma->vm_mm, start_addr, end_addr); |
1337 | mmu_notifier_invalidate_range_start(&range); | ||
1338 | for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) | 1338 | for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) |
1339 | unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); | 1339 | unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); |
1340 | mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); | 1340 | mmu_notifier_invalidate_range_end(&range); |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | /** | 1343 | /** |
@@ -1351,18 +1351,18 @@ void unmap_vmas(struct mmu_gather *tlb, | |||
1351 | void zap_page_range(struct vm_area_struct *vma, unsigned long start, | 1351 | void zap_page_range(struct vm_area_struct *vma, unsigned long start, |
1352 | unsigned long size) | 1352 | unsigned long size) |
1353 | { | 1353 | { |
1354 | struct mm_struct *mm = vma->vm_mm; | 1354 | struct mmu_notifier_range range; |
1355 | struct mmu_gather tlb; | 1355 | struct mmu_gather tlb; |
1356 | unsigned long end = start + size; | ||
1357 | 1356 | ||
1358 | lru_add_drain(); | 1357 | lru_add_drain(); |
1359 | tlb_gather_mmu(&tlb, mm, start, end); | 1358 | mmu_notifier_range_init(&range, vma->vm_mm, start, start + size); |
1360 | update_hiwater_rss(mm); | 1359 | tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end); |
1361 | mmu_notifier_invalidate_range_start(mm, start, end); | 1360 | update_hiwater_rss(vma->vm_mm); |
1362 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) | 1361 | mmu_notifier_invalidate_range_start(&range); |
1363 | unmap_single_vma(&tlb, vma, start, end, NULL); | 1362 | for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next) |
1364 | mmu_notifier_invalidate_range_end(mm, start, end); | 1363 | unmap_single_vma(&tlb, vma, start, range.end, NULL); |
1365 | tlb_finish_mmu(&tlb, start, end); | 1364 | mmu_notifier_invalidate_range_end(&range); |
1365 | tlb_finish_mmu(&tlb, start, range.end); | ||
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | /** | 1368 | /** |
@@ -1377,17 +1377,17 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, | |||
1377 | static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, | 1377 | static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, |
1378 | unsigned long size, struct zap_details *details) | 1378 | unsigned long size, struct zap_details *details) |
1379 | { | 1379 | { |
1380 | struct mm_struct *mm = vma->vm_mm; | 1380 | struct mmu_notifier_range range; |
1381 | struct mmu_gather tlb; | 1381 | struct mmu_gather tlb; |
1382 | unsigned long end = address + size; | ||
1383 | 1382 | ||
1384 | lru_add_drain(); | 1383 | lru_add_drain(); |
1385 | tlb_gather_mmu(&tlb, mm, address, end); | 1384 | mmu_notifier_range_init(&range, vma->vm_mm, address, address + size); |
1386 | update_hiwater_rss(mm); | 1385 | tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end); |
1387 | mmu_notifier_invalidate_range_start(mm, address, end); | 1386 | update_hiwater_rss(vma->vm_mm); |
1388 | unmap_single_vma(&tlb, vma, address, end, details); | 1387 | mmu_notifier_invalidate_range_start(&range); |
1389 | mmu_notifier_invalidate_range_end(mm, address, end); | 1388 | unmap_single_vma(&tlb, vma, address, range.end, details); |
1390 | tlb_finish_mmu(&tlb, address, end); | 1389 | mmu_notifier_invalidate_range_end(&range); |
1390 | tlb_finish_mmu(&tlb, address, range.end); | ||
1391 | } | 1391 | } |
1392 | 1392 | ||
1393 | /** | 1393 | /** |
@@ -2247,9 +2247,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) | |||
2247 | struct page *new_page = NULL; | 2247 | struct page *new_page = NULL; |
2248 | pte_t entry; | 2248 | pte_t entry; |
2249 | int page_copied = 0; | 2249 | int page_copied = 0; |
2250 | const unsigned long mmun_start = vmf->address & PAGE_MASK; | ||
2251 | const unsigned long mmun_end = mmun_start + PAGE_SIZE; | ||
2252 | struct mem_cgroup *memcg; | 2250 | struct mem_cgroup *memcg; |
2251 | struct mmu_notifier_range range; | ||
2253 | 2252 | ||
2254 | if (unlikely(anon_vma_prepare(vma))) | 2253 | if (unlikely(anon_vma_prepare(vma))) |
2255 | goto oom; | 2254 | goto oom; |
@@ -2272,7 +2271,9 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) | |||
2272 | 2271 | ||
2273 | __SetPageUptodate(new_page); | 2272 | __SetPageUptodate(new_page); |
2274 | 2273 | ||
2275 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 2274 | mmu_notifier_range_init(&range, mm, vmf->address & PAGE_MASK, |
2275 | (vmf->address & PAGE_MASK) + PAGE_SIZE); | ||
2276 | mmu_notifier_invalidate_range_start(&range); | ||
2276 | 2277 | ||
2277 | /* | 2278 | /* |
2278 | * Re-check the pte - we dropped the lock | 2279 | * Re-check the pte - we dropped the lock |
@@ -2349,7 +2350,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) | |||
2349 | * No need to double call mmu_notifier->invalidate_range() callback as | 2350 | * No need to double call mmu_notifier->invalidate_range() callback as |
2350 | * the above ptep_clear_flush_notify() did already call it. | 2351 | * the above ptep_clear_flush_notify() did already call it. |
2351 | */ | 2352 | */ |
2352 | mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end); | 2353 | mmu_notifier_invalidate_range_only_end(&range); |
2353 | if (old_page) { | 2354 | if (old_page) { |
2354 | /* | 2355 | /* |
2355 | * Don't let another task, with possibly unlocked vma, | 2356 | * Don't let another task, with possibly unlocked vma, |
@@ -3830,7 +3831,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, | |||
3830 | vmf.pud = pud_alloc(mm, p4d, address); | 3831 | vmf.pud = pud_alloc(mm, p4d, address); |
3831 | if (!vmf.pud) | 3832 | if (!vmf.pud) |
3832 | return VM_FAULT_OOM; | 3833 | return VM_FAULT_OOM; |
3833 | if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { | 3834 | if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { |
3834 | ret = create_huge_pud(&vmf); | 3835 | ret = create_huge_pud(&vmf); |
3835 | if (!(ret & VM_FAULT_FALLBACK)) | 3836 | if (!(ret & VM_FAULT_FALLBACK)) |
3836 | return ret; | 3837 | return ret; |
@@ -3856,7 +3857,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, | |||
3856 | vmf.pmd = pmd_alloc(mm, vmf.pud, address); | 3857 | vmf.pmd = pmd_alloc(mm, vmf.pud, address); |
3857 | if (!vmf.pmd) | 3858 | if (!vmf.pmd) |
3858 | return VM_FAULT_OOM; | 3859 | return VM_FAULT_OOM; |
3859 | if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { | 3860 | if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { |
3860 | ret = create_huge_pmd(&vmf); | 3861 | ret = create_huge_pmd(&vmf); |
3861 | if (!(ret & VM_FAULT_FALLBACK)) | 3862 | if (!(ret & VM_FAULT_FALLBACK)) |
3862 | return ret; | 3863 | return ret; |
@@ -4030,7 +4031,7 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | |||
4030 | #endif /* __PAGETABLE_PMD_FOLDED */ | 4031 | #endif /* __PAGETABLE_PMD_FOLDED */ |
4031 | 4032 | ||
4032 | static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | 4033 | static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, |
4033 | unsigned long *start, unsigned long *end, | 4034 | struct mmu_notifier_range *range, |
4034 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) | 4035 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) |
4035 | { | 4036 | { |
4036 | pgd_t *pgd; | 4037 | pgd_t *pgd; |
@@ -4058,10 +4059,10 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | |||
4058 | if (!pmdpp) | 4059 | if (!pmdpp) |
4059 | goto out; | 4060 | goto out; |
4060 | 4061 | ||
4061 | if (start && end) { | 4062 | if (range) { |
4062 | *start = address & PMD_MASK; | 4063 | mmu_notifier_range_init(range, mm, address & PMD_MASK, |
4063 | *end = *start + PMD_SIZE; | 4064 | (address & PMD_MASK) + PMD_SIZE); |
4064 | mmu_notifier_invalidate_range_start(mm, *start, *end); | 4065 | mmu_notifier_invalidate_range_start(range); |
4065 | } | 4066 | } |
4066 | *ptlp = pmd_lock(mm, pmd); | 4067 | *ptlp = pmd_lock(mm, pmd); |
4067 | if (pmd_huge(*pmd)) { | 4068 | if (pmd_huge(*pmd)) { |
@@ -4069,17 +4070,17 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | |||
4069 | return 0; | 4070 | return 0; |
4070 | } | 4071 | } |
4071 | spin_unlock(*ptlp); | 4072 | spin_unlock(*ptlp); |
4072 | if (start && end) | 4073 | if (range) |
4073 | mmu_notifier_invalidate_range_end(mm, *start, *end); | 4074 | mmu_notifier_invalidate_range_end(range); |
4074 | } | 4075 | } |
4075 | 4076 | ||
4076 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | 4077 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) |
4077 | goto out; | 4078 | goto out; |
4078 | 4079 | ||
4079 | if (start && end) { | 4080 | if (range) { |
4080 | *start = address & PAGE_MASK; | 4081 | range->start = address & PAGE_MASK; |
4081 | *end = *start + PAGE_SIZE; | 4082 | range->end = range->start + PAGE_SIZE; |
4082 | mmu_notifier_invalidate_range_start(mm, *start, *end); | 4083 | mmu_notifier_invalidate_range_start(range); |
4083 | } | 4084 | } |
4084 | ptep = pte_offset_map_lock(mm, pmd, address, ptlp); | 4085 | ptep = pte_offset_map_lock(mm, pmd, address, ptlp); |
4085 | if (!pte_present(*ptep)) | 4086 | if (!pte_present(*ptep)) |
@@ -4088,8 +4089,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | |||
4088 | return 0; | 4089 | return 0; |
4089 | unlock: | 4090 | unlock: |
4090 | pte_unmap_unlock(ptep, *ptlp); | 4091 | pte_unmap_unlock(ptep, *ptlp); |
4091 | if (start && end) | 4092 | if (range) |
4092 | mmu_notifier_invalidate_range_end(mm, *start, *end); | 4093 | mmu_notifier_invalidate_range_end(range); |
4093 | out: | 4094 | out: |
4094 | return -EINVAL; | 4095 | return -EINVAL; |
4095 | } | 4096 | } |
@@ -4101,20 +4102,20 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address, | |||
4101 | 4102 | ||
4102 | /* (void) is needed to make gcc happy */ | 4103 | /* (void) is needed to make gcc happy */ |
4103 | (void) __cond_lock(*ptlp, | 4104 | (void) __cond_lock(*ptlp, |
4104 | !(res = __follow_pte_pmd(mm, address, NULL, NULL, | 4105 | !(res = __follow_pte_pmd(mm, address, NULL, |
4105 | ptepp, NULL, ptlp))); | 4106 | ptepp, NULL, ptlp))); |
4106 | return res; | 4107 | return res; |
4107 | } | 4108 | } |
4108 | 4109 | ||
4109 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, | 4110 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, |
4110 | unsigned long *start, unsigned long *end, | 4111 | struct mmu_notifier_range *range, |
4111 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) | 4112 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) |
4112 | { | 4113 | { |
4113 | int res; | 4114 | int res; |
4114 | 4115 | ||
4115 | /* (void) is needed to make gcc happy */ | 4116 | /* (void) is needed to make gcc happy */ |
4116 | (void) __cond_lock(*ptlp, | 4117 | (void) __cond_lock(*ptlp, |
4117 | !(res = __follow_pte_pmd(mm, address, start, end, | 4118 | !(res = __follow_pte_pmd(mm, address, range, |
4118 | ptepp, pmdpp, ptlp))); | 4119 | ptepp, pmdpp, ptlp))); |
4119 | return res; | 4120 | return res; |
4120 | } | 4121 | } |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2b2b3ccbbfb5..b9a667d36c55 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/hugetlb.h> | 34 | #include <linux/hugetlb.h> |
35 | #include <linux/memblock.h> | 35 | #include <linux/memblock.h> |
36 | #include <linux/compaction.h> | 36 | #include <linux/compaction.h> |
37 | #include <linux/rmap.h> | ||
37 | 38 | ||
38 | #include <asm/tlbflush.h> | 39 | #include <asm/tlbflush.h> |
39 | 40 | ||
@@ -253,7 +254,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn, | |||
253 | if (pfn_valid(phys_start_pfn)) | 254 | if (pfn_valid(phys_start_pfn)) |
254 | return -EEXIST; | 255 | return -EEXIST; |
255 | 256 | ||
256 | ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap); | 257 | ret = sparse_add_one_section(nid, phys_start_pfn, altmap); |
257 | if (ret < 0) | 258 | if (ret < 0) |
258 | return ret; | 259 | return ret; |
259 | 260 | ||
@@ -743,14 +744,13 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, | |||
743 | int nid = pgdat->node_id; | 744 | int nid = pgdat->node_id; |
744 | unsigned long flags; | 745 | unsigned long flags; |
745 | 746 | ||
746 | if (zone_is_empty(zone)) | ||
747 | init_currently_empty_zone(zone, start_pfn, nr_pages); | ||
748 | |||
749 | clear_zone_contiguous(zone); | 747 | clear_zone_contiguous(zone); |
750 | 748 | ||
751 | /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */ | 749 | /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */ |
752 | pgdat_resize_lock(pgdat, &flags); | 750 | pgdat_resize_lock(pgdat, &flags); |
753 | zone_span_writelock(zone); | 751 | zone_span_writelock(zone); |
752 | if (zone_is_empty(zone)) | ||
753 | init_currently_empty_zone(zone, start_pfn, nr_pages); | ||
754 | resize_zone_range(zone, start_pfn, nr_pages); | 754 | resize_zone_range(zone, start_pfn, nr_pages); |
755 | zone_span_writeunlock(zone); | 755 | zone_span_writeunlock(zone); |
756 | resize_pgdat_range(pgdat, start_pfn, nr_pages); | 756 | resize_pgdat_range(pgdat, start_pfn, nr_pages); |
@@ -1078,7 +1078,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) | |||
1078 | * | 1078 | * |
1079 | * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG | 1079 | * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG |
1080 | */ | 1080 | */ |
1081 | int __ref add_memory_resource(int nid, struct resource *res, bool online) | 1081 | int __ref add_memory_resource(int nid, struct resource *res) |
1082 | { | 1082 | { |
1083 | u64 start, size; | 1083 | u64 start, size; |
1084 | bool new_node = false; | 1084 | bool new_node = false; |
@@ -1133,7 +1133,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) | |||
1133 | mem_hotplug_done(); | 1133 | mem_hotplug_done(); |
1134 | 1134 | ||
1135 | /* online pages if requested */ | 1135 | /* online pages if requested */ |
1136 | if (online) | 1136 | if (memhp_auto_online) |
1137 | walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), | 1137 | walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), |
1138 | NULL, online_memory_block); | 1138 | NULL, online_memory_block); |
1139 | 1139 | ||
@@ -1157,7 +1157,7 @@ int __ref __add_memory(int nid, u64 start, u64 size) | |||
1157 | if (IS_ERR(res)) | 1157 | if (IS_ERR(res)) |
1158 | return PTR_ERR(res); | 1158 | return PTR_ERR(res); |
1159 | 1159 | ||
1160 | ret = add_memory_resource(nid, res, memhp_auto_online); | 1160 | ret = add_memory_resource(nid, res); |
1161 | if (ret < 0) | 1161 | if (ret < 0) |
1162 | release_memory_resource(res); | 1162 | release_memory_resource(res); |
1163 | return ret; | 1163 | return ret; |
@@ -1226,7 +1226,7 @@ static bool is_pageblock_removable_nolock(struct page *page) | |||
1226 | if (!zone_spans_pfn(zone, pfn)) | 1226 | if (!zone_spans_pfn(zone, pfn)) |
1227 | return false; | 1227 | return false; |
1228 | 1228 | ||
1229 | return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true); | 1229 | return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON); |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | /* Checks if this range of memory is likely to be hot-removable. */ | 1232 | /* Checks if this range of memory is likely to be hot-removable. */ |
@@ -1339,18 +1339,16 @@ static struct page *new_node_page(struct page *page, unsigned long private) | |||
1339 | return new_page_nodemask(page, nid, &nmask); | 1339 | return new_page_nodemask(page, nid, &nmask); |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | #define NR_OFFLINE_AT_ONCE_PAGES (256) | ||
1343 | static int | 1342 | static int |
1344 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | 1343 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) |
1345 | { | 1344 | { |
1346 | unsigned long pfn; | 1345 | unsigned long pfn; |
1347 | struct page *page; | 1346 | struct page *page; |
1348 | int move_pages = NR_OFFLINE_AT_ONCE_PAGES; | ||
1349 | int not_managed = 0; | 1347 | int not_managed = 0; |
1350 | int ret = 0; | 1348 | int ret = 0; |
1351 | LIST_HEAD(source); | 1349 | LIST_HEAD(source); |
1352 | 1350 | ||
1353 | for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { | 1351 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
1354 | if (!pfn_valid(pfn)) | 1352 | if (!pfn_valid(pfn)) |
1355 | continue; | 1353 | continue; |
1356 | page = pfn_to_page(pfn); | 1354 | page = pfn_to_page(pfn); |
@@ -1362,13 +1360,27 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1362 | ret = -EBUSY; | 1360 | ret = -EBUSY; |
1363 | break; | 1361 | break; |
1364 | } | 1362 | } |
1365 | if (isolate_huge_page(page, &source)) | 1363 | isolate_huge_page(page, &source); |
1366 | move_pages -= 1 << compound_order(head); | ||
1367 | continue; | 1364 | continue; |
1368 | } else if (PageTransHuge(page)) | 1365 | } else if (PageTransHuge(page)) |
1369 | pfn = page_to_pfn(compound_head(page)) | 1366 | pfn = page_to_pfn(compound_head(page)) |
1370 | + hpage_nr_pages(page) - 1; | 1367 | + hpage_nr_pages(page) - 1; |
1371 | 1368 | ||
1369 | /* | ||
1370 | * HWPoison pages have elevated reference counts so the migration would | ||
1371 | * fail on them. It also doesn't make any sense to migrate them in the | ||
1372 | * first place. Still try to unmap such a page in case it is still mapped | ||
1373 | * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep | ||
1374 | * the unmap as the catch all safety net). | ||
1375 | */ | ||
1376 | if (PageHWPoison(page)) { | ||
1377 | if (WARN_ON(PageLRU(page))) | ||
1378 | isolate_lru_page(page); | ||
1379 | if (page_mapped(page)) | ||
1380 | try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS); | ||
1381 | continue; | ||
1382 | } | ||
1383 | |||
1372 | if (!get_page_unless_zero(page)) | 1384 | if (!get_page_unless_zero(page)) |
1373 | continue; | 1385 | continue; |
1374 | /* | 1386 | /* |
@@ -1382,16 +1394,13 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1382 | if (!ret) { /* Success */ | 1394 | if (!ret) { /* Success */ |
1383 | put_page(page); | 1395 | put_page(page); |
1384 | list_add_tail(&page->lru, &source); | 1396 | list_add_tail(&page->lru, &source); |
1385 | move_pages--; | ||
1386 | if (!__PageMovable(page)) | 1397 | if (!__PageMovable(page)) |
1387 | inc_node_page_state(page, NR_ISOLATED_ANON + | 1398 | inc_node_page_state(page, NR_ISOLATED_ANON + |
1388 | page_is_file_cache(page)); | 1399 | page_is_file_cache(page)); |
1389 | 1400 | ||
1390 | } else { | 1401 | } else { |
1391 | #ifdef CONFIG_DEBUG_VM | 1402 | pr_warn("failed to isolate pfn %lx\n", pfn); |
1392 | pr_alert("failed to isolate pfn %lx\n", pfn); | ||
1393 | dump_page(page, "isolation failed"); | 1403 | dump_page(page, "isolation failed"); |
1394 | #endif | ||
1395 | put_page(page); | 1404 | put_page(page); |
1396 | /* Because we don't have big zone->lock. we should | 1405 | /* Because we don't have big zone->lock. we should |
1397 | check this again here. */ | 1406 | check this again here. */ |
@@ -1411,8 +1420,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1411 | /* Allocate a new page from the nearest neighbor node */ | 1420 | /* Allocate a new page from the nearest neighbor node */ |
1412 | ret = migrate_pages(&source, new_node_page, NULL, 0, | 1421 | ret = migrate_pages(&source, new_node_page, NULL, 0, |
1413 | MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | 1422 | MIGRATE_SYNC, MR_MEMORY_HOTPLUG); |
1414 | if (ret) | 1423 | if (ret) { |
1424 | list_for_each_entry(page, &source, lru) { | ||
1425 | pr_warn("migrating pfn %lx failed ret:%d ", | ||
1426 | page_to_pfn(page), ret); | ||
1427 | dump_page(page, "migration failure"); | ||
1428 | } | ||
1415 | putback_movable_pages(&source); | 1429 | putback_movable_pages(&source); |
1430 | } | ||
1416 | } | 1431 | } |
1417 | out: | 1432 | out: |
1418 | return ret; | 1433 | return ret; |
@@ -1553,12 +1568,7 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1553 | unsigned long valid_start, valid_end; | 1568 | unsigned long valid_start, valid_end; |
1554 | struct zone *zone; | 1569 | struct zone *zone; |
1555 | struct memory_notify arg; | 1570 | struct memory_notify arg; |
1556 | 1571 | char *reason; | |
1557 | /* at least, alignment against pageblock is necessary */ | ||
1558 | if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) | ||
1559 | return -EINVAL; | ||
1560 | if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) | ||
1561 | return -EINVAL; | ||
1562 | 1572 | ||
1563 | mem_hotplug_begin(); | 1573 | mem_hotplug_begin(); |
1564 | 1574 | ||
@@ -1567,7 +1577,9 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1567 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, | 1577 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, |
1568 | &valid_end)) { | 1578 | &valid_end)) { |
1569 | mem_hotplug_done(); | 1579 | mem_hotplug_done(); |
1570 | return -EINVAL; | 1580 | ret = -EINVAL; |
1581 | reason = "multizone range"; | ||
1582 | goto failed_removal; | ||
1571 | } | 1583 | } |
1572 | 1584 | ||
1573 | zone = page_zone(pfn_to_page(valid_start)); | 1585 | zone = page_zone(pfn_to_page(valid_start)); |
@@ -1576,10 +1588,12 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1576 | 1588 | ||
1577 | /* set above range as isolated */ | 1589 | /* set above range as isolated */ |
1578 | ret = start_isolate_page_range(start_pfn, end_pfn, | 1590 | ret = start_isolate_page_range(start_pfn, end_pfn, |
1579 | MIGRATE_MOVABLE, true); | 1591 | MIGRATE_MOVABLE, |
1592 | SKIP_HWPOISON | REPORT_FAILURE); | ||
1580 | if (ret) { | 1593 | if (ret) { |
1581 | mem_hotplug_done(); | 1594 | mem_hotplug_done(); |
1582 | return ret; | 1595 | reason = "failure to isolate range"; |
1596 | goto failed_removal; | ||
1583 | } | 1597 | } |
1584 | 1598 | ||
1585 | arg.start_pfn = start_pfn; | 1599 | arg.start_pfn = start_pfn; |
@@ -1588,37 +1602,47 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1588 | 1602 | ||
1589 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); | 1603 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); |
1590 | ret = notifier_to_errno(ret); | 1604 | ret = notifier_to_errno(ret); |
1591 | if (ret) | 1605 | if (ret) { |
1592 | goto failed_removal; | 1606 | reason = "notifier failure"; |
1607 | goto failed_removal_isolated; | ||
1608 | } | ||
1593 | 1609 | ||
1594 | pfn = start_pfn; | 1610 | do { |
1595 | repeat: | 1611 | for (pfn = start_pfn; pfn;) { |
1596 | /* start memory hot removal */ | 1612 | if (signal_pending(current)) { |
1597 | ret = -EINTR; | 1613 | ret = -EINTR; |
1598 | if (signal_pending(current)) | 1614 | reason = "signal backoff"; |
1599 | goto failed_removal; | 1615 | goto failed_removal_isolated; |
1616 | } | ||
1600 | 1617 | ||
1601 | cond_resched(); | 1618 | cond_resched(); |
1602 | lru_add_drain_all(); | 1619 | lru_add_drain_all(); |
1603 | drain_all_pages(zone); | 1620 | drain_all_pages(zone); |
1621 | |||
1622 | pfn = scan_movable_pages(pfn, end_pfn); | ||
1623 | if (pfn) { | ||
1624 | /* | ||
1625 | * TODO: fatal migration failures should bail | ||
1626 | * out | ||
1627 | */ | ||
1628 | do_migrate_range(pfn, end_pfn); | ||
1629 | } | ||
1630 | } | ||
1604 | 1631 | ||
1605 | pfn = scan_movable_pages(start_pfn, end_pfn); | 1632 | /* |
1606 | if (pfn) { /* We have movable pages */ | 1633 | * Dissolve free hugepages in the memory block before doing |
1607 | ret = do_migrate_range(pfn, end_pfn); | 1634 | * offlining actually in order to make hugetlbfs's object |
1608 | goto repeat; | 1635 | * counting consistent. |
1609 | } | 1636 | */ |
1637 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | ||
1638 | if (ret) { | ||
1639 | reason = "failure to dissolve huge pages"; | ||
1640 | goto failed_removal_isolated; | ||
1641 | } | ||
1642 | /* check again */ | ||
1643 | offlined_pages = check_pages_isolated(start_pfn, end_pfn); | ||
1644 | } while (offlined_pages < 0); | ||
1610 | 1645 | ||
1611 | /* | ||
1612 | * dissolve free hugepages in the memory block before doing offlining | ||
1613 | * actually in order to make hugetlbfs's object counting consistent. | ||
1614 | */ | ||
1615 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | ||
1616 | if (ret) | ||
1617 | goto failed_removal; | ||
1618 | /* check again */ | ||
1619 | offlined_pages = check_pages_isolated(start_pfn, end_pfn); | ||
1620 | if (offlined_pages < 0) | ||
1621 | goto repeat; | ||
1622 | pr_info("Offlined Pages %ld\n", offlined_pages); | 1646 | pr_info("Offlined Pages %ld\n", offlined_pages); |
1623 | /* Ok, all of our target is isolated. | 1647 | /* Ok, all of our target is isolated. |
1624 | We cannot do rollback at this point. */ | 1648 | We cannot do rollback at this point. */ |
@@ -1654,13 +1678,15 @@ repeat: | |||
1654 | mem_hotplug_done(); | 1678 | mem_hotplug_done(); |
1655 | return 0; | 1679 | return 0; |
1656 | 1680 | ||
1681 | failed_removal_isolated: | ||
1682 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | ||
1657 | failed_removal: | 1683 | failed_removal: |
1658 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n", | 1684 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
1659 | (unsigned long long) start_pfn << PAGE_SHIFT, | 1685 | (unsigned long long) start_pfn << PAGE_SHIFT, |
1660 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); | 1686 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
1687 | reason); | ||
1661 | memory_notify(MEM_CANCEL_OFFLINE, &arg); | 1688 | memory_notify(MEM_CANCEL_OFFLINE, &arg); |
1662 | /* pushback to free area */ | 1689 | /* pushback to free area */ |
1663 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | ||
1664 | mem_hotplug_done(); | 1690 | mem_hotplug_done(); |
1665 | return ret; | 1691 | return ret; |
1666 | } | 1692 | } |
@@ -1753,34 +1779,6 @@ static int check_cpu_on_node(pg_data_t *pgdat) | |||
1753 | return 0; | 1779 | return 0; |
1754 | } | 1780 | } |
1755 | 1781 | ||
1756 | static void unmap_cpu_on_node(pg_data_t *pgdat) | ||
1757 | { | ||
1758 | #ifdef CONFIG_ACPI_NUMA | ||
1759 | int cpu; | ||
1760 | |||
1761 | for_each_possible_cpu(cpu) | ||
1762 | if (cpu_to_node(cpu) == pgdat->node_id) | ||
1763 | numa_clear_node(cpu); | ||
1764 | #endif | ||
1765 | } | ||
1766 | |||
1767 | static int check_and_unmap_cpu_on_node(pg_data_t *pgdat) | ||
1768 | { | ||
1769 | int ret; | ||
1770 | |||
1771 | ret = check_cpu_on_node(pgdat); | ||
1772 | if (ret) | ||
1773 | return ret; | ||
1774 | |||
1775 | /* | ||
1776 | * the node will be offlined when we come here, so we can clear | ||
1777 | * the cpu_to_node() now. | ||
1778 | */ | ||
1779 | |||
1780 | unmap_cpu_on_node(pgdat); | ||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1784 | /** | 1782 | /** |
1785 | * try_offline_node | 1783 | * try_offline_node |
1786 | * @nid: the node ID | 1784 | * @nid: the node ID |
@@ -1813,7 +1811,7 @@ void try_offline_node(int nid) | |||
1813 | return; | 1811 | return; |
1814 | } | 1812 | } |
1815 | 1813 | ||
1816 | if (check_and_unmap_cpu_on_node(pgdat)) | 1814 | if (check_cpu_on_node(pgdat)) |
1817 | return; | 1815 | return; |
1818 | 1816 | ||
1819 | /* | 1817 | /* |
@@ -1858,7 +1856,7 @@ void __ref __remove_memory(int nid, u64 start, u64 size) | |||
1858 | memblock_free(start, size); | 1856 | memblock_free(start, size); |
1859 | memblock_remove(start, size); | 1857 | memblock_remove(start, size); |
1860 | 1858 | ||
1861 | arch_remove_memory(start, size, NULL); | 1859 | arch_remove_memory(nid, start, size, NULL); |
1862 | 1860 | ||
1863 | try_offline_node(nid); | 1861 | try_offline_node(nid); |
1864 | 1862 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index f7e4bfdc13b7..5d1839a9148d 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -327,16 +327,13 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, | |||
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Once page cache replacement of page migration started, page_count | 329 | * Once page cache replacement of page migration started, page_count |
330 | * *must* be zero. And, we don't want to call wait_on_page_locked() | 330 | * is zero; but we must not call put_and_wait_on_page_locked() without |
331 | * against a page without get_page(). | 331 | * a ref. Use get_page_unless_zero(), and just fault again if it fails. |
332 | * So, we use get_page_unless_zero(), here. Even failed, page fault | ||
333 | * will occur again. | ||
334 | */ | 332 | */ |
335 | if (!get_page_unless_zero(page)) | 333 | if (!get_page_unless_zero(page)) |
336 | goto out; | 334 | goto out; |
337 | pte_unmap_unlock(ptep, ptl); | 335 | pte_unmap_unlock(ptep, ptl); |
338 | wait_on_page_locked(page); | 336 | put_and_wait_on_page_locked(page); |
339 | put_page(page); | ||
340 | return; | 337 | return; |
341 | out: | 338 | out: |
342 | pte_unmap_unlock(ptep, ptl); | 339 | pte_unmap_unlock(ptep, ptl); |
@@ -370,63 +367,28 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) | |||
370 | if (!get_page_unless_zero(page)) | 367 | if (!get_page_unless_zero(page)) |
371 | goto unlock; | 368 | goto unlock; |
372 | spin_unlock(ptl); | 369 | spin_unlock(ptl); |
373 | wait_on_page_locked(page); | 370 | put_and_wait_on_page_locked(page); |
374 | put_page(page); | ||
375 | return; | 371 | return; |
376 | unlock: | 372 | unlock: |
377 | spin_unlock(ptl); | 373 | spin_unlock(ptl); |
378 | } | 374 | } |
379 | #endif | 375 | #endif |
380 | 376 | ||
381 | #ifdef CONFIG_BLOCK | 377 | static int expected_page_refs(struct page *page) |
382 | /* Returns true if all buffers are successfully locked */ | ||
383 | static bool buffer_migrate_lock_buffers(struct buffer_head *head, | ||
384 | enum migrate_mode mode) | ||
385 | { | 378 | { |
386 | struct buffer_head *bh = head; | 379 | int expected_count = 1; |
387 | |||
388 | /* Simple case, sync compaction */ | ||
389 | if (mode != MIGRATE_ASYNC) { | ||
390 | do { | ||
391 | get_bh(bh); | ||
392 | lock_buffer(bh); | ||
393 | bh = bh->b_this_page; | ||
394 | |||
395 | } while (bh != head); | ||
396 | 380 | ||
397 | return true; | 381 | /* |
398 | } | 382 | * Device public or private pages have an extra refcount as they are |
399 | 383 | * ZONE_DEVICE pages. | |
400 | /* async case, we cannot block on lock_buffer so use trylock_buffer */ | 384 | */ |
401 | do { | 385 | expected_count += is_device_private_page(page); |
402 | get_bh(bh); | 386 | expected_count += is_device_public_page(page); |
403 | if (!trylock_buffer(bh)) { | 387 | if (page_mapping(page)) |
404 | /* | 388 | expected_count += hpage_nr_pages(page) + page_has_private(page); |
405 | * We failed to lock the buffer and cannot stall in | ||
406 | * async migration. Release the taken locks | ||
407 | */ | ||
408 | struct buffer_head *failed_bh = bh; | ||
409 | put_bh(failed_bh); | ||
410 | bh = head; | ||
411 | while (bh != failed_bh) { | ||
412 | unlock_buffer(bh); | ||
413 | put_bh(bh); | ||
414 | bh = bh->b_this_page; | ||
415 | } | ||
416 | return false; | ||
417 | } | ||
418 | 389 | ||
419 | bh = bh->b_this_page; | 390 | return expected_count; |
420 | } while (bh != head); | ||
421 | return true; | ||
422 | } | ||
423 | #else | ||
424 | static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, | ||
425 | enum migrate_mode mode) | ||
426 | { | ||
427 | return true; | ||
428 | } | 391 | } |
429 | #endif /* CONFIG_BLOCK */ | ||
430 | 392 | ||
431 | /* | 393 | /* |
432 | * Replace the page in the mapping. | 394 | * Replace the page in the mapping. |
@@ -437,21 +399,13 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
437 | * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. | 399 | * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. |
438 | */ | 400 | */ |
439 | int migrate_page_move_mapping(struct address_space *mapping, | 401 | int migrate_page_move_mapping(struct address_space *mapping, |
440 | struct page *newpage, struct page *page, | 402 | struct page *newpage, struct page *page, enum migrate_mode mode, |
441 | struct buffer_head *head, enum migrate_mode mode, | ||
442 | int extra_count) | 403 | int extra_count) |
443 | { | 404 | { |
444 | XA_STATE(xas, &mapping->i_pages, page_index(page)); | 405 | XA_STATE(xas, &mapping->i_pages, page_index(page)); |
445 | struct zone *oldzone, *newzone; | 406 | struct zone *oldzone, *newzone; |
446 | int dirty; | 407 | int dirty; |
447 | int expected_count = 1 + extra_count; | 408 | int expected_count = expected_page_refs(page) + extra_count; |
448 | |||
449 | /* | ||
450 | * Device public or private pages have an extra refcount as they are | ||
451 | * ZONE_DEVICE pages. | ||
452 | */ | ||
453 | expected_count += is_device_private_page(page); | ||
454 | expected_count += is_device_public_page(page); | ||
455 | 409 | ||
456 | if (!mapping) { | 410 | if (!mapping) { |
457 | /* Anonymous page without mapping */ | 411 | /* Anonymous page without mapping */ |
@@ -471,8 +425,6 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
471 | newzone = page_zone(newpage); | 425 | newzone = page_zone(newpage); |
472 | 426 | ||
473 | xas_lock_irq(&xas); | 427 | xas_lock_irq(&xas); |
474 | |||
475 | expected_count += hpage_nr_pages(page) + page_has_private(page); | ||
476 | if (page_count(page) != expected_count || xas_load(&xas) != page) { | 428 | if (page_count(page) != expected_count || xas_load(&xas) != page) { |
477 | xas_unlock_irq(&xas); | 429 | xas_unlock_irq(&xas); |
478 | return -EAGAIN; | 430 | return -EAGAIN; |
@@ -484,20 +436,6 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
484 | } | 436 | } |
485 | 437 | ||
486 | /* | 438 | /* |
487 | * In the async migration case of moving a page with buffers, lock the | ||
488 | * buffers using trylock before the mapping is moved. If the mapping | ||
489 | * was moved, we later failed to lock the buffers and could not move | ||
490 | * the mapping back due to an elevated page count, we would have to | ||
491 | * block waiting on other references to be dropped. | ||
492 | */ | ||
493 | if (mode == MIGRATE_ASYNC && head && | ||
494 | !buffer_migrate_lock_buffers(head, mode)) { | ||
495 | page_ref_unfreeze(page, expected_count); | ||
496 | xas_unlock_irq(&xas); | ||
497 | return -EAGAIN; | ||
498 | } | ||
499 | |||
500 | /* | ||
501 | * Now we know that no one else is looking at the page: | 439 | * Now we know that no one else is looking at the page: |
502 | * no turning back from here. | 440 | * no turning back from here. |
503 | */ | 441 | */ |
@@ -748,7 +686,7 @@ int migrate_page(struct address_space *mapping, | |||
748 | 686 | ||
749 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ | 687 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ |
750 | 688 | ||
751 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); | 689 | rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
752 | 690 | ||
753 | if (rc != MIGRATEPAGE_SUCCESS) | 691 | if (rc != MIGRATEPAGE_SUCCESS) |
754 | return rc; | 692 | return rc; |
@@ -762,34 +700,98 @@ int migrate_page(struct address_space *mapping, | |||
762 | EXPORT_SYMBOL(migrate_page); | 700 | EXPORT_SYMBOL(migrate_page); |
763 | 701 | ||
764 | #ifdef CONFIG_BLOCK | 702 | #ifdef CONFIG_BLOCK |
765 | /* | 703 | /* Returns true if all buffers are successfully locked */ |
766 | * Migration function for pages with buffers. This function can only be used | 704 | static bool buffer_migrate_lock_buffers(struct buffer_head *head, |
767 | * if the underlying filesystem guarantees that no other references to "page" | 705 | enum migrate_mode mode) |
768 | * exist. | 706 | { |
769 | */ | 707 | struct buffer_head *bh = head; |
770 | int buffer_migrate_page(struct address_space *mapping, | 708 | |
771 | struct page *newpage, struct page *page, enum migrate_mode mode) | 709 | /* Simple case, sync compaction */ |
710 | if (mode != MIGRATE_ASYNC) { | ||
711 | do { | ||
712 | get_bh(bh); | ||
713 | lock_buffer(bh); | ||
714 | bh = bh->b_this_page; | ||
715 | |||
716 | } while (bh != head); | ||
717 | |||
718 | return true; | ||
719 | } | ||
720 | |||
721 | /* async case, we cannot block on lock_buffer so use trylock_buffer */ | ||
722 | do { | ||
723 | get_bh(bh); | ||
724 | if (!trylock_buffer(bh)) { | ||
725 | /* | ||
726 | * We failed to lock the buffer and cannot stall in | ||
727 | * async migration. Release the taken locks | ||
728 | */ | ||
729 | struct buffer_head *failed_bh = bh; | ||
730 | put_bh(failed_bh); | ||
731 | bh = head; | ||
732 | while (bh != failed_bh) { | ||
733 | unlock_buffer(bh); | ||
734 | put_bh(bh); | ||
735 | bh = bh->b_this_page; | ||
736 | } | ||
737 | return false; | ||
738 | } | ||
739 | |||
740 | bh = bh->b_this_page; | ||
741 | } while (bh != head); | ||
742 | return true; | ||
743 | } | ||
744 | |||
745 | static int __buffer_migrate_page(struct address_space *mapping, | ||
746 | struct page *newpage, struct page *page, enum migrate_mode mode, | ||
747 | bool check_refs) | ||
772 | { | 748 | { |
773 | struct buffer_head *bh, *head; | 749 | struct buffer_head *bh, *head; |
774 | int rc; | 750 | int rc; |
751 | int expected_count; | ||
775 | 752 | ||
776 | if (!page_has_buffers(page)) | 753 | if (!page_has_buffers(page)) |
777 | return migrate_page(mapping, newpage, page, mode); | 754 | return migrate_page(mapping, newpage, page, mode); |
778 | 755 | ||
756 | /* Check whether page does not have extra refs before we do more work */ | ||
757 | expected_count = expected_page_refs(page); | ||
758 | if (page_count(page) != expected_count) | ||
759 | return -EAGAIN; | ||
760 | |||
779 | head = page_buffers(page); | 761 | head = page_buffers(page); |
762 | if (!buffer_migrate_lock_buffers(head, mode)) | ||
763 | return -EAGAIN; | ||
780 | 764 | ||
781 | rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); | 765 | if (check_refs) { |
766 | bool busy; | ||
767 | bool invalidated = false; | ||
782 | 768 | ||
783 | if (rc != MIGRATEPAGE_SUCCESS) | 769 | recheck_buffers: |
784 | return rc; | 770 | busy = false; |
771 | spin_lock(&mapping->private_lock); | ||
772 | bh = head; | ||
773 | do { | ||
774 | if (atomic_read(&bh->b_count)) { | ||
775 | busy = true; | ||
776 | break; | ||
777 | } | ||
778 | bh = bh->b_this_page; | ||
779 | } while (bh != head); | ||
780 | spin_unlock(&mapping->private_lock); | ||
781 | if (busy) { | ||
782 | if (invalidated) { | ||
783 | rc = -EAGAIN; | ||
784 | goto unlock_buffers; | ||
785 | } | ||
786 | invalidate_bh_lrus(); | ||
787 | invalidated = true; | ||
788 | goto recheck_buffers; | ||
789 | } | ||
790 | } | ||
785 | 791 | ||
786 | /* | 792 | rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
787 | * In the async case, migrate_page_move_mapping locked the buffers | 793 | if (rc != MIGRATEPAGE_SUCCESS) |
788 | * with an IRQ-safe spinlock held. In the sync case, the buffers | 794 | goto unlock_buffers; |
789 | * need to be locked now | ||
790 | */ | ||
791 | if (mode != MIGRATE_ASYNC) | ||
792 | BUG_ON(!buffer_migrate_lock_buffers(head, mode)); | ||
793 | 795 | ||
794 | ClearPagePrivate(page); | 796 | ClearPagePrivate(page); |
795 | set_page_private(newpage, page_private(page)); | 797 | set_page_private(newpage, page_private(page)); |
@@ -811,6 +813,8 @@ int buffer_migrate_page(struct address_space *mapping, | |||
811 | else | 813 | else |
812 | migrate_page_states(newpage, page); | 814 | migrate_page_states(newpage, page); |
813 | 815 | ||
816 | rc = MIGRATEPAGE_SUCCESS; | ||
817 | unlock_buffers: | ||
814 | bh = head; | 818 | bh = head; |
815 | do { | 819 | do { |
816 | unlock_buffer(bh); | 820 | unlock_buffer(bh); |
@@ -819,9 +823,32 @@ int buffer_migrate_page(struct address_space *mapping, | |||
819 | 823 | ||
820 | } while (bh != head); | 824 | } while (bh != head); |
821 | 825 | ||
822 | return MIGRATEPAGE_SUCCESS; | 826 | return rc; |
827 | } | ||
828 | |||
829 | /* | ||
830 | * Migration function for pages with buffers. This function can only be used | ||
831 | * if the underlying filesystem guarantees that no other references to "page" | ||
832 | * exist. For example attached buffer heads are accessed only under page lock. | ||
833 | */ | ||
834 | int buffer_migrate_page(struct address_space *mapping, | ||
835 | struct page *newpage, struct page *page, enum migrate_mode mode) | ||
836 | { | ||
837 | return __buffer_migrate_page(mapping, newpage, page, mode, false); | ||
823 | } | 838 | } |
824 | EXPORT_SYMBOL(buffer_migrate_page); | 839 | EXPORT_SYMBOL(buffer_migrate_page); |
840 | |||
841 | /* | ||
842 | * Same as above except that this variant is more careful and checks that there | ||
843 | * are also no buffer head references. This function is the right one for | ||
844 | * mappings where buffer heads are directly looked up and referenced (such as | ||
845 | * block device mappings). | ||
846 | */ | ||
847 | int buffer_migrate_page_norefs(struct address_space *mapping, | ||
848 | struct page *newpage, struct page *page, enum migrate_mode mode) | ||
849 | { | ||
850 | return __buffer_migrate_page(mapping, newpage, page, mode, true); | ||
851 | } | ||
825 | #endif | 852 | #endif |
826 | 853 | ||
827 | /* | 854 | /* |
@@ -1297,8 +1324,19 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, | |||
1297 | goto put_anon; | 1324 | goto put_anon; |
1298 | 1325 | ||
1299 | if (page_mapped(hpage)) { | 1326 | if (page_mapped(hpage)) { |
1327 | struct address_space *mapping = page_mapping(hpage); | ||
1328 | |||
1329 | /* | ||
1330 | * try_to_unmap could potentially call huge_pmd_unshare. | ||
1331 | * Because of this, take semaphore in write mode here and | ||
1332 | * set TTU_RMAP_LOCKED to let lower levels know we have | ||
1333 | * taken the lock. | ||
1334 | */ | ||
1335 | i_mmap_lock_write(mapping); | ||
1300 | try_to_unmap(hpage, | 1336 | try_to_unmap(hpage, |
1301 | TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); | 1337 | TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| |
1338 | TTU_RMAP_LOCKED); | ||
1339 | i_mmap_unlock_write(mapping); | ||
1302 | page_was_mapped = 1; | 1340 | page_was_mapped = 1; |
1303 | } | 1341 | } |
1304 | 1342 | ||
@@ -2303,6 +2341,7 @@ next: | |||
2303 | */ | 2341 | */ |
2304 | static void migrate_vma_collect(struct migrate_vma *migrate) | 2342 | static void migrate_vma_collect(struct migrate_vma *migrate) |
2305 | { | 2343 | { |
2344 | struct mmu_notifier_range range; | ||
2306 | struct mm_walk mm_walk; | 2345 | struct mm_walk mm_walk; |
2307 | 2346 | ||
2308 | mm_walk.pmd_entry = migrate_vma_collect_pmd; | 2347 | mm_walk.pmd_entry = migrate_vma_collect_pmd; |
@@ -2314,13 +2353,11 @@ static void migrate_vma_collect(struct migrate_vma *migrate) | |||
2314 | mm_walk.mm = migrate->vma->vm_mm; | 2353 | mm_walk.mm = migrate->vma->vm_mm; |
2315 | mm_walk.private = migrate; | 2354 | mm_walk.private = migrate; |
2316 | 2355 | ||
2317 | mmu_notifier_invalidate_range_start(mm_walk.mm, | 2356 | mmu_notifier_range_init(&range, mm_walk.mm, migrate->start, |
2318 | migrate->start, | 2357 | migrate->end); |
2319 | migrate->end); | 2358 | mmu_notifier_invalidate_range_start(&range); |
2320 | walk_page_range(migrate->start, migrate->end, &mm_walk); | 2359 | walk_page_range(migrate->start, migrate->end, &mm_walk); |
2321 | mmu_notifier_invalidate_range_end(mm_walk.mm, | 2360 | mmu_notifier_invalidate_range_end(&range); |
2322 | migrate->start, | ||
2323 | migrate->end); | ||
2324 | 2361 | ||
2325 | migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); | 2362 | migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); |
2326 | } | 2363 | } |
@@ -2701,9 +2738,8 @@ static void migrate_vma_pages(struct migrate_vma *migrate) | |||
2701 | { | 2738 | { |
2702 | const unsigned long npages = migrate->npages; | 2739 | const unsigned long npages = migrate->npages; |
2703 | const unsigned long start = migrate->start; | 2740 | const unsigned long start = migrate->start; |
2704 | struct vm_area_struct *vma = migrate->vma; | 2741 | struct mmu_notifier_range range; |
2705 | struct mm_struct *mm = vma->vm_mm; | 2742 | unsigned long addr, i; |
2706 | unsigned long addr, i, mmu_start; | ||
2707 | bool notified = false; | 2743 | bool notified = false; |
2708 | 2744 | ||
2709 | for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { | 2745 | for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { |
@@ -2722,11 +2758,12 @@ static void migrate_vma_pages(struct migrate_vma *migrate) | |||
2722 | continue; | 2758 | continue; |
2723 | } | 2759 | } |
2724 | if (!notified) { | 2760 | if (!notified) { |
2725 | mmu_start = addr; | ||
2726 | notified = true; | 2761 | notified = true; |
2727 | mmu_notifier_invalidate_range_start(mm, | 2762 | |
2728 | mmu_start, | 2763 | mmu_notifier_range_init(&range, |
2729 | migrate->end); | 2764 | migrate->vma->vm_mm, |
2765 | addr, migrate->end); | ||
2766 | mmu_notifier_invalidate_range_start(&range); | ||
2730 | } | 2767 | } |
2731 | migrate_vma_insert_page(migrate, addr, newpage, | 2768 | migrate_vma_insert_page(migrate, addr, newpage, |
2732 | &migrate->src[i], | 2769 | &migrate->src[i], |
@@ -2767,8 +2804,7 @@ static void migrate_vma_pages(struct migrate_vma *migrate) | |||
2767 | * did already call it. | 2804 | * did already call it. |
2768 | */ | 2805 | */ |
2769 | if (notified) | 2806 | if (notified) |
2770 | mmu_notifier_invalidate_range_only_end(mm, mmu_start, | 2807 | mmu_notifier_invalidate_range_only_end(&range); |
2771 | migrate->end); | ||
2772 | } | 2808 | } |
2773 | 2809 | ||
2774 | /* | 2810 | /* |
diff --git a/mm/mm_init.c b/mm/mm_init.c index 6838a530789b..33917105a3a2 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c | |||
@@ -146,7 +146,7 @@ static void __meminit mm_compute_batch(void) | |||
146 | s32 batch = max_t(s32, nr*2, 32); | 146 | s32 batch = max_t(s32, nr*2, 32); |
147 | 147 | ||
148 | /* batch size set to 0.4% of (total memory/#cpus), or max int32 */ | 148 | /* batch size set to 0.4% of (total memory/#cpus), or max int32 */ |
149 | memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff); | 149 | memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff); |
150 | 150 | ||
151 | vm_committed_as_batch = max_t(s32, memsized_batch, batch); | 151 | vm_committed_as_batch = max_t(s32, memsized_batch, batch); |
152 | } | 152 | } |
@@ -2973,16 +2973,6 @@ out: | |||
2973 | return ret; | 2973 | return ret; |
2974 | } | 2974 | } |
2975 | 2975 | ||
2976 | static inline void verify_mm_writelocked(struct mm_struct *mm) | ||
2977 | { | ||
2978 | #ifdef CONFIG_DEBUG_VM | ||
2979 | if (unlikely(down_read_trylock(&mm->mmap_sem))) { | ||
2980 | WARN_ON(1); | ||
2981 | up_read(&mm->mmap_sem); | ||
2982 | } | ||
2983 | #endif | ||
2984 | } | ||
2985 | |||
2986 | /* | 2976 | /* |
2987 | * this is really a simplified "do_mmap". it only handles | 2977 | * this is really a simplified "do_mmap". it only handles |
2988 | * anonymous maps. eventually we may be able to do some | 2978 | * anonymous maps. eventually we may be able to do some |
@@ -3010,12 +3000,6 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla | |||
3010 | return error; | 3000 | return error; |
3011 | 3001 | ||
3012 | /* | 3002 | /* |
3013 | * mm->mmap_sem is required to protect against another thread | ||
3014 | * changing the mappings in case we sleep. | ||
3015 | */ | ||
3016 | verify_mm_writelocked(mm); | ||
3017 | |||
3018 | /* | ||
3019 | * Clear old maps. this also does some error checking for us | 3003 | * Clear old maps. this also does some error checking for us |
3020 | */ | 3004 | */ |
3021 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, | 3005 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, |
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 5119ff846769..9c884abc7850 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c | |||
@@ -35,13 +35,6 @@ void mmu_notifier_call_srcu(struct rcu_head *rcu, | |||
35 | } | 35 | } |
36 | EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu); | 36 | EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu); |
37 | 37 | ||
38 | void mmu_notifier_synchronize(void) | ||
39 | { | ||
40 | /* Wait for any running method to finish. */ | ||
41 | srcu_barrier(&srcu); | ||
42 | } | ||
43 | EXPORT_SYMBOL_GPL(mmu_notifier_synchronize); | ||
44 | |||
45 | /* | 38 | /* |
46 | * This function can't run concurrently against mmu_notifier_register | 39 | * This function can't run concurrently against mmu_notifier_register |
47 | * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap | 40 | * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap |
@@ -174,22 +167,20 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, | |||
174 | srcu_read_unlock(&srcu, id); | 167 | srcu_read_unlock(&srcu, id); |
175 | } | 168 | } |
176 | 169 | ||
177 | int __mmu_notifier_invalidate_range_start(struct mm_struct *mm, | 170 | int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) |
178 | unsigned long start, unsigned long end, | ||
179 | bool blockable) | ||
180 | { | 171 | { |
181 | struct mmu_notifier *mn; | 172 | struct mmu_notifier *mn; |
182 | int ret = 0; | 173 | int ret = 0; |
183 | int id; | 174 | int id; |
184 | 175 | ||
185 | id = srcu_read_lock(&srcu); | 176 | id = srcu_read_lock(&srcu); |
186 | hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { | 177 | hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) { |
187 | if (mn->ops->invalidate_range_start) { | 178 | if (mn->ops->invalidate_range_start) { |
188 | int _ret = mn->ops->invalidate_range_start(mn, mm, start, end, blockable); | 179 | int _ret = mn->ops->invalidate_range_start(mn, range); |
189 | if (_ret) { | 180 | if (_ret) { |
190 | pr_info("%pS callback failed with %d in %sblockable context.\n", | 181 | pr_info("%pS callback failed with %d in %sblockable context.\n", |
191 | mn->ops->invalidate_range_start, _ret, | 182 | mn->ops->invalidate_range_start, _ret, |
192 | !blockable ? "non-" : ""); | 183 | !range->blockable ? "non-" : ""); |
193 | ret = _ret; | 184 | ret = _ret; |
194 | } | 185 | } |
195 | } | 186 | } |
@@ -200,16 +191,14 @@ int __mmu_notifier_invalidate_range_start(struct mm_struct *mm, | |||
200 | } | 191 | } |
201 | EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start); | 192 | EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start); |
202 | 193 | ||
203 | void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | 194 | void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, |
204 | unsigned long start, | ||
205 | unsigned long end, | ||
206 | bool only_end) | 195 | bool only_end) |
207 | { | 196 | { |
208 | struct mmu_notifier *mn; | 197 | struct mmu_notifier *mn; |
209 | int id; | 198 | int id; |
210 | 199 | ||
211 | id = srcu_read_lock(&srcu); | 200 | id = srcu_read_lock(&srcu); |
212 | hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { | 201 | hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) { |
213 | /* | 202 | /* |
214 | * Call invalidate_range here too to avoid the need for the | 203 | * Call invalidate_range here too to avoid the need for the |
215 | * subsystem of having to register an invalidate_range_end | 204 | * subsystem of having to register an invalidate_range_end |
@@ -224,9 +213,11 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
224 | * already happen under page table lock. | 213 | * already happen under page table lock. |
225 | */ | 214 | */ |
226 | if (!only_end && mn->ops->invalidate_range) | 215 | if (!only_end && mn->ops->invalidate_range) |
227 | mn->ops->invalidate_range(mn, mm, start, end); | 216 | mn->ops->invalidate_range(mn, range->mm, |
217 | range->start, | ||
218 | range->end); | ||
228 | if (mn->ops->invalidate_range_end) | 219 | if (mn->ops->invalidate_range_end) |
229 | mn->ops->invalidate_range_end(mn, mm, start, end); | 220 | mn->ops->invalidate_range_end(mn, range); |
230 | } | 221 | } |
231 | srcu_read_unlock(&srcu, id); | 222 | srcu_read_unlock(&srcu, id); |
232 | } | 223 | } |
diff --git a/mm/mprotect.c b/mm/mprotect.c index 6d331620b9e5..36cb358db170 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -167,11 +167,12 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, | |||
167 | pgprot_t newprot, int dirty_accountable, int prot_numa) | 167 | pgprot_t newprot, int dirty_accountable, int prot_numa) |
168 | { | 168 | { |
169 | pmd_t *pmd; | 169 | pmd_t *pmd; |
170 | struct mm_struct *mm = vma->vm_mm; | ||
171 | unsigned long next; | 170 | unsigned long next; |
172 | unsigned long pages = 0; | 171 | unsigned long pages = 0; |
173 | unsigned long nr_huge_updates = 0; | 172 | unsigned long nr_huge_updates = 0; |
174 | unsigned long mni_start = 0; | 173 | struct mmu_notifier_range range; |
174 | |||
175 | range.start = 0; | ||
175 | 176 | ||
176 | pmd = pmd_offset(pud, addr); | 177 | pmd = pmd_offset(pud, addr); |
177 | do { | 178 | do { |
@@ -183,9 +184,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, | |||
183 | goto next; | 184 | goto next; |
184 | 185 | ||
185 | /* invoke the mmu notifier if the pmd is populated */ | 186 | /* invoke the mmu notifier if the pmd is populated */ |
186 | if (!mni_start) { | 187 | if (!range.start) { |
187 | mni_start = addr; | 188 | mmu_notifier_range_init(&range, vma->vm_mm, addr, end); |
188 | mmu_notifier_invalidate_range_start(mm, mni_start, end); | 189 | mmu_notifier_invalidate_range_start(&range); |
189 | } | 190 | } |
190 | 191 | ||
191 | if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { | 192 | if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { |
@@ -214,8 +215,8 @@ next: | |||
214 | cond_resched(); | 215 | cond_resched(); |
215 | } while (pmd++, addr = next, addr != end); | 216 | } while (pmd++, addr = next, addr != end); |
216 | 217 | ||
217 | if (mni_start) | 218 | if (range.start) |
218 | mmu_notifier_invalidate_range_end(mm, mni_start, end); | 219 | mmu_notifier_invalidate_range_end(&range); |
219 | 220 | ||
220 | if (nr_huge_updates) | 221 | if (nr_huge_updates) |
221 | count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates); | 222 | count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates); |
diff --git a/mm/mremap.c b/mm/mremap.c index 7f9f9180e401..def01d86e36f 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -197,16 +197,14 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
197 | bool need_rmap_locks) | 197 | bool need_rmap_locks) |
198 | { | 198 | { |
199 | unsigned long extent, next, old_end; | 199 | unsigned long extent, next, old_end; |
200 | struct mmu_notifier_range range; | ||
200 | pmd_t *old_pmd, *new_pmd; | 201 | pmd_t *old_pmd, *new_pmd; |
201 | unsigned long mmun_start; /* For mmu_notifiers */ | ||
202 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
203 | 202 | ||
204 | old_end = old_addr + len; | 203 | old_end = old_addr + len; |
205 | flush_cache_range(vma, old_addr, old_end); | 204 | flush_cache_range(vma, old_addr, old_end); |
206 | 205 | ||
207 | mmun_start = old_addr; | 206 | mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end); |
208 | mmun_end = old_end; | 207 | mmu_notifier_invalidate_range_start(&range); |
209 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); | ||
210 | 208 | ||
211 | for (; old_addr < old_end; old_addr += extent, new_addr += extent) { | 209 | for (; old_addr < old_end; old_addr += extent, new_addr += extent) { |
212 | cond_resched(); | 210 | cond_resched(); |
@@ -247,7 +245,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
247 | new_pmd, new_addr, need_rmap_locks); | 245 | new_pmd, new_addr, need_rmap_locks); |
248 | } | 246 | } |
249 | 247 | ||
250 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); | 248 | mmu_notifier_invalidate_range_end(&range); |
251 | 249 | ||
252 | return len + old_addr - old_end; /* how much done */ | 250 | return len + old_addr - old_end; /* how much done */ |
253 | } | 251 | } |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6589f60d5018..f0e8cd9edb1a 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -245,11 +245,11 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | |||
245 | return points > 0 ? points : 1; | 245 | return points > 0 ? points : 1; |
246 | } | 246 | } |
247 | 247 | ||
248 | enum oom_constraint { | 248 | static const char * const oom_constraint_text[] = { |
249 | CONSTRAINT_NONE, | 249 | [CONSTRAINT_NONE] = "CONSTRAINT_NONE", |
250 | CONSTRAINT_CPUSET, | 250 | [CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET", |
251 | CONSTRAINT_MEMORY_POLICY, | 251 | [CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY", |
252 | CONSTRAINT_MEMCG, | 252 | [CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG", |
253 | }; | 253 | }; |
254 | 254 | ||
255 | /* | 255 | /* |
@@ -269,7 +269,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc) | |||
269 | } | 269 | } |
270 | 270 | ||
271 | /* Default to all available memory */ | 271 | /* Default to all available memory */ |
272 | oc->totalpages = totalram_pages + total_swap_pages; | 272 | oc->totalpages = totalram_pages() + total_swap_pages; |
273 | 273 | ||
274 | if (!IS_ENABLED(CONFIG_NUMA)) | 274 | if (!IS_ENABLED(CONFIG_NUMA)) |
275 | return CONSTRAINT_NONE; | 275 | return CONSTRAINT_NONE; |
@@ -428,19 +428,29 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) | |||
428 | rcu_read_unlock(); | 428 | rcu_read_unlock(); |
429 | } | 429 | } |
430 | 430 | ||
431 | static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim) | ||
432 | { | ||
433 | /* one line summary of the oom killer context. */ | ||
434 | pr_info("oom-kill:constraint=%s,nodemask=%*pbl", | ||
435 | oom_constraint_text[oc->constraint], | ||
436 | nodemask_pr_args(oc->nodemask)); | ||
437 | cpuset_print_current_mems_allowed(); | ||
438 | mem_cgroup_print_oom_context(oc->memcg, victim); | ||
439 | pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid, | ||
440 | from_kuid(&init_user_ns, task_uid(victim))); | ||
441 | } | ||
442 | |||
431 | static void dump_header(struct oom_control *oc, struct task_struct *p) | 443 | static void dump_header(struct oom_control *oc, struct task_struct *p) |
432 | { | 444 | { |
433 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n", | 445 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", |
434 | current->comm, oc->gfp_mask, &oc->gfp_mask, | 446 | current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, |
435 | nodemask_pr_args(oc->nodemask), oc->order, | ||
436 | current->signal->oom_score_adj); | 447 | current->signal->oom_score_adj); |
437 | if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) | 448 | if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) |
438 | pr_warn("COMPACTION is disabled!!!\n"); | 449 | pr_warn("COMPACTION is disabled!!!\n"); |
439 | 450 | ||
440 | cpuset_print_current_mems_allowed(); | ||
441 | dump_stack(); | 451 | dump_stack(); |
442 | if (is_memcg_oom(oc)) | 452 | if (is_memcg_oom(oc)) |
443 | mem_cgroup_print_oom_info(oc->memcg, p); | 453 | mem_cgroup_print_oom_meminfo(oc->memcg); |
444 | else { | 454 | else { |
445 | show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); | 455 | show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); |
446 | if (is_dump_unreclaim_slabs()) | 456 | if (is_dump_unreclaim_slabs()) |
@@ -448,6 +458,8 @@ static void dump_header(struct oom_control *oc, struct task_struct *p) | |||
448 | } | 458 | } |
449 | if (sysctl_oom_dump_tasks) | 459 | if (sysctl_oom_dump_tasks) |
450 | dump_tasks(oc->memcg, oc->nodemask); | 460 | dump_tasks(oc->memcg, oc->nodemask); |
461 | if (p) | ||
462 | dump_oom_summary(oc, p); | ||
451 | } | 463 | } |
452 | 464 | ||
453 | /* | 465 | /* |
@@ -516,19 +528,20 @@ bool __oom_reap_task_mm(struct mm_struct *mm) | |||
516 | * count elevated without a good reason. | 528 | * count elevated without a good reason. |
517 | */ | 529 | */ |
518 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { | 530 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { |
519 | const unsigned long start = vma->vm_start; | 531 | struct mmu_notifier_range range; |
520 | const unsigned long end = vma->vm_end; | ||
521 | struct mmu_gather tlb; | 532 | struct mmu_gather tlb; |
522 | 533 | ||
523 | tlb_gather_mmu(&tlb, mm, start, end); | 534 | mmu_notifier_range_init(&range, mm, vma->vm_start, |
524 | if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { | 535 | vma->vm_end); |
525 | tlb_finish_mmu(&tlb, start, end); | 536 | tlb_gather_mmu(&tlb, mm, range.start, range.end); |
537 | if (mmu_notifier_invalidate_range_start_nonblock(&range)) { | ||
538 | tlb_finish_mmu(&tlb, range.start, range.end); | ||
526 | ret = false; | 539 | ret = false; |
527 | continue; | 540 | continue; |
528 | } | 541 | } |
529 | unmap_page_range(&tlb, vma, start, end, NULL); | 542 | unmap_page_range(&tlb, vma, range.start, range.end, NULL); |
530 | mmu_notifier_invalidate_range_end(mm, start, end); | 543 | mmu_notifier_invalidate_range_end(&range); |
531 | tlb_finish_mmu(&tlb, start, end); | 544 | tlb_finish_mmu(&tlb, range.start, range.end); |
532 | } | 545 | } |
533 | } | 546 | } |
534 | 547 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 3f690bae6b78..7d1010453fb9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -2154,6 +2154,7 @@ int write_cache_pages(struct address_space *mapping, | |||
2154 | { | 2154 | { |
2155 | int ret = 0; | 2155 | int ret = 0; |
2156 | int done = 0; | 2156 | int done = 0; |
2157 | int error; | ||
2157 | struct pagevec pvec; | 2158 | struct pagevec pvec; |
2158 | int nr_pages; | 2159 | int nr_pages; |
2159 | pgoff_t uninitialized_var(writeback_index); | 2160 | pgoff_t uninitialized_var(writeback_index); |
@@ -2227,25 +2228,31 @@ continue_unlock: | |||
2227 | goto continue_unlock; | 2228 | goto continue_unlock; |
2228 | 2229 | ||
2229 | trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); | 2230 | trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); |
2230 | ret = (*writepage)(page, wbc, data); | 2231 | error = (*writepage)(page, wbc, data); |
2231 | if (unlikely(ret)) { | 2232 | if (unlikely(error)) { |
2232 | if (ret == AOP_WRITEPAGE_ACTIVATE) { | 2233 | /* |
2234 | * Handle errors according to the type of | ||
2235 | * writeback. There's no need to continue for | ||
2236 | * background writeback. Just push done_index | ||
2237 | * past this page so media errors won't choke | ||
2238 | * writeout for the entire file. For integrity | ||
2239 | * writeback, we must process the entire dirty | ||
2240 | * set regardless of errors because the fs may | ||
2241 | * still have state to clear for each page. In | ||
2242 | * that case we continue processing and return | ||
2243 | * the first error. | ||
2244 | */ | ||
2245 | if (error == AOP_WRITEPAGE_ACTIVATE) { | ||
2233 | unlock_page(page); | 2246 | unlock_page(page); |
2234 | ret = 0; | 2247 | error = 0; |
2235 | } else { | 2248 | } else if (wbc->sync_mode != WB_SYNC_ALL) { |
2236 | /* | 2249 | ret = error; |
2237 | * done_index is set past this page, | ||
2238 | * so media errors will not choke | ||
2239 | * background writeout for the entire | ||
2240 | * file. This has consequences for | ||
2241 | * range_cyclic semantics (ie. it may | ||
2242 | * not be suitable for data integrity | ||
2243 | * writeout). | ||
2244 | */ | ||
2245 | done_index = page->index + 1; | 2250 | done_index = page->index + 1; |
2246 | done = 1; | 2251 | done = 1; |
2247 | break; | 2252 | break; |
2248 | } | 2253 | } |
2254 | if (!ret) | ||
2255 | ret = error; | ||
2249 | } | 2256 | } |
2250 | 2257 | ||
2251 | /* | 2258 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e95b5b7c9c3d..cde5dac6229a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/highmem.h> | ||
19 | #include <linux/swap.h> | 20 | #include <linux/swap.h> |
20 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
21 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
@@ -96,8 +97,12 @@ int _node_numa_mem_[MAX_NUMNODES]; | |||
96 | #endif | 97 | #endif |
97 | 98 | ||
98 | /* work_structs for global per-cpu drains */ | 99 | /* work_structs for global per-cpu drains */ |
100 | struct pcpu_drain { | ||
101 | struct zone *zone; | ||
102 | struct work_struct work; | ||
103 | }; | ||
99 | DEFINE_MUTEX(pcpu_drain_mutex); | 104 | DEFINE_MUTEX(pcpu_drain_mutex); |
100 | DEFINE_PER_CPU(struct work_struct, pcpu_drain); | 105 | DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); |
101 | 106 | ||
102 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY | 107 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY |
103 | volatile unsigned long latent_entropy __latent_entropy; | 108 | volatile unsigned long latent_entropy __latent_entropy; |
@@ -121,10 +126,8 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { | |||
121 | }; | 126 | }; |
122 | EXPORT_SYMBOL(node_states); | 127 | EXPORT_SYMBOL(node_states); |
123 | 128 | ||
124 | /* Protect totalram_pages and zone->managed_pages */ | 129 | atomic_long_t _totalram_pages __read_mostly; |
125 | static DEFINE_SPINLOCK(managed_page_count_lock); | 130 | EXPORT_SYMBOL(_totalram_pages); |
126 | |||
127 | unsigned long totalram_pages __read_mostly; | ||
128 | unsigned long totalreserve_pages __read_mostly; | 131 | unsigned long totalreserve_pages __read_mostly; |
129 | unsigned long totalcma_pages __read_mostly; | 132 | unsigned long totalcma_pages __read_mostly; |
130 | 133 | ||
@@ -237,7 +240,7 @@ static char * const zone_names[MAX_NR_ZONES] = { | |||
237 | #endif | 240 | #endif |
238 | }; | 241 | }; |
239 | 242 | ||
240 | char * const migratetype_names[MIGRATE_TYPES] = { | 243 | const char * const migratetype_names[MIGRATE_TYPES] = { |
241 | "Unmovable", | 244 | "Unmovable", |
242 | "Movable", | 245 | "Movable", |
243 | "Reclaimable", | 246 | "Reclaimable", |
@@ -263,20 +266,21 @@ compound_page_dtor * const compound_page_dtors[] = { | |||
263 | 266 | ||
264 | int min_free_kbytes = 1024; | 267 | int min_free_kbytes = 1024; |
265 | int user_min_free_kbytes = -1; | 268 | int user_min_free_kbytes = -1; |
269 | int watermark_boost_factor __read_mostly = 15000; | ||
266 | int watermark_scale_factor = 10; | 270 | int watermark_scale_factor = 10; |
267 | 271 | ||
268 | static unsigned long nr_kernel_pages __meminitdata; | 272 | static unsigned long nr_kernel_pages __initdata; |
269 | static unsigned long nr_all_pages __meminitdata; | 273 | static unsigned long nr_all_pages __initdata; |
270 | static unsigned long dma_reserve __meminitdata; | 274 | static unsigned long dma_reserve __initdata; |
271 | 275 | ||
272 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 276 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
273 | static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __meminitdata; | 277 | static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; |
274 | static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __meminitdata; | 278 | static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; |
275 | static unsigned long required_kernelcore __initdata; | 279 | static unsigned long required_kernelcore __initdata; |
276 | static unsigned long required_kernelcore_percent __initdata; | 280 | static unsigned long required_kernelcore_percent __initdata; |
277 | static unsigned long required_movablecore __initdata; | 281 | static unsigned long required_movablecore __initdata; |
278 | static unsigned long required_movablecore_percent __initdata; | 282 | static unsigned long required_movablecore_percent __initdata; |
279 | static unsigned long zone_movable_pfn[MAX_NUMNODES] __meminitdata; | 283 | static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; |
280 | static bool mirrored_kernelcore __meminitdata; | 284 | static bool mirrored_kernelcore __meminitdata; |
281 | 285 | ||
282 | /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ | 286 | /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ |
@@ -294,6 +298,32 @@ EXPORT_SYMBOL(nr_online_nodes); | |||
294 | int page_group_by_mobility_disabled __read_mostly; | 298 | int page_group_by_mobility_disabled __read_mostly; |
295 | 299 | ||
296 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | 300 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
301 | /* | ||
302 | * During boot we initialize deferred pages on-demand, as needed, but once | ||
303 | * page_alloc_init_late() has finished, the deferred pages are all initialized, | ||
304 | * and we can permanently disable that path. | ||
305 | */ | ||
306 | static DEFINE_STATIC_KEY_TRUE(deferred_pages); | ||
307 | |||
308 | /* | ||
309 | * Calling kasan_free_pages() only after deferred memory initialization | ||
310 | * has completed. Poisoning pages during deferred memory init will greatly | ||
311 | * lengthen the process and cause problem in large memory systems as the | ||
312 | * deferred pages initialization is done with interrupt disabled. | ||
313 | * | ||
314 | * Assuming that there will be no reference to those newly initialized | ||
315 | * pages before they are ever allocated, this should have no effect on | ||
316 | * KASAN memory tracking as the poison will be properly inserted at page | ||
317 | * allocation time. The only corner case is when pages are allocated by | ||
318 | * on-demand allocation and then freed again before the deferred pages | ||
319 | * initialization is done, but this is not likely to happen. | ||
320 | */ | ||
321 | static inline void kasan_free_nondeferred_pages(struct page *page, int order) | ||
322 | { | ||
323 | if (!static_branch_unlikely(&deferred_pages)) | ||
324 | kasan_free_pages(page, order); | ||
325 | } | ||
326 | |||
297 | /* Returns true if the struct page for the pfn is uninitialised */ | 327 | /* Returns true if the struct page for the pfn is uninitialised */ |
298 | static inline bool __meminit early_page_uninitialised(unsigned long pfn) | 328 | static inline bool __meminit early_page_uninitialised(unsigned long pfn) |
299 | { | 329 | { |
@@ -326,8 +356,13 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) | |||
326 | /* Always populate low zones for address-constrained allocations */ | 356 | /* Always populate low zones for address-constrained allocations */ |
327 | if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) | 357 | if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) |
328 | return false; | 358 | return false; |
359 | |||
360 | /* | ||
361 | * We start only with one section of pages, more pages are added as | ||
362 | * needed until the rest of deferred pages are initialized. | ||
363 | */ | ||
329 | nr_initialised++; | 364 | nr_initialised++; |
330 | if ((nr_initialised > NODE_DATA(nid)->static_init_pgcnt) && | 365 | if ((nr_initialised > PAGES_PER_SECTION) && |
331 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { | 366 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { |
332 | NODE_DATA(nid)->first_deferred_pfn = pfn; | 367 | NODE_DATA(nid)->first_deferred_pfn = pfn; |
333 | return true; | 368 | return true; |
@@ -335,6 +370,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) | |||
335 | return false; | 370 | return false; |
336 | } | 371 | } |
337 | #else | 372 | #else |
373 | #define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o) | ||
374 | |||
338 | static inline bool early_page_uninitialised(unsigned long pfn) | 375 | static inline bool early_page_uninitialised(unsigned long pfn) |
339 | { | 376 | { |
340 | return false; | 377 | return false; |
@@ -426,6 +463,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, | |||
426 | unsigned long old_word, word; | 463 | unsigned long old_word, word; |
427 | 464 | ||
428 | BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); | 465 | BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); |
466 | BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); | ||
429 | 467 | ||
430 | bitmap = get_pageblock_bitmap(page, pfn); | 468 | bitmap = get_pageblock_bitmap(page, pfn); |
431 | bitidx = pfn_to_bitidx(page, pfn); | 469 | bitidx = pfn_to_bitidx(page, pfn); |
@@ -1037,7 +1075,7 @@ static __always_inline bool free_pages_prepare(struct page *page, | |||
1037 | arch_free_page(page, order); | 1075 | arch_free_page(page, order); |
1038 | kernel_poison_pages(page, 1 << order, 0); | 1076 | kernel_poison_pages(page, 1 << order, 0); |
1039 | kernel_map_pages(page, 1 << order, 0); | 1077 | kernel_map_pages(page, 1 << order, 0); |
1040 | kasan_free_pages(page, order); | 1078 | kasan_free_nondeferred_pages(page, order); |
1041 | 1079 | ||
1042 | return true; | 1080 | return true; |
1043 | } | 1081 | } |
@@ -1183,6 +1221,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn, | |||
1183 | init_page_count(page); | 1221 | init_page_count(page); |
1184 | page_mapcount_reset(page); | 1222 | page_mapcount_reset(page); |
1185 | page_cpupid_reset_last(page); | 1223 | page_cpupid_reset_last(page); |
1224 | page_kasan_tag_reset(page); | ||
1186 | 1225 | ||
1187 | INIT_LIST_HEAD(&page->lru); | 1226 | INIT_LIST_HEAD(&page->lru); |
1188 | #ifdef WANT_PAGE_VIRTUAL | 1227 | #ifdef WANT_PAGE_VIRTUAL |
@@ -1279,7 +1318,7 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order) | |||
1279 | __ClearPageReserved(p); | 1318 | __ClearPageReserved(p); |
1280 | set_page_count(p, 0); | 1319 | set_page_count(p, 0); |
1281 | 1320 | ||
1282 | page_zone(page)->managed_pages += nr_pages; | 1321 | atomic_long_add(nr_pages, &page_zone(page)->managed_pages); |
1283 | set_page_refcounted(page); | 1322 | set_page_refcounted(page); |
1284 | __free_pages(page, order); | 1323 | __free_pages(page, order); |
1285 | } | 1324 | } |
@@ -1606,13 +1645,6 @@ static int __init deferred_init_memmap(void *data) | |||
1606 | } | 1645 | } |
1607 | 1646 | ||
1608 | /* | 1647 | /* |
1609 | * During boot we initialize deferred pages on-demand, as needed, but once | ||
1610 | * page_alloc_init_late() has finished, the deferred pages are all initialized, | ||
1611 | * and we can permanently disable that path. | ||
1612 | */ | ||
1613 | static DEFINE_STATIC_KEY_TRUE(deferred_pages); | ||
1614 | |||
1615 | /* | ||
1616 | * If this zone has deferred pages, try to grow it by initializing enough | 1648 | * If this zone has deferred pages, try to grow it by initializing enough |
1617 | * deferred pages to satisfy the allocation specified by order, rounded up to | 1649 | * deferred pages to satisfy the allocation specified by order, rounded up to |
1618 | * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments | 1650 | * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments |
@@ -1981,8 +2013,8 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, | |||
1981 | */ | 2013 | */ |
1982 | static int fallbacks[MIGRATE_TYPES][4] = { | 2014 | static int fallbacks[MIGRATE_TYPES][4] = { |
1983 | [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, | 2015 | [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, |
1984 | [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, | ||
1985 | [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, | 2016 | [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, |
2017 | [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, | ||
1986 | #ifdef CONFIG_CMA | 2018 | #ifdef CONFIG_CMA |
1987 | [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ | 2019 | [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ |
1988 | #endif | 2020 | #endif |
@@ -2129,6 +2161,21 @@ static bool can_steal_fallback(unsigned int order, int start_mt) | |||
2129 | return false; | 2161 | return false; |
2130 | } | 2162 | } |
2131 | 2163 | ||
2164 | static inline void boost_watermark(struct zone *zone) | ||
2165 | { | ||
2166 | unsigned long max_boost; | ||
2167 | |||
2168 | if (!watermark_boost_factor) | ||
2169 | return; | ||
2170 | |||
2171 | max_boost = mult_frac(zone->_watermark[WMARK_HIGH], | ||
2172 | watermark_boost_factor, 10000); | ||
2173 | max_boost = max(pageblock_nr_pages, max_boost); | ||
2174 | |||
2175 | zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, | ||
2176 | max_boost); | ||
2177 | } | ||
2178 | |||
2132 | /* | 2179 | /* |
2133 | * This function implements actual steal behaviour. If order is large enough, | 2180 | * This function implements actual steal behaviour. If order is large enough, |
2134 | * we can steal whole pageblock. If not, we first move freepages in this | 2181 | * we can steal whole pageblock. If not, we first move freepages in this |
@@ -2138,7 +2185,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt) | |||
2138 | * itself, so pages freed in the future will be put on the correct free list. | 2185 | * itself, so pages freed in the future will be put on the correct free list. |
2139 | */ | 2186 | */ |
2140 | static void steal_suitable_fallback(struct zone *zone, struct page *page, | 2187 | static void steal_suitable_fallback(struct zone *zone, struct page *page, |
2141 | int start_type, bool whole_block) | 2188 | unsigned int alloc_flags, int start_type, bool whole_block) |
2142 | { | 2189 | { |
2143 | unsigned int current_order = page_order(page); | 2190 | unsigned int current_order = page_order(page); |
2144 | struct free_area *area; | 2191 | struct free_area *area; |
@@ -2160,6 +2207,15 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, | |||
2160 | goto single_page; | 2207 | goto single_page; |
2161 | } | 2208 | } |
2162 | 2209 | ||
2210 | /* | ||
2211 | * Boost watermarks to increase reclaim pressure to reduce the | ||
2212 | * likelihood of future fallbacks. Wake kswapd now as the node | ||
2213 | * may be balanced overall and kswapd will not wake naturally. | ||
2214 | */ | ||
2215 | boost_watermark(zone); | ||
2216 | if (alloc_flags & ALLOC_KSWAPD) | ||
2217 | wakeup_kswapd(zone, 0, 0, zone_idx(zone)); | ||
2218 | |||
2163 | /* We are not allowed to try stealing from the whole block */ | 2219 | /* We are not allowed to try stealing from the whole block */ |
2164 | if (!whole_block) | 2220 | if (!whole_block) |
2165 | goto single_page; | 2221 | goto single_page; |
@@ -2258,7 +2314,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, | |||
2258 | * Limit the number reserved to 1 pageblock or roughly 1% of a zone. | 2314 | * Limit the number reserved to 1 pageblock or roughly 1% of a zone. |
2259 | * Check is race-prone but harmless. | 2315 | * Check is race-prone but harmless. |
2260 | */ | 2316 | */ |
2261 | max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; | 2317 | max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; |
2262 | if (zone->nr_reserved_highatomic >= max_managed) | 2318 | if (zone->nr_reserved_highatomic >= max_managed) |
2263 | return; | 2319 | return; |
2264 | 2320 | ||
@@ -2375,20 +2431,30 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, | |||
2375 | * condition simpler. | 2431 | * condition simpler. |
2376 | */ | 2432 | */ |
2377 | static __always_inline bool | 2433 | static __always_inline bool |
2378 | __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) | 2434 | __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, |
2435 | unsigned int alloc_flags) | ||
2379 | { | 2436 | { |
2380 | struct free_area *area; | 2437 | struct free_area *area; |
2381 | int current_order; | 2438 | int current_order; |
2439 | int min_order = order; | ||
2382 | struct page *page; | 2440 | struct page *page; |
2383 | int fallback_mt; | 2441 | int fallback_mt; |
2384 | bool can_steal; | 2442 | bool can_steal; |
2385 | 2443 | ||
2386 | /* | 2444 | /* |
2445 | * Do not steal pages from freelists belonging to other pageblocks | ||
2446 | * i.e. orders < pageblock_order. If there are no local zones free, | ||
2447 | * the zonelists will be reiterated without ALLOC_NOFRAGMENT. | ||
2448 | */ | ||
2449 | if (alloc_flags & ALLOC_NOFRAGMENT) | ||
2450 | min_order = pageblock_order; | ||
2451 | |||
2452 | /* | ||
2387 | * Find the largest available free page in the other list. This roughly | 2453 | * Find the largest available free page in the other list. This roughly |
2388 | * approximates finding the pageblock with the most free pages, which | 2454 | * approximates finding the pageblock with the most free pages, which |
2389 | * would be too costly to do exactly. | 2455 | * would be too costly to do exactly. |
2390 | */ | 2456 | */ |
2391 | for (current_order = MAX_ORDER - 1; current_order >= order; | 2457 | for (current_order = MAX_ORDER - 1; current_order >= min_order; |
2392 | --current_order) { | 2458 | --current_order) { |
2393 | area = &(zone->free_area[current_order]); | 2459 | area = &(zone->free_area[current_order]); |
2394 | fallback_mt = find_suitable_fallback(area, current_order, | 2460 | fallback_mt = find_suitable_fallback(area, current_order, |
@@ -2433,7 +2499,8 @@ do_steal: | |||
2433 | page = list_first_entry(&area->free_list[fallback_mt], | 2499 | page = list_first_entry(&area->free_list[fallback_mt], |
2434 | struct page, lru); | 2500 | struct page, lru); |
2435 | 2501 | ||
2436 | steal_suitable_fallback(zone, page, start_migratetype, can_steal); | 2502 | steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, |
2503 | can_steal); | ||
2437 | 2504 | ||
2438 | trace_mm_page_alloc_extfrag(page, order, current_order, | 2505 | trace_mm_page_alloc_extfrag(page, order, current_order, |
2439 | start_migratetype, fallback_mt); | 2506 | start_migratetype, fallback_mt); |
@@ -2447,7 +2514,8 @@ do_steal: | |||
2447 | * Call me with the zone->lock already held. | 2514 | * Call me with the zone->lock already held. |
2448 | */ | 2515 | */ |
2449 | static __always_inline struct page * | 2516 | static __always_inline struct page * |
2450 | __rmqueue(struct zone *zone, unsigned int order, int migratetype) | 2517 | __rmqueue(struct zone *zone, unsigned int order, int migratetype, |
2518 | unsigned int alloc_flags) | ||
2451 | { | 2519 | { |
2452 | struct page *page; | 2520 | struct page *page; |
2453 | 2521 | ||
@@ -2457,7 +2525,8 @@ retry: | |||
2457 | if (migratetype == MIGRATE_MOVABLE) | 2525 | if (migratetype == MIGRATE_MOVABLE) |
2458 | page = __rmqueue_cma_fallback(zone, order); | 2526 | page = __rmqueue_cma_fallback(zone, order); |
2459 | 2527 | ||
2460 | if (!page && __rmqueue_fallback(zone, order, migratetype)) | 2528 | if (!page && __rmqueue_fallback(zone, order, migratetype, |
2529 | alloc_flags)) | ||
2461 | goto retry; | 2530 | goto retry; |
2462 | } | 2531 | } |
2463 | 2532 | ||
@@ -2472,13 +2541,14 @@ retry: | |||
2472 | */ | 2541 | */ |
2473 | static int rmqueue_bulk(struct zone *zone, unsigned int order, | 2542 | static int rmqueue_bulk(struct zone *zone, unsigned int order, |
2474 | unsigned long count, struct list_head *list, | 2543 | unsigned long count, struct list_head *list, |
2475 | int migratetype) | 2544 | int migratetype, unsigned int alloc_flags) |
2476 | { | 2545 | { |
2477 | int i, alloced = 0; | 2546 | int i, alloced = 0; |
2478 | 2547 | ||
2479 | spin_lock(&zone->lock); | 2548 | spin_lock(&zone->lock); |
2480 | for (i = 0; i < count; ++i) { | 2549 | for (i = 0; i < count; ++i) { |
2481 | struct page *page = __rmqueue(zone, order, migratetype); | 2550 | struct page *page = __rmqueue(zone, order, migratetype, |
2551 | alloc_flags); | ||
2482 | if (unlikely(page == NULL)) | 2552 | if (unlikely(page == NULL)) |
2483 | break; | 2553 | break; |
2484 | 2554 | ||
@@ -2592,6 +2662,10 @@ void drain_local_pages(struct zone *zone) | |||
2592 | 2662 | ||
2593 | static void drain_local_pages_wq(struct work_struct *work) | 2663 | static void drain_local_pages_wq(struct work_struct *work) |
2594 | { | 2664 | { |
2665 | struct pcpu_drain *drain; | ||
2666 | |||
2667 | drain = container_of(work, struct pcpu_drain, work); | ||
2668 | |||
2595 | /* | 2669 | /* |
2596 | * drain_all_pages doesn't use proper cpu hotplug protection so | 2670 | * drain_all_pages doesn't use proper cpu hotplug protection so |
2597 | * we can race with cpu offline when the WQ can move this from | 2671 | * we can race with cpu offline when the WQ can move this from |
@@ -2600,7 +2674,7 @@ static void drain_local_pages_wq(struct work_struct *work) | |||
2600 | * a different one. | 2674 | * a different one. |
2601 | */ | 2675 | */ |
2602 | preempt_disable(); | 2676 | preempt_disable(); |
2603 | drain_local_pages(NULL); | 2677 | drain_local_pages(drain->zone); |
2604 | preempt_enable(); | 2678 | preempt_enable(); |
2605 | } | 2679 | } |
2606 | 2680 | ||
@@ -2671,12 +2745,14 @@ void drain_all_pages(struct zone *zone) | |||
2671 | } | 2745 | } |
2672 | 2746 | ||
2673 | for_each_cpu(cpu, &cpus_with_pcps) { | 2747 | for_each_cpu(cpu, &cpus_with_pcps) { |
2674 | struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); | 2748 | struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); |
2675 | INIT_WORK(work, drain_local_pages_wq); | 2749 | |
2676 | queue_work_on(cpu, mm_percpu_wq, work); | 2750 | drain->zone = zone; |
2751 | INIT_WORK(&drain->work, drain_local_pages_wq); | ||
2752 | queue_work_on(cpu, mm_percpu_wq, &drain->work); | ||
2677 | } | 2753 | } |
2678 | for_each_cpu(cpu, &cpus_with_pcps) | 2754 | for_each_cpu(cpu, &cpus_with_pcps) |
2679 | flush_work(per_cpu_ptr(&pcpu_drain, cpu)); | 2755 | flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); |
2680 | 2756 | ||
2681 | mutex_unlock(&pcpu_drain_mutex); | 2757 | mutex_unlock(&pcpu_drain_mutex); |
2682 | } | 2758 | } |
@@ -2934,6 +3010,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) | |||
2934 | 3010 | ||
2935 | /* Remove page from the per-cpu list, caller must protect the list */ | 3011 | /* Remove page from the per-cpu list, caller must protect the list */ |
2936 | static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, | 3012 | static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, |
3013 | unsigned int alloc_flags, | ||
2937 | struct per_cpu_pages *pcp, | 3014 | struct per_cpu_pages *pcp, |
2938 | struct list_head *list) | 3015 | struct list_head *list) |
2939 | { | 3016 | { |
@@ -2943,7 +3020,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, | |||
2943 | if (list_empty(list)) { | 3020 | if (list_empty(list)) { |
2944 | pcp->count += rmqueue_bulk(zone, 0, | 3021 | pcp->count += rmqueue_bulk(zone, 0, |
2945 | pcp->batch, list, | 3022 | pcp->batch, list, |
2946 | migratetype); | 3023 | migratetype, alloc_flags); |
2947 | if (unlikely(list_empty(list))) | 3024 | if (unlikely(list_empty(list))) |
2948 | return NULL; | 3025 | return NULL; |
2949 | } | 3026 | } |
@@ -2959,7 +3036,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, | |||
2959 | /* Lock and remove page from the per-cpu list */ | 3036 | /* Lock and remove page from the per-cpu list */ |
2960 | static struct page *rmqueue_pcplist(struct zone *preferred_zone, | 3037 | static struct page *rmqueue_pcplist(struct zone *preferred_zone, |
2961 | struct zone *zone, unsigned int order, | 3038 | struct zone *zone, unsigned int order, |
2962 | gfp_t gfp_flags, int migratetype) | 3039 | gfp_t gfp_flags, int migratetype, |
3040 | unsigned int alloc_flags) | ||
2963 | { | 3041 | { |
2964 | struct per_cpu_pages *pcp; | 3042 | struct per_cpu_pages *pcp; |
2965 | struct list_head *list; | 3043 | struct list_head *list; |
@@ -2969,7 +3047,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, | |||
2969 | local_irq_save(flags); | 3047 | local_irq_save(flags); |
2970 | pcp = &this_cpu_ptr(zone->pageset)->pcp; | 3048 | pcp = &this_cpu_ptr(zone->pageset)->pcp; |
2971 | list = &pcp->lists[migratetype]; | 3049 | list = &pcp->lists[migratetype]; |
2972 | page = __rmqueue_pcplist(zone, migratetype, pcp, list); | 3050 | page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); |
2973 | if (page) { | 3051 | if (page) { |
2974 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); | 3052 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); |
2975 | zone_statistics(preferred_zone, zone); | 3053 | zone_statistics(preferred_zone, zone); |
@@ -2992,7 +3070,7 @@ struct page *rmqueue(struct zone *preferred_zone, | |||
2992 | 3070 | ||
2993 | if (likely(order == 0)) { | 3071 | if (likely(order == 0)) { |
2994 | page = rmqueue_pcplist(preferred_zone, zone, order, | 3072 | page = rmqueue_pcplist(preferred_zone, zone, order, |
2995 | gfp_flags, migratetype); | 3073 | gfp_flags, migratetype, alloc_flags); |
2996 | goto out; | 3074 | goto out; |
2997 | } | 3075 | } |
2998 | 3076 | ||
@@ -3011,7 +3089,7 @@ struct page *rmqueue(struct zone *preferred_zone, | |||
3011 | trace_mm_page_alloc_zone_locked(page, order, migratetype); | 3089 | trace_mm_page_alloc_zone_locked(page, order, migratetype); |
3012 | } | 3090 | } |
3013 | if (!page) | 3091 | if (!page) |
3014 | page = __rmqueue(zone, order, migratetype); | 3092 | page = __rmqueue(zone, order, migratetype, alloc_flags); |
3015 | } while (page && check_new_pages(page, order)); | 3093 | } while (page && check_new_pages(page, order)); |
3016 | spin_unlock(&zone->lock); | 3094 | spin_unlock(&zone->lock); |
3017 | if (!page) | 3095 | if (!page) |
@@ -3053,7 +3131,7 @@ static int __init setup_fail_page_alloc(char *str) | |||
3053 | } | 3131 | } |
3054 | __setup("fail_page_alloc=", setup_fail_page_alloc); | 3132 | __setup("fail_page_alloc=", setup_fail_page_alloc); |
3055 | 3133 | ||
3056 | static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) | 3134 | static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) |
3057 | { | 3135 | { |
3058 | if (order < fail_page_alloc.min_order) | 3136 | if (order < fail_page_alloc.min_order) |
3059 | return false; | 3137 | return false; |
@@ -3103,13 +3181,19 @@ late_initcall(fail_page_alloc_debugfs); | |||
3103 | 3181 | ||
3104 | #else /* CONFIG_FAIL_PAGE_ALLOC */ | 3182 | #else /* CONFIG_FAIL_PAGE_ALLOC */ |
3105 | 3183 | ||
3106 | static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) | 3184 | static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) |
3107 | { | 3185 | { |
3108 | return false; | 3186 | return false; |
3109 | } | 3187 | } |
3110 | 3188 | ||
3111 | #endif /* CONFIG_FAIL_PAGE_ALLOC */ | 3189 | #endif /* CONFIG_FAIL_PAGE_ALLOC */ |
3112 | 3190 | ||
3191 | static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) | ||
3192 | { | ||
3193 | return __should_fail_alloc_page(gfp_mask, order); | ||
3194 | } | ||
3195 | ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); | ||
3196 | |||
3113 | /* | 3197 | /* |
3114 | * Return true if free base pages are above 'mark'. For high-order checks it | 3198 | * Return true if free base pages are above 'mark'. For high-order checks it |
3115 | * will return true of the order-0 watermark is reached and there is at least | 3199 | * will return true of the order-0 watermark is reached and there is at least |
@@ -3254,6 +3338,40 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) | |||
3254 | #endif /* CONFIG_NUMA */ | 3338 | #endif /* CONFIG_NUMA */ |
3255 | 3339 | ||
3256 | /* | 3340 | /* |
3341 | * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid | ||
3342 | * fragmentation is subtle. If the preferred zone was HIGHMEM then | ||
3343 | * premature use of a lower zone may cause lowmem pressure problems that | ||
3344 | * are worse than fragmentation. If the next zone is ZONE_DMA then it is | ||
3345 | * probably too small. It only makes sense to spread allocations to avoid | ||
3346 | * fragmentation between the Normal and DMA32 zones. | ||
3347 | */ | ||
3348 | static inline unsigned int | ||
3349 | alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) | ||
3350 | { | ||
3351 | unsigned int alloc_flags = 0; | ||
3352 | |||
3353 | if (gfp_mask & __GFP_KSWAPD_RECLAIM) | ||
3354 | alloc_flags |= ALLOC_KSWAPD; | ||
3355 | |||
3356 | #ifdef CONFIG_ZONE_DMA32 | ||
3357 | if (zone_idx(zone) != ZONE_NORMAL) | ||
3358 | goto out; | ||
3359 | |||
3360 | /* | ||
3361 | * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and | ||
3362 | * the pointer is within zone->zone_pgdat->node_zones[]. Also assume | ||
3363 | * on UMA that if Normal is populated then so is DMA32. | ||
3364 | */ | ||
3365 | BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); | ||
3366 | if (nr_online_nodes > 1 && !populated_zone(--zone)) | ||
3367 | goto out; | ||
3368 | |||
3369 | out: | ||
3370 | #endif /* CONFIG_ZONE_DMA32 */ | ||
3371 | return alloc_flags; | ||
3372 | } | ||
3373 | |||
3374 | /* | ||
3257 | * get_page_from_freelist goes through the zonelist trying to allocate | 3375 | * get_page_from_freelist goes through the zonelist trying to allocate |
3258 | * a page. | 3376 | * a page. |
3259 | */ | 3377 | */ |
@@ -3261,14 +3379,18 @@ static struct page * | |||
3261 | get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, | 3379 | get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, |
3262 | const struct alloc_context *ac) | 3380 | const struct alloc_context *ac) |
3263 | { | 3381 | { |
3264 | struct zoneref *z = ac->preferred_zoneref; | 3382 | struct zoneref *z; |
3265 | struct zone *zone; | 3383 | struct zone *zone; |
3266 | struct pglist_data *last_pgdat_dirty_limit = NULL; | 3384 | struct pglist_data *last_pgdat_dirty_limit = NULL; |
3385 | bool no_fallback; | ||
3267 | 3386 | ||
3387 | retry: | ||
3268 | /* | 3388 | /* |
3269 | * Scan zonelist, looking for a zone with enough free. | 3389 | * Scan zonelist, looking for a zone with enough free. |
3270 | * See also __cpuset_node_allowed() comment in kernel/cpuset.c. | 3390 | * See also __cpuset_node_allowed() comment in kernel/cpuset.c. |
3271 | */ | 3391 | */ |
3392 | no_fallback = alloc_flags & ALLOC_NOFRAGMENT; | ||
3393 | z = ac->preferred_zoneref; | ||
3272 | for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, | 3394 | for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
3273 | ac->nodemask) { | 3395 | ac->nodemask) { |
3274 | struct page *page; | 3396 | struct page *page; |
@@ -3307,7 +3429,23 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, | |||
3307 | } | 3429 | } |
3308 | } | 3430 | } |
3309 | 3431 | ||
3310 | mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; | 3432 | if (no_fallback && nr_online_nodes > 1 && |
3433 | zone != ac->preferred_zoneref->zone) { | ||
3434 | int local_nid; | ||
3435 | |||
3436 | /* | ||
3437 | * If moving to a remote node, retry but allow | ||
3438 | * fragmenting fallbacks. Locality is more important | ||
3439 | * than fragmentation avoidance. | ||
3440 | */ | ||
3441 | local_nid = zone_to_nid(ac->preferred_zoneref->zone); | ||
3442 | if (zone_to_nid(zone) != local_nid) { | ||
3443 | alloc_flags &= ~ALLOC_NOFRAGMENT; | ||
3444 | goto retry; | ||
3445 | } | ||
3446 | } | ||
3447 | |||
3448 | mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); | ||
3311 | if (!zone_watermark_fast(zone, order, mark, | 3449 | if (!zone_watermark_fast(zone, order, mark, |
3312 | ac_classzone_idx(ac), alloc_flags)) { | 3450 | ac_classzone_idx(ac), alloc_flags)) { |
3313 | int ret; | 3451 | int ret; |
@@ -3374,6 +3512,15 @@ try_this_zone: | |||
3374 | } | 3512 | } |
3375 | } | 3513 | } |
3376 | 3514 | ||
3515 | /* | ||
3516 | * It's possible on a UMA machine to get through all zones that are | ||
3517 | * fragmented. If avoiding fragmentation, reset and try again. | ||
3518 | */ | ||
3519 | if (no_fallback) { | ||
3520 | alloc_flags &= ~ALLOC_NOFRAGMENT; | ||
3521 | goto retry; | ||
3522 | } | ||
3523 | |||
3377 | return NULL; | 3524 | return NULL; |
3378 | } | 3525 | } |
3379 | 3526 | ||
@@ -3413,13 +3560,13 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) | |||
3413 | va_start(args, fmt); | 3560 | va_start(args, fmt); |
3414 | vaf.fmt = fmt; | 3561 | vaf.fmt = fmt; |
3415 | vaf.va = &args; | 3562 | vaf.va = &args; |
3416 | pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl\n", | 3563 | pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", |
3417 | current->comm, &vaf, gfp_mask, &gfp_mask, | 3564 | current->comm, &vaf, gfp_mask, &gfp_mask, |
3418 | nodemask_pr_args(nodemask)); | 3565 | nodemask_pr_args(nodemask)); |
3419 | va_end(args); | 3566 | va_end(args); |
3420 | 3567 | ||
3421 | cpuset_print_current_mems_allowed(); | 3568 | cpuset_print_current_mems_allowed(); |
3422 | 3569 | pr_cont("\n"); | |
3423 | dump_stack(); | 3570 | dump_stack(); |
3424 | warn_alloc_show_mem(gfp_mask, nodemask); | 3571 | warn_alloc_show_mem(gfp_mask, nodemask); |
3425 | } | 3572 | } |
@@ -3861,6 +4008,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask) | |||
3861 | } else if (unlikely(rt_task(current)) && !in_interrupt()) | 4008 | } else if (unlikely(rt_task(current)) && !in_interrupt()) |
3862 | alloc_flags |= ALLOC_HARDER; | 4009 | alloc_flags |= ALLOC_HARDER; |
3863 | 4010 | ||
4011 | if (gfp_mask & __GFP_KSWAPD_RECLAIM) | ||
4012 | alloc_flags |= ALLOC_KSWAPD; | ||
4013 | |||
3864 | #ifdef CONFIG_CMA | 4014 | #ifdef CONFIG_CMA |
3865 | if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) | 4015 | if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) |
3866 | alloc_flags |= ALLOC_CMA; | 4016 | alloc_flags |= ALLOC_CMA; |
@@ -4092,7 +4242,7 @@ retry_cpuset: | |||
4092 | if (!ac->preferred_zoneref->zone) | 4242 | if (!ac->preferred_zoneref->zone) |
4093 | goto nopage; | 4243 | goto nopage; |
4094 | 4244 | ||
4095 | if (gfp_mask & __GFP_KSWAPD_RECLAIM) | 4245 | if (alloc_flags & ALLOC_KSWAPD) |
4096 | wake_all_kswapds(order, gfp_mask, ac); | 4246 | wake_all_kswapds(order, gfp_mask, ac); |
4097 | 4247 | ||
4098 | /* | 4248 | /* |
@@ -4150,7 +4300,7 @@ retry_cpuset: | |||
4150 | 4300 | ||
4151 | retry: | 4301 | retry: |
4152 | /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ | 4302 | /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ |
4153 | if (gfp_mask & __GFP_KSWAPD_RECLAIM) | 4303 | if (alloc_flags & ALLOC_KSWAPD) |
4154 | wake_all_kswapds(order, gfp_mask, ac); | 4304 | wake_all_kswapds(order, gfp_mask, ac); |
4155 | 4305 | ||
4156 | reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); | 4306 | reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); |
@@ -4369,6 +4519,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, | |||
4369 | 4519 | ||
4370 | finalise_ac(gfp_mask, &ac); | 4520 | finalise_ac(gfp_mask, &ac); |
4371 | 4521 | ||
4522 | /* | ||
4523 | * Forbid the first pass from falling back to types that fragment | ||
4524 | * memory until all local zones are considered. | ||
4525 | */ | ||
4526 | alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); | ||
4527 | |||
4372 | /* First allocation attempt */ | 4528 | /* First allocation attempt */ |
4373 | page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); | 4529 | page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); |
4374 | if (likely(page)) | 4530 | if (likely(page)) |
@@ -4427,16 +4583,19 @@ unsigned long get_zeroed_page(gfp_t gfp_mask) | |||
4427 | } | 4583 | } |
4428 | EXPORT_SYMBOL(get_zeroed_page); | 4584 | EXPORT_SYMBOL(get_zeroed_page); |
4429 | 4585 | ||
4430 | void __free_pages(struct page *page, unsigned int order) | 4586 | static inline void free_the_page(struct page *page, unsigned int order) |
4431 | { | 4587 | { |
4432 | if (put_page_testzero(page)) { | 4588 | if (order == 0) /* Via pcp? */ |
4433 | if (order == 0) | 4589 | free_unref_page(page); |
4434 | free_unref_page(page); | 4590 | else |
4435 | else | 4591 | __free_pages_ok(page, order); |
4436 | __free_pages_ok(page, order); | ||
4437 | } | ||
4438 | } | 4592 | } |
4439 | 4593 | ||
4594 | void __free_pages(struct page *page, unsigned int order) | ||
4595 | { | ||
4596 | if (put_page_testzero(page)) | ||
4597 | free_the_page(page, order); | ||
4598 | } | ||
4440 | EXPORT_SYMBOL(__free_pages); | 4599 | EXPORT_SYMBOL(__free_pages); |
4441 | 4600 | ||
4442 | void free_pages(unsigned long addr, unsigned int order) | 4601 | void free_pages(unsigned long addr, unsigned int order) |
@@ -4485,14 +4644,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count) | |||
4485 | { | 4644 | { |
4486 | VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); | 4645 | VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); |
4487 | 4646 | ||
4488 | if (page_ref_sub_and_test(page, count)) { | 4647 | if (page_ref_sub_and_test(page, count)) |
4489 | unsigned int order = compound_order(page); | 4648 | free_the_page(page, compound_order(page)); |
4490 | |||
4491 | if (order == 0) | ||
4492 | free_unref_page(page); | ||
4493 | else | ||
4494 | __free_pages_ok(page, order); | ||
4495 | } | ||
4496 | } | 4649 | } |
4497 | EXPORT_SYMBOL(__page_frag_cache_drain); | 4650 | EXPORT_SYMBOL(__page_frag_cache_drain); |
4498 | 4651 | ||
@@ -4558,7 +4711,7 @@ void page_frag_free(void *addr) | |||
4558 | struct page *page = virt_to_head_page(addr); | 4711 | struct page *page = virt_to_head_page(addr); |
4559 | 4712 | ||
4560 | if (unlikely(put_page_testzero(page))) | 4713 | if (unlikely(put_page_testzero(page))) |
4561 | __free_pages_ok(page, compound_order(page)); | 4714 | free_the_page(page, compound_order(page)); |
4562 | } | 4715 | } |
4563 | EXPORT_SYMBOL(page_frag_free); | 4716 | EXPORT_SYMBOL(page_frag_free); |
4564 | 4717 | ||
@@ -4660,7 +4813,7 @@ static unsigned long nr_free_zone_pages(int offset) | |||
4660 | struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); | 4813 | struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); |
4661 | 4814 | ||
4662 | for_each_zone_zonelist(zone, z, zonelist, offset) { | 4815 | for_each_zone_zonelist(zone, z, zonelist, offset) { |
4663 | unsigned long size = zone->managed_pages; | 4816 | unsigned long size = zone_managed_pages(zone); |
4664 | unsigned long high = high_wmark_pages(zone); | 4817 | unsigned long high = high_wmark_pages(zone); |
4665 | if (size > high) | 4818 | if (size > high) |
4666 | sum += size - high; | 4819 | sum += size - high; |
@@ -4712,7 +4865,7 @@ long si_mem_available(void) | |||
4712 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); | 4865 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
4713 | 4866 | ||
4714 | for_each_zone(zone) | 4867 | for_each_zone(zone) |
4715 | wmark_low += zone->watermark[WMARK_LOW]; | 4868 | wmark_low += low_wmark_pages(zone); |
4716 | 4869 | ||
4717 | /* | 4870 | /* |
4718 | * Estimate the amount of memory available for userspace allocations, | 4871 | * Estimate the amount of memory available for userspace allocations, |
@@ -4746,11 +4899,11 @@ EXPORT_SYMBOL_GPL(si_mem_available); | |||
4746 | 4899 | ||
4747 | void si_meminfo(struct sysinfo *val) | 4900 | void si_meminfo(struct sysinfo *val) |
4748 | { | 4901 | { |
4749 | val->totalram = totalram_pages; | 4902 | val->totalram = totalram_pages(); |
4750 | val->sharedram = global_node_page_state(NR_SHMEM); | 4903 | val->sharedram = global_node_page_state(NR_SHMEM); |
4751 | val->freeram = global_zone_page_state(NR_FREE_PAGES); | 4904 | val->freeram = global_zone_page_state(NR_FREE_PAGES); |
4752 | val->bufferram = nr_blockdev_pages(); | 4905 | val->bufferram = nr_blockdev_pages(); |
4753 | val->totalhigh = totalhigh_pages; | 4906 | val->totalhigh = totalhigh_pages(); |
4754 | val->freehigh = nr_free_highpages(); | 4907 | val->freehigh = nr_free_highpages(); |
4755 | val->mem_unit = PAGE_SIZE; | 4908 | val->mem_unit = PAGE_SIZE; |
4756 | } | 4909 | } |
@@ -4767,7 +4920,7 @@ void si_meminfo_node(struct sysinfo *val, int nid) | |||
4767 | pg_data_t *pgdat = NODE_DATA(nid); | 4920 | pg_data_t *pgdat = NODE_DATA(nid); |
4768 | 4921 | ||
4769 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) | 4922 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) |
4770 | managed_pages += pgdat->node_zones[zone_type].managed_pages; | 4923 | managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); |
4771 | val->totalram = managed_pages; | 4924 | val->totalram = managed_pages; |
4772 | val->sharedram = node_page_state(pgdat, NR_SHMEM); | 4925 | val->sharedram = node_page_state(pgdat, NR_SHMEM); |
4773 | val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); | 4926 | val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); |
@@ -4776,7 +4929,7 @@ void si_meminfo_node(struct sysinfo *val, int nid) | |||
4776 | struct zone *zone = &pgdat->node_zones[zone_type]; | 4929 | struct zone *zone = &pgdat->node_zones[zone_type]; |
4777 | 4930 | ||
4778 | if (is_highmem(zone)) { | 4931 | if (is_highmem(zone)) { |
4779 | managed_highpages += zone->managed_pages; | 4932 | managed_highpages += zone_managed_pages(zone); |
4780 | free_highpages += zone_page_state(zone, NR_FREE_PAGES); | 4933 | free_highpages += zone_page_state(zone, NR_FREE_PAGES); |
4781 | } | 4934 | } |
4782 | } | 4935 | } |
@@ -4983,7 +5136,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) | |||
4983 | K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), | 5136 | K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), |
4984 | K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), | 5137 | K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), |
4985 | K(zone->present_pages), | 5138 | K(zone->present_pages), |
4986 | K(zone->managed_pages), | 5139 | K(zone_managed_pages(zone)), |
4987 | K(zone_page_state(zone, NR_MLOCK)), | 5140 | K(zone_page_state(zone, NR_MLOCK)), |
4988 | zone_page_state(zone, NR_KERNEL_STACK_KB), | 5141 | zone_page_state(zone, NR_KERNEL_STACK_KB), |
4989 | K(zone_page_state(zone, NR_PAGETABLE)), | 5142 | K(zone_page_state(zone, NR_PAGETABLE)), |
@@ -5655,7 +5808,7 @@ static int zone_batchsize(struct zone *zone) | |||
5655 | * The per-cpu-pages pools are set to around 1000th of the | 5808 | * The per-cpu-pages pools are set to around 1000th of the |
5656 | * size of the zone. | 5809 | * size of the zone. |
5657 | */ | 5810 | */ |
5658 | batch = zone->managed_pages / 1024; | 5811 | batch = zone_managed_pages(zone) / 1024; |
5659 | /* But no more than a meg. */ | 5812 | /* But no more than a meg. */ |
5660 | if (batch * PAGE_SIZE > 1024 * 1024) | 5813 | if (batch * PAGE_SIZE > 1024 * 1024) |
5661 | batch = (1024 * 1024) / PAGE_SIZE; | 5814 | batch = (1024 * 1024) / PAGE_SIZE; |
@@ -5736,7 +5889,6 @@ static void pageset_init(struct per_cpu_pageset *p) | |||
5736 | memset(p, 0, sizeof(*p)); | 5889 | memset(p, 0, sizeof(*p)); |
5737 | 5890 | ||
5738 | pcp = &p->pcp; | 5891 | pcp = &p->pcp; |
5739 | pcp->count = 0; | ||
5740 | for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) | 5892 | for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) |
5741 | INIT_LIST_HEAD(&pcp->lists[migratetype]); | 5893 | INIT_LIST_HEAD(&pcp->lists[migratetype]); |
5742 | } | 5894 | } |
@@ -5766,7 +5918,7 @@ static void pageset_set_high_and_batch(struct zone *zone, | |||
5766 | { | 5918 | { |
5767 | if (percpu_pagelist_fraction) | 5919 | if (percpu_pagelist_fraction) |
5768 | pageset_set_high(pcp, | 5920 | pageset_set_high(pcp, |
5769 | (zone->managed_pages / | 5921 | (zone_managed_pages(zone) / |
5770 | percpu_pagelist_fraction)); | 5922 | percpu_pagelist_fraction)); |
5771 | else | 5923 | else |
5772 | pageset_set_batch(pcp, zone_batchsize(zone)); | 5924 | pageset_set_batch(pcp, zone_batchsize(zone)); |
@@ -5920,7 +6072,7 @@ void __init sparse_memory_present_with_active_regions(int nid) | |||
5920 | * with no available memory, a warning is printed and the start and end | 6072 | * with no available memory, a warning is printed and the start and end |
5921 | * PFNs will be 0. | 6073 | * PFNs will be 0. |
5922 | */ | 6074 | */ |
5923 | void __meminit get_pfn_range_for_nid(unsigned int nid, | 6075 | void __init get_pfn_range_for_nid(unsigned int nid, |
5924 | unsigned long *start_pfn, unsigned long *end_pfn) | 6076 | unsigned long *start_pfn, unsigned long *end_pfn) |
5925 | { | 6077 | { |
5926 | unsigned long this_start_pfn, this_end_pfn; | 6078 | unsigned long this_start_pfn, this_end_pfn; |
@@ -5969,7 +6121,7 @@ static void __init find_usable_zone_for_movable(void) | |||
5969 | * highest usable zone for ZONE_MOVABLE. This preserves the assumption that | 6121 | * highest usable zone for ZONE_MOVABLE. This preserves the assumption that |
5970 | * zones within a node are in order of monotonic increases memory addresses | 6122 | * zones within a node are in order of monotonic increases memory addresses |
5971 | */ | 6123 | */ |
5972 | static void __meminit adjust_zone_range_for_zone_movable(int nid, | 6124 | static void __init adjust_zone_range_for_zone_movable(int nid, |
5973 | unsigned long zone_type, | 6125 | unsigned long zone_type, |
5974 | unsigned long node_start_pfn, | 6126 | unsigned long node_start_pfn, |
5975 | unsigned long node_end_pfn, | 6127 | unsigned long node_end_pfn, |
@@ -6000,7 +6152,7 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid, | |||
6000 | * Return the number of pages a zone spans in a node, including holes | 6152 | * Return the number of pages a zone spans in a node, including holes |
6001 | * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() | 6153 | * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() |
6002 | */ | 6154 | */ |
6003 | static unsigned long __meminit zone_spanned_pages_in_node(int nid, | 6155 | static unsigned long __init zone_spanned_pages_in_node(int nid, |
6004 | unsigned long zone_type, | 6156 | unsigned long zone_type, |
6005 | unsigned long node_start_pfn, | 6157 | unsigned long node_start_pfn, |
6006 | unsigned long node_end_pfn, | 6158 | unsigned long node_end_pfn, |
@@ -6035,7 +6187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, | |||
6035 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, | 6187 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, |
6036 | * then all holes in the requested range will be accounted for. | 6188 | * then all holes in the requested range will be accounted for. |
6037 | */ | 6189 | */ |
6038 | unsigned long __meminit __absent_pages_in_range(int nid, | 6190 | unsigned long __init __absent_pages_in_range(int nid, |
6039 | unsigned long range_start_pfn, | 6191 | unsigned long range_start_pfn, |
6040 | unsigned long range_end_pfn) | 6192 | unsigned long range_end_pfn) |
6041 | { | 6193 | { |
@@ -6065,7 +6217,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn, | |||
6065 | } | 6217 | } |
6066 | 6218 | ||
6067 | /* Return the number of page frames in holes in a zone on a node */ | 6219 | /* Return the number of page frames in holes in a zone on a node */ |
6068 | static unsigned long __meminit zone_absent_pages_in_node(int nid, | 6220 | static unsigned long __init zone_absent_pages_in_node(int nid, |
6069 | unsigned long zone_type, | 6221 | unsigned long zone_type, |
6070 | unsigned long node_start_pfn, | 6222 | unsigned long node_start_pfn, |
6071 | unsigned long node_end_pfn, | 6223 | unsigned long node_end_pfn, |
@@ -6117,7 +6269,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, | |||
6117 | } | 6269 | } |
6118 | 6270 | ||
6119 | #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 6271 | #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
6120 | static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, | 6272 | static inline unsigned long __init zone_spanned_pages_in_node(int nid, |
6121 | unsigned long zone_type, | 6273 | unsigned long zone_type, |
6122 | unsigned long node_start_pfn, | 6274 | unsigned long node_start_pfn, |
6123 | unsigned long node_end_pfn, | 6275 | unsigned long node_end_pfn, |
@@ -6136,7 +6288,7 @@ static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, | |||
6136 | return zones_size[zone_type]; | 6288 | return zones_size[zone_type]; |
6137 | } | 6289 | } |
6138 | 6290 | ||
6139 | static inline unsigned long __meminit zone_absent_pages_in_node(int nid, | 6291 | static inline unsigned long __init zone_absent_pages_in_node(int nid, |
6140 | unsigned long zone_type, | 6292 | unsigned long zone_type, |
6141 | unsigned long node_start_pfn, | 6293 | unsigned long node_start_pfn, |
6142 | unsigned long node_end_pfn, | 6294 | unsigned long node_end_pfn, |
@@ -6150,7 +6302,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid, | |||
6150 | 6302 | ||
6151 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 6303 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
6152 | 6304 | ||
6153 | static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, | 6305 | static void __init calculate_node_totalpages(struct pglist_data *pgdat, |
6154 | unsigned long node_start_pfn, | 6306 | unsigned long node_start_pfn, |
6155 | unsigned long node_end_pfn, | 6307 | unsigned long node_end_pfn, |
6156 | unsigned long *zones_size, | 6308 | unsigned long *zones_size, |
@@ -6323,7 +6475,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat) | |||
6323 | static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, | 6475 | static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, |
6324 | unsigned long remaining_pages) | 6476 | unsigned long remaining_pages) |
6325 | { | 6477 | { |
6326 | zone->managed_pages = remaining_pages; | 6478 | atomic_long_set(&zone->managed_pages, remaining_pages); |
6327 | zone_set_nid(zone, nid); | 6479 | zone_set_nid(zone, nid); |
6328 | zone->name = zone_names[idx]; | 6480 | zone->name = zone_names[idx]; |
6329 | zone->zone_pgdat = NODE_DATA(nid); | 6481 | zone->zone_pgdat = NODE_DATA(nid); |
@@ -6476,12 +6628,6 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { } | |||
6476 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | 6628 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
6477 | static inline void pgdat_set_deferred_range(pg_data_t *pgdat) | 6629 | static inline void pgdat_set_deferred_range(pg_data_t *pgdat) |
6478 | { | 6630 | { |
6479 | /* | ||
6480 | * We start only with one section of pages, more pages are added as | ||
6481 | * needed until the rest of deferred pages are initialized. | ||
6482 | */ | ||
6483 | pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION, | ||
6484 | pgdat->node_spanned_pages); | ||
6485 | pgdat->first_deferred_pfn = ULONG_MAX; | 6631 | pgdat->first_deferred_pfn = ULONG_MAX; |
6486 | } | 6632 | } |
6487 | #else | 6633 | #else |
@@ -7075,18 +7221,16 @@ early_param("movablecore", cmdline_parse_movablecore); | |||
7075 | 7221 | ||
7076 | void adjust_managed_page_count(struct page *page, long count) | 7222 | void adjust_managed_page_count(struct page *page, long count) |
7077 | { | 7223 | { |
7078 | spin_lock(&managed_page_count_lock); | 7224 | atomic_long_add(count, &page_zone(page)->managed_pages); |
7079 | page_zone(page)->managed_pages += count; | 7225 | totalram_pages_add(count); |
7080 | totalram_pages += count; | ||
7081 | #ifdef CONFIG_HIGHMEM | 7226 | #ifdef CONFIG_HIGHMEM |
7082 | if (PageHighMem(page)) | 7227 | if (PageHighMem(page)) |
7083 | totalhigh_pages += count; | 7228 | totalhigh_pages_add(count); |
7084 | #endif | 7229 | #endif |
7085 | spin_unlock(&managed_page_count_lock); | ||
7086 | } | 7230 | } |
7087 | EXPORT_SYMBOL(adjust_managed_page_count); | 7231 | EXPORT_SYMBOL(adjust_managed_page_count); |
7088 | 7232 | ||
7089 | unsigned long free_reserved_area(void *start, void *end, int poison, char *s) | 7233 | unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) |
7090 | { | 7234 | { |
7091 | void *pos; | 7235 | void *pos; |
7092 | unsigned long pages = 0; | 7236 | unsigned long pages = 0; |
@@ -7123,9 +7267,9 @@ EXPORT_SYMBOL(free_reserved_area); | |||
7123 | void free_highmem_page(struct page *page) | 7267 | void free_highmem_page(struct page *page) |
7124 | { | 7268 | { |
7125 | __free_reserved_page(page); | 7269 | __free_reserved_page(page); |
7126 | totalram_pages++; | 7270 | totalram_pages_inc(); |
7127 | page_zone(page)->managed_pages++; | 7271 | atomic_long_inc(&page_zone(page)->managed_pages); |
7128 | totalhigh_pages++; | 7272 | totalhigh_pages_inc(); |
7129 | } | 7273 | } |
7130 | #endif | 7274 | #endif |
7131 | 7275 | ||
@@ -7174,10 +7318,10 @@ void __init mem_init_print_info(const char *str) | |||
7174 | physpages << (PAGE_SHIFT - 10), | 7318 | physpages << (PAGE_SHIFT - 10), |
7175 | codesize >> 10, datasize >> 10, rosize >> 10, | 7319 | codesize >> 10, datasize >> 10, rosize >> 10, |
7176 | (init_data_size + init_code_size) >> 10, bss_size >> 10, | 7320 | (init_data_size + init_code_size) >> 10, bss_size >> 10, |
7177 | (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10), | 7321 | (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), |
7178 | totalcma_pages << (PAGE_SHIFT - 10), | 7322 | totalcma_pages << (PAGE_SHIFT - 10), |
7179 | #ifdef CONFIG_HIGHMEM | 7323 | #ifdef CONFIG_HIGHMEM |
7180 | totalhigh_pages << (PAGE_SHIFT - 10), | 7324 | totalhigh_pages() << (PAGE_SHIFT - 10), |
7181 | #endif | 7325 | #endif |
7182 | str ? ", " : "", str ? str : ""); | 7326 | str ? ", " : "", str ? str : ""); |
7183 | } | 7327 | } |
@@ -7257,6 +7401,7 @@ static void calculate_totalreserve_pages(void) | |||
7257 | for (i = 0; i < MAX_NR_ZONES; i++) { | 7401 | for (i = 0; i < MAX_NR_ZONES; i++) { |
7258 | struct zone *zone = pgdat->node_zones + i; | 7402 | struct zone *zone = pgdat->node_zones + i; |
7259 | long max = 0; | 7403 | long max = 0; |
7404 | unsigned long managed_pages = zone_managed_pages(zone); | ||
7260 | 7405 | ||
7261 | /* Find valid and maximum lowmem_reserve in the zone */ | 7406 | /* Find valid and maximum lowmem_reserve in the zone */ |
7262 | for (j = i; j < MAX_NR_ZONES; j++) { | 7407 | for (j = i; j < MAX_NR_ZONES; j++) { |
@@ -7267,8 +7412,8 @@ static void calculate_totalreserve_pages(void) | |||
7267 | /* we treat the high watermark as reserved pages. */ | 7412 | /* we treat the high watermark as reserved pages. */ |
7268 | max += high_wmark_pages(zone); | 7413 | max += high_wmark_pages(zone); |
7269 | 7414 | ||
7270 | if (max > zone->managed_pages) | 7415 | if (max > managed_pages) |
7271 | max = zone->managed_pages; | 7416 | max = managed_pages; |
7272 | 7417 | ||
7273 | pgdat->totalreserve_pages += max; | 7418 | pgdat->totalreserve_pages += max; |
7274 | 7419 | ||
@@ -7292,7 +7437,7 @@ static void setup_per_zone_lowmem_reserve(void) | |||
7292 | for_each_online_pgdat(pgdat) { | 7437 | for_each_online_pgdat(pgdat) { |
7293 | for (j = 0; j < MAX_NR_ZONES; j++) { | 7438 | for (j = 0; j < MAX_NR_ZONES; j++) { |
7294 | struct zone *zone = pgdat->node_zones + j; | 7439 | struct zone *zone = pgdat->node_zones + j; |
7295 | unsigned long managed_pages = zone->managed_pages; | 7440 | unsigned long managed_pages = zone_managed_pages(zone); |
7296 | 7441 | ||
7297 | zone->lowmem_reserve[j] = 0; | 7442 | zone->lowmem_reserve[j] = 0; |
7298 | 7443 | ||
@@ -7310,7 +7455,7 @@ static void setup_per_zone_lowmem_reserve(void) | |||
7310 | lower_zone->lowmem_reserve[j] = | 7455 | lower_zone->lowmem_reserve[j] = |
7311 | managed_pages / sysctl_lowmem_reserve_ratio[idx]; | 7456 | managed_pages / sysctl_lowmem_reserve_ratio[idx]; |
7312 | } | 7457 | } |
7313 | managed_pages += lower_zone->managed_pages; | 7458 | managed_pages += zone_managed_pages(lower_zone); |
7314 | } | 7459 | } |
7315 | } | 7460 | } |
7316 | } | 7461 | } |
@@ -7329,14 +7474,14 @@ static void __setup_per_zone_wmarks(void) | |||
7329 | /* Calculate total number of !ZONE_HIGHMEM pages */ | 7474 | /* Calculate total number of !ZONE_HIGHMEM pages */ |
7330 | for_each_zone(zone) { | 7475 | for_each_zone(zone) { |
7331 | if (!is_highmem(zone)) | 7476 | if (!is_highmem(zone)) |
7332 | lowmem_pages += zone->managed_pages; | 7477 | lowmem_pages += zone_managed_pages(zone); |
7333 | } | 7478 | } |
7334 | 7479 | ||
7335 | for_each_zone(zone) { | 7480 | for_each_zone(zone) { |
7336 | u64 tmp; | 7481 | u64 tmp; |
7337 | 7482 | ||
7338 | spin_lock_irqsave(&zone->lock, flags); | 7483 | spin_lock_irqsave(&zone->lock, flags); |
7339 | tmp = (u64)pages_min * zone->managed_pages; | 7484 | tmp = (u64)pages_min * zone_managed_pages(zone); |
7340 | do_div(tmp, lowmem_pages); | 7485 | do_div(tmp, lowmem_pages); |
7341 | if (is_highmem(zone)) { | 7486 | if (is_highmem(zone)) { |
7342 | /* | 7487 | /* |
@@ -7350,15 +7495,15 @@ static void __setup_per_zone_wmarks(void) | |||
7350 | */ | 7495 | */ |
7351 | unsigned long min_pages; | 7496 | unsigned long min_pages; |
7352 | 7497 | ||
7353 | min_pages = zone->managed_pages / 1024; | 7498 | min_pages = zone_managed_pages(zone) / 1024; |
7354 | min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); | 7499 | min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); |
7355 | zone->watermark[WMARK_MIN] = min_pages; | 7500 | zone->_watermark[WMARK_MIN] = min_pages; |
7356 | } else { | 7501 | } else { |
7357 | /* | 7502 | /* |
7358 | * If it's a lowmem zone, reserve a number of pages | 7503 | * If it's a lowmem zone, reserve a number of pages |
7359 | * proportionate to the zone's size. | 7504 | * proportionate to the zone's size. |
7360 | */ | 7505 | */ |
7361 | zone->watermark[WMARK_MIN] = tmp; | 7506 | zone->_watermark[WMARK_MIN] = tmp; |
7362 | } | 7507 | } |
7363 | 7508 | ||
7364 | /* | 7509 | /* |
@@ -7367,11 +7512,12 @@ static void __setup_per_zone_wmarks(void) | |||
7367 | * ensure a minimum size on small systems. | 7512 | * ensure a minimum size on small systems. |
7368 | */ | 7513 | */ |
7369 | tmp = max_t(u64, tmp >> 2, | 7514 | tmp = max_t(u64, tmp >> 2, |
7370 | mult_frac(zone->managed_pages, | 7515 | mult_frac(zone_managed_pages(zone), |
7371 | watermark_scale_factor, 10000)); | 7516 | watermark_scale_factor, 10000)); |
7372 | 7517 | ||
7373 | zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; | 7518 | zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; |
7374 | zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; | 7519 | zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; |
7520 | zone->watermark_boost = 0; | ||
7375 | 7521 | ||
7376 | spin_unlock_irqrestore(&zone->lock, flags); | 7522 | spin_unlock_irqrestore(&zone->lock, flags); |
7377 | } | 7523 | } |
@@ -7472,6 +7618,18 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, | |||
7472 | return 0; | 7618 | return 0; |
7473 | } | 7619 | } |
7474 | 7620 | ||
7621 | int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write, | ||
7622 | void __user *buffer, size_t *length, loff_t *ppos) | ||
7623 | { | ||
7624 | int rc; | ||
7625 | |||
7626 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); | ||
7627 | if (rc) | ||
7628 | return rc; | ||
7629 | |||
7630 | return 0; | ||
7631 | } | ||
7632 | |||
7475 | int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, | 7633 | int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, |
7476 | void __user *buffer, size_t *length, loff_t *ppos) | 7634 | void __user *buffer, size_t *length, loff_t *ppos) |
7477 | { | 7635 | { |
@@ -7497,8 +7655,8 @@ static void setup_min_unmapped_ratio(void) | |||
7497 | pgdat->min_unmapped_pages = 0; | 7655 | pgdat->min_unmapped_pages = 0; |
7498 | 7656 | ||
7499 | for_each_zone(zone) | 7657 | for_each_zone(zone) |
7500 | zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * | 7658 | zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * |
7501 | sysctl_min_unmapped_ratio) / 100; | 7659 | sysctl_min_unmapped_ratio) / 100; |
7502 | } | 7660 | } |
7503 | 7661 | ||
7504 | 7662 | ||
@@ -7525,8 +7683,8 @@ static void setup_min_slab_ratio(void) | |||
7525 | pgdat->min_slab_pages = 0; | 7683 | pgdat->min_slab_pages = 0; |
7526 | 7684 | ||
7527 | for_each_zone(zone) | 7685 | for_each_zone(zone) |
7528 | zone->zone_pgdat->min_slab_pages += (zone->managed_pages * | 7686 | zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * |
7529 | sysctl_min_slab_ratio) / 100; | 7687 | sysctl_min_slab_ratio) / 100; |
7530 | } | 7688 | } |
7531 | 7689 | ||
7532 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, | 7690 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, |
@@ -7766,8 +7924,7 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
7766 | * race condition. So you can't expect this function should be exact. | 7924 | * race condition. So you can't expect this function should be exact. |
7767 | */ | 7925 | */ |
7768 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | 7926 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, |
7769 | int migratetype, | 7927 | int migratetype, int flags) |
7770 | bool skip_hwpoisoned_pages) | ||
7771 | { | 7928 | { |
7772 | unsigned long pfn, iter, found; | 7929 | unsigned long pfn, iter, found; |
7773 | 7930 | ||
@@ -7841,7 +7998,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
7841 | * The HWPoisoned page may be not in buddy system, and | 7998 | * The HWPoisoned page may be not in buddy system, and |
7842 | * page_count() is not 0. | 7999 | * page_count() is not 0. |
7843 | */ | 8000 | */ |
7844 | if (skip_hwpoisoned_pages && PageHWPoison(page)) | 8001 | if ((flags & SKIP_HWPOISON) && PageHWPoison(page)) |
7845 | continue; | 8002 | continue; |
7846 | 8003 | ||
7847 | if (__PageMovable(page)) | 8004 | if (__PageMovable(page)) |
@@ -7868,6 +8025,8 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
7868 | return false; | 8025 | return false; |
7869 | unmovable: | 8026 | unmovable: |
7870 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); | 8027 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); |
8028 | if (flags & REPORT_FAILURE) | ||
8029 | dump_page(pfn_to_page(pfn+iter), "unmovable page"); | ||
7871 | return true; | 8030 | return true; |
7872 | } | 8031 | } |
7873 | 8032 | ||
@@ -7994,8 +8153,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, | |||
7994 | */ | 8153 | */ |
7995 | 8154 | ||
7996 | ret = start_isolate_page_range(pfn_max_align_down(start), | 8155 | ret = start_isolate_page_range(pfn_max_align_down(start), |
7997 | pfn_max_align_up(end), migratetype, | 8156 | pfn_max_align_up(end), migratetype, 0); |
7998 | false); | ||
7999 | if (ret) | 8157 | if (ret) |
8000 | return ret; | 8158 | return ret; |
8001 | 8159 | ||
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 43e085608846..ce323e56b34d 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -15,8 +15,7 @@ | |||
15 | #define CREATE_TRACE_POINTS | 15 | #define CREATE_TRACE_POINTS |
16 | #include <trace/events/page_isolation.h> | 16 | #include <trace/events/page_isolation.h> |
17 | 17 | ||
18 | static int set_migratetype_isolate(struct page *page, int migratetype, | 18 | static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags) |
19 | bool skip_hwpoisoned_pages) | ||
20 | { | 19 | { |
21 | struct zone *zone; | 20 | struct zone *zone; |
22 | unsigned long flags, pfn; | 21 | unsigned long flags, pfn; |
@@ -60,8 +59,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, | |||
60 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. | 59 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. |
61 | * We just check MOVABLE pages. | 60 | * We just check MOVABLE pages. |
62 | */ | 61 | */ |
63 | if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, | 62 | if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags)) |
64 | skip_hwpoisoned_pages)) | ||
65 | ret = 0; | 63 | ret = 0; |
66 | 64 | ||
67 | /* | 65 | /* |
@@ -185,7 +183,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) | |||
185 | * prevents two threads from simultaneously working on overlapping ranges. | 183 | * prevents two threads from simultaneously working on overlapping ranges. |
186 | */ | 184 | */ |
187 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 185 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
188 | unsigned migratetype, bool skip_hwpoisoned_pages) | 186 | unsigned migratetype, int flags) |
189 | { | 187 | { |
190 | unsigned long pfn; | 188 | unsigned long pfn; |
191 | unsigned long undo_pfn; | 189 | unsigned long undo_pfn; |
@@ -199,7 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
199 | pfn += pageblock_nr_pages) { | 197 | pfn += pageblock_nr_pages) { |
200 | page = __first_valid_page(pfn, pageblock_nr_pages); | 198 | page = __first_valid_page(pfn, pageblock_nr_pages); |
201 | if (page && | 199 | if (page && |
202 | set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) { | 200 | set_migratetype_isolate(page, migratetype, flags)) { |
203 | undo_pfn = pfn; | 201 | undo_pfn = pfn; |
204 | goto undo; | 202 | goto undo; |
205 | } | 203 | } |
diff --git a/mm/page_owner.c b/mm/page_owner.c index 87bc0dfdb52b..28b06524939f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c | |||
@@ -351,6 +351,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, | |||
351 | .skip = 0 | 351 | .skip = 0 |
352 | }; | 352 | }; |
353 | 353 | ||
354 | count = min_t(size_t, count, PAGE_SIZE); | ||
354 | kbuf = kmalloc(count, GFP_KERNEL); | 355 | kbuf = kmalloc(count, GFP_KERNEL); |
355 | if (!kbuf) | 356 | if (!kbuf) |
356 | return -ENOMEM; | 357 | return -ENOMEM; |
diff --git a/mm/readahead.c b/mm/readahead.c index f3d6f9656a3c..1ae16522412a 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
@@ -270,17 +270,15 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max) | |||
270 | * return it as the new window size. | 270 | * return it as the new window size. |
271 | */ | 271 | */ |
272 | static unsigned long get_next_ra_size(struct file_ra_state *ra, | 272 | static unsigned long get_next_ra_size(struct file_ra_state *ra, |
273 | unsigned long max) | 273 | unsigned long max) |
274 | { | 274 | { |
275 | unsigned long cur = ra->size; | 275 | unsigned long cur = ra->size; |
276 | unsigned long newsize; | ||
277 | 276 | ||
278 | if (cur < max / 16) | 277 | if (cur < max / 16) |
279 | newsize = 4 * cur; | 278 | return 4 * cur; |
280 | else | 279 | if (cur <= max / 2) |
281 | newsize = 2 * cur; | 280 | return 2 * cur; |
282 | 281 | return max; | |
283 | return min(newsize, max); | ||
284 | } | 282 | } |
285 | 283 | ||
286 | /* | 284 | /* |
@@ -25,6 +25,7 @@ | |||
25 | * page->flags PG_locked (lock_page) | 25 | * page->flags PG_locked (lock_page) |
26 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) | 26 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) |
27 | * mapping->i_mmap_rwsem | 27 | * mapping->i_mmap_rwsem |
28 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) | ||
28 | * anon_vma->rwsem | 29 | * anon_vma->rwsem |
29 | * mm->page_table_lock or pte_lock | 30 | * mm->page_table_lock or pte_lock |
30 | * zone_lru_lock (in mark_page_accessed, isolate_lru_page) | 31 | * zone_lru_lock (in mark_page_accessed, isolate_lru_page) |
@@ -889,15 +890,17 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, | |||
889 | .address = address, | 890 | .address = address, |
890 | .flags = PVMW_SYNC, | 891 | .flags = PVMW_SYNC, |
891 | }; | 892 | }; |
892 | unsigned long start = address, end; | 893 | struct mmu_notifier_range range; |
893 | int *cleaned = arg; | 894 | int *cleaned = arg; |
894 | 895 | ||
895 | /* | 896 | /* |
896 | * We have to assume the worse case ie pmd for invalidation. Note that | 897 | * We have to assume the worse case ie pmd for invalidation. Note that |
897 | * the page can not be free from this function. | 898 | * the page can not be free from this function. |
898 | */ | 899 | */ |
899 | end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); | 900 | mmu_notifier_range_init(&range, vma->vm_mm, address, |
900 | mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); | 901 | min(vma->vm_end, address + |
902 | (PAGE_SIZE << compound_order(page)))); | ||
903 | mmu_notifier_invalidate_range_start(&range); | ||
901 | 904 | ||
902 | while (page_vma_mapped_walk(&pvmw)) { | 905 | while (page_vma_mapped_walk(&pvmw)) { |
903 | unsigned long cstart; | 906 | unsigned long cstart; |
@@ -949,7 +952,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, | |||
949 | (*cleaned)++; | 952 | (*cleaned)++; |
950 | } | 953 | } |
951 | 954 | ||
952 | mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); | 955 | mmu_notifier_invalidate_range_end(&range); |
953 | 956 | ||
954 | return true; | 957 | return true; |
955 | } | 958 | } |
@@ -1017,7 +1020,7 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) | |||
1017 | 1020 | ||
1018 | /** | 1021 | /** |
1019 | * __page_set_anon_rmap - set up new anonymous rmap | 1022 | * __page_set_anon_rmap - set up new anonymous rmap |
1020 | * @page: Page to add to rmap | 1023 | * @page: Page or Hugepage to add to rmap |
1021 | * @vma: VM area to add page to. | 1024 | * @vma: VM area to add page to. |
1022 | * @address: User virtual address of the mapping | 1025 | * @address: User virtual address of the mapping |
1023 | * @exclusive: the page is exclusively owned by the current process | 1026 | * @exclusive: the page is exclusively owned by the current process |
@@ -1345,7 +1348,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1345 | pte_t pteval; | 1348 | pte_t pteval; |
1346 | struct page *subpage; | 1349 | struct page *subpage; |
1347 | bool ret = true; | 1350 | bool ret = true; |
1348 | unsigned long start = address, end; | 1351 | struct mmu_notifier_range range; |
1349 | enum ttu_flags flags = (enum ttu_flags)arg; | 1352 | enum ttu_flags flags = (enum ttu_flags)arg; |
1350 | 1353 | ||
1351 | /* munlock has nothing to gain from examining un-locked vmas */ | 1354 | /* munlock has nothing to gain from examining un-locked vmas */ |
@@ -1369,15 +1372,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1369 | * Note that the page can not be free in this function as call of | 1372 | * Note that the page can not be free in this function as call of |
1370 | * try_to_unmap() must hold a reference on the page. | 1373 | * try_to_unmap() must hold a reference on the page. |
1371 | */ | 1374 | */ |
1372 | end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); | 1375 | mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start, |
1376 | min(vma->vm_end, vma->vm_start + | ||
1377 | (PAGE_SIZE << compound_order(page)))); | ||
1373 | if (PageHuge(page)) { | 1378 | if (PageHuge(page)) { |
1374 | /* | 1379 | /* |
1375 | * If sharing is possible, start and end will be adjusted | 1380 | * If sharing is possible, start and end will be adjusted |
1376 | * accordingly. | 1381 | * accordingly. |
1382 | * | ||
1383 | * If called for a huge page, caller must hold i_mmap_rwsem | ||
1384 | * in write mode as it is possible to call huge_pmd_unshare. | ||
1377 | */ | 1385 | */ |
1378 | adjust_range_if_pmd_sharing_possible(vma, &start, &end); | 1386 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
1387 | &range.end); | ||
1379 | } | 1388 | } |
1380 | mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); | 1389 | mmu_notifier_invalidate_range_start(&range); |
1381 | 1390 | ||
1382 | while (page_vma_mapped_walk(&pvmw)) { | 1391 | while (page_vma_mapped_walk(&pvmw)) { |
1383 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | 1392 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
@@ -1428,9 +1437,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1428 | * we must flush them all. start/end were | 1437 | * we must flush them all. start/end were |
1429 | * already adjusted above to cover this range. | 1438 | * already adjusted above to cover this range. |
1430 | */ | 1439 | */ |
1431 | flush_cache_range(vma, start, end); | 1440 | flush_cache_range(vma, range.start, range.end); |
1432 | flush_tlb_range(vma, start, end); | 1441 | flush_tlb_range(vma, range.start, range.end); |
1433 | mmu_notifier_invalidate_range(mm, start, end); | 1442 | mmu_notifier_invalidate_range(mm, range.start, |
1443 | range.end); | ||
1434 | 1444 | ||
1435 | /* | 1445 | /* |
1436 | * The ref count of the PMD page was dropped | 1446 | * The ref count of the PMD page was dropped |
@@ -1650,7 +1660,7 @@ discard: | |||
1650 | put_page(page); | 1660 | put_page(page); |
1651 | } | 1661 | } |
1652 | 1662 | ||
1653 | mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); | 1663 | mmu_notifier_invalidate_range_end(&range); |
1654 | 1664 | ||
1655 | return ret; | 1665 | return ret; |
1656 | } | 1666 | } |
@@ -1910,27 +1920,10 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) | |||
1910 | 1920 | ||
1911 | #ifdef CONFIG_HUGETLB_PAGE | 1921 | #ifdef CONFIG_HUGETLB_PAGE |
1912 | /* | 1922 | /* |
1913 | * The following three functions are for anonymous (private mapped) hugepages. | 1923 | * The following two functions are for anonymous (private mapped) hugepages. |
1914 | * Unlike common anonymous pages, anonymous hugepages have no accounting code | 1924 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
1915 | * and no lru code, because we handle hugepages differently from common pages. | 1925 | * and no lru code, because we handle hugepages differently from common pages. |
1916 | */ | 1926 | */ |
1917 | static void __hugepage_set_anon_rmap(struct page *page, | ||
1918 | struct vm_area_struct *vma, unsigned long address, int exclusive) | ||
1919 | { | ||
1920 | struct anon_vma *anon_vma = vma->anon_vma; | ||
1921 | |||
1922 | BUG_ON(!anon_vma); | ||
1923 | |||
1924 | if (PageAnon(page)) | ||
1925 | return; | ||
1926 | if (!exclusive) | ||
1927 | anon_vma = anon_vma->root; | ||
1928 | |||
1929 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | ||
1930 | page->mapping = (struct address_space *) anon_vma; | ||
1931 | page->index = linear_page_index(vma, address); | ||
1932 | } | ||
1933 | |||
1934 | void hugepage_add_anon_rmap(struct page *page, | 1927 | void hugepage_add_anon_rmap(struct page *page, |
1935 | struct vm_area_struct *vma, unsigned long address) | 1928 | struct vm_area_struct *vma, unsigned long address) |
1936 | { | 1929 | { |
@@ -1942,7 +1935,7 @@ void hugepage_add_anon_rmap(struct page *page, | |||
1942 | /* address might be in next vma when migration races vma_adjust */ | 1935 | /* address might be in next vma when migration races vma_adjust */ |
1943 | first = atomic_inc_and_test(compound_mapcount_ptr(page)); | 1936 | first = atomic_inc_and_test(compound_mapcount_ptr(page)); |
1944 | if (first) | 1937 | if (first) |
1945 | __hugepage_set_anon_rmap(page, vma, address, 0); | 1938 | __page_set_anon_rmap(page, vma, address, 0); |
1946 | } | 1939 | } |
1947 | 1940 | ||
1948 | void hugepage_add_new_anon_rmap(struct page *page, | 1941 | void hugepage_add_new_anon_rmap(struct page *page, |
@@ -1950,6 +1943,6 @@ void hugepage_add_new_anon_rmap(struct page *page, | |||
1950 | { | 1943 | { |
1951 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1944 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
1952 | atomic_set(compound_mapcount_ptr(page), 0); | 1945 | atomic_set(compound_mapcount_ptr(page), 0); |
1953 | __hugepage_set_anon_rmap(page, vma, address, 1); | 1946 | __page_set_anon_rmap(page, vma, address, 1); |
1954 | } | 1947 | } |
1955 | #endif /* CONFIG_HUGETLB_PAGE */ | 1948 | #endif /* CONFIG_HUGETLB_PAGE */ |
diff --git a/mm/shmem.c b/mm/shmem.c index 375f3ac19bb8..6ece1e2fe76e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -109,12 +109,14 @@ struct shmem_falloc { | |||
109 | #ifdef CONFIG_TMPFS | 109 | #ifdef CONFIG_TMPFS |
110 | static unsigned long shmem_default_max_blocks(void) | 110 | static unsigned long shmem_default_max_blocks(void) |
111 | { | 111 | { |
112 | return totalram_pages / 2; | 112 | return totalram_pages() / 2; |
113 | } | 113 | } |
114 | 114 | ||
115 | static unsigned long shmem_default_max_inodes(void) | 115 | static unsigned long shmem_default_max_inodes(void) |
116 | { | 116 | { |
117 | return min(totalram_pages - totalhigh_pages, totalram_pages / 2); | 117 | unsigned long nr_pages = totalram_pages(); |
118 | |||
119 | return min(nr_pages - totalhigh_pages(), nr_pages / 2); | ||
118 | } | 120 | } |
119 | #endif | 121 | #endif |
120 | 122 | ||
@@ -3301,7 +3303,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | |||
3301 | size = memparse(value,&rest); | 3303 | size = memparse(value,&rest); |
3302 | if (*rest == '%') { | 3304 | if (*rest == '%') { |
3303 | size <<= PAGE_SHIFT; | 3305 | size <<= PAGE_SHIFT; |
3304 | size *= totalram_pages; | 3306 | size *= totalram_pages(); |
3305 | do_div(size, 100); | 3307 | do_div(size, 100); |
3306 | rest++; | 3308 | rest++; |
3307 | } | 3309 | } |
@@ -406,19 +406,6 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, | |||
406 | return page->s_mem + cache->size * idx; | 406 | return page->s_mem + cache->size * idx; |
407 | } | 407 | } |
408 | 408 | ||
409 | /* | ||
410 | * We want to avoid an expensive divide : (offset / cache->size) | ||
411 | * Using the fact that size is a constant for a particular cache, | ||
412 | * we can replace (offset / cache->size) by | ||
413 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) | ||
414 | */ | ||
415 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, | ||
416 | const struct page *page, void *obj) | ||
417 | { | ||
418 | u32 offset = (obj - page->s_mem); | ||
419 | return reciprocal_divide(offset, cache->reciprocal_buffer_size); | ||
420 | } | ||
421 | |||
422 | #define BOOT_CPUCACHE_ENTRIES 1 | 409 | #define BOOT_CPUCACHE_ENTRIES 1 |
423 | /* internal cache of cache description objs */ | 410 | /* internal cache of cache description objs */ |
424 | static struct kmem_cache kmem_cache_boot = { | 411 | static struct kmem_cache kmem_cache_boot = { |
@@ -1248,7 +1235,7 @@ void __init kmem_cache_init(void) | |||
1248 | * page orders on machines with more than 32MB of memory if | 1235 | * page orders on machines with more than 32MB of memory if |
1249 | * not overridden on the command line. | 1236 | * not overridden on the command line. |
1250 | */ | 1237 | */ |
1251 | if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) | 1238 | if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT) |
1252 | slab_max_order = SLAB_MAX_ORDER_HI; | 1239 | slab_max_order = SLAB_MAX_ORDER_HI; |
1253 | 1240 | ||
1254 | /* Bootstrap is tricky, because several objects are allocated | 1241 | /* Bootstrap is tricky, because several objects are allocated |
@@ -2370,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, | |||
2370 | void *freelist; | 2357 | void *freelist; |
2371 | void *addr = page_address(page); | 2358 | void *addr = page_address(page); |
2372 | 2359 | ||
2373 | page->s_mem = addr + colour_off; | 2360 | page->s_mem = kasan_reset_tag(addr) + colour_off; |
2374 | page->active = 0; | 2361 | page->active = 0; |
2375 | 2362 | ||
2376 | if (OBJFREELIST_SLAB(cachep)) | 2363 | if (OBJFREELIST_SLAB(cachep)) |
@@ -2574,7 +2561,7 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
2574 | 2561 | ||
2575 | for (i = 0; i < cachep->num; i++) { | 2562 | for (i = 0; i < cachep->num; i++) { |
2576 | objp = index_to_obj(cachep, page, i); | 2563 | objp = index_to_obj(cachep, page, i); |
2577 | kasan_init_slab_obj(cachep, objp); | 2564 | objp = kasan_init_slab_obj(cachep, objp); |
2578 | 2565 | ||
2579 | /* constructor could break poison info */ | 2566 | /* constructor could break poison info */ |
2580 | if (DEBUG == 0 && cachep->ctor) { | 2567 | if (DEBUG == 0 && cachep->ctor) { |
@@ -3551,7 +3538,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
3551 | { | 3538 | { |
3552 | void *ret = slab_alloc(cachep, flags, _RET_IP_); | 3539 | void *ret = slab_alloc(cachep, flags, _RET_IP_); |
3553 | 3540 | ||
3554 | kasan_slab_alloc(cachep, ret, flags); | 3541 | ret = kasan_slab_alloc(cachep, ret, flags); |
3555 | trace_kmem_cache_alloc(_RET_IP_, ret, | 3542 | trace_kmem_cache_alloc(_RET_IP_, ret, |
3556 | cachep->object_size, cachep->size, flags); | 3543 | cachep->object_size, cachep->size, flags); |
3557 | 3544 | ||
@@ -3617,7 +3604,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) | |||
3617 | 3604 | ||
3618 | ret = slab_alloc(cachep, flags, _RET_IP_); | 3605 | ret = slab_alloc(cachep, flags, _RET_IP_); |
3619 | 3606 | ||
3620 | kasan_kmalloc(cachep, ret, size, flags); | 3607 | ret = kasan_kmalloc(cachep, ret, size, flags); |
3621 | trace_kmalloc(_RET_IP_, ret, | 3608 | trace_kmalloc(_RET_IP_, ret, |
3622 | size, cachep->size, flags); | 3609 | size, cachep->size, flags); |
3623 | return ret; | 3610 | return ret; |
@@ -3641,7 +3628,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
3641 | { | 3628 | { |
3642 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); | 3629 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); |
3643 | 3630 | ||
3644 | kasan_slab_alloc(cachep, ret, flags); | 3631 | ret = kasan_slab_alloc(cachep, ret, flags); |
3645 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 3632 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
3646 | cachep->object_size, cachep->size, | 3633 | cachep->object_size, cachep->size, |
3647 | flags, nodeid); | 3634 | flags, nodeid); |
@@ -3660,7 +3647,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, | |||
3660 | 3647 | ||
3661 | ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); | 3648 | ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); |
3662 | 3649 | ||
3663 | kasan_kmalloc(cachep, ret, size, flags); | 3650 | ret = kasan_kmalloc(cachep, ret, size, flags); |
3664 | trace_kmalloc_node(_RET_IP_, ret, | 3651 | trace_kmalloc_node(_RET_IP_, ret, |
3665 | size, cachep->size, | 3652 | size, cachep->size, |
3666 | flags, nodeid); | 3653 | flags, nodeid); |
@@ -3681,7 +3668,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) | |||
3681 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3668 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
3682 | return cachep; | 3669 | return cachep; |
3683 | ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); | 3670 | ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); |
3684 | kasan_kmalloc(cachep, ret, size, flags); | 3671 | ret = kasan_kmalloc(cachep, ret, size, flags); |
3685 | 3672 | ||
3686 | return ret; | 3673 | return ret; |
3687 | } | 3674 | } |
@@ -3719,7 +3706,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
3719 | return cachep; | 3706 | return cachep; |
3720 | ret = slab_alloc(cachep, flags, caller); | 3707 | ret = slab_alloc(cachep, flags, caller); |
3721 | 3708 | ||
3722 | kasan_kmalloc(cachep, ret, size, flags); | 3709 | ret = kasan_kmalloc(cachep, ret, size, flags); |
3723 | trace_kmalloc(caller, ret, | 3710 | trace_kmalloc(caller, ret, |
3724 | size, cachep->size, flags); | 3711 | size, cachep->size, flags); |
3725 | 3712 | ||
@@ -441,7 +441,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, | |||
441 | 441 | ||
442 | kmemleak_alloc_recursive(object, s->object_size, 1, | 442 | kmemleak_alloc_recursive(object, s->object_size, 1, |
443 | s->flags, flags); | 443 | s->flags, flags); |
444 | kasan_slab_alloc(s, object, flags); | 444 | p[i] = kasan_slab_alloc(s, object, flags); |
445 | } | 445 | } |
446 | 446 | ||
447 | if (memcg_kmem_enabled()) | 447 | if (memcg_kmem_enabled()) |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 9c11e8a937d2..70b0cc85db67 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -1029,10 +1029,8 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) | |||
1029 | 1029 | ||
1030 | index = size_index[size_index_elem(size)]; | 1030 | index = size_index[size_index_elem(size)]; |
1031 | } else { | 1031 | } else { |
1032 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { | 1032 | if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE)) |
1033 | WARN_ON(1); | ||
1034 | return NULL; | 1033 | return NULL; |
1035 | } | ||
1036 | index = fls(size - 1); | 1034 | index = fls(size - 1); |
1037 | } | 1035 | } |
1038 | 1036 | ||
@@ -1204,7 +1202,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) | |||
1204 | page = alloc_pages(flags, order); | 1202 | page = alloc_pages(flags, order); |
1205 | ret = page ? page_address(page) : NULL; | 1203 | ret = page ? page_address(page) : NULL; |
1206 | kmemleak_alloc(ret, size, 1, flags); | 1204 | kmemleak_alloc(ret, size, 1, flags); |
1207 | kasan_kmalloc_large(ret, size, flags); | 1205 | ret = kasan_kmalloc_large(ret, size, flags); |
1208 | return ret; | 1206 | return ret; |
1209 | } | 1207 | } |
1210 | EXPORT_SYMBOL(kmalloc_order); | 1208 | EXPORT_SYMBOL(kmalloc_order); |
@@ -1482,7 +1480,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size, | |||
1482 | ks = ksize(p); | 1480 | ks = ksize(p); |
1483 | 1481 | ||
1484 | if (ks >= new_size) { | 1482 | if (ks >= new_size) { |
1485 | kasan_krealloc((void *)p, new_size, flags); | 1483 | p = kasan_krealloc((void *)p, new_size, flags); |
1486 | return (void *)p; | 1484 | return (void *)p; |
1487 | } | 1485 | } |
1488 | 1486 | ||
@@ -1534,7 +1532,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
1534 | } | 1532 | } |
1535 | 1533 | ||
1536 | ret = __do_krealloc(p, new_size, flags); | 1534 | ret = __do_krealloc(p, new_size, flags); |
1537 | if (ret && p != ret) | 1535 | if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) |
1538 | kfree(p); | 1536 | kfree(p); |
1539 | 1537 | ||
1540 | return ret; | 1538 | return ret; |
@@ -1372,10 +1372,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, | |||
1372 | * Hooks for other subsystems that check memory allocations. In a typical | 1372 | * Hooks for other subsystems that check memory allocations. In a typical |
1373 | * production configuration these hooks all should produce no code at all. | 1373 | * production configuration these hooks all should produce no code at all. |
1374 | */ | 1374 | */ |
1375 | static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) | 1375 | static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) |
1376 | { | 1376 | { |
1377 | kmemleak_alloc(ptr, size, 1, flags); | 1377 | kmemleak_alloc(ptr, size, 1, flags); |
1378 | kasan_kmalloc_large(ptr, size, flags); | 1378 | return kasan_kmalloc_large(ptr, size, flags); |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | static __always_inline void kfree_hook(void *x) | 1381 | static __always_inline void kfree_hook(void *x) |
@@ -1451,16 +1451,17 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, | |||
1451 | #endif | 1451 | #endif |
1452 | } | 1452 | } |
1453 | 1453 | ||
1454 | static void setup_object(struct kmem_cache *s, struct page *page, | 1454 | static void *setup_object(struct kmem_cache *s, struct page *page, |
1455 | void *object) | 1455 | void *object) |
1456 | { | 1456 | { |
1457 | setup_object_debug(s, page, object); | 1457 | setup_object_debug(s, page, object); |
1458 | kasan_init_slab_obj(s, object); | 1458 | object = kasan_init_slab_obj(s, object); |
1459 | if (unlikely(s->ctor)) { | 1459 | if (unlikely(s->ctor)) { |
1460 | kasan_unpoison_object_data(s, object); | 1460 | kasan_unpoison_object_data(s, object); |
1461 | s->ctor(object); | 1461 | s->ctor(object); |
1462 | kasan_poison_object_data(s, object); | 1462 | kasan_poison_object_data(s, object); |
1463 | } | 1463 | } |
1464 | return object; | ||
1464 | } | 1465 | } |
1465 | 1466 | ||
1466 | /* | 1467 | /* |
@@ -1568,16 +1569,16 @@ static bool shuffle_freelist(struct kmem_cache *s, struct page *page) | |||
1568 | /* First entry is used as the base of the freelist */ | 1569 | /* First entry is used as the base of the freelist */ |
1569 | cur = next_freelist_entry(s, page, &pos, start, page_limit, | 1570 | cur = next_freelist_entry(s, page, &pos, start, page_limit, |
1570 | freelist_count); | 1571 | freelist_count); |
1572 | cur = setup_object(s, page, cur); | ||
1571 | page->freelist = cur; | 1573 | page->freelist = cur; |
1572 | 1574 | ||
1573 | for (idx = 1; idx < page->objects; idx++) { | 1575 | for (idx = 1; idx < page->objects; idx++) { |
1574 | setup_object(s, page, cur); | ||
1575 | next = next_freelist_entry(s, page, &pos, start, page_limit, | 1576 | next = next_freelist_entry(s, page, &pos, start, page_limit, |
1576 | freelist_count); | 1577 | freelist_count); |
1578 | next = setup_object(s, page, next); | ||
1577 | set_freepointer(s, cur, next); | 1579 | set_freepointer(s, cur, next); |
1578 | cur = next; | 1580 | cur = next; |
1579 | } | 1581 | } |
1580 | setup_object(s, page, cur); | ||
1581 | set_freepointer(s, cur, NULL); | 1582 | set_freepointer(s, cur, NULL); |
1582 | 1583 | ||
1583 | return true; | 1584 | return true; |
@@ -1599,7 +1600,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1599 | struct page *page; | 1600 | struct page *page; |
1600 | struct kmem_cache_order_objects oo = s->oo; | 1601 | struct kmem_cache_order_objects oo = s->oo; |
1601 | gfp_t alloc_gfp; | 1602 | gfp_t alloc_gfp; |
1602 | void *start, *p; | 1603 | void *start, *p, *next; |
1603 | int idx, order; | 1604 | int idx, order; |
1604 | bool shuffle; | 1605 | bool shuffle; |
1605 | 1606 | ||
@@ -1651,13 +1652,16 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1651 | 1652 | ||
1652 | if (!shuffle) { | 1653 | if (!shuffle) { |
1653 | for_each_object_idx(p, idx, s, start, page->objects) { | 1654 | for_each_object_idx(p, idx, s, start, page->objects) { |
1654 | setup_object(s, page, p); | 1655 | if (likely(idx < page->objects)) { |
1655 | if (likely(idx < page->objects)) | 1656 | next = p + s->size; |
1656 | set_freepointer(s, p, p + s->size); | 1657 | next = setup_object(s, page, next); |
1657 | else | 1658 | set_freepointer(s, p, next); |
1659 | } else | ||
1658 | set_freepointer(s, p, NULL); | 1660 | set_freepointer(s, p, NULL); |
1659 | } | 1661 | } |
1660 | page->freelist = fixup_red_left(s, start); | 1662 | start = fixup_red_left(s, start); |
1663 | start = setup_object(s, page, start); | ||
1664 | page->freelist = start; | ||
1661 | } | 1665 | } |
1662 | 1666 | ||
1663 | page->inuse = page->objects; | 1667 | page->inuse = page->objects; |
@@ -2127,26 +2131,15 @@ redo: | |||
2127 | } | 2131 | } |
2128 | 2132 | ||
2129 | if (l != m) { | 2133 | if (l != m) { |
2130 | |||
2131 | if (l == M_PARTIAL) | 2134 | if (l == M_PARTIAL) |
2132 | |||
2133 | remove_partial(n, page); | 2135 | remove_partial(n, page); |
2134 | |||
2135 | else if (l == M_FULL) | 2136 | else if (l == M_FULL) |
2136 | |||
2137 | remove_full(s, n, page); | 2137 | remove_full(s, n, page); |
2138 | 2138 | ||
2139 | if (m == M_PARTIAL) { | 2139 | if (m == M_PARTIAL) |
2140 | |||
2141 | add_partial(n, page, tail); | 2140 | add_partial(n, page, tail); |
2142 | stat(s, tail); | 2141 | else if (m == M_FULL) |
2143 | |||
2144 | } else if (m == M_FULL) { | ||
2145 | |||
2146 | stat(s, DEACTIVATE_FULL); | ||
2147 | add_full(s, n, page); | 2142 | add_full(s, n, page); |
2148 | |||
2149 | } | ||
2150 | } | 2143 | } |
2151 | 2144 | ||
2152 | l = m; | 2145 | l = m; |
@@ -2159,7 +2152,11 @@ redo: | |||
2159 | if (lock) | 2152 | if (lock) |
2160 | spin_unlock(&n->list_lock); | 2153 | spin_unlock(&n->list_lock); |
2161 | 2154 | ||
2162 | if (m == M_FREE) { | 2155 | if (m == M_PARTIAL) |
2156 | stat(s, tail); | ||
2157 | else if (m == M_FULL) | ||
2158 | stat(s, DEACTIVATE_FULL); | ||
2159 | else if (m == M_FREE) { | ||
2163 | stat(s, DEACTIVATE_EMPTY); | 2160 | stat(s, DEACTIVATE_EMPTY); |
2164 | discard_slab(s, page); | 2161 | discard_slab(s, page); |
2165 | stat(s, FREE_SLAB); | 2162 | stat(s, FREE_SLAB); |
@@ -2313,12 +2310,10 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) | |||
2313 | { | 2310 | { |
2314 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); | 2311 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
2315 | 2312 | ||
2316 | if (likely(c)) { | 2313 | if (c->page) |
2317 | if (c->page) | 2314 | flush_slab(s, c); |
2318 | flush_slab(s, c); | ||
2319 | 2315 | ||
2320 | unfreeze_partials(s, c); | 2316 | unfreeze_partials(s, c); |
2321 | } | ||
2322 | } | 2317 | } |
2323 | 2318 | ||
2324 | static void flush_cpu_slab(void *d) | 2319 | static void flush_cpu_slab(void *d) |
@@ -2367,7 +2362,7 @@ static int slub_cpu_dead(unsigned int cpu) | |||
2367 | static inline int node_match(struct page *page, int node) | 2362 | static inline int node_match(struct page *page, int node) |
2368 | { | 2363 | { |
2369 | #ifdef CONFIG_NUMA | 2364 | #ifdef CONFIG_NUMA |
2370 | if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) | 2365 | if (node != NUMA_NO_NODE && page_to_nid(page) != node) |
2371 | return 0; | 2366 | return 0; |
2372 | #endif | 2367 | #endif |
2373 | return 1; | 2368 | return 1; |
@@ -2768,7 +2763,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | |||
2768 | { | 2763 | { |
2769 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); | 2764 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
2770 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); | 2765 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); |
2771 | kasan_kmalloc(s, ret, size, gfpflags); | 2766 | ret = kasan_kmalloc(s, ret, size, gfpflags); |
2772 | return ret; | 2767 | return ret; |
2773 | } | 2768 | } |
2774 | EXPORT_SYMBOL(kmem_cache_alloc_trace); | 2769 | EXPORT_SYMBOL(kmem_cache_alloc_trace); |
@@ -2796,7 +2791,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
2796 | trace_kmalloc_node(_RET_IP_, ret, | 2791 | trace_kmalloc_node(_RET_IP_, ret, |
2797 | size, s->size, gfpflags, node); | 2792 | size, s->size, gfpflags, node); |
2798 | 2793 | ||
2799 | kasan_kmalloc(s, ret, size, gfpflags); | 2794 | ret = kasan_kmalloc(s, ret, size, gfpflags); |
2800 | return ret; | 2795 | return ret; |
2801 | } | 2796 | } |
2802 | EXPORT_SYMBOL(kmem_cache_alloc_node_trace); | 2797 | EXPORT_SYMBOL(kmem_cache_alloc_node_trace); |
@@ -2992,7 +2987,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page, | |||
2992 | do_slab_free(s, page, head, tail, cnt, addr); | 2987 | do_slab_free(s, page, head, tail, cnt, addr); |
2993 | } | 2988 | } |
2994 | 2989 | ||
2995 | #ifdef CONFIG_KASAN | 2990 | #ifdef CONFIG_KASAN_GENERIC |
2996 | void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) | 2991 | void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) |
2997 | { | 2992 | { |
2998 | do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr); | 2993 | do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr); |
@@ -3364,16 +3359,16 @@ static void early_kmem_cache_node_alloc(int node) | |||
3364 | 3359 | ||
3365 | n = page->freelist; | 3360 | n = page->freelist; |
3366 | BUG_ON(!n); | 3361 | BUG_ON(!n); |
3367 | page->freelist = get_freepointer(kmem_cache_node, n); | ||
3368 | page->inuse = 1; | ||
3369 | page->frozen = 0; | ||
3370 | kmem_cache_node->node[node] = n; | ||
3371 | #ifdef CONFIG_SLUB_DEBUG | 3362 | #ifdef CONFIG_SLUB_DEBUG |
3372 | init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); | 3363 | init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); |
3373 | init_tracking(kmem_cache_node, n); | 3364 | init_tracking(kmem_cache_node, n); |
3374 | #endif | 3365 | #endif |
3375 | kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), | 3366 | n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), |
3376 | GFP_KERNEL); | 3367 | GFP_KERNEL); |
3368 | page->freelist = get_freepointer(kmem_cache_node, n); | ||
3369 | page->inuse = 1; | ||
3370 | page->frozen = 0; | ||
3371 | kmem_cache_node->node[node] = n; | ||
3377 | init_kmem_cache_node(n); | 3372 | init_kmem_cache_node(n); |
3378 | inc_slabs_node(kmem_cache_node, node, page->objects); | 3373 | inc_slabs_node(kmem_cache_node, node, page->objects); |
3379 | 3374 | ||
@@ -3784,7 +3779,7 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
3784 | 3779 | ||
3785 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); | 3780 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); |
3786 | 3781 | ||
3787 | kasan_kmalloc(s, ret, size, flags); | 3782 | ret = kasan_kmalloc(s, ret, size, flags); |
3788 | 3783 | ||
3789 | return ret; | 3784 | return ret; |
3790 | } | 3785 | } |
@@ -3801,8 +3796,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | |||
3801 | if (page) | 3796 | if (page) |
3802 | ptr = page_address(page); | 3797 | ptr = page_address(page); |
3803 | 3798 | ||
3804 | kmalloc_large_node_hook(ptr, size, flags); | 3799 | return kmalloc_large_node_hook(ptr, size, flags); |
3805 | return ptr; | ||
3806 | } | 3800 | } |
3807 | 3801 | ||
3808 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 3802 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
@@ -3829,7 +3823,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
3829 | 3823 | ||
3830 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); | 3824 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); |
3831 | 3825 | ||
3832 | kasan_kmalloc(s, ret, size, flags); | 3826 | ret = kasan_kmalloc(s, ret, size, flags); |
3833 | 3827 | ||
3834 | return ret; | 3828 | return ret; |
3835 | } | 3829 | } |
diff --git a/mm/sparse.c b/mm/sparse.c index 3abc8cc50201..7ea5dc6c6b19 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -678,25 +678,24 @@ static void free_map_bootmem(struct page *memmap) | |||
678 | * set. If this is <=0, then that means that the passed-in | 678 | * set. If this is <=0, then that means that the passed-in |
679 | * map was not consumed and must be freed. | 679 | * map was not consumed and must be freed. |
680 | */ | 680 | */ |
681 | int __meminit sparse_add_one_section(struct pglist_data *pgdat, | 681 | int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, |
682 | unsigned long start_pfn, struct vmem_altmap *altmap) | 682 | struct vmem_altmap *altmap) |
683 | { | 683 | { |
684 | unsigned long section_nr = pfn_to_section_nr(start_pfn); | 684 | unsigned long section_nr = pfn_to_section_nr(start_pfn); |
685 | struct mem_section *ms; | 685 | struct mem_section *ms; |
686 | struct page *memmap; | 686 | struct page *memmap; |
687 | unsigned long *usemap; | 687 | unsigned long *usemap; |
688 | unsigned long flags; | ||
689 | int ret; | 688 | int ret; |
690 | 689 | ||
691 | /* | 690 | /* |
692 | * no locking for this, because it does its own | 691 | * no locking for this, because it does its own |
693 | * plus, it does a kmalloc | 692 | * plus, it does a kmalloc |
694 | */ | 693 | */ |
695 | ret = sparse_index_init(section_nr, pgdat->node_id); | 694 | ret = sparse_index_init(section_nr, nid); |
696 | if (ret < 0 && ret != -EEXIST) | 695 | if (ret < 0 && ret != -EEXIST) |
697 | return ret; | 696 | return ret; |
698 | ret = 0; | 697 | ret = 0; |
699 | memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap); | 698 | memmap = kmalloc_section_memmap(section_nr, nid, altmap); |
700 | if (!memmap) | 699 | if (!memmap) |
701 | return -ENOMEM; | 700 | return -ENOMEM; |
702 | usemap = __kmalloc_section_usemap(); | 701 | usemap = __kmalloc_section_usemap(); |
@@ -705,8 +704,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, | |||
705 | return -ENOMEM; | 704 | return -ENOMEM; |
706 | } | 705 | } |
707 | 706 | ||
708 | pgdat_resize_lock(pgdat, &flags); | ||
709 | |||
710 | ms = __pfn_to_section(start_pfn); | 707 | ms = __pfn_to_section(start_pfn); |
711 | if (ms->section_mem_map & SECTION_MARKED_PRESENT) { | 708 | if (ms->section_mem_map & SECTION_MARKED_PRESENT) { |
712 | ret = -EEXIST; | 709 | ret = -EEXIST; |
@@ -723,7 +720,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, | |||
723 | sparse_init_one_section(ms, section_nr, memmap, usemap); | 720 | sparse_init_one_section(ms, section_nr, memmap, usemap); |
724 | 721 | ||
725 | out: | 722 | out: |
726 | pgdat_resize_unlock(pgdat, &flags); | ||
727 | if (ret < 0) { | 723 | if (ret < 0) { |
728 | kfree(usemap); | 724 | kfree(usemap); |
729 | __kfree_section_memmap(memmap, altmap); | 725 | __kfree_section_memmap(memmap, altmap); |
@@ -740,6 +736,15 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) | |||
740 | if (!memmap) | 736 | if (!memmap) |
741 | return; | 737 | return; |
742 | 738 | ||
739 | /* | ||
740 | * A further optimization is to have per section refcounted | ||
741 | * num_poisoned_pages. But that would need more space per memmap, so | ||
742 | * for now just do a quick global check to speed up this routine in the | ||
743 | * absence of bad pages. | ||
744 | */ | ||
745 | if (atomic_long_read(&num_poisoned_pages) == 0) | ||
746 | return; | ||
747 | |||
743 | for (i = 0; i < nr_pages; i++) { | 748 | for (i = 0; i < nr_pages; i++) { |
744 | if (PageHWPoison(&memmap[i])) { | 749 | if (PageHWPoison(&memmap[i])) { |
745 | atomic_long_sub(1, &num_poisoned_pages); | 750 | atomic_long_sub(1, &num_poisoned_pages); |
@@ -785,10 +790,8 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | |||
785 | unsigned long map_offset, struct vmem_altmap *altmap) | 790 | unsigned long map_offset, struct vmem_altmap *altmap) |
786 | { | 791 | { |
787 | struct page *memmap = NULL; | 792 | struct page *memmap = NULL; |
788 | unsigned long *usemap = NULL, flags; | 793 | unsigned long *usemap = NULL; |
789 | struct pglist_data *pgdat = zone->zone_pgdat; | ||
790 | 794 | ||
791 | pgdat_resize_lock(pgdat, &flags); | ||
792 | if (ms->section_mem_map) { | 795 | if (ms->section_mem_map) { |
793 | usemap = ms->pageblock_flags; | 796 | usemap = ms->pageblock_flags; |
794 | memmap = sparse_decode_mem_map(ms->section_mem_map, | 797 | memmap = sparse_decode_mem_map(ms->section_mem_map, |
@@ -796,7 +799,6 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | |||
796 | ms->section_mem_map = 0; | 799 | ms->section_mem_map = 0; |
797 | ms->pageblock_flags = NULL; | 800 | ms->pageblock_flags = NULL; |
798 | } | 801 | } |
799 | pgdat_resize_unlock(pgdat, &flags); | ||
800 | 802 | ||
801 | clear_hwpoisoned_pages(memmap + map_offset, | 803 | clear_hwpoisoned_pages(memmap + map_offset, |
802 | PAGES_PER_SECTION - map_offset); | 804 | PAGES_PER_SECTION - map_offset); |
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(pagevec_lookup_range_nr_tag); | |||
1022 | */ | 1022 | */ |
1023 | void __init swap_setup(void) | 1023 | void __init swap_setup(void) |
1024 | { | 1024 | { |
1025 | unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); | 1025 | unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); |
1026 | 1026 | ||
1027 | /* Use a smaller cluster for small-memory machines */ | 1027 | /* Use a smaller cluster for small-memory machines */ |
1028 | if (megs < 16) | 1028 | if (megs < 16) |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 8688ae65ef58..dbac1d49469d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -2197,7 +2197,8 @@ int try_to_unuse(unsigned int type, bool frontswap, | |||
2197 | */ | 2197 | */ |
2198 | if (PageSwapCache(page) && | 2198 | if (PageSwapCache(page) && |
2199 | likely(page_private(page) == entry.val) && | 2199 | likely(page_private(page) == entry.val) && |
2200 | !page_swapped(page)) | 2200 | (!PageTransCompound(page) || |
2201 | !swap_page_trans_huge_swapped(si, entry))) | ||
2201 | delete_from_swap_cache(compound_head(page)); | 2202 | delete_from_swap_cache(compound_head(page)); |
2202 | 2203 | ||
2203 | /* | 2204 | /* |
@@ -2812,8 +2813,9 @@ static struct swap_info_struct *alloc_swap_info(void) | |||
2812 | struct swap_info_struct *p; | 2813 | struct swap_info_struct *p; |
2813 | unsigned int type; | 2814 | unsigned int type; |
2814 | int i; | 2815 | int i; |
2816 | int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node); | ||
2815 | 2817 | ||
2816 | p = kvzalloc(sizeof(*p), GFP_KERNEL); | 2818 | p = kvzalloc(size, GFP_KERNEL); |
2817 | if (!p) | 2819 | if (!p) |
2818 | return ERR_PTR(-ENOMEM); | 2820 | return ERR_PTR(-ENOMEM); |
2819 | 2821 | ||
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 458acda96f20..48368589f519 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c | |||
@@ -267,10 +267,14 @@ retry: | |||
267 | VM_BUG_ON(dst_addr & ~huge_page_mask(h)); | 267 | VM_BUG_ON(dst_addr & ~huge_page_mask(h)); |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * Serialize via hugetlb_fault_mutex | 270 | * Serialize via i_mmap_rwsem and hugetlb_fault_mutex. |
271 | * i_mmap_rwsem ensures the dst_pte remains valid even | ||
272 | * in the case of shared pmds. fault mutex prevents | ||
273 | * races with other faulting threads. | ||
271 | */ | 274 | */ |
272 | idx = linear_page_index(dst_vma, dst_addr); | ||
273 | mapping = dst_vma->vm_file->f_mapping; | 275 | mapping = dst_vma->vm_file->f_mapping; |
276 | i_mmap_lock_read(mapping); | ||
277 | idx = linear_page_index(dst_vma, dst_addr); | ||
274 | hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, | 278 | hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, |
275 | idx, dst_addr); | 279 | idx, dst_addr); |
276 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 280 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
@@ -279,6 +283,7 @@ retry: | |||
279 | dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); | 283 | dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); |
280 | if (!dst_pte) { | 284 | if (!dst_pte) { |
281 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 285 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
286 | i_mmap_unlock_read(mapping); | ||
282 | goto out_unlock; | 287 | goto out_unlock; |
283 | } | 288 | } |
284 | 289 | ||
@@ -286,6 +291,7 @@ retry: | |||
286 | dst_pteval = huge_ptep_get(dst_pte); | 291 | dst_pteval = huge_ptep_get(dst_pte); |
287 | if (!huge_pte_none(dst_pteval)) { | 292 | if (!huge_pte_none(dst_pteval)) { |
288 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 293 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
294 | i_mmap_unlock_read(mapping); | ||
289 | goto out_unlock; | 295 | goto out_unlock; |
290 | } | 296 | } |
291 | 297 | ||
@@ -293,6 +299,7 @@ retry: | |||
293 | dst_addr, src_addr, &page); | 299 | dst_addr, src_addr, &page); |
294 | 300 | ||
295 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 301 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
302 | i_mmap_unlock_read(mapping); | ||
296 | vm_alloc_shared = vm_shared; | 303 | vm_alloc_shared = vm_shared; |
297 | 304 | ||
298 | cond_resched(); | 305 | cond_resched(); |
@@ -593,7 +593,7 @@ unsigned long vm_commit_limit(void) | |||
593 | if (sysctl_overcommit_kbytes) | 593 | if (sysctl_overcommit_kbytes) |
594 | allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); | 594 | allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); |
595 | else | 595 | else |
596 | allowed = ((totalram_pages - hugetlb_total_pages()) | 596 | allowed = ((totalram_pages() - hugetlb_total_pages()) |
597 | * sysctl_overcommit_ratio / 100); | 597 | * sysctl_overcommit_ratio / 100); |
598 | allowed += total_swap_pages; | 598 | allowed += total_swap_pages; |
599 | 599 | ||
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 97d4b25d0373..871e41c55e23 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1634,7 +1634,7 @@ void *vmap(struct page **pages, unsigned int count, | |||
1634 | 1634 | ||
1635 | might_sleep(); | 1635 | might_sleep(); |
1636 | 1636 | ||
1637 | if (count > totalram_pages) | 1637 | if (count > totalram_pages()) |
1638 | return NULL; | 1638 | return NULL; |
1639 | 1639 | ||
1640 | size = (unsigned long)count << PAGE_SHIFT; | 1640 | size = (unsigned long)count << PAGE_SHIFT; |
@@ -1739,7 +1739,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, | |||
1739 | unsigned long real_size = size; | 1739 | unsigned long real_size = size; |
1740 | 1740 | ||
1741 | size = PAGE_ALIGN(size); | 1741 | size = PAGE_ALIGN(size); |
1742 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) | 1742 | if (!size || (size >> PAGE_SHIFT) > totalram_pages()) |
1743 | goto fail; | 1743 | goto fail; |
1744 | 1744 | ||
1745 | area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | | 1745 | area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 24ab1f7394ab..a714c4f800e9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -88,6 +88,9 @@ struct scan_control { | |||
88 | /* Can pages be swapped as part of reclaim? */ | 88 | /* Can pages be swapped as part of reclaim? */ |
89 | unsigned int may_swap:1; | 89 | unsigned int may_swap:1; |
90 | 90 | ||
91 | /* e.g. boosted watermark reclaim leaves slabs alone */ | ||
92 | unsigned int may_shrinkslab:1; | ||
93 | |||
91 | /* | 94 | /* |
92 | * Cgroups are not reclaimed below their configured memory.low, | 95 | * Cgroups are not reclaimed below their configured memory.low, |
93 | * unless we threaten to OOM. If any cgroups are skipped due to | 96 | * unless we threaten to OOM. If any cgroups are skipped due to |
@@ -1457,14 +1460,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1457 | count_memcg_page_event(page, PGLAZYFREED); | 1460 | count_memcg_page_event(page, PGLAZYFREED); |
1458 | } else if (!mapping || !__remove_mapping(mapping, page, true)) | 1461 | } else if (!mapping || !__remove_mapping(mapping, page, true)) |
1459 | goto keep_locked; | 1462 | goto keep_locked; |
1460 | /* | 1463 | |
1461 | * At this point, we have no other references and there is | 1464 | unlock_page(page); |
1462 | * no way to pick any more up (removed from LRU, removed | ||
1463 | * from pagecache). Can use non-atomic bitops now (and | ||
1464 | * we obviously don't have to worry about waking up a process | ||
1465 | * waiting on the page lock, because there are no references. | ||
1466 | */ | ||
1467 | __ClearPageLocked(page); | ||
1468 | free_it: | 1465 | free_it: |
1469 | nr_reclaimed++; | 1466 | nr_reclaimed++; |
1470 | 1467 | ||
@@ -2756,8 +2753,10 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) | |||
2756 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); | 2753 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); |
2757 | node_lru_pages += lru_pages; | 2754 | node_lru_pages += lru_pages; |
2758 | 2755 | ||
2759 | shrink_slab(sc->gfp_mask, pgdat->node_id, | 2756 | if (sc->may_shrinkslab) { |
2757 | shrink_slab(sc->gfp_mask, pgdat->node_id, | ||
2760 | memcg, sc->priority); | 2758 | memcg, sc->priority); |
2759 | } | ||
2761 | 2760 | ||
2762 | /* Record the group's reclaim efficiency */ | 2761 | /* Record the group's reclaim efficiency */ |
2763 | vmpressure(sc->gfp_mask, memcg, false, | 2762 | vmpressure(sc->gfp_mask, memcg, false, |
@@ -3239,6 +3238,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
3239 | .may_writepage = !laptop_mode, | 3238 | .may_writepage = !laptop_mode, |
3240 | .may_unmap = 1, | 3239 | .may_unmap = 1, |
3241 | .may_swap = 1, | 3240 | .may_swap = 1, |
3241 | .may_shrinkslab = 1, | ||
3242 | }; | 3242 | }; |
3243 | 3243 | ||
3244 | /* | 3244 | /* |
@@ -3283,6 +3283,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | |||
3283 | .may_unmap = 1, | 3283 | .may_unmap = 1, |
3284 | .reclaim_idx = MAX_NR_ZONES - 1, | 3284 | .reclaim_idx = MAX_NR_ZONES - 1, |
3285 | .may_swap = !noswap, | 3285 | .may_swap = !noswap, |
3286 | .may_shrinkslab = 1, | ||
3286 | }; | 3287 | }; |
3287 | unsigned long lru_pages; | 3288 | unsigned long lru_pages; |
3288 | 3289 | ||
@@ -3329,6 +3330,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
3329 | .may_writepage = !laptop_mode, | 3330 | .may_writepage = !laptop_mode, |
3330 | .may_unmap = 1, | 3331 | .may_unmap = 1, |
3331 | .may_swap = may_swap, | 3332 | .may_swap = may_swap, |
3333 | .may_shrinkslab = 1, | ||
3332 | }; | 3334 | }; |
3333 | 3335 | ||
3334 | /* | 3336 | /* |
@@ -3379,6 +3381,30 @@ static void age_active_anon(struct pglist_data *pgdat, | |||
3379 | } while (memcg); | 3381 | } while (memcg); |
3380 | } | 3382 | } |
3381 | 3383 | ||
3384 | static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx) | ||
3385 | { | ||
3386 | int i; | ||
3387 | struct zone *zone; | ||
3388 | |||
3389 | /* | ||
3390 | * Check for watermark boosts top-down as the higher zones | ||
3391 | * are more likely to be boosted. Both watermarks and boosts | ||
3392 | * should not be checked at the time time as reclaim would | ||
3393 | * start prematurely when there is no boosting and a lower | ||
3394 | * zone is balanced. | ||
3395 | */ | ||
3396 | for (i = classzone_idx; i >= 0; i--) { | ||
3397 | zone = pgdat->node_zones + i; | ||
3398 | if (!managed_zone(zone)) | ||
3399 | continue; | ||
3400 | |||
3401 | if (zone->watermark_boost) | ||
3402 | return true; | ||
3403 | } | ||
3404 | |||
3405 | return false; | ||
3406 | } | ||
3407 | |||
3382 | /* | 3408 | /* |
3383 | * Returns true if there is an eligible zone balanced for the request order | 3409 | * Returns true if there is an eligible zone balanced for the request order |
3384 | * and classzone_idx | 3410 | * and classzone_idx |
@@ -3389,6 +3415,10 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) | |||
3389 | unsigned long mark = -1; | 3415 | unsigned long mark = -1; |
3390 | struct zone *zone; | 3416 | struct zone *zone; |
3391 | 3417 | ||
3418 | /* | ||
3419 | * Check watermarks bottom-up as lower zones are more likely to | ||
3420 | * meet watermarks. | ||
3421 | */ | ||
3392 | for (i = 0; i <= classzone_idx; i++) { | 3422 | for (i = 0; i <= classzone_idx; i++) { |
3393 | zone = pgdat->node_zones + i; | 3423 | zone = pgdat->node_zones + i; |
3394 | 3424 | ||
@@ -3517,14 +3547,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3517 | unsigned long nr_soft_reclaimed; | 3547 | unsigned long nr_soft_reclaimed; |
3518 | unsigned long nr_soft_scanned; | 3548 | unsigned long nr_soft_scanned; |
3519 | unsigned long pflags; | 3549 | unsigned long pflags; |
3550 | unsigned long nr_boost_reclaim; | ||
3551 | unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; | ||
3552 | bool boosted; | ||
3520 | struct zone *zone; | 3553 | struct zone *zone; |
3521 | struct scan_control sc = { | 3554 | struct scan_control sc = { |
3522 | .gfp_mask = GFP_KERNEL, | 3555 | .gfp_mask = GFP_KERNEL, |
3523 | .order = order, | 3556 | .order = order, |
3524 | .priority = DEF_PRIORITY, | ||
3525 | .may_writepage = !laptop_mode, | ||
3526 | .may_unmap = 1, | 3557 | .may_unmap = 1, |
3527 | .may_swap = 1, | ||
3528 | }; | 3558 | }; |
3529 | 3559 | ||
3530 | psi_memstall_enter(&pflags); | 3560 | psi_memstall_enter(&pflags); |
@@ -3532,9 +3562,28 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3532 | 3562 | ||
3533 | count_vm_event(PAGEOUTRUN); | 3563 | count_vm_event(PAGEOUTRUN); |
3534 | 3564 | ||
3565 | /* | ||
3566 | * Account for the reclaim boost. Note that the zone boost is left in | ||
3567 | * place so that parallel allocations that are near the watermark will | ||
3568 | * stall or direct reclaim until kswapd is finished. | ||
3569 | */ | ||
3570 | nr_boost_reclaim = 0; | ||
3571 | for (i = 0; i <= classzone_idx; i++) { | ||
3572 | zone = pgdat->node_zones + i; | ||
3573 | if (!managed_zone(zone)) | ||
3574 | continue; | ||
3575 | |||
3576 | nr_boost_reclaim += zone->watermark_boost; | ||
3577 | zone_boosts[i] = zone->watermark_boost; | ||
3578 | } | ||
3579 | boosted = nr_boost_reclaim; | ||
3580 | |||
3581 | restart: | ||
3582 | sc.priority = DEF_PRIORITY; | ||
3535 | do { | 3583 | do { |
3536 | unsigned long nr_reclaimed = sc.nr_reclaimed; | 3584 | unsigned long nr_reclaimed = sc.nr_reclaimed; |
3537 | bool raise_priority = true; | 3585 | bool raise_priority = true; |
3586 | bool balanced; | ||
3538 | bool ret; | 3587 | bool ret; |
3539 | 3588 | ||
3540 | sc.reclaim_idx = classzone_idx; | 3589 | sc.reclaim_idx = classzone_idx; |
@@ -3561,13 +3610,40 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3561 | } | 3610 | } |
3562 | 3611 | ||
3563 | /* | 3612 | /* |
3564 | * Only reclaim if there are no eligible zones. Note that | 3613 | * If the pgdat is imbalanced then ignore boosting and preserve |
3565 | * sc.reclaim_idx is not used as buffer_heads_over_limit may | 3614 | * the watermarks for a later time and restart. Note that the |
3566 | * have adjusted it. | 3615 | * zone watermarks will be still reset at the end of balancing |
3616 | * on the grounds that the normal reclaim should be enough to | ||
3617 | * re-evaluate if boosting is required when kswapd next wakes. | ||
3567 | */ | 3618 | */ |
3568 | if (pgdat_balanced(pgdat, sc.order, classzone_idx)) | 3619 | balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); |
3620 | if (!balanced && nr_boost_reclaim) { | ||
3621 | nr_boost_reclaim = 0; | ||
3622 | goto restart; | ||
3623 | } | ||
3624 | |||
3625 | /* | ||
3626 | * If boosting is not active then only reclaim if there are no | ||
3627 | * eligible zones. Note that sc.reclaim_idx is not used as | ||
3628 | * buffer_heads_over_limit may have adjusted it. | ||
3629 | */ | ||
3630 | if (!nr_boost_reclaim && balanced) | ||
3569 | goto out; | 3631 | goto out; |
3570 | 3632 | ||
3633 | /* Limit the priority of boosting to avoid reclaim writeback */ | ||
3634 | if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) | ||
3635 | raise_priority = false; | ||
3636 | |||
3637 | /* | ||
3638 | * Do not writeback or swap pages for boosted reclaim. The | ||
3639 | * intent is to relieve pressure not issue sub-optimal IO | ||
3640 | * from reclaim context. If no pages are reclaimed, the | ||
3641 | * reclaim will be aborted. | ||
3642 | */ | ||
3643 | sc.may_writepage = !laptop_mode && !nr_boost_reclaim; | ||
3644 | sc.may_swap = !nr_boost_reclaim; | ||
3645 | sc.may_shrinkslab = !nr_boost_reclaim; | ||
3646 | |||
3571 | /* | 3647 | /* |
3572 | * Do some background aging of the anon list, to give | 3648 | * Do some background aging of the anon list, to give |
3573 | * pages a chance to be referenced before reclaiming. All | 3649 | * pages a chance to be referenced before reclaiming. All |
@@ -3619,6 +3695,16 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3619 | * progress in reclaiming pages | 3695 | * progress in reclaiming pages |
3620 | */ | 3696 | */ |
3621 | nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; | 3697 | nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; |
3698 | nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); | ||
3699 | |||
3700 | /* | ||
3701 | * If reclaim made no progress for a boost, stop reclaim as | ||
3702 | * IO cannot be queued and it could be an infinite loop in | ||
3703 | * extreme circumstances. | ||
3704 | */ | ||
3705 | if (nr_boost_reclaim && !nr_reclaimed) | ||
3706 | break; | ||
3707 | |||
3622 | if (raise_priority || !nr_reclaimed) | 3708 | if (raise_priority || !nr_reclaimed) |
3623 | sc.priority--; | 3709 | sc.priority--; |
3624 | } while (sc.priority >= 1); | 3710 | } while (sc.priority >= 1); |
@@ -3627,6 +3713,28 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3627 | pgdat->kswapd_failures++; | 3713 | pgdat->kswapd_failures++; |
3628 | 3714 | ||
3629 | out: | 3715 | out: |
3716 | /* If reclaim was boosted, account for the reclaim done in this pass */ | ||
3717 | if (boosted) { | ||
3718 | unsigned long flags; | ||
3719 | |||
3720 | for (i = 0; i <= classzone_idx; i++) { | ||
3721 | if (!zone_boosts[i]) | ||
3722 | continue; | ||
3723 | |||
3724 | /* Increments are under the zone lock */ | ||
3725 | zone = pgdat->node_zones + i; | ||
3726 | spin_lock_irqsave(&zone->lock, flags); | ||
3727 | zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); | ||
3728 | spin_unlock_irqrestore(&zone->lock, flags); | ||
3729 | } | ||
3730 | |||
3731 | /* | ||
3732 | * As there is now likely space, wakeup kcompact to defragment | ||
3733 | * pageblocks. | ||
3734 | */ | ||
3735 | wakeup_kcompactd(pgdat, pageblock_order, classzone_idx); | ||
3736 | } | ||
3737 | |||
3630 | snapshot_refaults(NULL, pgdat); | 3738 | snapshot_refaults(NULL, pgdat); |
3631 | __fs_reclaim_release(); | 3739 | __fs_reclaim_release(); |
3632 | psi_memstall_leave(&pflags); | 3740 | psi_memstall_leave(&pflags); |
@@ -3855,7 +3963,8 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, | |||
3855 | 3963 | ||
3856 | /* Hopeless node, leave it to direct reclaim if possible */ | 3964 | /* Hopeless node, leave it to direct reclaim if possible */ |
3857 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || | 3965 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || |
3858 | pgdat_balanced(pgdat, order, classzone_idx)) { | 3966 | (pgdat_balanced(pgdat, order, classzone_idx) && |
3967 | !pgdat_watermark_boosted(pgdat, classzone_idx))) { | ||
3859 | /* | 3968 | /* |
3860 | * There may be plenty of free memory available, but it's too | 3969 | * There may be plenty of free memory available, but it's too |
3861 | * fragmented for high-order allocations. Wake up kcompactd | 3970 | * fragmented for high-order allocations. Wake up kcompactd |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 9c624595e904..83b30edc2f7f 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -227,7 +227,7 @@ int calculate_normal_threshold(struct zone *zone) | |||
227 | * 125 1024 10 16-32 GB 9 | 227 | * 125 1024 10 16-32 GB 9 |
228 | */ | 228 | */ |
229 | 229 | ||
230 | mem = zone->managed_pages >> (27 - PAGE_SHIFT); | 230 | mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); |
231 | 231 | ||
232 | threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); | 232 | threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); |
233 | 233 | ||
@@ -1569,7 +1569,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | |||
1569 | high_wmark_pages(zone), | 1569 | high_wmark_pages(zone), |
1570 | zone->spanned_pages, | 1570 | zone->spanned_pages, |
1571 | zone->present_pages, | 1571 | zone->present_pages, |
1572 | zone->managed_pages); | 1572 | zone_managed_pages(zone)); |
1573 | 1573 | ||
1574 | seq_printf(m, | 1574 | seq_printf(m, |
1575 | "\n protection: (%ld", | 1575 | "\n protection: (%ld", |
diff --git a/mm/workingset.c b/mm/workingset.c index d46f8c92aa2f..dcb994f2acc2 100644 --- a/mm/workingset.c +++ b/mm/workingset.c | |||
@@ -549,7 +549,7 @@ static int __init workingset_init(void) | |||
549 | * double the initial memory by using totalram_pages as-is. | 549 | * double the initial memory by using totalram_pages as-is. |
550 | */ | 550 | */ |
551 | timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; | 551 | timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; |
552 | max_order = fls_long(totalram_pages - 1); | 552 | max_order = fls_long(totalram_pages() - 1); |
553 | if (max_order > timestamp_bits) | 553 | if (max_order > timestamp_bits) |
554 | bucket_order = max_order - timestamp_bits; | 554 | bucket_order = max_order - timestamp_bits; |
555 | pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", | 555 | pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", |
diff --git a/mm/zswap.c b/mm/zswap.c index cd91fd9d96b8..a4e4d36ec085 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -219,8 +219,8 @@ static const struct zpool_ops zswap_zpool_ops = { | |||
219 | 219 | ||
220 | static bool zswap_is_full(void) | 220 | static bool zswap_is_full(void) |
221 | { | 221 | { |
222 | return totalram_pages * zswap_max_pool_percent / 100 < | 222 | return totalram_pages() * zswap_max_pool_percent / 100 < |
223 | DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); | 223 | DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); |
224 | } | 224 | } |
225 | 225 | ||
226 | static void zswap_update_total_size(void) | 226 | static void zswap_update_total_size(void) |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 2cc5fbb1b29e..0e2f71ab8367 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1131,6 +1131,7 @@ EXPORT_SYMBOL_GPL(dccp_debug); | |||
1131 | static int __init dccp_init(void) | 1131 | static int __init dccp_init(void) |
1132 | { | 1132 | { |
1133 | unsigned long goal; | 1133 | unsigned long goal; |
1134 | unsigned long nr_pages = totalram_pages(); | ||
1134 | int ehash_order, bhash_order, i; | 1135 | int ehash_order, bhash_order, i; |
1135 | int rc; | 1136 | int rc; |
1136 | 1137 | ||
@@ -1157,10 +1158,10 @@ static int __init dccp_init(void) | |||
1157 | * | 1158 | * |
1158 | * The methodology is similar to that of the buffer cache. | 1159 | * The methodology is similar to that of the buffer cache. |
1159 | */ | 1160 | */ |
1160 | if (totalram_pages >= (128 * 1024)) | 1161 | if (nr_pages >= (128 * 1024)) |
1161 | goal = totalram_pages >> (21 - PAGE_SHIFT); | 1162 | goal = nr_pages >> (21 - PAGE_SHIFT); |
1162 | else | 1163 | else |
1163 | goal = totalram_pages >> (23 - PAGE_SHIFT); | 1164 | goal = nr_pages >> (23 - PAGE_SHIFT); |
1164 | 1165 | ||
1165 | if (thash_entries) | 1166 | if (thash_entries) |
1166 | goal = (thash_entries * | 1167 | goal = (thash_entries * |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 1c002c0fb712..950613ee7881 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -1866,7 +1866,7 @@ void __init dn_route_init(void) | |||
1866 | dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; | 1866 | dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; |
1867 | add_timer(&dn_route_timer); | 1867 | add_timer(&dn_route_timer); |
1868 | 1868 | ||
1869 | goal = totalram_pages >> (26 - PAGE_SHIFT); | 1869 | goal = totalram_pages() >> (26 - PAGE_SHIFT); |
1870 | 1870 | ||
1871 | for(order = 0; (1UL << order) < goal; order++) | 1871 | for(order = 0; (1UL << order) < goal; order++) |
1872 | /* NOTHING */; | 1872 | /* NOTHING */; |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 03b51cdcc731..b467a7cabf40 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
@@ -1000,7 +1000,7 @@ static int __net_init tcp_net_metrics_init(struct net *net) | |||
1000 | 1000 | ||
1001 | slots = tcpmhash_entries; | 1001 | slots = tcpmhash_entries; |
1002 | if (!slots) { | 1002 | if (!slots) { |
1003 | if (totalram_pages >= 128 * 1024) | 1003 | if (totalram_pages() >= 128 * 1024) |
1004 | slots = 16 * 1024; | 1004 | slots = 16 * 1024; |
1005 | else | 1005 | else |
1006 | slots = 8 * 1024; | 1006 | slots = 8 * 1024; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e87c21e47efe..741b533148ba 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -2248,6 +2248,7 @@ static __always_inline unsigned int total_extension_size(void) | |||
2248 | 2248 | ||
2249 | int nf_conntrack_init_start(void) | 2249 | int nf_conntrack_init_start(void) |
2250 | { | 2250 | { |
2251 | unsigned long nr_pages = totalram_pages(); | ||
2251 | int max_factor = 8; | 2252 | int max_factor = 8; |
2252 | int ret = -ENOMEM; | 2253 | int ret = -ENOMEM; |
2253 | int i; | 2254 | int i; |
@@ -2267,11 +2268,11 @@ int nf_conntrack_init_start(void) | |||
2267 | * >= 4GB machines have 65536 buckets. | 2268 | * >= 4GB machines have 65536 buckets. |
2268 | */ | 2269 | */ |
2269 | nf_conntrack_htable_size | 2270 | nf_conntrack_htable_size |
2270 | = (((totalram_pages << PAGE_SHIFT) / 16384) | 2271 | = (((nr_pages << PAGE_SHIFT) / 16384) |
2271 | / sizeof(struct hlist_head)); | 2272 | / sizeof(struct hlist_head)); |
2272 | if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE))) | 2273 | if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE))) |
2273 | nf_conntrack_htable_size = 65536; | 2274 | nf_conntrack_htable_size = 65536; |
2274 | else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) | 2275 | else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) |
2275 | nf_conntrack_htable_size = 16384; | 2276 | nf_conntrack_htable_size = 16384; |
2276 | if (nf_conntrack_htable_size < 32) | 2277 | if (nf_conntrack_htable_size < 32) |
2277 | nf_conntrack_htable_size = 32; | 2278 | nf_conntrack_htable_size = 32; |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 28e27a32d9b9..8d86e39d6280 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -274,14 +274,15 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, | |||
274 | struct xt_hashlimit_htable *hinfo; | 274 | struct xt_hashlimit_htable *hinfo; |
275 | const struct seq_operations *ops; | 275 | const struct seq_operations *ops; |
276 | unsigned int size, i; | 276 | unsigned int size, i; |
277 | unsigned long nr_pages = totalram_pages(); | ||
277 | int ret; | 278 | int ret; |
278 | 279 | ||
279 | if (cfg->size) { | 280 | if (cfg->size) { |
280 | size = cfg->size; | 281 | size = cfg->size; |
281 | } else { | 282 | } else { |
282 | size = (totalram_pages << PAGE_SHIFT) / 16384 / | 283 | size = (nr_pages << PAGE_SHIFT) / 16384 / |
283 | sizeof(struct hlist_head); | 284 | sizeof(struct hlist_head); |
284 | if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE) | 285 | if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE) |
285 | size = 8192; | 286 | size = 8192; |
286 | if (size < 16) | 287 | if (size < 16) |
287 | size = 16; | 288 | size = 16; |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 9b277bd36d1a..d5878ae55840 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -1368,6 +1368,7 @@ static __init int sctp_init(void) | |||
1368 | int status = -EINVAL; | 1368 | int status = -EINVAL; |
1369 | unsigned long goal; | 1369 | unsigned long goal; |
1370 | unsigned long limit; | 1370 | unsigned long limit; |
1371 | unsigned long nr_pages = totalram_pages(); | ||
1371 | int max_share; | 1372 | int max_share; |
1372 | int order; | 1373 | int order; |
1373 | int num_entries; | 1374 | int num_entries; |
@@ -1426,10 +1427,10 @@ static __init int sctp_init(void) | |||
1426 | * The methodology is similar to that of the tcp hash tables. | 1427 | * The methodology is similar to that of the tcp hash tables. |
1427 | * Though not identical. Start by getting a goal size | 1428 | * Though not identical. Start by getting a goal size |
1428 | */ | 1429 | */ |
1429 | if (totalram_pages >= (128 * 1024)) | 1430 | if (nr_pages >= (128 * 1024)) |
1430 | goal = totalram_pages >> (22 - PAGE_SHIFT); | 1431 | goal = nr_pages >> (22 - PAGE_SHIFT); |
1431 | else | 1432 | else |
1432 | goal = totalram_pages >> (24 - PAGE_SHIFT); | 1433 | goal = nr_pages >> (24 - PAGE_SHIFT); |
1433 | 1434 | ||
1434 | /* Then compute the page order for said goal */ | 1435 | /* Then compute the page order for said goal */ |
1435 | order = get_order(goal); | 1436 | order = get_order(goal); |
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 69552a39951d..25c259df8ffa 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan | |||
@@ -1,5 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | ifdef CONFIG_KASAN | 2 | ifdef CONFIG_KASAN_GENERIC |
3 | |||
3 | ifdef CONFIG_KASAN_INLINE | 4 | ifdef CONFIG_KASAN_INLINE |
4 | call_threshold := 10000 | 5 | call_threshold := 10000 |
5 | else | 6 | else |
@@ -12,36 +13,44 @@ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address | |||
12 | 13 | ||
13 | cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1))) | 14 | cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1))) |
14 | 15 | ||
15 | ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),) | 16 | # -fasan-shadow-offset fails without -fsanitize |
16 | ifneq ($(CONFIG_COMPILE_TEST),y) | 17 | CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \ |
17 | $(warning Cannot use CONFIG_KASAN: \ | ||
18 | -fsanitize=kernel-address is not supported by compiler) | ||
19 | endif | ||
20 | else | ||
21 | # -fasan-shadow-offset fails without -fsanitize | ||
22 | CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \ | ||
23 | -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \ | 18 | -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \ |
24 | $(call cc-option, -fsanitize=kernel-address \ | 19 | $(call cc-option, -fsanitize=kernel-address \ |
25 | -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET))) | 20 | -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET))) |
26 | 21 | ||
27 | ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),) | 22 | ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),) |
28 | CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) | 23 | CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) |
29 | else | 24 | else |
30 | # Now add all the compiler specific options that are valid standalone | 25 | # Now add all the compiler specific options that are valid standalone |
31 | CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ | 26 | CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ |
32 | $(call cc-param,asan-globals=1) \ | 27 | $(call cc-param,asan-globals=1) \ |
33 | $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ | 28 | $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ |
34 | $(call cc-param,asan-stack=1) \ | 29 | $(call cc-param,asan-stack=1) \ |
35 | $(call cc-param,asan-use-after-scope=1) \ | 30 | $(call cc-param,asan-use-after-scope=1) \ |
36 | $(call cc-param,asan-instrument-allocas=1) | 31 | $(call cc-param,asan-instrument-allocas=1) |
37 | endif | ||
38 | |||
39 | endif | 32 | endif |
40 | 33 | ||
41 | ifdef CONFIG_KASAN_EXTRA | 34 | ifdef CONFIG_KASAN_EXTRA |
42 | CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) | 35 | CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) |
43 | endif | 36 | endif |
44 | 37 | ||
45 | CFLAGS_KASAN_NOSANITIZE := -fno-builtin | 38 | endif # CONFIG_KASAN_GENERIC |
46 | 39 | ||
40 | ifdef CONFIG_KASAN_SW_TAGS | ||
41 | |||
42 | ifdef CONFIG_KASAN_INLINE | ||
43 | instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET) | ||
44 | else | ||
45 | instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1 | ||
46 | endif | ||
47 | |||
48 | CFLAGS_KASAN := -fsanitize=kernel-hwaddress \ | ||
49 | -mllvm -hwasan-instrument-stack=0 \ | ||
50 | $(instrumentation_flags) | ||
51 | |||
52 | endif # CONFIG_KASAN_SW_TAGS | ||
53 | |||
54 | ifdef CONFIG_KASAN | ||
55 | CFLAGS_KASAN_NOSANITIZE := -fno-builtin | ||
47 | endif | 56 | endif |
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index a923f05edb36..8c965f6a9881 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter | |||
@@ -32,6 +32,7 @@ def getsizes(file, format): | |||
32 | if name.startswith("__mod_"): continue | 32 | if name.startswith("__mod_"): continue |
33 | if name.startswith("__se_sys"): continue | 33 | if name.startswith("__se_sys"): continue |
34 | if name.startswith("__se_compat_sys"): continue | 34 | if name.startswith("__se_compat_sys"): continue |
35 | if name.startswith("__addressable_"): continue | ||
35 | if name == "linux_banner": continue | 36 | if name == "linux_banner": continue |
36 | # statics and some other optimizations adds random .NUMBER | 37 | # statics and some other optimizations adds random .NUMBER |
37 | name = re_NUMBER.sub('', name) | 38 | name = re_NUMBER.sub('', name) |
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index 34414c6efad6..122aef5e4e14 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl | |||
@@ -48,7 +48,9 @@ my (@stack, $re, $dre, $x, $xs, $funcre); | |||
48 | $funcre = qr/^$x* <(.*)>:$/; | 48 | $funcre = qr/^$x* <(.*)>:$/; |
49 | if ($arch eq 'aarch64') { | 49 | if ($arch eq 'aarch64') { |
50 | #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! | 50 | #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! |
51 | #a110: d11643ff sub sp, sp, #0x590 | ||
51 | $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; | 52 | $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; |
53 | $dre = qr/^.*sub.*sp, sp, #(0x$x{1,8})/o; | ||
52 | } elsif ($arch eq 'arm') { | 54 | } elsif ($arch eq 'arm') { |
53 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 | 55 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 |
54 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; | 56 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; |
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 64220e36ce3b..98a7d63a723e 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh | |||
@@ -78,7 +78,7 @@ parse_symbol() { | |||
78 | fi | 78 | fi |
79 | 79 | ||
80 | # Strip out the base of the path | 80 | # Strip out the base of the path |
81 | code=${code//$basepath/""} | 81 | code=${code//^$basepath/""} |
82 | 82 | ||
83 | # In the case of inlines, move everything to same line | 83 | # In the case of inlines, move everything to same line |
84 | code=${code//$'\n'/' '} | 84 | code=${code//$'\n'/' '} |
diff --git a/scripts/decodecode b/scripts/decodecode index 9cef558528aa..ba8b8d5834e6 100755 --- a/scripts/decodecode +++ b/scripts/decodecode | |||
@@ -60,6 +60,13 @@ case $width in | |||
60 | 4) type=4byte ;; | 60 | 4) type=4byte ;; |
61 | esac | 61 | esac |
62 | 62 | ||
63 | if [ -z "$ARCH" ]; then | ||
64 | case `uname -m` in | ||
65 | aarch64*) ARCH=arm64 ;; | ||
66 | arm*) ARCH=arm ;; | ||
67 | esac | ||
68 | fi | ||
69 | |||
63 | disas() { | 70 | disas() { |
64 | ${CROSS_COMPILE}as $AFLAGS -o $1.o $1.s > /dev/null 2>&1 | 71 | ${CROSS_COMPILE}as $AFLAGS -o $1.o $1.s > /dev/null 2>&1 |
65 | 72 | ||
diff --git a/scripts/spdxcheck-test.sh b/scripts/spdxcheck-test.sh new file mode 100644 index 000000000000..cfea6a0d1cc0 --- /dev/null +++ b/scripts/spdxcheck-test.sh | |||
@@ -0,0 +1,12 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | for PYTHON in python2 python3; do | ||
4 | # run check on a text and a binary file | ||
5 | for FILE in Makefile Documentation/logo.gif; do | ||
6 | $PYTHON scripts/spdxcheck.py $FILE | ||
7 | $PYTHON scripts/spdxcheck.py - < $FILE | ||
8 | done | ||
9 | |||
10 | # run check on complete tree to catch any other issues | ||
11 | $PYTHON scripts/spdxcheck.py > /dev/null | ||
12 | done | ||
diff --git a/scripts/tags.sh b/scripts/tags.sh index 4fa070f9231a..f470d9919ed7 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
@@ -191,7 +191,7 @@ regex_c=( | |||
191 | '/^DEF_PCI_AC_\(\|NO\)RET(\([[:alnum:]_]*\).*/\2/' | 191 | '/^DEF_PCI_AC_\(\|NO\)RET(\([[:alnum:]_]*\).*/\2/' |
192 | '/^PCI_OP_READ(\(\w*\).*[1-4])/pci_bus_read_config_\1/' | 192 | '/^PCI_OP_READ(\(\w*\).*[1-4])/pci_bus_read_config_\1/' |
193 | '/^PCI_OP_WRITE(\(\w*\).*[1-4])/pci_bus_write_config_\1/' | 193 | '/^PCI_OP_WRITE(\(\w*\).*[1-4])/pci_bus_write_config_\1/' |
194 | '/\<DEFINE_\(MUTEX\|SEMAPHORE\|SPINLOCK\)(\([[:alnum:]_]*\)/\2/v/' | 194 | '/\<DEFINE_\(RT_MUTEX\|MUTEX\|SEMAPHORE\|SPINLOCK\)(\([[:alnum:]_]*\)/\2/v/' |
195 | '/\<DEFINE_\(RAW_SPINLOCK\|RWLOCK\|SEQLOCK\)(\([[:alnum:]_]*\)/\2/v/' | 195 | '/\<DEFINE_\(RAW_SPINLOCK\|RWLOCK\|SEQLOCK\)(\([[:alnum:]_]*\)/\2/v/' |
196 | '/\<DECLARE_\(RWSEM\|COMPLETION\)(\([[:alnum:]_]\+\)/\2/v/' | 196 | '/\<DECLARE_\(RWSEM\|COMPLETION\)(\([[:alnum:]_]\+\)/\2/v/' |
197 | '/\<DECLARE_BITMAP(\([[:alnum:]_]*\)/\1/v/' | 197 | '/\<DECLARE_BITMAP(\([[:alnum:]_]*\)/\1/v/' |
@@ -204,6 +204,15 @@ regex_c=( | |||
204 | '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/' | 204 | '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/' |
205 | '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/' | 205 | '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/' |
206 | '/\<\(DEFINE\|DECLARE\)_HASHTABLE(\([[:alnum:]_]*\)/\2/v/' | 206 | '/\<\(DEFINE\|DECLARE\)_HASHTABLE(\([[:alnum:]_]*\)/\2/v/' |
207 | '/\<DEFINE_ID\(R\|A\)(\([[:alnum:]_]\+\)/\2/' | ||
208 | '/\<DEFINE_WD_CLASS(\([[:alnum:]_]\+\)/\1/' | ||
209 | '/\<ATOMIC_NOTIFIER_HEAD(\([[:alnum:]_]\+\)/\1/' | ||
210 | '/\<RAW_NOTIFIER_HEAD(\([[:alnum:]_]\+\)/\1/' | ||
211 | '/\<DECLARE_FAULT_ATTR(\([[:alnum:]_]\+\)/\1/' | ||
212 | '/\<BLOCKING_NOTIFIER_HEAD(\([[:alnum:]_]\+\)/\1/' | ||
213 | '/\<DEVICE_ATTR_\(RW\|RO\|WO\)(\([[:alnum:]_]\+\)/dev_attr_\2/' | ||
214 | '/\<DRIVER_ATTR_\(RW\|RO\|WO\)(\([[:alnum:]_]\+\)/driver_attr_\2/' | ||
215 | '/\<\(DEFINE\|DECLARE\)_STATIC_KEY_\(TRUE\|FALSE\)\(\|_RO\)(\([[:alnum:]_]\+\)/\4/' | ||
207 | ) | 216 | ) |
208 | regex_kconfig=( | 217 | regex_kconfig=( |
209 | '/^[[:blank:]]*\(menu\|\)config[[:blank:]]\+\([[:alnum:]_]\+\)/\2/' | 218 | '/^[[:blank:]]*\(menu\|\)config[[:blank:]]\+\([[:alnum:]_]\+\)/\2/' |
@@ -249,7 +258,7 @@ exuberant() | |||
249 | -I __initdata,__exitdata,__initconst,__ro_after_init \ | 258 | -I __initdata,__exitdata,__initconst,__ro_after_init \ |
250 | -I __initdata_memblock \ | 259 | -I __initdata_memblock \ |
251 | -I __refdata,__attribute,__maybe_unused,__always_unused \ | 260 | -I __refdata,__attribute,__maybe_unused,__always_unused \ |
252 | -I __acquires,__releases,__deprecated \ | 261 | -I __acquires,__releases,__deprecated,__always_inline \ |
253 | -I __read_mostly,__aligned,____cacheline_aligned \ | 262 | -I __read_mostly,__aligned,____cacheline_aligned \ |
254 | -I ____cacheline_aligned_in_smp \ | 263 | -I ____cacheline_aligned_in_smp \ |
255 | -I __cacheline_aligned,__cacheline_aligned_in_smp \ | 264 | -I __cacheline_aligned,__cacheline_aligned_in_smp \ |
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 16bd18747cfa..d6f32807b347 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c | |||
@@ -106,7 +106,7 @@ void ima_add_kexec_buffer(struct kimage *image) | |||
106 | kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + | 106 | kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + |
107 | PAGE_SIZE / 2, PAGE_SIZE); | 107 | PAGE_SIZE / 2, PAGE_SIZE); |
108 | if ((kexec_segment_size == ULONG_MAX) || | 108 | if ((kexec_segment_size == ULONG_MAX) || |
109 | ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) { | 109 | ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) { |
110 | pr_err("Binary measurement list too large.\n"); | 110 | pr_err("Binary measurement list too large.\n"); |
111 | return; | 111 | return; |
112 | } | 112 | } |
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index ff9d3a5825e1..c6635fee27d8 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c | |||
@@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset, | |||
104 | } | 104 | } |
105 | EXPORT_SYMBOL(__wrap_devm_memremap); | 105 | EXPORT_SYMBOL(__wrap_devm_memremap); |
106 | 106 | ||
107 | static void nfit_test_kill(void *_pgmap) | ||
108 | { | ||
109 | struct dev_pagemap *pgmap = _pgmap; | ||
110 | |||
111 | pgmap->kill(pgmap->ref); | ||
112 | } | ||
113 | |||
107 | void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | 114 | void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
108 | { | 115 | { |
109 | resource_size_t offset = pgmap->res.start; | 116 | resource_size_t offset = pgmap->res.start; |
110 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); | 117 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); |
111 | 118 | ||
112 | if (nfit_res) | 119 | if (nfit_res) { |
120 | int rc; | ||
121 | |||
122 | rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap); | ||
123 | if (rc) | ||
124 | return ERR_PTR(rc); | ||
113 | return nfit_res->buf + offset - nfit_res->res.start; | 125 | return nfit_res->buf + offset - nfit_res->res.start; |
126 | } | ||
114 | return devm_memremap_pages(dev, pgmap); | 127 | return devm_memremap_pages(dev, pgmap); |
115 | } | 128 | } |
116 | EXPORT_SYMBOL(__wrap_devm_memremap_pages); | 129 | EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages); |
117 | 130 | ||
118 | pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags) | 131 | pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags) |
119 | { | 132 | { |
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 37908a83ddc2..1ff3a6c0367b 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c | |||
@@ -701,7 +701,7 @@ static void walk_pfn(unsigned long voffset, | |||
701 | if (kpagecgroup_read(cgi, index, pages) != pages) | 701 | if (kpagecgroup_read(cgi, index, pages) != pages) |
702 | fatal("kpagecgroup returned fewer pages than expected"); | 702 | fatal("kpagecgroup returned fewer pages than expected"); |
703 | 703 | ||
704 | if (kpagecount_read(cnt, index, batch) != pages) | 704 | if (kpagecount_read(cnt, index, pages) != pages) |
705 | fatal("kpagecount returned fewer pages than expected"); | 705 | fatal("kpagecount returned fewer pages than expected"); |
706 | 706 | ||
707 | for (i = 0; i < pages; i++) | 707 | for (i = 0; i < pages; i++) |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cf7cc0554094..666d0155662d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -363,10 +363,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, | |||
363 | } | 363 | } |
364 | 364 | ||
365 | static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 365 | static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
366 | struct mm_struct *mm, | 366 | const struct mmu_notifier_range *range) |
367 | unsigned long start, | ||
368 | unsigned long end, | ||
369 | bool blockable) | ||
370 | { | 367 | { |
371 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 368 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
372 | int need_tlb_flush = 0, idx; | 369 | int need_tlb_flush = 0, idx; |
@@ -380,7 +377,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | |||
380 | * count is also read inside the mmu_lock critical section. | 377 | * count is also read inside the mmu_lock critical section. |
381 | */ | 378 | */ |
382 | kvm->mmu_notifier_count++; | 379 | kvm->mmu_notifier_count++; |
383 | need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); | 380 | need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end); |
384 | need_tlb_flush |= kvm->tlbs_dirty; | 381 | need_tlb_flush |= kvm->tlbs_dirty; |
385 | /* we've to flush the tlb before the pages can be freed */ | 382 | /* we've to flush the tlb before the pages can be freed */ |
386 | if (need_tlb_flush) | 383 | if (need_tlb_flush) |
@@ -388,7 +385,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | |||
388 | 385 | ||
389 | spin_unlock(&kvm->mmu_lock); | 386 | spin_unlock(&kvm->mmu_lock); |
390 | 387 | ||
391 | ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable); | 388 | ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, |
389 | range->end, range->blockable); | ||
392 | 390 | ||
393 | srcu_read_unlock(&kvm->srcu, idx); | 391 | srcu_read_unlock(&kvm->srcu, idx); |
394 | 392 | ||
@@ -396,9 +394,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | |||
396 | } | 394 | } |
397 | 395 | ||
398 | static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, | 396 | static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, |
399 | struct mm_struct *mm, | 397 | const struct mmu_notifier_range *range) |
400 | unsigned long start, | ||
401 | unsigned long end) | ||
402 | { | 398 | { |
403 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 399 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
404 | 400 | ||