author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 15:32:08 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 15:32:08 -0500
commit    | eae21770b4fed5597623aad0d618190fa60426ff (patch)
tree      | 23c59fb7a33e93a79525e2b10d56df54d40049d1
parent    | e9f57ebcba563e0cd532926cab83c92bb4d79360 (diff)
parent    | 9f273c24ec5f4a6f785bb83e931b3808a07b459e (diff)
Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:
"I'm pretty much done for -rc1 now:
- the rest of MM, basically
- lib/ updates
- checkpatch, epoll, hfs, fatfs, ptrace, coredump, exit
- cpu_mask simplifications
- kexec, rapidio, MAINTAINERS etc, etc.
- more dma-mapping cleanups/simplifications from hch"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
MAINTAINERS: add/fix git URLs for various subsystems
mm: memcontrol: add "sock" to cgroup2 memory.stat
mm: memcontrol: basic memory statistics in cgroup2 memory controller
mm: memcontrol: do not uncharge old page in page cache replacement
Documentation: cgroup: add memory.swap.{current,max} description
mm: free swap cache aggressively if memcg swap is full
mm: vmscan: do not scan anon pages if memcg swap limit is hit
swap.h: move memcg related stuff to the end of the file
mm: memcontrol: replace mem_cgroup_lruvec_online with mem_cgroup_online
mm: vmscan: pass memcg to get_scan_count()
mm: memcontrol: charge swap to cgroup2
mm: memcontrol: clean up alloc, online, offline, free functions
mm: memcontrol: flatten struct cg_proto
mm: memcontrol: rein in the CONFIG space madness
net: drop tcp_memcontrol.c
mm: memcontrol: introduce CONFIG_MEMCG_LEGACY_KMEM
mm: memcontrol: allow to disable kmem accounting for cgroup2
mm: memcontrol: account "kmem" consumers in cgroup2 memory controller
mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
mm: memcontrol: separate kmem code from legacy tcp accounting code
...
203 files changed, 3642 insertions, 3991 deletions
@@ -1856,6 +1856,16 @@ S: Korte Heul 95 | |||
1856 | S: 1403 ND BUSSUM | 1856 | S: 1403 ND BUSSUM |
1857 | S: The Netherlands | 1857 | S: The Netherlands |
1858 | 1858 | ||
1859 | N: Martin Kepplinger | ||
1860 | E: martink@posteo.de | ||
1861 | E: martin.kepplinger@theobroma-systems.com | ||
1862 | W: http://www.martinkepplinger.com | ||
1863 | D: mma8452 accelerometers iio driver | ||
1864 | D: Kernel cleanups | ||
1865 | S: Garnisonstraße 26 | ||
1866 | S: 4020 Linz | ||
1867 | S: Austria | ||
1868 | |||
1859 | N: Karl Keyte | 1869 | N: Karl Keyte |
1860 | E: karl@koft.com | 1870 | E: karl@koft.com |
1861 | D: Disk usage statistics and modifications to line printer driver | 1871 | D: Disk usage statistics and modifications to line printer driver |
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index d69b3fc64e14..781024ef9050 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt | |||
@@ -951,16 +951,6 @@ to "Closing". | |||
951 | alignment constraints (e.g. the alignment constraints about 64-bit | 951 | alignment constraints (e.g. the alignment constraints about 64-bit |
952 | objects). | 952 | objects). |
953 | 953 | ||
954 | 3) Supporting multiple types of IOMMUs | ||
955 | |||
956 | If your architecture needs to support multiple types of IOMMUs, you | ||
957 | can use include/linux/asm-generic/dma-mapping-common.h. It's a | ||
958 | library to support the DMA API with multiple types of IOMMUs. Lots | ||
959 | of architectures (x86, powerpc, sh, alpha, ia64, microblaze and | ||
960 | sparc) use it. Choose one to see how it can be used. If you need to | ||
961 | support multiple types of IOMMUs in a single system, the example of | ||
962 | x86 or powerpc helps. | ||
963 | |||
964 | Closing | 954 | Closing |
965 | 955 | ||
966 | This document, and the API itself, would not be in its current | 956 | This document, and the API itself, would not be in its current |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index 31d1f7bf12a1..65b3eac8856c 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt | |||
@@ -819,6 +819,78 @@ PAGE_SIZE multiple when read back. | |||
819 | the cgroup. This may not exactly match the number of | 819 | the cgroup. This may not exactly match the number of |
820 | processes killed but should generally be close. | 820 | processes killed but should generally be close. |
821 | 821 | ||
822 | memory.stat | ||
823 | |||
824 | A read-only flat-keyed file which exists on non-root cgroups. | ||
825 | |||
826 | This breaks down the cgroup's memory footprint into different | ||
827 | types of memory, type-specific details, and other information | ||
828 | on the state and past events of the memory management system. | ||
829 | |||
830 | All memory amounts are in bytes. | ||
831 | |||
832 | The entries are ordered to be human readable, and new entries | ||
833 | can show up in the middle. Don't rely on items remaining in a | ||
834 | fixed position; use the keys to look up specific values! | ||
835 | |||
836 | anon | ||
837 | |||
838 | Amount of memory used in anonymous mappings such as | ||
839 | brk(), sbrk(), and mmap(MAP_ANONYMOUS) | ||
840 | |||
841 | file | ||
842 | |||
843 | Amount of memory used to cache filesystem data, | ||
844 | including tmpfs and shared memory. | ||
845 | |||
846 | file_mapped | ||
847 | |||
848 | Amount of cached filesystem data mapped with mmap() | ||
849 | |||
850 | file_dirty | ||
851 | |||
852 | Amount of cached filesystem data that was modified but | ||
853 | not yet written back to disk | ||
854 | |||
855 | file_writeback | ||
856 | |||
857 | Amount of cached filesystem data that was modified and | ||
858 | is currently being written back to disk | ||
859 | |||
860 | inactive_anon | ||
861 | active_anon | ||
862 | inactive_file | ||
863 | active_file | ||
864 | unevictable | ||
865 | |||
866 | Amount of memory, swap-backed and filesystem-backed, | ||
867 | on the internal memory management lists used by the | ||
868 | page reclaim algorithm | ||
869 | |||
870 | pgfault | ||
871 | |||
872 | Total number of page faults incurred | ||
873 | |||
874 | pgmajfault | ||
875 | |||
876 | Number of major page faults incurred | ||
877 | |||
878 | memory.swap.current | ||
879 | |||
880 | A read-only single value file which exists on non-root | ||
881 | cgroups. | ||
882 | |||
883 | The total amount of swap currently being used by the cgroup | ||
884 | and its descendants. | ||
885 | |||
886 | memory.swap.max | ||
887 | |||
888 | A read-write single value file which exists on non-root | ||
889 | cgroups. The default is "max". | ||
890 | |||
891 | Swap usage hard limit. If a cgroup's swap usage reaches this | ||
892 | limit, anonymous memory of the cgroup will not be swapped out. | ||
893 | |||
822 | 894 | ||
823 | 5-2-2. General Usage | 895 | 5-2-2. General Usage |
824 | 896 | ||
@@ -1291,3 +1363,20 @@ allocation from the slack available in other groups or the rest of the | |||
1291 | system than killing the group. Otherwise, memory.max is there to | 1363 | system than killing the group. Otherwise, memory.max is there to |
1292 | limit this type of spillover and ultimately contain buggy or even | 1364 | limit this type of spillover and ultimately contain buggy or even |
1293 | malicious applications. | 1365 | malicious applications. |
1366 | |||
1367 | The combined memory+swap accounting and limiting is replaced by real | ||
1368 | control over swap space. | ||
1369 | |||
1370 | The main argument for a combined memory+swap facility in the original | ||
1371 | cgroup design was that global or parental pressure would always be | ||
1372 | able to swap all anonymous memory of a child group, regardless of the | ||
1373 | child's own (possibly untrusted) configuration. However, untrusted | ||
1374 | groups can sabotage swapping by other means - such as referencing their | ||
1375 | anonymous memory in a tight loop - and an admin cannot assume full | ||
1376 | swappability when overcommitting untrusted jobs. | ||
1377 | |||
1378 | For trusted jobs, on the other hand, a combined counter is not an | ||
1379 | intuitive userspace interface, and it flies in the face of the idea | ||
1380 | that cgroup controllers should account and limit specific physical | ||
1381 | resources. Swap space is a resource like all others in the system, | ||
1382 | and that's why unified hierarchy allows distributing it separately. | ||
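
The new memory.swap.current and memory.swap.max files are plain cgroup2 control files. As a minimal illustration (not part of this patch), the sketch below caps a group's swap at 256 MiB and reads back its current usage; the cgroup2 mount point /sys/fs/cgroup and the group name "job" are assumptions for the example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *max_path = "/sys/fs/cgroup/job/memory.swap.max";
	const char *cur_path = "/sys/fs/cgroup/job/memory.swap.current";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(max_path, O_WRONLY);
	if (fd < 0)
		return 1;
	/* Limits are written as decimal byte counts; "max" removes the limit. */
	if (write(fd, "268435456", strlen("268435456")) < 0)
		perror("write memory.swap.max");
	close(fd);

	fd = open(cur_path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("swap in use: %s", buf);	/* value is in bytes */
	}
	close(fd);
	return 0;
}
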
diff --git a/Documentation/features/io/dma_map_attrs/arch-support.txt b/Documentation/features/io/dma_map_attrs/arch-support.txt deleted file mode 100644 index 51d0f1c02a3e..000000000000 --- a/Documentation/features/io/dma_map_attrs/arch-support.txt +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | # | ||
2 | # Feature name: dma_map_attrs | ||
3 | # Kconfig: HAVE_DMA_ATTRS | ||
4 | # description: arch provides dma_*map*_attrs() APIs | ||
5 | # | ||
6 | ----------------------- | ||
7 | | arch |status| | ||
8 | ----------------------- | ||
9 | | alpha: | ok | | ||
10 | | arc: | TODO | | ||
11 | | arm: | ok | | ||
12 | | arm64: | ok | | ||
13 | | avr32: | TODO | | ||
14 | | blackfin: | TODO | | ||
15 | | c6x: | TODO | | ||
16 | | cris: | TODO | | ||
17 | | frv: | TODO | | ||
18 | | h8300: | ok | | ||
19 | | hexagon: | ok | | ||
20 | | ia64: | ok | | ||
21 | | m32r: | TODO | | ||
22 | | m68k: | TODO | | ||
23 | | metag: | TODO | | ||
24 | | microblaze: | ok | | ||
25 | | mips: | ok | | ||
26 | | mn10300: | TODO | | ||
27 | | nios2: | TODO | | ||
28 | | openrisc: | ok | | ||
29 | | parisc: | TODO | | ||
30 | | powerpc: | ok | | ||
31 | | s390: | ok | | ||
32 | | score: | TODO | | ||
33 | | sh: | ok | | ||
34 | | sparc: | ok | | ||
35 | | tile: | ok | | ||
36 | | um: | TODO | | ||
37 | | unicore32: | ok | | ||
38 | | x86: | ok | | ||
39 | | xtensa: | TODO | | ||
40 | ----------------------- | ||
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt index ce1126aceed8..223c32171dcc 100644 --- a/Documentation/filesystems/vfat.txt +++ b/Documentation/filesystems/vfat.txt | |||
@@ -180,6 +180,16 @@ dos1xfloppy -- If set, use a fallback default BIOS Parameter Block | |||
180 | 180 | ||
181 | <bool>: 0,1,yes,no,true,false | 181 | <bool>: 0,1,yes,no,true,false |
182 | 182 | ||
183 | LIMITATION | ||
184 | --------------------------------------------------------------------- | ||
185 | * The fallocated region of file is discarded at umount/evict time | ||
186 | when using fallocate with FALLOC_FL_KEEP_SIZE. | ||
187 | So, users should assume that the fallocated region can be discarded | ||
188 | at the last close if there is memory pressure resulting in eviction | ||
189 | of the inode from memory. As a result, for any dependency on the | ||
190 | fallocated region, users should make sure to recheck fallocate | ||
191 | after reopening the file. | ||
192 | |||
183 | TODO | 193 | TODO |
184 | ---------------------------------------------------------------------- | 194 | ---------------------------------------------------------------------- |
185 | * Need to get rid of the raw scanning stuff. Instead, always use | 195 | * Need to get rid of the raw scanning stuff. Instead, always use |
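
The limitation text above amounts to a usage rule: redo the preallocation after every reopen. A minimal sketch of that pattern follows (an editor illustration only; the path /mnt/vfat/data.bin is an assumption, not from the patch).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Preallocate 1 MiB beyond EOF without changing the file size; on vfat the
 * reservation may be dropped when the inode is evicted, so callers repeat
 * this after reopening the file rather than relying on an earlier call.
 */
static int open_and_prealloc(const char *path)
{
	int fd = open(path, O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return -1;
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");
	return fd;
}

int main(void)
{
	int fd = open_and_prealloc("/mnt/vfat/data.bin");

	if (fd < 0)
		return 1;
	close(fd);

	/* Later, after reopening, do not assume the old preallocation survived. */
	fd = open_and_prealloc("/mnt/vfat/data.bin");
	if (fd >= 0)
		close(fd);
	return 0;
}
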
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 3ea869d7a31c..cfb2c0f1a4a8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -611,6 +611,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
611 | cgroup.memory= [KNL] Pass options to the cgroup memory controller. | 611 | cgroup.memory= [KNL] Pass options to the cgroup memory controller. |
612 | Format: <string> | 612 | Format: <string> |
613 | nosocket -- Disable socket memory accounting. | 613 | nosocket -- Disable socket memory accounting. |
614 | nokmem -- Disable kernel memory accounting. | ||
614 | 615 | ||
615 | checkreqprot [SELINUX] Set initial checkreqprot flag value. | 616 | checkreqprot [SELINUX] Set initial checkreqprot flag value. |
616 | Format: { "0" | "1" } | 617 | Format: { "0" | "1" } |
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 73c6b1ef0e84..a93b414672a7 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -825,14 +825,13 @@ via the /proc/sys interface: | |||
825 | Each write syscall must fully contain the sysctl value to be | 825 | Each write syscall must fully contain the sysctl value to be |
826 | written, and multiple writes on the same sysctl file descriptor | 826 | written, and multiple writes on the same sysctl file descriptor |
827 | will rewrite the sysctl value, regardless of file position. | 827 | will rewrite the sysctl value, regardless of file position. |
828 | 0 - (default) Same behavior as above, but warn about processes that | 828 | 0 - Same behavior as above, but warn about processes that perform writes |
829 | perform writes to a sysctl file descriptor when the file position | 829 | to a sysctl file descriptor when the file position is not 0. |
830 | is not 0. | 830 | 1 - (default) Respect file position when writing sysctl strings. Multiple |
831 | 1 - Respect file position when writing sysctl strings. Multiple writes | 831 | writes will append to the sysctl value buffer. Anything past the max |
832 | will append to the sysctl value buffer. Anything past the max length | 832 | length of the sysctl value buffer will be ignored. Writes to numeric |
833 | of the sysctl value buffer will be ignored. Writes to numeric sysctl | 833 | sysctl entries must always be at file position 0 and the value must |
834 | entries must always be at file position 0 and the value must be | 834 | be fully contained in the buffer sent in the write syscall. |
835 | fully contained in the buffer sent in the write syscall. | ||
836 | 835 | ||
837 | ============================================================== | 836 | ============================================================== |
838 | 837 | ||
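
Under either setting of sysctl_writes_strict, the pattern that always works is a single write(2) containing the whole value at file position 0. A small sketch (editor illustration only, not from the patch):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *val = "1\n";
	int fd = open("/proc/sys/kernel/sysctl_writes_strict", O_WRONLY);

	if (fd < 0)
		return 1;
	/* One write syscall with the entire value, at offset 0: valid in
	 * both mode 0 (legacy + warning) and mode 1 (position-respecting).
	 */
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
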
diff --git a/Documentation/ubsan.txt b/Documentation/ubsan.txt new file mode 100644 index 000000000000..f58215ef5797 --- /dev/null +++ b/Documentation/ubsan.txt | |||
@@ -0,0 +1,84 @@ | |||
1 | Undefined Behavior Sanitizer - UBSAN | ||
2 | |||
3 | Overview | ||
4 | -------- | ||
5 | |||
6 | UBSAN is a runtime undefined behaviour checker. | ||
7 | |||
8 | UBSAN uses compile-time instrumentation to catch undefined behavior (UB). | ||
9 | The compiler inserts code that performs certain kinds of checks before | ||
10 | operations that may cause UB. If a check fails (i.e. UB is detected), an | ||
11 | __ubsan_handle_* function is called to print an error message. | ||
12 | |||
13 | GCC has had this feature since 4.9.x [1] (see the -fsanitize=undefined option and | ||
14 | its suboptions). GCC 5.x has more checkers implemented [2]. | ||
15 | |||
16 | Report example | ||
17 | --------------- | ||
18 | |||
19 | ================================================================================ | ||
20 | UBSAN: Undefined behaviour in ../include/linux/bitops.h:110:33 | ||
21 | shift exponent 32 is to large for 32-bit type 'unsigned int' | ||
22 | CPU: 0 PID: 0 Comm: swapper Not tainted 4.4.0-rc1+ #26 | ||
23 | 0000000000000000 ffffffff82403cc8 ffffffff815e6cd6 0000000000000001 | ||
24 | ffffffff82403cf8 ffffffff82403ce0 ffffffff8163a5ed 0000000000000020 | ||
25 | ffffffff82403d78 ffffffff8163ac2b ffffffff815f0001 0000000000000002 | ||
26 | Call Trace: | ||
27 | [<ffffffff815e6cd6>] dump_stack+0x45/0x5f | ||
28 | [<ffffffff8163a5ed>] ubsan_epilogue+0xd/0x40 | ||
29 | [<ffffffff8163ac2b>] __ubsan_handle_shift_out_of_bounds+0xeb/0x130 | ||
30 | [<ffffffff815f0001>] ? radix_tree_gang_lookup_slot+0x51/0x150 | ||
31 | [<ffffffff8173c586>] _mix_pool_bytes+0x1e6/0x480 | ||
32 | [<ffffffff83105653>] ? dmi_walk_early+0x48/0x5c | ||
33 | [<ffffffff8173c881>] add_device_randomness+0x61/0x130 | ||
34 | [<ffffffff83105b35>] ? dmi_save_one_device+0xaa/0xaa | ||
35 | [<ffffffff83105653>] dmi_walk_early+0x48/0x5c | ||
36 | [<ffffffff831066ae>] dmi_scan_machine+0x278/0x4b4 | ||
37 | [<ffffffff8111d58a>] ? vprintk_default+0x1a/0x20 | ||
38 | [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120 | ||
39 | [<ffffffff830b2240>] setup_arch+0x405/0xc2c | ||
40 | [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120 | ||
41 | [<ffffffff830ae053>] start_kernel+0x83/0x49a | ||
42 | [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120 | ||
43 | [<ffffffff830ad386>] x86_64_start_reservations+0x2a/0x2c | ||
44 | [<ffffffff830ad4f3>] x86_64_start_kernel+0x16b/0x17a | ||
45 | ================================================================================ | ||
46 | |||
47 | Usage | ||
48 | ----- | ||
49 | |||
50 | To enable UBSAN, configure the kernel with: | ||
51 | |||
52 | CONFIG_UBSAN=y | ||
53 | |||
54 | and to check the entire kernel: | ||
55 | |||
56 | CONFIG_UBSAN_SANITIZE_ALL=y | ||
57 | |||
58 | To enable instrumentation for specific files or directories, add a line | ||
59 | similar to the following to the respective kernel Makefile: | ||
60 | |||
61 | For a single file (e.g. main.o): | ||
62 | UBSAN_SANITIZE_main.o := y | ||
63 | |||
64 | For all files in one directory: | ||
65 | UBSAN_SANITIZE := y | ||
66 | |||
67 | To exclude files from being instrumented even if | ||
68 | CONFIG_UBSAN_SANITIZE_ALL=y, use: | ||
69 | |||
70 | UBSAN_SANITIZE_main.o := n | ||
71 | and: | ||
72 | UBSAN_SANITIZE := n | ||
73 | |||
74 | Detection of unaligned accesses is controlled through the separate option | ||
75 | CONFIG_UBSAN_ALIGNMENT. It's off by default on architectures that support | ||
76 | unaligned accesses (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y). One could | ||
77 | still enable it in the config; just note that it will produce a lot of UBSAN | ||
78 | reports. | ||
79 | |||
80 | References | ||
81 | ---------- | ||
82 | |||
83 | [1] - https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html | ||
84 | [2] - https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html | ||
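
For context on what the inserted checks catch, here is a tiny sketch (not part of the patch; the function name is made up) of the kind of shift that produces a report like the example above once the file is built with UBSAN instrumentation:

/* With -fsanitize=undefined the compiler inserts a bounds check before the
 * shift; if pos can reach 32, the check fires at runtime and
 * __ubsan_handle_shift_out_of_bounds() prints a report.
 */
unsigned int bit_at(unsigned int pos)
{
	/* Undefined when pos >= 32 for a 32-bit 'unsigned int'. */
	return 1U << pos;
}
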
diff --git a/MAINTAINERS b/MAINTAINERS index 84e08e626e10..45d2717760fc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -781,6 +781,7 @@ F: sound/aoa/ | |||
781 | APM DRIVER | 781 | APM DRIVER |
782 | M: Jiri Kosina <jikos@kernel.org> | 782 | M: Jiri Kosina <jikos@kernel.org> |
783 | S: Odd fixes | 783 | S: Odd fixes |
784 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/apm.git | ||
784 | F: arch/x86/kernel/apm_32.c | 785 | F: arch/x86/kernel/apm_32.c |
785 | F: include/linux/apm_bios.h | 786 | F: include/linux/apm_bios.h |
786 | F: include/uapi/linux/apm_bios.h | 787 | F: include/uapi/linux/apm_bios.h |
@@ -946,6 +947,7 @@ M: Alexandre Belloni <alexandre.belloni@free-electrons.com> | |||
946 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> | 947 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> |
947 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 948 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
948 | W: http://www.linux4sam.org | 949 | W: http://www.linux4sam.org |
950 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git | ||
949 | S: Supported | 951 | S: Supported |
950 | F: arch/arm/mach-at91/ | 952 | F: arch/arm/mach-at91/ |
951 | F: include/soc/at91/ | 953 | F: include/soc/at91/ |
@@ -1464,6 +1466,7 @@ ARM/Rockchip SoC support | |||
1464 | M: Heiko Stuebner <heiko@sntech.de> | 1466 | M: Heiko Stuebner <heiko@sntech.de> |
1465 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1467 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1466 | L: linux-rockchip@lists.infradead.org | 1468 | L: linux-rockchip@lists.infradead.org |
1469 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git | ||
1467 | S: Maintained | 1470 | S: Maintained |
1468 | F: arch/arm/boot/dts/rk3* | 1471 | F: arch/arm/boot/dts/rk3* |
1469 | F: arch/arm/mach-rockchip/ | 1472 | F: arch/arm/mach-rockchip/ |
@@ -1796,6 +1799,7 @@ ARM64 PORT (AARCH64 ARCHITECTURE) | |||
1796 | M: Catalin Marinas <catalin.marinas@arm.com> | 1799 | M: Catalin Marinas <catalin.marinas@arm.com> |
1797 | M: Will Deacon <will.deacon@arm.com> | 1800 | M: Will Deacon <will.deacon@arm.com> |
1798 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1801 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1802 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git | ||
1799 | S: Maintained | 1803 | S: Maintained |
1800 | F: arch/arm64/ | 1804 | F: arch/arm64/ |
1801 | F: Documentation/arm64/ | 1805 | F: Documentation/arm64/ |
@@ -1881,7 +1885,7 @@ ATHEROS ATH6KL WIRELESS DRIVER | |||
1881 | M: Kalle Valo <kvalo@qca.qualcomm.com> | 1885 | M: Kalle Valo <kvalo@qca.qualcomm.com> |
1882 | L: linux-wireless@vger.kernel.org | 1886 | L: linux-wireless@vger.kernel.org |
1883 | W: http://wireless.kernel.org/en/users/Drivers/ath6kl | 1887 | W: http://wireless.kernel.org/en/users/Drivers/ath6kl |
1884 | T: git git://github.com/kvalo/ath.git | 1888 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git |
1885 | S: Supported | 1889 | S: Supported |
1886 | F: drivers/net/wireless/ath/ath6kl/ | 1890 | F: drivers/net/wireless/ath/ath6kl/ |
1887 | 1891 | ||
@@ -2133,6 +2137,7 @@ F: drivers/net/wireless/broadcom/b43legacy/ | |||
2133 | BACKLIGHT CLASS/SUBSYSTEM | 2137 | BACKLIGHT CLASS/SUBSYSTEM |
2134 | M: Jingoo Han <jingoohan1@gmail.com> | 2138 | M: Jingoo Han <jingoohan1@gmail.com> |
2135 | M: Lee Jones <lee.jones@linaro.org> | 2139 | M: Lee Jones <lee.jones@linaro.org> |
2140 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/backlight.git | ||
2136 | S: Maintained | 2141 | S: Maintained |
2137 | F: drivers/video/backlight/ | 2142 | F: drivers/video/backlight/ |
2138 | F: include/linux/backlight.h | 2143 | F: include/linux/backlight.h |
@@ -2815,6 +2820,7 @@ F: drivers/input/touchscreen/chipone_icn8318.c | |||
2815 | CHROME HARDWARE PLATFORM SUPPORT | 2820 | CHROME HARDWARE PLATFORM SUPPORT |
2816 | M: Olof Johansson <olof@lixom.net> | 2821 | M: Olof Johansson <olof@lixom.net> |
2817 | S: Maintained | 2822 | S: Maintained |
2823 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/chrome-platform.git | ||
2818 | F: drivers/platform/chrome/ | 2824 | F: drivers/platform/chrome/ |
2819 | 2825 | ||
2820 | CISCO VIC ETHERNET NIC DRIVER | 2826 | CISCO VIC ETHERNET NIC DRIVER |
@@ -3113,6 +3119,7 @@ M: Mikael Starvik <starvik@axis.com> | |||
3113 | M: Jesper Nilsson <jesper.nilsson@axis.com> | 3119 | M: Jesper Nilsson <jesper.nilsson@axis.com> |
3114 | L: linux-cris-kernel@axis.com | 3120 | L: linux-cris-kernel@axis.com |
3115 | W: http://developer.axis.com | 3121 | W: http://developer.axis.com |
3122 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris.git | ||
3116 | S: Maintained | 3123 | S: Maintained |
3117 | F: arch/cris/ | 3124 | F: arch/cris/ |
3118 | F: drivers/tty/serial/crisv10.* | 3125 | F: drivers/tty/serial/crisv10.* |
@@ -3121,6 +3128,7 @@ CRYPTO API | |||
3121 | M: Herbert Xu <herbert@gondor.apana.org.au> | 3128 | M: Herbert Xu <herbert@gondor.apana.org.au> |
3122 | M: "David S. Miller" <davem@davemloft.net> | 3129 | M: "David S. Miller" <davem@davemloft.net> |
3123 | L: linux-crypto@vger.kernel.org | 3130 | L: linux-crypto@vger.kernel.org |
3131 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git | ||
3124 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git | 3132 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git |
3125 | S: Maintained | 3133 | S: Maintained |
3126 | F: Documentation/crypto/ | 3134 | F: Documentation/crypto/ |
@@ -3583,7 +3591,7 @@ M: Christine Caulfield <ccaulfie@redhat.com> | |||
3583 | M: David Teigland <teigland@redhat.com> | 3591 | M: David Teigland <teigland@redhat.com> |
3584 | L: cluster-devel@redhat.com | 3592 | L: cluster-devel@redhat.com |
3585 | W: http://sources.redhat.com/cluster/ | 3593 | W: http://sources.redhat.com/cluster/ |
3586 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git | 3594 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git |
3587 | S: Supported | 3595 | S: Supported |
3588 | F: fs/dlm/ | 3596 | F: fs/dlm/ |
3589 | 3597 | ||
@@ -3997,6 +4005,7 @@ M: Tyler Hicks <tyhicks@canonical.com> | |||
3997 | L: ecryptfs@vger.kernel.org | 4005 | L: ecryptfs@vger.kernel.org |
3998 | W: http://ecryptfs.org | 4006 | W: http://ecryptfs.org |
3999 | W: https://launchpad.net/ecryptfs | 4007 | W: https://launchpad.net/ecryptfs |
4008 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git | ||
4000 | S: Supported | 4009 | S: Supported |
4001 | F: Documentation/filesystems/ecryptfs.txt | 4010 | F: Documentation/filesystems/ecryptfs.txt |
4002 | F: fs/ecryptfs/ | 4011 | F: fs/ecryptfs/ |
@@ -4275,6 +4284,7 @@ M: Andreas Dilger <adilger.kernel@dilger.ca> | |||
4275 | L: linux-ext4@vger.kernel.org | 4284 | L: linux-ext4@vger.kernel.org |
4276 | W: http://ext4.wiki.kernel.org | 4285 | W: http://ext4.wiki.kernel.org |
4277 | Q: http://patchwork.ozlabs.org/project/linux-ext4/list/ | 4286 | Q: http://patchwork.ozlabs.org/project/linux-ext4/list/ |
4287 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git | ||
4278 | S: Maintained | 4288 | S: Maintained |
4279 | F: Documentation/filesystems/ext4.txt | 4289 | F: Documentation/filesystems/ext4.txt |
4280 | F: fs/ext4/ | 4290 | F: fs/ext4/ |
@@ -4957,6 +4967,7 @@ F: include/linux/hw_random.h | |||
4957 | HARDWARE SPINLOCK CORE | 4967 | HARDWARE SPINLOCK CORE |
4958 | M: Ohad Ben-Cohen <ohad@wizery.com> | 4968 | M: Ohad Ben-Cohen <ohad@wizery.com> |
4959 | S: Maintained | 4969 | S: Maintained |
4970 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git | ||
4960 | F: Documentation/hwspinlock.txt | 4971 | F: Documentation/hwspinlock.txt |
4961 | F: drivers/hwspinlock/hwspinlock_* | 4972 | F: drivers/hwspinlock/hwspinlock_* |
4962 | F: include/linux/hwspinlock.h | 4973 | F: include/linux/hwspinlock.h |
@@ -5495,6 +5506,7 @@ M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com> | |||
5495 | L: linux-ima-devel@lists.sourceforge.net | 5506 | L: linux-ima-devel@lists.sourceforge.net |
5496 | L: linux-ima-user@lists.sourceforge.net | 5507 | L: linux-ima-user@lists.sourceforge.net |
5497 | L: linux-security-module@vger.kernel.org | 5508 | L: linux-security-module@vger.kernel.org |
5509 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git | ||
5498 | S: Supported | 5510 | S: Supported |
5499 | F: security/integrity/ima/ | 5511 | F: security/integrity/ima/ |
5500 | 5512 | ||
@@ -5750,11 +5762,11 @@ F: include/linux/mic_bus.h | |||
5750 | F: include/linux/scif.h | 5762 | F: include/linux/scif.h |
5751 | F: include/uapi/linux/mic_common.h | 5763 | F: include/uapi/linux/mic_common.h |
5752 | F: include/uapi/linux/mic_ioctl.h | 5764 | F: include/uapi/linux/mic_ioctl.h |
5753 | F include/uapi/linux/scif_ioctl.h | 5765 | F: include/uapi/linux/scif_ioctl.h |
5754 | F: drivers/misc/mic/ | 5766 | F: drivers/misc/mic/ |
5755 | F: drivers/dma/mic_x100_dma.c | 5767 | F: drivers/dma/mic_x100_dma.c |
5756 | F: drivers/dma/mic_x100_dma.h | 5768 | F: drivers/dma/mic_x100_dma.h |
5757 | F Documentation/mic/ | 5769 | F: Documentation/mic/ |
5758 | 5770 | ||
5759 | INTEL PMC/P-Unit IPC DRIVER | 5771 | INTEL PMC/P-Unit IPC DRIVER |
5760 | M: Zha Qipeng<qipeng.zha@intel.com> | 5772 | M: Zha Qipeng<qipeng.zha@intel.com> |
@@ -5835,6 +5847,8 @@ M: Julian Anastasov <ja@ssi.bg> | |||
5835 | L: netdev@vger.kernel.org | 5847 | L: netdev@vger.kernel.org |
5836 | L: lvs-devel@vger.kernel.org | 5848 | L: lvs-devel@vger.kernel.org |
5837 | S: Maintained | 5849 | S: Maintained |
5850 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs-next.git | ||
5851 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs.git | ||
5838 | F: Documentation/networking/ipvs-sysctl.txt | 5852 | F: Documentation/networking/ipvs-sysctl.txt |
5839 | F: include/net/ip_vs.h | 5853 | F: include/net/ip_vs.h |
5840 | F: include/uapi/linux/ip_vs.h | 5854 | F: include/uapi/linux/ip_vs.h |
@@ -6118,6 +6132,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org> | |||
6118 | M: Jeff Layton <jlayton@poochiereds.net> | 6132 | M: Jeff Layton <jlayton@poochiereds.net> |
6119 | L: linux-nfs@vger.kernel.org | 6133 | L: linux-nfs@vger.kernel.org |
6120 | W: http://nfs.sourceforge.net/ | 6134 | W: http://nfs.sourceforge.net/ |
6135 | T: git git://linux-nfs.org/~bfields/linux.git | ||
6121 | S: Supported | 6136 | S: Supported |
6122 | F: fs/nfsd/ | 6137 | F: fs/nfsd/ |
6123 | F: include/uapi/linux/nfsd/ | 6138 | F: include/uapi/linux/nfsd/ |
@@ -6174,6 +6189,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com> | |||
6174 | M: Cornelia Huck <cornelia.huck@de.ibm.com> | 6189 | M: Cornelia Huck <cornelia.huck@de.ibm.com> |
6175 | L: linux-s390@vger.kernel.org | 6190 | L: linux-s390@vger.kernel.org |
6176 | W: http://www.ibm.com/developerworks/linux/linux390/ | 6191 | W: http://www.ibm.com/developerworks/linux/linux390/ |
6192 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git | ||
6177 | S: Supported | 6193 | S: Supported |
6178 | F: Documentation/s390/kvm.txt | 6194 | F: Documentation/s390/kvm.txt |
6179 | F: arch/s390/include/asm/kvm* | 6195 | F: arch/s390/include/asm/kvm* |
@@ -6247,6 +6263,7 @@ KGDB / KDB /debug_core | |||
6247 | M: Jason Wessel <jason.wessel@windriver.com> | 6263 | M: Jason Wessel <jason.wessel@windriver.com> |
6248 | W: http://kgdb.wiki.kernel.org/ | 6264 | W: http://kgdb.wiki.kernel.org/ |
6249 | L: kgdb-bugreport@lists.sourceforge.net | 6265 | L: kgdb-bugreport@lists.sourceforge.net |
6266 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git | ||
6250 | S: Maintained | 6267 | S: Maintained |
6251 | F: Documentation/DocBook/kgdb.tmpl | 6268 | F: Documentation/DocBook/kgdb.tmpl |
6252 | F: drivers/misc/kgdbts.c | 6269 | F: drivers/misc/kgdbts.c |
@@ -6418,6 +6435,7 @@ LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM | |||
6418 | M: Dan Williams <dan.j.williams@intel.com> | 6435 | M: Dan Williams <dan.j.williams@intel.com> |
6419 | L: linux-nvdimm@lists.01.org | 6436 | L: linux-nvdimm@lists.01.org |
6420 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ | 6437 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ |
6438 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git | ||
6421 | S: Supported | 6439 | S: Supported |
6422 | F: drivers/nvdimm/* | 6440 | F: drivers/nvdimm/* |
6423 | F: include/linux/nd.h | 6441 | F: include/linux/nd.h |
@@ -7087,6 +7105,7 @@ F: Documentation/hwmon/menf21bmc | |||
7087 | METAG ARCHITECTURE | 7105 | METAG ARCHITECTURE |
7088 | M: James Hogan <james.hogan@imgtec.com> | 7106 | M: James Hogan <james.hogan@imgtec.com> |
7089 | L: linux-metag@vger.kernel.org | 7107 | L: linux-metag@vger.kernel.org |
7108 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git | ||
7090 | S: Odd Fixes | 7109 | S: Odd Fixes |
7091 | F: arch/metag/ | 7110 | F: arch/metag/ |
7092 | F: Documentation/metag/ | 7111 | F: Documentation/metag/ |
@@ -7568,7 +7587,8 @@ NETWORKING DRIVERS (WIRELESS) | |||
7568 | M: Kalle Valo <kvalo@codeaurora.org> | 7587 | M: Kalle Valo <kvalo@codeaurora.org> |
7569 | L: linux-wireless@vger.kernel.org | 7588 | L: linux-wireless@vger.kernel.org |
7570 | Q: http://patchwork.kernel.org/project/linux-wireless/list/ | 7589 | Q: http://patchwork.kernel.org/project/linux-wireless/list/ |
7571 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/ | 7590 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git |
7591 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git | ||
7572 | S: Maintained | 7592 | S: Maintained |
7573 | F: drivers/net/wireless/ | 7593 | F: drivers/net/wireless/ |
7574 | 7594 | ||
@@ -7974,6 +7994,7 @@ M: Mark Rutland <mark.rutland@arm.com> | |||
7974 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> | 7994 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> |
7975 | M: Kumar Gala <galak@codeaurora.org> | 7995 | M: Kumar Gala <galak@codeaurora.org> |
7976 | L: devicetree@vger.kernel.org | 7996 | L: devicetree@vger.kernel.org |
7997 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git | ||
7977 | S: Maintained | 7998 | S: Maintained |
7978 | F: Documentation/devicetree/ | 7999 | F: Documentation/devicetree/ |
7979 | F: arch/*/boot/dts/ | 8000 | F: arch/*/boot/dts/ |
@@ -8364,7 +8385,7 @@ PCMCIA SUBSYSTEM | |||
8364 | P: Linux PCMCIA Team | 8385 | P: Linux PCMCIA Team |
8365 | L: linux-pcmcia@lists.infradead.org | 8386 | L: linux-pcmcia@lists.infradead.org |
8366 | W: http://lists.infradead.org/mailman/listinfo/linux-pcmcia | 8387 | W: http://lists.infradead.org/mailman/listinfo/linux-pcmcia |
8367 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git | 8388 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git |
8368 | S: Maintained | 8389 | S: Maintained |
8369 | F: Documentation/pcmcia/ | 8390 | F: Documentation/pcmcia/ |
8370 | F: drivers/pcmcia/ | 8391 | F: drivers/pcmcia/ |
@@ -8686,7 +8707,7 @@ M: Colin Cross <ccross@android.com> | |||
8686 | M: Kees Cook <keescook@chromium.org> | 8707 | M: Kees Cook <keescook@chromium.org> |
8687 | M: Tony Luck <tony.luck@intel.com> | 8708 | M: Tony Luck <tony.luck@intel.com> |
8688 | S: Maintained | 8709 | S: Maintained |
8689 | T: git git://git.infradead.org/users/cbou/linux-pstore.git | 8710 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git |
8690 | F: fs/pstore/ | 8711 | F: fs/pstore/ |
8691 | F: include/linux/pstore* | 8712 | F: include/linux/pstore* |
8692 | F: drivers/firmware/efi/efi-pstore.c | 8713 | F: drivers/firmware/efi/efi-pstore.c |
@@ -8895,13 +8916,14 @@ QUALCOMM ATHEROS ATH10K WIRELESS DRIVER | |||
8895 | M: Kalle Valo <kvalo@qca.qualcomm.com> | 8916 | M: Kalle Valo <kvalo@qca.qualcomm.com> |
8896 | L: ath10k@lists.infradead.org | 8917 | L: ath10k@lists.infradead.org |
8897 | W: http://wireless.kernel.org/en/users/Drivers/ath10k | 8918 | W: http://wireless.kernel.org/en/users/Drivers/ath10k |
8898 | T: git git://github.com/kvalo/ath.git | 8919 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git |
8899 | S: Supported | 8920 | S: Supported |
8900 | F: drivers/net/wireless/ath/ath10k/ | 8921 | F: drivers/net/wireless/ath/ath10k/ |
8901 | 8922 | ||
8902 | QUALCOMM HEXAGON ARCHITECTURE | 8923 | QUALCOMM HEXAGON ARCHITECTURE |
8903 | M: Richard Kuo <rkuo@codeaurora.org> | 8924 | M: Richard Kuo <rkuo@codeaurora.org> |
8904 | L: linux-hexagon@vger.kernel.org | 8925 | L: linux-hexagon@vger.kernel.org |
8926 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.git | ||
8905 | S: Supported | 8927 | S: Supported |
8906 | F: arch/hexagon/ | 8928 | F: arch/hexagon/ |
8907 | 8929 | ||
@@ -9100,6 +9122,7 @@ F: drivers/phy/phy-rcar-gen3-usb2.c | |||
9100 | 9122 | ||
9101 | RESET CONTROLLER FRAMEWORK | 9123 | RESET CONTROLLER FRAMEWORK |
9102 | M: Philipp Zabel <p.zabel@pengutronix.de> | 9124 | M: Philipp Zabel <p.zabel@pengutronix.de> |
9125 | T: git git://git.pengutronix.de/git/pza/linux | ||
9103 | S: Maintained | 9126 | S: Maintained |
9104 | F: drivers/reset/ | 9127 | F: drivers/reset/ |
9105 | F: Documentation/devicetree/bindings/reset/ | 9128 | F: Documentation/devicetree/bindings/reset/ |
@@ -9247,6 +9270,7 @@ M: Martin Schwidefsky <schwidefsky@de.ibm.com> | |||
9247 | M: Heiko Carstens <heiko.carstens@de.ibm.com> | 9270 | M: Heiko Carstens <heiko.carstens@de.ibm.com> |
9248 | L: linux-s390@vger.kernel.org | 9271 | L: linux-s390@vger.kernel.org |
9249 | W: http://www.ibm.com/developerworks/linux/linux390/ | 9272 | W: http://www.ibm.com/developerworks/linux/linux390/ |
9273 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git | ||
9250 | S: Supported | 9274 | S: Supported |
9251 | F: arch/s390/ | 9275 | F: arch/s390/ |
9252 | F: drivers/s390/ | 9276 | F: drivers/s390/ |
@@ -9439,7 +9463,7 @@ M: Lukasz Majewski <l.majewski@samsung.com> | |||
9439 | L: linux-pm@vger.kernel.org | 9463 | L: linux-pm@vger.kernel.org |
9440 | L: linux-samsung-soc@vger.kernel.org | 9464 | L: linux-samsung-soc@vger.kernel.org |
9441 | S: Supported | 9465 | S: Supported |
9442 | T: https://github.com/lmajewski/linux-samsung-thermal.git | 9466 | T: git https://github.com/lmajewski/linux-samsung-thermal.git |
9443 | F: drivers/thermal/samsung/ | 9467 | F: drivers/thermal/samsung/ |
9444 | 9468 | ||
9445 | SAMSUNG USB2 PHY DRIVER | 9469 | SAMSUNG USB2 PHY DRIVER |
@@ -10092,6 +10116,7 @@ F: drivers/media/pci/solo6x10/ | |||
10092 | 10116 | ||
10093 | SOFTWARE RAID (Multiple Disks) SUPPORT | 10117 | SOFTWARE RAID (Multiple Disks) SUPPORT |
10094 | L: linux-raid@vger.kernel.org | 10118 | L: linux-raid@vger.kernel.org |
10119 | T: git git://neil.brown.name/md | ||
10095 | S: Supported | 10120 | S: Supported |
10096 | F: drivers/md/ | 10121 | F: drivers/md/ |
10097 | F: include/linux/raid/ | 10122 | F: include/linux/raid/ |
@@ -10263,6 +10288,7 @@ SQUASHFS FILE SYSTEM | |||
10263 | M: Phillip Lougher <phillip@squashfs.org.uk> | 10288 | M: Phillip Lougher <phillip@squashfs.org.uk> |
10264 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) | 10289 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) |
10265 | W: http://squashfs.org.uk | 10290 | W: http://squashfs.org.uk |
10291 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/pkl/squashfs-next.git | ||
10266 | S: Maintained | 10292 | S: Maintained |
10267 | F: Documentation/filesystems/squashfs.txt | 10293 | F: Documentation/filesystems/squashfs.txt |
10268 | F: fs/squashfs/ | 10294 | F: fs/squashfs/ |
@@ -10459,6 +10485,7 @@ F: arch/x86/boot/video* | |||
10459 | SWIOTLB SUBSYSTEM | 10485 | SWIOTLB SUBSYSTEM |
10460 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 10486 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
10461 | L: linux-kernel@vger.kernel.org | 10487 | L: linux-kernel@vger.kernel.org |
10488 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git | ||
10462 | S: Supported | 10489 | S: Supported |
10463 | F: lib/swiotlb.c | 10490 | F: lib/swiotlb.c |
10464 | F: arch/*/kernel/pci-swiotlb.c | 10491 | F: arch/*/kernel/pci-swiotlb.c |
@@ -10722,6 +10749,7 @@ TENSILICA XTENSA PORT (xtensa) | |||
10722 | M: Chris Zankel <chris@zankel.net> | 10749 | M: Chris Zankel <chris@zankel.net> |
10723 | M: Max Filippov <jcmvbkbc@gmail.com> | 10750 | M: Max Filippov <jcmvbkbc@gmail.com> |
10724 | L: linux-xtensa@linux-xtensa.org | 10751 | L: linux-xtensa@linux-xtensa.org |
10752 | T: git git://github.com/czankel/xtensa-linux.git | ||
10725 | S: Maintained | 10753 | S: Maintained |
10726 | F: arch/xtensa/ | 10754 | F: arch/xtensa/ |
10727 | F: drivers/irqchip/irq-xtensa-* | 10755 | F: drivers/irqchip/irq-xtensa-* |
@@ -11004,7 +11032,7 @@ R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> | |||
11004 | W: http://tpmdd.sourceforge.net | 11032 | W: http://tpmdd.sourceforge.net |
11005 | L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers) | 11033 | L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers) |
11006 | Q: git git://github.com/PeterHuewe/linux-tpmdd.git | 11034 | Q: git git://github.com/PeterHuewe/linux-tpmdd.git |
11007 | T: https://github.com/PeterHuewe/linux-tpmdd | 11035 | T: git https://github.com/PeterHuewe/linux-tpmdd |
11008 | S: Maintained | 11036 | S: Maintained |
11009 | F: drivers/char/tpm/ | 11037 | F: drivers/char/tpm/ |
11010 | 11038 | ||
@@ -11461,6 +11489,7 @@ M: Richard Weinberger <richard@nod.at> | |||
11461 | L: user-mode-linux-devel@lists.sourceforge.net | 11489 | L: user-mode-linux-devel@lists.sourceforge.net |
11462 | L: user-mode-linux-user@lists.sourceforge.net | 11490 | L: user-mode-linux-user@lists.sourceforge.net |
11463 | W: http://user-mode-linux.sourceforge.net | 11491 | W: http://user-mode-linux.sourceforge.net |
11492 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git | ||
11464 | S: Maintained | 11493 | S: Maintained |
11465 | F: Documentation/virtual/uml/ | 11494 | F: Documentation/virtual/uml/ |
11466 | F: arch/um/ | 11495 | F: arch/um/ |
@@ -11507,6 +11536,7 @@ F: fs/fat/ | |||
11507 | VFIO DRIVER | 11536 | VFIO DRIVER |
11508 | M: Alex Williamson <alex.williamson@redhat.com> | 11537 | M: Alex Williamson <alex.williamson@redhat.com> |
11509 | L: kvm@vger.kernel.org | 11538 | L: kvm@vger.kernel.org |
11539 | T: git git://github.com/awilliam/linux-vfio.git | ||
11510 | S: Maintained | 11540 | S: Maintained |
11511 | F: Documentation/vfio.txt | 11541 | F: Documentation/vfio.txt |
11512 | F: drivers/vfio/ | 11542 | F: drivers/vfio/ |
@@ -11576,6 +11606,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com> | |||
11576 | L: kvm@vger.kernel.org | 11606 | L: kvm@vger.kernel.org |
11577 | L: virtualization@lists.linux-foundation.org | 11607 | L: virtualization@lists.linux-foundation.org |
11578 | L: netdev@vger.kernel.org | 11608 | L: netdev@vger.kernel.org |
11609 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git | ||
11579 | S: Maintained | 11610 | S: Maintained |
11580 | F: drivers/vhost/ | 11611 | F: drivers/vhost/ |
11581 | F: include/uapi/linux/vhost.h | 11612 | F: include/uapi/linux/vhost.h |
@@ -11992,7 +12023,7 @@ M: Dave Chinner <david@fromorbit.com> | |||
11992 | M: xfs@oss.sgi.com | 12023 | M: xfs@oss.sgi.com |
11993 | L: xfs@oss.sgi.com | 12024 | L: xfs@oss.sgi.com |
11994 | W: http://oss.sgi.com/projects/xfs | 12025 | W: http://oss.sgi.com/projects/xfs |
11995 | T: git git://oss.sgi.com/xfs/xfs.git | 12026 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git |
11996 | S: Supported | 12027 | S: Supported |
11997 | F: Documentation/filesystems/xfs.txt | 12028 | F: Documentation/filesystems/xfs.txt |
11998 | F: fs/xfs/ | 12029 | F: fs/xfs/ |
@@ -411,7 +411,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE | |||
411 | export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS | 411 | export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS |
412 | 412 | ||
413 | export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS | 413 | export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS |
414 | export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN | 414 | export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN CFLAGS_UBSAN |
415 | export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE | 415 | export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE |
416 | export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE | 416 | export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE |
417 | export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL | 417 | export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL |
@@ -784,6 +784,7 @@ endif | |||
784 | 784 | ||
785 | include scripts/Makefile.kasan | 785 | include scripts/Makefile.kasan |
786 | include scripts/Makefile.extrawarn | 786 | include scripts/Makefile.extrawarn |
787 | include scripts/Makefile.ubsan | ||
787 | 788 | ||
788 | # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the | 789 | # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the |
789 | # last assignments | 790 | # last assignments |
diff --git a/arch/Kconfig b/arch/Kconfig index ba1b626bca00..f6b649d88ec8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG | |||
205 | config HAVE_ARCH_TRACEHOOK | 205 | config HAVE_ARCH_TRACEHOOK |
206 | bool | 206 | bool |
207 | 207 | ||
208 | config HAVE_DMA_ATTRS | ||
209 | bool | ||
210 | |||
211 | config HAVE_DMA_CONTIGUOUS | 208 | config HAVE_DMA_CONTIGUOUS |
212 | bool | 209 | bool |
213 | 210 | ||
@@ -632,4 +629,7 @@ config OLD_SIGACTION | |||
632 | config COMPAT_OLD_SIGACTION | 629 | config COMPAT_OLD_SIGACTION |
633 | bool | 630 | bool |
634 | 631 | ||
632 | config ARCH_NO_COHERENT_DMA_MMAP | ||
633 | bool | ||
634 | |||
635 | source "kernel/gcov/Kconfig" | 635 | source "kernel/gcov/Kconfig" |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index f515a4dbf7a0..9d8a85801ed1 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
@@ -9,7 +9,6 @@ config ALPHA | |||
9 | select HAVE_OPROFILE | 9 | select HAVE_OPROFILE |
10 | select HAVE_PCSPKR_PLATFORM | 10 | select HAVE_PCSPKR_PLATFORM |
11 | select HAVE_PERF_EVENTS | 11 | select HAVE_PERF_EVENTS |
12 | select HAVE_DMA_ATTRS | ||
13 | select VIRT_TO_BUS | 12 | select VIRT_TO_BUS |
14 | select GENERIC_IRQ_PROBE | 13 | select GENERIC_IRQ_PROBE |
15 | select AUTO_IRQ_AFFINITY if SMP | 14 | select AUTO_IRQ_AFFINITY if SMP |
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 72a8ca7796d9..3c3451f58ff4 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h | |||
@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
10 | return dma_ops; | 10 | return dma_ops; |
11 | } | 11 | } |
12 | 12 | ||
13 | #include <asm-generic/dma-mapping-common.h> | ||
14 | |||
15 | #define dma_cache_sync(dev, va, size, dir) ((void)0) | 13 | #define dma_cache_sync(dev, va, size, dir) ((void)0) |
16 | 14 | ||
17 | #endif /* _ALPHA_DMA_MAPPING_H */ | 15 | #endif /* _ALPHA_DMA_MAPPING_H */ |
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index ab336c06153e..fec1947b8dbc 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h | |||
@@ -47,7 +47,6 @@ | |||
47 | #define MADV_WILLNEED 3 /* will need these pages */ | 47 | #define MADV_WILLNEED 3 /* will need these pages */ |
48 | #define MADV_SPACEAVAIL 5 /* ensure resources are available */ | 48 | #define MADV_SPACEAVAIL 5 /* ensure resources are available */ |
49 | #define MADV_DONTNEED 6 /* don't need these pages */ | 49 | #define MADV_DONTNEED 6 /* don't need these pages */ |
50 | #define MADV_FREE 7 /* free pages only if memory pressure */ | ||
51 | 50 | ||
52 | /* common/generic parameters */ | 51 | /* common/generic parameters */ |
53 | #define MADV_FREE 8 /* free pages only if memory pressure */ | 52 | #define MADV_FREE 8 /* free pages only if memory pressure */ |
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 2d28ba939d8e..660205414f1d 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h | |||
@@ -11,192 +11,11 @@ | |||
11 | #ifndef ASM_ARC_DMA_MAPPING_H | 11 | #ifndef ASM_ARC_DMA_MAPPING_H |
12 | #define ASM_ARC_DMA_MAPPING_H | 12 | #define ASM_ARC_DMA_MAPPING_H |
13 | 13 | ||
14 | #include <asm-generic/dma-coherent.h> | 14 | extern struct dma_map_ops arc_dma_ops; |
15 | #include <asm/cacheflush.h> | ||
16 | 15 | ||
17 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | 16 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
18 | dma_addr_t *dma_handle, gfp_t gfp); | ||
19 | |||
20 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
21 | dma_addr_t dma_handle); | ||
22 | |||
23 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
24 | dma_addr_t *dma_handle, gfp_t gfp); | ||
25 | |||
26 | void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, | ||
27 | dma_addr_t dma_handle); | ||
28 | |||
29 | /* drivers/base/dma-mapping.c */ | ||
30 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
31 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
32 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
33 | void *cpu_addr, dma_addr_t dma_addr, | ||
34 | size_t size); | ||
35 | |||
36 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
37 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
38 | |||
39 | /* | ||
40 | * streaming DMA Mapping API... | ||
41 | * CPU accesses page via normal paddr, thus needs to explicitly made | ||
42 | * consistent before each use | ||
43 | */ | ||
44 | |||
45 | static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size, | ||
46 | enum dma_data_direction dir) | ||
47 | { | ||
48 | switch (dir) { | ||
49 | case DMA_FROM_DEVICE: | ||
50 | dma_cache_inv(paddr, size); | ||
51 | break; | ||
52 | case DMA_TO_DEVICE: | ||
53 | dma_cache_wback(paddr, size); | ||
54 | break; | ||
55 | case DMA_BIDIRECTIONAL: | ||
56 | dma_cache_wback_inv(paddr, size); | ||
57 | break; | ||
58 | default: | ||
59 | pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | void __arc_dma_cache_sync(unsigned long paddr, size_t size, | ||
64 | enum dma_data_direction dir); | ||
65 | |||
66 | #define _dma_cache_sync(addr, sz, dir) \ | ||
67 | do { \ | ||
68 | if (__builtin_constant_p(dir)) \ | ||
69 | __inline_dma_cache_sync(addr, sz, dir); \ | ||
70 | else \ | ||
71 | __arc_dma_cache_sync(addr, sz, dir); \ | ||
72 | } \ | ||
73 | while (0); | ||
74 | |||
75 | static inline dma_addr_t | ||
76 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
77 | enum dma_data_direction dir) | ||
78 | { | ||
79 | _dma_cache_sync((unsigned long)cpu_addr, size, dir); | ||
80 | return (dma_addr_t)cpu_addr; | ||
81 | } | ||
82 | |||
83 | static inline void | ||
84 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
85 | size_t size, enum dma_data_direction dir) | ||
86 | { | ||
87 | } | ||
88 | |||
89 | static inline dma_addr_t | ||
90 | dma_map_page(struct device *dev, struct page *page, | ||
91 | unsigned long offset, size_t size, | ||
92 | enum dma_data_direction dir) | ||
93 | { | ||
94 | unsigned long paddr = page_to_phys(page) + offset; | ||
95 | return dma_map_single(dev, (void *)paddr, size, dir); | ||
96 | } | ||
97 | |||
98 | static inline void | ||
99 | dma_unmap_page(struct device *dev, dma_addr_t dma_handle, | ||
100 | size_t size, enum dma_data_direction dir) | ||
101 | { | ||
102 | } | ||
103 | |||
104 | static inline int | ||
105 | dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
106 | int nents, enum dma_data_direction dir) | ||
107 | { | ||
108 | struct scatterlist *s; | ||
109 | int i; | ||
110 | |||
111 | for_each_sg(sg, s, nents, i) | ||
112 | s->dma_address = dma_map_page(dev, sg_page(s), s->offset, | ||
113 | s->length, dir); | ||
114 | |||
115 | return nents; | ||
116 | } | ||
117 | |||
118 | static inline void | ||
119 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
120 | int nents, enum dma_data_direction dir) | ||
121 | { | 17 | { |
122 | struct scatterlist *s; | 18 | return &arc_dma_ops; |
123 | int i; | ||
124 | |||
125 | for_each_sg(sg, s, nents, i) | ||
126 | dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); | ||
127 | } | ||
128 | |||
129 | static inline void | ||
130 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
131 | size_t size, enum dma_data_direction dir) | ||
132 | { | ||
133 | _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); | ||
134 | } | ||
135 | |||
136 | static inline void | ||
137 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
138 | size_t size, enum dma_data_direction dir) | ||
139 | { | ||
140 | _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); | ||
141 | } | ||
142 | |||
143 | static inline void | ||
144 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
145 | unsigned long offset, size_t size, | ||
146 | enum dma_data_direction direction) | ||
147 | { | ||
148 | _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE); | ||
149 | } | ||
150 | |||
151 | static inline void | ||
152 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
153 | unsigned long offset, size_t size, | ||
154 | enum dma_data_direction direction) | ||
155 | { | ||
156 | _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE); | ||
157 | } | ||
158 | |||
159 | static inline void | ||
160 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, | ||
161 | enum dma_data_direction dir) | ||
162 | { | ||
163 | int i; | ||
164 | struct scatterlist *sg; | ||
165 | |||
166 | for_each_sg(sglist, sg, nelems, i) | ||
167 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
168 | } | ||
169 | |||
170 | static inline void | ||
171 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||
172 | int nelems, enum dma_data_direction dir) | ||
173 | { | ||
174 | int i; | ||
175 | struct scatterlist *sg; | ||
176 | |||
177 | for_each_sg(sglist, sg, nelems, i) | ||
178 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
179 | } | ||
180 | |||
181 | static inline int dma_supported(struct device *dev, u64 dma_mask) | ||
182 | { | ||
183 | /* Support 32 bit DMA mask exclusively */ | ||
184 | return dma_mask == DMA_BIT_MASK(32); | ||
185 | } | ||
186 | |||
187 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
188 | { | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
193 | { | ||
194 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
195 | return -EIO; | ||
196 | |||
197 | *dev->dma_mask = dma_mask; | ||
198 | |||
199 | return 0; | ||
200 | } | 19 | } |
201 | 20 | ||
202 | #endif | 21 | #endif |
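
With the open-coded helpers gone, ARC's streaming DMA entry points are reached through the generic wrappers, which look up the device's dma_map_ops (here always &arc_dma_ops) and call through it. The sketch below is a simplified approximation of that dispatch from the era's common wrapper, not verbatim kernel code:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Approximate shape of the generic single-buffer wrapper: translate the
 * virtual address into page + offset and hand it to the arch's ->map_page().
 */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* &arc_dma_ops on ARC */

	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}
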
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 29a46bb198cc..01eaf88bf821 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c | |||
@@ -17,18 +17,14 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/dma-debug.h> | ||
21 | #include <linux/export.h> | ||
22 | #include <asm/cache.h> | 20 | #include <asm/cache.h> |
23 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
24 | 22 | ||
25 | /* | 23 | |
26 | * Helpers for Coherent DMA API. | 24 | static void *arc_dma_alloc(struct device *dev, size_t size, |
27 | */ | 25 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) |
28 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
29 | dma_addr_t *dma_handle, gfp_t gfp) | ||
30 | { | 26 | { |
31 | void *paddr; | 27 | void *paddr, *kvaddr; |
32 | 28 | ||
33 | /* This is linear addr (0x8000_0000 based) */ | 29 | /* This is linear addr (0x8000_0000 based) */ |
34 | paddr = alloc_pages_exact(size, gfp); | 30 | paddr = alloc_pages_exact(size, gfp); |
@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, | |||
38 | /* This is bus address, platform dependent */ | 34 | /* This is bus address, platform dependent */ |
39 | *dma_handle = (dma_addr_t)paddr; | 35 | *dma_handle = (dma_addr_t)paddr; |
40 | 36 | ||
41 | return paddr; | ||
42 | } | ||
43 | EXPORT_SYMBOL(dma_alloc_noncoherent); | ||
44 | |||
45 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
46 | dma_addr_t dma_handle) | ||
47 | { | ||
48 | free_pages_exact((void *)dma_handle, size); | ||
49 | } | ||
50 | EXPORT_SYMBOL(dma_free_noncoherent); | ||
51 | |||
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
53 | dma_addr_t *dma_handle, gfp_t gfp) | ||
54 | { | ||
55 | void *paddr, *kvaddr; | ||
56 | |||
57 | /* | 37 | /* |
58 | * IOC relies on all data (even coherent DMA data) being in cache | 38 | * IOC relies on all data (even coherent DMA data) being in cache |
59 | * Thus allocate normal cached memory | 39 | * Thus allocate normal cached memory |
@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
65 | * -For coherent data, Read/Write to buffers terminate early in cache | 45 | * -For coherent data, Read/Write to buffers terminate early in cache |
66 | * (vs. always going to memory - thus are faster) | 46 | * (vs. always going to memory - thus are faster) |
67 | */ | 47 | */ |
68 | if (is_isa_arcv2() && ioc_exists) | 48 | if ((is_isa_arcv2() && ioc_exists) || |
69 | return dma_alloc_noncoherent(dev, size, dma_handle, gfp); | 49 | dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) |
70 | 50 | return paddr; | |
71 | /* This is linear addr (0x8000_0000 based) */ | ||
72 | paddr = alloc_pages_exact(size, gfp); | ||
73 | if (!paddr) | ||
74 | return NULL; | ||
75 | 51 | ||
76 | /* This is kernel Virtual address (0x7000_0000 based) */ | 52 | /* This is kernel Virtual address (0x7000_0000 based) */ |
77 | kvaddr = ioremap_nocache((unsigned long)paddr, size); | 53 | kvaddr = ioremap_nocache((unsigned long)paddr, size); |
78 | if (kvaddr == NULL) | 54 | if (kvaddr == NULL) |
79 | return NULL; | 55 | return NULL; |
80 | 56 | ||
81 | /* This is bus address, platform dependent */ | ||
82 | *dma_handle = (dma_addr_t)paddr; | ||
83 | |||
84 | /* | 57 | /* |
85 | * Evict any existing L1 and/or L2 lines for the backing page | 58 | * Evict any existing L1 and/or L2 lines for the backing page |
86 | * in case it was used earlier as a normal "cached" page. | 59 | * in case it was used earlier as a normal "cached" page. |
@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
95 | 68 | ||
96 | return kvaddr; | 69 | return kvaddr; |
97 | } | 70 | } |
98 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
99 | 71 | ||
100 | void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, | 72 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, |
101 | dma_addr_t dma_handle) | 73 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
102 | { | 74 | { |
103 | if (is_isa_arcv2() && ioc_exists) | 75 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) && |
104 | return dma_free_noncoherent(dev, size, kvaddr, dma_handle); | 76 | !(is_isa_arcv2() && ioc_exists)) |
105 | 77 | iounmap((void __force __iomem *)vaddr); | |
106 | iounmap((void __force __iomem *)kvaddr); | ||
107 | 78 | ||
108 | free_pages_exact((void *)dma_handle, size); | 79 | free_pages_exact((void *)dma_handle, size); |
109 | } | 80 | } |
110 | EXPORT_SYMBOL(dma_free_coherent); | ||
111 | 81 | ||
112 | /* | 82 | /* |
113 | * Helper for streaming DMA... | 83 | * streaming DMA Mapping API... |
84 | * CPU accesses page via normal paddr, thus needs to explicitly made | ||
85 | * consistent before each use | ||
114 | */ | 86 | */ |
115 | void __arc_dma_cache_sync(unsigned long paddr, size_t size, | 87 | static void _dma_cache_sync(unsigned long paddr, size_t size, |
116 | enum dma_data_direction dir) | 88 | enum dma_data_direction dir) |
89 | { | ||
90 | switch (dir) { | ||
91 | case DMA_FROM_DEVICE: | ||
92 | dma_cache_inv(paddr, size); | ||
93 | break; | ||
94 | case DMA_TO_DEVICE: | ||
95 | dma_cache_wback(paddr, size); | ||
96 | break; | ||
97 | case DMA_BIDIRECTIONAL: | ||
98 | dma_cache_wback_inv(paddr, size); | ||
99 | break; | ||
100 | default: | ||
101 | pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | ||
106 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
107 | struct dma_attrs *attrs) | ||
108 | { | ||
109 | unsigned long paddr = page_to_phys(page) + offset; | ||
110 | _dma_cache_sync(paddr, size, dir); | ||
111 | return (dma_addr_t)paddr; | ||
112 | } | ||
113 | |||
114 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
115 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | ||
116 | { | ||
117 | struct scatterlist *s; | ||
118 | int i; | ||
119 | |||
120 | for_each_sg(sg, s, nents, i) | ||
121 | s->dma_address = dma_map_page(dev, sg_page(s), s->offset, | ||
122 | s->length, dir); | ||
123 | |||
124 | return nents; | ||
125 | } | ||
126 | |||
127 | static void arc_dma_sync_single_for_cpu(struct device *dev, | ||
128 | dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) | ||
129 | { | ||
130 | _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); | ||
131 | } | ||
132 | |||
133 | static void arc_dma_sync_single_for_device(struct device *dev, | ||
134 | dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) | ||
117 | { | 135 | { |
118 | __inline_dma_cache_sync(paddr, size, dir); | 136 | _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); |
119 | } | 137 | } |
120 | EXPORT_SYMBOL(__arc_dma_cache_sync); | 138 | |
139 | static void arc_dma_sync_sg_for_cpu(struct device *dev, | ||
140 | struct scatterlist *sglist, int nelems, | ||
141 | enum dma_data_direction dir) | ||
142 | { | ||
143 | int i; | ||
144 | struct scatterlist *sg; | ||
145 | |||
146 | for_each_sg(sglist, sg, nelems, i) | ||
147 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
148 | } | ||
149 | |||
150 | static void arc_dma_sync_sg_for_device(struct device *dev, | ||
151 | struct scatterlist *sglist, int nelems, | ||
152 | enum dma_data_direction dir) | ||
153 | { | ||
154 | int i; | ||
155 | struct scatterlist *sg; | ||
156 | |||
157 | for_each_sg(sglist, sg, nelems, i) | ||
158 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
159 | } | ||
160 | |||
161 | static int arc_dma_supported(struct device *dev, u64 dma_mask) | ||
162 | { | ||
163 | /* Support 32 bit DMA mask exclusively */ | ||
164 | return dma_mask == DMA_BIT_MASK(32); | ||
165 | } | ||
166 | |||
167 | struct dma_map_ops arc_dma_ops = { | ||
168 | .alloc = arc_dma_alloc, | ||
169 | .free = arc_dma_free, | ||
170 | .map_page = arc_dma_map_page, | ||
171 | .map_sg = arc_dma_map_sg, | ||
172 | .sync_single_for_device = arc_dma_sync_single_for_device, | ||
173 | .sync_single_for_cpu = arc_dma_sync_single_for_cpu, | ||
174 | .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu, | ||
175 | .sync_sg_for_device = arc_dma_sync_sg_for_device, | ||
176 | .dma_supported = arc_dma_supported, | ||
177 | }; | ||
178 | EXPORT_SYMBOL(arc_dma_ops); | ||
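With the architecture reduced to a dma_map_ops table such as arc_dma_ops above, drivers keep calling the usual dma_map_single()/dma_map_page() helpers; the generic dma-mapping layer provides those once and dispatches through get_dma_ops(). A simplified sketch of that dispatch (not the exact kernel source; dma-debug hooks and sanity checks omitted):

    static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
            size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
    {
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* every converted arch supplies .map_page, so map_single derives from it */
        return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
                             size, dir, attrs);
    }

    #define dma_map_single(d, p, s, r) dma_map_single_attrs(d, p, s, r, NULL)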
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 37c7951ca4f5..4f799e567fc8 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -47,7 +47,6 @@ config ARM | |||
47 | select HAVE_C_RECORDMCOUNT | 47 | select HAVE_C_RECORDMCOUNT |
48 | select HAVE_DEBUG_KMEMLEAK | 48 | select HAVE_DEBUG_KMEMLEAK |
49 | select HAVE_DMA_API_DEBUG | 49 | select HAVE_DMA_API_DEBUG |
50 | select HAVE_DMA_ATTRS | ||
51 | select HAVE_DMA_CONTIGUOUS if MMU | 50 | select HAVE_DMA_CONTIGUOUS if MMU |
52 | select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU | 51 | select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU |
53 | select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU | 52 | select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index ccb3aa64640d..6ad1ceda62a5 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) | |||
41 | #define HAVE_ARCH_DMA_SUPPORTED 1 | 41 | #define HAVE_ARCH_DMA_SUPPORTED 1 |
42 | extern int dma_supported(struct device *dev, u64 mask); | 42 | extern int dma_supported(struct device *dev, u64 mask); |
43 | 43 | ||
44 | /* | ||
45 | * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent | ||
46 | * implementations, we don't provide a dma_cache_sync function so drivers using | ||
47 | * this API are highlighted with build warnings. | ||
48 | */ | ||
49 | #include <asm-generic/dma-mapping-common.h> | ||
50 | |||
51 | #ifdef __arch_page_to_dma | 44 | #ifdef __arch_page_to_dma |
52 | #error Please update to __arch_pfn_to_dma | 45 | #error Please update to __arch_pfn_to_dma |
53 | #endif | 46 | #endif |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6be3fa2310ee..8cc62289a63e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -64,7 +64,6 @@ config ARM64 | |||
64 | select HAVE_DEBUG_BUGVERBOSE | 64 | select HAVE_DEBUG_BUGVERBOSE |
65 | select HAVE_DEBUG_KMEMLEAK | 65 | select HAVE_DEBUG_KMEMLEAK |
66 | select HAVE_DMA_API_DEBUG | 66 | select HAVE_DMA_API_DEBUG |
67 | select HAVE_DMA_ATTRS | ||
68 | select HAVE_DMA_CONTIGUOUS | 67 | select HAVE_DMA_CONTIGUOUS |
69 | select HAVE_DYNAMIC_FTRACE | 68 | select HAVE_DYNAMIC_FTRACE |
70 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 69 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 61e08f360e31..ba437f090a74 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h | |||
@@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev) | |||
64 | return dev->archdata.dma_coherent; | 64 | return dev->archdata.dma_coherent; |
65 | } | 65 | } |
66 | 66 | ||
67 | #include <asm-generic/dma-mapping-common.h> | ||
68 | |||
69 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 67 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
70 | { | 68 | { |
71 | return (dma_addr_t)paddr; | 69 | return (dma_addr_t)paddr; |
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index ae7ac9205d20..1115f2a645d1 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h | |||
@@ -1,350 +1,14 @@ | |||
1 | #ifndef __ASM_AVR32_DMA_MAPPING_H | 1 | #ifndef __ASM_AVR32_DMA_MAPPING_H |
2 | #define __ASM_AVR32_DMA_MAPPING_H | 2 | #define __ASM_AVR32_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/mm.h> | ||
5 | #include <linux/device.h> | ||
6 | #include <linux/scatterlist.h> | ||
7 | #include <asm/processor.h> | ||
8 | #include <asm/cacheflush.h> | ||
9 | #include <asm/io.h> | ||
10 | |||
11 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 4 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
12 | int direction); | 5 | int direction); |
13 | 6 | ||
14 | /* | 7 | extern struct dma_map_ops avr32_dma_ops; |
15 | * Return whether the given device DMA address mask can be supported | ||
16 | * properly. For example, if your device can only drive the low 24-bits | ||
17 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
18 | * to this function. | ||
19 | */ | ||
20 | static inline int dma_supported(struct device *dev, u64 mask) | ||
21 | { | ||
22 | /* Fix when needed. I really don't know of any limitations */ | ||
23 | return 1; | ||
24 | } | ||
25 | |||
26 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
27 | { | ||
28 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
29 | return -EIO; | ||
30 | |||
31 | *dev->dma_mask = dma_mask; | ||
32 | return 0; | ||
33 | } | ||
34 | 8 | ||
35 | /* | 9 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
36 | * dma_map_single can't fail as it is implemented now. | ||
37 | */ | ||
38 | static inline int dma_mapping_error(struct device *dev, dma_addr_t addr) | ||
39 | { | 10 | { |
40 | return 0; | 11 | return &avr32_dma_ops; |
41 | } | 12 | } |
42 | 13 | ||
43 | /** | ||
44 | * dma_alloc_coherent - allocate consistent memory for DMA | ||
45 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
46 | * @size: required memory size | ||
47 | * @handle: bus-specific DMA address | ||
48 | * | ||
49 | * Allocate some uncached, unbuffered memory for a device for | ||
50 | * performing DMA. This function allocates pages, and will | ||
51 | * return the CPU-viewed address, and sets @handle to be the | ||
52 | * device-viewed address. | ||
53 | */ | ||
54 | extern void *dma_alloc_coherent(struct device *dev, size_t size, | ||
55 | dma_addr_t *handle, gfp_t gfp); | ||
56 | |||
57 | /** | ||
58 | * dma_free_coherent - free memory allocated by dma_alloc_coherent | ||
59 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
60 | * @size: size of memory originally requested in dma_alloc_coherent | ||
61 | * @cpu_addr: CPU-view address returned from dma_alloc_coherent | ||
62 | * @handle: device-view address returned from dma_alloc_coherent | ||
63 | * | ||
64 | * Free (and unmap) a DMA buffer previously allocated by | ||
65 | * dma_alloc_coherent(). | ||
66 | * | ||
67 | * References to memory and mappings associated with cpu_addr/handle | ||
68 | * during and after this call executing are illegal. | ||
69 | */ | ||
70 | extern void dma_free_coherent(struct device *dev, size_t size, | ||
71 | void *cpu_addr, dma_addr_t handle); | ||
72 | |||
73 | /** | ||
74 | * dma_alloc_writecombine - allocate write-combining memory for DMA | ||
75 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
76 | * @size: required memory size | ||
77 | * @handle: bus-specific DMA address | ||
78 | * | ||
79 | * Allocate some uncached, buffered memory for a device for | ||
80 | * performing DMA. This function allocates pages, and will | ||
81 | * return the CPU-viewed address, and sets @handle to be the | ||
82 | * device-viewed address. | ||
83 | */ | ||
84 | extern void *dma_alloc_writecombine(struct device *dev, size_t size, | ||
85 | dma_addr_t *handle, gfp_t gfp); | ||
86 | |||
87 | /** | ||
88 | * dma_free_coherent - free memory allocated by dma_alloc_writecombine | ||
89 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
90 | * @size: size of memory originally requested in dma_alloc_writecombine | ||
91 | * @cpu_addr: CPU-view address returned from dma_alloc_writecombine | ||
92 | * @handle: device-view address returned from dma_alloc_writecombine | ||
93 | * | ||
94 | * Free (and unmap) a DMA buffer previously allocated by | ||
95 | * dma_alloc_writecombine(). | ||
96 | * | ||
97 | * References to memory and mappings associated with cpu_addr/handle | ||
98 | * during and after this call executing are illegal. | ||
99 | */ | ||
100 | extern void dma_free_writecombine(struct device *dev, size_t size, | ||
101 | void *cpu_addr, dma_addr_t handle); | ||
102 | |||
103 | /** | ||
104 | * dma_map_single - map a single buffer for streaming DMA | ||
105 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
106 | * @cpu_addr: CPU direct mapped address of buffer | ||
107 | * @size: size of buffer to map | ||
108 | * @dir: DMA transfer direction | ||
109 | * | ||
110 | * Ensure that any data held in the cache is appropriately discarded | ||
111 | * or written back. | ||
112 | * | ||
113 | * The device owns this memory once this call has completed. The CPU | ||
114 | * can regain ownership by calling dma_unmap_single() or dma_sync_single(). | ||
115 | */ | ||
116 | static inline dma_addr_t | ||
117 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
118 | enum dma_data_direction direction) | ||
119 | { | ||
120 | dma_cache_sync(dev, cpu_addr, size, direction); | ||
121 | return virt_to_bus(cpu_addr); | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * dma_unmap_single - unmap a single buffer previously mapped | ||
126 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
127 | * @handle: DMA address of buffer | ||
128 | * @size: size of buffer to map | ||
129 | * @dir: DMA transfer direction | ||
130 | * | ||
131 | * Unmap a single streaming mode DMA translation. The handle and size | ||
132 | * must match what was provided in the previous dma_map_single() call. | ||
133 | * All other usages are undefined. | ||
134 | * | ||
135 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
136 | * whatever the device wrote there. | ||
137 | */ | ||
138 | static inline void | ||
139 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
140 | enum dma_data_direction direction) | ||
141 | { | ||
142 | |||
143 | } | ||
144 | |||
145 | /** | ||
146 | * dma_map_page - map a portion of a page for streaming DMA | ||
147 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
148 | * @page: page that buffer resides in | ||
149 | * @offset: offset into page for start of buffer | ||
150 | * @size: size of buffer to map | ||
151 | * @dir: DMA transfer direction | ||
152 | * | ||
153 | * Ensure that any data held in the cache is appropriately discarded | ||
154 | * or written back. | ||
155 | * | ||
156 | * The device owns this memory once this call has completed. The CPU | ||
157 | * can regain ownership by calling dma_unmap_page() or dma_sync_single(). | ||
158 | */ | ||
159 | static inline dma_addr_t | ||
160 | dma_map_page(struct device *dev, struct page *page, | ||
161 | unsigned long offset, size_t size, | ||
162 | enum dma_data_direction direction) | ||
163 | { | ||
164 | return dma_map_single(dev, page_address(page) + offset, | ||
165 | size, direction); | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() | ||
170 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
171 | * @handle: DMA address of buffer | ||
172 | * @size: size of buffer to map | ||
173 | * @dir: DMA transfer direction | ||
174 | * | ||
175 | * Unmap a single streaming mode DMA translation. The handle and size | ||
176 | * must match what was provided in the previous dma_map_single() call. | ||
177 | * All other usages are undefined. | ||
178 | * | ||
179 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
180 | * whatever the device wrote there. | ||
181 | */ | ||
182 | static inline void | ||
183 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
184 | enum dma_data_direction direction) | ||
185 | { | ||
186 | dma_unmap_single(dev, dma_address, size, direction); | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * dma_map_sg - map a set of SG buffers for streaming mode DMA | ||
191 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
192 | * @sg: list of buffers | ||
193 | * @nents: number of buffers to map | ||
194 | * @dir: DMA transfer direction | ||
195 | * | ||
196 | * Map a set of buffers described by scatterlist in streaming | ||
197 | * mode for DMA. This is the scatter-gather version of the | ||
198 | * above pci_map_single interface. Here the scatter gather list | ||
199 | * elements are each tagged with the appropriate dma address | ||
200 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
201 | * | ||
202 | * NOTE: An implementation may be able to use a smaller number of | ||
203 | * DMA address/length pairs than there are SG table elements. | ||
204 | * (for example via virtual mapping capabilities) | ||
205 | * The routine returns the number of addr/length pairs actually | ||
206 | * used, at most nents. | ||
207 | * | ||
208 | * Device ownership issues as mentioned above for pci_map_single are | ||
209 | * the same here. | ||
210 | */ | ||
211 | static inline int | ||
212 | dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
213 | enum dma_data_direction direction) | ||
214 | { | ||
215 | int i; | ||
216 | struct scatterlist *sg; | ||
217 | |||
218 | for_each_sg(sglist, sg, nents, i) { | ||
219 | char *virt; | ||
220 | |||
221 | sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset; | ||
222 | virt = sg_virt(sg); | ||
223 | dma_cache_sync(dev, virt, sg->length, direction); | ||
224 | } | ||
225 | |||
226 | return nents; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | ||
231 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
232 | * @sg: list of buffers | ||
233 | * @nents: number of buffers to map | ||
234 | * @dir: DMA transfer direction | ||
235 | * | ||
236 | * Unmap a set of streaming mode DMA translations. | ||
237 | * Again, CPU read rules concerning calls here are the same as for | ||
238 | * pci_unmap_single() above. | ||
239 | */ | ||
240 | static inline void | ||
241 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
242 | enum dma_data_direction direction) | ||
243 | { | ||
244 | |||
245 | } | ||
246 | |||
247 | /** | ||
248 | * dma_sync_single_for_cpu | ||
249 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
250 | * @handle: DMA address of buffer | ||
251 | * @size: size of buffer to map | ||
252 | * @dir: DMA transfer direction | ||
253 | * | ||
254 | * Make physical memory consistent for a single streaming mode DMA | ||
255 | * translation after a transfer. | ||
256 | * | ||
257 | * If you perform a dma_map_single() but wish to interrogate the | ||
258 | * buffer using the cpu, yet do not wish to teardown the DMA mapping, | ||
259 | * you must call this function before doing so. At the next point you | ||
260 | * give the DMA address back to the card, you must first perform a | ||
261 | * dma_sync_single_for_device, and then the device again owns the | ||
262 | * buffer. | ||
263 | */ | ||
264 | static inline void | ||
265 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
266 | size_t size, enum dma_data_direction direction) | ||
267 | { | ||
268 | /* | ||
269 | * No need to do anything since the CPU isn't supposed to | ||
270 | * touch this memory after we flushed it at mapping- or | ||
271 | * sync-for-device time. | ||
272 | */ | ||
273 | } | ||
274 | |||
275 | static inline void | ||
276 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
277 | size_t size, enum dma_data_direction direction) | ||
278 | { | ||
279 | dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction); | ||
280 | } | ||
281 | |||
282 | static inline void | ||
283 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
284 | unsigned long offset, size_t size, | ||
285 | enum dma_data_direction direction) | ||
286 | { | ||
287 | /* just sync everything, that's all the pci API can do */ | ||
288 | dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction); | ||
289 | } | ||
290 | |||
291 | static inline void | ||
292 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
293 | unsigned long offset, size_t size, | ||
294 | enum dma_data_direction direction) | ||
295 | { | ||
296 | /* just sync everything, that's all the pci API can do */ | ||
297 | dma_sync_single_for_device(dev, dma_handle, offset+size, direction); | ||
298 | } | ||
299 | |||
300 | /** | ||
301 | * dma_sync_sg_for_cpu | ||
302 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
303 | * @sg: list of buffers | ||
304 | * @nents: number of buffers to map | ||
305 | * @dir: DMA transfer direction | ||
306 | * | ||
307 | * Make physical memory consistent for a set of streaming | ||
308 | * mode DMA translations after a transfer. | ||
309 | * | ||
310 | * The same as dma_sync_single_for_* but for a scatter-gather list, | ||
311 | * same rules and usage. | ||
312 | */ | ||
313 | static inline void | ||
314 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
315 | int nents, enum dma_data_direction direction) | ||
316 | { | ||
317 | /* | ||
318 | * No need to do anything since the CPU isn't supposed to | ||
319 | * touch this memory after we flushed it at mapping- or | ||
320 | * sync-for-device time. | ||
321 | */ | ||
322 | } | ||
323 | |||
324 | static inline void | ||
325 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||
326 | int nents, enum dma_data_direction direction) | ||
327 | { | ||
328 | int i; | ||
329 | struct scatterlist *sg; | ||
330 | |||
331 | for_each_sg(sglist, sg, nents, i) | ||
332 | dma_cache_sync(dev, sg_virt(sg), sg->length, direction); | ||
333 | } | ||
334 | |||
335 | /* Now for the API extensions over the pci_ one */ | ||
336 | |||
337 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
338 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
339 | |||
340 | /* drivers/base/dma-mapping.c */ | ||
341 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
342 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
343 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
344 | void *cpu_addr, dma_addr_t dma_addr, | ||
345 | size_t size); | ||
346 | |||
347 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
348 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
349 | |||
350 | #endif /* __ASM_AVR32_DMA_MAPPING_H */ | 14 | #endif /* __ASM_AVR32_DMA_MAPPING_H */ |
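The dma_alloc_writecombine()/dma_free_writecombine() prototypes deleted above are not lost functionality: with dma_map_ops in place the write-combine case is expressed as a DMA attribute and routed through the ordinary .alloc callback, which is why avr32_dma_alloc() in the dma-coherent.c change that follows tests DMA_ATTR_WRITE_COMBINE. Roughly, assuming the dma-attrs API of this kernel version:

    static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                                               dma_addr_t *dma_handle, gfp_t gfp)
    {
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
    }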
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 50cdb5b10f0f..92cf1fb2b3e6 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c | |||
@@ -9,9 +9,14 @@ | |||
9 | #include <linux/dma-mapping.h> | 9 | #include <linux/dma-mapping.h> |
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
12 | #include <linux/mm.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/scatterlist.h> | ||
12 | 15 | ||
13 | #include <asm/addrspace.h> | 16 | #include <asm/processor.h> |
14 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/io.h> | ||
19 | #include <asm/addrspace.h> | ||
15 | 20 | ||
16 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction) | 21 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction) |
17 | { | 22 | { |
@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size, | |||
93 | __free_page(page++); | 98 | __free_page(page++); |
94 | } | 99 | } |
95 | 100 | ||
96 | void *dma_alloc_coherent(struct device *dev, size_t size, | 101 | static void *avr32_dma_alloc(struct device *dev, size_t size, |
97 | dma_addr_t *handle, gfp_t gfp) | 102 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) |
98 | { | 103 | { |
99 | struct page *page; | 104 | struct page *page; |
100 | void *ret = NULL; | 105 | dma_addr_t phys; |
101 | 106 | ||
102 | page = __dma_alloc(dev, size, handle, gfp); | 107 | page = __dma_alloc(dev, size, handle, gfp); |
103 | if (page) | 108 | if (!page) |
104 | ret = phys_to_uncached(page_to_phys(page)); | 109 | return NULL; |
110 | phys = page_to_phys(page); | ||
105 | 111 | ||
106 | return ret; | 112 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { |
113 | /* Now, map the page into P3 with write-combining turned on */ | ||
114 | *handle = phys; | ||
115 | return __ioremap(phys, size, _PAGE_BUFFER); | ||
116 | } else { | ||
117 | return phys_to_uncached(phys); | ||
118 | } | ||
107 | } | 119 | } |
108 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
109 | 120 | ||
110 | void dma_free_coherent(struct device *dev, size_t size, | 121 | static void avr32_dma_free(struct device *dev, size_t size, |
111 | void *cpu_addr, dma_addr_t handle) | 122 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) |
112 | { | 123 | { |
113 | void *addr = phys_to_cached(uncached_to_phys(cpu_addr)); | ||
114 | struct page *page; | 124 | struct page *page; |
115 | 125 | ||
116 | pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n", | 126 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { |
117 | cpu_addr, (unsigned long)handle, (unsigned)size); | 127 | iounmap(cpu_addr); |
118 | BUG_ON(!virt_addr_valid(addr)); | 128 | |
119 | page = virt_to_page(addr); | 129 | page = phys_to_page(handle); |
130 | } else { | ||
131 | void *addr = phys_to_cached(uncached_to_phys(cpu_addr)); | ||
132 | |||
133 | pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n", | ||
134 | cpu_addr, (unsigned long)handle, (unsigned)size); | ||
135 | |||
136 | BUG_ON(!virt_addr_valid(addr)); | ||
137 | page = virt_to_page(addr); | ||
138 | } | ||
139 | |||
120 | __dma_free(dev, size, page, handle); | 140 | __dma_free(dev, size, page, handle); |
121 | } | 141 | } |
122 | EXPORT_SYMBOL(dma_free_coherent); | ||
123 | 142 | ||
124 | void *dma_alloc_writecombine(struct device *dev, size_t size, | 143 | static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, |
125 | dma_addr_t *handle, gfp_t gfp) | 144 | unsigned long offset, size_t size, |
145 | enum dma_data_direction direction, struct dma_attrs *attrs) | ||
126 | { | 146 | { |
127 | struct page *page; | 147 | void *cpu_addr = page_address(page) + offset; |
128 | dma_addr_t phys; | ||
129 | 148 | ||
130 | page = __dma_alloc(dev, size, handle, gfp); | 149 | dma_cache_sync(dev, cpu_addr, size, direction); |
131 | if (!page) | 150 | return virt_to_bus(cpu_addr); |
132 | return NULL; | 151 | } |
133 | 152 | ||
134 | phys = page_to_phys(page); | 153 | static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
135 | *handle = phys; | 154 | int nents, enum dma_data_direction direction, |
155 | struct dma_attrs *attrs) | ||
156 | { | ||
157 | int i; | ||
158 | struct scatterlist *sg; | ||
159 | |||
160 | for_each_sg(sglist, sg, nents, i) { | ||
161 | char *virt; | ||
136 | 162 | ||
137 | /* Now, map the page into P3 with write-combining turned on */ | 163 | sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset; |
138 | return __ioremap(phys, size, _PAGE_BUFFER); | 164 | virt = sg_virt(sg); |
165 | dma_cache_sync(dev, virt, sg->length, direction); | ||
166 | } | ||
167 | |||
168 | return nents; | ||
139 | } | 169 | } |
140 | EXPORT_SYMBOL(dma_alloc_writecombine); | ||
141 | 170 | ||
142 | void dma_free_writecombine(struct device *dev, size_t size, | 171 | static void avr32_dma_sync_single_for_device(struct device *dev, |
143 | void *cpu_addr, dma_addr_t handle) | 172 | dma_addr_t dma_handle, size_t size, |
173 | enum dma_data_direction direction) | ||
144 | { | 174 | { |
145 | struct page *page; | 175 | dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction); |
176 | } | ||
146 | 177 | ||
147 | iounmap(cpu_addr); | 178 | static void avr32_dma_sync_sg_for_device(struct device *dev, |
179 | struct scatterlist *sglist, int nents, | ||
180 | enum dma_data_direction direction) | ||
181 | { | ||
182 | int i; | ||
183 | struct scatterlist *sg; | ||
148 | 184 | ||
149 | page = phys_to_page(handle); | 185 | for_each_sg(sglist, sg, nents, i) |
150 | __dma_free(dev, size, page, handle); | 186 | dma_cache_sync(dev, sg_virt(sg), sg->length, direction); |
151 | } | 187 | } |
152 | EXPORT_SYMBOL(dma_free_writecombine); | 188 | |
189 | struct dma_map_ops avr32_dma_ops = { | ||
190 | .alloc = avr32_dma_alloc, | ||
191 | .free = avr32_dma_free, | ||
192 | .map_page = avr32_dma_map_page, | ||
193 | .map_sg = avr32_dma_map_sg, | ||
194 | .sync_single_for_device = avr32_dma_sync_single_for_device, | ||
195 | .sync_sg_for_device = avr32_dma_sync_sg_for_device, | ||
196 | }; | ||
197 | EXPORT_SYMBOL(avr32_dma_ops); | ||
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index 054d9ec57d9d..3490570aaa82 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h | |||
@@ -8,36 +8,6 @@ | |||
8 | #define _BLACKFIN_DMA_MAPPING_H | 8 | #define _BLACKFIN_DMA_MAPPING_H |
9 | 9 | ||
10 | #include <asm/cacheflush.h> | 10 | #include <asm/cacheflush.h> |
11 | struct scatterlist; | ||
12 | |||
13 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
14 | dma_addr_t *dma_handle, gfp_t gfp); | ||
15 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
16 | dma_addr_t dma_handle); | ||
17 | |||
18 | /* | ||
19 | * Now for the API extensions over the pci_ one | ||
20 | */ | ||
21 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
22 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
23 | #define dma_supported(d, m) (1) | ||
24 | |||
25 | static inline int | ||
26 | dma_set_mask(struct device *dev, u64 dma_mask) | ||
27 | { | ||
28 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
29 | return -EIO; | ||
30 | |||
31 | *dev->dma_mask = dma_mask; | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static inline int | ||
37 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
38 | { | ||
39 | return 0; | ||
40 | } | ||
41 | 11 | ||
42 | extern void | 12 | extern void |
43 | __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir); | 13 | __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir); |
@@ -66,102 +36,11 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) | |||
66 | __dma_sync(addr, size, dir); | 36 | __dma_sync(addr, size, dir); |
67 | } | 37 | } |
68 | 38 | ||
69 | static inline dma_addr_t | 39 | extern struct dma_map_ops bfin_dma_ops; |
70 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
71 | enum dma_data_direction dir) | ||
72 | { | ||
73 | _dma_sync((dma_addr_t)ptr, size, dir); | ||
74 | return (dma_addr_t) ptr; | ||
75 | } | ||
76 | |||
77 | static inline dma_addr_t | ||
78 | dma_map_page(struct device *dev, struct page *page, | ||
79 | unsigned long offset, size_t size, | ||
80 | enum dma_data_direction dir) | ||
81 | { | ||
82 | return dma_map_single(dev, page_address(page) + offset, size, dir); | ||
83 | } | ||
84 | |||
85 | static inline void | ||
86 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
87 | enum dma_data_direction dir) | ||
88 | { | ||
89 | BUG_ON(!valid_dma_direction(dir)); | ||
90 | } | ||
91 | |||
92 | static inline void | ||
93 | dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
94 | enum dma_data_direction dir) | ||
95 | { | ||
96 | dma_unmap_single(dev, dma_addr, size, dir); | ||
97 | } | ||
98 | 40 | ||
99 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 41 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
100 | enum dma_data_direction dir); | ||
101 | |||
102 | static inline void | ||
103 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
104 | int nhwentries, enum dma_data_direction dir) | ||
105 | { | 42 | { |
106 | BUG_ON(!valid_dma_direction(dir)); | 43 | return &bfin_dma_ops; |
107 | } | 44 | } |
108 | 45 | ||
109 | static inline void | ||
110 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, | ||
111 | unsigned long offset, size_t size, | ||
112 | enum dma_data_direction dir) | ||
113 | { | ||
114 | BUG_ON(!valid_dma_direction(dir)); | ||
115 | } | ||
116 | |||
117 | static inline void | ||
118 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, | ||
119 | unsigned long offset, size_t size, | ||
120 | enum dma_data_direction dir) | ||
121 | { | ||
122 | _dma_sync(handle + offset, size, dir); | ||
123 | } | ||
124 | |||
125 | static inline void | ||
126 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, | ||
127 | enum dma_data_direction dir) | ||
128 | { | ||
129 | dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); | ||
130 | } | ||
131 | |||
132 | static inline void | ||
133 | dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, | ||
134 | enum dma_data_direction dir) | ||
135 | { | ||
136 | dma_sync_single_range_for_device(dev, handle, 0, size, dir); | ||
137 | } | ||
138 | |||
139 | static inline void | ||
140 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, | ||
141 | enum dma_data_direction dir) | ||
142 | { | ||
143 | BUG_ON(!valid_dma_direction(dir)); | ||
144 | } | ||
145 | |||
146 | extern void | ||
147 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
148 | int nents, enum dma_data_direction dir); | ||
149 | |||
150 | static inline void | ||
151 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
152 | enum dma_data_direction dir) | ||
153 | { | ||
154 | _dma_sync((dma_addr_t)vaddr, size, dir); | ||
155 | } | ||
156 | |||
157 | /* drivers/base/dma-mapping.c */ | ||
158 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
159 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
160 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
161 | void *cpu_addr, dma_addr_t dma_addr, | ||
162 | size_t size); | ||
163 | |||
164 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
165 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
166 | |||
167 | #endif /* _BLACKFIN_DMA_MAPPING_H */ | 46 | #endif /* _BLACKFIN_DMA_MAPPING_H */ |
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index df437e52d9df..771afe6e4264 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c | |||
@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages) | |||
78 | spin_unlock_irqrestore(&dma_page_lock, flags); | 78 | spin_unlock_irqrestore(&dma_page_lock, flags); |
79 | } | 79 | } |
80 | 80 | ||
81 | void *dma_alloc_coherent(struct device *dev, size_t size, | 81 | static void *bfin_dma_alloc(struct device *dev, size_t size, |
82 | dma_addr_t *dma_handle, gfp_t gfp) | 82 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) |
83 | { | 83 | { |
84 | void *ret; | 84 | void *ret; |
85 | 85 | ||
@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
92 | 92 | ||
93 | return ret; | 93 | return ret; |
94 | } | 94 | } |
95 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
96 | 95 | ||
97 | void | 96 | static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, |
98 | dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 97 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
99 | dma_addr_t dma_handle) | ||
100 | { | 98 | { |
101 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); | 99 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); |
102 | } | 100 | } |
103 | EXPORT_SYMBOL(dma_free_coherent); | ||
104 | 101 | ||
105 | /* | 102 | /* |
106 | * Streaming DMA mappings | 103 | * Streaming DMA mappings |
@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size, | |||
112 | } | 109 | } |
113 | EXPORT_SYMBOL(__dma_sync); | 110 | EXPORT_SYMBOL(__dma_sync); |
114 | 111 | ||
115 | int | 112 | static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, |
116 | dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, | 113 | int nents, enum dma_data_direction direction, |
117 | enum dma_data_direction direction) | 114 | struct dma_attrs *attrs) |
118 | { | 115 | { |
119 | struct scatterlist *sg; | 116 | struct scatterlist *sg; |
120 | int i; | 117 | int i; |
@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, | |||
126 | 123 | ||
127 | return nents; | 124 | return nents; |
128 | } | 125 | } |
129 | EXPORT_SYMBOL(dma_map_sg); | ||
130 | 126 | ||
131 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list, | 127 | static void bfin_dma_sync_sg_for_device(struct device *dev, |
132 | int nelems, enum dma_data_direction direction) | 128 | struct scatterlist *sg_list, int nelems, |
129 | enum dma_data_direction direction) | ||
133 | { | 130 | { |
134 | struct scatterlist *sg; | 131 | struct scatterlist *sg; |
135 | int i; | 132 | int i; |
@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list, | |||
139 | __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); | 136 | __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); |
140 | } | 137 | } |
141 | } | 138 | } |
142 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 139 | |
140 | static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, | ||
141 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
142 | struct dma_attrs *attrs) | ||
143 | { | ||
144 | dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); | ||
145 | |||
146 | _dma_sync(handle, size, dir); | ||
147 | return handle; | ||
148 | } | ||
149 | |||
150 | static inline void bfin_dma_sync_single_for_device(struct device *dev, | ||
151 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
152 | { | ||
153 | _dma_sync(handle, size, dir); | ||
154 | } | ||
155 | |||
156 | struct dma_map_ops bfin_dma_ops = { | ||
157 | .alloc = bfin_dma_alloc, | ||
158 | .free = bfin_dma_free, | ||
159 | |||
160 | .map_page = bfin_dma_map_page, | ||
161 | .map_sg = bfin_dma_map_sg, | ||
162 | |||
163 | .sync_single_for_device = bfin_dma_sync_single_for_device, | ||
164 | .sync_sg_for_device = bfin_dma_sync_sg_for_device, | ||
165 | }; | ||
166 | EXPORT_SYMBOL(bfin_dma_ops); | ||
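Note that bfin_dma_ops fills in only the sync_*_for_device callbacks. That is sufficient because the generic wrappers treat a missing callback as a no-op, mirroring the empty inline stubs removed from the header above. Approximately (simplified from the common dma-mapping code):

    static inline void dma_sync_single_for_cpu(struct device *dev,
            dma_addr_t addr, size_t size, enum dma_data_direction dir)
    {
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)   /* absent on blackfin: nothing to do */
            ops->sync_single_for_cpu(dev, addr, size, dir);
    }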
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 77ea09b8bce1..79049d432d3c 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig | |||
@@ -17,6 +17,7 @@ config C6X | |||
17 | select OF_EARLY_FLATTREE | 17 | select OF_EARLY_FLATTREE |
18 | select GENERIC_CLOCKEVENTS | 18 | select GENERIC_CLOCKEVENTS |
19 | select MODULES_USE_ELF_RELA | 19 | select MODULES_USE_ELF_RELA |
20 | select ARCH_NO_COHERENT_DMA_MMAP | ||
20 | 21 | ||
21 | config MMU | 22 | config MMU |
22 | def_bool n | 23 | def_bool n |
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index bbd7774e4d4e..6b5cd7b0cf32 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h | |||
@@ -12,104 +12,22 @@ | |||
12 | #ifndef _ASM_C6X_DMA_MAPPING_H | 12 | #ifndef _ASM_C6X_DMA_MAPPING_H |
13 | #define _ASM_C6X_DMA_MAPPING_H | 13 | #define _ASM_C6X_DMA_MAPPING_H |
14 | 14 | ||
15 | #include <linux/dma-debug.h> | ||
16 | #include <asm-generic/dma-coherent.h> | ||
17 | |||
18 | #define dma_supported(d, m) 1 | ||
19 | |||
20 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
21 | dma_addr_t addr, | ||
22 | unsigned long offset, | ||
23 | size_t size, | ||
24 | enum dma_data_direction dir) | ||
25 | { | ||
26 | } | ||
27 | |||
28 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
29 | { | ||
30 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
31 | return -EIO; | ||
32 | |||
33 | *dev->dma_mask = dma_mask; | ||
34 | |||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | /* | 15 | /* |
39 | * DMA errors are defined by all-bits-set in the DMA address. | 16 | * DMA errors are defined by all-bits-set in the DMA address. |
40 | */ | 17 | */ |
41 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 18 | #define DMA_ERROR_CODE ~0 |
42 | { | ||
43 | debug_dma_mapping_error(dev, dma_addr); | ||
44 | return dma_addr == ~0; | ||
45 | } | ||
46 | |||
47 | extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
48 | size_t size, enum dma_data_direction dir); | ||
49 | 19 | ||
50 | extern void dma_unmap_single(struct device *dev, dma_addr_t handle, | 20 | extern struct dma_map_ops c6x_dma_ops; |
51 | size_t size, enum dma_data_direction dir); | ||
52 | 21 | ||
53 | extern int dma_map_sg(struct device *dev, struct scatterlist *sglist, | 22 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
54 | int nents, enum dma_data_direction direction); | ||
55 | |||
56 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
57 | int nents, enum dma_data_direction direction); | ||
58 | |||
59 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
60 | unsigned long offset, size_t size, | ||
61 | enum dma_data_direction dir) | ||
62 | { | 23 | { |
63 | dma_addr_t handle; | 24 | return &c6x_dma_ops; |
64 | |||
65 | handle = dma_map_single(dev, page_address(page) + offset, size, dir); | ||
66 | |||
67 | debug_dma_map_page(dev, page, offset, size, dir, handle, false); | ||
68 | |||
69 | return handle; | ||
70 | } | ||
71 | |||
72 | static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
73 | size_t size, enum dma_data_direction dir) | ||
74 | { | ||
75 | dma_unmap_single(dev, handle, size, dir); | ||
76 | |||
77 | debug_dma_unmap_page(dev, handle, size, dir, false); | ||
78 | } | 25 | } |
79 | 26 | ||
80 | extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | ||
81 | size_t size, enum dma_data_direction dir); | ||
82 | |||
83 | extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | ||
84 | size_t size, | ||
85 | enum dma_data_direction dir); | ||
86 | |||
87 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
88 | int nents, enum dma_data_direction dir); | ||
89 | |||
90 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
91 | int nents, enum dma_data_direction dir); | ||
92 | |||
93 | extern void coherent_mem_init(u32 start, u32 size); | 27 | extern void coherent_mem_init(u32 start, u32 size); |
94 | extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | 28 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
95 | extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); | 29 | gfp_t gfp, struct dma_attrs *attrs); |
96 | 30 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | |
97 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) | 31 | dma_addr_t dma_handle, struct dma_attrs *attrs); |
98 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) | ||
99 | |||
100 | /* Not supported for now */ | ||
101 | static inline int dma_mmap_coherent(struct device *dev, | ||
102 | struct vm_area_struct *vma, void *cpu_addr, | ||
103 | dma_addr_t dma_addr, size_t size) | ||
104 | { | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
109 | void *cpu_addr, dma_addr_t dma_addr, | ||
110 | size_t size) | ||
111 | { | ||
112 | return -EINVAL; | ||
113 | } | ||
114 | 32 | ||
115 | #endif /* _ASM_C6X_DMA_MAPPING_H */ | 33 | #endif /* _ASM_C6X_DMA_MAPPING_H */ |
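Defining DMA_ERROR_CODE as ~0 preserves the old C6x convention that an all-bits-set address signals a mapping failure; the open-coded dma_mapping_error() removed above can go away because the generic helper falls back to comparing against DMA_ERROR_CODE when the architecture does not supply a .mapping_error callback. A rough sketch of that fallback:

    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
            return ops->mapping_error(dev, dma_addr);

        /* fallback: compare against the arch-defined error cookie (~0 on C6x) */
        return dma_addr == DMA_ERROR_CODE;
    }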
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index ab7b12de144d..8a80f3a250c0 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c | |||
@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size, | |||
36 | } | 36 | } |
37 | } | 37 | } |
38 | 38 | ||
39 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | 39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, |
40 | enum dma_data_direction dir) | 40 | unsigned long offset, size_t size, enum dma_data_direction dir, |
41 | struct dma_attrs *attrs) | ||
41 | { | 42 | { |
42 | dma_addr_t addr = virt_to_phys(ptr); | 43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); |
43 | 44 | ||
44 | c6x_dma_sync(addr, size, dir); | 45 | c6x_dma_sync(handle, size, dir); |
45 | 46 | return handle; | |
46 | debug_dma_map_page(dev, virt_to_page(ptr), | ||
47 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
48 | dir, addr, true); | ||
49 | return addr; | ||
50 | } | 47 | } |
51 | EXPORT_SYMBOL(dma_map_single); | ||
52 | |||
53 | 48 | ||
54 | void dma_unmap_single(struct device *dev, dma_addr_t handle, | 49 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, |
55 | size_t size, enum dma_data_direction dir) | 50 | size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) |
56 | { | 51 | { |
57 | c6x_dma_sync(handle, size, dir); | 52 | c6x_dma_sync(handle, size, dir); |
58 | |||
59 | debug_dma_unmap_page(dev, handle, size, dir, true); | ||
60 | } | 53 | } |
61 | EXPORT_SYMBOL(dma_unmap_single); | ||
62 | |||
63 | 54 | ||
64 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, | 55 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
65 | int nents, enum dma_data_direction dir) | 56 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) |
66 | { | 57 | { |
67 | struct scatterlist *sg; | 58 | struct scatterlist *sg; |
68 | int i; | 59 | int i; |
69 | 60 | ||
70 | for_each_sg(sglist, sg, nents, i) | 61 | for_each_sg(sglist, sg, nents, i) { |
71 | sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length, | 62 | sg->dma_address = sg_phys(sg); |
72 | dir); | 63 | c6x_dma_sync(sg->dma_address, sg->length, dir); |
73 | 64 | } | |
74 | debug_dma_map_sg(dev, sglist, nents, nents, dir); | ||
75 | 65 | ||
76 | return nents; | 66 | return nents; |
77 | } | 67 | } |
78 | EXPORT_SYMBOL(dma_map_sg); | ||
79 | |||
80 | 68 | ||
81 | void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 69 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
82 | int nents, enum dma_data_direction dir) | 70 | int nents, enum dma_data_direction dir, |
71 | struct dma_attrs *attrs) | ||
83 | { | 72 | { |
84 | struct scatterlist *sg; | 73 | struct scatterlist *sg; |
85 | int i; | 74 | int i; |
86 | 75 | ||
87 | for_each_sg(sglist, sg, nents, i) | 76 | for_each_sg(sglist, sg, nents, i) |
88 | dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir); | 77 | c6x_dma_sync(sg_dma_address(sg), sg->length, dir); |
89 | 78 | ||
90 | debug_dma_unmap_sg(dev, sglist, nents, dir); | ||
91 | } | 79 | } |
92 | EXPORT_SYMBOL(dma_unmap_sg); | ||
93 | 80 | ||
94 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | 81 | static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, |
95 | size_t size, enum dma_data_direction dir) | 82 | size_t size, enum dma_data_direction dir) |
96 | { | 83 | { |
97 | c6x_dma_sync(handle, size, dir); | 84 | c6x_dma_sync(handle, size, dir); |
98 | 85 | ||
99 | debug_dma_sync_single_for_cpu(dev, handle, size, dir); | ||
100 | } | 86 | } |
101 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
102 | 87 | ||
103 | 88 | static void c6x_dma_sync_single_for_device(struct device *dev, | |
104 | void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | 89 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
105 | size_t size, enum dma_data_direction dir) | ||
106 | { | 90 | { |
107 | c6x_dma_sync(handle, size, dir); | 91 | c6x_dma_sync(handle, size, dir); |
108 | 92 | ||
109 | debug_dma_sync_single_for_device(dev, handle, size, dir); | ||
110 | } | 93 | } |
111 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
112 | |||
113 | 94 | ||
114 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, | 95 | static void c6x_dma_sync_sg_for_cpu(struct device *dev, |
115 | int nents, enum dma_data_direction dir) | 96 | struct scatterlist *sglist, int nents, |
97 | enum dma_data_direction dir) | ||
116 | { | 98 | { |
117 | struct scatterlist *sg; | 99 | struct scatterlist *sg; |
118 | int i; | 100 | int i; |
119 | 101 | ||
120 | for_each_sg(sglist, sg, nents, i) | 102 | for_each_sg(sglist, sg, nents, i) |
121 | dma_sync_single_for_cpu(dev, sg_dma_address(sg), | 103 | c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg), |
122 | sg->length, dir); | 104 | sg->length, dir); |
123 | 105 | ||
124 | debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir); | ||
125 | } | 106 | } |
126 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
127 | |||
128 | 107 | ||
129 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | 108 | static void c6x_dma_sync_sg_for_device(struct device *dev, |
130 | int nents, enum dma_data_direction dir) | 109 | struct scatterlist *sglist, int nents, |
110 | enum dma_data_direction dir) | ||
131 | { | 111 | { |
132 | struct scatterlist *sg; | 112 | struct scatterlist *sg; |
133 | int i; | 113 | int i; |
134 | 114 | ||
135 | for_each_sg(sglist, sg, nents, i) | 115 | for_each_sg(sglist, sg, nents, i) |
136 | dma_sync_single_for_device(dev, sg_dma_address(sg), | 116 | c6x_dma_sync_single_for_device(dev, sg_dma_address(sg), |
137 | sg->length, dir); | 117 | sg->length, dir); |
138 | 118 | ||
139 | debug_dma_sync_sg_for_device(dev, sglist, nents, dir); | ||
140 | } | 119 | } |
141 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
142 | 120 | ||
121 | struct dma_map_ops c6x_dma_ops = { | ||
122 | .alloc = c6x_dma_alloc, | ||
123 | .free = c6x_dma_free, | ||
124 | .map_page = c6x_dma_map_page, | ||
125 | .unmap_page = c6x_dma_unmap_page, | ||
126 | .map_sg = c6x_dma_map_sg, | ||
127 | .unmap_sg = c6x_dma_unmap_sg, | ||
128 | .sync_single_for_device = c6x_dma_sync_single_for_device, | ||
129 | .sync_single_for_cpu = c6x_dma_sync_single_for_cpu, | ||
130 | .sync_sg_for_device = c6x_dma_sync_sg_for_device, | ||
131 | .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu, | ||
132 | }; | ||
133 | EXPORT_SYMBOL(c6x_dma_ops); | ||
143 | 134 | ||
144 | /* Number of entries preallocated for DMA-API debugging */ | 135 | /* Number of entries preallocated for DMA-API debugging */ |
145 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 136 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
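From a driver's point of view nothing changes with the conversion to c6x_dma_ops; a streaming scatter-gather transfer still looks like the following (a hypothetical driver fragment, function and variable names illustrative only):

    /* hypothetical helper; dev, sgl and nents come from the caller */
    static int example_start_tx(struct device *dev, struct scatterlist *sgl,
                                int nents)
    {
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
            return -ENOMEM;

        /* ... program the DMA engine using sg_dma_address()/sg_dma_len() ... */

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
    }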
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index 4187e5180373..f7ee63af2541 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c | |||
@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order) | |||
73 | * Allocate DMA coherent memory space and return both the kernel | 73 | * Allocate DMA coherent memory space and return both the kernel |
74 | * virtual and DMA address for that space. | 74 | * virtual and DMA address for that space. |
75 | */ | 75 | */ |
76 | void *dma_alloc_coherent(struct device *dev, size_t size, | 76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
77 | dma_addr_t *handle, gfp_t gfp) | 77 | gfp_t gfp, struct dma_attrs *attrs) |
78 | { | 78 | { |
79 | u32 paddr; | 79 | u32 paddr; |
80 | int order; | 80 | int order; |
@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
94 | 94 | ||
95 | return phys_to_virt(paddr); | 95 | return phys_to_virt(paddr); |
96 | } | 96 | } |
97 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
98 | 97 | ||
99 | /* | 98 | /* |
100 | * Free DMA coherent memory as defined by the above mapping. | 99 | * Free DMA coherent memory as defined by the above mapping. |
101 | */ | 100 | */ |
102 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, |
103 | dma_addr_t dma_handle) | 102 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
104 | { | 103 | { |
105 | int order; | 104 | int order; |
106 | 105 | ||
@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
111 | 110 | ||
112 | __free_dma_pages(virt_to_phys(vaddr), order); | 111 | __free_dma_pages(virt_to_phys(vaddr), order); |
113 | } | 112 | } |
114 | EXPORT_SYMBOL(dma_free_coherent); | ||
115 | 113 | ||
116 | /* | 114 | /* |
117 | * Initialise the coherent DMA memory allocator using the given uncached region. | 115 | * Initialise the coherent DMA memory allocator using the given uncached region. |
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index ee55578d9834..8d5efa58cce1 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c | |||
@@ -16,21 +16,18 @@ | |||
16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | void *dma_alloc_coherent(struct device *dev, size_t size, | 19 | static void *v32_dma_alloc(struct device *dev, size_t size, |
20 | dma_addr_t *dma_handle, gfp_t gfp) | 20 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) |
21 | { | 21 | { |
22 | void *ret; | 22 | void *ret; |
23 | int order = get_order(size); | 23 | |
24 | /* ignore region specifiers */ | 24 | /* ignore region specifiers */ |
25 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | 25 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); |
26 | 26 | ||
27 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) | ||
28 | return ret; | ||
29 | |||
30 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | 27 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) |
31 | gfp |= GFP_DMA; | 28 | gfp |= GFP_DMA; |
32 | 29 | ||
33 | ret = (void *)__get_free_pages(gfp, order); | 30 | ret = (void *)__get_free_pages(gfp, get_order(size)); |
34 | 31 | ||
35 | if (ret != NULL) { | 32 | if (ret != NULL) { |
36 | memset(ret, 0, size); | 33 | memset(ret, 0, size); |
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
39 | return ret; | 36 | return ret; |
40 | } | 37 | } |
41 | 38 | ||
42 | void dma_free_coherent(struct device *dev, size_t size, | 39 | static void v32_dma_free(struct device *dev, size_t size, void *vaddr, |
43 | void *vaddr, dma_addr_t dma_handle) | 40 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
41 | { | ||
42 | free_pages((unsigned long)vaddr, get_order(size)); | ||
43 | } | ||
44 | |||
45 | static inline dma_addr_t v32_dma_map_page(struct device *dev, | ||
46 | struct page *page, unsigned long offset, size_t size, | ||
47 | enum dma_data_direction direction, | ||
48 | struct dma_attrs *attrs) | ||
44 | { | 49 | { |
45 | int order = get_order(size); | 50 | return page_to_phys(page) + offset; |
51 | } | ||
46 | 52 | ||
47 | if (!dma_release_from_coherent(dev, order, vaddr)) | 53 | static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, |
48 | free_pages((unsigned long)vaddr, order); | 54 | int nents, enum dma_data_direction direction, |
55 | struct dma_attrs *attrs) | ||
56 | { | ||
57 | printk("Map sg\n"); | ||
58 | return nents; | ||
59 | } | ||
60 | |||
61 | static inline int v32_dma_supported(struct device *dev, u64 mask) | ||
62 | { | ||
63 | /* | ||
64 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
65 | * so we can't guarantee allocations that must be | ||
66 | * within a tighter range than GFP_DMA.. | ||
67 | */ | ||
68 | if (mask < 0x00ffffff) | ||
69 | return 0; | ||
70 | return 1; | ||
49 | } | 71 | } |
50 | 72 | ||
73 | struct dma_map_ops v32_dma_ops = { | ||
74 | .alloc = v32_dma_alloc, | ||
75 | .free = v32_dma_free, | ||
76 | .map_page = v32_dma_map_page, | ||
77 | .map_sg = v32_dma_map_sg, | ||
78 | .dma_supported = v32_dma_supported, | ||
79 | }; | ||
80 | EXPORT_SYMBOL(v32_dma_ops); | ||
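The cris conversion above also shows why the explicit dma_alloc_from_coherent() call could be dropped from the arch allocator: once an architecture only supplies a dma_map_ops table, the allocation entry points live in the generic dma-mapping code, which tries the per-device coherent pool before calling the arch hook. A simplified sketch of that dispatch (not part of this patch; names and details follow my reading of the generic layer of this era):

/*
 * Illustrative sketch only -- simplified from the generic dma-mapping
 * layer; assumes normal kernel context (<linux/dma-mapping.h>).
 */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	/* per-device coherent pools are tried before the arch hook */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)

dma_free_coherent() is wrapped the same way around ops->free, which is why the EXPORT_SYMBOL(dma_free_coherent) line in the c6x hunk above can simply go away.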
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 57f794ee6039..5a370178a0e9 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h | |||
@@ -1,156 +1,20 @@ | |||
1 | /* DMA mapping. Nothing tricky here, just virt_to_phys */ | ||
2 | |||
3 | #ifndef _ASM_CRIS_DMA_MAPPING_H | 1 | #ifndef _ASM_CRIS_DMA_MAPPING_H |
4 | #define _ASM_CRIS_DMA_MAPPING_H | 2 | #define _ASM_CRIS_DMA_MAPPING_H |
5 | 3 | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/scatterlist.h> | ||
9 | |||
10 | #include <asm/cache.h> | ||
11 | #include <asm/io.h> | ||
12 | |||
13 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
15 | |||
16 | #ifdef CONFIG_PCI | 4 | #ifdef CONFIG_PCI |
17 | #include <asm-generic/dma-coherent.h> | 5 | extern struct dma_map_ops v32_dma_ops; |
18 | |||
19 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
20 | dma_addr_t *dma_handle, gfp_t flag); | ||
21 | 6 | ||
22 | void dma_free_coherent(struct device *dev, size_t size, | 7 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
23 | void *vaddr, dma_addr_t dma_handle); | ||
24 | #else | ||
25 | static inline void * | ||
26 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
27 | gfp_t flag) | ||
28 | { | 8 | { |
29 | BUG(); | 9 | return &v32_dma_ops; |
30 | return NULL; | ||
31 | } | 10 | } |
32 | 11 | #else | |
33 | static inline void | 12 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
34 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
35 | dma_addr_t dma_handle) | ||
36 | { | 13 | { |
37 | BUG(); | 14 | BUG(); |
15 | return NULL; | ||
38 | } | 16 | } |
39 | #endif | 17 | #endif |
40 | static inline dma_addr_t | ||
41 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
42 | enum dma_data_direction direction) | ||
43 | { | ||
44 | BUG_ON(direction == DMA_NONE); | ||
45 | return virt_to_phys(ptr); | ||
46 | } | ||
47 | |||
48 | static inline void | ||
49 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
50 | enum dma_data_direction direction) | ||
51 | { | ||
52 | BUG_ON(direction == DMA_NONE); | ||
53 | } | ||
54 | |||
55 | static inline int | ||
56 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
57 | enum dma_data_direction direction) | ||
58 | { | ||
59 | printk("Map sg\n"); | ||
60 | return nents; | ||
61 | } | ||
62 | |||
63 | static inline dma_addr_t | ||
64 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
65 | size_t size, enum dma_data_direction direction) | ||
66 | { | ||
67 | BUG_ON(direction == DMA_NONE); | ||
68 | return page_to_phys(page) + offset; | ||
69 | } | ||
70 | |||
71 | static inline void | ||
72 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
73 | enum dma_data_direction direction) | ||
74 | { | ||
75 | BUG_ON(direction == DMA_NONE); | ||
76 | } | ||
77 | |||
78 | |||
79 | static inline void | ||
80 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
81 | enum dma_data_direction direction) | ||
82 | { | ||
83 | BUG_ON(direction == DMA_NONE); | ||
84 | } | ||
85 | |||
86 | static inline void | ||
87 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
88 | enum dma_data_direction direction) | ||
89 | { | ||
90 | } | ||
91 | |||
92 | static inline void | ||
93 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
94 | enum dma_data_direction direction) | ||
95 | { | ||
96 | } | ||
97 | |||
98 | static inline void | ||
99 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
100 | unsigned long offset, size_t size, | ||
101 | enum dma_data_direction direction) | ||
102 | { | ||
103 | } | ||
104 | |||
105 | static inline void | ||
106 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
107 | unsigned long offset, size_t size, | ||
108 | enum dma_data_direction direction) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static inline void | ||
113 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
114 | enum dma_data_direction direction) | ||
115 | { | ||
116 | } | ||
117 | |||
118 | static inline void | ||
119 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
120 | enum dma_data_direction direction) | ||
121 | { | ||
122 | } | ||
123 | |||
124 | static inline int | ||
125 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static inline int | ||
131 | dma_supported(struct device *dev, u64 mask) | ||
132 | { | ||
133 | /* | ||
134 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
135 | * so we can't guarantee allocations that must be | ||
136 | * within a tighter range than GFP_DMA.. | ||
137 | */ | ||
138 | if(mask < 0x00ffffff) | ||
139 | return 0; | ||
140 | |||
141 | return 1; | ||
142 | } | ||
143 | |||
144 | static inline int | ||
145 | dma_set_mask(struct device *dev, u64 mask) | ||
146 | { | ||
147 | if(!dev->dma_mask || !dma_supported(dev, mask)) | ||
148 | return -EIO; | ||
149 | |||
150 | *dev->dma_mask = mask; | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | 18 | ||
155 | static inline void | 19 | static inline void |
156 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 20 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
@@ -158,15 +22,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
158 | { | 22 | { |
159 | } | 23 | } |
160 | 24 | ||
161 | /* drivers/base/dma-mapping.c */ | ||
162 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
163 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
164 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
165 | void *cpu_addr, dma_addr_t dma_addr, | ||
166 | size_t size); | ||
167 | |||
168 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
169 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
170 | |||
171 | |||
172 | #endif | 25 | #endif |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 03bfd6bf03e7..eefd9a4ed156 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -15,6 +15,7 @@ config FRV | |||
15 | select OLD_SIGSUSPEND3 | 15 | select OLD_SIGSUSPEND3 |
16 | select OLD_SIGACTION | 16 | select OLD_SIGACTION |
17 | select HAVE_DEBUG_STACKOVERFLOW | 17 | select HAVE_DEBUG_STACKOVERFLOW |
18 | select ARCH_NO_COHERENT_DMA_MMAP | ||
18 | 19 | ||
19 | config ZONE_DMA | 20 | config ZONE_DMA |
20 | bool | 21 | bool |
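frv (and mn10300 further down) selects the new ARCH_NO_COHERENT_DMA_MMAP symbol because these ports cannot provide a sensible userspace mapping of coherent memory. With the ops-based dispatch, the arch no longer needs its own -EINVAL stubs; the generic mmap path fails the request instead. Roughly (a simplified sketch, not taken from this patch):

/* Simplified sketch of the generic mmap dispatch; details may differ. */
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	/*
	 * dma_common_mmap() compiles down to "return -ENXIO" when
	 * ARCH_NO_COHERENT_DMA_MMAP is selected.
	 */
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}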
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index 2840adcd6d92..9a82bfa4303b 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h | |||
@@ -1,128 +1,17 @@ | |||
1 | #ifndef _ASM_DMA_MAPPING_H | 1 | #ifndef _ASM_DMA_MAPPING_H |
2 | #define _ASM_DMA_MAPPING_H | 2 | #define _ASM_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/device.h> | ||
5 | #include <linux/scatterlist.h> | ||
6 | #include <asm/cache.h> | 4 | #include <asm/cache.h> |
7 | #include <asm/cacheflush.h> | 5 | #include <asm/cacheflush.h> |
8 | #include <asm/io.h> | ||
9 | |||
10 | /* | ||
11 | * See Documentation/DMA-API.txt for the description of how the | ||
12 | * following DMA API should work. | ||
13 | */ | ||
14 | |||
15 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
16 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
17 | 6 | ||
18 | extern unsigned long __nongprelbss dma_coherent_mem_start; | 7 | extern unsigned long __nongprelbss dma_coherent_mem_start; |
19 | extern unsigned long __nongprelbss dma_coherent_mem_end; | 8 | extern unsigned long __nongprelbss dma_coherent_mem_end; |
20 | 9 | ||
21 | void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); | 10 | extern struct dma_map_ops frv_dma_ops; |
22 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); | ||
23 | |||
24 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
25 | enum dma_data_direction direction); | ||
26 | |||
27 | static inline | ||
28 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
29 | enum dma_data_direction direction) | ||
30 | { | ||
31 | BUG_ON(direction == DMA_NONE); | ||
32 | } | ||
33 | |||
34 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
35 | enum dma_data_direction direction); | ||
36 | |||
37 | static inline | ||
38 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
39 | enum dma_data_direction direction) | ||
40 | { | ||
41 | BUG_ON(direction == DMA_NONE); | ||
42 | } | ||
43 | |||
44 | extern | ||
45 | dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
46 | size_t size, enum dma_data_direction direction); | ||
47 | |||
48 | static inline | ||
49 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
50 | enum dma_data_direction direction) | ||
51 | { | ||
52 | BUG_ON(direction == DMA_NONE); | ||
53 | } | ||
54 | |||
55 | |||
56 | static inline | ||
57 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
58 | enum dma_data_direction direction) | ||
59 | { | ||
60 | } | ||
61 | |||
62 | static inline | ||
63 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
64 | enum dma_data_direction direction) | ||
65 | { | ||
66 | flush_write_buffers(); | ||
67 | } | ||
68 | |||
69 | static inline | ||
70 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
71 | unsigned long offset, size_t size, | ||
72 | enum dma_data_direction direction) | ||
73 | { | ||
74 | } | ||
75 | |||
76 | static inline | ||
77 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
78 | unsigned long offset, size_t size, | ||
79 | enum dma_data_direction direction) | ||
80 | { | ||
81 | flush_write_buffers(); | ||
82 | } | ||
83 | |||
84 | static inline | ||
85 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
86 | enum dma_data_direction direction) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | static inline | ||
91 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
92 | enum dma_data_direction direction) | ||
93 | { | ||
94 | flush_write_buffers(); | ||
95 | } | ||
96 | |||
97 | static inline | ||
98 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
99 | { | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static inline | ||
104 | int dma_supported(struct device *dev, u64 mask) | ||
105 | { | ||
106 | /* | ||
107 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
108 | * so we can't guarantee allocations that must be | ||
109 | * within a tighter range than GFP_DMA.. | ||
110 | */ | ||
111 | if (mask < 0x00ffffff) | ||
112 | return 0; | ||
113 | |||
114 | return 1; | ||
115 | } | ||
116 | 11 | ||
117 | static inline | 12 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
118 | int dma_set_mask(struct device *dev, u64 mask) | ||
119 | { | 13 | { |
120 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 14 | return &frv_dma_ops; |
121 | return -EIO; | ||
122 | |||
123 | *dev->dma_mask = mask; | ||
124 | |||
125 | return 0; | ||
126 | } | 15 | } |
127 | 16 | ||
128 | static inline | 17 | static inline |
@@ -132,19 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
132 | flush_write_buffers(); | 21 | flush_write_buffers(); |
133 | } | 22 | } |
134 | 23 | ||
135 | /* Not supported for now */ | ||
136 | static inline int dma_mmap_coherent(struct device *dev, | ||
137 | struct vm_area_struct *vma, void *cpu_addr, | ||
138 | dma_addr_t dma_addr, size_t size) | ||
139 | { | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | |||
143 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
144 | void *cpu_addr, dma_addr_t dma_addr, | ||
145 | size_t size) | ||
146 | { | ||
147 | return -EINVAL; | ||
148 | } | ||
149 | |||
150 | #endif /* _ASM_DMA_MAPPING_H */ | 24 | #endif /* _ASM_DMA_MAPPING_H */ |
diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h index 70dfbea8c8d7..8062fc73fad0 100644 --- a/arch/frv/include/asm/io.h +++ b/arch/frv/include/asm/io.h | |||
@@ -43,9 +43,20 @@ static inline unsigned long _swapl(unsigned long v) | |||
43 | //#define __iormb() asm volatile("membar") | 43 | //#define __iormb() asm volatile("membar") |
44 | //#define __iowmb() asm volatile("membar") | 44 | //#define __iowmb() asm volatile("membar") |
45 | 45 | ||
46 | #define __raw_readb __builtin_read8 | 46 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
47 | #define __raw_readw __builtin_read16 | 47 | { |
48 | #define __raw_readl __builtin_read32 | 48 | return __builtin_read8((volatile void __iomem *)addr); |
49 | } | ||
50 | |||
51 | static inline u16 __raw_readw(const volatile void __iomem *addr) | ||
52 | { | ||
53 | return __builtin_read16((volatile void __iomem *)addr); | ||
54 | } | ||
55 | |||
56 | static inline u32 __raw_readl(const volatile void __iomem *addr) | ||
57 | { | ||
58 | return __builtin_read32((volatile void __iomem *)addr); | ||
59 | } | ||
49 | 60 | ||
50 | #define __raw_writeb(datum, addr) __builtin_write8(addr, datum) | 61 | #define __raw_writeb(datum, addr) __builtin_write8(addr, datum) |
51 | #define __raw_writew(datum, addr) __builtin_write16(addr, datum) | 62 | #define __raw_writew(datum, addr) __builtin_write16(addr, datum) |
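The __raw_read* change in io.h is a small prerequisite rather than DMA work proper: turning the macros into static inline functions gives them a real prototype taking const volatile void __iomem *, so callers holding a const-qualified __iomem pointer presumably compile cleanly and the cast to the builtin's non-const argument happens in exactly one place. A hypothetical caller (not from the kernel tree) shows the effect:

/*
 * Hypothetical illustration: with the old macro this would hand the
 * const-qualified pointer straight to __builtin_read8() and draw a
 * discarded-qualifier warning; the new inline type-checks it instead.
 */
static u8 peek_status(const volatile void __iomem *regs)
{
	return __raw_readb(regs + 0x04);
}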
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 8eeea0d77aad..082be49b5df0 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c | |||
@@ -34,7 +34,8 @@ struct dma_alloc_record { | |||
34 | static DEFINE_SPINLOCK(dma_alloc_lock); | 34 | static DEFINE_SPINLOCK(dma_alloc_lock); |
35 | static LIST_HEAD(dma_alloc_list); | 35 | static LIST_HEAD(dma_alloc_list); |
36 | 36 | ||
37 | void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) | 37 | static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, |
38 | gfp_t gfp, struct dma_attrs *attrs) | ||
38 | { | 39 | { |
39 | struct dma_alloc_record *new; | 40 | struct dma_alloc_record *new; |
40 | struct list_head *this = &dma_alloc_list; | 41 | struct list_head *this = &dma_alloc_list; |
@@ -84,9 +85,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand | |||
84 | return NULL; | 85 | return NULL; |
85 | } | 86 | } |
86 | 87 | ||
87 | EXPORT_SYMBOL(dma_alloc_coherent); | 88 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
88 | 89 | dma_addr_t dma_handle, struct dma_attrs *attrs) | |
89 | void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
90 | { | 90 | { |
91 | struct dma_alloc_record *rec; | 91 | struct dma_alloc_record *rec; |
92 | unsigned long flags; | 92 | unsigned long flags; |
@@ -105,22 +105,9 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_ | |||
105 | BUG(); | 105 | BUG(); |
106 | } | 106 | } |
107 | 107 | ||
108 | EXPORT_SYMBOL(dma_free_coherent); | 108 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
109 | 109 | int nents, enum dma_data_direction direction, | |
110 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | 110 | struct dma_attrs *attrs) |
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | BUG_ON(direction == DMA_NONE); | ||
114 | |||
115 | frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size); | ||
116 | |||
117 | return virt_to_bus(ptr); | ||
118 | } | ||
119 | |||
120 | EXPORT_SYMBOL(dma_map_single); | ||
121 | |||
122 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
123 | enum dma_data_direction direction) | ||
124 | { | 111 | { |
125 | int i; | 112 | int i; |
126 | struct scatterlist *sg; | 113 | struct scatterlist *sg; |
@@ -135,14 +122,49 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
135 | return nents; | 122 | return nents; |
136 | } | 123 | } |
137 | 124 | ||
138 | EXPORT_SYMBOL(dma_map_sg); | 125 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
139 | 126 | unsigned long offset, size_t size, | |
140 | dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, | 127 | enum dma_data_direction direction, struct dma_attrs *attrs) |
141 | size_t size, enum dma_data_direction direction) | ||
142 | { | 128 | { |
143 | BUG_ON(direction == DMA_NONE); | 129 | BUG_ON(direction == DMA_NONE); |
144 | flush_dcache_page(page); | 130 | flush_dcache_page(page); |
145 | return (dma_addr_t) page_to_phys(page) + offset; | 131 | return (dma_addr_t) page_to_phys(page) + offset; |
146 | } | 132 | } |
147 | 133 | ||
148 | EXPORT_SYMBOL(dma_map_page); | 134 | static void frv_dma_sync_single_for_device(struct device *dev, |
135 | dma_addr_t dma_handle, size_t size, | ||
136 | enum dma_data_direction direction) | ||
137 | { | ||
138 | flush_write_buffers(); | ||
139 | } | ||
140 | |||
141 | static void frv_dma_sync_sg_for_device(struct device *dev, | ||
142 | struct scatterlist *sg, int nelems, | ||
143 | enum dma_data_direction direction) | ||
144 | { | ||
145 | flush_write_buffers(); | ||
146 | } | ||
147 | |||
148 | |||
149 | static int frv_dma_supported(struct device *dev, u64 mask) | ||
150 | { | ||
151 | /* | ||
152 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
153 | * so we can't guarantee allocations that must be | ||
154 | * within a tighter range than GFP_DMA.. | ||
155 | */ | ||
156 | if (mask < 0x00ffffff) | ||
157 | return 0; | ||
158 | return 1; | ||
159 | } | ||
160 | |||
161 | struct dma_map_ops frv_dma_ops = { | ||
162 | .alloc = frv_dma_alloc, | ||
163 | .free = frv_dma_free, | ||
164 | .map_page = frv_dma_map_page, | ||
165 | .map_sg = frv_dma_map_sg, | ||
166 | .sync_single_for_device = frv_dma_sync_single_for_device, | ||
167 | .sync_sg_for_device = frv_dma_sync_sg_for_device, | ||
168 | .dma_supported = frv_dma_supported, | ||
169 | }; | ||
170 | EXPORT_SYMBOL(frv_dma_ops); | ||
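Both frv DMA implementations lose their open-coded dma_map_single(); under the ops scheme the generic layer synthesizes the single-buffer mapping from the .map_page hook, roughly like this (simplified sketch, not part of this patch):

/* Rough sketch of how the common code builds dma_map_single() from .map_page. */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			   size, dir, addr, true);
	return addr;
}

#define dma_map_single(d, p, s, r)	dma_map_single_attrs(d, p, s, r, NULL)

Note that the old frv dma_map_single() wrote back and invalidated the cache over the buffer, while frv_dma_map_page() flushes the containing page via flush_dcache_page(); the per-page hook is now the single place that behaviour lives.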
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 4d1f01dc46e5..316b7b65348d 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c | |||
@@ -18,7 +18,9 @@ | |||
18 | #include <linux/scatterlist.h> | 18 | #include <linux/scatterlist.h> |
19 | #include <asm/io.h> | 19 | #include <asm/io.h> |
20 | 20 | ||
21 | void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) | 21 | static void *frv_dma_alloc(struct device *hwdev, size_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp, | ||
23 | struct dma_attrs *attrs) | ||
22 | { | 24 | { |
23 | void *ret; | 25 | void *ret; |
24 | 26 | ||
@@ -29,29 +31,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand | |||
29 | return ret; | 31 | return ret; |
30 | } | 32 | } |
31 | 33 | ||
32 | EXPORT_SYMBOL(dma_alloc_coherent); | 34 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
33 | 35 | dma_addr_t dma_handle, struct dma_attrs *attrs) | |
34 | void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
35 | { | 36 | { |
36 | consistent_free(vaddr); | 37 | consistent_free(vaddr); |
37 | } | 38 | } |
38 | 39 | ||
39 | EXPORT_SYMBOL(dma_free_coherent); | 40 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
40 | 41 | int nents, enum dma_data_direction direction, | |
41 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | 42 | struct dma_attrs *attrs) |
42 | enum dma_data_direction direction) | ||
43 | { | ||
44 | BUG_ON(direction == DMA_NONE); | ||
45 | |||
46 | frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size); | ||
47 | |||
48 | return virt_to_bus(ptr); | ||
49 | } | ||
50 | |||
51 | EXPORT_SYMBOL(dma_map_single); | ||
52 | |||
53 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
54 | enum dma_data_direction direction) | ||
55 | { | 43 | { |
56 | unsigned long dampr2; | 44 | unsigned long dampr2; |
57 | void *vaddr; | 45 | void *vaddr; |
@@ -79,14 +67,48 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
79 | return nents; | 67 | return nents; |
80 | } | 68 | } |
81 | 69 | ||
82 | EXPORT_SYMBOL(dma_map_sg); | 70 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
83 | 71 | unsigned long offset, size_t size, | |
84 | dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, | 72 | enum dma_data_direction direction, struct dma_attrs *attrs) |
85 | size_t size, enum dma_data_direction direction) | ||
86 | { | 73 | { |
87 | BUG_ON(direction == DMA_NONE); | ||
88 | flush_dcache_page(page); | 74 | flush_dcache_page(page); |
89 | return (dma_addr_t) page_to_phys(page) + offset; | 75 | return (dma_addr_t) page_to_phys(page) + offset; |
90 | } | 76 | } |
91 | 77 | ||
92 | EXPORT_SYMBOL(dma_map_page); | 78 | static void frv_dma_sync_single_for_device(struct device *dev, |
79 | dma_addr_t dma_handle, size_t size, | ||
80 | enum dma_data_direction direction) | ||
81 | { | ||
82 | flush_write_buffers(); | ||
83 | } | ||
84 | |||
85 | static void frv_dma_sync_sg_for_device(struct device *dev, | ||
86 | struct scatterlist *sg, int nelems, | ||
87 | enum dma_data_direction direction) | ||
88 | { | ||
89 | flush_write_buffers(); | ||
90 | } | ||
91 | |||
92 | |||
93 | static int frv_dma_supported(struct device *dev, u64 mask) | ||
94 | { | ||
95 | /* | ||
96 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
97 | * so we can't guarantee allocations that must be | ||
98 | * within a tighter range than GFP_DMA.. | ||
99 | */ | ||
100 | if (mask < 0x00ffffff) | ||
101 | return 0; | ||
102 | return 1; | ||
103 | } | ||
104 | |||
105 | struct dma_map_ops frv_dma_ops = { | ||
106 | .alloc = frv_dma_alloc, | ||
107 | .free = frv_dma_free, | ||
108 | .map_page = frv_dma_map_page, | ||
109 | .map_sg = frv_dma_map_sg, | ||
110 | .sync_single_for_device = frv_dma_sync_single_for_device, | ||
111 | .sync_sg_for_device = frv_dma_sync_sg_for_device, | ||
112 | .dma_supported = frv_dma_supported, | ||
113 | }; | ||
114 | EXPORT_SYMBOL(frv_dma_ops); | ||
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index cd1f754c1336..986ea84caaed 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig | |||
@@ -15,7 +15,6 @@ config H8300 | |||
15 | select OF_IRQ | 15 | select OF_IRQ |
16 | select OF_EARLY_FLATTREE | 16 | select OF_EARLY_FLATTREE |
17 | select HAVE_MEMBLOCK | 17 | select HAVE_MEMBLOCK |
18 | select HAVE_DMA_ATTRS | ||
19 | select CLKSRC_OF | 18 | select CLKSRC_OF |
20 | select H8300_TMR8 | 19 | select H8300_TMR8 |
21 | select HAVE_KERNEL_GZIP | 20 | select HAVE_KERNEL_GZIP |
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index d9b5b806afe6..7ac7fadffed0 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h | |||
@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
8 | return &h8300_dma_map_ops; | 8 | return &h8300_dma_map_ops; |
9 | } | 9 | } |
10 | 10 | ||
11 | #include <asm-generic/dma-mapping-common.h> | ||
12 | |||
13 | #endif | 11 | #endif |
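The h8300, hexagon, ia64, microblaze and mips hunks that follow are all the same mechanical step: drop the explicit #include <asm-generic/dma-mapping-common.h> (and the HAVE_DMA_ATTRS Kconfig select), because the common implementation is now provided to every architecture through linux/dma-mapping.h and keys on nothing but get_dma_ops(). After the series, a minimal arch header can look like this sketch (example_dma_ops is a made-up name):

/* Hypothetical minimal <asm/dma-mapping.h> under the new scheme. */
#ifndef _ASM_EXAMPLE_DMA_MAPPING_H
#define _ASM_EXAMPLE_DMA_MAPPING_H

extern struct dma_map_ops example_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &example_dma_ops;	/* one ops table for the whole platform */
}

#endif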
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 4dc89d1f9c48..57298e7b4867 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -27,7 +27,6 @@ config HEXAGON | |||
27 | select GENERIC_CLOCKEVENTS_BROADCAST | 27 | select GENERIC_CLOCKEVENTS_BROADCAST |
28 | select MODULES_USE_ELF_RELA | 28 | select MODULES_USE_ELF_RELA |
29 | select GENERIC_CPU_DEVICES | 29 | select GENERIC_CPU_DEVICES |
30 | select HAVE_DMA_ATTRS | ||
31 | ---help--- | 30 | ---help--- |
32 | Qualcomm Hexagon is a processor architecture designed for high | 31 | Qualcomm Hexagon is a processor architecture designed for high |
33 | performance and low power across a wide variety of applications. | 32 | performance and low power across a wide variety of applications. |
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 268fde8a4575..aa6203464520 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h | |||
@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); | |||
49 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 49 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
50 | enum dma_data_direction direction); | 50 | enum dma_data_direction direction); |
51 | 51 | ||
52 | #include <asm-generic/dma-mapping-common.h> | ||
53 | |||
54 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | 52 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
55 | { | 53 | { |
56 | if (!dev->dma_mask) | 54 | if (!dev->dma_mask) |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index eb0249e37981..fb0515eb639b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -25,7 +25,6 @@ config IA64 | |||
25 | select HAVE_FTRACE_MCOUNT_RECORD | 25 | select HAVE_FTRACE_MCOUNT_RECORD |
26 | select HAVE_DYNAMIC_FTRACE if (!ITANIUM) | 26 | select HAVE_DYNAMIC_FTRACE if (!ITANIUM) |
27 | select HAVE_FUNCTION_TRACER | 27 | select HAVE_FUNCTION_TRACER |
28 | select HAVE_DMA_ATTRS | ||
29 | select TTY | 28 | select TTY |
30 | select HAVE_ARCH_TRACEHOOK | 29 | select HAVE_ARCH_TRACEHOOK |
31 | select HAVE_DMA_API_DEBUG | 30 | select HAVE_DMA_API_DEBUG |
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 9beccf8010bd..d472805edfa9 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, | |||
25 | 25 | ||
26 | #define get_dma_ops(dev) platform_dma_get_ops(dev) | 26 | #define get_dma_ops(dev) platform_dma_get_ops(dev) |
27 | 27 | ||
28 | #include <asm-generic/dma-mapping-common.h> | ||
29 | |||
30 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | 28 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
31 | { | 29 | { |
32 | if (!dev->dma_mask) | 30 | if (!dev->dma_mask) |
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 05aa53594d49..96c536194287 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h | |||
@@ -1,123 +1,17 @@ | |||
1 | #ifndef _M68K_DMA_MAPPING_H | 1 | #ifndef _M68K_DMA_MAPPING_H |
2 | #define _M68K_DMA_MAPPING_H | 2 | #define _M68K_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <asm/cache.h> | 4 | extern struct dma_map_ops m68k_dma_ops; |
5 | 5 | ||
6 | struct scatterlist; | 6 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
7 | |||
8 | static inline int dma_supported(struct device *dev, u64 mask) | ||
9 | { | ||
10 | return 1; | ||
11 | } | ||
12 | |||
13 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
14 | { | ||
15 | return 0; | ||
16 | } | ||
17 | |||
18 | extern void *dma_alloc_coherent(struct device *, size_t, | ||
19 | dma_addr_t *, gfp_t); | ||
20 | extern void dma_free_coherent(struct device *, size_t, | ||
21 | void *, dma_addr_t); | ||
22 | |||
23 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
24 | dma_addr_t *dma_handle, gfp_t flag, | ||
25 | struct dma_attrs *attrs) | ||
26 | { | ||
27 | /* attrs is not supported and ignored */ | ||
28 | return dma_alloc_coherent(dev, size, dma_handle, flag); | ||
29 | } | ||
30 | |||
31 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
32 | void *cpu_addr, dma_addr_t dma_handle, | ||
33 | struct dma_attrs *attrs) | ||
34 | { | 7 | { |
35 | /* attrs is not supported and ignored */ | 8 | return &m68k_dma_ops; |
36 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
37 | } | 9 | } |
38 | 10 | ||
39 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
40 | dma_addr_t *handle, gfp_t flag) | ||
41 | { | ||
42 | return dma_alloc_coherent(dev, size, handle, flag); | ||
43 | } | ||
44 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | ||
45 | void *addr, dma_addr_t handle) | ||
46 | { | ||
47 | dma_free_coherent(dev, size, addr, handle); | ||
48 | } | ||
49 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 11 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
50 | enum dma_data_direction dir) | 12 | enum dma_data_direction dir) |
51 | { | 13 | { |
52 | /* we use coherent allocation, so not much to do here. */ | 14 | /* we use coherent allocation, so not much to do here. */ |
53 | } | 15 | } |
54 | 16 | ||
55 | extern dma_addr_t dma_map_single(struct device *, void *, size_t, | ||
56 | enum dma_data_direction); | ||
57 | static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, | ||
58 | size_t size, enum dma_data_direction dir) | ||
59 | { | ||
60 | } | ||
61 | |||
62 | extern dma_addr_t dma_map_page(struct device *, struct page *, | ||
63 | unsigned long, size_t size, | ||
64 | enum dma_data_direction); | ||
65 | static inline void dma_unmap_page(struct device *dev, dma_addr_t address, | ||
66 | size_t size, enum dma_data_direction dir) | ||
67 | { | ||
68 | } | ||
69 | |||
70 | extern int dma_map_sg(struct device *, struct scatterlist *, int, | ||
71 | enum dma_data_direction); | ||
72 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
73 | int nhwentries, enum dma_data_direction dir) | ||
74 | { | ||
75 | } | ||
76 | |||
77 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, | ||
78 | enum dma_data_direction); | ||
79 | extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | ||
80 | enum dma_data_direction); | ||
81 | |||
82 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
83 | dma_addr_t dma_handle, unsigned long offset, size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | /* just sync everything for now */ | ||
87 | dma_sync_single_for_device(dev, dma_handle, offset + size, direction); | ||
88 | } | ||
89 | |||
90 | static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | ||
91 | size_t size, enum dma_data_direction dir) | ||
92 | { | ||
93 | } | ||
94 | |||
95 | static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
96 | int nents, enum dma_data_direction dir) | ||
97 | { | ||
98 | } | ||
99 | |||
100 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
101 | dma_addr_t dma_handle, unsigned long offset, size_t size, | ||
102 | enum dma_data_direction direction) | ||
103 | { | ||
104 | /* just sync everything for now */ | ||
105 | dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); | ||
106 | } | ||
107 | |||
108 | static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) | ||
109 | { | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | /* drivers/base/dma-mapping.c */ | ||
114 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
115 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
116 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
117 | void *cpu_addr, dma_addr_t dma_addr, | ||
118 | size_t size); | ||
119 | |||
120 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
121 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
122 | |||
123 | #endif /* _M68K_DMA_MAPPING_H */ | 17 | #endif /* _M68K_DMA_MAPPING_H */ |
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index 564665f9af30..cbc78b4117b5 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c | |||
@@ -18,8 +18,8 @@ | |||
18 | 18 | ||
19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) | 19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
20 | 20 | ||
21 | void *dma_alloc_coherent(struct device *dev, size_t size, | 21 | static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
22 | dma_addr_t *handle, gfp_t flag) | 22 | gfp_t flag, struct dma_attrs *attrs) |
23 | { | 23 | { |
24 | struct page *page, **map; | 24 | struct page *page, **map; |
25 | pgprot_t pgprot; | 25 | pgprot_t pgprot; |
@@ -61,8 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
61 | return addr; | 61 | return addr; |
62 | } | 62 | } |
63 | 63 | ||
64 | void dma_free_coherent(struct device *dev, size_t size, | 64 | static void m68k_dma_free(struct device *dev, size_t size, void *addr, |
65 | void *addr, dma_addr_t handle) | 65 | dma_addr_t handle, struct dma_attrs *attrs) |
66 | { | 66 | { |
67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); | 67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); |
68 | vfree(addr); | 68 | vfree(addr); |
@@ -72,8 +72,8 @@ void dma_free_coherent(struct device *dev, size_t size, | |||
72 | 72 | ||
73 | #include <asm/cacheflush.h> | 73 | #include <asm/cacheflush.h> |
74 | 74 | ||
75 | void *dma_alloc_coherent(struct device *dev, size_t size, | 75 | static void *m68k_dma_alloc(struct device *dev, size_t size, |
76 | dma_addr_t *dma_handle, gfp_t gfp) | 76 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) |
77 | { | 77 | { |
78 | void *ret; | 78 | void *ret; |
79 | /* ignore region specifiers */ | 79 | /* ignore region specifiers */ |
@@ -90,19 +90,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
90 | return ret; | 90 | return ret; |
91 | } | 91 | } |
92 | 92 | ||
93 | void dma_free_coherent(struct device *dev, size_t size, | 93 | static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, |
94 | void *vaddr, dma_addr_t dma_handle) | 94 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
95 | { | 95 | { |
96 | free_pages((unsigned long)vaddr, get_order(size)); | 96 | free_pages((unsigned long)vaddr, get_order(size)); |
97 | } | 97 | } |
98 | 98 | ||
99 | #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */ | 99 | #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */ |
100 | 100 | ||
101 | EXPORT_SYMBOL(dma_alloc_coherent); | 101 | static void m68k_dma_sync_single_for_device(struct device *dev, |
102 | EXPORT_SYMBOL(dma_free_coherent); | 102 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
103 | |||
104 | void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | ||
105 | size_t size, enum dma_data_direction dir) | ||
106 | { | 103 | { |
107 | switch (dir) { | 104 | switch (dir) { |
108 | case DMA_BIDIRECTIONAL: | 105 | case DMA_BIDIRECTIONAL: |
@@ -118,10 +115,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | |||
118 | break; | 115 | break; |
119 | } | 116 | } |
120 | } | 117 | } |
121 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
122 | 118 | ||
123 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | 119 | static void m68k_dma_sync_sg_for_device(struct device *dev, |
124 | int nents, enum dma_data_direction dir) | 120 | struct scatterlist *sglist, int nents, enum dma_data_direction dir) |
125 | { | 121 | { |
126 | int i; | 122 | int i; |
127 | struct scatterlist *sg; | 123 | struct scatterlist *sg; |
@@ -131,31 +127,19 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | |||
131 | dir); | 127 | dir); |
132 | } | 128 | } |
133 | } | 129 | } |
134 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
135 | |||
136 | dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, | ||
137 | enum dma_data_direction dir) | ||
138 | { | ||
139 | dma_addr_t handle = virt_to_bus(addr); | ||
140 | |||
141 | dma_sync_single_for_device(dev, handle, size, dir); | ||
142 | return handle; | ||
143 | } | ||
144 | EXPORT_SYMBOL(dma_map_single); | ||
145 | 130 | ||
146 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | 131 | static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, |
147 | unsigned long offset, size_t size, | 132 | unsigned long offset, size_t size, enum dma_data_direction dir, |
148 | enum dma_data_direction dir) | 133 | struct dma_attrs *attrs) |
149 | { | 134 | { |
150 | dma_addr_t handle = page_to_phys(page) + offset; | 135 | dma_addr_t handle = page_to_phys(page) + offset; |
151 | 136 | ||
152 | dma_sync_single_for_device(dev, handle, size, dir); | 137 | dma_sync_single_for_device(dev, handle, size, dir); |
153 | return handle; | 138 | return handle; |
154 | } | 139 | } |
155 | EXPORT_SYMBOL(dma_map_page); | ||
156 | 140 | ||
157 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 141 | static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
158 | enum dma_data_direction dir) | 142 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) |
159 | { | 143 | { |
160 | int i; | 144 | int i; |
161 | struct scatterlist *sg; | 145 | struct scatterlist *sg; |
@@ -167,4 +151,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
167 | } | 151 | } |
168 | return nents; | 152 | return nents; |
169 | } | 153 | } |
170 | EXPORT_SYMBOL(dma_map_sg); | 154 | |
155 | struct dma_map_ops m68k_dma_ops = { | ||
156 | .alloc = m68k_dma_alloc, | ||
157 | .free = m68k_dma_free, | ||
158 | .map_page = m68k_dma_map_page, | ||
159 | .map_sg = m68k_dma_map_sg, | ||
160 | .sync_single_for_device = m68k_dma_sync_single_for_device, | ||
161 | .sync_sg_for_device = m68k_dma_sync_sg_for_device, | ||
162 | }; | ||
163 | EXPORT_SYMBOL(m68k_dma_ops); | ||
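m68k keeps only the for-device half of the streaming sync hooks, and that is enough: when an ops member is left NULL the generic wrapper silently does nothing, which is exactly what the deleted empty dma_sync_*_for_cpu() stubs in the old header used to do. A simplified sketch of the wrapper (not part of this patch):

/* Simplified sketch of the generic sync dispatch. */
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}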
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index eb5cdec94be0..27af5d479ce6 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h | |||
@@ -1,177 +1,11 @@ | |||
1 | #ifndef _ASM_METAG_DMA_MAPPING_H | 1 | #ifndef _ASM_METAG_DMA_MAPPING_H |
2 | #define _ASM_METAG_DMA_MAPPING_H | 2 | #define _ASM_METAG_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | extern struct dma_map_ops metag_dma_ops; |
5 | 5 | ||
6 | #include <asm/cache.h> | 6 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
7 | #include <asm/io.h> | ||
8 | #include <linux/scatterlist.h> | ||
9 | #include <asm/bug.h> | ||
10 | |||
11 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
12 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
13 | |||
14 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
15 | dma_addr_t *dma_handle, gfp_t flag); | ||
16 | |||
17 | void dma_free_coherent(struct device *dev, size_t size, | ||
18 | void *vaddr, dma_addr_t dma_handle); | ||
19 | |||
20 | void dma_sync_for_device(void *vaddr, size_t size, int dma_direction); | ||
21 | void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction); | ||
22 | |||
23 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
24 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
25 | |||
26 | int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, | ||
27 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
28 | |||
29 | static inline dma_addr_t | ||
30 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
31 | enum dma_data_direction direction) | ||
32 | { | ||
33 | BUG_ON(!valid_dma_direction(direction)); | ||
34 | WARN_ON(size == 0); | ||
35 | dma_sync_for_device(ptr, size, direction); | ||
36 | return virt_to_phys(ptr); | ||
37 | } | ||
38 | |||
39 | static inline void | ||
40 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
41 | enum dma_data_direction direction) | ||
42 | { | ||
43 | BUG_ON(!valid_dma_direction(direction)); | ||
44 | dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction); | ||
45 | } | ||
46 | |||
47 | static inline int | ||
48 | dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
49 | enum dma_data_direction direction) | ||
50 | { | ||
51 | struct scatterlist *sg; | ||
52 | int i; | ||
53 | |||
54 | BUG_ON(!valid_dma_direction(direction)); | ||
55 | WARN_ON(nents == 0 || sglist[0].length == 0); | ||
56 | |||
57 | for_each_sg(sglist, sg, nents, i) { | ||
58 | BUG_ON(!sg_page(sg)); | ||
59 | |||
60 | sg->dma_address = sg_phys(sg); | ||
61 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
62 | } | ||
63 | |||
64 | return nents; | ||
65 | } | ||
66 | |||
67 | static inline dma_addr_t | ||
68 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
69 | size_t size, enum dma_data_direction direction) | ||
70 | { | ||
71 | BUG_ON(!valid_dma_direction(direction)); | ||
72 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, | ||
73 | direction); | ||
74 | return page_to_phys(page) + offset; | ||
75 | } | ||
76 | |||
77 | static inline void | ||
78 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
79 | enum dma_data_direction direction) | ||
80 | { | ||
81 | BUG_ON(!valid_dma_direction(direction)); | ||
82 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | ||
83 | } | ||
84 | |||
85 | |||
86 | static inline void | ||
87 | dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries, | ||
88 | enum dma_data_direction direction) | ||
89 | { | ||
90 | struct scatterlist *sg; | ||
91 | int i; | ||
92 | |||
93 | BUG_ON(!valid_dma_direction(direction)); | ||
94 | WARN_ON(nhwentries == 0 || sglist[0].length == 0); | ||
95 | |||
96 | for_each_sg(sglist, sg, nhwentries, i) { | ||
97 | BUG_ON(!sg_page(sg)); | ||
98 | |||
99 | sg->dma_address = sg_phys(sg); | ||
100 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | static inline void | ||
105 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
106 | enum dma_data_direction direction) | ||
107 | { | ||
108 | dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); | ||
109 | } | ||
110 | |||
111 | static inline void | ||
112 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
113 | size_t size, enum dma_data_direction direction) | ||
114 | { | ||
115 | dma_sync_for_device(phys_to_virt(dma_handle), size, direction); | ||
116 | } | ||
117 | |||
118 | static inline void | ||
119 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
120 | unsigned long offset, size_t size, | ||
121 | enum dma_data_direction direction) | ||
122 | { | ||
123 | dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size, | ||
124 | direction); | ||
125 | } | ||
126 | |||
127 | static inline void | ||
128 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
129 | unsigned long offset, size_t size, | ||
130 | enum dma_data_direction direction) | ||
131 | { | ||
132 | dma_sync_for_device(phys_to_virt(dma_handle)+offset, size, | ||
133 | direction); | ||
134 | } | ||
135 | |||
136 | static inline void | ||
137 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, | ||
138 | enum dma_data_direction direction) | ||
139 | { | ||
140 | int i; | ||
141 | struct scatterlist *sg; | ||
142 | |||
143 | for_each_sg(sglist, sg, nelems, i) | ||
144 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
145 | } | ||
146 | |||
147 | static inline void | ||
148 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||
149 | int nelems, enum dma_data_direction direction) | ||
150 | { | ||
151 | int i; | ||
152 | struct scatterlist *sg; | ||
153 | |||
154 | for_each_sg(sglist, sg, nelems, i) | ||
155 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
156 | } | ||
157 | |||
158 | static inline int | ||
159 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
160 | { | 7 | { |
161 | return 0; | 8 | return &metag_dma_ops; |
162 | } | ||
163 | |||
164 | #define dma_supported(dev, mask) (1) | ||
165 | |||
166 | static inline int | ||
167 | dma_set_mask(struct device *dev, u64 mask) | ||
168 | { | ||
169 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
170 | return -EIO; | ||
171 | |||
172 | *dev->dma_mask = mask; | ||
173 | |||
174 | return 0; | ||
175 | } | 9 | } |
176 | 10 | ||
177 | /* | 11 | /* |
@@ -184,11 +18,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
184 | { | 18 | { |
185 | } | 19 | } |
186 | 20 | ||
187 | /* drivers/base/dma-mapping.c */ | ||
188 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
189 | void *cpu_addr, dma_addr_t dma_addr, | ||
190 | size_t size); | ||
191 | |||
192 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
193 | |||
194 | #endif | 21 | #endif |
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index c700d625067a..e12368d02155 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c | |||
@@ -171,8 +171,8 @@ out: | |||
171 | * Allocate DMA-coherent memory space and return both the kernel remapped | 171 | * Allocate DMA-coherent memory space and return both the kernel remapped |
172 | * virtual and bus address for that space. | 172 | * virtual and bus address for that space. |
173 | */ | 173 | */ |
174 | void *dma_alloc_coherent(struct device *dev, size_t size, | 174 | static void *metag_dma_alloc(struct device *dev, size_t size, |
175 | dma_addr_t *handle, gfp_t gfp) | 175 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) |
176 | { | 176 | { |
177 | struct page *page; | 177 | struct page *page; |
178 | struct metag_vm_region *c; | 178 | struct metag_vm_region *c; |
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
263 | no_page: | 263 | no_page: |
264 | return NULL; | 264 | return NULL; |
265 | } | 265 | } |
266 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
267 | 266 | ||
268 | /* | 267 | /* |
269 | * free a page as defined by the above mapping. | 268 | * free a page as defined by the above mapping. |
270 | */ | 269 | */ |
271 | void dma_free_coherent(struct device *dev, size_t size, | 270 | static void metag_dma_free(struct device *dev, size_t size, void *vaddr, |
272 | void *vaddr, dma_addr_t dma_handle) | 271 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
273 | { | 272 | { |
274 | struct metag_vm_region *c; | 273 | struct metag_vm_region *c; |
275 | unsigned long flags, addr; | 274 | unsigned long flags, addr; |
@@ -329,16 +328,19 @@ no_area: | |||
329 | __func__, vaddr); | 328 | __func__, vaddr); |
330 | dump_stack(); | 329 | dump_stack(); |
331 | } | 330 | } |
332 | EXPORT_SYMBOL(dma_free_coherent); | ||
333 | 331 | ||
334 | 332 | static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |
335 | static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | 333 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
336 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | 334 | struct dma_attrs *attrs) |
337 | { | 335 | { |
338 | int ret = -ENXIO; | ||
339 | |||
340 | unsigned long flags, user_size, kern_size; | 336 | unsigned long flags, user_size, kern_size; |
341 | struct metag_vm_region *c; | 337 | struct metag_vm_region *c; |
338 | int ret = -ENXIO; | ||
339 | |||
340 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | ||
341 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
342 | else | ||
343 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
342 | 344 | ||
343 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 345 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
344 | 346 | ||
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
364 | return ret; | 366 | return ret; |
365 | } | 367 | } |
366 | 368 | ||
367 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
368 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
369 | { | ||
370 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
371 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
372 | } | ||
373 | EXPORT_SYMBOL(dma_mmap_coherent); | ||
374 | |||
375 | int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, | ||
376 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
377 | { | ||
378 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
379 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
380 | } | ||
381 | EXPORT_SYMBOL(dma_mmap_writecombine); | ||
382 | |||
383 | |||
384 | |||
385 | |||
386 | /* | 369 | /* |
387 | * Initialise the consistent memory allocation. | 370 | * Initialise the consistent memory allocation. |
388 | */ | 371 | */ |
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init); | |||
423 | /* | 406 | /* |
424 | * make an area consistent to devices. | 407 | * make an area consistent to devices. |
425 | */ | 408 | */ |
426 | void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) | 409 | static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) |
427 | { | 410 | { |
428 | /* | 411 | /* |
429 | * Ensure any writes get through the write combiner. This is necessary | 412 | * Ensure any writes get through the write combiner. This is necessary |
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) | |||
465 | 448 | ||
466 | wmb(); | 449 | wmb(); |
467 | } | 450 | } |
468 | EXPORT_SYMBOL(dma_sync_for_device); | ||
469 | 451 | ||
470 | /* | 452 | /* |
471 | * make an area consistent to the core. | 453 | * make an area consistent to the core. |
472 | */ | 454 | */ |
473 | void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) | 455 | static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) |
474 | { | 456 | { |
475 | /* | 457 | /* |
476 | * Hardware L2 cache prefetch doesn't occur across 4K physical | 458 | * Hardware L2 cache prefetch doesn't occur across 4K physical |
@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) | |||
497 | 479 | ||
498 | rmb(); | 480 | rmb(); |
499 | } | 481 | } |
500 | EXPORT_SYMBOL(dma_sync_for_cpu); | 482 | |
483 | static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, | ||
484 | unsigned long offset, size_t size, | ||
485 | enum dma_data_direction direction, struct dma_attrs *attrs) | ||
486 | { | ||
487 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, | ||
488 | direction); | ||
489 | return page_to_phys(page) + offset; | ||
490 | } | ||
491 | |||
492 | static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
493 | size_t size, enum dma_data_direction direction, | ||
494 | struct dma_attrs *attrs) | ||
495 | { | ||
496 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | ||
497 | } | ||
498 | |||
499 | static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||
500 | int nents, enum dma_data_direction direction, | ||
501 | struct dma_attrs *attrs) | ||
502 | { | ||
503 | struct scatterlist *sg; | ||
504 | int i; | ||
505 | |||
506 | for_each_sg(sglist, sg, nents, i) { | ||
507 | BUG_ON(!sg_page(sg)); | ||
508 | |||
509 | sg->dma_address = sg_phys(sg); | ||
510 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
511 | } | ||
512 | |||
513 | return nents; | ||
514 | } | ||
515 | |||
516 | |||
517 | static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
518 | int nhwentries, enum dma_data_direction direction, | ||
519 | struct dma_attrs *attrs) | ||
520 | { | ||
521 | struct scatterlist *sg; | ||
522 | int i; | ||
523 | |||
524 | for_each_sg(sglist, sg, nhwentries, i) { | ||
525 | BUG_ON(!sg_page(sg)); | ||
526 | |||
527 | sg->dma_address = sg_phys(sg); | ||
528 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
529 | } | ||
530 | } | ||
531 | |||
532 | static void metag_dma_sync_single_for_cpu(struct device *dev, | ||
533 | dma_addr_t dma_handle, size_t size, | ||
534 | enum dma_data_direction direction) | ||
535 | { | ||
536 | dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); | ||
537 | } | ||
538 | |||
539 | static void metag_dma_sync_single_for_device(struct device *dev, | ||
540 | dma_addr_t dma_handle, size_t size, | ||
541 | enum dma_data_direction direction) | ||
542 | { | ||
543 | dma_sync_for_device(phys_to_virt(dma_handle), size, direction); | ||
544 | } | ||
545 | |||
546 | static void metag_dma_sync_sg_for_cpu(struct device *dev, | ||
547 | struct scatterlist *sglist, int nelems, | ||
548 | enum dma_data_direction direction) | ||
549 | { | ||
550 | int i; | ||
551 | struct scatterlist *sg; | ||
552 | |||
553 | for_each_sg(sglist, sg, nelems, i) | ||
554 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
555 | } | ||
556 | |||
557 | static void metag_dma_sync_sg_for_device(struct device *dev, | ||
558 | struct scatterlist *sglist, int nelems, | ||
559 | enum dma_data_direction direction) | ||
560 | { | ||
561 | int i; | ||
562 | struct scatterlist *sg; | ||
563 | |||
564 | for_each_sg(sglist, sg, nelems, i) | ||
565 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
566 | } | ||
567 | |||
568 | struct dma_map_ops metag_dma_ops = { | ||
569 | .alloc = metag_dma_alloc, | ||
570 | .free = metag_dma_free, | ||
571 | .map_page = metag_dma_map_page, | ||
572 | .map_sg = metag_dma_map_sg, | ||
573 | .sync_single_for_device = metag_dma_sync_single_for_device, | ||
574 | .sync_single_for_cpu = metag_dma_sync_single_for_cpu, | ||
575 | .sync_sg_for_cpu = metag_dma_sync_sg_for_cpu, | ||
576 | .mmap = metag_dma_mmap, | ||
577 | }; | ||
578 | EXPORT_SYMBOL(metag_dma_ops); | ||
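metag's dma_mmap_coherent()/dma_mmap_writecombine() pair collapses into the single .mmap hook, which inspects DMA_ATTR_WRITE_COMBINE to choose the page protection. A caller that previously used dma_mmap_writecombine() would now pass the attribute explicitly; a hypothetical driver fragment (dev, vma, cpu_addr, dma_handle and size are assumed to exist in the caller):

/* Hypothetical usage: map a coherent buffer into userspace write-combined. */
DEFINE_DMA_ATTRS(attrs);
int ret;

dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
ret = dma_mmap_attrs(dev, vma, cpu_addr, dma_handle, size, &attrs);
if (ret)
	return ret;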
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 5ecd0287a874..53b69deceb99 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -19,7 +19,6 @@ config MICROBLAZE | |||
19 | select HAVE_ARCH_KGDB | 19 | select HAVE_ARCH_KGDB |
20 | select HAVE_DEBUG_KMEMLEAK | 20 | select HAVE_DEBUG_KMEMLEAK |
21 | select HAVE_DMA_API_DEBUG | 21 | select HAVE_DMA_API_DEBUG |
22 | select HAVE_DMA_ATTRS | ||
23 | select HAVE_DYNAMIC_FTRACE | 22 | select HAVE_DYNAMIC_FTRACE |
24 | select HAVE_FTRACE_MCOUNT_RECORD | 23 | select HAVE_FTRACE_MCOUNT_RECORD |
25 | select HAVE_FUNCTION_GRAPH_TRACER | 24 | select HAVE_FUNCTION_GRAPH_TRACER |
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 24b12970c9cf..1884783d15c0 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h | |||
@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
44 | return &dma_direct_ops; | 44 | return &dma_direct_ops; |
45 | } | 45 | } |
46 | 46 | ||
47 | #include <asm-generic/dma-mapping-common.h> | ||
48 | |||
49 | static inline void __dma_sync(unsigned long paddr, | 47 | static inline void __dma_sync(unsigned long paddr, |
50 | size_t size, enum dma_data_direction direction) | 48 | size_t size, enum dma_data_direction direction) |
51 | { | 49 | { |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 71683a853372..fbf3f6670b69 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -31,7 +31,6 @@ config MIPS | |||
31 | select RTC_LIB if !MACH_LOONGSON64 | 31 | select RTC_LIB if !MACH_LOONGSON64 |
32 | select GENERIC_ATOMIC64 if !64BIT | 32 | select GENERIC_ATOMIC64 if !64BIT |
33 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 33 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
34 | select HAVE_DMA_ATTRS | ||
35 | select HAVE_DMA_CONTIGUOUS | 34 | select HAVE_DMA_CONTIGUOUS |
36 | select HAVE_DMA_API_DEBUG | 35 | select HAVE_DMA_API_DEBUG |
37 | select GENERIC_IRQ_PROBE | 36 | select GENERIC_IRQ_PROBE |
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index e604f760c4a0..12fa79e2f1b4 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h | |||
@@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | |||
29 | 29 | ||
30 | static inline void dma_mark_clean(void *addr, size_t size) {} | 30 | static inline void dma_mark_clean(void *addr, size_t size) {} |
31 | 31 | ||
32 | #include <asm-generic/dma-mapping-common.h> | ||
33 | |||
34 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 32 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
35 | enum dma_data_direction direction); | 33 | enum dma_data_direction direction); |
36 | 34 | ||
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index b0ebe59f73fd..ccdcfcbb24aa 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h | |||
@@ -73,7 +73,6 @@ | |||
73 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ | 73 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ |
74 | #define MADV_WILLNEED 3 /* will need these pages */ | 74 | #define MADV_WILLNEED 3 /* will need these pages */ |
75 | #define MADV_DONTNEED 4 /* don't need these pages */ | 75 | #define MADV_DONTNEED 4 /* don't need these pages */ |
76 | #define MADV_FREE 5 /* free pages only if memory pressure */ | ||
77 | 76 | ||
78 | /* common parameters: try to keep these consistent across architectures */ | 77 | /* common parameters: try to keep these consistent across architectures */ |
79 | #define MADV_FREE 8 /* free pages only if memory pressure */ | 78 | #define MADV_FREE 8 /* free pages only if memory pressure */ |
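The hunk above drops a stray second definition of MADV_FREE (value 5) so MIPS exports the single, architecture-consistent value 8 from the common-parameters block. For context, a minimal userspace sketch of the hint (illustration only, assuming a libc that already exposes MADV_FREE):

	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 1 << 20;
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;
		memset(buf, 0xab, len);		/* buffer no longer needed ...    */
		madvise(buf, len, MADV_FREE);	/* ... reclaim may take it lazily */
		munmap(buf, len);
		return 0;
	}

Unlike MADV_DONTNEED, the pages keep their contents until memory pressure actually reclaims them, and writing to them again cancels the hint.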
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 78ae5552fdb8..10607f0d2bcd 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -14,6 +14,7 @@ config MN10300 | |||
14 | select OLD_SIGSUSPEND3 | 14 | select OLD_SIGSUSPEND3 |
15 | select OLD_SIGACTION | 15 | select OLD_SIGACTION |
16 | select HAVE_DEBUG_STACKOVERFLOW | 16 | select HAVE_DEBUG_STACKOVERFLOW |
17 | select ARCH_NO_COHERENT_DMA_MMAP | ||
17 | 18 | ||
18 | config AM33_2 | 19 | config AM33_2 |
19 | def_bool n | 20 | def_bool n |
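mn10300 (and parisc, further down) previously open-coded a dma_mmap_coherent() stub that just returned -EINVAL; selecting ARCH_NO_COHERENT_DMA_MMAP moves that refusal into the common code instead. An approximate sketch of the fallback in drivers/base/dma-mapping.c (simplified; the exact guard and helper names are this note's assumption):

	int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size)
	{
		int ret = -ENXIO;

	#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		unsigned long off = vma->vm_pgoff;
		unsigned long user_count = vma_pages(vma);

		if (off < count && user_count <= count - off)
			ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
					      user_count << PAGE_SHIFT,
					      vma->vm_page_prot);
	#endif
		return ret;
	}

Architectures that cannot map coherent buffers into userspace therefore get -ENXIO from the generic path rather than each carrying an identical stub.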
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index a18abfc558eb..1dcd44757f32 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h | |||
@@ -11,154 +11,14 @@ | |||
11 | #ifndef _ASM_DMA_MAPPING_H | 11 | #ifndef _ASM_DMA_MAPPING_H |
12 | #define _ASM_DMA_MAPPING_H | 12 | #define _ASM_DMA_MAPPING_H |
13 | 13 | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/scatterlist.h> | ||
16 | |||
17 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
18 | #include <asm/io.h> | 15 | #include <asm/io.h> |
19 | 16 | ||
20 | /* | 17 | extern struct dma_map_ops mn10300_dma_ops; |
21 | * See Documentation/DMA-API.txt for the description of how the | ||
22 | * following DMA API should work. | ||
23 | */ | ||
24 | |||
25 | extern void *dma_alloc_coherent(struct device *dev, size_t size, | ||
26 | dma_addr_t *dma_handle, int flag); | ||
27 | |||
28 | extern void dma_free_coherent(struct device *dev, size_t size, | ||
29 | void *vaddr, dma_addr_t dma_handle); | ||
30 | |||
31 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) | ||
32 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) | ||
33 | |||
34 | static inline | ||
35 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
36 | enum dma_data_direction direction) | ||
37 | { | ||
38 | BUG_ON(direction == DMA_NONE); | ||
39 | mn10300_dcache_flush_inv(); | ||
40 | return virt_to_bus(ptr); | ||
41 | } | ||
42 | |||
43 | static inline | ||
44 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
45 | enum dma_data_direction direction) | ||
46 | { | ||
47 | BUG_ON(direction == DMA_NONE); | ||
48 | } | ||
49 | |||
50 | static inline | ||
51 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
52 | enum dma_data_direction direction) | ||
53 | { | ||
54 | struct scatterlist *sg; | ||
55 | int i; | ||
56 | |||
57 | BUG_ON(!valid_dma_direction(direction)); | ||
58 | WARN_ON(nents == 0 || sglist[0].length == 0); | ||
59 | |||
60 | for_each_sg(sglist, sg, nents, i) { | ||
61 | BUG_ON(!sg_page(sg)); | ||
62 | |||
63 | sg->dma_address = sg_phys(sg); | ||
64 | } | ||
65 | 18 | ||
66 | mn10300_dcache_flush_inv(); | 19 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
67 | return nents; | ||
68 | } | ||
69 | |||
70 | static inline | ||
71 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
72 | enum dma_data_direction direction) | ||
73 | { | ||
74 | BUG_ON(!valid_dma_direction(direction)); | ||
75 | } | ||
76 | |||
77 | static inline | ||
78 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
79 | unsigned long offset, size_t size, | ||
80 | enum dma_data_direction direction) | ||
81 | { | ||
82 | BUG_ON(direction == DMA_NONE); | ||
83 | return page_to_bus(page) + offset; | ||
84 | } | ||
85 | |||
86 | static inline | ||
87 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
88 | enum dma_data_direction direction) | ||
89 | { | ||
90 | BUG_ON(direction == DMA_NONE); | ||
91 | } | ||
92 | |||
93 | static inline | ||
94 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
95 | size_t size, enum dma_data_direction direction) | ||
96 | { | 20 | { |
97 | } | 21 | return &mn10300_dma_ops; |
98 | |||
99 | static inline | ||
100 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
101 | size_t size, enum dma_data_direction direction) | ||
102 | { | ||
103 | mn10300_dcache_flush_inv(); | ||
104 | } | ||
105 | |||
106 | static inline | ||
107 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
108 | unsigned long offset, size_t size, | ||
109 | enum dma_data_direction direction) | ||
110 | { | ||
111 | } | ||
112 | |||
113 | static inline void | ||
114 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
115 | unsigned long offset, size_t size, | ||
116 | enum dma_data_direction direction) | ||
117 | { | ||
118 | mn10300_dcache_flush_inv(); | ||
119 | } | ||
120 | |||
121 | |||
122 | static inline | ||
123 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
124 | int nelems, enum dma_data_direction direction) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline | ||
129 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
130 | int nelems, enum dma_data_direction direction) | ||
131 | { | ||
132 | mn10300_dcache_flush_inv(); | ||
133 | } | ||
134 | |||
135 | static inline | ||
136 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
137 | { | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static inline | ||
142 | int dma_supported(struct device *dev, u64 mask) | ||
143 | { | ||
144 | /* | ||
145 | * we fall back to GFP_DMA when the mask isn't all 1s, so we can't | ||
146 | * guarantee allocations that must be within a tighter range than | ||
147 | * GFP_DMA | ||
148 | */ | ||
149 | if (mask < 0x00ffffff) | ||
150 | return 0; | ||
151 | return 1; | ||
152 | } | ||
153 | |||
154 | static inline | ||
155 | int dma_set_mask(struct device *dev, u64 mask) | ||
156 | { | ||
157 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
158 | return -EIO; | ||
159 | |||
160 | *dev->dma_mask = mask; | ||
161 | return 0; | ||
162 | } | 22 | } |
163 | 23 | ||
164 | static inline | 24 | static inline |
@@ -168,19 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size, | |||
168 | mn10300_dcache_flush_inv(); | 28 | mn10300_dcache_flush_inv(); |
169 | } | 29 | } |
170 | 30 | ||
171 | /* Not supported for now */ | ||
172 | static inline int dma_mmap_coherent(struct device *dev, | ||
173 | struct vm_area_struct *vma, void *cpu_addr, | ||
174 | dma_addr_t dma_addr, size_t size) | ||
175 | { | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
180 | void *cpu_addr, dma_addr_t dma_addr, | ||
181 | size_t size) | ||
182 | { | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | |||
186 | #endif | 31 | #endif |
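With the header trimmed to get_dma_ops(), the open-coded dma_supported()/dma_set_mask() pair also disappears: dma_supported moves into the ops table (mn10300_dma_supported in dma-alloc.c below) and the mask handling comes from the generic header. Roughly, and simplified (the real generic code also honours an ops->set_dma_mask override):

	static inline int dma_supported(struct device *dev, u64 mask)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		if (!ops)
			return 0;
		if (!ops->dma_supported)
			return 1;
		return ops->dma_supported(dev, mask);
	}

	static inline int dma_set_mask(struct device *dev, u64 mask)
	{
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;

		*dev->dma_mask = mask;
		return 0;
	}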
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index e244ebe637e1..8842394cb49a 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c | |||
@@ -20,8 +20,8 @@ | |||
20 | 20 | ||
21 | static unsigned long pci_sram_allocated = 0xbc000000; | 21 | static unsigned long pci_sram_allocated = 0xbc000000; |
22 | 22 | ||
23 | void *dma_alloc_coherent(struct device *dev, size_t size, | 23 | static void *mn10300_dma_alloc(struct device *dev, size_t size, |
24 | dma_addr_t *dma_handle, int gfp) | 24 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) |
25 | { | 25 | { |
26 | unsigned long addr; | 26 | unsigned long addr; |
27 | void *ret; | 27 | void *ret; |
@@ -61,10 +61,9 @@ done: | |||
61 | printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle); | 61 | printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle); |
62 | return ret; | 62 | return ret; |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | 64 | ||
66 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 65 | static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, |
67 | dma_addr_t dma_handle) | 66 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
68 | { | 67 | { |
69 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; | 68 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; |
70 | 69 | ||
@@ -73,4 +72,60 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
73 | 72 | ||
74 | free_pages(addr, get_order(size)); | 73 | free_pages(addr, get_order(size)); |
75 | } | 74 | } |
76 | EXPORT_SYMBOL(dma_free_coherent); | 75 | |
76 | static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||
77 | int nents, enum dma_data_direction direction, | ||
78 | struct dma_attrs *attrs) | ||
79 | { | ||
80 | struct scatterlist *sg; | ||
81 | int i; | ||
82 | |||
83 | for_each_sg(sglist, sg, nents, i) { | ||
84 | BUG_ON(!sg_page(sg)); | ||
85 | |||
86 | sg->dma_address = sg_phys(sg); | ||
87 | } | ||
88 | |||
89 | mn10300_dcache_flush_inv(); | ||
90 | return nents; | ||
91 | } | ||
92 | |||
93 | static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, | ||
94 | unsigned long offset, size_t size, | ||
95 | enum dma_data_direction direction, struct dma_attrs *attrs) | ||
96 | { | ||
97 | return page_to_bus(page) + offset; | ||
98 | } | ||
99 | |||
100 | static void mn10300_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
101 | size_t size, enum dma_data_direction direction) | ||
102 | { | ||
103 | mn10300_dcache_flush_inv(); | ||
104 | } | ||
105 | |||
106 | static void mn10300_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
107 | int nelems, enum dma_data_direction direction) | ||
108 | { | ||
109 | mn10300_dcache_flush_inv(); | ||
110 | } | ||
111 | |||
112 | static int mn10300_dma_supported(struct device *dev, u64 mask) | ||
113 | { | ||
114 | /* | ||
115 | * we fall back to GFP_DMA when the mask isn't all 1s, so we can't | ||
116 | * guarantee allocations that must be within a tighter range than | ||
117 | * GFP_DMA | ||
118 | */ | ||
119 | if (mask < 0x00ffffff) | ||
120 | return 0; | ||
121 | return 1; | ||
122 | } | ||
123 | |||
124 | struct dma_map_ops mn10300_dma_ops = { | ||
125 | .alloc = mn10300_dma_alloc, | ||
126 | .free = mn10300_dma_free, | ||
127 | .map_page = mn10300_dma_map_page, | ||
128 | .map_sg = mn10300_dma_map_sg, | ||
129 | .sync_single_for_device = mn10300_dma_sync_single_for_device, | ||
130 | .sync_sg_for_device = mn10300_dma_sync_sg_for_device, | ||
131 | }; | ||
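Note that mn10300_dma_ops leaves unmap_page, unmap_sg and the *_for_cpu sync hooks unset. That is safe because the generic wrappers only invoke a callback when it is non-NULL, which reproduces the old header's empty inline stubs. A sketch of one such wrapper (simplified, dma-debug call dropped):

	static inline void dma_sync_single_for_cpu(struct device *dev,
						   dma_addr_t addr, size_t size,
						   enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!valid_dma_direction(dir));
		if (ops->sync_single_for_cpu)	/* NULL here == no-op, as before */
			ops->sync_single_for_cpu(dev, addr, size, dir);
	}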
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index b5567233f7f1..bec8ac8e6ad2 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h | |||
@@ -10,131 +10,20 @@ | |||
10 | #ifndef _ASM_NIOS2_DMA_MAPPING_H | 10 | #ifndef _ASM_NIOS2_DMA_MAPPING_H |
11 | #define _ASM_NIOS2_DMA_MAPPING_H | 11 | #define _ASM_NIOS2_DMA_MAPPING_H |
12 | 12 | ||
13 | #include <linux/scatterlist.h> | 13 | extern struct dma_map_ops nios2_dma_ops; |
14 | #include <linux/cache.h> | ||
15 | #include <asm/cacheflush.h> | ||
16 | 14 | ||
17 | static inline void __dma_sync_for_device(void *vaddr, size_t size, | 15 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
18 | enum dma_data_direction direction) | ||
19 | { | 16 | { |
20 | switch (direction) { | 17 | return &nios2_dma_ops; |
21 | case DMA_FROM_DEVICE: | ||
22 | invalidate_dcache_range((unsigned long)vaddr, | ||
23 | (unsigned long)(vaddr + size)); | ||
24 | break; | ||
25 | case DMA_TO_DEVICE: | ||
26 | /* | ||
27 | * We just need to flush the caches here , but Nios2 flush | ||
28 | * instruction will do both writeback and invalidate. | ||
29 | */ | ||
30 | case DMA_BIDIRECTIONAL: /* flush and invalidate */ | ||
31 | flush_dcache_range((unsigned long)vaddr, | ||
32 | (unsigned long)(vaddr + size)); | ||
33 | break; | ||
34 | default: | ||
35 | BUG(); | ||
36 | } | ||
37 | } | ||
38 | |||
39 | static inline void __dma_sync_for_cpu(void *vaddr, size_t size, | ||
40 | enum dma_data_direction direction) | ||
41 | { | ||
42 | switch (direction) { | ||
43 | case DMA_BIDIRECTIONAL: | ||
44 | case DMA_FROM_DEVICE: | ||
45 | invalidate_dcache_range((unsigned long)vaddr, | ||
46 | (unsigned long)(vaddr + size)); | ||
47 | break; | ||
48 | case DMA_TO_DEVICE: | ||
49 | break; | ||
50 | default: | ||
51 | BUG(); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
56 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
57 | |||
58 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
59 | dma_addr_t *dma_handle, gfp_t flag); | ||
60 | |||
61 | void dma_free_coherent(struct device *dev, size_t size, | ||
62 | void *vaddr, dma_addr_t dma_handle); | ||
63 | |||
64 | static inline dma_addr_t dma_map_single(struct device *dev, void *ptr, | ||
65 | size_t size, | ||
66 | enum dma_data_direction direction) | ||
67 | { | ||
68 | BUG_ON(!valid_dma_direction(direction)); | ||
69 | __dma_sync_for_device(ptr, size, direction); | ||
70 | return virt_to_phys(ptr); | ||
71 | } | ||
72 | |||
73 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
74 | size_t size, enum dma_data_direction direction) | ||
75 | { | ||
76 | } | ||
77 | |||
78 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
79 | enum dma_data_direction direction); | ||
80 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
81 | unsigned long offset, size_t size, enum dma_data_direction direction); | ||
82 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
83 | size_t size, enum dma_data_direction direction); | ||
84 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
85 | int nhwentries, enum dma_data_direction direction); | ||
86 | extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
87 | size_t size, enum dma_data_direction direction); | ||
88 | extern void dma_sync_single_for_device(struct device *dev, | ||
89 | dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); | ||
90 | extern void dma_sync_single_range_for_cpu(struct device *dev, | ||
91 | dma_addr_t dma_handle, unsigned long offset, size_t size, | ||
92 | enum dma_data_direction direction); | ||
93 | extern void dma_sync_single_range_for_device(struct device *dev, | ||
94 | dma_addr_t dma_handle, unsigned long offset, size_t size, | ||
95 | enum dma_data_direction direction); | ||
96 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
97 | int nelems, enum dma_data_direction direction); | ||
98 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
99 | int nelems, enum dma_data_direction direction); | ||
100 | |||
101 | static inline int dma_supported(struct device *dev, u64 mask) | ||
102 | { | ||
103 | return 1; | ||
104 | } | ||
105 | |||
106 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
107 | { | ||
108 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
109 | return -EIO; | ||
110 | |||
111 | *dev->dma_mask = mask; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
117 | { | ||
118 | return 0; | ||
119 | } | 18 | } |
120 | 19 | ||
121 | /* | 20 | /* |
122 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to | 21 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to |
123 | * do any flushing here. | 22 | * do any flushing here. |
124 | */ | 23 | */ |
125 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 24 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
126 | enum dma_data_direction direction) | 25 | enum dma_data_direction direction) |
127 | { | 26 | { |
128 | } | 27 | } |
129 | 28 | ||
130 | /* drivers/base/dma-mapping.c */ | ||
131 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
132 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
133 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
134 | void *cpu_addr, dma_addr_t dma_addr, | ||
135 | size_t size); | ||
136 | |||
137 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
138 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
139 | |||
140 | #endif /* _ASM_NIOS2_DMA_MAPPING_H */ | 29 | #endif /* _ASM_NIOS2_DMA_MAPPING_H */ |
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index ac5da7594f0b..90422c367ed3 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c | |||
@@ -20,9 +20,46 @@ | |||
20 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
21 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
22 | 22 | ||
23 | static inline void __dma_sync_for_device(void *vaddr, size_t size, | ||
24 | enum dma_data_direction direction) | ||
25 | { | ||
26 | switch (direction) { | ||
27 | case DMA_FROM_DEVICE: | ||
28 | invalidate_dcache_range((unsigned long)vaddr, | ||
29 | (unsigned long)(vaddr + size)); | ||
30 | break; | ||
31 | case DMA_TO_DEVICE: | ||
32 | /* | ||
33 | * We just need to flush the caches here , but Nios2 flush | ||
34 | * instruction will do both writeback and invalidate. | ||
35 | */ | ||
36 | case DMA_BIDIRECTIONAL: /* flush and invalidate */ | ||
37 | flush_dcache_range((unsigned long)vaddr, | ||
38 | (unsigned long)(vaddr + size)); | ||
39 | break; | ||
40 | default: | ||
41 | BUG(); | ||
42 | } | ||
43 | } | ||
23 | 44 | ||
24 | void *dma_alloc_coherent(struct device *dev, size_t size, | 45 | static inline void __dma_sync_for_cpu(void *vaddr, size_t size, |
25 | dma_addr_t *dma_handle, gfp_t gfp) | 46 | enum dma_data_direction direction) |
47 | { | ||
48 | switch (direction) { | ||
49 | case DMA_BIDIRECTIONAL: | ||
50 | case DMA_FROM_DEVICE: | ||
51 | invalidate_dcache_range((unsigned long)vaddr, | ||
52 | (unsigned long)(vaddr + size)); | ||
53 | break; | ||
54 | case DMA_TO_DEVICE: | ||
55 | break; | ||
56 | default: | ||
57 | BUG(); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static void *nios2_dma_alloc(struct device *dev, size_t size, | ||
62 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | ||
26 | { | 63 | { |
27 | void *ret; | 64 | void *ret; |
28 | 65 | ||
@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
45 | 82 | ||
46 | return ret; | 83 | return ret; |
47 | } | 84 | } |
48 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
49 | 85 | ||
50 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 86 | static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, |
51 | dma_addr_t dma_handle) | 87 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
52 | { | 88 | { |
53 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); | 89 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); |
54 | 90 | ||
55 | free_pages(addr, get_order(size)); | 91 | free_pages(addr, get_order(size)); |
56 | } | 92 | } |
57 | EXPORT_SYMBOL(dma_free_coherent); | ||
58 | 93 | ||
59 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 94 | static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, |
60 | enum dma_data_direction direction) | 95 | int nents, enum dma_data_direction direction, |
96 | struct dma_attrs *attrs) | ||
61 | { | 97 | { |
62 | int i; | 98 | int i; |
63 | 99 | ||
64 | BUG_ON(!valid_dma_direction(direction)); | ||
65 | |||
66 | for_each_sg(sg, sg, nents, i) { | 100 | for_each_sg(sg, sg, nents, i) { |
67 | void *addr; | 101 | void *addr; |
68 | 102 | ||
@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
75 | 109 | ||
76 | return nents; | 110 | return nents; |
77 | } | 111 | } |
78 | EXPORT_SYMBOL(dma_map_sg); | ||
79 | 112 | ||
80 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | 113 | static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, |
81 | unsigned long offset, size_t size, | 114 | unsigned long offset, size_t size, |
82 | enum dma_data_direction direction) | 115 | enum dma_data_direction direction, |
116 | struct dma_attrs *attrs) | ||
83 | { | 117 | { |
84 | void *addr; | 118 | void *addr = page_address(page) + offset; |
85 | |||
86 | BUG_ON(!valid_dma_direction(direction)); | ||
87 | 119 | ||
88 | addr = page_address(page) + offset; | ||
89 | __dma_sync_for_device(addr, size, direction); | 120 | __dma_sync_for_device(addr, size, direction); |
90 | |||
91 | return page_to_phys(page) + offset; | 121 | return page_to_phys(page) + offset; |
92 | } | 122 | } |
93 | EXPORT_SYMBOL(dma_map_page); | ||
94 | 123 | ||
95 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | 124 | static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
96 | enum dma_data_direction direction) | 125 | size_t size, enum dma_data_direction direction, |
126 | struct dma_attrs *attrs) | ||
97 | { | 127 | { |
98 | BUG_ON(!valid_dma_direction(direction)); | ||
99 | |||
100 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | 128 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); |
101 | } | 129 | } |
102 | EXPORT_SYMBOL(dma_unmap_page); | ||
103 | 130 | ||
104 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | 131 | static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
105 | enum dma_data_direction direction) | 132 | int nhwentries, enum dma_data_direction direction, |
133 | struct dma_attrs *attrs) | ||
106 | { | 134 | { |
107 | void *addr; | 135 | void *addr; |
108 | int i; | 136 | int i; |
109 | 137 | ||
110 | BUG_ON(!valid_dma_direction(direction)); | ||
111 | |||
112 | if (direction == DMA_TO_DEVICE) | 138 | if (direction == DMA_TO_DEVICE) |
113 | return; | 139 | return; |
114 | 140 | ||
@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |||
118 | __dma_sync_for_cpu(addr, sg->length, direction); | 144 | __dma_sync_for_cpu(addr, sg->length, direction); |
119 | } | 145 | } |
120 | } | 146 | } |
121 | EXPORT_SYMBOL(dma_unmap_sg); | ||
122 | |||
123 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
124 | size_t size, enum dma_data_direction direction) | ||
125 | { | ||
126 | BUG_ON(!valid_dma_direction(direction)); | ||
127 | 147 | ||
128 | __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); | 148 | static void nios2_dma_sync_single_for_cpu(struct device *dev, |
129 | } | 149 | dma_addr_t dma_handle, size_t size, |
130 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | 150 | enum dma_data_direction direction) |
131 | |||
132 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
133 | size_t size, enum dma_data_direction direction) | ||
134 | { | ||
135 | BUG_ON(!valid_dma_direction(direction)); | ||
136 | |||
137 | __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); | ||
138 | } | ||
139 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
140 | |||
141 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
142 | unsigned long offset, size_t size, | ||
143 | enum dma_data_direction direction) | ||
144 | { | 151 | { |
145 | BUG_ON(!valid_dma_direction(direction)); | ||
146 | |||
147 | __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); | 152 | __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); |
148 | } | 153 | } |
149 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
150 | 154 | ||
151 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | 155 | static void nios2_dma_sync_single_for_device(struct device *dev, |
152 | unsigned long offset, size_t size, | 156 | dma_addr_t dma_handle, size_t size, |
153 | enum dma_data_direction direction) | 157 | enum dma_data_direction direction) |
154 | { | 158 | { |
155 | BUG_ON(!valid_dma_direction(direction)); | ||
156 | |||
157 | __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); | 159 | __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); |
158 | } | 160 | } |
159 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
160 | 161 | ||
161 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | 162 | static void nios2_dma_sync_sg_for_cpu(struct device *dev, |
162 | enum dma_data_direction direction) | 163 | struct scatterlist *sg, int nelems, |
164 | enum dma_data_direction direction) | ||
163 | { | 165 | { |
164 | int i; | 166 | int i; |
165 | 167 | ||
166 | BUG_ON(!valid_dma_direction(direction)); | ||
167 | |||
168 | /* Make sure that gcc doesn't leave the empty loop body. */ | 168 | /* Make sure that gcc doesn't leave the empty loop body. */ |
169 | for_each_sg(sg, sg, nelems, i) | 169 | for_each_sg(sg, sg, nelems, i) |
170 | __dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | 170 | __dma_sync_for_cpu(sg_virt(sg), sg->length, direction); |
171 | } | 171 | } |
172 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
173 | 172 | ||
174 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 173 | static void nios2_dma_sync_sg_for_device(struct device *dev, |
175 | int nelems, enum dma_data_direction direction) | 174 | struct scatterlist *sg, int nelems, |
175 | enum dma_data_direction direction) | ||
176 | { | 176 | { |
177 | int i; | 177 | int i; |
178 | 178 | ||
179 | BUG_ON(!valid_dma_direction(direction)); | ||
180 | |||
181 | /* Make sure that gcc doesn't leave the empty loop body. */ | 179 | /* Make sure that gcc doesn't leave the empty loop body. */ |
182 | for_each_sg(sg, sg, nelems, i) | 180 | for_each_sg(sg, sg, nelems, i) |
183 | __dma_sync_for_device(sg_virt(sg), sg->length, direction); | 181 | __dma_sync_for_device(sg_virt(sg), sg->length, direction); |
184 | 182 | ||
185 | } | 183 | } |
186 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 184 | |
185 | struct dma_map_ops nios2_dma_ops = { | ||
186 | .alloc = nios2_dma_alloc, | ||
187 | .free = nios2_dma_free, | ||
188 | .map_page = nios2_dma_map_page, | ||
189 | .unmap_page = nios2_dma_unmap_page, | ||
190 | .map_sg = nios2_dma_map_sg, | ||
191 | .unmap_sg = nios2_dma_unmap_sg, | ||
192 | .sync_single_for_device = nios2_dma_sync_single_for_device, | ||
193 | .sync_single_for_cpu = nios2_dma_sync_single_for_cpu, | ||
194 | .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu, | ||
195 | .sync_sg_for_device = nios2_dma_sync_sg_for_device, | ||
196 | }; | ||
197 | EXPORT_SYMBOL(nios2_dma_ops); | ||
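One stylistic quirk survives the conversion: the scatterlist loops pass the same variable as both list head and cursor, for_each_sg(sg, sg, nelems, i), which expands to a harmless self-assignment but is easy to misread. A sketch of the more conventional spelling, with an invented function name purely for illustration:

	static void nios2_sync_sg_example(struct scatterlist *sglist, int nelems,
					  enum dma_data_direction dir)
	{
		struct scatterlist *sg;	/* separate cursor, list head untouched */
		int i;

		for_each_sg(sglist, sg, nelems, i)
			__dma_sync_for_device(sg_virt(sg), sg->length, dir);
	}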
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 443f44de1020..e118c02cc79a 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig | |||
@@ -29,9 +29,6 @@ config OPENRISC | |||
29 | config MMU | 29 | config MMU |
30 | def_bool y | 30 | def_bool y |
31 | 31 | ||
32 | config HAVE_DMA_ATTRS | ||
33 | def_bool y | ||
34 | |||
35 | config RWSEM_GENERIC_SPINLOCK | 32 | config RWSEM_GENERIC_SPINLOCK |
36 | def_bool y | 33 | def_bool y |
37 | 34 | ||
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 413bfcf86384..1f260bccb368 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h | |||
@@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask) | |||
42 | return dma_mask == DMA_BIT_MASK(32); | 42 | return dma_mask == DMA_BIT_MASK(32); |
43 | } | 43 | } |
44 | 44 | ||
45 | #include <asm-generic/dma-mapping-common.h> | ||
46 | |||
47 | #endif /* __ASM_OPENRISC_DMA_MAPPING_H */ | 45 | #endif /* __ASM_OPENRISC_DMA_MAPPING_H */ |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7c34cafdf301..14f655cf542e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -29,6 +29,7 @@ config PARISC | |||
29 | select TTY # Needed for pdc_cons.c | 29 | select TTY # Needed for pdc_cons.c |
30 | select HAVE_DEBUG_STACKOVERFLOW | 30 | select HAVE_DEBUG_STACKOVERFLOW |
31 | select HAVE_ARCH_AUDITSYSCALL | 31 | select HAVE_ARCH_AUDITSYSCALL |
32 | select ARCH_NO_COHERENT_DMA_MMAP | ||
32 | 33 | ||
33 | help | 34 | help |
34 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 35 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index d8d60a57183f..16e024602737 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h | |||
@@ -1,30 +1,11 @@ | |||
1 | #ifndef _PARISC_DMA_MAPPING_H | 1 | #ifndef _PARISC_DMA_MAPPING_H |
2 | #define _PARISC_DMA_MAPPING_H | 2 | #define _PARISC_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/mm.h> | ||
5 | #include <linux/scatterlist.h> | ||
6 | #include <asm/cacheflush.h> | 4 | #include <asm/cacheflush.h> |
7 | 5 | ||
8 | /* See Documentation/DMA-API-HOWTO.txt */ | ||
9 | struct hppa_dma_ops { | ||
10 | int (*dma_supported)(struct device *dev, u64 mask); | ||
11 | void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); | ||
12 | void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); | ||
13 | void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova); | ||
14 | dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction); | ||
15 | void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction); | ||
16 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction); | ||
17 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction); | ||
18 | void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction); | ||
19 | void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction); | ||
20 | void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); | ||
21 | void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); | ||
22 | }; | ||
23 | |||
24 | /* | 6 | /* |
25 | ** We could live without the hppa_dma_ops indirection if we didn't want | 7 | ** We need to support 4 different coherent dma models with one binary: |
26 | ** to support 4 different coherent dma models with one binary (they will | 8 | ** |
27 | ** someday be loadable modules): | ||
28 | ** I/O MMU consistent method dma_sync behavior | 9 | ** I/O MMU consistent method dma_sync behavior |
29 | ** ============= ====================== ======================= | 10 | ** ============= ====================== ======================= |
30 | ** a) PA-7x00LC uncachable host memory flush/purge | 11 | ** a) PA-7x00LC uncachable host memory flush/purge |
@@ -40,158 +21,22 @@ struct hppa_dma_ops { | |||
40 | */ | 21 | */ |
41 | 22 | ||
42 | #ifdef CONFIG_PA11 | 23 | #ifdef CONFIG_PA11 |
43 | extern struct hppa_dma_ops pcxl_dma_ops; | 24 | extern struct dma_map_ops pcxl_dma_ops; |
44 | extern struct hppa_dma_ops pcx_dma_ops; | 25 | extern struct dma_map_ops pcx_dma_ops; |
45 | #endif | 26 | #endif |
46 | 27 | ||
47 | extern struct hppa_dma_ops *hppa_dma_ops; | 28 | extern struct dma_map_ops *hppa_dma_ops; |
48 | |||
49 | #define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f) | ||
50 | #define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f) | ||
51 | |||
52 | static inline void * | ||
53 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
54 | gfp_t flag) | ||
55 | { | ||
56 | return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag); | ||
57 | } | ||
58 | |||
59 | static inline void * | ||
60 | dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
61 | gfp_t flag) | ||
62 | { | ||
63 | return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | dma_free_coherent(struct device *dev, size_t size, | ||
68 | void *vaddr, dma_addr_t dma_handle) | ||
69 | { | ||
70 | hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); | ||
71 | } | ||
72 | |||
73 | static inline void | ||
74 | dma_free_noncoherent(struct device *dev, size_t size, | ||
75 | void *vaddr, dma_addr_t dma_handle) | ||
76 | { | ||
77 | hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); | ||
78 | } | ||
79 | |||
80 | static inline dma_addr_t | ||
81 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
82 | enum dma_data_direction direction) | ||
83 | { | ||
84 | return hppa_dma_ops->map_single(dev, ptr, size, direction); | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
89 | enum dma_data_direction direction) | ||
90 | { | ||
91 | hppa_dma_ops->unmap_single(dev, dma_addr, size, direction); | ||
92 | } | ||
93 | |||
94 | static inline int | ||
95 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
96 | enum dma_data_direction direction) | ||
97 | { | ||
98 | return hppa_dma_ops->map_sg(dev, sg, nents, direction); | ||
99 | } | ||
100 | 29 | ||
101 | static inline void | 30 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
102 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
103 | enum dma_data_direction direction) | ||
104 | { | ||
105 | hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction); | ||
106 | } | ||
107 | |||
108 | static inline dma_addr_t | ||
109 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
110 | size_t size, enum dma_data_direction direction) | ||
111 | { | ||
112 | return dma_map_single(dev, (page_address(page) + (offset)), size, direction); | ||
113 | } | ||
114 | |||
115 | static inline void | ||
116 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
117 | enum dma_data_direction direction) | ||
118 | { | 31 | { |
119 | dma_unmap_single(dev, dma_address, size, direction); | 32 | return hppa_dma_ops; |
120 | } | ||
121 | |||
122 | |||
123 | static inline void | ||
124 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
125 | enum dma_data_direction direction) | ||
126 | { | ||
127 | if(hppa_dma_ops->dma_sync_single_for_cpu) | ||
128 | hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction); | ||
129 | } | ||
130 | |||
131 | static inline void | ||
132 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
133 | enum dma_data_direction direction) | ||
134 | { | ||
135 | if(hppa_dma_ops->dma_sync_single_for_device) | ||
136 | hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction); | ||
137 | } | ||
138 | |||
139 | static inline void | ||
140 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
141 | unsigned long offset, size_t size, | ||
142 | enum dma_data_direction direction) | ||
143 | { | ||
144 | if(hppa_dma_ops->dma_sync_single_for_cpu) | ||
145 | hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction); | ||
146 | } | ||
147 | |||
148 | static inline void | ||
149 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
150 | unsigned long offset, size_t size, | ||
151 | enum dma_data_direction direction) | ||
152 | { | ||
153 | if(hppa_dma_ops->dma_sync_single_for_device) | ||
154 | hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction); | ||
155 | } | ||
156 | |||
157 | static inline void | ||
158 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
159 | enum dma_data_direction direction) | ||
160 | { | ||
161 | if(hppa_dma_ops->dma_sync_sg_for_cpu) | ||
162 | hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction); | ||
163 | } | ||
164 | |||
165 | static inline void | ||
166 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
167 | enum dma_data_direction direction) | ||
168 | { | ||
169 | if(hppa_dma_ops->dma_sync_sg_for_device) | ||
170 | hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction); | ||
171 | } | ||
172 | |||
173 | static inline int | ||
174 | dma_supported(struct device *dev, u64 mask) | ||
175 | { | ||
176 | return hppa_dma_ops->dma_supported(dev, mask); | ||
177 | } | ||
178 | |||
179 | static inline int | ||
180 | dma_set_mask(struct device *dev, u64 mask) | ||
181 | { | ||
182 | if(!dev->dma_mask || !dma_supported(dev, mask)) | ||
183 | return -EIO; | ||
184 | |||
185 | *dev->dma_mask = mask; | ||
186 | |||
187 | return 0; | ||
188 | } | 33 | } |
189 | 34 | ||
190 | static inline void | 35 | static inline void |
191 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 36 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
192 | enum dma_data_direction direction) | 37 | enum dma_data_direction direction) |
193 | { | 38 | { |
194 | if(hppa_dma_ops->dma_sync_single_for_cpu) | 39 | if (hppa_dma_ops->sync_single_for_cpu) |
195 | flush_kernel_dcache_range((unsigned long)vaddr, size); | 40 | flush_kernel_dcache_range((unsigned long)vaddr, size); |
196 | } | 41 | } |
197 | 42 | ||
@@ -238,22 +83,4 @@ struct parisc_device; | |||
238 | void * sba_get_iommu(struct parisc_device *dev); | 83 | void * sba_get_iommu(struct parisc_device *dev); |
239 | #endif | 84 | #endif |
240 | 85 | ||
241 | /* At the moment, we panic on error for IOMMU resource exaustion */ | ||
242 | #define dma_mapping_error(dev, x) 0 | ||
243 | |||
244 | /* This API cannot be supported on PA-RISC */ | ||
245 | static inline int dma_mmap_coherent(struct device *dev, | ||
246 | struct vm_area_struct *vma, void *cpu_addr, | ||
247 | dma_addr_t dma_addr, size_t size) | ||
248 | { | ||
249 | return -EINVAL; | ||
250 | } | ||
251 | |||
252 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
253 | void *cpu_addr, dma_addr_t dma_addr, | ||
254 | size_t size) | ||
255 | { | ||
256 | return -EINVAL; | ||
257 | } | ||
258 | |||
259 | #endif | 86 | #endif |
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index cf830d465f75..f3db7d8eb0c2 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h | |||
@@ -43,7 +43,6 @@ | |||
43 | #define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ | 43 | #define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ |
44 | #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ | 44 | #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ |
45 | #define MADV_VPS_INHERIT 7 /* Inherit parents page size */ | 45 | #define MADV_VPS_INHERIT 7 /* Inherit parents page size */ |
46 | #define MADV_FREE 8 /* free pages only if memory pressure */ | ||
47 | 46 | ||
48 | /* common/generic parameters */ | 47 | /* common/generic parameters */ |
49 | #define MADV_FREE 8 /* free pages only if memory pressure */ | 48 | #define MADV_FREE 8 /* free pages only if memory pressure */ |
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index dba508fe1683..f8150669b8c6 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/parisc-device.h> | 40 | #include <asm/parisc-device.h> |
41 | 41 | ||
42 | /* See comments in include/asm-parisc/pci.h */ | 42 | /* See comments in include/asm-parisc/pci.h */ |
43 | struct hppa_dma_ops *hppa_dma_ops __read_mostly; | 43 | struct dma_map_ops *hppa_dma_ops __read_mostly; |
44 | EXPORT_SYMBOL(hppa_dma_ops); | 44 | EXPORT_SYMBOL(hppa_dma_ops); |
45 | 45 | ||
46 | static struct device root = { | 46 | static struct device root = { |
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index b9402c9b3454..a27e4928bf73 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c | |||
@@ -413,7 +413,8 @@ pcxl_dma_init(void) | |||
413 | 413 | ||
414 | __initcall(pcxl_dma_init); | 414 | __initcall(pcxl_dma_init); |
415 | 415 | ||
416 | static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) | 416 | static void *pa11_dma_alloc(struct device *dev, size_t size, |
417 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | ||
417 | { | 418 | { |
418 | unsigned long vaddr; | 419 | unsigned long vaddr; |
419 | unsigned long paddr; | 420 | unsigned long paddr; |
@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad | |||
439 | return (void *)vaddr; | 440 | return (void *)vaddr; |
440 | } | 441 | } |
441 | 442 | ||
442 | static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | 443 | static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, |
444 | dma_addr_t dma_handle, struct dma_attrs *attrs) | ||
443 | { | 445 | { |
444 | int order; | 446 | int order; |
445 | 447 | ||
@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad | |||
450 | free_pages((unsigned long)__va(dma_handle), order); | 452 | free_pages((unsigned long)__va(dma_handle), order); |
451 | } | 453 | } |
452 | 454 | ||
453 | static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction) | 455 | static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, |
456 | unsigned long offset, size_t size, | ||
457 | enum dma_data_direction direction, struct dma_attrs *attrs) | ||
454 | { | 458 | { |
459 | void *addr = page_address(page) + offset; | ||
455 | BUG_ON(direction == DMA_NONE); | 460 | BUG_ON(direction == DMA_NONE); |
456 | 461 | ||
457 | flush_kernel_dcache_range((unsigned long) addr, size); | 462 | flush_kernel_dcache_range((unsigned long) addr, size); |
458 | return virt_to_phys(addr); | 463 | return virt_to_phys(addr); |
459 | } | 464 | } |
460 | 465 | ||
461 | static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) | 466 | static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, |
467 | size_t size, enum dma_data_direction direction, | ||
468 | struct dma_attrs *attrs) | ||
462 | { | 469 | { |
463 | BUG_ON(direction == DMA_NONE); | 470 | BUG_ON(direction == DMA_NONE); |
464 | 471 | ||
@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz | |||
475 | return; | 482 | return; |
476 | } | 483 | } |
477 | 484 | ||
478 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction) | 485 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
486 | int nents, enum dma_data_direction direction, | ||
487 | struct dma_attrs *attrs) | ||
479 | { | 488 | { |
480 | int i; | 489 | int i; |
481 | struct scatterlist *sg; | 490 | struct scatterlist *sg; |
@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n | |||
492 | return nents; | 501 | return nents; |
493 | } | 502 | } |
494 | 503 | ||
495 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction) | 504 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
505 | int nents, enum dma_data_direction direction, | ||
506 | struct dma_attrs *attrs) | ||
496 | { | 507 | { |
497 | int i; | 508 | int i; |
498 | struct scatterlist *sg; | 509 | struct scatterlist *sg; |
@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in | |||
509 | return; | 520 | return; |
510 | } | 521 | } |
511 | 522 | ||
512 | static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) | 523 | static void pa11_dma_sync_single_for_cpu(struct device *dev, |
524 | dma_addr_t dma_handle, size_t size, | ||
525 | enum dma_data_direction direction) | ||
513 | { | 526 | { |
514 | BUG_ON(direction == DMA_NONE); | 527 | BUG_ON(direction == DMA_NONE); |
515 | 528 | ||
516 | flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size); | 529 | flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), |
530 | size); | ||
517 | } | 531 | } |
518 | 532 | ||
519 | static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) | 533 | static void pa11_dma_sync_single_for_device(struct device *dev, |
534 | dma_addr_t dma_handle, size_t size, | ||
535 | enum dma_data_direction direction) | ||
520 | { | 536 | { |
521 | BUG_ON(direction == DMA_NONE); | 537 | BUG_ON(direction == DMA_NONE); |
522 | 538 | ||
523 | flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size); | 539 | flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), |
540 | size); | ||
524 | } | 541 | } |
525 | 542 | ||
526 | static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction) | 543 | static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction) |
@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist * | |||
545 | flush_kernel_vmap_range(sg_virt(sg), sg->length); | 562 | flush_kernel_vmap_range(sg_virt(sg), sg->length); |
546 | } | 563 | } |
547 | 564 | ||
548 | struct hppa_dma_ops pcxl_dma_ops = { | 565 | struct dma_map_ops pcxl_dma_ops = { |
549 | .dma_supported = pa11_dma_supported, | 566 | .dma_supported = pa11_dma_supported, |
550 | .alloc_consistent = pa11_dma_alloc_consistent, | 567 | .alloc = pa11_dma_alloc, |
551 | .alloc_noncoherent = pa11_dma_alloc_consistent, | 568 | .free = pa11_dma_free, |
552 | .free_consistent = pa11_dma_free_consistent, | 569 | .map_page = pa11_dma_map_page, |
553 | .map_single = pa11_dma_map_single, | 570 | .unmap_page = pa11_dma_unmap_page, |
554 | .unmap_single = pa11_dma_unmap_single, | ||
555 | .map_sg = pa11_dma_map_sg, | 571 | .map_sg = pa11_dma_map_sg, |
556 | .unmap_sg = pa11_dma_unmap_sg, | 572 | .unmap_sg = pa11_dma_unmap_sg, |
557 | .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu, | 573 | .sync_single_for_cpu = pa11_dma_sync_single_for_cpu, |
558 | .dma_sync_single_for_device = pa11_dma_sync_single_for_device, | 574 | .sync_single_for_device = pa11_dma_sync_single_for_device, |
559 | .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, | 575 | .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, |
560 | .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device, | 576 | .sync_sg_for_device = pa11_dma_sync_sg_for_device, |
561 | }; | 577 | }; |
562 | 578 | ||
563 | static void *fail_alloc_consistent(struct device *dev, size_t size, | 579 | static void *pcx_dma_alloc(struct device *dev, size_t size, |
564 | dma_addr_t *dma_handle, gfp_t flag) | 580 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) |
565 | { | ||
566 | return NULL; | ||
567 | } | ||
568 | |||
569 | static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size, | ||
570 | dma_addr_t *dma_handle, gfp_t flag) | ||
571 | { | 581 | { |
572 | void *addr; | 582 | void *addr; |
573 | 583 | ||
584 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | ||
585 | return NULL; | ||
586 | |||
574 | addr = (void *)__get_free_pages(flag, get_order(size)); | 587 | addr = (void *)__get_free_pages(flag, get_order(size)); |
575 | if (addr) | 588 | if (addr) |
576 | *dma_handle = (dma_addr_t)virt_to_phys(addr); | 589 | *dma_handle = (dma_addr_t)virt_to_phys(addr); |
@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size, | |||
578 | return addr; | 591 | return addr; |
579 | } | 592 | } |
580 | 593 | ||
581 | static void pa11_dma_free_noncoherent(struct device *dev, size_t size, | 594 | static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, |
582 | void *vaddr, dma_addr_t iova) | 595 | dma_addr_t iova, struct dma_attrs *attrs) |
583 | { | 596 | { |
584 | free_pages((unsigned long)vaddr, get_order(size)); | 597 | free_pages((unsigned long)vaddr, get_order(size)); |
585 | return; | 598 | return; |
586 | } | 599 | } |
587 | 600 | ||
588 | struct hppa_dma_ops pcx_dma_ops = { | 601 | struct dma_map_ops pcx_dma_ops = { |
589 | .dma_supported = pa11_dma_supported, | 602 | .dma_supported = pa11_dma_supported, |
590 | .alloc_consistent = fail_alloc_consistent, | 603 | .alloc = pcx_dma_alloc, |
591 | .alloc_noncoherent = pa11_dma_alloc_noncoherent, | 604 | .free = pcx_dma_free, |
592 | .free_consistent = pa11_dma_free_noncoherent, | 605 | .map_page = pa11_dma_map_page, |
593 | .map_single = pa11_dma_map_single, | 606 | .unmap_page = pa11_dma_unmap_page, |
594 | .unmap_single = pa11_dma_unmap_single, | ||
595 | .map_sg = pa11_dma_map_sg, | 607 | .map_sg = pa11_dma_map_sg, |
596 | .unmap_sg = pa11_dma_unmap_sg, | 608 | .unmap_sg = pa11_dma_unmap_sg, |
597 | .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu, | 609 | .sync_single_for_cpu = pa11_dma_sync_single_for_cpu, |
598 | .dma_sync_single_for_device = pa11_dma_sync_single_for_device, | 610 | .sync_single_for_device = pa11_dma_sync_single_for_device, |
599 | .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, | 611 | .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, |
600 | .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device, | 612 | .sync_sg_for_device = pa11_dma_sync_sg_for_device, |
601 | }; | 613 | }; |
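On the PCX side, the old fail_alloc_consistent()/alloc_noncoherent pair collapses into a single pcx_dma_alloc() that only succeeds when the caller explicitly asked for DMA_ATTR_NON_CONSISTENT; a plain dma_alloc_coherent() still gets NULL, as before. A hedged usage sketch (illustration only, function name invented) of how a driver would request that memory with the dma_attrs API of this era:

	static void *example_alloc_noncoherent(struct device *dev, size_t size,
					       dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
		/* succeeds on pcx; the caller must bracket device access
		 * with dma_cache_sync() since the memory is not coherent */
		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}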
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 94f6c5089e0c..e4824fd04bb7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -108,7 +108,6 @@ config PPC | |||
108 | select HAVE_ARCH_TRACEHOOK | 108 | select HAVE_ARCH_TRACEHOOK |
109 | select HAVE_MEMBLOCK | 109 | select HAVE_MEMBLOCK |
110 | select HAVE_MEMBLOCK_NODE_MAP | 110 | select HAVE_MEMBLOCK_NODE_MAP |
111 | select HAVE_DMA_ATTRS | ||
112 | select HAVE_DMA_API_DEBUG | 111 | select HAVE_DMA_API_DEBUG |
113 | select HAVE_OPROFILE | 112 | select HAVE_OPROFILE |
114 | select HAVE_DEBUG_KMEMLEAK | 113 | select HAVE_DEBUG_KMEMLEAK |
@@ -158,6 +157,7 @@ config PPC | |||
158 | select ARCH_HAS_DMA_SET_COHERENT_MASK | 157 | select ARCH_HAS_DMA_SET_COHERENT_MASK |
159 | select ARCH_HAS_DEVMEM_IS_ALLOWED | 158 | select ARCH_HAS_DEVMEM_IS_ALLOWED |
160 | select HAVE_ARCH_SECCOMP_FILTER | 159 | select HAVE_ARCH_SECCOMP_FILTER |
160 | select ARCH_HAS_UBSAN_SANITIZE_ALL | ||
161 | 161 | ||
162 | config GENERIC_CSUM | 162 | config GENERIC_CSUM |
163 | def_bool CPU_LITTLE_ENDIAN | 163 | def_bool CPU_LITTLE_ENDIAN |
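ARCH_HAS_UBSAN_SANITIZE_ALL lets powerpc opt in to CONFIG_UBSAN_SANITIZE_ALL, which builds the kernel with the undefined behaviour sanitizer; the Makefile hunks below then carve out the objects (prom_init, ftrace, kexec, kprobes, the VDSOs, xmon) where instrumentation is unsafe or pointless. Illustration only, not from the patch, of the kind of construct UBSAN reports at runtime:

	int shift_example(int val, unsigned int bits)
	{
		/* UBSAN: "shift exponent is too large" whenever bits >= 32 */
		return val << bits;
	}

	int add_example(int a, int b)
	{
		/* signed overflow is undefined; UBSAN traps it if it occurs */
		return a + b;
	}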
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 7f522c021dc3..77816acd4fd9 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) | |||
125 | #define HAVE_ARCH_DMA_SET_MASK 1 | 125 | #define HAVE_ARCH_DMA_SET_MASK 1 |
126 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | 126 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
127 | 127 | ||
128 | #include <asm-generic/dma-mapping-common.h> | ||
129 | |||
130 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); | 128 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); |
131 | extern u64 __dma_get_required_mask(struct device *dev); | 129 | extern u64 __dma_get_required_mask(struct device *dev); |
132 | 130 | ||
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 493e72f64b35..b4407d0add27 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h | |||
@@ -191,7 +191,7 @@ struct fadump_crash_info_header { | |||
191 | u64 elfcorehdr_addr; | 191 | u64 elfcorehdr_addr; |
192 | u32 crashing_cpu; | 192 | u32 crashing_cpu; |
193 | struct pt_regs regs; | 193 | struct pt_regs regs; |
194 | struct cpumask cpu_online_mask; | 194 | struct cpumask online_mask; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | /* Crash memory ranges */ | 197 | /* Crash memory ranges */ |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index ba336930d448..794f22adf99d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -136,12 +136,18 @@ endif | |||
136 | obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o | 136 | obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o |
137 | obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o | 137 | obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o |
138 | 138 | ||
139 | # Disable GCOV in odd or sensitive code | 139 | # Disable GCOV & sanitizers in odd or sensitive code |
140 | GCOV_PROFILE_prom_init.o := n | 140 | GCOV_PROFILE_prom_init.o := n |
141 | UBSAN_SANITIZE_prom_init.o := n | ||
141 | GCOV_PROFILE_ftrace.o := n | 142 | GCOV_PROFILE_ftrace.o := n |
143 | UBSAN_SANITIZE_ftrace.o := n | ||
142 | GCOV_PROFILE_machine_kexec_64.o := n | 144 | GCOV_PROFILE_machine_kexec_64.o := n |
145 | UBSAN_SANITIZE_machine_kexec_64.o := n | ||
143 | GCOV_PROFILE_machine_kexec_32.o := n | 146 | GCOV_PROFILE_machine_kexec_32.o := n |
147 | UBSAN_SANITIZE_machine_kexec_32.o := n | ||
144 | GCOV_PROFILE_kprobes.o := n | 148 | GCOV_PROFILE_kprobes.o := n |
149 | UBSAN_SANITIZE_kprobes.o := n | ||
150 | UBSAN_SANITIZE_vdso.o := n | ||
145 | 151 | ||
146 | extra-$(CONFIG_PPC_FPU) += fpu.o | 152 | extra-$(CONFIG_PPC_FPU) += fpu.o |
147 | extra-$(CONFIG_ALTIVEC) += vector.o | 153 | extra-$(CONFIG_ALTIVEC) += vector.o |
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 26d091a1a54c..3cb3b02a13dd 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c | |||
@@ -415,7 +415,7 @@ void crash_fadump(struct pt_regs *regs, const char *str) | |||
415 | else | 415 | else |
416 | ppc_save_regs(&fdh->regs); | 416 | ppc_save_regs(&fdh->regs); |
417 | 417 | ||
418 | fdh->cpu_online_mask = *cpu_online_mask; | 418 | fdh->online_mask = *cpu_online_mask; |
419 | 419 | ||
420 | /* Call ibm,os-term rtas call to trigger firmware assisted dump */ | 420 | /* Call ibm,os-term rtas call to trigger firmware assisted dump */ |
421 | rtas_os_term((char *)str); | 421 | rtas_os_term((char *)str); |
@@ -646,7 +646,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) | |||
646 | } | 646 | } |
647 | /* Lower 4 bytes of reg_value contains logical cpu id */ | 647 | /* Lower 4 bytes of reg_value contains logical cpu id */ |
648 | cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK; | 648 | cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK; |
649 | if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { | 649 | if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) { |
650 | SKIP_TO_NEXT_CPU(reg_entry); | 650 | SKIP_TO_NEXT_CPU(reg_entry); |
651 | continue; | 651 | continue; |
652 | } | 652 | } |
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 6abffb7a8cd9..cbabd143acae 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile | |||
@@ -15,6 +15,7 @@ targets := $(obj-vdso32) vdso32.so vdso32.so.dbg | |||
15 | obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | 15 | obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) |
16 | 16 | ||
17 | GCOV_PROFILE := n | 17 | GCOV_PROFILE := n |
18 | UBSAN_SANITIZE := n | ||
18 | 19 | ||
19 | ccflags-y := -shared -fno-common -fno-builtin | 20 | ccflags-y := -shared -fno-common -fno-builtin |
20 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | 21 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ |
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 8c8f2ae43935..c710802b8fb6 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile | |||
@@ -8,6 +8,7 @@ targets := $(obj-vdso64) vdso64.so vdso64.so.dbg | |||
8 | obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | 8 | obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) |
9 | 9 | ||
10 | GCOV_PROFILE := n | 10 | GCOV_PROFILE := n |
11 | UBSAN_SANITIZE := n | ||
11 | 12 | ||
12 | ccflags-y := -shared -fno-common -fno-builtin | 13 | ccflags-y := -shared -fno-common -fno-builtin |
13 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | 14 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ |
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 1278788d96e3..436062dbb6e2 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile | |||
@@ -3,6 +3,7 @@ | |||
3 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 3 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
4 | 4 | ||
5 | GCOV_PROFILE := n | 5 | GCOV_PROFILE := n |
6 | UBSAN_SANITIZE := n | ||
6 | 7 | ||
7 | ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) | 8 | ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) |
8 | 9 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dbeeb3a049f2..3be9c832dec1 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -579,7 +579,6 @@ config QDIO | |||
579 | 579 | ||
580 | menuconfig PCI | 580 | menuconfig PCI |
581 | bool "PCI support" | 581 | bool "PCI support" |
582 | select HAVE_DMA_ATTRS | ||
583 | select PCI_MSI | 582 | select PCI_MSI |
584 | select IOMMU_SUPPORT | 583 | select IOMMU_SUPPORT |
585 | help | 584 | help |
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index b3fd54d93dd2..e64bfcb9702f 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h | |||
@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
23 | { | 23 | { |
24 | } | 24 | } |
25 | 25 | ||
26 | #include <asm-generic/dma-mapping-common.h> | ||
27 | |||
28 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | 26 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
29 | { | 27 | { |
30 | if (!dev->dma_mask) | 28 | if (!dev->dma_mask) |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 6c391a5d3e5c..e13da05505dc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -11,7 +11,6 @@ config SUPERH | |||
11 | select HAVE_GENERIC_DMA_COHERENT | 11 | select HAVE_GENERIC_DMA_COHERENT |
12 | select HAVE_ARCH_TRACEHOOK | 12 | select HAVE_ARCH_TRACEHOOK |
13 | select HAVE_DMA_API_DEBUG | 13 | select HAVE_DMA_API_DEBUG |
14 | select HAVE_DMA_ATTRS | ||
15 | select HAVE_PERF_EVENTS | 14 | select HAVE_PERF_EVENTS |
16 | select HAVE_DEBUG_BUGVERBOSE | 15 | select HAVE_DEBUG_BUGVERBOSE |
17 | select ARCH_HAVE_CUSTOM_GPIO_H | 16 | select ARCH_HAVE_CUSTOM_GPIO_H |
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index a3745a3fe029..e11cf0c8206b 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h | |||
@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
11 | 11 | ||
12 | #define DMA_ERROR_CODE 0 | 12 | #define DMA_ERROR_CODE 0 |
13 | 13 | ||
14 | #include <asm-generic/dma-mapping-common.h> | ||
15 | |||
16 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 14 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
17 | enum dma_data_direction dir); | 15 | enum dma_data_direction dir); |
18 | 16 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3203e42190dd..57ffaf285c2f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -26,7 +26,6 @@ config SPARC | |||
26 | select RTC_CLASS | 26 | select RTC_CLASS |
27 | select RTC_DRV_M48T59 | 27 | select RTC_DRV_M48T59 |
28 | select RTC_SYSTOHC | 28 | select RTC_SYSTOHC |
29 | select HAVE_DMA_ATTRS | ||
30 | select HAVE_DMA_API_DEBUG | 29 | select HAVE_DMA_API_DEBUG |
31 | select HAVE_ARCH_JUMP_LABEL if SPARC64 | 30 | select HAVE_ARCH_JUMP_LABEL if SPARC64 |
32 | select GENERIC_IRQ_SHOW | 31 | select GENERIC_IRQ_SHOW |
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index a21da597b0b5..1180ae254154 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h | |||
@@ -37,21 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
37 | return dma_ops; | 37 | return dma_ops; |
38 | } | 38 | } |
39 | 39 | ||
40 | #define HAVE_ARCH_DMA_SET_MASK 1 | ||
41 | |||
42 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
43 | { | ||
44 | #ifdef CONFIG_PCI | ||
45 | if (dev->bus == &pci_bus_type) { | ||
46 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
47 | return -EINVAL; | ||
48 | *dev->dma_mask = mask; | ||
49 | return 0; | ||
50 | } | ||
51 | #endif | ||
52 | return -EINVAL; | ||
53 | } | ||
54 | |||
55 | #include <asm-generic/dma-mapping-common.h> | ||
56 | |||
57 | #endif | 40 | #endif |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 6bfbe8b71e7e..de4a4fff9323 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -5,7 +5,6 @@ config TILE | |||
5 | def_bool y | 5 | def_bool y |
6 | select HAVE_PERF_EVENTS | 6 | select HAVE_PERF_EVENTS |
7 | select USE_PMC if PERF_EVENTS | 7 | select USE_PMC if PERF_EVENTS |
8 | select HAVE_DMA_ATTRS | ||
9 | select HAVE_DMA_API_DEBUG | 8 | select HAVE_DMA_API_DEBUG |
10 | select HAVE_KVM if !TILEGX | 9 | select HAVE_KVM if !TILEGX |
11 | select GENERIC_FIND_FIRST_BIT | 10 | select GENERIC_FIND_FIRST_BIT |
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 96ac6cce4a32..01ceb4a895b0 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -73,37 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | #define HAVE_ARCH_DMA_SET_MASK 1 | 75 | #define HAVE_ARCH_DMA_SET_MASK 1 |
76 | 76 | int dma_set_mask(struct device *dev, u64 mask); | |
77 | #include <asm-generic/dma-mapping-common.h> | ||
78 | |||
79 | static inline int | ||
80 | dma_set_mask(struct device *dev, u64 mask) | ||
81 | { | ||
82 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
83 | |||
84 | /* | ||
85 | * For PCI devices with 64-bit DMA addressing capability, promote | ||
86 | * the dma_ops to hybrid, with the consistent memory DMA space limited | ||
87 | * to 32-bit. For 32-bit capable devices, limit the streaming DMA | ||
88 | * address range to max_direct_dma_addr. | ||
89 | */ | ||
90 | if (dma_ops == gx_pci_dma_map_ops || | ||
91 | dma_ops == gx_hybrid_pci_dma_map_ops || | ||
92 | dma_ops == gx_legacy_pci_dma_map_ops) { | ||
93 | if (mask == DMA_BIT_MASK(64) && | ||
94 | dma_ops == gx_legacy_pci_dma_map_ops) | ||
95 | set_dma_ops(dev, gx_hybrid_pci_dma_map_ops); | ||
96 | else if (mask > dev->archdata.max_direct_dma_addr) | ||
97 | mask = dev->archdata.max_direct_dma_addr; | ||
98 | } | ||
99 | |||
100 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
101 | return -EIO; | ||
102 | |||
103 | *dev->dma_mask = mask; | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | 77 | ||
108 | /* | 78 | /* |
109 | * dma_alloc_noncoherent() is #defined to return coherent memory, | 79 | * dma_alloc_noncoherent() is #defined to return coherent memory, |
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 09b58703ac26..b6bc0547a4f6 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops; | |||
583 | EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); | 583 | EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); |
584 | EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops); | 584 | EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops); |
585 | 585 | ||
586 | int dma_set_mask(struct device *dev, u64 mask) | ||
587 | { | ||
588 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
589 | |||
590 | /* | ||
591 | * For PCI devices with 64-bit DMA addressing capability, promote | ||
592 | * the dma_ops to hybrid, with the consistent memory DMA space limited | ||
593 | * to 32-bit. For 32-bit capable devices, limit the streaming DMA | ||
594 | * address range to max_direct_dma_addr. | ||
595 | */ | ||
596 | if (dma_ops == gx_pci_dma_map_ops || | ||
597 | dma_ops == gx_hybrid_pci_dma_map_ops || | ||
598 | dma_ops == gx_legacy_pci_dma_map_ops) { | ||
599 | if (mask == DMA_BIT_MASK(64) && | ||
600 | dma_ops == gx_legacy_pci_dma_map_ops) | ||
601 | set_dma_ops(dev, gx_hybrid_pci_dma_map_ops); | ||
602 | else if (mask > dev->archdata.max_direct_dma_addr) | ||
603 | mask = dev->archdata.max_direct_dma_addr; | ||
604 | } | ||
605 | |||
606 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
607 | return -EIO; | ||
608 | |||
609 | *dev->dma_mask = mask; | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | EXPORT_SYMBOL(dma_set_mask); | ||
614 | |||
586 | #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK | 615 | #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK |
587 | int dma_set_coherent_mask(struct device *dev, u64 mask) | 616 | int dma_set_coherent_mask(struct device *dev, u64 mask) |
588 | { | 617 | { |
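Moving dma_set_mask() out of line does not change the driver-facing contract. A hypothetical probe fragment (example_probe is an illustrative name, assuming <linux/pci.h> and <linux/dma-mapping.h>) still negotiates the widest mask the device and platform support:

	static int example_probe(struct pci_dev *pdev)
	{
		/* try 64-bit DMA first, fall back to 32-bit, else give up */
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;

		return 0;
	}
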
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 877342640b6e..e5602ee9c610 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig | |||
@@ -5,7 +5,6 @@ config UNICORE32 | |||
5 | select ARCH_MIGHT_HAVE_PC_SERIO | 5 | select ARCH_MIGHT_HAVE_PC_SERIO |
6 | select HAVE_MEMBLOCK | 6 | select HAVE_MEMBLOCK |
7 | select HAVE_GENERIC_DMA_COHERENT | 7 | select HAVE_GENERIC_DMA_COHERENT |
8 | select HAVE_DMA_ATTRS | ||
9 | select HAVE_KERNEL_GZIP | 8 | select HAVE_KERNEL_GZIP |
10 | select HAVE_KERNEL_BZIP2 | 9 | select HAVE_KERNEL_BZIP2 |
11 | select GENERIC_ATOMIC64 | 10 | select GENERIC_ATOMIC64 |
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 8140e053ccd3..4749854afd03 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h | |||
@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
28 | return &swiotlb_dma_map_ops; | 28 | return &swiotlb_dma_map_ops; |
29 | } | 29 | } |
30 | 30 | ||
31 | #include <asm-generic/dma-mapping-common.h> | ||
32 | |||
33 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | 31 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
34 | { | 32 | { |
35 | if (dev && dev->dma_mask) | 33 | if (dev && dev->dma_mask) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9bd3cc03d51d..330e738ccfc1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -31,6 +31,7 @@ config X86 | |||
31 | select ARCH_HAS_PMEM_API if X86_64 | 31 | select ARCH_HAS_PMEM_API if X86_64 |
32 | select ARCH_HAS_MMIO_FLUSH | 32 | select ARCH_HAS_MMIO_FLUSH |
33 | select ARCH_HAS_SG_CHAIN | 33 | select ARCH_HAS_SG_CHAIN |
34 | select ARCH_HAS_UBSAN_SANITIZE_ALL | ||
34 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 35 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
35 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI | 36 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI |
36 | select ARCH_MIGHT_HAVE_PC_PARPORT | 37 | select ARCH_MIGHT_HAVE_PC_PARPORT |
@@ -99,7 +100,6 @@ config X86 | |||
99 | select HAVE_DEBUG_KMEMLEAK | 100 | select HAVE_DEBUG_KMEMLEAK |
100 | select HAVE_DEBUG_STACKOVERFLOW | 101 | select HAVE_DEBUG_STACKOVERFLOW |
101 | select HAVE_DMA_API_DEBUG | 102 | select HAVE_DMA_API_DEBUG |
102 | select HAVE_DMA_ATTRS | ||
103 | select HAVE_DMA_CONTIGUOUS | 103 | select HAVE_DMA_CONTIGUOUS |
104 | select HAVE_DYNAMIC_FTRACE | 104 | select HAVE_DYNAMIC_FTRACE |
105 | select HAVE_DYNAMIC_FTRACE_WITH_REGS | 105 | select HAVE_DYNAMIC_FTRACE_WITH_REGS |
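ARCH_HAS_UBSAN_SANITIZE_ALL lets x86 opt in to CONFIG_UBSAN_SANITIZE_ALL, which instruments essentially the whole kernel; the per-object UBSAN_SANITIZE := n knobs elsewhere in this series exclude the pieces that cannot tolerate it. Illustrative only, the kind of construct UBSAN then flags at run time:

	int ubsan_shift_demo(int val, int bits)
	{
		/* undefined behaviour if bits >= 32 or val is negative;
		 * UBSAN inserts a run-time check and prints a report */
		return val << bits;
	}
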
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 2ee62dba0373..bbe1a62efc02 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -60,6 +60,7 @@ clean-files += cpustr.h | |||
60 | KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP | 60 | KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP |
61 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | 61 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ |
62 | GCOV_PROFILE := n | 62 | GCOV_PROFILE := n |
63 | UBSAN_SANITIZE := n | ||
63 | 64 | ||
64 | $(obj)/bzImage: asflags-y := $(SVGA_MODE) | 65 | $(obj)/bzImage: asflags-y := $(SVGA_MODE) |
65 | 66 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 0a291cdfaf77..f9ce75d80101 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -33,6 +33,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) | |||
33 | 33 | ||
34 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | 34 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ |
35 | GCOV_PROFILE := n | 35 | GCOV_PROFILE := n |
36 | UBSAN_SANITIZE :=n | ||
(The assignment above reads more consistently with the other Makefiles in this series as "UBSAN_SANITIZE := n"; make treats both spellings identically.)
36 | 37 | ||
37 | LDFLAGS := -m elf_$(UTS_MACHINE) | 38 | LDFLAGS := -m elf_$(UTS_MACHINE) |
38 | LDFLAGS_vmlinux := -T | 39 | LDFLAGS_vmlinux := -T |
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 265c0ed68118..c854541d93ff 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | KBUILD_CFLAGS += $(DISABLE_LTO) | 5 | KBUILD_CFLAGS += $(DISABLE_LTO) |
6 | KASAN_SANITIZE := n | 6 | KASAN_SANITIZE := n |
7 | UBSAN_SANITIZE := n | ||
7 | 8 | ||
8 | VDSO64-$(CONFIG_X86_64) := y | 9 | VDSO64-$(CONFIG_X86_64) := y |
9 | VDSOX32-$(CONFIG_X86_X32_ABI) := y | 10 | VDSOX32-$(CONFIG_X86_X32_ABI) := y |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 953b7263f844..3a27b93e6261 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); | |||
46 | #define HAVE_ARCH_DMA_SUPPORTED 1 | 46 | #define HAVE_ARCH_DMA_SUPPORTED 1 |
47 | extern int dma_supported(struct device *hwdev, u64 mask); | 47 | extern int dma_supported(struct device *hwdev, u64 mask); |
48 | 48 | ||
49 | #include <asm-generic/dma-mapping-common.h> | ||
50 | |||
51 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 49 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
52 | dma_addr_t *dma_addr, gfp_t flag, | 50 | dma_addr_t *dma_addr, gfp_t flag, |
53 | struct dma_attrs *attrs); | 51 | struct dma_attrs *attrs); |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 819ab3f9c9c7..ba7fbba9831b 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -385,6 +385,7 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) | |||
385 | return image->fops->cleanup(image->image_loader_data); | 385 | return image->fops->cleanup(image->image_loader_data); |
386 | } | 386 | } |
387 | 387 | ||
388 | #ifdef CONFIG_KEXEC_VERIFY_SIG | ||
388 | int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel, | 389 | int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel, |
389 | unsigned long kernel_len) | 390 | unsigned long kernel_len) |
390 | { | 391 | { |
@@ -395,6 +396,7 @@ int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel, | |||
395 | 396 | ||
396 | return image->fops->verify_sig(kernel, kernel_len); | 397 | return image->fops->verify_sig(kernel, kernel_len); |
397 | } | 398 | } |
399 | #endif | ||
398 | 400 | ||
399 | /* | 401 | /* |
400 | * Apply purgatory relocations. | 402 | * Apply purgatory relocations. |
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 2730d775ef9a..3e75fcf6b836 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile | |||
@@ -70,3 +70,4 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \ | |||
70 | -I$(srctree)/arch/x86/boot | 70 | -I$(srctree)/arch/x86/boot |
71 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | 71 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ |
72 | GCOV_PROFILE := n | 72 | GCOV_PROFILE := n |
73 | UBSAN_SANITIZE := n | ||
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 82044f732323..e9df1567d778 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -15,7 +15,6 @@ config XTENSA | |||
15 | select GENERIC_PCI_IOMAP | 15 | select GENERIC_PCI_IOMAP |
16 | select GENERIC_SCHED_CLOCK | 16 | select GENERIC_SCHED_CLOCK |
17 | select HAVE_DMA_API_DEBUG | 17 | select HAVE_DMA_API_DEBUG |
18 | select HAVE_DMA_ATTRS | ||
19 | select HAVE_FUNCTION_TRACER | 18 | select HAVE_FUNCTION_TRACER |
20 | select HAVE_FUTEX_CMPXCHG if !MMU | 19 | select HAVE_FUTEX_CMPXCHG if !MMU |
21 | select HAVE_IRQ_TIME_ACCOUNTING | 20 | select HAVE_IRQ_TIME_ACCOUNTING |
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 66c9ba261e30..3fc1170a6488 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h | |||
@@ -13,8 +13,6 @@ | |||
13 | #include <asm/cache.h> | 13 | #include <asm/cache.h> |
14 | #include <asm/io.h> | 14 | #include <asm/io.h> |
15 | 15 | ||
16 | #include <asm-generic/dma-coherent.h> | ||
17 | |||
18 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
19 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
20 | 18 | ||
@@ -30,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
30 | return &xtensa_dma_map_ops; | 28 | return &xtensa_dma_map_ops; |
31 | } | 29 | } |
32 | 30 | ||
33 | #include <asm-generic/dma-mapping-common.h> | ||
34 | |||
35 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 31 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
36 | enum dma_data_direction direction); | 32 | enum dma_data_direction direction); |
37 | 33 | ||
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index d030594ed22b..9e079d49e7f2 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h | |||
@@ -86,7 +86,6 @@ | |||
86 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ | 86 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ |
87 | #define MADV_WILLNEED 3 /* will need these pages */ | 87 | #define MADV_WILLNEED 3 /* will need these pages */ |
88 | #define MADV_DONTNEED 4 /* don't need these pages */ | 88 | #define MADV_DONTNEED 4 /* don't need these pages */ |
89 | #define MADV_FREE 5 /* free pages only if memory pressure */ | ||
90 | 89 | ||
91 | /* common parameters: try to keep these consistent across architectures */ | 90 | /* common parameters: try to keep these consistent across architectures */ |
92 | #define MADV_FREE 8 /* free pages only if memory pressure */ | 91 | #define MADV_FREE 8 /* free pages only if memory pressure */ |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 91bbb1959d8d..691eeea2f19a 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -200,7 +200,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = { | |||
200 | 200 | ||
201 | struct cpu_attr { | 201 | struct cpu_attr { |
202 | struct device_attribute attr; | 202 | struct device_attribute attr; |
203 | const struct cpumask *const * const map; | 203 | const struct cpumask *const map; |
204 | }; | 204 | }; |
205 | 205 | ||
206 | static ssize_t show_cpus_attr(struct device *dev, | 206 | static ssize_t show_cpus_attr(struct device *dev, |
@@ -209,7 +209,7 @@ static ssize_t show_cpus_attr(struct device *dev, | |||
209 | { | 209 | { |
210 | struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); | 210 | struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); |
211 | 211 | ||
212 | return cpumap_print_to_pagebuf(true, buf, *ca->map); | 212 | return cpumap_print_to_pagebuf(true, buf, ca->map); |
213 | } | 213 | } |
214 | 214 | ||
215 | #define _CPU_ATTR(name, map) \ | 215 | #define _CPU_ATTR(name, map) \ |
@@ -217,9 +217,9 @@ static ssize_t show_cpus_attr(struct device *dev, | |||
217 | 217 | ||
218 | /* Keep in sync with cpu_subsys_attrs */ | 218 | /* Keep in sync with cpu_subsys_attrs */ |
219 | static struct cpu_attr cpu_attrs[] = { | 219 | static struct cpu_attr cpu_attrs[] = { |
220 | _CPU_ATTR(online, &cpu_online_mask), | 220 | _CPU_ATTR(online, &__cpu_online_mask), |
221 | _CPU_ATTR(possible, &cpu_possible_mask), | 221 | _CPU_ATTR(possible, &__cpu_possible_mask), |
222 | _CPU_ATTR(present, &cpu_present_mask), | 222 | _CPU_ATTR(present, &__cpu_present_mask), |
223 | }; | 223 | }; |
224 | 224 | ||
225 | /* | 225 | /* |
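The extra level of indirection disappears because the cpumask rework in this series replaces the old pointer-typed globals with directly embedded struct cpumask objects; roughly (a sketch of the new declarations, not a verbatim copy):

	extern struct cpumask __cpu_online_mask;
	extern struct cpumask __cpu_possible_mask;
	extern struct cpumask __cpu_present_mask;

	#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
	#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
	#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)

which is why cpu_attrs[] can take the address of __cpu_online_mask directly and struct cpu_attr drops one pointer level.
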
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index d95c5971c225..d799662f19eb 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/gfp.h> | 12 | #include <linux/gfp.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <asm-generic/dma-coherent.h> | ||
16 | 15 | ||
17 | /* | 16 | /* |
18 | * Managed DMA API | 17 | * Managed DMA API |
@@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |||
167 | } | 166 | } |
168 | EXPORT_SYMBOL(dmam_free_noncoherent); | 167 | EXPORT_SYMBOL(dmam_free_noncoherent); |
169 | 168 | ||
170 | #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | 169 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT |
171 | 170 | ||
172 | static void dmam_coherent_decl_release(struct device *dev, void *res) | 171 | static void dmam_coherent_decl_release(struct device *dev, void *res) |
173 | { | 172 | { |
@@ -247,7 +246,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | |||
247 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | 246 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
248 | { | 247 | { |
249 | int ret = -ENXIO; | 248 | int ret = -ENXIO; |
250 | #ifdef CONFIG_MMU | 249 | #if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) |
251 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 250 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
252 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 251 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
253 | unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); | 252 | unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); |
@@ -264,7 +263,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | |||
264 | user_count << PAGE_SHIFT, | 263 | user_count << PAGE_SHIFT, |
265 | vma->vm_page_prot); | 264 | vma->vm_page_prot); |
266 | } | 265 | } |
267 | #endif /* CONFIG_MMU */ | 266 | #endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ |
268 | 267 | ||
269 | return ret; | 268 | return ret; |
270 | } | 269 | } |
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index e41594510b97..0c2f0a61b0ea 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c | |||
@@ -56,9 +56,7 @@ static u32 find_nvram_size(void __iomem *end) | |||
56 | static int nvram_find_and_copy(void __iomem *iobase, u32 lim) | 56 | static int nvram_find_and_copy(void __iomem *iobase, u32 lim) |
57 | { | 57 | { |
58 | struct nvram_header __iomem *header; | 58 | struct nvram_header __iomem *header; |
59 | int i; | ||
60 | u32 off; | 59 | u32 off; |
61 | u32 *src, *dst; | ||
62 | u32 size; | 60 | u32 size; |
63 | 61 | ||
64 | if (nvram_len) { | 62 | if (nvram_len) { |
@@ -95,10 +93,7 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim) | |||
95 | return -ENXIO; | 93 | return -ENXIO; |
96 | 94 | ||
97 | found: | 95 | found: |
98 | src = (u32 *)header; | 96 | __ioread32_copy(nvram_buf, header, sizeof(*header) / 4); |
99 | dst = (u32 *)nvram_buf; | ||
100 | for (i = 0; i < sizeof(struct nvram_header); i += 4) | ||
101 | *dst++ = __raw_readl(src++); | ||
102 | header = (struct nvram_header *)nvram_buf; | 97 | header = (struct nvram_header *)nvram_buf; |
103 | nvram_len = header->len; | 98 | nvram_len = header->len; |
104 | if (nvram_len > size) { | 99 | if (nvram_len > size) { |
@@ -111,8 +106,8 @@ found: | |||
111 | nvram_len = NVRAM_SPACE - 1; | 106 | nvram_len = NVRAM_SPACE - 1; |
112 | } | 107 | } |
113 | /* proceed reading data after header */ | 108 | /* proceed reading data after header */ |
114 | for (; i < nvram_len; i += 4) | 109 | __ioread32_copy(nvram_buf + sizeof(*header), header + 1, |
115 | *dst++ = readl(src++); | 110 | DIV_ROUND_UP(nvram_len, 4)); |
116 | nvram_buf[NVRAM_SPACE - 1] = '\0'; | 111 | nvram_buf[NVRAM_SPACE - 1] = '\0'; |
117 | 112 | ||
118 | return 0; | 113 | return 0; |
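__ioread32_copy() is the new lib/ helper this conversion relies on; its count argument is in 32-bit words, not bytes, which is why the caller divides by 4 and uses DIV_ROUND_UP() for the tail. The helper behaves essentially like this sketch, mirroring the existing __iowrite32_copy():

	void __ioread32_copy(void *to, const void __iomem *from, size_t count)
	{
		u32 *dst = to;
		const u32 __iomem *src = from;
		const u32 __iomem *end = src + count;

		/* 32-bit MMIO reads instead of an open-coded __raw_readl() loop */
		while (src < end)
			*dst++ = __raw_readl(src++);
	}
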
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 9c12e18031d5..aaf9c0bab42e 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile | |||
@@ -22,6 +22,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ | |||
22 | 22 | ||
23 | GCOV_PROFILE := n | 23 | GCOV_PROFILE := n |
24 | KASAN_SANITIZE := n | 24 | KASAN_SANITIZE := n |
25 | UBSAN_SANITIZE := n | ||
25 | 26 | ||
26 | lib-y := efi-stub-helper.o | 27 | lib-y := efi-stub-helper.o |
27 | 28 | ||
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 59babd5a5396..8ae7ab68cb97 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -82,13 +82,13 @@ config DRM_TTM | |||
82 | 82 | ||
83 | config DRM_GEM_CMA_HELPER | 83 | config DRM_GEM_CMA_HELPER |
84 | bool | 84 | bool |
85 | depends on DRM && HAVE_DMA_ATTRS | 85 | depends on DRM |
86 | help | 86 | help |
87 | Choose this if you need the GEM CMA helper functions | 87 | Choose this if you need the GEM CMA helper functions |
88 | 88 | ||
89 | config DRM_KMS_CMA_HELPER | 89 | config DRM_KMS_CMA_HELPER |
90 | bool | 90 | bool |
91 | depends on DRM && HAVE_DMA_ATTRS | 91 | depends on DRM |
92 | select DRM_GEM_CMA_HELPER | 92 | select DRM_GEM_CMA_HELPER |
93 | select DRM_KMS_FB_HELPER | 93 | select DRM_KMS_FB_HELPER |
94 | select FB_SYS_FILLRECT | 94 | select FB_SYS_FILLRECT |
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 35ca4f007839..a1844b50546c 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig | |||
@@ -5,7 +5,7 @@ config DRM_IMX | |||
5 | select VIDEOMODE_HELPERS | 5 | select VIDEOMODE_HELPERS |
6 | select DRM_GEM_CMA_HELPER | 6 | select DRM_GEM_CMA_HELPER |
7 | select DRM_KMS_CMA_HELPER | 7 | select DRM_KMS_CMA_HELPER |
8 | depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS | 8 | depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) |
9 | depends on IMX_IPUV3_CORE | 9 | depends on IMX_IPUV3_CORE |
10 | help | 10 | help |
11 | enable i.MX graphics support | 11 | enable i.MX graphics support |
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index d4e0a39568f6..96dcd4a78951 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_RCAR_DU | 1 | config DRM_RCAR_DU |
2 | tristate "DRM Support for R-Car Display Unit" | 2 | tristate "DRM Support for R-Car Display Unit" |
3 | depends on DRM && ARM && HAVE_DMA_ATTRS && OF | 3 | depends on DRM && ARM && OF |
4 | depends on ARCH_SHMOBILE || COMPILE_TEST | 4 | depends on ARCH_SHMOBILE || COMPILE_TEST |
5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
6 | select DRM_KMS_CMA_HELPER | 6 | select DRM_KMS_CMA_HELPER |
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index b9202aa6f8ab..8d17d00ddb4b 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_SHMOBILE | 1 | config DRM_SHMOBILE |
2 | tristate "DRM Support for SH Mobile" | 2 | tristate "DRM Support for SH Mobile" |
3 | depends on DRM && ARM && HAVE_DMA_ATTRS | 3 | depends on DRM && ARM |
4 | depends on ARCH_SHMOBILE || COMPILE_TEST | 4 | depends on ARCH_SHMOBILE || COMPILE_TEST |
5 | depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM | 5 | depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM |
6 | select BACKLIGHT_CLASS_DEVICE | 6 | select BACKLIGHT_CLASS_DEVICE |
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 10c1b1926e6f..5ad43a1bb260 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_STI | 1 | config DRM_STI |
2 | tristate "DRM Support for STMicroelectronics SoC stiH41x Series" | 2 | tristate "DRM Support for STMicroelectronics SoC stiH41x Series" |
3 | depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS | 3 | depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) |
4 | select RESET_CONTROLLER | 4 | select RESET_CONTROLLER |
5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
6 | select DRM_GEM_CMA_HELPER | 6 | select DRM_GEM_CMA_HELPER |
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 78beafb0742c..f60a1ec84fa4 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_TILCDC | 1 | config DRM_TILCDC |
2 | tristate "DRM Support for TI LCDC Display Controller" | 2 | tristate "DRM Support for TI LCDC Display Controller" |
3 | depends on DRM && OF && ARM && HAVE_DMA_ATTRS | 3 | depends on DRM && OF && ARM |
4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
5 | select DRM_KMS_FB_HELPER | 5 | select DRM_KMS_FB_HELPER |
6 | select DRM_KMS_CMA_HELPER | 6 | select DRM_KMS_CMA_HELPER |
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 2d7d115ddf3f..584810474e5b 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | config DRM_VC4 | 1 | config DRM_VC4 |
2 | tristate "Broadcom VC4 Graphics" | 2 | tristate "Broadcom VC4 Graphics" |
3 | depends on ARCH_BCM2835 || COMPILE_TEST | 3 | depends on ARCH_BCM2835 || COMPILE_TEST |
4 | depends on DRM && HAVE_DMA_ATTRS | 4 | depends on DRM |
5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
6 | select DRM_KMS_CMA_HELPER | 6 | select DRM_KMS_CMA_HELPER |
7 | select DRM_GEM_CMA_HELPER | 7 | select DRM_GEM_CMA_HELPER |
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c index 311f9fe5aa34..8d24fb159cc9 100644 --- a/drivers/iio/industrialio-sw-trigger.c +++ b/drivers/iio/industrialio-sw-trigger.c | |||
@@ -167,9 +167,7 @@ static int __init iio_sw_trigger_init(void) | |||
167 | configfs_register_default_group(&iio_configfs_subsys.su_group, | 167 | configfs_register_default_group(&iio_configfs_subsys.su_group, |
168 | "triggers", | 168 | "triggers", |
169 | &iio_triggers_group_type); | 169 | &iio_triggers_group_type); |
170 | if (IS_ERR(iio_triggers_group)) | 170 | return PTR_ERR_OR_ZERO(iio_triggers_group); |
171 | return PTR_ERR(iio_triggers_group); | ||
172 | return 0; | ||
173 | } | 171 | } |
174 | module_init(iio_sw_trigger_init); | 172 | module_init(iio_sw_trigger_init); |
175 | 173 | ||
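PTR_ERR_OR_ZERO() folds the IS_ERR()/PTR_ERR() dance into a single expression; its definition in <linux/err.h> amounts to:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}
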
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 0c53805dff0e..526359447ff9 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -216,7 +216,6 @@ config VIDEO_STI_BDISP | |||
216 | tristate "STMicroelectronics BDISP 2D blitter driver" | 216 | tristate "STMicroelectronics BDISP 2D blitter driver" |
217 | depends on VIDEO_DEV && VIDEO_V4L2 | 217 | depends on VIDEO_DEV && VIDEO_V4L2 |
218 | depends on ARCH_STI || COMPILE_TEST | 218 | depends on ARCH_STI || COMPILE_TEST |
219 | depends on HAVE_DMA_ATTRS | ||
220 | select VIDEOBUF2_DMA_CONTIG | 219 | select VIDEOBUF2_DMA_CONTIG |
221 | select V4L2_MEM2MEM_DEV | 220 | select V4L2_MEM2MEM_DEV |
222 | help | 221 | help |
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 24f2f8473dee..84abf9d3c24e 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c | |||
@@ -1909,7 +1909,7 @@ static void msb_io_work(struct work_struct *work) | |||
1909 | lba = blk_rq_pos(msb->req); | 1909 | lba = blk_rq_pos(msb->req); |
1910 | 1910 | ||
1911 | sector_div(lba, msb->page_size / 512); | 1911 | sector_div(lba, msb->page_size / 512); |
1912 | page = do_div(lba, msb->pages_in_block); | 1912 | page = sector_div(lba, msb->pages_in_block); |
1913 | 1913 | ||
1914 | if (rq_data_dir(msb->req) == READ) | 1914 | if (rq_data_dir(msb->req) == READ) |
1915 | error = msb_do_read_request(msb, lba, page, sg, | 1915 | error = msb_do_read_request(msb, lba, page, sg, |
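sector_div() divides a sector_t in place and returns the remainder whether sector_t is 32 or 64 bits wide, whereas do_div() is only defined for a u64 lvalue, so applying it to a sector_t is wrong when CONFIG_LBDAF is off. A hypothetical fragment (names are illustrative) mirroring the corrected code path:

	static u32 pos_to_block_and_page(sector_t pos, unsigned int page_size,
					 unsigned int pages_in_block,
					 sector_t *block)
	{
		sector_t lba = pos;

		sector_div(lba, page_size / 512);	/* 512-byte sectors -> pages */
		*block = lba;
		/* remainder is the page within the block, *block becomes the block */
		return sector_div(*block, pages_in_block);
	}
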
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 22892c701c63..054fc10cb3b6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -95,6 +95,7 @@ config DUMMY_IRQ | |||
95 | config IBM_ASM | 95 | config IBM_ASM |
96 | tristate "Device driver for IBM RSA service processor" | 96 | tristate "Device driver for IBM RSA service processor" |
97 | depends on X86 && PCI && INPUT | 97 | depends on X86 && PCI && INPUT |
98 | depends on SERIAL_8250 || SERIAL_8250=n | ||
98 | ---help--- | 99 | ---help--- |
99 | This option enables device driver support for in-band access to the | 100 | This option enables device driver support for in-band access to the |
100 | IBM RSA (Condor) service processor in eServer xSeries systems. | 101 | IBM RSA (Condor) service processor in eServer xSeries systems. |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 8e11fb2831cd..e24b05996a1b 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -786,18 +786,27 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
786 | return CCIO_IOVA(iovp, offset); | 786 | return CCIO_IOVA(iovp, offset); |
787 | } | 787 | } |
788 | 788 | ||
789 | |||
790 | static dma_addr_t | ||
791 | ccio_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
792 | size_t size, enum dma_data_direction direction, | ||
793 | struct dma_attrs *attrs) | ||
794 | { | ||
795 | return ccio_map_single(dev, page_address(page) + offset, size, | ||
796 | direction); | ||
797 | } | ||
798 | |||
799 | |||
789 | /** | 800 | /** |
790 | * ccio_unmap_single - Unmap an address range from the IOMMU. | 801 | * ccio_unmap_page - Unmap an address range from the IOMMU. |
791 | * @dev: The PCI device. | 802 | * @dev: The PCI device. |
792 | * @addr: The start address of the DMA region. | 803 | * @addr: The start address of the DMA region. |
793 | * @size: The length of the DMA region. | 804 | * @size: The length of the DMA region. |
794 | * @direction: The direction of the DMA transaction (to/from device). | 805 | * @direction: The direction of the DMA transaction (to/from device). |
795 | * | ||
796 | * This function implements the pci_unmap_single function. | ||
797 | */ | 806 | */ |
798 | static void | 807 | static void |
799 | ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, | 808 | ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
800 | enum dma_data_direction direction) | 809 | enum dma_data_direction direction, struct dma_attrs *attrs) |
801 | { | 810 | { |
802 | struct ioc *ioc; | 811 | struct ioc *ioc; |
803 | unsigned long flags; | 812 | unsigned long flags; |
@@ -826,7 +835,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, | |||
826 | } | 835 | } |
827 | 836 | ||
828 | /** | 837 | /** |
829 | * ccio_alloc_consistent - Allocate a consistent DMA mapping. | 838 | * ccio_alloc - Allocate a consistent DMA mapping. |
830 | * @dev: The PCI device. | 839 | * @dev: The PCI device. |
831 | * @size: The length of the DMA region. | 840 | * @size: The length of the DMA region. |
832 | * @dma_handle: The DMA address handed back to the device (not the cpu). | 841 | * @dma_handle: The DMA address handed back to the device (not the cpu). |
@@ -834,7 +843,8 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, | |||
834 | * This function implements the pci_alloc_consistent function. | 843 | * This function implements the pci_alloc_consistent function. |
835 | */ | 844 | */ |
836 | static void * | 845 | static void * |
837 | ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) | 846 | ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, |
847 | struct dma_attrs *attrs) | ||
838 | { | 848 | { |
839 | void *ret; | 849 | void *ret; |
840 | #if 0 | 850 | #if 0 |
@@ -858,7 +868,7 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g | |||
858 | } | 868 | } |
859 | 869 | ||
860 | /** | 870 | /** |
861 | * ccio_free_consistent - Free a consistent DMA mapping. | 871 | * ccio_free - Free a consistent DMA mapping. |
862 | * @dev: The PCI device. | 872 | * @dev: The PCI device. |
863 | * @size: The length of the DMA region. | 873 | * @size: The length of the DMA region. |
864 | * @cpu_addr: The cpu address returned from the ccio_alloc_consistent. | 874 | * @cpu_addr: The cpu address returned from the ccio_alloc_consistent. |
@@ -867,10 +877,10 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g | |||
867 | * This function implements the pci_free_consistent function. | 877 | * This function implements the pci_free_consistent function. |
868 | */ | 878 | */ |
869 | static void | 879 | static void |
870 | ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr, | 880 | ccio_free(struct device *dev, size_t size, void *cpu_addr, |
871 | dma_addr_t dma_handle) | 881 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
872 | { | 882 | { |
873 | ccio_unmap_single(dev, dma_handle, size, 0); | 883 | ccio_unmap_page(dev, dma_handle, size, 0, NULL); |
874 | free_pages((unsigned long)cpu_addr, get_order(size)); | 884 | free_pages((unsigned long)cpu_addr, get_order(size)); |
875 | } | 885 | } |
876 | 886 | ||
@@ -897,7 +907,7 @@ ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr, | |||
897 | */ | 907 | */ |
898 | static int | 908 | static int |
899 | ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 909 | ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, |
900 | enum dma_data_direction direction) | 910 | enum dma_data_direction direction, struct dma_attrs *attrs) |
901 | { | 911 | { |
902 | struct ioc *ioc; | 912 | struct ioc *ioc; |
903 | int coalesced, filled = 0; | 913 | int coalesced, filled = 0; |
@@ -974,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
974 | */ | 984 | */ |
975 | static void | 985 | static void |
976 | ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | 986 | ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, |
977 | enum dma_data_direction direction) | 987 | enum dma_data_direction direction, struct dma_attrs *attrs) |
978 | { | 988 | { |
979 | struct ioc *ioc; | 989 | struct ioc *ioc; |
980 | 990 | ||
@@ -993,27 +1003,22 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
993 | #ifdef CCIO_COLLECT_STATS | 1003 | #ifdef CCIO_COLLECT_STATS |
994 | ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; | 1004 | ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; |
995 | #endif | 1005 | #endif |
996 | ccio_unmap_single(dev, sg_dma_address(sglist), | 1006 | ccio_unmap_page(dev, sg_dma_address(sglist), |
997 | sg_dma_len(sglist), direction); | 1007 | sg_dma_len(sglist), direction, NULL); |
998 | ++sglist; | 1008 | ++sglist; |
999 | } | 1009 | } |
1000 | 1010 | ||
1001 | DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); | 1011 | DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); |
1002 | } | 1012 | } |
1003 | 1013 | ||
1004 | static struct hppa_dma_ops ccio_ops = { | 1014 | static struct dma_map_ops ccio_ops = { |
1005 | .dma_supported = ccio_dma_supported, | 1015 | .dma_supported = ccio_dma_supported, |
1006 | .alloc_consistent = ccio_alloc_consistent, | 1016 | .alloc = ccio_alloc, |
1007 | .alloc_noncoherent = ccio_alloc_consistent, | 1017 | .free = ccio_free, |
1008 | .free_consistent = ccio_free_consistent, | 1018 | .map_page = ccio_map_page, |
1009 | .map_single = ccio_map_single, | 1019 | .unmap_page = ccio_unmap_page, |
1010 | .unmap_single = ccio_unmap_single, | ||
1011 | .map_sg = ccio_map_sg, | 1020 | .map_sg = ccio_map_sg, |
1012 | .unmap_sg = ccio_unmap_sg, | 1021 | .unmap_sg = ccio_unmap_sg, |
1013 | .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */ | ||
1014 | .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */ | ||
1015 | .dma_sync_sg_for_cpu = NULL, /* ditto */ | ||
1016 | .dma_sync_sg_for_device = NULL, /* ditto */ | ||
1017 | }; | 1022 | }; |
1018 | 1023 | ||
1019 | #ifdef CONFIG_PROC_FS | 1024 | #ifdef CONFIG_PROC_FS |
@@ -1062,7 +1067,7 @@ static int ccio_proc_info(struct seq_file *m, void *p) | |||
1062 | ioc->msingle_calls, ioc->msingle_pages, | 1067 | ioc->msingle_calls, ioc->msingle_pages, |
1063 | (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); | 1068 | (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); |
1064 | 1069 | ||
1065 | /* KLUGE - unmap_sg calls unmap_single for each mapped page */ | 1070 | /* KLUGE - unmap_sg calls unmap_page for each mapped page */ |
1066 | min = ioc->usingle_calls - ioc->usg_calls; | 1071 | min = ioc->usingle_calls - ioc->usg_calls; |
1067 | max = ioc->usingle_pages - ioc->usg_pages; | 1072 | max = ioc->usingle_pages - ioc->usg_pages; |
1068 | seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", | 1073 | seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 225049b492e5..42ec4600b7e4 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -780,8 +780,18 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
780 | } | 780 | } |
781 | 781 | ||
782 | 782 | ||
783 | static dma_addr_t | ||
784 | sba_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
785 | size_t size, enum dma_data_direction direction, | ||
786 | struct dma_attrs *attrs) | ||
787 | { | ||
788 | return sba_map_single(dev, page_address(page) + offset, size, | ||
789 | direction); | ||
790 | } | ||
791 | |||
792 | |||
783 | /** | 793 | /** |
784 | * sba_unmap_single - unmap one IOVA and free resources | 794 | * sba_unmap_page - unmap one IOVA and free resources |
785 | * @dev: instance of PCI owned by the driver that's asking. | 795 | * @dev: instance of PCI owned by the driver that's asking. |
786 | * @iova: IOVA of driver buffer previously mapped. | 796 | * @iova: IOVA of driver buffer previously mapped. |
787 | * @size: number of bytes mapped in driver buffer. | 797 | * @size: number of bytes mapped in driver buffer. |
@@ -790,8 +800,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
790 | * See Documentation/DMA-API-HOWTO.txt | 800 | * See Documentation/DMA-API-HOWTO.txt |
791 | */ | 801 | */ |
792 | static void | 802 | static void |
793 | sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, | 803 | sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
794 | enum dma_data_direction direction) | 804 | enum dma_data_direction direction, struct dma_attrs *attrs) |
795 | { | 805 | { |
796 | struct ioc *ioc; | 806 | struct ioc *ioc; |
797 | #if DELAYED_RESOURCE_CNT > 0 | 807 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -858,15 +868,15 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, | |||
858 | 868 | ||
859 | 869 | ||
860 | /** | 870 | /** |
861 | * sba_alloc_consistent - allocate/map shared mem for DMA | 871 | * sba_alloc - allocate/map shared mem for DMA |
862 | * @hwdev: instance of PCI owned by the driver that's asking. | 872 | * @hwdev: instance of PCI owned by the driver that's asking. |
863 | * @size: number of bytes mapped in driver buffer. | 873 | * @size: number of bytes mapped in driver buffer. |
864 | * @dma_handle: IOVA of new buffer. | 874 | * @dma_handle: IOVA of new buffer. |
865 | * | 875 | * |
866 | * See Documentation/DMA-API-HOWTO.txt | 876 | * See Documentation/DMA-API-HOWTO.txt |
867 | */ | 877 | */ |
868 | static void *sba_alloc_consistent(struct device *hwdev, size_t size, | 878 | static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, |
869 | dma_addr_t *dma_handle, gfp_t gfp) | 879 | gfp_t gfp, struct dma_attrs *attrs) |
870 | { | 880 | { |
871 | void *ret; | 881 | void *ret; |
872 | 882 | ||
@@ -888,7 +898,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size, | |||
888 | 898 | ||
889 | 899 | ||
890 | /** | 900 | /** |
891 | * sba_free_consistent - free/unmap shared mem for DMA | 901 | * sba_free - free/unmap shared mem for DMA |
892 | * @hwdev: instance of PCI owned by the driver that's asking. | 902 | * @hwdev: instance of PCI owned by the driver that's asking. |
893 | * @size: number of bytes mapped in driver buffer. | 903 | * @size: number of bytes mapped in driver buffer. |
894 | * @vaddr: virtual address IOVA of "consistent" buffer. | 904 | * @vaddr: virtual address IOVA of "consistent" buffer. |
@@ -897,10 +907,10 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size, | |||
897 | * See Documentation/DMA-API-HOWTO.txt | 907 | * See Documentation/DMA-API-HOWTO.txt |
898 | */ | 908 | */ |
899 | static void | 909 | static void |
900 | sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, | 910 | sba_free(struct device *hwdev, size_t size, void *vaddr, |
901 | dma_addr_t dma_handle) | 911 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
902 | { | 912 | { |
903 | sba_unmap_single(hwdev, dma_handle, size, 0); | 913 | sba_unmap_page(hwdev, dma_handle, size, 0, NULL); |
904 | free_pages((unsigned long) vaddr, get_order(size)); | 914 | free_pages((unsigned long) vaddr, get_order(size)); |
905 | } | 915 | } |
906 | 916 | ||
@@ -933,7 +943,7 @@ int dump_run_sg = 0; | |||
933 | */ | 943 | */ |
934 | static int | 944 | static int |
935 | sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 945 | sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, |
936 | enum dma_data_direction direction) | 946 | enum dma_data_direction direction, struct dma_attrs *attrs) |
937 | { | 947 | { |
938 | struct ioc *ioc; | 948 | struct ioc *ioc; |
939 | int coalesced, filled = 0; | 949 | int coalesced, filled = 0; |
@@ -1016,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1016 | */ | 1026 | */ |
1017 | static void | 1027 | static void |
1018 | sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | 1028 | sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, |
1019 | enum dma_data_direction direction) | 1029 | enum dma_data_direction direction, struct dma_attrs *attrs) |
1020 | { | 1030 | { |
1021 | struct ioc *ioc; | 1031 | struct ioc *ioc; |
1022 | #ifdef ASSERT_PDIR_SANITY | 1032 | #ifdef ASSERT_PDIR_SANITY |
@@ -1040,7 +1050,8 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1040 | 1050 | ||
1041 | while (sg_dma_len(sglist) && nents--) { | 1051 | while (sg_dma_len(sglist) && nents--) { |
1042 | 1052 | ||
1043 | sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); | 1053 | sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), |
1054 | direction, NULL); | ||
1044 | #ifdef SBA_COLLECT_STATS | 1055 | #ifdef SBA_COLLECT_STATS |
1045 | ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; | 1056 | ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; |
1046 | ioc->usingle_calls--; /* kluge since call is unmap_sg() */ | 1057 | ioc->usingle_calls--; /* kluge since call is unmap_sg() */ |
@@ -1058,19 +1069,14 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1058 | 1069 | ||
1059 | } | 1070 | } |
1060 | 1071 | ||
1061 | static struct hppa_dma_ops sba_ops = { | 1072 | static struct dma_map_ops sba_ops = { |
1062 | .dma_supported = sba_dma_supported, | 1073 | .dma_supported = sba_dma_supported, |
1063 | .alloc_consistent = sba_alloc_consistent, | 1074 | .alloc = sba_alloc, |
1064 | .alloc_noncoherent = sba_alloc_consistent, | 1075 | .free = sba_free, |
1065 | .free_consistent = sba_free_consistent, | 1076 | .map_page = sba_map_page, |
1066 | .map_single = sba_map_single, | 1077 | .unmap_page = sba_unmap_page, |
1067 | .unmap_single = sba_unmap_single, | ||
1068 | .map_sg = sba_map_sg, | 1078 | .map_sg = sba_map_sg, |
1069 | .unmap_sg = sba_unmap_sg, | 1079 | .unmap_sg = sba_unmap_sg, |
1070 | .dma_sync_single_for_cpu = NULL, | ||
1071 | .dma_sync_single_for_device = NULL, | ||
1072 | .dma_sync_sg_for_cpu = NULL, | ||
1073 | .dma_sync_sg_for_device = NULL, | ||
1074 | }; | 1080 | }; |
1075 | 1081 | ||
1076 | 1082 | ||
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index cdb005c0094d..eda41563d06d 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c | |||
@@ -125,8 +125,7 @@ rio_read_config(struct file *filp, struct kobject *kobj, | |||
125 | struct bin_attribute *bin_attr, | 125 | struct bin_attribute *bin_attr, |
126 | char *buf, loff_t off, size_t count) | 126 | char *buf, loff_t off, size_t count) |
127 | { | 127 | { |
128 | struct rio_dev *dev = | 128 | struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); |
129 | to_rio_dev(container_of(kobj, struct device, kobj)); | ||
130 | unsigned int size = 0x100; | 129 | unsigned int size = 0x100; |
131 | loff_t init_off = off; | 130 | loff_t init_off = off; |
132 | u8 *data = (u8 *) buf; | 131 | u8 *data = (u8 *) buf; |
@@ -197,8 +196,7 @@ rio_write_config(struct file *filp, struct kobject *kobj, | |||
197 | struct bin_attribute *bin_attr, | 196 | struct bin_attribute *bin_attr, |
198 | char *buf, loff_t off, size_t count) | 197 | char *buf, loff_t off, size_t count) |
199 | { | 198 | { |
200 | struct rio_dev *dev = | 199 | struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); |
201 | to_rio_dev(container_of(kobj, struct device, kobj)); | ||
202 | unsigned int size = count; | 200 | unsigned int size = count; |
203 | loff_t init_off = off; | 201 | loff_t init_off = off; |
204 | u8 *data = (u8 *) buf; | 202 | u8 *data = (u8 *) buf; |
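kobj_to_dev() is just the obvious container_of() wrapper from <linux/device.h>, so the conversion above is purely cosmetic; roughly:

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}
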
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index 86b598cff91a..498fd0581a45 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c | |||
@@ -434,20 +434,15 @@ static void smd_copy_to_fifo(void __iomem *dst, | |||
434 | /* | 434 | /* |
435 | * Copy count bytes of data using 32bit accesses, if that is required. | 435 | * Copy count bytes of data using 32bit accesses, if that is required. |
436 | */ | 436 | */ |
437 | static void smd_copy_from_fifo(void *_dst, | 437 | static void smd_copy_from_fifo(void *dst, |
438 | const void __iomem *_src, | 438 | const void __iomem *src, |
439 | size_t count, | 439 | size_t count, |
440 | bool word_aligned) | 440 | bool word_aligned) |
441 | { | 441 | { |
442 | u32 *dst = (u32 *)_dst; | ||
443 | u32 *src = (u32 *)_src; | ||
444 | |||
445 | if (word_aligned) { | 442 | if (word_aligned) { |
446 | count /= sizeof(u32); | 443 | __ioread32_copy(dst, src, count / sizeof(u32)); |
447 | while (count--) | ||
448 | *dst++ = __raw_readl(src++); | ||
449 | } else { | 444 | } else { |
450 | memcpy_fromio(_dst, _src, count); | 445 | memcpy_fromio(dst, src, count); |
451 | } | 446 | } |
452 | } | 447 | } |
453 | 448 | ||
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h index ea4aba56f29d..fadf408bdd46 100644 --- a/fs/adfs/adfs.h +++ b/fs/adfs/adfs.h | |||
@@ -44,24 +44,24 @@ struct adfs_dir_ops; | |||
44 | */ | 44 | */ |
45 | struct adfs_sb_info { | 45 | struct adfs_sb_info { |
46 | union { struct { | 46 | union { struct { |
47 | struct adfs_discmap *s_map; /* bh list containing map */ | 47 | struct adfs_discmap *s_map; /* bh list containing map */ |
48 | const struct adfs_dir_ops *s_dir; /* directory operations */ | 48 | const struct adfs_dir_ops *s_dir; /* directory operations */ |
49 | }; | 49 | }; |
50 | struct rcu_head rcu; /* used only at shutdown time */ | 50 | struct rcu_head rcu; /* used only at shutdown time */ |
51 | }; | 51 | }; |
52 | kuid_t s_uid; /* owner uid */ | 52 | kuid_t s_uid; /* owner uid */ |
53 | kgid_t s_gid; /* owner gid */ | 53 | kgid_t s_gid; /* owner gid */ |
54 | umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ | 54 | umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ |
55 | umode_t s_other_mask; /* ADFS other perm -> unix perm */ | 55 | umode_t s_other_mask; /* ADFS other perm -> unix perm */ |
56 | int s_ftsuffix; /* ,xyz hex filetype suffix option */ | 56 | int s_ftsuffix; /* ,xyz hex filetype suffix option */ |
57 | 57 | ||
58 | __u32 s_ids_per_zone; /* max. no ids in one zone */ | 58 | __u32 s_ids_per_zone; /* max. no ids in one zone */ |
59 | __u32 s_idlen; /* length of ID in map */ | 59 | __u32 s_idlen; /* length of ID in map */ |
60 | __u32 s_map_size; /* sector size of a map */ | 60 | __u32 s_map_size; /* sector size of a map */ |
61 | unsigned long s_size; /* total size (in blocks) of this fs */ | 61 | unsigned long s_size; /* total size (in blocks) of this fs */ |
62 | signed int s_map2blk; /* shift left by this for map->sector */ | 62 | signed int s_map2blk; /* shift left by this for map->sector*/ |
63 | unsigned int s_log2sharesize;/* log2 share size */ | 63 | unsigned int s_log2sharesize;/* log2 share size */ |
64 | __le32 s_version; /* disc format version */ | 64 | __le32 s_version; /* disc format version */ |
65 | unsigned int s_namelen; /* maximum number of characters in name */ | 65 | unsigned int s_namelen; /* maximum number of characters in name */ |
66 | }; | 66 | }; |
67 | 67 | ||
diff --git a/fs/coredump.c b/fs/coredump.c index b3c153ca435d..9ea87e9fdccf 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
@@ -118,6 +118,26 @@ int cn_esc_printf(struct core_name *cn, const char *fmt, ...) | |||
118 | ret = cn_vprintf(cn, fmt, arg); | 118 | ret = cn_vprintf(cn, fmt, arg); |
119 | va_end(arg); | 119 | va_end(arg); |
120 | 120 | ||
121 | if (ret == 0) { | ||
122 | /* | ||
123 | * Ensure that this coredump name component can't cause the | ||
124 | * resulting corefile path to consist of a ".." or ".". | ||
125 | */ | ||
126 | if ((cn->used - cur == 1 && cn->corename[cur] == '.') || | ||
127 | (cn->used - cur == 2 && cn->corename[cur] == '.' | ||
128 | && cn->corename[cur+1] == '.')) | ||
129 | cn->corename[cur] = '!'; | ||
130 | |||
131 | /* | ||
132 | * Empty names are fishy and could be used to create a "//" in a | ||
133 | * corefile name, causing the coredump to happen one directory | ||
134 | * level too high. Enforce that all components of the core | ||
135 | * pattern are at least one character long. | ||
136 | */ | ||
137 | if (cn->used == cur) | ||
138 | ret = cn_printf(cn, "!"); | ||
139 | } | ||
140 | |||
121 | for (; cur < cn->used; ++cur) { | 141 | for (; cur < cn->used; ++cur) { |
122 | if (cn->corename[cur] == '/') | 142 | if (cn->corename[cur] == '/') |
123 | cn->corename[cur] = '!'; | 143 | cn->corename[cur] = '!'; |
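The coredump hunk above rejects "." and ".." name components and pads empty ones so the expanded core_pattern can never climb a directory level. A standalone sketch of the same sanitisation rule, using a hypothetical helper rather than the kernel's cn_esc_printf():

#include <string.h>

/* Rewrite a single core-name component in place so it can never be
 * ".", "..", or empty; mirrors the checks added above. */
static void sanitize_component(char *comp)
{
	size_t len = strlen(comp);

	if (len == 0) {
		comp[0] = '!';		/* assumes room for one extra byte */
		comp[1] = '\0';
	} else if ((len == 1 && comp[0] == '.') ||
		   (len == 2 && comp[0] == '.' && comp[1] == '.')) {
		comp[0] = '!';		/* "." becomes "!", ".." becomes "!." */
	}
}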
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1e009cad8d5c..ae1dbcf47e97 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -92,7 +92,7 @@ | |||
92 | */ | 92 | */ |
93 | 93 | ||
94 | /* Epoll private bits inside the event mask */ | 94 | /* Epoll private bits inside the event mask */ |
95 | #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET) | 95 | #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE) |
96 | 96 | ||
97 | /* Maximum number of nesting allowed inside epoll sets */ | 97 | /* Maximum number of nesting allowed inside epoll sets */ |
98 | #define EP_MAX_NESTS 4 | 98 | #define EP_MAX_NESTS 4 |
@@ -1002,6 +1002,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
1002 | unsigned long flags; | 1002 | unsigned long flags; |
1003 | struct epitem *epi = ep_item_from_wait(wait); | 1003 | struct epitem *epi = ep_item_from_wait(wait); |
1004 | struct eventpoll *ep = epi->ep; | 1004 | struct eventpoll *ep = epi->ep; |
1005 | int ewake = 0; | ||
1005 | 1006 | ||
1006 | if ((unsigned long)key & POLLFREE) { | 1007 | if ((unsigned long)key & POLLFREE) { |
1007 | ep_pwq_from_wait(wait)->whead = NULL; | 1008 | ep_pwq_from_wait(wait)->whead = NULL; |
@@ -1066,8 +1067,10 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
1066 | * Wake up ( if active ) both the eventpoll wait list and the ->poll() | 1067 | * Wake up ( if active ) both the eventpoll wait list and the ->poll() |
1067 | * wait list. | 1068 | * wait list. |
1068 | */ | 1069 | */ |
1069 | if (waitqueue_active(&ep->wq)) | 1070 | if (waitqueue_active(&ep->wq)) { |
1071 | ewake = 1; | ||
1070 | wake_up_locked(&ep->wq); | 1072 | wake_up_locked(&ep->wq); |
1073 | } | ||
1071 | if (waitqueue_active(&ep->poll_wait)) | 1074 | if (waitqueue_active(&ep->poll_wait)) |
1072 | pwake++; | 1075 | pwake++; |
1073 | 1076 | ||
@@ -1078,6 +1081,9 @@ out_unlock: | |||
1078 | if (pwake) | 1081 | if (pwake) |
1079 | ep_poll_safewake(&ep->poll_wait); | 1082 | ep_poll_safewake(&ep->poll_wait); |
1080 | 1083 | ||
1084 | if (epi->event.events & EPOLLEXCLUSIVE) | ||
1085 | return ewake; | ||
1086 | |||
1081 | return 1; | 1087 | return 1; |
1082 | } | 1088 | } |
1083 | 1089 | ||
@@ -1095,7 +1101,10 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, | |||
1095 | init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); | 1101 | init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); |
1096 | pwq->whead = whead; | 1102 | pwq->whead = whead; |
1097 | pwq->base = epi; | 1103 | pwq->base = epi; |
1098 | add_wait_queue(whead, &pwq->wait); | 1104 | if (epi->event.events & EPOLLEXCLUSIVE) |
1105 | add_wait_queue_exclusive(whead, &pwq->wait); | ||
1106 | else | ||
1107 | add_wait_queue(whead, &pwq->wait); | ||
1099 | list_add_tail(&pwq->llink, &epi->pwqlist); | 1108 | list_add_tail(&pwq->llink, &epi->pwqlist); |
1100 | epi->nwait++; | 1109 | epi->nwait++; |
1101 | } else { | 1110 | } else { |
@@ -1862,6 +1871,15 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1862 | goto error_tgt_fput; | 1871 | goto error_tgt_fput; |
1863 | 1872 | ||
1864 | /* | 1873 | /* |
1874 | * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only, | ||
1875 | * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation. | ||
1876 | * Also, we do not currently support nested exclusive wakeups. | ||
1877 | */ | ||
1878 | if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD || | ||
1879 | (op == EPOLL_CTL_ADD && is_file_epoll(tf.file)))) | ||
1880 | goto error_tgt_fput; | ||
1881 | |||
1882 | /* | ||
1865 | * At this point it is safe to assume that the "private_data" contains | 1883 | * At this point it is safe to assume that the "private_data" contains |
1866 | * our own data structure. | 1884 | * our own data structure. |
1867 | */ | 1885 | */ |
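The eventpoll changes above introduce EPOLLEXCLUSIVE, which must be supplied at EPOLL_CTL_ADD time and is rejected for EPOLL_CTL_MOD or when the target is itself an epoll file. A minimal userspace sketch of a caller requesting an exclusive wakeup (assumes headers that define EPOLLEXCLUSIVE; on older headers the constant, 1U << 28, would have to be defined by hand):

#include <sys/epoll.h>
#include <stdio.h>

int add_exclusive(int epfd, int fd)
{
	struct epoll_event ev = {
		/* EPOLLEXCLUSIVE is only honoured at EPOLL_CTL_ADD time;
		 * passing it with EPOLL_CTL_MOD is rejected. */
		.events = EPOLLIN | EPOLLEXCLUSIVE,
		.data.fd = fd,
	};

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
		perror("epoll_ctl");
		return -1;
	}
	return 0;
}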
diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 93fc62232ec2..5d384921524d 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c | |||
@@ -301,15 +301,59 @@ static int fat_bmap_cluster(struct inode *inode, int cluster) | |||
301 | return dclus; | 301 | return dclus; |
302 | } | 302 | } |
303 | 303 | ||
304 | int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, | 304 | int fat_get_mapped_cluster(struct inode *inode, sector_t sector, |
305 | unsigned long *mapped_blocks, int create) | 305 | sector_t last_block, |
306 | unsigned long *mapped_blocks, sector_t *bmap) | ||
306 | { | 307 | { |
307 | struct super_block *sb = inode->i_sb; | 308 | struct super_block *sb = inode->i_sb; |
308 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 309 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
310 | int cluster, offset; | ||
311 | |||
312 | cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); | ||
313 | offset = sector & (sbi->sec_per_clus - 1); | ||
314 | cluster = fat_bmap_cluster(inode, cluster); | ||
315 | if (cluster < 0) | ||
316 | return cluster; | ||
317 | else if (cluster) { | ||
318 | *bmap = fat_clus_to_blknr(sbi, cluster) + offset; | ||
319 | *mapped_blocks = sbi->sec_per_clus - offset; | ||
320 | if (*mapped_blocks > last_block - sector) | ||
321 | *mapped_blocks = last_block - sector; | ||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int is_exceed_eof(struct inode *inode, sector_t sector, | ||
328 | sector_t *last_block, int create) | ||
329 | { | ||
330 | struct super_block *sb = inode->i_sb; | ||
309 | const unsigned long blocksize = sb->s_blocksize; | 331 | const unsigned long blocksize = sb->s_blocksize; |
310 | const unsigned char blocksize_bits = sb->s_blocksize_bits; | 332 | const unsigned char blocksize_bits = sb->s_blocksize_bits; |
333 | |||
334 | *last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits; | ||
335 | if (sector >= *last_block) { | ||
336 | if (!create) | ||
337 | return 1; | ||
338 | |||
339 | /* | ||
340 | * ->mmu_private can access on only allocation path. | ||
341 | * (caller must hold ->i_mutex) | ||
342 | */ | ||
343 | *last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1)) | ||
344 | >> blocksize_bits; | ||
345 | if (sector >= *last_block) | ||
346 | return 1; | ||
347 | } | ||
348 | |||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, | ||
353 | unsigned long *mapped_blocks, int create, bool from_bmap) | ||
354 | { | ||
355 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); | ||
311 | sector_t last_block; | 356 | sector_t last_block; |
312 | int cluster, offset; | ||
313 | 357 | ||
314 | *phys = 0; | 358 | *phys = 0; |
315 | *mapped_blocks = 0; | 359 | *mapped_blocks = 0; |
@@ -321,31 +365,16 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, | |||
321 | return 0; | 365 | return 0; |
322 | } | 366 | } |
323 | 367 | ||
324 | last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits; | 368 | if (!from_bmap) { |
325 | if (sector >= last_block) { | 369 | if (is_exceed_eof(inode, sector, &last_block, create)) |
326 | if (!create) | ||
327 | return 0; | 370 | return 0; |
328 | 371 | } else { | |
329 | /* | 372 | last_block = inode->i_blocks >> |
330 | * ->mmu_private can access on only allocation path. | 373 | (inode->i_sb->s_blocksize_bits - 9); |
331 | * (caller must hold ->i_mutex) | ||
332 | */ | ||
333 | last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1)) | ||
334 | >> blocksize_bits; | ||
335 | if (sector >= last_block) | 374 | if (sector >= last_block) |
336 | return 0; | 375 | return 0; |
337 | } | 376 | } |
338 | 377 | ||
339 | cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); | 378 | return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks, |
340 | offset = sector & (sbi->sec_per_clus - 1); | 379 | phys); |
341 | cluster = fat_bmap_cluster(inode, cluster); | ||
342 | if (cluster < 0) | ||
343 | return cluster; | ||
344 | else if (cluster) { | ||
345 | *phys = fat_clus_to_blknr(sbi, cluster) + offset; | ||
346 | *mapped_blocks = sbi->sec_per_clus - offset; | ||
347 | if (*mapped_blocks > last_block - sector) | ||
348 | *mapped_blocks = last_block - sector; | ||
349 | } | ||
350 | return 0; | ||
351 | } | 380 | } |
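fat_get_mapped_cluster() above factors out the sector-to-cluster arithmetic: the sector is split into a cluster index and an offset within the cluster using the cluster and block-size shifts. A small sketch of that arithmetic with hypothetical geometry values, not tied to msdos_sb_info:

#include <stdio.h>

int main(void)
{
	/* Example geometry: 512-byte blocks, 8 sectors per 4 KiB cluster. */
	unsigned int blocksize_bits = 9;
	unsigned int cluster_bits   = 12;
	unsigned int sec_per_clus   = 8;

	unsigned long long sector  = 1234;
	unsigned long long cluster = sector >> (cluster_bits - blocksize_bits);
	unsigned long long offset  = sector & (sec_per_clus - 1);

	/* 1234 >> 3 = 154, 1234 & 7 = 2 */
	printf("cluster %llu, offset %llu\n", cluster, offset);
	return 0;
}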
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 8b2127ffb226..7def96caec5f 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c | |||
@@ -91,7 +91,7 @@ next: | |||
91 | 91 | ||
92 | *bh = NULL; | 92 | *bh = NULL; |
93 | iblock = *pos >> sb->s_blocksize_bits; | 93 | iblock = *pos >> sb->s_blocksize_bits; |
94 | err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0); | 94 | err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false); |
95 | if (err || !phys) | 95 | if (err || !phys) |
96 | return -1; /* beyond EOF or error */ | 96 | return -1; /* beyond EOF or error */ |
97 | 97 | ||
diff --git a/fs/fat/fat.h b/fs/fat/fat.h index be5e15323bab..e6b764a17a9c 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h | |||
@@ -87,7 +87,7 @@ struct msdos_sb_info { | |||
87 | unsigned int vol_id; /*volume ID*/ | 87 | unsigned int vol_id; /*volume ID*/ |
88 | 88 | ||
89 | int fatent_shift; | 89 | int fatent_shift; |
90 | struct fatent_operations *fatent_ops; | 90 | const struct fatent_operations *fatent_ops; |
91 | struct inode *fat_inode; | 91 | struct inode *fat_inode; |
92 | struct inode *fsinfo_inode; | 92 | struct inode *fsinfo_inode; |
93 | 93 | ||
@@ -285,8 +285,11 @@ static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len) | |||
285 | extern void fat_cache_inval_inode(struct inode *inode); | 285 | extern void fat_cache_inval_inode(struct inode *inode); |
286 | extern int fat_get_cluster(struct inode *inode, int cluster, | 286 | extern int fat_get_cluster(struct inode *inode, int cluster, |
287 | int *fclus, int *dclus); | 287 | int *fclus, int *dclus); |
288 | extern int fat_get_mapped_cluster(struct inode *inode, sector_t sector, | ||
289 | sector_t last_block, | ||
290 | unsigned long *mapped_blocks, sector_t *bmap); | ||
288 | extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, | 291 | extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, |
289 | unsigned long *mapped_blocks, int create); | 292 | unsigned long *mapped_blocks, int create, bool from_bmap); |
290 | 293 | ||
291 | /* fat/dir.c */ | 294 | /* fat/dir.c */ |
292 | extern const struct file_operations fat_dir_operations; | 295 | extern const struct file_operations fat_dir_operations; |
@@ -384,6 +387,7 @@ static inline unsigned long fat_dir_hash(int logstart) | |||
384 | { | 387 | { |
385 | return hash_32(logstart, FAT_HASH_BITS); | 388 | return hash_32(logstart, FAT_HASH_BITS); |
386 | } | 389 | } |
390 | extern int fat_add_cluster(struct inode *inode); | ||
387 | 391 | ||
388 | /* fat/misc.c */ | 392 | /* fat/misc.c */ |
389 | extern __printf(3, 4) __cold | 393 | extern __printf(3, 4) __cold |
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 8226557130a2..1d9a8c4e9de0 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c | |||
@@ -99,7 +99,7 @@ err: | |||
99 | static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent, | 99 | static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent, |
100 | int offset, sector_t blocknr) | 100 | int offset, sector_t blocknr) |
101 | { | 101 | { |
102 | struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; | 102 | const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; |
103 | 103 | ||
104 | WARN_ON(blocknr < MSDOS_SB(sb)->fat_start); | 104 | WARN_ON(blocknr < MSDOS_SB(sb)->fat_start); |
105 | fatent->fat_inode = MSDOS_SB(sb)->fat_inode; | 105 | fatent->fat_inode = MSDOS_SB(sb)->fat_inode; |
@@ -246,7 +246,7 @@ static int fat32_ent_next(struct fat_entry *fatent) | |||
246 | return 0; | 246 | return 0; |
247 | } | 247 | } |
248 | 248 | ||
249 | static struct fatent_operations fat12_ops = { | 249 | static const struct fatent_operations fat12_ops = { |
250 | .ent_blocknr = fat12_ent_blocknr, | 250 | .ent_blocknr = fat12_ent_blocknr, |
251 | .ent_set_ptr = fat12_ent_set_ptr, | 251 | .ent_set_ptr = fat12_ent_set_ptr, |
252 | .ent_bread = fat12_ent_bread, | 252 | .ent_bread = fat12_ent_bread, |
@@ -255,7 +255,7 @@ static struct fatent_operations fat12_ops = { | |||
255 | .ent_next = fat12_ent_next, | 255 | .ent_next = fat12_ent_next, |
256 | }; | 256 | }; |
257 | 257 | ||
258 | static struct fatent_operations fat16_ops = { | 258 | static const struct fatent_operations fat16_ops = { |
259 | .ent_blocknr = fat_ent_blocknr, | 259 | .ent_blocknr = fat_ent_blocknr, |
260 | .ent_set_ptr = fat16_ent_set_ptr, | 260 | .ent_set_ptr = fat16_ent_set_ptr, |
261 | .ent_bread = fat_ent_bread, | 261 | .ent_bread = fat_ent_bread, |
@@ -264,7 +264,7 @@ static struct fatent_operations fat16_ops = { | |||
264 | .ent_next = fat16_ent_next, | 264 | .ent_next = fat16_ent_next, |
265 | }; | 265 | }; |
266 | 266 | ||
267 | static struct fatent_operations fat32_ops = { | 267 | static const struct fatent_operations fat32_ops = { |
268 | .ent_blocknr = fat_ent_blocknr, | 268 | .ent_blocknr = fat_ent_blocknr, |
269 | .ent_set_ptr = fat32_ent_set_ptr, | 269 | .ent_set_ptr = fat32_ent_set_ptr, |
270 | .ent_bread = fat_ent_bread, | 270 | .ent_bread = fat_ent_bread, |
@@ -320,7 +320,7 @@ static inline int fat_ent_update_ptr(struct super_block *sb, | |||
320 | int offset, sector_t blocknr) | 320 | int offset, sector_t blocknr) |
321 | { | 321 | { |
322 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 322 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
323 | struct fatent_operations *ops = sbi->fatent_ops; | 323 | const struct fatent_operations *ops = sbi->fatent_ops; |
324 | struct buffer_head **bhs = fatent->bhs; | 324 | struct buffer_head **bhs = fatent->bhs; |
325 | 325 | ||
326 | /* Is this fatent's blocks including this entry? */ | 326 | /* Is this fatent's blocks including this entry? */ |
@@ -349,7 +349,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) | |||
349 | { | 349 | { |
350 | struct super_block *sb = inode->i_sb; | 350 | struct super_block *sb = inode->i_sb; |
351 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); | 351 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); |
352 | struct fatent_operations *ops = sbi->fatent_ops; | 352 | const struct fatent_operations *ops = sbi->fatent_ops; |
353 | int err, offset; | 353 | int err, offset; |
354 | sector_t blocknr; | 354 | sector_t blocknr; |
355 | 355 | ||
@@ -407,7 +407,7 @@ int fat_ent_write(struct inode *inode, struct fat_entry *fatent, | |||
407 | int new, int wait) | 407 | int new, int wait) |
408 | { | 408 | { |
409 | struct super_block *sb = inode->i_sb; | 409 | struct super_block *sb = inode->i_sb; |
410 | struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; | 410 | const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; |
411 | int err; | 411 | int err; |
412 | 412 | ||
413 | ops->ent_put(fatent, new); | 413 | ops->ent_put(fatent, new); |
@@ -432,7 +432,7 @@ static inline int fat_ent_next(struct msdos_sb_info *sbi, | |||
432 | static inline int fat_ent_read_block(struct super_block *sb, | 432 | static inline int fat_ent_read_block(struct super_block *sb, |
433 | struct fat_entry *fatent) | 433 | struct fat_entry *fatent) |
434 | { | 434 | { |
435 | struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; | 435 | const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; |
436 | sector_t blocknr; | 436 | sector_t blocknr; |
437 | int offset; | 437 | int offset; |
438 | 438 | ||
@@ -463,7 +463,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster) | |||
463 | { | 463 | { |
464 | struct super_block *sb = inode->i_sb; | 464 | struct super_block *sb = inode->i_sb; |
465 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 465 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
466 | struct fatent_operations *ops = sbi->fatent_ops; | 466 | const struct fatent_operations *ops = sbi->fatent_ops; |
467 | struct fat_entry fatent, prev_ent; | 467 | struct fat_entry fatent, prev_ent; |
468 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; | 468 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; |
469 | int i, count, err, nr_bhs, idx_clus; | 469 | int i, count, err, nr_bhs, idx_clus; |
@@ -551,7 +551,7 @@ int fat_free_clusters(struct inode *inode, int cluster) | |||
551 | { | 551 | { |
552 | struct super_block *sb = inode->i_sb; | 552 | struct super_block *sb = inode->i_sb; |
553 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 553 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
554 | struct fatent_operations *ops = sbi->fatent_ops; | 554 | const struct fatent_operations *ops = sbi->fatent_ops; |
555 | struct fat_entry fatent; | 555 | struct fat_entry fatent; |
556 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; | 556 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; |
557 | int i, err, nr_bhs; | 557 | int i, err, nr_bhs; |
@@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(fat_free_clusters); | |||
636 | static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent, | 636 | static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent, |
637 | unsigned long reada_blocks) | 637 | unsigned long reada_blocks) |
638 | { | 638 | { |
639 | struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; | 639 | const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; |
640 | sector_t blocknr; | 640 | sector_t blocknr; |
641 | int i, offset; | 641 | int i, offset; |
642 | 642 | ||
@@ -649,7 +649,7 @@ static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent, | |||
649 | int fat_count_free_clusters(struct super_block *sb) | 649 | int fat_count_free_clusters(struct super_block *sb) |
650 | { | 650 | { |
651 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 651 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
652 | struct fatent_operations *ops = sbi->fatent_ops; | 652 | const struct fatent_operations *ops = sbi->fatent_ops; |
653 | struct fat_entry fatent; | 653 | struct fat_entry fatent; |
654 | unsigned long reada_blocks, reada_mask, cur_block; | 654 | unsigned long reada_blocks, reada_mask, cur_block; |
655 | int err = 0, free; | 655 | int err = 0, free; |
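The fatent.c hunks above only add const to the FAT entry operation tables and the pointers that reference them, so the tables can live in read-only memory. A generic sketch of the same const-ification, using a hypothetical ops structure:

struct ops {
	int (*read)(void *ctx);
	int (*write)(void *ctx, int v);
};

static int noop_read(void *ctx)         { (void)ctx; return 0; }
static int noop_write(void *ctx, int v) { (void)ctx; (void)v; return 0; }

/* Marking the table const lets the compiler place it in .rodata and
 * catches accidental writes through the pointer at compile time. */
static const struct ops noop_ops = {
	.read  = noop_read,
	.write = noop_write,
};

static int use(const struct ops *ops, void *ctx)
{
	return ops->read(ctx);
}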
diff --git a/fs/fat/file.c b/fs/fat/file.c index a08f1039909a..43d3475da83a 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c | |||
@@ -14,8 +14,12 @@ | |||
14 | #include <linux/backing-dev.h> | 14 | #include <linux/backing-dev.h> |
15 | #include <linux/fsnotify.h> | 15 | #include <linux/fsnotify.h> |
16 | #include <linux/security.h> | 16 | #include <linux/security.h> |
17 | #include <linux/falloc.h> | ||
17 | #include "fat.h" | 18 | #include "fat.h" |
18 | 19 | ||
20 | static long fat_fallocate(struct file *file, int mode, | ||
21 | loff_t offset, loff_t len); | ||
22 | |||
19 | static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr) | 23 | static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr) |
20 | { | 24 | { |
21 | u32 attr; | 25 | u32 attr; |
@@ -177,6 +181,7 @@ const struct file_operations fat_file_operations = { | |||
177 | #endif | 181 | #endif |
178 | .fsync = fat_file_fsync, | 182 | .fsync = fat_file_fsync, |
179 | .splice_read = generic_file_splice_read, | 183 | .splice_read = generic_file_splice_read, |
184 | .fallocate = fat_fallocate, | ||
180 | }; | 185 | }; |
181 | 186 | ||
182 | static int fat_cont_expand(struct inode *inode, loff_t size) | 187 | static int fat_cont_expand(struct inode *inode, loff_t size) |
@@ -215,6 +220,62 @@ out: | |||
215 | return err; | 220 | return err; |
216 | } | 221 | } |
217 | 222 | ||
223 | /* | ||
224 | * Preallocate space for a file. This implements fat's fallocate file | ||
225 | * operation, which gets called from sys_fallocate system call. User | ||
226 | * space requests len bytes at offset. If FALLOC_FL_KEEP_SIZE is set | ||
227 | * we just allocate clusters without zeroing them out. Otherwise we | ||
228 | * allocate and zero out clusters via an expanding truncate. | ||
229 | */ | ||
230 | static long fat_fallocate(struct file *file, int mode, | ||
231 | loff_t offset, loff_t len) | ||
232 | { | ||
233 | int nr_cluster; /* Number of clusters to be allocated */ | ||
234 | loff_t mm_bytes; /* Number of bytes to be allocated for file */ | ||
235 | loff_t ondisksize; /* block aligned on-disk size in bytes*/ | ||
236 | struct inode *inode = file->f_mapping->host; | ||
237 | struct super_block *sb = inode->i_sb; | ||
238 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | ||
239 | int err = 0; | ||
240 | |||
241 | /* No support for hole punch or other fallocate flags. */ | ||
242 | if (mode & ~FALLOC_FL_KEEP_SIZE) | ||
243 | return -EOPNOTSUPP; | ||
244 | |||
245 | /* No support for dir */ | ||
246 | if (!S_ISREG(inode->i_mode)) | ||
247 | return -EOPNOTSUPP; | ||
248 | |||
249 | mutex_lock(&inode->i_mutex); | ||
250 | if (mode & FALLOC_FL_KEEP_SIZE) { | ||
251 | ondisksize = inode->i_blocks << 9; | ||
252 | if ((offset + len) <= ondisksize) | ||
253 | goto error; | ||
254 | |||
255 | /* First compute the number of clusters to be allocated */ | ||
256 | mm_bytes = offset + len - ondisksize; | ||
257 | nr_cluster = (mm_bytes + (sbi->cluster_size - 1)) >> | ||
258 | sbi->cluster_bits; | ||
259 | |||
260 | /* Start the allocation. We are not zeroing out the clusters */ | ||
261 | while (nr_cluster-- > 0) { | ||
262 | err = fat_add_cluster(inode); | ||
263 | if (err) | ||
264 | goto error; | ||
265 | } | ||
266 | } else { | ||
267 | if ((offset + len) <= i_size_read(inode)) | ||
268 | goto error; | ||
269 | |||
270 | /* This is just an expanding truncate */ | ||
271 | err = fat_cont_expand(inode, (offset + len)); | ||
272 | } | ||
273 | |||
274 | error: | ||
275 | mutex_unlock(&inode->i_mutex); | ||
276 | return err; | ||
277 | } | ||
278 | |||
218 | /* Free all clusters after the skip'th cluster. */ | 279 | /* Free all clusters after the skip'th cluster. */ |
219 | static int fat_free(struct inode *inode, int skip) | 280 | static int fat_free(struct inode *inode, int skip) |
220 | { | 281 | { |
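The new fat_fallocate() above supports only the default mode and FALLOC_FL_KEEP_SIZE: with KEEP_SIZE clusters are allocated without zeroing and without growing i_size, otherwise the request becomes an expanding truncate. A minimal userspace sketch exercising the KEEP_SIZE path (assumes a file on a FAT mount running a kernel with this series):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int preallocate(const char *path, off_t offset, off_t len)
{
	int fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0)
		return -1;

	/* Reserve clusters past EOF without changing the file size;
	 * any other fallocate mode is rejected with EOPNOTSUPP. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len) < 0) {
		perror("fallocate");
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}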
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 6aece96df19f..a5599052116c 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -93,7 +93,7 @@ static struct fat_floppy_defaults { | |||
93 | }, | 93 | }, |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static int fat_add_cluster(struct inode *inode) | 96 | int fat_add_cluster(struct inode *inode) |
97 | { | 97 | { |
98 | int err, cluster; | 98 | int err, cluster; |
99 | 99 | ||
@@ -115,10 +115,10 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock, | |||
115 | struct super_block *sb = inode->i_sb; | 115 | struct super_block *sb = inode->i_sb; |
116 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 116 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
117 | unsigned long mapped_blocks; | 117 | unsigned long mapped_blocks; |
118 | sector_t phys; | 118 | sector_t phys, last_block; |
119 | int err, offset; | 119 | int err, offset; |
120 | 120 | ||
121 | err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create); | 121 | err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false); |
122 | if (err) | 122 | if (err) |
123 | return err; | 123 | return err; |
124 | if (phys) { | 124 | if (phys) { |
@@ -135,8 +135,14 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock, | |||
135 | return -EIO; | 135 | return -EIO; |
136 | } | 136 | } |
137 | 137 | ||
138 | last_block = inode->i_blocks >> (sb->s_blocksize_bits - 9); | ||
138 | offset = (unsigned long)iblock & (sbi->sec_per_clus - 1); | 139 | offset = (unsigned long)iblock & (sbi->sec_per_clus - 1); |
139 | if (!offset) { | 140 | /* |
141 | * allocate a cluster according to the following. | ||
142 | * 1) no more available blocks | ||
143 | * 2) not part of fallocate region | ||
144 | */ | ||
145 | if (!offset && !(iblock < last_block)) { | ||
140 | /* TODO: multiple cluster allocation would be desirable. */ | 146 | /* TODO: multiple cluster allocation would be desirable. */ |
141 | err = fat_add_cluster(inode); | 147 | err = fat_add_cluster(inode); |
142 | if (err) | 148 | if (err) |
@@ -148,7 +154,7 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock, | |||
148 | *max_blocks = min(mapped_blocks, *max_blocks); | 154 | *max_blocks = min(mapped_blocks, *max_blocks); |
149 | MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits; | 155 | MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits; |
150 | 156 | ||
151 | err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create); | 157 | err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false); |
152 | if (err) | 158 | if (err) |
153 | return err; | 159 | return err; |
154 | 160 | ||
@@ -273,13 +279,38 @@ static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter, | |||
273 | return ret; | 279 | return ret; |
274 | } | 280 | } |
275 | 281 | ||
282 | static int fat_get_block_bmap(struct inode *inode, sector_t iblock, | ||
283 | struct buffer_head *bh_result, int create) | ||
284 | { | ||
285 | struct super_block *sb = inode->i_sb; | ||
286 | unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; | ||
287 | int err; | ||
288 | sector_t bmap; | ||
289 | unsigned long mapped_blocks; | ||
290 | |||
291 | BUG_ON(create != 0); | ||
292 | |||
293 | err = fat_bmap(inode, iblock, &bmap, &mapped_blocks, create, true); | ||
294 | if (err) | ||
295 | return err; | ||
296 | |||
297 | if (bmap) { | ||
298 | map_bh(bh_result, sb, bmap); | ||
299 | max_blocks = min(mapped_blocks, max_blocks); | ||
300 | } | ||
301 | |||
302 | bh_result->b_size = max_blocks << sb->s_blocksize_bits; | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
276 | static sector_t _fat_bmap(struct address_space *mapping, sector_t block) | 307 | static sector_t _fat_bmap(struct address_space *mapping, sector_t block) |
277 | { | 308 | { |
278 | sector_t blocknr; | 309 | sector_t blocknr; |
279 | 310 | ||
280 | /* fat_get_cluster() assumes the requested blocknr isn't truncated. */ | 311 | /* fat_get_cluster() assumes the requested blocknr isn't truncated. */ |
281 | down_read(&MSDOS_I(mapping->host)->truncate_lock); | 312 | down_read(&MSDOS_I(mapping->host)->truncate_lock); |
282 | blocknr = generic_block_bmap(mapping, block, fat_get_block); | 313 | blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap); |
283 | up_read(&MSDOS_I(mapping->host)->truncate_lock); | 314 | up_read(&MSDOS_I(mapping->host)->truncate_lock); |
284 | 315 | ||
285 | return blocknr; | 316 | return blocknr; |
@@ -449,6 +480,24 @@ static int fat_calc_dir_size(struct inode *inode) | |||
449 | return 0; | 480 | return 0; |
450 | } | 481 | } |
451 | 482 | ||
483 | static int fat_validate_dir(struct inode *dir) | ||
484 | { | ||
485 | struct super_block *sb = dir->i_sb; | ||
486 | |||
487 | if (dir->i_nlink < 2) { | ||
488 | /* Directory should have "."/".." entries at least. */ | ||
489 | fat_fs_error(sb, "corrupted directory (invalid entries)"); | ||
490 | return -EIO; | ||
491 | } | ||
492 | if (MSDOS_I(dir)->i_start == 0 || | ||
493 | MSDOS_I(dir)->i_start == MSDOS_SB(sb)->root_cluster) { | ||
494 | /* Directory should point to a valid cluster. */ | ||
495 | fat_fs_error(sb, "corrupted directory (invalid i_start)"); | ||
496 | return -EIO; | ||
497 | } | ||
498 | return 0; | ||
499 | } | ||
500 | |||
452 | /* doesn't deal with root inode */ | 501 | /* doesn't deal with root inode */ |
453 | int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | 502 | int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) |
454 | { | 503 | { |
@@ -475,6 +524,10 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | |||
475 | MSDOS_I(inode)->mmu_private = inode->i_size; | 524 | MSDOS_I(inode)->mmu_private = inode->i_size; |
476 | 525 | ||
477 | set_nlink(inode, fat_subdirs(inode)); | 526 | set_nlink(inode, fat_subdirs(inode)); |
527 | |||
528 | error = fat_validate_dir(inode); | ||
529 | if (error < 0) | ||
530 | return error; | ||
478 | } else { /* not a directory */ | 531 | } else { /* not a directory */ |
479 | inode->i_generation |= 1; | 532 | inode->i_generation |= 1; |
480 | inode->i_mode = fat_make_mode(sbi, de->attr, | 533 | inode->i_mode = fat_make_mode(sbi, de->attr, |
@@ -553,13 +606,43 @@ out: | |||
553 | 606 | ||
554 | EXPORT_SYMBOL_GPL(fat_build_inode); | 607 | EXPORT_SYMBOL_GPL(fat_build_inode); |
555 | 608 | ||
609 | static int __fat_write_inode(struct inode *inode, int wait); | ||
610 | |||
611 | static void fat_free_eofblocks(struct inode *inode) | ||
612 | { | ||
613 | /* Release unwritten fallocated blocks on inode eviction. */ | ||
614 | if ((inode->i_blocks << 9) > | ||
615 | round_up(MSDOS_I(inode)->mmu_private, | ||
616 | MSDOS_SB(inode->i_sb)->cluster_size)) { | ||
617 | int err; | ||
618 | |||
619 | fat_truncate_blocks(inode, MSDOS_I(inode)->mmu_private); | ||
620 | /* Fallocate results in updating the i_start/i_logstart | ||
621 | * for the zero byte file. So, make it return to | ||
622 | * original state during evict and commit it to avoid | ||
623 | * any corruption on the next access to the cluster | ||
624 | * chain for the file. | ||
625 | */ | ||
626 | err = __fat_write_inode(inode, inode_needs_sync(inode)); | ||
627 | if (err) { | ||
628 | fat_msg(inode->i_sb, KERN_WARNING, "Failed to " | ||
629 | "update on disk inode for unused " | ||
630 | "fallocated blocks, inode could be " | ||
631 | "corrupted. Please run fsck"); | ||
632 | } | ||
633 | |||
634 | } | ||
635 | } | ||
636 | |||
556 | static void fat_evict_inode(struct inode *inode) | 637 | static void fat_evict_inode(struct inode *inode) |
557 | { | 638 | { |
558 | truncate_inode_pages_final(&inode->i_data); | 639 | truncate_inode_pages_final(&inode->i_data); |
559 | if (!inode->i_nlink) { | 640 | if (!inode->i_nlink) { |
560 | inode->i_size = 0; | 641 | inode->i_size = 0; |
561 | fat_truncate_blocks(inode, 0); | 642 | fat_truncate_blocks(inode, 0); |
562 | } | 643 | } else |
644 | fat_free_eofblocks(inode); | ||
645 | |||
563 | invalidate_inode_buffers(inode); | 646 | invalidate_inode_buffers(inode); |
564 | clear_inode(inode); | 647 | clear_inode(inode); |
565 | fat_cache_inval_inode(inode); | 648 | fat_cache_inval_inode(inode); |
@@ -1146,7 +1229,12 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, | |||
1146 | case Opt_time_offset: | 1229 | case Opt_time_offset: |
1147 | if (match_int(&args[0], &option)) | 1230 | if (match_int(&args[0], &option)) |
1148 | return -EINVAL; | 1231 | return -EINVAL; |
1149 | if (option < -12 * 60 || option > 12 * 60) | 1232 | /* |
1233 | * GMT+-12 zones may have DST corrections so at least | ||
1234 | * 13 hours difference is needed. Make the limit 24 | ||
1235 | * just in case someone invents something unusual. | ||
1236 | */ | ||
1237 | if (option < -24 * 60 || option > 24 * 60) | ||
1150 | return -EINVAL; | 1238 | return -EINVAL; |
1151 | opts->tz_set = 1; | 1239 | opts->tz_set = 1; |
1152 | opts->time_offset = option; | 1240 | opts->time_offset = option; |
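fat_free_eofblocks() above decides whether an evicted inode still owns fallocated-but-unwritten clusters by comparing the on-disk block count against mmu_private rounded up to a whole cluster. A tiny arithmetic sketch of that test with hypothetical example values, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long long i_blocks     = 16;	/* 512-byte blocks on disk */
	unsigned long long mmu_private  = 3000;	/* bytes actually written  */
	unsigned long long cluster_size = 4096;

	unsigned long long ondisk  = i_blocks << 9;			/* 8192 */
	unsigned long long rounded = (mmu_private + cluster_size - 1)
					/ cluster_size * cluster_size;	/* 4096 */

	/* 8192 > 4096: one fully fallocated cluster can be released. */
	printf("%s\n", ondisk > rounded ? "free eof blocks" : "nothing to do");
	return 0;
}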
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c index db458ee3a546..1eb5d415d434 100644 --- a/fs/hfs/catalog.c +++ b/fs/hfs/catalog.c | |||
@@ -214,7 +214,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str) | |||
214 | { | 214 | { |
215 | struct super_block *sb; | 215 | struct super_block *sb; |
216 | struct hfs_find_data fd; | 216 | struct hfs_find_data fd; |
217 | struct list_head *pos; | 217 | struct hfs_readdir_data *rd; |
218 | int res, type; | 218 | int res, type; |
219 | 219 | ||
220 | hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); | 220 | hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); |
@@ -240,9 +240,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str) | |||
240 | } | 240 | } |
241 | } | 241 | } |
242 | 242 | ||
243 | list_for_each(pos, &HFS_I(dir)->open_dir_list) { | 243 | list_for_each_entry(rd, &HFS_I(dir)->open_dir_list, list) { |
244 | struct hfs_readdir_data *rd = | ||
245 | list_entry(pos, struct hfs_readdir_data, list); | ||
246 | if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) | 244 | if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) |
247 | rd->file->f_pos--; | 245 | rd->file->f_pos--; |
248 | } | 246 | } |
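The hfs change above is a straight conversion from list_for_each() plus list_entry() to list_for_each_entry(), which folds the container_of step into the iterator. A generic sketch of the two forms, using a hypothetical item type rather than the hfs structures:

#include <linux/list.h>

struct item {
	int value;
	struct list_head list;
};

static int sum(struct list_head *head)
{
	struct item *it;
	int total = 0;

	/* Old style:
	 *   struct list_head *pos;
	 *   list_for_each(pos, head) {
	 *           struct item *it = list_entry(pos, struct item, list);
	 *           total += it->value;
	 *   }
	 * New style folds the list_entry() into the loop: */
	list_for_each_entry(it, head, list)
		total += it->value;

	return total;
}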
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index a4cbdf9824c7..d250604f985a 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/parser.h> | 17 | #include <linux/parser.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/pagemap.h> | ||
19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
20 | #include <linux/statfs.h> | 21 | #include <linux/statfs.h> |
21 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
diff --git a/fs/proc/array.c b/fs/proc/array.c index d73291f5f0fc..b6c00ce0e29e 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -395,7 +395,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
395 | 395 | ||
396 | state = *get_task_state(task); | 396 | state = *get_task_state(task); |
397 | vsize = eip = esp = 0; | 397 | vsize = eip = esp = 0; |
398 | permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT); | 398 | permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); |
399 | mm = get_task_mm(task); | 399 | mm = get_task_mm(task); |
400 | if (mm) { | 400 | if (mm) { |
401 | vsize = task_vsize(mm); | 401 | vsize = task_vsize(mm); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 2cf5d7e37375..4f764c2ac1a5 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -403,7 +403,7 @@ static const struct file_operations proc_pid_cmdline_ops = { | |||
403 | static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns, | 403 | static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns, |
404 | struct pid *pid, struct task_struct *task) | 404 | struct pid *pid, struct task_struct *task) |
405 | { | 405 | { |
406 | struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ); | 406 | struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); |
407 | if (mm && !IS_ERR(mm)) { | 407 | if (mm && !IS_ERR(mm)) { |
408 | unsigned int nwords = 0; | 408 | unsigned int nwords = 0; |
409 | do { | 409 | do { |
@@ -430,7 +430,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, | |||
430 | 430 | ||
431 | wchan = get_wchan(task); | 431 | wchan = get_wchan(task); |
432 | 432 | ||
433 | if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname)) | 433 | if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) |
434 | && !lookup_symbol_name(wchan, symname)) | ||
434 | seq_printf(m, "%s", symname); | 435 | seq_printf(m, "%s", symname); |
435 | else | 436 | else |
436 | seq_putc(m, '0'); | 437 | seq_putc(m, '0'); |
@@ -444,7 +445,7 @@ static int lock_trace(struct task_struct *task) | |||
444 | int err = mutex_lock_killable(&task->signal->cred_guard_mutex); | 445 | int err = mutex_lock_killable(&task->signal->cred_guard_mutex); |
445 | if (err) | 446 | if (err) |
446 | return err; | 447 | return err; |
447 | if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) { | 448 | if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { |
448 | mutex_unlock(&task->signal->cred_guard_mutex); | 449 | mutex_unlock(&task->signal->cred_guard_mutex); |
449 | return -EPERM; | 450 | return -EPERM; |
450 | } | 451 | } |
@@ -697,7 +698,7 @@ static int proc_fd_access_allowed(struct inode *inode) | |||
697 | */ | 698 | */ |
698 | task = get_proc_task(inode); | 699 | task = get_proc_task(inode); |
699 | if (task) { | 700 | if (task) { |
700 | allowed = ptrace_may_access(task, PTRACE_MODE_READ); | 701 | allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); |
701 | put_task_struct(task); | 702 | put_task_struct(task); |
702 | } | 703 | } |
703 | return allowed; | 704 | return allowed; |
@@ -732,7 +733,7 @@ static bool has_pid_permissions(struct pid_namespace *pid, | |||
732 | return true; | 733 | return true; |
733 | if (in_group_p(pid->pid_gid)) | 734 | if (in_group_p(pid->pid_gid)) |
734 | return true; | 735 | return true; |
735 | return ptrace_may_access(task, PTRACE_MODE_READ); | 736 | return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); |
736 | } | 737 | } |
737 | 738 | ||
738 | 739 | ||
@@ -809,7 +810,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode) | |||
809 | struct mm_struct *mm = ERR_PTR(-ESRCH); | 810 | struct mm_struct *mm = ERR_PTR(-ESRCH); |
810 | 811 | ||
811 | if (task) { | 812 | if (task) { |
812 | mm = mm_access(task, mode); | 813 | mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); |
813 | put_task_struct(task); | 814 | put_task_struct(task); |
814 | 815 | ||
815 | if (!IS_ERR_OR_NULL(mm)) { | 816 | if (!IS_ERR_OR_NULL(mm)) { |
@@ -952,6 +953,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
952 | unsigned long src = *ppos; | 953 | unsigned long src = *ppos; |
953 | int ret = 0; | 954 | int ret = 0; |
954 | struct mm_struct *mm = file->private_data; | 955 | struct mm_struct *mm = file->private_data; |
956 | unsigned long env_start, env_end; | ||
955 | 957 | ||
956 | if (!mm) | 958 | if (!mm) |
957 | return 0; | 959 | return 0; |
@@ -963,19 +965,25 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
963 | ret = 0; | 965 | ret = 0; |
964 | if (!atomic_inc_not_zero(&mm->mm_users)) | 966 | if (!atomic_inc_not_zero(&mm->mm_users)) |
965 | goto free; | 967 | goto free; |
968 | |||
969 | down_read(&mm->mmap_sem); | ||
970 | env_start = mm->env_start; | ||
971 | env_end = mm->env_end; | ||
972 | up_read(&mm->mmap_sem); | ||
973 | |||
966 | while (count > 0) { | 974 | while (count > 0) { |
967 | size_t this_len, max_len; | 975 | size_t this_len, max_len; |
968 | int retval; | 976 | int retval; |
969 | 977 | ||
970 | if (src >= (mm->env_end - mm->env_start)) | 978 | if (src >= (env_end - env_start)) |
971 | break; | 979 | break; |
972 | 980 | ||
973 | this_len = mm->env_end - (mm->env_start + src); | 981 | this_len = env_end - (env_start + src); |
974 | 982 | ||
975 | max_len = min_t(size_t, PAGE_SIZE, count); | 983 | max_len = min_t(size_t, PAGE_SIZE, count); |
976 | this_len = min(max_len, this_len); | 984 | this_len = min(max_len, this_len); |
977 | 985 | ||
978 | retval = access_remote_vm(mm, (mm->env_start + src), | 986 | retval = access_remote_vm(mm, (env_start + src), |
979 | page, this_len, 0); | 987 | page, this_len, 0); |
980 | 988 | ||
981 | if (retval <= 0) { | 989 | if (retval <= 0) { |
@@ -1860,7 +1868,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) | |||
1860 | if (!task) | 1868 | if (!task) |
1861 | goto out_notask; | 1869 | goto out_notask; |
1862 | 1870 | ||
1863 | mm = mm_access(task, PTRACE_MODE_READ); | 1871 | mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); |
1864 | if (IS_ERR_OR_NULL(mm)) | 1872 | if (IS_ERR_OR_NULL(mm)) |
1865 | goto out; | 1873 | goto out; |
1866 | 1874 | ||
@@ -2013,7 +2021,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, | |||
2013 | goto out; | 2021 | goto out; |
2014 | 2022 | ||
2015 | result = -EACCES; | 2023 | result = -EACCES; |
2016 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 2024 | if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) |
2017 | goto out_put_task; | 2025 | goto out_put_task; |
2018 | 2026 | ||
2019 | result = -ENOENT; | 2027 | result = -ENOENT; |
@@ -2066,7 +2074,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) | |||
2066 | goto out; | 2074 | goto out; |
2067 | 2075 | ||
2068 | ret = -EACCES; | 2076 | ret = -EACCES; |
2069 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 2077 | if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) |
2070 | goto out_put_task; | 2078 | goto out_put_task; |
2071 | 2079 | ||
2072 | ret = 0; | 2080 | ret = 0; |
@@ -2533,7 +2541,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh | |||
2533 | if (result) | 2541 | if (result) |
2534 | return result; | 2542 | return result; |
2535 | 2543 | ||
2536 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) { | 2544 | if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { |
2537 | result = -EACCES; | 2545 | result = -EACCES; |
2538 | goto out_unlock; | 2546 | goto out_unlock; |
2539 | } | 2547 | } |
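Besides switching the ptrace checks to the *_FSCREDS modes, the environ_read() hunk above snapshots env_start/env_end once under mmap_sem instead of re-reading them on every iteration, so the window cannot shift while the copy loop runs. A small userspace sketch of that snapshot-then-loop pattern, with a hypothetical lock and fields rather than the proc code:

#include <pthread.h>
#include <stddef.h>

struct region {
	pthread_rwlock_t lock;
	unsigned long start;
	unsigned long end;
};

static size_t copy_region(struct region *r, char *dst, size_t count)
{
	unsigned long start, end;
	size_t copied = 0;

	/* Take one consistent snapshot of the bounds, then work on the
	 * local copies so a concurrent update cannot move the window
	 * mid-loop (mirrors caching env_start/env_end under mmap_sem). */
	pthread_rwlock_rdlock(&r->lock);
	start = r->start;
	end = r->end;
	pthread_rwlock_unlock(&r->lock);

	while (copied < count && start + copied < end) {
		dst[copied] = 0;	/* placeholder for the real copy */
		copied++;
	}
	return copied;
}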
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 1dece8781f91..276f12431dbf 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c | |||
@@ -46,7 +46,7 @@ static const char *proc_ns_get_link(struct dentry *dentry, | |||
46 | if (!task) | 46 | if (!task) |
47 | return error; | 47 | return error; |
48 | 48 | ||
49 | if (ptrace_may_access(task, PTRACE_MODE_READ)) { | 49 | if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { |
50 | error = ns_get_path(&ns_path, task, ns_ops); | 50 | error = ns_get_path(&ns_path, task, ns_ops); |
51 | if (!error) | 51 | if (!error) |
52 | nd_jump_link(&ns_path); | 52 | nd_jump_link(&ns_path); |
@@ -67,7 +67,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl | |||
67 | if (!task) | 67 | if (!task) |
68 | return res; | 68 | return res; |
69 | 69 | ||
70 | if (ptrace_may_access(task, PTRACE_MODE_READ)) { | 70 | if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { |
71 | res = ns_get_name(name, sizeof(name), task, ns_ops); | 71 | res = ns_get_name(name, sizeof(name), task, ns_ops); |
72 | if (res >= 0) | 72 | if (res >= 0) |
73 | res = readlink_copy(buffer, buflen, name); | 73 | res = readlink_copy(buffer, buflen, name); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 65a1b6c69c11..71ffc91060f6 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -468,7 +468,7 @@ struct mem_size_stats { | |||
468 | static void smaps_account(struct mem_size_stats *mss, struct page *page, | 468 | static void smaps_account(struct mem_size_stats *mss, struct page *page, |
469 | bool compound, bool young, bool dirty) | 469 | bool compound, bool young, bool dirty) |
470 | { | 470 | { |
471 | int i, nr = compound ? HPAGE_PMD_NR : 1; | 471 | int i, nr = compound ? 1 << compound_order(page) : 1; |
472 | unsigned long size = nr * PAGE_SIZE; | 472 | unsigned long size = nr * PAGE_SIZE; |
473 | 473 | ||
474 | if (PageAnon(page)) | 474 | if (PageAnon(page)) |
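The smaps change above stops assuming every compound page is PMD-sized and instead derives the page count from the page's own compound order. A one-line arithmetic sketch with a hypothetical order value:

#include <stdio.h>

int main(void)
{
	unsigned int compound_order = 4;		/* e.g. a 64 KiB page */
	unsigned long nr = 1UL << compound_order;	/* 16 base pages      */
	unsigned long size = nr * 4096UL;		/* 65536 bytes        */

	printf("%lu pages, %lu bytes\n", nr, size);
	return 0;
}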
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h deleted file mode 100644 index 0297e5875798..000000000000 --- a/include/asm-generic/dma-coherent.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | #ifndef DMA_COHERENT_H | ||
2 | #define DMA_COHERENT_H | ||
3 | |||
4 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | ||
5 | /* | ||
6 | * These three functions are only for dma allocator. | ||
7 | * Don't use them in device drivers. | ||
8 | */ | ||
9 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | ||
10 | dma_addr_t *dma_handle, void **ret); | ||
11 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr); | ||
12 | |||
13 | int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, | ||
14 | void *cpu_addr, size_t size, int *ret); | ||
15 | /* | ||
16 | * Standard interface | ||
17 | */ | ||
18 | #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | ||
19 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | ||
20 | dma_addr_t device_addr, size_t size, int flags); | ||
21 | |||
22 | void dma_release_declared_memory(struct device *dev); | ||
23 | |||
24 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
25 | dma_addr_t device_addr, size_t size); | ||
26 | #else | ||
27 | #define dma_alloc_from_coherent(dev, size, handle, ret) (0) | ||
28 | #define dma_release_from_coherent(dev, order, vaddr) (0) | ||
29 | #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) | ||
30 | #endif | ||
31 | |||
32 | #endif | ||
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h deleted file mode 100644 index 6c32af918c2f..000000000000 --- a/include/asm-generic/dma-mapping-broken.h +++ /dev/null | |||
@@ -1,95 +0,0 @@ | |||
1 | #ifndef _ASM_GENERIC_DMA_MAPPING_H | ||
2 | #define _ASM_GENERIC_DMA_MAPPING_H | ||
3 | |||
4 | /* define the dma api to allow compilation but not linking of | ||
5 | * dma dependent code. Code that depends on the dma-mapping | ||
6 | * API needs to set 'depends on HAS_DMA' in its Kconfig | ||
7 | */ | ||
8 | |||
9 | struct scatterlist; | ||
10 | |||
11 | extern void * | ||
12 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
13 | gfp_t flag); | ||
14 | |||
15 | extern void | ||
16 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
17 | dma_addr_t dma_handle); | ||
18 | |||
19 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
20 | dma_addr_t *dma_handle, gfp_t flag, | ||
21 | struct dma_attrs *attrs) | ||
22 | { | ||
23 | /* attrs is not supported and ignored */ | ||
24 | return dma_alloc_coherent(dev, size, dma_handle, flag); | ||
25 | } | ||
26 | |||
27 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
28 | void *cpu_addr, dma_addr_t dma_handle, | ||
29 | struct dma_attrs *attrs) | ||
30 | { | ||
31 | /* attrs is not supported and ignored */ | ||
32 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
33 | } | ||
34 | |||
35 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
36 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
37 | |||
38 | extern dma_addr_t | ||
39 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
40 | enum dma_data_direction direction); | ||
41 | |||
42 | extern void | ||
43 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
44 | enum dma_data_direction direction); | ||
45 | |||
46 | extern int | ||
47 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
48 | enum dma_data_direction direction); | ||
49 | |||
50 | extern void | ||
51 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
52 | enum dma_data_direction direction); | ||
53 | |||
54 | extern dma_addr_t | ||
55 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
56 | size_t size, enum dma_data_direction direction); | ||
57 | |||
58 | extern void | ||
59 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
60 | enum dma_data_direction direction); | ||
61 | |||
62 | extern void | ||
63 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
64 | enum dma_data_direction direction); | ||
65 | |||
66 | extern void | ||
67 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
68 | unsigned long offset, size_t size, | ||
69 | enum dma_data_direction direction); | ||
70 | |||
71 | extern void | ||
72 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
73 | enum dma_data_direction direction); | ||
74 | |||
75 | #define dma_sync_single_for_device dma_sync_single_for_cpu | ||
76 | #define dma_sync_single_range_for_device dma_sync_single_range_for_cpu | ||
77 | #define dma_sync_sg_for_device dma_sync_sg_for_cpu | ||
78 | |||
79 | extern int | ||
80 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | ||
81 | |||
82 | extern int | ||
83 | dma_supported(struct device *dev, u64 mask); | ||
84 | |||
85 | extern int | ||
86 | dma_set_mask(struct device *dev, u64 mask); | ||
87 | |||
88 | extern int | ||
89 | dma_get_cache_alignment(void); | ||
90 | |||
91 | extern void | ||
92 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
93 | enum dma_data_direction direction); | ||
94 | |||
95 | #endif /* _ASM_GENERIC_DMA_MAPPING_H */ | ||
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h deleted file mode 100644 index b1bc954eccf3..000000000000 --- a/include/asm-generic/dma-mapping-common.h +++ /dev/null | |||
@@ -1,358 +0,0 @@ | |||
1 | #ifndef _ASM_GENERIC_DMA_MAPPING_H | ||
2 | #define _ASM_GENERIC_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/kmemcheck.h> | ||
5 | #include <linux/bug.h> | ||
6 | #include <linux/scatterlist.h> | ||
7 | #include <linux/dma-debug.h> | ||
8 | #include <linux/dma-attrs.h> | ||
9 | #include <asm-generic/dma-coherent.h> | ||
10 | |||
11 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, | ||
12 | size_t size, | ||
13 | enum dma_data_direction dir, | ||
14 | struct dma_attrs *attrs) | ||
15 | { | ||
16 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
17 | dma_addr_t addr; | ||
18 | |||
19 | kmemcheck_mark_initialized(ptr, size); | ||
20 | BUG_ON(!valid_dma_direction(dir)); | ||
21 | addr = ops->map_page(dev, virt_to_page(ptr), | ||
22 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
23 | dir, attrs); | ||
24 | debug_dma_map_page(dev, virt_to_page(ptr), | ||
25 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
26 | dir, addr, true); | ||
27 | return addr; | ||
28 | } | ||
29 | |||
30 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, | ||
31 | size_t size, | ||
32 | enum dma_data_direction dir, | ||
33 | struct dma_attrs *attrs) | ||
34 | { | ||
35 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
36 | |||
37 | BUG_ON(!valid_dma_direction(dir)); | ||
38 | if (ops->unmap_page) | ||
39 | ops->unmap_page(dev, addr, size, dir, attrs); | ||
40 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * dma_maps_sg_attrs returns 0 on error and > 0 on success. | ||
45 | * It should never return a value < 0. | ||
46 | */ | ||
47 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, | ||
48 | int nents, enum dma_data_direction dir, | ||
49 | struct dma_attrs *attrs) | ||
50 | { | ||
51 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
52 | int i, ents; | ||
53 | struct scatterlist *s; | ||
54 | |||
55 | for_each_sg(sg, s, nents, i) | ||
56 | kmemcheck_mark_initialized(sg_virt(s), s->length); | ||
57 | BUG_ON(!valid_dma_direction(dir)); | ||
58 | ents = ops->map_sg(dev, sg, nents, dir, attrs); | ||
59 | BUG_ON(ents < 0); | ||
60 | debug_dma_map_sg(dev, sg, nents, ents, dir); | ||
61 | |||
62 | return ents; | ||
63 | } | ||
64 | |||
65 | static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, | ||
66 | int nents, enum dma_data_direction dir, | ||
67 | struct dma_attrs *attrs) | ||
68 | { | ||
69 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
70 | |||
71 | BUG_ON(!valid_dma_direction(dir)); | ||
72 | debug_dma_unmap_sg(dev, sg, nents, dir); | ||
73 | if (ops->unmap_sg) | ||
74 | ops->unmap_sg(dev, sg, nents, dir, attrs); | ||
75 | } | ||
76 | |||
77 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
78 | size_t offset, size_t size, | ||
79 | enum dma_data_direction dir) | ||
80 | { | ||
81 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
82 | dma_addr_t addr; | ||
83 | |||
84 | kmemcheck_mark_initialized(page_address(page) + offset, size); | ||
85 | BUG_ON(!valid_dma_direction(dir)); | ||
86 | addr = ops->map_page(dev, page, offset, size, dir, NULL); | ||
87 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | ||
88 | |||
89 | return addr; | ||
90 | } | ||
91 | |||
92 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
93 | size_t size, enum dma_data_direction dir) | ||
94 | { | ||
95 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
96 | |||
97 | BUG_ON(!valid_dma_direction(dir)); | ||
98 | if (ops->unmap_page) | ||
99 | ops->unmap_page(dev, addr, size, dir, NULL); | ||
100 | debug_dma_unmap_page(dev, addr, size, dir, false); | ||
101 | } | ||
102 | |||
103 | static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, | ||
104 | size_t size, | ||
105 | enum dma_data_direction dir) | ||
106 | { | ||
107 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
108 | |||
109 | BUG_ON(!valid_dma_direction(dir)); | ||
110 | if (ops->sync_single_for_cpu) | ||
111 | ops->sync_single_for_cpu(dev, addr, size, dir); | ||
112 | debug_dma_sync_single_for_cpu(dev, addr, size, dir); | ||
113 | } | ||
114 | |||
115 | static inline void dma_sync_single_for_device(struct device *dev, | ||
116 | dma_addr_t addr, size_t size, | ||
117 | enum dma_data_direction dir) | ||
118 | { | ||
119 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
120 | |||
121 | BUG_ON(!valid_dma_direction(dir)); | ||
122 | if (ops->sync_single_for_device) | ||
123 | ops->sync_single_for_device(dev, addr, size, dir); | ||
124 | debug_dma_sync_single_for_device(dev, addr, size, dir); | ||
125 | } | ||
126 | |||
127 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
128 | dma_addr_t addr, | ||
129 | unsigned long offset, | ||
130 | size_t size, | ||
131 | enum dma_data_direction dir) | ||
132 | { | ||
133 | const struct dma_map_ops *ops = get_dma_ops(dev); | ||
134 | |||
135 | BUG_ON(!valid_dma_direction(dir)); | ||
136 | if (ops->sync_single_for_cpu) | ||
137 | ops->sync_single_for_cpu(dev, addr + offset, size, dir); | ||
138 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
139 | } | ||
140 | |||
141 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
142 | dma_addr_t addr, | ||
143 | unsigned long offset, | ||
144 | size_t size, | ||
145 | enum dma_data_direction dir) | ||
146 | { | ||
147 | const struct dma_map_ops *ops = get_dma_ops(dev); | ||
148 | |||
149 | BUG_ON(!valid_dma_direction(dir)); | ||
150 | if (ops->sync_single_for_device) | ||
151 | ops->sync_single_for_device(dev, addr + offset, size, dir); | ||
152 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | ||
153 | } | ||
154 | |||
155 | static inline void | ||
156 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
157 | int nelems, enum dma_data_direction dir) | ||
158 | { | ||
159 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
160 | |||
161 | BUG_ON(!valid_dma_direction(dir)); | ||
162 | if (ops->sync_sg_for_cpu) | ||
163 | ops->sync_sg_for_cpu(dev, sg, nelems, dir); | ||
164 | debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
165 | } | ||
166 | |||
167 | static inline void | ||
168 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
169 | int nelems, enum dma_data_direction dir) | ||
170 | { | ||
171 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
172 | |||
173 | BUG_ON(!valid_dma_direction(dir)); | ||
174 | if (ops->sync_sg_for_device) | ||
175 | ops->sync_sg_for_device(dev, sg, nelems, dir); | ||
176 | debug_dma_sync_sg_for_device(dev, sg, nelems, dir); | ||
177 | |||
178 | } | ||
179 | |||
180 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | ||
181 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) | ||
182 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) | ||
183 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) | ||
184 | |||
185 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
186 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
187 | |||
188 | void *dma_common_contiguous_remap(struct page *page, size_t size, | ||
189 | unsigned long vm_flags, | ||
190 | pgprot_t prot, const void *caller); | ||
191 | |||
192 | void *dma_common_pages_remap(struct page **pages, size_t size, | ||
193 | unsigned long vm_flags, pgprot_t prot, | ||
194 | const void *caller); | ||
195 | void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); | ||
196 | |||
197 | /** | ||
198 | * dma_mmap_attrs - map a coherent DMA allocation into user space | ||
199 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
200 | * @vma: vm_area_struct describing requested user mapping | ||
201 | * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs | ||
202 | * @handle: device-view address returned from dma_alloc_attrs | ||
203 | * @size: size of memory originally requested in dma_alloc_attrs | ||
204 | * @attrs: attributes of mapping properties requested in dma_alloc_attrs | ||
205 | * | ||
206 | * Map a coherent DMA buffer previously allocated by dma_alloc_attrs | ||
207 | * into user space. The coherent DMA buffer must not be freed by the | ||
208 | * driver until the user space mapping has been released. | ||
209 | */ | ||
210 | static inline int | ||
211 | dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, | ||
212 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | ||
213 | { | ||
214 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
215 | BUG_ON(!ops); | ||
216 | if (ops->mmap) | ||
217 | return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); | ||
218 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
219 | } | ||
220 | |||
221 | #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) | ||
222 | |||
223 | int | ||
224 | dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
225 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
226 | |||
227 | static inline int | ||
228 | dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | ||
229 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | ||
230 | { | ||
231 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
232 | BUG_ON(!ops); | ||
233 | if (ops->get_sgtable) | ||
234 | return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, | ||
235 | attrs); | ||
236 | return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); | ||
237 | } | ||
238 | |||
239 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) | ||
240 | |||
241 | #ifndef arch_dma_alloc_attrs | ||
242 | #define arch_dma_alloc_attrs(dev, flag) (true) | ||
243 | #endif | ||
244 | |||
245 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
246 | dma_addr_t *dma_handle, gfp_t flag, | ||
247 | struct dma_attrs *attrs) | ||
248 | { | ||
249 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
250 | void *cpu_addr; | ||
251 | |||
252 | BUG_ON(!ops); | ||
253 | |||
254 | if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) | ||
255 | return cpu_addr; | ||
256 | |||
257 | if (!arch_dma_alloc_attrs(&dev, &flag)) | ||
258 | return NULL; | ||
259 | if (!ops->alloc) | ||
260 | return NULL; | ||
261 | |||
262 | cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); | ||
263 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); | ||
264 | return cpu_addr; | ||
265 | } | ||
266 | |||
267 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
268 | void *cpu_addr, dma_addr_t dma_handle, | ||
269 | struct dma_attrs *attrs) | ||
270 | { | ||
271 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
272 | |||
273 | BUG_ON(!ops); | ||
274 | WARN_ON(irqs_disabled()); | ||
275 | |||
276 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | ||
277 | return; | ||
278 | |||
279 | if (!ops->free) | ||
280 | return; | ||
281 | |||
282 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
283 | ops->free(dev, size, cpu_addr, dma_handle, attrs); | ||
284 | } | ||
285 | |||
286 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||
287 | dma_addr_t *dma_handle, gfp_t flag) | ||
288 | { | ||
289 | return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); | ||
290 | } | ||
291 | |||
292 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
293 | void *cpu_addr, dma_addr_t dma_handle) | ||
294 | { | ||
295 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); | ||
296 | } | ||
297 | |||
298 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
299 | dma_addr_t *dma_handle, gfp_t gfp) | ||
300 | { | ||
301 | DEFINE_DMA_ATTRS(attrs); | ||
302 | |||
303 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
304 | return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); | ||
305 | } | ||
306 | |||
307 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | ||
308 | void *cpu_addr, dma_addr_t dma_handle) | ||
309 | { | ||
310 | DEFINE_DMA_ATTRS(attrs); | ||
311 | |||
312 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
313 | dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); | ||
314 | } | ||
315 | |||
316 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
317 | { | ||
318 | debug_dma_mapping_error(dev, dma_addr); | ||
319 | |||
320 | if (get_dma_ops(dev)->mapping_error) | ||
321 | return get_dma_ops(dev)->mapping_error(dev, dma_addr); | ||
322 | |||
323 | #ifdef DMA_ERROR_CODE | ||
324 | return dma_addr == DMA_ERROR_CODE; | ||
325 | #else | ||
326 | return 0; | ||
327 | #endif | ||
328 | } | ||
329 | |||
330 | #ifndef HAVE_ARCH_DMA_SUPPORTED | ||
331 | static inline int dma_supported(struct device *dev, u64 mask) | ||
332 | { | ||
333 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
334 | |||
335 | if (!ops) | ||
336 | return 0; | ||
337 | if (!ops->dma_supported) | ||
338 | return 1; | ||
339 | return ops->dma_supported(dev, mask); | ||
340 | } | ||
341 | #endif | ||
342 | |||
343 | #ifndef HAVE_ARCH_DMA_SET_MASK | ||
344 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
345 | { | ||
346 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
347 | |||
348 | if (ops->set_dma_mask) | ||
349 | return ops->set_dma_mask(dev, mask); | ||
350 | |||
351 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
352 | return -EIO; | ||
353 | *dev->dma_mask = mask; | ||
354 | return 0; | ||
355 | } | ||
356 | #endif | ||
357 | |||
358 | #endif | ||
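
The helpers removed above (the old asm-generic/dma-mapping-common.h) implement the streaming DMA API on top of dma_map_ops. As a reader's aid, here is a minimal, hedged sketch of the usual map/sync/unmap cycle using only functions that appear in this diff; the device pointer, buffer and length are hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical sketch: map a CPU buffer for a device read, then tear it down. */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the hardware to DMA from 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
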
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 59915ea5373c..fc14275ff34e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -85,10 +85,14 @@ extern int nr_cpu_ids; | |||
85 | * only one CPU. | 85 | * only one CPU. |
86 | */ | 86 | */ |
87 | 87 | ||
88 | extern const struct cpumask *const cpu_possible_mask; | 88 | extern struct cpumask __cpu_possible_mask; |
89 | extern const struct cpumask *const cpu_online_mask; | 89 | extern struct cpumask __cpu_online_mask; |
90 | extern const struct cpumask *const cpu_present_mask; | 90 | extern struct cpumask __cpu_present_mask; |
91 | extern const struct cpumask *const cpu_active_mask; | 91 | extern struct cpumask __cpu_active_mask; |
92 | #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) | ||
93 | #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) | ||
94 | #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) | ||
95 | #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) | ||
92 | 96 | ||
93 | #if NR_CPUS > 1 | 97 | #if NR_CPUS > 1 |
94 | #define num_online_cpus() cpumask_weight(cpu_online_mask) | 98 | #define num_online_cpus() cpumask_weight(cpu_online_mask) |
@@ -716,14 +720,49 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); | |||
716 | #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) | 720 | #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) |
717 | 721 | ||
718 | /* Wrappers for arch boot code to manipulate normally-constant masks */ | 722 | /* Wrappers for arch boot code to manipulate normally-constant masks */ |
719 | void set_cpu_possible(unsigned int cpu, bool possible); | ||
720 | void set_cpu_present(unsigned int cpu, bool present); | ||
721 | void set_cpu_online(unsigned int cpu, bool online); | ||
722 | void set_cpu_active(unsigned int cpu, bool active); | ||
723 | void init_cpu_present(const struct cpumask *src); | 723 | void init_cpu_present(const struct cpumask *src); |
724 | void init_cpu_possible(const struct cpumask *src); | 724 | void init_cpu_possible(const struct cpumask *src); |
725 | void init_cpu_online(const struct cpumask *src); | 725 | void init_cpu_online(const struct cpumask *src); |
726 | 726 | ||
727 | static inline void | ||
728 | set_cpu_possible(unsigned int cpu, bool possible) | ||
729 | { | ||
730 | if (possible) | ||
731 | cpumask_set_cpu(cpu, &__cpu_possible_mask); | ||
732 | else | ||
733 | cpumask_clear_cpu(cpu, &__cpu_possible_mask); | ||
734 | } | ||
735 | |||
736 | static inline void | ||
737 | set_cpu_present(unsigned int cpu, bool present) | ||
738 | { | ||
739 | if (present) | ||
740 | cpumask_set_cpu(cpu, &__cpu_present_mask); | ||
741 | else | ||
742 | cpumask_clear_cpu(cpu, &__cpu_present_mask); | ||
743 | } | ||
744 | |||
745 | static inline void | ||
746 | set_cpu_online(unsigned int cpu, bool online) | ||
747 | { | ||
748 | if (online) { | ||
749 | cpumask_set_cpu(cpu, &__cpu_online_mask); | ||
750 | cpumask_set_cpu(cpu, &__cpu_active_mask); | ||
751 | } else { | ||
752 | cpumask_clear_cpu(cpu, &__cpu_online_mask); | ||
753 | } | ||
754 | } | ||
755 | |||
756 | static inline void | ||
757 | set_cpu_active(unsigned int cpu, bool active) | ||
758 | { | ||
759 | if (active) | ||
760 | cpumask_set_cpu(cpu, &__cpu_active_mask); | ||
761 | else | ||
762 | cpumask_clear_cpu(cpu, &__cpu_active_mask); | ||
763 | } | ||
764 | |||
765 | |||
727 | /** | 766 | /** |
728 | * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * | 767 | * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * |
729 | * @bitmap: the bitmap | 768 | * @bitmap: the bitmap |
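
With the masks now exported as __cpu_*_mask, the set_cpu_*() wrappers become plain inlines. A hedged sketch of how architecture boot code typically uses them while enumerating CPUs; the enumeration loop and the detection helper are illustrative only, not part of this patch:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Placeholder for a firmware/device-tree query; always true in this sketch. */
static bool __init example_cpu_detected(unsigned int cpu)
{
	return true;
}

static void __init example_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		set_cpu_possible(cpu, true);
		if (example_cpu_detected(cpu))
			set_cpu_present(cpu, true);
	}
	/* The boot CPU is already running; set_cpu_online() also marks it active. */
	set_cpu_online(smp_processor_id(), true);
}
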
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h index c8e1831d7572..99c0be00b47c 100644 --- a/include/linux/dma-attrs.h +++ b/include/linux/dma-attrs.h | |||
@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs) | |||
41 | bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); | 41 | bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); |
42 | } | 42 | } |
43 | 43 | ||
44 | #ifdef CONFIG_HAVE_DMA_ATTRS | ||
45 | /** | 44 | /** |
46 | * dma_set_attr - set a specific attribute | 45 | * dma_set_attr - set a specific attribute |
47 | * @attr: attribute to set | 46 | * @attr: attribute to set |
@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) | |||
67 | BUG_ON(attr >= DMA_ATTR_MAX); | 66 | BUG_ON(attr >= DMA_ATTR_MAX); |
68 | return test_bit(attr, attrs->flags); | 67 | return test_bit(attr, attrs->flags); |
69 | } | 68 | } |
70 | #else /* !CONFIG_HAVE_DMA_ATTRS */ | ||
71 | static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
72 | { | ||
73 | } | ||
74 | 69 | ||
75 | static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
76 | { | ||
77 | return 0; | ||
78 | } | ||
79 | #endif /* CONFIG_HAVE_DMA_ATTRS */ | ||
80 | #endif /* _DMA_ATTR_H */ | 70 | #endif /* _DMA_ATTR_H */ |
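
With CONFIG_HAVE_DMA_ATTRS gone, dma_set_attr() and dma_get_attr() are available unconditionally. A small hedged sketch of building an attribute set and reading it back, using only what dma-attrs.h declares; the helper name is made up:

#include <linux/dma-attrs.h>

/* Build an attribute set; attrs would normally be passed on to one of the
 * *_attrs() mapping or allocation helpers shown in dma-mapping.h. */
static bool example_wants_write_combine(void)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_get_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
}
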
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2e551e2d2d03..75857cda38e9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -6,8 +6,11 @@ | |||
6 | #include <linux/device.h> | 6 | #include <linux/device.h> |
7 | #include <linux/err.h> | 7 | #include <linux/err.h> |
8 | #include <linux/dma-attrs.h> | 8 | #include <linux/dma-attrs.h> |
9 | #include <linux/dma-debug.h> | ||
9 | #include <linux/dma-direction.h> | 10 | #include <linux/dma-direction.h> |
10 | #include <linux/scatterlist.h> | 11 | #include <linux/scatterlist.h> |
12 | #include <linux/kmemcheck.h> | ||
13 | #include <linux/bug.h> | ||
11 | 14 | ||
12 | /* | 15 | /* |
13 | * A dma_addr_t can hold any valid DMA or bus address for the platform. | 16 | * A dma_addr_t can hold any valid DMA or bus address for the platform. |
@@ -83,10 +86,383 @@ static inline int is_device_dma_capable(struct device *dev) | |||
83 | return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; | 86 | return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; |
84 | } | 87 | } |
85 | 88 | ||
89 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | ||
90 | /* | ||
91 | * These three functions are only for dma allocator. | ||
92 | * Don't use them in device drivers. | ||
93 | */ | ||
94 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | ||
95 | dma_addr_t *dma_handle, void **ret); | ||
96 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr); | ||
97 | |||
98 | int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, | ||
99 | void *cpu_addr, size_t size, int *ret); | ||
100 | #else | ||
101 | #define dma_alloc_from_coherent(dev, size, handle, ret) (0) | ||
102 | #define dma_release_from_coherent(dev, order, vaddr) (0) | ||
103 | #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) | ||
104 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ | ||
105 | |||
86 | #ifdef CONFIG_HAS_DMA | 106 | #ifdef CONFIG_HAS_DMA |
87 | #include <asm/dma-mapping.h> | 107 | #include <asm/dma-mapping.h> |
88 | #else | 108 | #else |
89 | #include <asm-generic/dma-mapping-broken.h> | 109 | /* |
110 | * Define the dma api to allow compilation but not linking of | ||
111 | * dma dependent code. Code that depends on the dma-mapping | ||
112 | * API needs to set 'depends on HAS_DMA' in its Kconfig | ||
113 | */ | ||
114 | extern struct dma_map_ops bad_dma_ops; | ||
115 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) | ||
116 | { | ||
117 | return &bad_dma_ops; | ||
118 | } | ||
119 | #endif | ||
120 | |||
121 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, | ||
122 | size_t size, | ||
123 | enum dma_data_direction dir, | ||
124 | struct dma_attrs *attrs) | ||
125 | { | ||
126 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
127 | dma_addr_t addr; | ||
128 | |||
129 | kmemcheck_mark_initialized(ptr, size); | ||
130 | BUG_ON(!valid_dma_direction(dir)); | ||
131 | addr = ops->map_page(dev, virt_to_page(ptr), | ||
132 | offset_in_page(ptr), size, | ||
133 | dir, attrs); | ||
134 | debug_dma_map_page(dev, virt_to_page(ptr), | ||
135 | offset_in_page(ptr), size, | ||
136 | dir, addr, true); | ||
137 | return addr; | ||
138 | } | ||
139 | |||
140 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, | ||
141 | size_t size, | ||
142 | enum dma_data_direction dir, | ||
143 | struct dma_attrs *attrs) | ||
144 | { | ||
145 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
146 | |||
147 | BUG_ON(!valid_dma_direction(dir)); | ||
148 | if (ops->unmap_page) | ||
149 | ops->unmap_page(dev, addr, size, dir, attrs); | ||
150 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * dma_map_sg_attrs returns 0 on error and > 0 on success. | ||
155 | * It should never return a value < 0. | ||
156 | */ | ||
157 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, | ||
158 | int nents, enum dma_data_direction dir, | ||
159 | struct dma_attrs *attrs) | ||
160 | { | ||
161 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
162 | int i, ents; | ||
163 | struct scatterlist *s; | ||
164 | |||
165 | for_each_sg(sg, s, nents, i) | ||
166 | kmemcheck_mark_initialized(sg_virt(s), s->length); | ||
167 | BUG_ON(!valid_dma_direction(dir)); | ||
168 | ents = ops->map_sg(dev, sg, nents, dir, attrs); | ||
169 | BUG_ON(ents < 0); | ||
170 | debug_dma_map_sg(dev, sg, nents, ents, dir); | ||
171 | |||
172 | return ents; | ||
173 | } | ||
174 | |||
175 | static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, | ||
176 | int nents, enum dma_data_direction dir, | ||
177 | struct dma_attrs *attrs) | ||
178 | { | ||
179 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
180 | |||
181 | BUG_ON(!valid_dma_direction(dir)); | ||
182 | debug_dma_unmap_sg(dev, sg, nents, dir); | ||
183 | if (ops->unmap_sg) | ||
184 | ops->unmap_sg(dev, sg, nents, dir, attrs); | ||
185 | } | ||
186 | |||
187 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
188 | size_t offset, size_t size, | ||
189 | enum dma_data_direction dir) | ||
190 | { | ||
191 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
192 | dma_addr_t addr; | ||
193 | |||
194 | kmemcheck_mark_initialized(page_address(page) + offset, size); | ||
195 | BUG_ON(!valid_dma_direction(dir)); | ||
196 | addr = ops->map_page(dev, page, offset, size, dir, NULL); | ||
197 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | ||
198 | |||
199 | return addr; | ||
200 | } | ||
201 | |||
202 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
203 | size_t size, enum dma_data_direction dir) | ||
204 | { | ||
205 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
206 | |||
207 | BUG_ON(!valid_dma_direction(dir)); | ||
208 | if (ops->unmap_page) | ||
209 | ops->unmap_page(dev, addr, size, dir, NULL); | ||
210 | debug_dma_unmap_page(dev, addr, size, dir, false); | ||
211 | } | ||
212 | |||
213 | static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, | ||
214 | size_t size, | ||
215 | enum dma_data_direction dir) | ||
216 | { | ||
217 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
218 | |||
219 | BUG_ON(!valid_dma_direction(dir)); | ||
220 | if (ops->sync_single_for_cpu) | ||
221 | ops->sync_single_for_cpu(dev, addr, size, dir); | ||
222 | debug_dma_sync_single_for_cpu(dev, addr, size, dir); | ||
223 | } | ||
224 | |||
225 | static inline void dma_sync_single_for_device(struct device *dev, | ||
226 | dma_addr_t addr, size_t size, | ||
227 | enum dma_data_direction dir) | ||
228 | { | ||
229 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
230 | |||
231 | BUG_ON(!valid_dma_direction(dir)); | ||
232 | if (ops->sync_single_for_device) | ||
233 | ops->sync_single_for_device(dev, addr, size, dir); | ||
234 | debug_dma_sync_single_for_device(dev, addr, size, dir); | ||
235 | } | ||
236 | |||
237 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
238 | dma_addr_t addr, | ||
239 | unsigned long offset, | ||
240 | size_t size, | ||
241 | enum dma_data_direction dir) | ||
242 | { | ||
243 | const struct dma_map_ops *ops = get_dma_ops(dev); | ||
244 | |||
245 | BUG_ON(!valid_dma_direction(dir)); | ||
246 | if (ops->sync_single_for_cpu) | ||
247 | ops->sync_single_for_cpu(dev, addr + offset, size, dir); | ||
248 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
249 | } | ||
250 | |||
251 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
252 | dma_addr_t addr, | ||
253 | unsigned long offset, | ||
254 | size_t size, | ||
255 | enum dma_data_direction dir) | ||
256 | { | ||
257 | const struct dma_map_ops *ops = get_dma_ops(dev); | ||
258 | |||
259 | BUG_ON(!valid_dma_direction(dir)); | ||
260 | if (ops->sync_single_for_device) | ||
261 | ops->sync_single_for_device(dev, addr + offset, size, dir); | ||
262 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | ||
263 | } | ||
264 | |||
265 | static inline void | ||
266 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
267 | int nelems, enum dma_data_direction dir) | ||
268 | { | ||
269 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
270 | |||
271 | BUG_ON(!valid_dma_direction(dir)); | ||
272 | if (ops->sync_sg_for_cpu) | ||
273 | ops->sync_sg_for_cpu(dev, sg, nelems, dir); | ||
274 | debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
275 | } | ||
276 | |||
277 | static inline void | ||
278 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
279 | int nelems, enum dma_data_direction dir) | ||
280 | { | ||
281 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
282 | |||
283 | BUG_ON(!valid_dma_direction(dir)); | ||
284 | if (ops->sync_sg_for_device) | ||
285 | ops->sync_sg_for_device(dev, sg, nelems, dir); | ||
286 | debug_dma_sync_sg_for_device(dev, sg, nelems, dir); | ||
287 | |||
288 | } | ||
289 | |||
290 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | ||
291 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) | ||
292 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) | ||
293 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) | ||
294 | |||
295 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
296 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
297 | |||
298 | void *dma_common_contiguous_remap(struct page *page, size_t size, | ||
299 | unsigned long vm_flags, | ||
300 | pgprot_t prot, const void *caller); | ||
301 | |||
302 | void *dma_common_pages_remap(struct page **pages, size_t size, | ||
303 | unsigned long vm_flags, pgprot_t prot, | ||
304 | const void *caller); | ||
305 | void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); | ||
306 | |||
307 | /** | ||
308 | * dma_mmap_attrs - map a coherent DMA allocation into user space | ||
309 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
310 | * @vma: vm_area_struct describing requested user mapping | ||
311 | * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs | ||
312 | * @handle: device-view address returned from dma_alloc_attrs | ||
313 | * @size: size of memory originally requested in dma_alloc_attrs | ||
314 | * @attrs: attributes of mapping properties requested in dma_alloc_attrs | ||
315 | * | ||
316 | * Map a coherent DMA buffer previously allocated by dma_alloc_attrs | ||
317 | * into user space. The coherent DMA buffer must not be freed by the | ||
318 | * driver until the user space mapping has been released. | ||
319 | */ | ||
320 | static inline int | ||
321 | dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, | ||
322 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | ||
323 | { | ||
324 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
325 | BUG_ON(!ops); | ||
326 | if (ops->mmap) | ||
327 | return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); | ||
328 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
329 | } | ||
330 | |||
331 | #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) | ||
332 | |||
333 | int | ||
334 | dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
335 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
336 | |||
337 | static inline int | ||
338 | dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | ||
339 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | ||
340 | { | ||
341 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
342 | BUG_ON(!ops); | ||
343 | if (ops->get_sgtable) | ||
344 | return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, | ||
345 | attrs); | ||
346 | return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); | ||
347 | } | ||
348 | |||
349 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) | ||
350 | |||
351 | #ifndef arch_dma_alloc_attrs | ||
352 | #define arch_dma_alloc_attrs(dev, flag) (true) | ||
353 | #endif | ||
354 | |||
355 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
356 | dma_addr_t *dma_handle, gfp_t flag, | ||
357 | struct dma_attrs *attrs) | ||
358 | { | ||
359 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
360 | void *cpu_addr; | ||
361 | |||
362 | BUG_ON(!ops); | ||
363 | |||
364 | if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) | ||
365 | return cpu_addr; | ||
366 | |||
367 | if (!arch_dma_alloc_attrs(&dev, &flag)) | ||
368 | return NULL; | ||
369 | if (!ops->alloc) | ||
370 | return NULL; | ||
371 | |||
372 | cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); | ||
373 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); | ||
374 | return cpu_addr; | ||
375 | } | ||
376 | |||
377 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
378 | void *cpu_addr, dma_addr_t dma_handle, | ||
379 | struct dma_attrs *attrs) | ||
380 | { | ||
381 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
382 | |||
383 | BUG_ON(!ops); | ||
384 | WARN_ON(irqs_disabled()); | ||
385 | |||
386 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | ||
387 | return; | ||
388 | |||
389 | if (!ops->free) | ||
390 | return; | ||
391 | |||
392 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
393 | ops->free(dev, size, cpu_addr, dma_handle, attrs); | ||
394 | } | ||
395 | |||
396 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||
397 | dma_addr_t *dma_handle, gfp_t flag) | ||
398 | { | ||
399 | return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); | ||
400 | } | ||
401 | |||
402 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
403 | void *cpu_addr, dma_addr_t dma_handle) | ||
404 | { | ||
405 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); | ||
406 | } | ||
407 | |||
408 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
409 | dma_addr_t *dma_handle, gfp_t gfp) | ||
410 | { | ||
411 | DEFINE_DMA_ATTRS(attrs); | ||
412 | |||
413 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
414 | return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); | ||
415 | } | ||
416 | |||
417 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | ||
418 | void *cpu_addr, dma_addr_t dma_handle) | ||
419 | { | ||
420 | DEFINE_DMA_ATTRS(attrs); | ||
421 | |||
422 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
423 | dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); | ||
424 | } | ||
425 | |||
426 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
427 | { | ||
428 | debug_dma_mapping_error(dev, dma_addr); | ||
429 | |||
430 | if (get_dma_ops(dev)->mapping_error) | ||
431 | return get_dma_ops(dev)->mapping_error(dev, dma_addr); | ||
432 | |||
433 | #ifdef DMA_ERROR_CODE | ||
434 | return dma_addr == DMA_ERROR_CODE; | ||
435 | #else | ||
436 | return 0; | ||
437 | #endif | ||
438 | } | ||
439 | |||
440 | #ifndef HAVE_ARCH_DMA_SUPPORTED | ||
441 | static inline int dma_supported(struct device *dev, u64 mask) | ||
442 | { | ||
443 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
444 | |||
445 | if (!ops) | ||
446 | return 0; | ||
447 | if (!ops->dma_supported) | ||
448 | return 1; | ||
449 | return ops->dma_supported(dev, mask); | ||
450 | } | ||
451 | #endif | ||
452 | |||
453 | #ifndef HAVE_ARCH_DMA_SET_MASK | ||
454 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
455 | { | ||
456 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
457 | |||
458 | if (ops->set_dma_mask) | ||
459 | return ops->set_dma_mask(dev, mask); | ||
460 | |||
461 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
462 | return -EIO; | ||
463 | *dev->dma_mask = mask; | ||
464 | return 0; | ||
465 | } | ||
90 | #endif | 466 | #endif |
91 | 467 | ||
92 | static inline u64 dma_get_mask(struct device *dev) | 468 | static inline u64 dma_get_mask(struct device *dev) |
@@ -208,7 +584,13 @@ static inline int dma_get_cache_alignment(void) | |||
208 | #define DMA_MEMORY_INCLUDES_CHILDREN 0x04 | 584 | #define DMA_MEMORY_INCLUDES_CHILDREN 0x04 |
209 | #define DMA_MEMORY_EXCLUSIVE 0x08 | 585 | #define DMA_MEMORY_EXCLUSIVE 0x08 |
210 | 586 | ||
211 | #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | 587 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT |
588 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | ||
589 | dma_addr_t device_addr, size_t size, int flags); | ||
590 | void dma_release_declared_memory(struct device *dev); | ||
591 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
592 | dma_addr_t device_addr, size_t size); | ||
593 | #else | ||
212 | static inline int | 594 | static inline int |
213 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | 595 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
214 | dma_addr_t device_addr, size_t size, int flags) | 596 | dma_addr_t device_addr, size_t size, int flags) |
@@ -227,7 +609,7 @@ dma_mark_declared_memory_occupied(struct device *dev, | |||
227 | { | 609 | { |
228 | return ERR_PTR(-EBUSY); | 610 | return ERR_PTR(-EBUSY); |
229 | } | 611 | } |
230 | #endif | 612 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ |
231 | 613 | ||
232 | /* | 614 | /* |
233 | * Managed DMA API | 615 | * Managed DMA API |
@@ -240,13 +622,13 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, | |||
240 | dma_addr_t *dma_handle, gfp_t gfp); | 622 | dma_addr_t *dma_handle, gfp_t gfp); |
241 | extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, | 623 | extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, |
242 | dma_addr_t dma_handle); | 624 | dma_addr_t dma_handle); |
243 | #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | 625 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT |
244 | extern int dmam_declare_coherent_memory(struct device *dev, | 626 | extern int dmam_declare_coherent_memory(struct device *dev, |
245 | phys_addr_t phys_addr, | 627 | phys_addr_t phys_addr, |
246 | dma_addr_t device_addr, size_t size, | 628 | dma_addr_t device_addr, size_t size, |
247 | int flags); | 629 | int flags); |
248 | extern void dmam_release_declared_memory(struct device *dev); | 630 | extern void dmam_release_declared_memory(struct device *dev); |
249 | #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ | 631 | #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ |
250 | static inline int dmam_declare_coherent_memory(struct device *dev, | 632 | static inline int dmam_declare_coherent_memory(struct device *dev, |
251 | phys_addr_t phys_addr, dma_addr_t device_addr, | 633 | phys_addr_t phys_addr, dma_addr_t device_addr, |
252 | size_t size, gfp_t gfp) | 634 | size_t size, gfp_t gfp) |
@@ -257,24 +639,8 @@ static inline int dmam_declare_coherent_memory(struct device *dev, | |||
257 | static inline void dmam_release_declared_memory(struct device *dev) | 639 | static inline void dmam_release_declared_memory(struct device *dev) |
258 | { | 640 | { |
259 | } | 641 | } |
260 | #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ | 642 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ |
261 | |||
262 | #ifndef CONFIG_HAVE_DMA_ATTRS | ||
263 | struct dma_attrs; | ||
264 | |||
265 | #define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ | ||
266 | dma_map_single(dev, cpu_addr, size, dir) | ||
267 | 643 | ||
268 | #define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ | ||
269 | dma_unmap_single(dev, dma_addr, size, dir) | ||
270 | |||
271 | #define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \ | ||
272 | dma_map_sg(dev, sgl, nents, dir) | ||
273 | |||
274 | #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ | ||
275 | dma_unmap_sg(dev, sgl, nents, dir) | ||
276 | |||
277 | #else | ||
278 | static inline void *dma_alloc_writecombine(struct device *dev, size_t size, | 644 | static inline void *dma_alloc_writecombine(struct device *dev, size_t size, |
279 | dma_addr_t *dma_addr, gfp_t gfp) | 645 | dma_addr_t *dma_addr, gfp_t gfp) |
280 | { | 646 | { |
@@ -300,7 +666,6 @@ static inline int dma_mmap_writecombine(struct device *dev, | |||
300 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 666 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); |
301 | return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); | 667 | return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); |
302 | } | 668 | } |
303 | #endif /* CONFIG_HAVE_DMA_ATTRS */ | ||
304 | 669 | ||
305 | #ifdef CONFIG_NEED_DMA_MAP_STATE | 670 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
306 | #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME | 671 | #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME |
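
The dma_mmap_attrs() kernel-doc above describes mapping a coherent allocation into user space. A hedged sketch of a driver mmap() file operation doing that with the dma_mmap_coherent() shorthand; the private-data layout and function names are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state holding a buffer from dma_alloc_coherent(). */
struct example_buf {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_buf *b = file->private_data;

	if (vma->vm_end - vma->vm_start > b->size)
		return -EINVAL;
	return dma_mmap_coherent(b->dev, vma, b->cpu_addr, b->dma_handle,
				 vma->vm_end - vma->vm_start);
}
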
diff --git a/include/linux/io.h b/include/linux/io.h index fffd88d7f426..32403b5716e5 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -29,6 +29,7 @@ struct device; | |||
29 | struct resource; | 29 | struct resource; |
30 | 30 | ||
31 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); | 31 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); |
32 | void __ioread32_copy(void *to, const void __iomem *from, size_t count); | ||
32 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); | 33 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); |
33 | 34 | ||
34 | #ifdef CONFIG_MMU | 35 | #ifdef CONFIG_MMU |
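
__ioread32_copy() is the read-side counterpart of __iowrite32_copy(); as with the write helper, the count is in 32-bit words rather than bytes. A small hedged sketch; the ioremapped region and buffer are hypothetical:

#include <linux/io.h>

/* Hypothetical: snapshot a 64-byte MMIO window into a CPU buffer. */
static void example_read_regs(void __iomem *regs, u32 *buf)
{
	__ioread32_copy(buf, regs, 64 / sizeof(u32));	/* count is in 32-bit words */
}
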
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 7b68d2788a56..2cc643c6e870 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -109,11 +109,7 @@ struct compat_kexec_segment { | |||
109 | }; | 109 | }; |
110 | #endif | 110 | #endif |
111 | 111 | ||
112 | struct kexec_sha_region { | 112 | #ifdef CONFIG_KEXEC_FILE |
113 | unsigned long start; | ||
114 | unsigned long len; | ||
115 | }; | ||
116 | |||
117 | struct purgatory_info { | 113 | struct purgatory_info { |
118 | /* Pointer to elf header of read only purgatory */ | 114 | /* Pointer to elf header of read only purgatory */ |
119 | Elf_Ehdr *ehdr; | 115 | Elf_Ehdr *ehdr; |
@@ -130,6 +126,28 @@ struct purgatory_info { | |||
130 | unsigned long purgatory_load_addr; | 126 | unsigned long purgatory_load_addr; |
131 | }; | 127 | }; |
132 | 128 | ||
129 | typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); | ||
130 | typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, | ||
131 | unsigned long kernel_len, char *initrd, | ||
132 | unsigned long initrd_len, char *cmdline, | ||
133 | unsigned long cmdline_len); | ||
134 | typedef int (kexec_cleanup_t)(void *loader_data); | ||
135 | |||
136 | #ifdef CONFIG_KEXEC_VERIFY_SIG | ||
137 | typedef int (kexec_verify_sig_t)(const char *kernel_buf, | ||
138 | unsigned long kernel_len); | ||
139 | #endif | ||
140 | |||
141 | struct kexec_file_ops { | ||
142 | kexec_probe_t *probe; | ||
143 | kexec_load_t *load; | ||
144 | kexec_cleanup_t *cleanup; | ||
145 | #ifdef CONFIG_KEXEC_VERIFY_SIG | ||
146 | kexec_verify_sig_t *verify_sig; | ||
147 | #endif | ||
148 | }; | ||
149 | #endif | ||
150 | |||
133 | struct kimage { | 151 | struct kimage { |
134 | kimage_entry_t head; | 152 | kimage_entry_t head; |
135 | kimage_entry_t *entry; | 153 | kimage_entry_t *entry; |
@@ -161,6 +179,7 @@ struct kimage { | |||
161 | struct kimage_arch arch; | 179 | struct kimage_arch arch; |
162 | #endif | 180 | #endif |
163 | 181 | ||
182 | #ifdef CONFIG_KEXEC_FILE | ||
164 | /* Additional fields for file based kexec syscall */ | 183 | /* Additional fields for file based kexec syscall */ |
165 | void *kernel_buf; | 184 | void *kernel_buf; |
166 | unsigned long kernel_buf_len; | 185 | unsigned long kernel_buf_len; |
@@ -179,38 +198,7 @@ struct kimage { | |||
179 | 198 | ||
180 | /* Information for loading purgatory */ | 199 | /* Information for loading purgatory */ |
181 | struct purgatory_info purgatory_info; | 200 | struct purgatory_info purgatory_info; |
182 | }; | 201 | #endif |
183 | |||
184 | /* | ||
185 | * Keeps track of buffer parameters as provided by caller for requesting | ||
186 | * memory placement of buffer. | ||
187 | */ | ||
188 | struct kexec_buf { | ||
189 | struct kimage *image; | ||
190 | char *buffer; | ||
191 | unsigned long bufsz; | ||
192 | unsigned long mem; | ||
193 | unsigned long memsz; | ||
194 | unsigned long buf_align; | ||
195 | unsigned long buf_min; | ||
196 | unsigned long buf_max; | ||
197 | bool top_down; /* allocate from top of memory hole */ | ||
198 | }; | ||
199 | |||
200 | typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); | ||
201 | typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, | ||
202 | unsigned long kernel_len, char *initrd, | ||
203 | unsigned long initrd_len, char *cmdline, | ||
204 | unsigned long cmdline_len); | ||
205 | typedef int (kexec_cleanup_t)(void *loader_data); | ||
206 | typedef int (kexec_verify_sig_t)(const char *kernel_buf, | ||
207 | unsigned long kernel_len); | ||
208 | |||
209 | struct kexec_file_ops { | ||
210 | kexec_probe_t *probe; | ||
211 | kexec_load_t *load; | ||
212 | kexec_cleanup_t *cleanup; | ||
213 | kexec_verify_sig_t *verify_sig; | ||
214 | }; | 202 | }; |
215 | 203 | ||
216 | /* kexec interface functions */ | 204 | /* kexec interface functions */ |
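
The kexec_file_ops structure and its typedefs are now visible only under CONFIG_KEXEC_FILE. A hedged sketch of how an image loader might fill one in; the example_* callbacks are illustrative names (real loaders live under arch/, e.g. the x86 bzImage loader), not part of this patch:

#include <linux/kexec.h>

static int example_probe(const char *kernel_buf, unsigned long kernel_size)
{
	return 0;	/* claim every image, purely for the sake of the sketch */
}

static void *example_load(struct kimage *image, char *kernel_buf,
			  unsigned long kernel_len, char *initrd,
			  unsigned long initrd_len, char *cmdline,
			  unsigned long cmdline_len)
{
	return NULL;	/* no per-image loader data in this sketch */
}

static int example_cleanup(void *loader_data)
{
	return 0;
}

static struct kexec_file_ops example_kexec_ops = {
	.probe   = example_probe,
	.load    = example_load,
	.cleanup = example_cleanup,
};
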
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 2a6b9947aaa3..cb0ba9f2a9a2 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h | |||
@@ -40,7 +40,7 @@ struct list_lru_node { | |||
40 | spinlock_t lock; | 40 | spinlock_t lock; |
41 | /* global list, used for the root cgroup in cgroup aware lrus */ | 41 | /* global list, used for the root cgroup in cgroup aware lrus */ |
42 | struct list_lru_one lru; | 42 | struct list_lru_one lru; |
43 | #ifdef CONFIG_MEMCG_KMEM | 43 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
44 | /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ | 44 | /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ |
45 | struct list_lru_memcg *memcg_lrus; | 45 | struct list_lru_memcg *memcg_lrus; |
46 | #endif | 46 | #endif |
@@ -48,7 +48,7 @@ struct list_lru_node { | |||
48 | 48 | ||
49 | struct list_lru { | 49 | struct list_lru { |
50 | struct list_lru_node *node; | 50 | struct list_lru_node *node; |
51 | #ifdef CONFIG_MEMCG_KMEM | 51 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
52 | struct list_head list; | 52 | struct list_head list; |
53 | #endif | 53 | #endif |
54 | }; | 54 | }; |
diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 4356686b0a39..6b784c59f321 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h | |||
@@ -9,8 +9,8 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | #define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *)) | 12 | #define LZ4_MEM_COMPRESS (16384) |
13 | #define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *)) | 13 | #define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *))) |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * lz4_compressbound() | 16 | * lz4_compressbound() |
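
LZ4_MEM_COMPRESS and LZ4HC_MEM_COMPRESS now reflect the real scratch sizes instead of pointer-scaled guesses. A heavily hedged sketch of sizing the workspace for the legacy lz4_compress() interface of this era; the call signature is an assumption and should be checked against the tree:

#include <linux/lz4.h>
#include <linux/vmalloc.h>

/* Assumed legacy API: lz4_compress(src, src_len, dst, &dst_len, wrkmem). */
static int example_lz4(const unsigned char *src, size_t src_len,
		       unsigned char *dst, size_t *dst_len)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return -ENOMEM;
	ret = lz4_compress(src, src_len, dst, dst_len, wrkmem);
	vfree(wrkmem);
	return ret;
}
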
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 189f04d4d2ec..9ae48d4aeb5e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -50,6 +50,9 @@ enum mem_cgroup_stat_index { | |||
50 | MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ | 50 | MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ |
51 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ | 51 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ |
52 | MEM_CGROUP_STAT_NSTATS, | 52 | MEM_CGROUP_STAT_NSTATS, |
53 | /* default hierarchy stats */ | ||
54 | MEMCG_SOCK, | ||
55 | MEMCG_NR_STAT, | ||
53 | }; | 56 | }; |
54 | 57 | ||
55 | struct mem_cgroup_reclaim_cookie { | 58 | struct mem_cgroup_reclaim_cookie { |
@@ -85,15 +88,9 @@ enum mem_cgroup_events_target { | |||
85 | MEM_CGROUP_NTARGETS, | 88 | MEM_CGROUP_NTARGETS, |
86 | }; | 89 | }; |
87 | 90 | ||
88 | struct cg_proto { | ||
89 | struct page_counter memory_allocated; /* Current allocated memory. */ | ||
90 | int memory_pressure; | ||
91 | bool active; | ||
92 | }; | ||
93 | |||
94 | #ifdef CONFIG_MEMCG | 91 | #ifdef CONFIG_MEMCG |
95 | struct mem_cgroup_stat_cpu { | 92 | struct mem_cgroup_stat_cpu { |
96 | long count[MEM_CGROUP_STAT_NSTATS]; | 93 | long count[MEMCG_NR_STAT]; |
97 | unsigned long events[MEMCG_NR_EVENTS]; | 94 | unsigned long events[MEMCG_NR_EVENTS]; |
98 | unsigned long nr_page_events; | 95 | unsigned long nr_page_events; |
99 | unsigned long targets[MEM_CGROUP_NTARGETS]; | 96 | unsigned long targets[MEM_CGROUP_NTARGETS]; |
@@ -152,6 +149,12 @@ struct mem_cgroup_thresholds { | |||
152 | struct mem_cgroup_threshold_ary *spare; | 149 | struct mem_cgroup_threshold_ary *spare; |
153 | }; | 150 | }; |
154 | 151 | ||
152 | enum memcg_kmem_state { | ||
153 | KMEM_NONE, | ||
154 | KMEM_ALLOCATED, | ||
155 | KMEM_ONLINE, | ||
156 | }; | ||
157 | |||
155 | /* | 158 | /* |
156 | * The memory controller data structure. The memory controller controls both | 159 | * The memory controller data structure. The memory controller controls both |
157 | * page cache and RSS per cgroup. We would eventually like to provide | 160 | * page cache and RSS per cgroup. We would eventually like to provide |
@@ -163,8 +166,12 @@ struct mem_cgroup { | |||
163 | 166 | ||
164 | /* Accounted resources */ | 167 | /* Accounted resources */ |
165 | struct page_counter memory; | 168 | struct page_counter memory; |
169 | struct page_counter swap; | ||
170 | |||
171 | /* Legacy consumer-oriented counters */ | ||
166 | struct page_counter memsw; | 172 | struct page_counter memsw; |
167 | struct page_counter kmem; | 173 | struct page_counter kmem; |
174 | struct page_counter tcpmem; | ||
168 | 175 | ||
169 | /* Normal memory consumption range */ | 176 | /* Normal memory consumption range */ |
170 | unsigned long low; | 177 | unsigned long low; |
@@ -178,9 +185,6 @@ struct mem_cgroup { | |||
178 | /* vmpressure notifications */ | 185 | /* vmpressure notifications */ |
179 | struct vmpressure vmpressure; | 186 | struct vmpressure vmpressure; |
180 | 187 | ||
181 | /* css_online() has been completed */ | ||
182 | int initialized; | ||
183 | |||
184 | /* | 188 | /* |
185 | * Should the accounting and control be hierarchical, per subtree? | 189 | * Should the accounting and control be hierarchical, per subtree? |
186 | */ | 190 | */ |
@@ -227,14 +231,16 @@ struct mem_cgroup { | |||
227 | */ | 231 | */ |
228 | struct mem_cgroup_stat_cpu __percpu *stat; | 232 | struct mem_cgroup_stat_cpu __percpu *stat; |
229 | 233 | ||
230 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | 234 | unsigned long socket_pressure; |
231 | struct cg_proto tcp_mem; | 235 | |
232 | #endif | 236 | /* Legacy tcp memory accounting */ |
233 | #if defined(CONFIG_MEMCG_KMEM) | 237 | bool tcpmem_active; |
238 | int tcpmem_pressure; | ||
239 | |||
240 | #ifndef CONFIG_SLOB | ||
234 | /* Index in the kmem_cache->memcg_params.memcg_caches array */ | 241 | /* Index in the kmem_cache->memcg_params.memcg_caches array */ |
235 | int kmemcg_id; | 242 | int kmemcg_id; |
236 | bool kmem_acct_activated; | 243 | enum memcg_kmem_state kmem_state; |
237 | bool kmem_acct_active; | ||
238 | #endif | 244 | #endif |
239 | 245 | ||
240 | int last_scanned_node; | 246 | int last_scanned_node; |
@@ -249,10 +255,6 @@ struct mem_cgroup { | |||
249 | struct wb_domain cgwb_domain; | 255 | struct wb_domain cgwb_domain; |
250 | #endif | 256 | #endif |
251 | 257 | ||
252 | #ifdef CONFIG_INET | ||
253 | unsigned long socket_pressure; | ||
254 | #endif | ||
255 | |||
256 | /* List of events which userspace want to receive */ | 258 | /* List of events which userspace want to receive */ |
257 | struct list_head event_list; | 259 | struct list_head event_list; |
258 | spinlock_t event_list_lock; | 260 | spinlock_t event_list_lock; |
@@ -356,6 +358,13 @@ static inline bool mem_cgroup_disabled(void) | |||
356 | return !cgroup_subsys_enabled(memory_cgrp_subsys); | 358 | return !cgroup_subsys_enabled(memory_cgrp_subsys); |
357 | } | 359 | } |
358 | 360 | ||
361 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) | ||
362 | { | ||
363 | if (mem_cgroup_disabled()) | ||
364 | return true; | ||
365 | return !!(memcg->css.flags & CSS_ONLINE); | ||
366 | } | ||
367 | |||
359 | /* | 368 | /* |
360 | * For memory reclaim. | 369 | * For memory reclaim. |
361 | */ | 370 | */ |
@@ -364,20 +373,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); | |||
364 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, | 373 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, |
365 | int nr_pages); | 374 | int nr_pages); |
366 | 375 | ||
367 | static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) | ||
368 | { | ||
369 | struct mem_cgroup_per_zone *mz; | ||
370 | struct mem_cgroup *memcg; | ||
371 | |||
372 | if (mem_cgroup_disabled()) | ||
373 | return true; | ||
374 | |||
375 | mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); | ||
376 | memcg = mz->memcg; | ||
377 | |||
378 | return !!(memcg->css.flags & CSS_ONLINE); | ||
379 | } | ||
380 | |||
381 | static inline | 376 | static inline |
382 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) | 377 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) |
383 | { | 378 | { |
@@ -590,13 +585,13 @@ static inline bool mem_cgroup_disabled(void) | |||
590 | return true; | 585 | return true; |
591 | } | 586 | } |
592 | 587 | ||
593 | static inline bool | 588 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) |
594 | mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | ||
595 | { | 589 | { |
596 | return true; | 590 | return true; |
597 | } | 591 | } |
598 | 592 | ||
599 | static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) | 593 | static inline bool |
594 | mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | ||
600 | { | 595 | { |
601 | return true; | 596 | return true; |
602 | } | 597 | } |
@@ -707,15 +702,13 @@ void sock_update_memcg(struct sock *sk); | |||
707 | void sock_release_memcg(struct sock *sk); | 702 | void sock_release_memcg(struct sock *sk); |
708 | bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); | 703 | bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); |
709 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); | 704 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); |
710 | #if defined(CONFIG_MEMCG) && defined(CONFIG_INET) | 705 | #ifdef CONFIG_MEMCG |
711 | extern struct static_key_false memcg_sockets_enabled_key; | 706 | extern struct static_key_false memcg_sockets_enabled_key; |
712 | #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) | 707 | #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) |
713 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) | 708 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) |
714 | { | 709 | { |
715 | #ifdef CONFIG_MEMCG_KMEM | 710 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) |
716 | if (memcg->tcp_mem.memory_pressure) | ||
717 | return true; | 711 | return true; |
718 | #endif | ||
719 | do { | 712 | do { |
720 | if (time_before(jiffies, memcg->socket_pressure)) | 713 | if (time_before(jiffies, memcg->socket_pressure)) |
721 | return true; | 714 | return true; |
@@ -730,7 +723,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) | |||
730 | } | 723 | } |
731 | #endif | 724 | #endif |
732 | 725 | ||
733 | #ifdef CONFIG_MEMCG_KMEM | 726 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
734 | extern struct static_key_false memcg_kmem_enabled_key; | 727 | extern struct static_key_false memcg_kmem_enabled_key; |
735 | 728 | ||
736 | extern int memcg_nr_cache_ids; | 729 | extern int memcg_nr_cache_ids; |
@@ -750,9 +743,9 @@ static inline bool memcg_kmem_enabled(void) | |||
750 | return static_branch_unlikely(&memcg_kmem_enabled_key); | 743 | return static_branch_unlikely(&memcg_kmem_enabled_key); |
751 | } | 744 | } |
752 | 745 | ||
753 | static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) | 746 | static inline bool memcg_kmem_online(struct mem_cgroup *memcg) |
754 | { | 747 | { |
755 | return memcg->kmem_acct_active; | 748 | return memcg->kmem_state == KMEM_ONLINE; |
756 | } | 749 | } |
757 | 750 | ||
758 | /* | 751 | /* |
@@ -850,7 +843,7 @@ static inline bool memcg_kmem_enabled(void) | |||
850 | return false; | 843 | return false; |
851 | } | 844 | } |
852 | 845 | ||
853 | static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) | 846 | static inline bool memcg_kmem_online(struct mem_cgroup *memcg) |
854 | { | 847 | { |
855 | return false; | 848 | return false; |
856 | } | 849 | } |
@@ -886,5 +879,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
886 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | 879 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) |
887 | { | 880 | { |
888 | } | 881 | } |
889 | #endif /* CONFIG_MEMCG_KMEM */ | 882 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
883 | |||
890 | #endif /* _LINUX_MEMCONTROL_H */ | 884 | #endif /* _LINUX_MEMCONTROL_H */ |
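
mem_cgroup_online() replaces mem_cgroup_lruvec_online(), and memcg_kmem_online() replaces memcg_kmem_is_active(); both have no-op stubs when the relevant config is off. A hedged sketch of the kind of check an accounting path might now make; the wrapper itself is illustrative:

#include <linux/memcontrol.h>

/* Illustrative: skip work for cgroups that have not completed onlining. */
static bool example_should_account(struct mem_cgroup *memcg)
{
	return mem_cgroup_online(memcg) && memcg_kmem_online(memcg);
}
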
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 061265f92876..504c98a278d4 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); | |||
57 | #define PTRACE_MODE_READ 0x01 | 57 | #define PTRACE_MODE_READ 0x01 |
58 | #define PTRACE_MODE_ATTACH 0x02 | 58 | #define PTRACE_MODE_ATTACH 0x02 |
59 | #define PTRACE_MODE_NOAUDIT 0x04 | 59 | #define PTRACE_MODE_NOAUDIT 0x04 |
60 | /* Returns true on success, false on denial. */ | 60 | #define PTRACE_MODE_FSCREDS 0x08 |
61 | #define PTRACE_MODE_REALCREDS 0x10 | ||
62 | |||
63 | /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ | ||
64 | #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) | ||
65 | #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) | ||
66 | #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) | ||
67 | #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) | ||
68 | |||
69 | /** | ||
70 | * ptrace_may_access - check whether the caller is permitted to access | ||
71 | * a target task. | ||
72 | * @task: target task | ||
73 | * @mode: selects type of access and caller credentials | ||
74 | * | ||
75 | * Returns true on success, false on denial. | ||
76 | * | ||
77 | * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must | ||
78 | * be set in @mode to specify whether the access was requested through | ||
79 | * a filesystem syscall (should use effective capabilities and fsuid | ||
80 | * of the caller) or through an explicit syscall such as | ||
81 | * process_vm_writev or ptrace (and should use the real credentials). | ||
82 | */ | ||
61 | extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); | 83 | extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); |
62 | 84 | ||
63 | static inline int ptrace_reparented(struct task_struct *child) | 85 | static inline int ptrace_reparented(struct task_struct *child) |
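
The new FSCREDS/REALCREDS flags make the credential choice explicit at every ptrace_may_access() call site. A hedged sketch of a filesystem-reached handler that, per the kernel-doc above, uses the filesystem-credential variant; the surrounding function is illustrative:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* Illustrative: gate access to per-task data reached via a filesystem syscall. */
static int example_may_read_task(struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;
	return 0;
}
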
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 33170dbd9db4..57e7d87d2d4c 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -154,7 +154,7 @@ do { \ | |||
154 | * radix_tree_gang_lookup_tag_slot | 154 | * radix_tree_gang_lookup_tag_slot |
155 | * radix_tree_tagged | 155 | * radix_tree_tagged |
156 | * | 156 | * |
157 | * The first 7 functions are able to be called locklessly, using RCU. The | 157 | * The first 8 functions are able to be called locklessly, using RCU. The |
158 | * caller must ensure calls to these functions are made within rcu_read_lock() | 158 | * caller must ensure calls to these functions are made within rcu_read_lock() |
159 | * regions. Other readers (lock-free or otherwise) and modifications may be | 159 | * regions. Other readers (lock-free or otherwise) and modifications may be |
160 | * running concurrently. | 160 | * running concurrently. |
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index a5aa7ae671f4..b6900099ea81 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
@@ -50,7 +50,7 @@ struct rb_root { | |||
50 | #define RB_ROOT (struct rb_root) { NULL, } | 50 | #define RB_ROOT (struct rb_root) { NULL, } |
51 | #define rb_entry(ptr, type, member) container_of(ptr, type, member) | 51 | #define rb_entry(ptr, type, member) container_of(ptr, type, member) |
52 | 52 | ||
53 | #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) | 53 | #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) |
54 | 54 | ||
55 | /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ | 55 | /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ |
56 | #define RB_EMPTY_NODE(node) \ | 56 | #define RB_EMPTY_NODE(node) \ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 61aa9bbea871..f1e81e128592 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1476,10 +1476,10 @@ struct task_struct { | |||
1476 | unsigned in_iowait:1; | 1476 | unsigned in_iowait:1; |
1477 | #ifdef CONFIG_MEMCG | 1477 | #ifdef CONFIG_MEMCG |
1478 | unsigned memcg_may_oom:1; | 1478 | unsigned memcg_may_oom:1; |
1479 | #endif | 1479 | #ifndef CONFIG_SLOB |
1480 | #ifdef CONFIG_MEMCG_KMEM | ||
1481 | unsigned memcg_kmem_skip_account:1; | 1480 | unsigned memcg_kmem_skip_account:1; |
1482 | #endif | 1481 | #endif |
1482 | #endif | ||
1483 | #ifdef CONFIG_COMPAT_BRK | 1483 | #ifdef CONFIG_COMPAT_BRK |
1484 | unsigned brk_randomized:1; | 1484 | unsigned brk_randomized:1; |
1485 | #endif | 1485 | #endif |
@@ -1643,6 +1643,9 @@ struct task_struct { | |||
1643 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 1643 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
1644 | gfp_t lockdep_reclaim_gfp; | 1644 | gfp_t lockdep_reclaim_gfp; |
1645 | #endif | 1645 | #endif |
1646 | #ifdef CONFIG_UBSAN | ||
1647 | unsigned int in_ubsan; | ||
1648 | #endif | ||
1646 | 1649 | ||
1647 | /* journalling filesystem info */ | 1650 | /* journalling filesystem info */ |
1648 | void *journal_info; | 1651 | void *journal_info; |
diff --git a/include/linux/shm.h b/include/linux/shm.h index 6fb801686ad6..04e881829625 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h | |||
@@ -52,7 +52,7 @@ struct sysv_shm { | |||
52 | 52 | ||
53 | long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, | 53 | long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, |
54 | unsigned long shmlba); | 54 | unsigned long shmlba); |
55 | int is_file_shm_hugepages(struct file *file); | 55 | bool is_file_shm_hugepages(struct file *file); |
56 | void exit_shm(struct task_struct *task); | 56 | void exit_shm(struct task_struct *task); |
57 | #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) | 57 | #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) |
58 | #else | 58 | #else |
@@ -66,9 +66,9 @@ static inline long do_shmat(int shmid, char __user *shmaddr, | |||
66 | { | 66 | { |
67 | return -ENOSYS; | 67 | return -ENOSYS; |
68 | } | 68 | } |
69 | static inline int is_file_shm_hugepages(struct file *file) | 69 | static inline bool is_file_shm_hugepages(struct file *file) |
70 | { | 70 | { |
71 | return 0; | 71 | return false; |
72 | } | 72 | } |
73 | static inline void exit_shm(struct task_struct *task) | 73 | static inline void exit_shm(struct task_struct *task) |
74 | { | 74 | { |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 3ffee7422012..3627d5c1bc47 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -86,7 +86,7 @@ | |||
86 | #else | 86 | #else |
87 | # define SLAB_FAILSLAB 0x00000000UL | 87 | # define SLAB_FAILSLAB 0x00000000UL |
88 | #endif | 88 | #endif |
89 | #ifdef CONFIG_MEMCG_KMEM | 89 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
90 | # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ | 90 | # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ |
91 | #else | 91 | #else |
92 | # define SLAB_ACCOUNT 0x00000000UL | 92 | # define SLAB_ACCOUNT 0x00000000UL |
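
SLAB_ACCOUNT now depends on CONFIG_MEMCG (and !CONFIG_SLOB) rather than CONFIG_MEMCG_KMEM. A hedged sketch of opting a cache into kmem accounting at creation time; the cache name and object type are hypothetical:

#include <linux/slab.h>

struct example_obj {
	int id;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* Objects from this cache are charged to the allocating cgroup. */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_ACCOUNT, NULL);
	return example_cachep ? 0 : -ENOMEM;
}
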
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 33d049066c3d..cf139d3fa513 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -69,7 +69,8 @@ struct kmem_cache { | |||
69 | */ | 69 | */ |
70 | int obj_offset; | 70 | int obj_offset; |
71 | #endif /* CONFIG_DEBUG_SLAB */ | 71 | #endif /* CONFIG_DEBUG_SLAB */ |
72 | #ifdef CONFIG_MEMCG_KMEM | 72 | |
73 | #ifdef CONFIG_MEMCG | ||
73 | struct memcg_cache_params memcg_params; | 74 | struct memcg_cache_params memcg_params; |
74 | #endif | 75 | #endif |
75 | 76 | ||
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 33885118523c..b7e57927f521 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -84,7 +84,7 @@ struct kmem_cache { | |||
84 | #ifdef CONFIG_SYSFS | 84 | #ifdef CONFIG_SYSFS |
85 | struct kobject kobj; /* For sysfs */ | 85 | struct kobject kobj; /* For sysfs */ |
86 | #endif | 86 | #endif |
87 | #ifdef CONFIG_MEMCG_KMEM | 87 | #ifdef CONFIG_MEMCG |
88 | struct memcg_cache_params memcg_params; | 88 | struct memcg_cache_params memcg_params; |
89 | int max_attr_size; /* for propagation, maximum size of a stored attr */ | 89 | int max_attr_size; /* for propagation, maximum size of a stored attr */ |
90 | #ifdef CONFIG_SYSFS | 90 | #ifdef CONFIG_SYSFS |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 414e101cd061..d18b65c53dbb 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -350,33 +350,7 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages); | |||
350 | 350 | ||
351 | extern int kswapd_run(int nid); | 351 | extern int kswapd_run(int nid); |
352 | extern void kswapd_stop(int nid); | 352 | extern void kswapd_stop(int nid); |
353 | #ifdef CONFIG_MEMCG | ||
354 | static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) | ||
355 | { | ||
356 | /* root ? */ | ||
357 | if (mem_cgroup_disabled() || !memcg->css.parent) | ||
358 | return vm_swappiness; | ||
359 | |||
360 | return memcg->swappiness; | ||
361 | } | ||
362 | 353 | ||
363 | #else | ||
364 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) | ||
365 | { | ||
366 | return vm_swappiness; | ||
367 | } | ||
368 | #endif | ||
369 | #ifdef CONFIG_MEMCG_SWAP | ||
370 | extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); | ||
371 | extern void mem_cgroup_uncharge_swap(swp_entry_t entry); | ||
372 | #else | ||
373 | static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | ||
374 | { | ||
375 | } | ||
376 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry) | ||
377 | { | ||
378 | } | ||
379 | #endif | ||
380 | #ifdef CONFIG_SWAP | 354 | #ifdef CONFIG_SWAP |
381 | /* linux/mm/page_io.c */ | 355 | /* linux/mm/page_io.c */ |
382 | extern int swap_readpage(struct page *); | 356 | extern int swap_readpage(struct page *); |
@@ -555,5 +529,55 @@ static inline swp_entry_t get_swap_page(void) | |||
555 | } | 529 | } |
556 | 530 | ||
557 | #endif /* CONFIG_SWAP */ | 531 | #endif /* CONFIG_SWAP */ |
532 | |||
533 | #ifdef CONFIG_MEMCG | ||
534 | static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) | ||
535 | { | ||
536 | /* root ? */ | ||
537 | if (mem_cgroup_disabled() || !memcg->css.parent) | ||
538 | return vm_swappiness; | ||
539 | |||
540 | return memcg->swappiness; | ||
541 | } | ||
542 | |||
543 | #else | ||
544 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) | ||
545 | { | ||
546 | return vm_swappiness; | ||
547 | } | ||
548 | #endif | ||
549 | |||
550 | #ifdef CONFIG_MEMCG_SWAP | ||
551 | extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); | ||
552 | extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); | ||
553 | extern void mem_cgroup_uncharge_swap(swp_entry_t entry); | ||
554 | extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); | ||
555 | extern bool mem_cgroup_swap_full(struct page *page); | ||
556 | #else | ||
557 | static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | ||
558 | { | ||
559 | } | ||
560 | |||
561 | static inline int mem_cgroup_try_charge_swap(struct page *page, | ||
562 | swp_entry_t entry) | ||
563 | { | ||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry) | ||
568 | { | ||
569 | } | ||
570 | |||
571 | static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) | ||
572 | { | ||
573 | return get_nr_swap_pages(); | ||
574 | } | ||
575 | |||
576 | static inline bool mem_cgroup_swap_full(struct page *page) | ||
577 | { | ||
578 | return vm_swap_full(); | ||
579 | } | ||
580 | #endif | ||
581 | |||
558 | #endif /* __KERNEL__*/ | 582 | #endif /* __KERNEL__*/ |
559 | #endif /* _LINUX_SWAP_H */ | 583 | #endif /* _LINUX_SWAP_H */ |
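The new prototypes give the swap-out path a per-cgroup view of swap: mem_cgroup_try_charge_swap() charges a slot against the cgroup2 swap limit, mem_cgroup_swap_full() reports when that cgroup's swap (or, without memcg, global swap) is nearly exhausted, and the !CONFIG_MEMCG_SWAP stubs fall back to the old global behaviour. A hedged sketch of a caller; the function itself is illustrative, not code from this patch:

#include <linux/errno.h>
#include <linux/swap.h>

static int demo_swap_out(struct page *page)
{
    swp_entry_t entry = get_swap_page();

    if (!entry.val)
        return -ENOMEM;

    /* Fails when the page's memcg has hit its memory.swap.max limit. */
    if (mem_cgroup_try_charge_swap(page, entry)) {
        swapcache_free(entry);
        return -ENOMEM;
    }

    /* ... add to swap cache, unmap and start writeback here ... */

    /*
     * Drop the swap-cache copy eagerly when this memcg's swap is nearly
     * full, mirroring what vm_swap_full() does for global swap.
     */
    if (mem_cgroup_swap_full(page))
        try_to_free_swap(page);

    return 0;
}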
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h deleted file mode 100644 index 01ff7c6efada..000000000000 --- a/include/net/tcp_memcontrol.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TCP_MEMCG_H | ||
2 | #define _TCP_MEMCG_H | ||
3 | |||
4 | struct cgroup_subsys; | ||
5 | struct mem_cgroup; | ||
6 | |||
7 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss); | ||
8 | void tcp_destroy_cgroup(struct mem_cgroup *memcg); | ||
9 | #endif /* _TCP_MEMCG_H */ | ||
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h index bc81fb2e1f0e..1c3154913a39 100644 --- a/include/uapi/linux/eventpoll.h +++ b/include/uapi/linux/eventpoll.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #define EPOLL_CTL_DEL 2 | 26 | #define EPOLL_CTL_DEL 2 |
27 | #define EPOLL_CTL_MOD 3 | 27 | #define EPOLL_CTL_MOD 3 |
28 | 28 | ||
29 | /* Set exclusive wakeup mode for the target file descriptor */ | ||
30 | #define EPOLLEXCLUSIVE (1 << 28) | ||
31 | |||
29 | /* | 32 | /* |
30 | * Request the handling of system wakeup events so as to prevent system suspends | 33 | * Request the handling of system wakeup events so as to prevent system suspends |
31 | * from happening while those events are being processed. | 34 | * from happening while those events are being processed. |
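EPOLLEXCLUSIVE asks the kernel to wake only one of the epoll instances waiting on the same file descriptor instead of all of them, avoiding thundering-herd wakeups when many workers share a listening socket. A small userspace sketch (the flag is only accepted with EPOLL_CTL_ADD; the fallback define simply mirrors the value added above for libc headers that predate it):

#include <sys/epoll.h>

#ifndef EPOLLEXCLUSIVE
#define EPOLLEXCLUSIVE (1 << 28)    /* value from the header change above */
#endif

/*
 * Each worker registers the shared listen_fd with EPOLLEXCLUSIVE so an
 * incoming connection wakes only one of them.
 */
static int add_listener(int epfd, int listen_fd)
{
    struct epoll_event ev = {
        .events = EPOLLIN | EPOLLEXCLUSIVE,
        .data.fd = listen_fd,
    };

    return epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
}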
diff --git a/init/Kconfig b/init/Kconfig index 5b86082fa238..22320804fbaf 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -964,17 +964,6 @@ config MEMCG_SWAP_ENABLED | |||
964 | For those who want to have the feature enabled by default should | 964 | For those who want to have the feature enabled by default should |
965 | select this option (if, for some reason, they need to disable it | 965 | select this option (if, for some reason, they need to disable it |
966 | then swapaccount=0 does the trick). | 966 | then swapaccount=0 does the trick). |
967 | config MEMCG_KMEM | ||
968 | bool "Memory Resource Controller Kernel Memory accounting" | ||
969 | depends on MEMCG | ||
970 | depends on SLUB || SLAB | ||
971 | help | ||
972 | The Kernel Memory extension for Memory Resource Controller can limit | ||
973 | the amount of memory used by kernel objects in the system. Those are | ||
974 | fundamentally different from the entities handled by the standard | ||
975 | Memory Controller, which are page-based, and can be swapped. Users of | ||
976 | the kmem extension can use it to guarantee that no group of processes | ||
977 | will ever exhaust kernel resources alone. | ||
978 | 967 | ||
979 | config BLK_CGROUP | 968 | config BLK_CGROUP |
980 | bool "IO controller" | 969 | bool "IO controller" |
@@ -1071,6 +1060,11 @@ config CGROUP_FREEZER | |||
1071 | Provides a way to freeze and unfreeze all tasks in a | 1060 | Provides a way to freeze and unfreeze all tasks in a |
1072 | cgroup. | 1061 | cgroup. |
1073 | 1062 | ||
1063 | This option affects the ORIGINAL cgroup interface. The cgroup2 memory | ||
1064 | controller includes important in-kernel memory consumers by default. | ||

1065 | |||
1066 | If you're using cgroup2, say N. | ||
1067 | |||
1074 | config CGROUP_HUGETLB | 1068 | config CGROUP_HUGETLB |
1075 | bool "HugeTLB controller" | 1069 | bool "HugeTLB controller" |
1076 | depends on HUGETLB_PAGE | 1070 | depends on HUGETLB_PAGE |
@@ -1182,10 +1176,9 @@ config USER_NS | |||
1182 | to provide different user info for different servers. | 1176 | to provide different user info for different servers. |
1183 | 1177 | ||
1184 | When user namespaces are enabled in the kernel it is | 1178 | When user namespaces are enabled in the kernel it is |
1185 | recommended that the MEMCG and MEMCG_KMEM options also be | 1179 | recommended that the MEMCG option also be enabled and that |
1186 | enabled and that user-space use the memory control groups to | 1180 | user-space use the memory control groups to limit the amount |
1187 | limit the amount of memory a memory unprivileged users can | 1181 | of memory a memory unprivileged users can use. |
1188 | use. | ||
1189 | 1182 | ||
1190 | If unsure, say N. | 1183 | If unsure, say N. |
1191 | 1184 | ||
diff --git a/init/do_mounts.h b/init/do_mounts.h index f5b978a9bb92..067af1d9e8b6 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h | |||
@@ -57,11 +57,11 @@ static inline int rd_load_image(char *from) { return 0; } | |||
57 | 57 | ||
58 | #ifdef CONFIG_BLK_DEV_INITRD | 58 | #ifdef CONFIG_BLK_DEV_INITRD |
59 | 59 | ||
60 | int __init initrd_load(void); | 60 | bool __init initrd_load(void); |
61 | 61 | ||
62 | #else | 62 | #else |
63 | 63 | ||
64 | static inline int initrd_load(void) { return 0; } | 64 | static inline bool initrd_load(void) { return false; } |
65 | 65 | ||
66 | #endif | 66 | #endif |
67 | 67 | ||
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 3e0878e8a80d..a1000ca29fc9 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c | |||
@@ -116,7 +116,7 @@ static void __init handle_initrd(void) | |||
116 | } | 116 | } |
117 | } | 117 | } |
118 | 118 | ||
119 | int __init initrd_load(void) | 119 | bool __init initrd_load(void) |
120 | { | 120 | { |
121 | if (mount_initrd) { | 121 | if (mount_initrd) { |
122 | create_dev("/dev/ram", Root_RAM0); | 122 | create_dev("/dev/ram", Root_RAM0); |
@@ -129,9 +129,9 @@ int __init initrd_load(void) | |||
129 | if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { | 129 | if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { |
130 | sys_unlink("/initrd.image"); | 130 | sys_unlink("/initrd.image"); |
131 | handle_initrd(); | 131 | handle_initrd(); |
132 | return 1; | 132 | return true; |
133 | } | 133 | } |
134 | } | 134 | } |
135 | sys_unlink("/initrd.image"); | 135 | sys_unlink("/initrd.image"); |
136 | return 0; | 136 | return false; |
137 | } | 137 | } |
diff --git a/init/main.c b/init/main.c index c6ebefafa496..58c9e374704b 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -164,10 +164,10 @@ static const char *panic_later, *panic_param; | |||
164 | 164 | ||
165 | extern const struct obs_kernel_param __setup_start[], __setup_end[]; | 165 | extern const struct obs_kernel_param __setup_start[], __setup_end[]; |
166 | 166 | ||
167 | static int __init obsolete_checksetup(char *line) | 167 | static bool __init obsolete_checksetup(char *line) |
168 | { | 168 | { |
169 | const struct obs_kernel_param *p; | 169 | const struct obs_kernel_param *p; |
170 | int had_early_param = 0; | 170 | bool had_early_param = false; |
171 | 171 | ||
172 | p = __setup_start; | 172 | p = __setup_start; |
173 | do { | 173 | do { |
@@ -179,13 +179,13 @@ static int __init obsolete_checksetup(char *line) | |||
179 | * Keep iterating, as we can have early | 179 | * Keep iterating, as we can have early |
180 | * params and __setups of same names 8( */ | 180 | * params and __setups of same names 8( */ |
181 | if (line[n] == '\0' || line[n] == '=') | 181 | if (line[n] == '\0' || line[n] == '=') |
182 | had_early_param = 1; | 182 | had_early_param = true; |
183 | } else if (!p->setup_func) { | 183 | } else if (!p->setup_func) { |
184 | pr_warn("Parameter %s is obsolete, ignored\n", | 184 | pr_warn("Parameter %s is obsolete, ignored\n", |
185 | p->str); | 185 | p->str); |
186 | return 1; | 186 | return true; |
187 | } else if (p->setup_func(line + n)) | 187 | } else if (p->setup_func(line + n)) |
188 | return 1; | 188 | return true; |
189 | } | 189 | } |
190 | p++; | 190 | p++; |
191 | } while (p < __setup_end); | 191 | } while (p < __setup_end); |
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -459,7 +459,7 @@ static const struct file_operations shm_file_operations_huge = { | |||
459 | .fallocate = shm_fallocate, | 459 | .fallocate = shm_fallocate, |
460 | }; | 460 | }; |
461 | 461 | ||
462 | int is_file_shm_hugepages(struct file *file) | 462 | bool is_file_shm_hugepages(struct file *file) |
463 | { | 463 | { |
464 | return file->f_op == &shm_file_operations_huge; | 464 | return file->f_op == &shm_file_operations_huge; |
465 | } | 465 | } |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 85ff5e26e23b..5b9d39633ce9 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -759,71 +759,33 @@ const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; | |||
759 | EXPORT_SYMBOL(cpu_all_bits); | 759 | EXPORT_SYMBOL(cpu_all_bits); |
760 | 760 | ||
761 | #ifdef CONFIG_INIT_ALL_POSSIBLE | 761 | #ifdef CONFIG_INIT_ALL_POSSIBLE |
762 | static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly | 762 | struct cpumask __cpu_possible_mask __read_mostly |
763 | = CPU_BITS_ALL; | 763 | = {CPU_BITS_ALL}; |
764 | #else | 764 | #else |
765 | static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; | 765 | struct cpumask __cpu_possible_mask __read_mostly; |
766 | #endif | 766 | #endif |
767 | const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); | 767 | EXPORT_SYMBOL(__cpu_possible_mask); |
768 | EXPORT_SYMBOL(cpu_possible_mask); | ||
769 | 768 | ||
770 | static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; | 769 | struct cpumask __cpu_online_mask __read_mostly; |
771 | const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); | 770 | EXPORT_SYMBOL(__cpu_online_mask); |
772 | EXPORT_SYMBOL(cpu_online_mask); | ||
773 | 771 | ||
774 | static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; | 772 | struct cpumask __cpu_present_mask __read_mostly; |
775 | const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); | 773 | EXPORT_SYMBOL(__cpu_present_mask); |
776 | EXPORT_SYMBOL(cpu_present_mask); | ||
777 | 774 | ||
778 | static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; | 775 | struct cpumask __cpu_active_mask __read_mostly; |
779 | const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); | 776 | EXPORT_SYMBOL(__cpu_active_mask); |
780 | EXPORT_SYMBOL(cpu_active_mask); | ||
781 | |||
782 | void set_cpu_possible(unsigned int cpu, bool possible) | ||
783 | { | ||
784 | if (possible) | ||
785 | cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); | ||
786 | else | ||
787 | cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); | ||
788 | } | ||
789 | |||
790 | void set_cpu_present(unsigned int cpu, bool present) | ||
791 | { | ||
792 | if (present) | ||
793 | cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); | ||
794 | else | ||
795 | cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); | ||
796 | } | ||
797 | |||
798 | void set_cpu_online(unsigned int cpu, bool online) | ||
799 | { | ||
800 | if (online) { | ||
801 | cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); | ||
802 | cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); | ||
803 | } else { | ||
804 | cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); | ||
805 | } | ||
806 | } | ||
807 | |||
808 | void set_cpu_active(unsigned int cpu, bool active) | ||
809 | { | ||
810 | if (active) | ||
811 | cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); | ||
812 | else | ||
813 | cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); | ||
814 | } | ||
815 | 777 | ||
816 | void init_cpu_present(const struct cpumask *src) | 778 | void init_cpu_present(const struct cpumask *src) |
817 | { | 779 | { |
818 | cpumask_copy(to_cpumask(cpu_present_bits), src); | 780 | cpumask_copy(&__cpu_present_mask, src); |
819 | } | 781 | } |
820 | 782 | ||
821 | void init_cpu_possible(const struct cpumask *src) | 783 | void init_cpu_possible(const struct cpumask *src) |
822 | { | 784 | { |
823 | cpumask_copy(to_cpumask(cpu_possible_bits), src); | 785 | cpumask_copy(&__cpu_possible_mask, src); |
824 | } | 786 | } |
825 | 787 | ||
826 | void init_cpu_online(const struct cpumask *src) | 788 | void init_cpu_online(const struct cpumask *src) |
827 | { | 789 | { |
828 | cpumask_copy(to_cpumask(cpu_online_bits), src); | 790 | cpumask_copy(&__cpu_online_mask, src); |
829 | } | 791 | } |
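With the masks exported as ordinary struct cpumask objects, the set_cpu_possible/present/online/active() setters deleted above no longer need out-of-line copies in kernel/cpu.c. A hedged sketch of what the header-side replacement for one of them would look like (the <linux/cpumask.h> change itself is not part of this hunk):

static inline void set_cpu_possible(unsigned int cpu, bool possible)
{
    if (possible)
        cpumask_set_cpu(cpu, &__cpu_possible_mask);
    else
        cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}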
diff --git a/kernel/events/core.c b/kernel/events/core.c index bf8244190d0f..c0957416b32e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -3376,7 +3376,7 @@ find_lively_task_by_vpid(pid_t vpid) | |||
3376 | 3376 | ||
3377 | /* Reuse ptrace permission checks for now. */ | 3377 | /* Reuse ptrace permission checks for now. */ |
3378 | err = -EACCES; | 3378 | err = -EACCES; |
3379 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 3379 | if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) |
3380 | goto errout; | 3380 | goto errout; |
3381 | 3381 | ||
3382 | return task; | 3382 | return task; |
diff --git a/kernel/exit.c b/kernel/exit.c index 07110c6020a0..10e088237fed 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -59,8 +59,6 @@ | |||
59 | #include <asm/pgtable.h> | 59 | #include <asm/pgtable.h> |
60 | #include <asm/mmu_context.h> | 60 | #include <asm/mmu_context.h> |
61 | 61 | ||
62 | static void exit_mm(struct task_struct *tsk); | ||
63 | |||
64 | static void __unhash_process(struct task_struct *p, bool group_dead) | 62 | static void __unhash_process(struct task_struct *p, bool group_dead) |
65 | { | 63 | { |
66 | nr_threads--; | 64 | nr_threads--; |
@@ -1120,8 +1118,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1120 | static int *task_stopped_code(struct task_struct *p, bool ptrace) | 1118 | static int *task_stopped_code(struct task_struct *p, bool ptrace) |
1121 | { | 1119 | { |
1122 | if (ptrace) { | 1120 | if (ptrace) { |
1123 | if (task_is_stopped_or_traced(p) && | 1121 | if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) |
1124 | !(p->jobctl & JOBCTL_LISTENING)) | ||
1125 | return &p->exit_code; | 1122 | return &p->exit_code; |
1126 | } else { | 1123 | } else { |
1127 | if (p->signal->flags & SIGNAL_STOP_STOPPED) | 1124 | if (p->signal->flags & SIGNAL_STOP_STOPPED) |
diff --git a/kernel/futex.c b/kernel/futex.c index c6f514573b28..0773f2b23b10 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2884,7 +2884,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, | |||
2884 | } | 2884 | } |
2885 | 2885 | ||
2886 | ret = -EPERM; | 2886 | ret = -EPERM; |
2887 | if (!ptrace_may_access(p, PTRACE_MODE_READ)) | 2887 | if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
2888 | goto err_unlock; | 2888 | goto err_unlock; |
2889 | 2889 | ||
2890 | head = p->robust_list; | 2890 | head = p->robust_list; |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 55c8c9349cfe..4ae3232e7a28 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, | |||
155 | } | 155 | } |
156 | 156 | ||
157 | ret = -EPERM; | 157 | ret = -EPERM; |
158 | if (!ptrace_may_access(p, PTRACE_MODE_READ)) | 158 | if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
159 | goto err_unlock; | 159 | goto err_unlock; |
160 | 160 | ||
161 | head = p->compat_robust_list; | 161 | head = p->compat_robust_list; |
diff --git a/kernel/kcmp.c b/kernel/kcmp.c index 0aa69ea1d8fd..3a47fa998fe0 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c | |||
@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, | |||
122 | &task2->signal->cred_guard_mutex); | 122 | &task2->signal->cred_guard_mutex); |
123 | if (ret) | 123 | if (ret) |
124 | goto err; | 124 | goto err; |
125 | if (!ptrace_may_access(task1, PTRACE_MODE_READ) || | 125 | if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || |
126 | !ptrace_may_access(task2, PTRACE_MODE_READ)) { | 126 | !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) { |
127 | ret = -EPERM; | 127 | ret = -EPERM; |
128 | goto err_unlock; | 128 | goto err_unlock; |
129 | } | 129 | } |
diff --git a/kernel/kexec.c b/kernel/kexec.c index d873b64fbddc..ee70aef5cd81 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -63,16 +63,16 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, | |||
63 | if (ret) | 63 | if (ret) |
64 | goto out_free_image; | 64 | goto out_free_image; |
65 | 65 | ||
66 | ret = sanity_check_segment_list(image); | ||
67 | if (ret) | ||
68 | goto out_free_image; | ||
69 | |||
70 | /* Enable the special crash kernel control page allocation policy. */ | ||
71 | if (kexec_on_panic) { | 66 | if (kexec_on_panic) { |
67 | /* Enable special crash kernel control page alloc policy. */ | ||
72 | image->control_page = crashk_res.start; | 68 | image->control_page = crashk_res.start; |
73 | image->type = KEXEC_TYPE_CRASH; | 69 | image->type = KEXEC_TYPE_CRASH; |
74 | } | 70 | } |
75 | 71 | ||
72 | ret = sanity_check_segment_list(image); | ||
73 | if (ret) | ||
74 | goto out_free_image; | ||
75 | |||
76 | /* | 76 | /* |
77 | * Find a location for the control code buffer, and add it | 77 | * Find a location for the control code buffer, and add it |
78 | * the vector of segments so that it's pages will also be | 78 | * the vector of segments so that it's pages will also be |
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index c823f3001e12..8dc659144869 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -310,12 +310,9 @@ static void kimage_free_pages(struct page *page) | |||
310 | 310 | ||
311 | void kimage_free_page_list(struct list_head *list) | 311 | void kimage_free_page_list(struct list_head *list) |
312 | { | 312 | { |
313 | struct list_head *pos, *next; | 313 | struct page *page, *next; |
314 | 314 | ||
315 | list_for_each_safe(pos, next, list) { | 315 | list_for_each_entry_safe(page, next, list, lru) { |
316 | struct page *page; | ||
317 | |||
318 | page = list_entry(pos, struct page, lru); | ||
319 | list_del(&page->lru); | 316 | list_del(&page->lru); |
320 | kimage_free_pages(page); | 317 | kimage_free_pages(page); |
321 | } | 318 | } |
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index b70ada0028d2..007b791f676d 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c | |||
@@ -109,11 +109,13 @@ int __weak arch_kimage_file_post_load_cleanup(struct kimage *image) | |||
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | 111 | ||
112 | #ifdef CONFIG_KEXEC_VERIFY_SIG | ||
112 | int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, | 113 | int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, |
113 | unsigned long buf_len) | 114 | unsigned long buf_len) |
114 | { | 115 | { |
115 | return -EKEYREJECTED; | 116 | return -EKEYREJECTED; |
116 | } | 117 | } |
118 | #endif | ||
117 | 119 | ||
118 | /* Apply relocations of type RELA */ | 120 | /* Apply relocations of type RELA */ |
119 | int __weak | 121 | int __weak |
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index e4392a698ad4..0a52315d9c62 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h | |||
@@ -15,6 +15,27 @@ int kimage_is_destination_range(struct kimage *image, | |||
15 | extern struct mutex kexec_mutex; | 15 | extern struct mutex kexec_mutex; |
16 | 16 | ||
17 | #ifdef CONFIG_KEXEC_FILE | 17 | #ifdef CONFIG_KEXEC_FILE |
18 | struct kexec_sha_region { | ||
19 | unsigned long start; | ||
20 | unsigned long len; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * Keeps track of buffer parameters as provided by caller for requesting | ||
25 | * memory placement of buffer. | ||
26 | */ | ||
27 | struct kexec_buf { | ||
28 | struct kimage *image; | ||
29 | char *buffer; | ||
30 | unsigned long bufsz; | ||
31 | unsigned long mem; | ||
32 | unsigned long memsz; | ||
33 | unsigned long buf_align; | ||
34 | unsigned long buf_min; | ||
35 | unsigned long buf_max; | ||
36 | bool top_down; /* allocate from top of memory hole */ | ||
37 | }; | ||
38 | |||
18 | void kimage_file_post_load_cleanup(struct kimage *image); | 39 | void kimage_file_post_load_cleanup(struct kimage *image); |
19 | #else /* CONFIG_KEXEC_FILE */ | 40 | #else /* CONFIG_KEXEC_FILE */ |
20 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } | 41 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } |
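struct kexec_buf bundles everything a segment loader passes when asking for a hole in memory: the backing buffer and its size, the in-memory size, and the alignment and address window the placement must respect. A hedged sketch of filling one in; the function name and policy choices are illustrative, and the walker that consumes the structure is not shown in this hunk:

#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include "kexec_internal.h"    /* struct kexec_buf now lives here */

static int demo_place_segment(struct kimage *image, char *buf,
                              unsigned long bufsz, unsigned long memsz)
{
    struct kexec_buf kbuf = {
        .image     = image,
        .buffer    = buf,
        .bufsz     = bufsz,
        .memsz     = memsz,
        .buf_align = PAGE_SIZE,    /* place on a page boundary */
        .buf_min   = 0,
        .buf_max   = ULONG_MAX,    /* anywhere in memory */
        .top_down  = true,         /* prefer the top of the hole */
    };

    /*
     * Hand &kbuf to the memory-hole walker here; on success it fills
     * kbuf.mem with the chosen load address.
     */
    pr_debug("segment wants %lu bytes aligned to %lu\n",
             kbuf.memsz, kbuf.buf_align);
    return 0;
}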
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index e79439134978..c963ba534a78 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -233,7 +233,11 @@ struct printk_log { | |||
233 | u8 facility; /* syslog facility */ | 233 | u8 facility; /* syslog facility */ |
234 | u8 flags:5; /* internal record flags */ | 234 | u8 flags:5; /* internal record flags */ |
235 | u8 level:3; /* syslog level */ | 235 | u8 level:3; /* syslog level */ |
236 | }; | 236 | } |
237 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
238 | __packed __aligned(4) | ||
239 | #endif | ||
240 | ; | ||
237 | 241 | ||
238 | /* | 242 | /* |
239 | * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken | 243 | * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken |
@@ -274,11 +278,7 @@ static u32 clear_idx; | |||
274 | #define LOG_FACILITY(v) ((v) >> 3 & 0xff) | 278 | #define LOG_FACILITY(v) ((v) >> 3 & 0xff) |
275 | 279 | ||
276 | /* record buffer */ | 280 | /* record buffer */ |
277 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
278 | #define LOG_ALIGN 4 | ||
279 | #else | ||
280 | #define LOG_ALIGN __alignof__(struct printk_log) | 281 | #define LOG_ALIGN __alignof__(struct printk_log) |
281 | #endif | ||
282 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) | 282 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) |
283 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); | 283 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); |
284 | static char *log_buf = __log_buf; | 284 | static char *log_buf = __log_buf; |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index b760bae64cf1..2341efe7fe02 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) | |||
219 | static int __ptrace_may_access(struct task_struct *task, unsigned int mode) | 219 | static int __ptrace_may_access(struct task_struct *task, unsigned int mode) |
220 | { | 220 | { |
221 | const struct cred *cred = current_cred(), *tcred; | 221 | const struct cred *cred = current_cred(), *tcred; |
222 | int dumpable = 0; | ||
223 | kuid_t caller_uid; | ||
224 | kgid_t caller_gid; | ||
225 | |||
226 | if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) { | ||
227 | WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n"); | ||
228 | return -EPERM; | ||
229 | } | ||
222 | 230 | ||
223 | /* May we inspect the given task? | 231 | /* May we inspect the given task? |
224 | * This check is used both for attaching with ptrace | 232 | * This check is used both for attaching with ptrace |
@@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
228 | * because setting up the necessary parent/child relationship | 236 | * because setting up the necessary parent/child relationship |
229 | * or halting the specified task is impossible. | 237 | * or halting the specified task is impossible. |
230 | */ | 238 | */ |
231 | int dumpable = 0; | 239 | |
232 | /* Don't let security modules deny introspection */ | 240 | /* Don't let security modules deny introspection */ |
233 | if (same_thread_group(task, current)) | 241 | if (same_thread_group(task, current)) |
234 | return 0; | 242 | return 0; |
235 | rcu_read_lock(); | 243 | rcu_read_lock(); |
244 | if (mode & PTRACE_MODE_FSCREDS) { | ||
245 | caller_uid = cred->fsuid; | ||
246 | caller_gid = cred->fsgid; | ||
247 | } else { | ||
248 | /* | ||
249 | * Using the euid would make more sense here, but something | ||
250 | * in userland might rely on the old behavior, and this | ||
251 | * shouldn't be a security problem since | ||
252 | * PTRACE_MODE_REALCREDS implies that the caller explicitly | ||
253 | * used a syscall that requests access to another process | ||
254 | * (and not a filesystem syscall to procfs). | ||
255 | */ | ||
256 | caller_uid = cred->uid; | ||
257 | caller_gid = cred->gid; | ||
258 | } | ||
236 | tcred = __task_cred(task); | 259 | tcred = __task_cred(task); |
237 | if (uid_eq(cred->uid, tcred->euid) && | 260 | if (uid_eq(caller_uid, tcred->euid) && |
238 | uid_eq(cred->uid, tcred->suid) && | 261 | uid_eq(caller_uid, tcred->suid) && |
239 | uid_eq(cred->uid, tcred->uid) && | 262 | uid_eq(caller_uid, tcred->uid) && |
240 | gid_eq(cred->gid, tcred->egid) && | 263 | gid_eq(caller_gid, tcred->egid) && |
241 | gid_eq(cred->gid, tcred->sgid) && | 264 | gid_eq(caller_gid, tcred->sgid) && |
242 | gid_eq(cred->gid, tcred->gid)) | 265 | gid_eq(caller_gid, tcred->gid)) |
243 | goto ok; | 266 | goto ok; |
244 | if (ptrace_has_cap(tcred->user_ns, mode)) | 267 | if (ptrace_has_cap(tcred->user_ns, mode)) |
245 | goto ok; | 268 | goto ok; |
@@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request, | |||
306 | goto out; | 329 | goto out; |
307 | 330 | ||
308 | task_lock(task); | 331 | task_lock(task); |
309 | retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); | 332 | retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); |
310 | task_unlock(task); | 333 | task_unlock(task); |
311 | if (retval) | 334 | if (retval) |
312 | goto unlock_creds; | 335 | goto unlock_creds; |
@@ -364,8 +387,14 @@ unlock_creds: | |||
364 | mutex_unlock(&task->signal->cred_guard_mutex); | 387 | mutex_unlock(&task->signal->cred_guard_mutex); |
365 | out: | 388 | out: |
366 | if (!retval) { | 389 | if (!retval) { |
367 | wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, | 390 | /* |
368 | TASK_UNINTERRUPTIBLE); | 391 | * We do not bother to change retval or clear JOBCTL_TRAPPING |
392 | * if wait_on_bit() was interrupted by SIGKILL. The tracer will | ||
393 | * not return to user-mode, it will exit and clear this bit in | ||
394 | * __ptrace_unlink() if it wasn't already cleared by the tracee; | ||
395 | * and until then nobody can ptrace this task. | ||
396 | */ | ||
397 | wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE); | ||
369 | proc_ptrace_connector(task, PTRACE_ATTACH); | 398 | proc_ptrace_connector(task, PTRACE_ATTACH); |
370 | } | 399 | } |
371 | 400 | ||
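Every ptrace_may_access() caller must now say which credentials the check uses: the *_REALCREDS modes for syscalls that act on another process directly (as in the kcmp, futex and perf changes above), and the *_FSCREDS modes for filesystem-style access such as procfs files. A hedged sketch of the two conventions; both functions are illustrative, not code from this patch:

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* procfs-like access path: check against the caller's fsuid/fsgid. */
static int demo_proc_style_check(struct task_struct *task)
{
    return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) ? 0 : -EACCES;
}

/* Direct syscall aimed at another process: check against the real uid/gid. */
static int demo_syscall_style_check(struct task_struct *task)
{
    return ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS) ? 0 : -EACCES;
}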
diff --git a/kernel/sys.c b/kernel/sys.c index 6af9212ab5aa..78947de6f969 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1853,11 +1853,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data | |||
1853 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; | 1853 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; |
1854 | } | 1854 | } |
1855 | 1855 | ||
1856 | if (prctl_map.exe_fd != (u32)-1) | 1856 | if (prctl_map.exe_fd != (u32)-1) { |
1857 | error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); | 1857 | error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); |
1858 | down_read(&mm->mmap_sem); | 1858 | if (error) |
1859 | if (error) | 1859 | return error; |
1860 | goto out; | 1860 | } |
1861 | |||
1862 | down_write(&mm->mmap_sem); | ||
1861 | 1863 | ||
1862 | /* | 1864 | /* |
1863 | * We don't validate if these members are pointing to | 1865 | * We don't validate if these members are pointing to |
@@ -1894,10 +1896,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data | |||
1894 | if (prctl_map.auxv_size) | 1896 | if (prctl_map.auxv_size) |
1895 | memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv)); | 1897 | memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv)); |
1896 | 1898 | ||
1897 | error = 0; | 1899 | up_write(&mm->mmap_sem); |
1898 | out: | 1900 | return 0; |
1899 | up_read(&mm->mmap_sem); | ||
1900 | return error; | ||
1901 | } | 1901 | } |
1902 | #endif /* CONFIG_CHECKPOINT_RESTORE */ | 1902 | #endif /* CONFIG_CHECKPOINT_RESTORE */ |
1903 | 1903 | ||
@@ -1963,7 +1963,7 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
1963 | 1963 | ||
1964 | error = -EINVAL; | 1964 | error = -EINVAL; |
1965 | 1965 | ||
1966 | down_read(&mm->mmap_sem); | 1966 | down_write(&mm->mmap_sem); |
1967 | vma = find_vma(mm, addr); | 1967 | vma = find_vma(mm, addr); |
1968 | 1968 | ||
1969 | prctl_map.start_code = mm->start_code; | 1969 | prctl_map.start_code = mm->start_code; |
@@ -2056,7 +2056,7 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
2056 | 2056 | ||
2057 | error = 0; | 2057 | error = 0; |
2058 | out: | 2058 | out: |
2059 | up_read(&mm->mmap_sem); | 2059 | up_write(&mm->mmap_sem); |
2060 | return error; | 2060 | return error; |
2061 | } | 2061 | } |
2062 | 2062 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c810f8afdb7f..91420362e0b3 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -173,7 +173,7 @@ extern int no_unaligned_warning; | |||
173 | #define SYSCTL_WRITES_WARN 0 | 173 | #define SYSCTL_WRITES_WARN 0 |
174 | #define SYSCTL_WRITES_STRICT 1 | 174 | #define SYSCTL_WRITES_STRICT 1 |
175 | 175 | ||
176 | static int sysctl_writes_strict = SYSCTL_WRITES_WARN; | 176 | static int sysctl_writes_strict = SYSCTL_WRITES_STRICT; |
177 | 177 | ||
178 | static int proc_do_cad_pid(struct ctl_table *table, int write, | 178 | static int proc_do_cad_pid(struct ctl_table *table, int write, |
179 | void __user *buffer, size_t *lenp, loff_t *ppos); | 179 | void __user *buffer, size_t *lenp, loff_t *ppos); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7d0b49c536c5..ecb9e75614bf 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1893,6 +1893,8 @@ source "samples/Kconfig" | |||
1893 | 1893 | ||
1894 | source "lib/Kconfig.kgdb" | 1894 | source "lib/Kconfig.kgdb" |
1895 | 1895 | ||
1896 | source "lib/Kconfig.ubsan" | ||
1897 | |||
1896 | config ARCH_HAS_DEVMEM_IS_ALLOWED | 1898 | config ARCH_HAS_DEVMEM_IS_ALLOWED |
1897 | bool | 1899 | bool |
1898 | 1900 | ||
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan new file mode 100644 index 000000000000..49518fb48cab --- /dev/null +++ b/lib/Kconfig.ubsan | |||
@@ -0,0 +1,29 @@ | |||
1 | config ARCH_HAS_UBSAN_SANITIZE_ALL | ||
2 | bool | ||
3 | |||
4 | config UBSAN | ||
5 | bool "Undefined behaviour sanity checker" | ||
6 | help | ||
7 | This option enables undefined behaviour sanity checker | ||
8 | Compile-time instrumentation is used to detect various undefined | ||
9 | behaviours in runtime. Various types of checks may be enabled | ||
10 | via boot parameter ubsan_handle (see: Documentation/ubsan.txt). | ||
11 | |||
12 | config UBSAN_SANITIZE_ALL | ||
13 | bool "Enable instrumentation for the entire kernel" | ||
14 | depends on UBSAN | ||
15 | depends on ARCH_HAS_UBSAN_SANITIZE_ALL | ||
16 | default y | ||
17 | help | ||
18 | This option activates instrumentation for the entire kernel. | ||
19 | If you don't enable this option, you have to explicitly specify | ||
20 | UBSAN_SANITIZE := y for the files/directories you want to check for UB. | ||
21 | |||
22 | config UBSAN_ALIGNMENT | ||
23 | bool "Enable checking of pointers alignment" | ||
24 | depends on UBSAN | ||
25 | default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
26 | help | ||
27 | This option enables detection of unaligned memory accesses. | ||
28 | Enabling this option on architectures that support unaligned | ||
29 | accesses may produce a lot of false positives. | ||
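UBSAN works by letting the compiler instrument operations whose behaviour the C standard leaves undefined and calling into lib/ubsan.c when one actually occurs at runtime; with UBSAN_SANITIZE_ALL off, individual objects are opted in from their Makefile as the help text above describes. A hedged sketch of the kind of code an instrumented build reports; the function is illustrative only:

#include <linux/kernel.h>

/*
 * With the containing object instrumented, calling this with a == INT_MAX
 * overflows a signed int, and the kernel prints a "UBSAN: Undefined
 * behaviour in <file>:<line>" report via __ubsan_handle_add_overflow().
 */
static int demo_signed_add(int a)
{
    return a + 1;
}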
diff --git a/lib/Makefile b/lib/Makefile index 180dd4d0dd41..2d4bc33d09b4 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -31,7 +31,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | |||
31 | obj-y += string_helpers.o | 31 | obj-y += string_helpers.o |
32 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 32 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
33 | obj-y += hexdump.o | 33 | obj-y += hexdump.o |
34 | obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o | 34 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o |
35 | obj-y += kstrtox.o | 35 | obj-y += kstrtox.o |
36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | 36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o |
37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | 37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o |
@@ -154,7 +154,7 @@ obj-$(CONFIG_GLOB) += glob.o | |||
154 | obj-$(CONFIG_MPILIB) += mpi/ | 154 | obj-$(CONFIG_MPILIB) += mpi/ |
155 | obj-$(CONFIG_SIGNATURE) += digsig.o | 155 | obj-$(CONFIG_SIGNATURE) += digsig.o |
156 | 156 | ||
157 | obj-$(CONFIG_CLZ_TAB) += clz_tab.o | 157 | lib-$(CONFIG_CLZ_TAB) += clz_tab.o |
158 | 158 | ||
159 | obj-$(CONFIG_DDR) += jedec_ddr_data.o | 159 | obj-$(CONFIG_DDR) += jedec_ddr_data.o |
160 | 160 | ||
@@ -209,3 +209,6 @@ quiet_cmd_build_OID_registry = GEN $@ | |||
209 | clean-files += oid_registry_data.c | 209 | clean-files += oid_registry_data.c |
210 | 210 | ||
211 | obj-$(CONFIG_UCS2_STRING) += ucs2_string.o | 211 | obj-$(CONFIG_UCS2_STRING) += ucs2_string.o |
212 | obj-$(CONFIG_UBSAN) += ubsan.o | ||
213 | |||
214 | UBSAN_SANITIZE_ubsan.o := n | ||
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 4527e751b5e0..b8f1d6cbb200 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c | |||
@@ -42,6 +42,27 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, | |||
42 | EXPORT_SYMBOL_GPL(__iowrite32_copy); | 42 | EXPORT_SYMBOL_GPL(__iowrite32_copy); |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * __ioread32_copy - copy data from MMIO space, in 32-bit units | ||
46 | * @to: destination (must be 32-bit aligned) | ||
47 | * @from: source, in MMIO space (must be 32-bit aligned) | ||
48 | * @count: number of 32-bit quantities to copy | ||
49 | * | ||
50 | * Copy data from MMIO space to kernel space, in units of 32 bits at a | ||
51 | * time. Order of access is not guaranteed, nor is a memory barrier | ||
52 | * performed afterwards. | ||
53 | */ | ||
54 | void __ioread32_copy(void *to, const void __iomem *from, size_t count) | ||
55 | { | ||
56 | u32 *dst = to; | ||
57 | const u32 __iomem *src = from; | ||
58 | const u32 __iomem *end = src + count; | ||
59 | |||
60 | while (src < end) | ||
61 | *dst++ = __raw_readl(src++); | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(__ioread32_copy); | ||
64 | |||
65 | /** | ||
45 | * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units | 66 | * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units |
46 | * @to: destination, in MMIO space (must be 64-bit aligned) | 67 | * @to: destination, in MMIO space (must be 64-bit aligned) |
47 | * @from: source (must be 64-bit aligned) | 68 | * @from: source (must be 64-bit aligned) |
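__ioread32_copy() is the read-side counterpart of __iowrite32_copy(), for device memory that must be accessed in aligned 32-bit units (memcpy_fromio() may legally fall back to narrower accesses). A hedged sketch of a driver draining such a window; the device, lengths and field names are illustrative:

#include <linux/io.h>
#include <linux/kernel.h>

struct demo_msg {
    u32 words[16];
};

static void demo_read_mailbox(void __iomem *mbox, struct demo_msg *msg)
{
    /*
     * The mailbox only tolerates aligned 32-bit reads, so copy it out
     * word by word instead of using memcpy_fromio().
     */
    __ioread32_copy(msg->words, mbox, ARRAY_SIZE(msg->words));
}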
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 6a08ce7d6adc..31ce853fbfb1 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/crc32c.h> | ||
39 | 40 | ||
40 | static struct crypto_shash *tfm; | 41 | static struct crypto_shash *tfm; |
41 | 42 | ||
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 5939f63d90cd..5c88204b6f1f 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
@@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, | |||
43 | [STRING_UNITS_10] = 1000, | 43 | [STRING_UNITS_10] = 1000, |
44 | [STRING_UNITS_2] = 1024, | 44 | [STRING_UNITS_2] = 1024, |
45 | }; | 45 | }; |
46 | int i, j; | 46 | static const unsigned int rounding[] = { 500, 50, 5 }; |
47 | u32 remainder = 0, sf_cap, exp; | 47 | int i = 0, j; |
48 | u32 remainder = 0, sf_cap; | ||
48 | char tmp[8]; | 49 | char tmp[8]; |
49 | const char *unit; | 50 | const char *unit; |
50 | 51 | ||
51 | tmp[0] = '\0'; | 52 | tmp[0] = '\0'; |
52 | i = 0; | 53 | |
53 | if (!size) | 54 | if (blk_size == 0) |
55 | size = 0; | ||
56 | if (size == 0) | ||
54 | goto out; | 57 | goto out; |
55 | 58 | ||
56 | while (blk_size >= divisor[units]) { | 59 | /* This is Napier's algorithm. Reduce the original block size to |
57 | remainder = do_div(blk_size, divisor[units]); | 60 | * |
61 | * coefficient * divisor[units]^i | ||
62 | * | ||
63 | * we do the reduction so both coefficients are just under 32 bits so | ||
64 | * that multiplying them together won't overflow 64 bits and we keep | ||
65 | * as much precision as possible in the numbers. | ||
66 | * | ||
67 | * Note: it's safe to throw away the remainders here because all the | ||
68 | * precision is in the coefficients. | ||
69 | */ | ||
70 | while (blk_size >> 32) { | ||
71 | do_div(blk_size, divisor[units]); | ||
58 | i++; | 72 | i++; |
59 | } | 73 | } |
60 | 74 | ||
61 | exp = divisor[units] / (u32)blk_size; | 75 | while (size >> 32) { |
62 | /* | 76 | do_div(size, divisor[units]); |
63 | * size must be strictly greater than exp here to ensure that remainder | ||
64 | * is greater than divisor[units] coming out of the if below. | ||
65 | */ | ||
66 | if (size > exp) { | ||
67 | remainder = do_div(size, divisor[units]); | ||
68 | remainder *= blk_size; | ||
69 | i++; | 77 | i++; |
70 | } else { | ||
71 | remainder *= size; | ||
72 | } | 78 | } |
73 | 79 | ||
80 | /* now perform the actual multiplication keeping i as the sum of the | ||
81 | * two logarithms */ | ||
74 | size *= blk_size; | 82 | size *= blk_size; |
75 | size += remainder / divisor[units]; | ||
76 | remainder %= divisor[units]; | ||
77 | 83 | ||
84 | /* and logarithmically reduce it until it's just under the divisor */ | ||
78 | while (size >= divisor[units]) { | 85 | while (size >= divisor[units]) { |
79 | remainder = do_div(size, divisor[units]); | 86 | remainder = do_div(size, divisor[units]); |
80 | i++; | 87 | i++; |
81 | } | 88 | } |
82 | 89 | ||
90 | /* work out in j how many digits of precision we need from the | ||
91 | * remainder */ | ||
83 | sf_cap = size; | 92 | sf_cap = size; |
84 | for (j = 0; sf_cap*10 < 1000; j++) | 93 | for (j = 0; sf_cap*10 < 1000; j++) |
85 | sf_cap *= 10; | 94 | sf_cap *= 10; |
86 | 95 | ||
87 | if (j) { | 96 | if (units == STRING_UNITS_2) { |
97 | /* express the remainder as a decimal. It's currently the | ||
98 | * numerator of a fraction whose denominator is | ||
99 | * divisor[units], which is 1 << 10 for STRING_UNITS_2 */ | ||
88 | remainder *= 1000; | 100 | remainder *= 1000; |
89 | remainder /= divisor[units]; | 101 | remainder >>= 10; |
102 | } | ||
103 | |||
104 | /* add a 5 to the digit below what will be printed to ensure | ||
105 | * an arithmetical round up and carry it through to size */ | ||
106 | remainder += rounding[j]; | ||
107 | if (remainder >= 1000) { | ||
108 | remainder -= 1000; | ||
109 | size += 1; | ||
110 | } | ||
111 | |||
112 | if (j) { | ||
90 | snprintf(tmp, sizeof(tmp), ".%03u", remainder); | 113 | snprintf(tmp, sizeof(tmp), ".%03u", remainder); |
91 | tmp[j+1] = '\0'; | 114 | tmp[j+1] = '\0'; |
92 | } | 115 | } |
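Two worked examples of the new arithmetic, assuming STRING_UNITS_2 (divisor 1024): 8192 blocks of 512 bytes multiply out to exactly 4 MiB, so the result is "4.00 MiB"; 2097151 one-byte blocks (one byte short of 2 MiB) reduce to size = 1 with remainder 1023, and the added rounding step (999 + 5 >= 1000) carries into the integer part, printing "2.00 MiB" instead of a truncated "1.99 MiB". A short sketch of the corresponding calls:

#include <linux/string_helpers.h>

static void demo_string_get_size(void)
{
    char buf[16];

    /* 8192 * 512 bytes = exactly 4 MiB -> "4.00 MiB" */
    string_get_size(8192, 512, STRING_UNITS_2, buf, sizeof(buf));

    /* 2 MiB - 1 byte: the rounding carries up -> "2.00 MiB" */
    string_get_size(2097151, 1, STRING_UNITS_2, buf, sizeof(buf));
}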
diff --git a/lib/test-hexdump.c b/lib/test_hexdump.c index 5241df36eedf..3f415d8101f3 100644 --- a/lib/test-hexdump.c +++ b/lib/test_hexdump.c | |||
@@ -42,19 +42,21 @@ static const char * const test_data_8_le[] __initconst = { | |||
42 | "e9ac0f9cad319ca6", "0cafb1439919d14c", | 42 | "e9ac0f9cad319ca6", "0cafb1439919d14c", |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static void __init test_hexdump(size_t len, int rowsize, int groupsize, | 45 | #define FILL_CHAR '#' |
46 | bool ascii) | 46 | |
47 | static unsigned total_tests __initdata; | ||
48 | static unsigned failed_tests __initdata; | ||
49 | |||
50 | static void __init test_hexdump_prepare_test(size_t len, int rowsize, | ||
51 | int groupsize, char *test, | ||
52 | size_t testlen, bool ascii) | ||
47 | { | 53 | { |
48 | char test[32 * 3 + 2 + 32 + 1]; | ||
49 | char real[32 * 3 + 2 + 32 + 1]; | ||
50 | char *p; | 54 | char *p; |
51 | const char * const *result; | 55 | const char * const *result; |
52 | size_t l = len; | 56 | size_t l = len; |
53 | int gs = groupsize, rs = rowsize; | 57 | int gs = groupsize, rs = rowsize; |
54 | unsigned int i; | 58 | unsigned int i; |
55 | 59 | ||
56 | hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii); | ||
57 | |||
58 | if (rs != 16 && rs != 32) | 60 | if (rs != 16 && rs != 32) |
59 | rs = 16; | 61 | rs = 16; |
60 | 62 | ||
@@ -73,8 +75,6 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, | |||
73 | else | 75 | else |
74 | result = test_data_1_le; | 76 | result = test_data_1_le; |
75 | 77 | ||
76 | memset(test, ' ', sizeof(test)); | ||
77 | |||
78 | /* hex dump */ | 78 | /* hex dump */ |
79 | p = test; | 79 | p = test; |
80 | for (i = 0; i < l / gs; i++) { | 80 | for (i = 0; i < l / gs; i++) { |
@@ -82,24 +82,49 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, | |||
82 | size_t amount = strlen(q); | 82 | size_t amount = strlen(q); |
83 | 83 | ||
84 | strncpy(p, q, amount); | 84 | strncpy(p, q, amount); |
85 | p += amount + 1; | 85 | p += amount; |
86 | |||
87 | *p++ = ' '; | ||
86 | } | 88 | } |
87 | if (i) | 89 | if (i) |
88 | p--; | 90 | p--; |
89 | 91 | ||
90 | /* ASCII part */ | 92 | /* ASCII part */ |
91 | if (ascii) { | 93 | if (ascii) { |
92 | p = test + rs * 2 + rs / gs + 1; | 94 | do { |
95 | *p++ = ' '; | ||
96 | } while (p < test + rs * 2 + rs / gs + 1); | ||
97 | |||
93 | strncpy(p, data_a, l); | 98 | strncpy(p, data_a, l); |
94 | p += l; | 99 | p += l; |
95 | } | 100 | } |
96 | 101 | ||
97 | *p = '\0'; | 102 | *p = '\0'; |
103 | } | ||
98 | 104 | ||
99 | if (strcmp(test, real)) { | 105 | #define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1) |
106 | |||
107 | static void __init test_hexdump(size_t len, int rowsize, int groupsize, | ||
108 | bool ascii) | ||
109 | { | ||
110 | char test[TEST_HEXDUMP_BUF_SIZE]; | ||
111 | char real[TEST_HEXDUMP_BUF_SIZE]; | ||
112 | |||
113 | total_tests++; | ||
114 | |||
115 | memset(real, FILL_CHAR, sizeof(real)); | ||
116 | hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real), | ||
117 | ascii); | ||
118 | |||
119 | memset(test, FILL_CHAR, sizeof(test)); | ||
120 | test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test), | ||
121 | ascii); | ||
122 | |||
123 | if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) { | ||
100 | pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); | 124 | pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); |
101 | pr_err("Result: '%s'\n", real); | 125 | pr_err("Result: '%s'\n", real); |
102 | pr_err("Expect: '%s'\n", test); | 126 | pr_err("Expect: '%s'\n", test); |
127 | failed_tests++; | ||
103 | } | 128 | } |
104 | } | 129 | } |
105 | 130 | ||
@@ -114,52 +139,72 @@ static void __init test_hexdump_set(int rowsize, bool ascii) | |||
114 | test_hexdump(len, rowsize, 1, ascii); | 139 | test_hexdump(len, rowsize, 1, ascii); |
115 | } | 140 | } |
116 | 141 | ||
117 | static void __init test_hexdump_overflow(bool ascii) | 142 | static void __init test_hexdump_overflow(size_t buflen, size_t len, |
143 | int rowsize, int groupsize, | ||
144 | bool ascii) | ||
118 | { | 145 | { |
119 | char buf[56]; | 146 | char test[TEST_HEXDUMP_BUF_SIZE]; |
120 | const char *t = test_data_1_le[0]; | 147 | char buf[TEST_HEXDUMP_BUF_SIZE]; |
121 | size_t l = get_random_int() % sizeof(buf); | 148 | int rs = rowsize, gs = groupsize; |
149 | int ae, he, e, f, r; | ||
122 | bool a; | 150 | bool a; |
123 | int e, r; | ||
124 | 151 | ||
125 | memset(buf, ' ', sizeof(buf)); | 152 | total_tests++; |
153 | |||
154 | memset(buf, FILL_CHAR, sizeof(buf)); | ||
126 | 155 | ||
127 | r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii); | 156 | r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii); |
157 | |||
158 | /* | ||
159 | * Caller must provide the data length multiple of groupsize. The | ||
160 | * calculations below are made with that assumption in mind. | ||
161 | */ | ||
162 | ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */; | ||
163 | he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */; | ||
128 | 164 | ||
129 | if (ascii) | 165 | if (ascii) |
130 | e = 50; | 166 | e = ae; |
131 | else | 167 | else |
132 | e = 2; | 168 | e = he; |
133 | buf[e + 2] = '\0'; | 169 | |
134 | 170 | f = min_t(int, e + 1, buflen); | |
135 | if (!l) { | 171 | if (buflen) { |
136 | a = r == e && buf[0] == ' '; | 172 | test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii); |
137 | } else if (l < 3) { | 173 | test[f - 1] = '\0'; |
138 | a = r == e && buf[0] == '\0'; | ||
139 | } else if (l < 4) { | ||
140 | a = r == e && !strcmp(buf, t); | ||
141 | } else if (ascii) { | ||
142 | if (l < 51) | ||
143 | a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' '; | ||
144 | else | ||
145 | a = r == e && buf[50] == '\0' && buf[49] == '.'; | ||
146 | } else { | ||
147 | a = r == e && buf[e] == '\0'; | ||
148 | } | 174 | } |
175 | memset(test + f, FILL_CHAR, sizeof(test) - f); | ||
176 | |||
177 | a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE); | ||
178 | |||
179 | buf[sizeof(buf) - 1] = '\0'; | ||
149 | 180 | ||
150 | if (!a) { | 181 | if (!a) { |
151 | pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf)); | 182 | pr_err("Len: %zu buflen: %zu strlen: %zu\n", |
152 | pr_err("Result: '%s'\n", buf); | 183 | len, buflen, strnlen(buf, sizeof(buf))); |
184 | pr_err("Result: %d '%s'\n", r, buf); | ||
185 | pr_err("Expect: %d '%s'\n", e, test); | ||
186 | failed_tests++; | ||
153 | } | 187 | } |
154 | } | 188 | } |
155 | 189 | ||
190 | static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) | ||
191 | { | ||
192 | unsigned int i = 0; | ||
193 | int rs = (get_random_int() % 2 + 1) * 16; | ||
194 | |||
195 | do { | ||
196 | int gs = 1 << i; | ||
197 | size_t len = get_random_int() % rs + gs; | ||
198 | |||
199 | test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); | ||
200 | } while (i++ < 3); | ||
201 | } | ||
202 | |||
156 | static int __init test_hexdump_init(void) | 203 | static int __init test_hexdump_init(void) |
157 | { | 204 | { |
158 | unsigned int i; | 205 | unsigned int i; |
159 | int rowsize; | 206 | int rowsize; |
160 | 207 | ||
161 | pr_info("Running tests...\n"); | ||
162 | |||
163 | rowsize = (get_random_int() % 2 + 1) * 16; | 208 | rowsize = (get_random_int() % 2 + 1) * 16; |
164 | for (i = 0; i < 16; i++) | 209 | for (i = 0; i < 16; i++) |
165 | test_hexdump_set(rowsize, false); | 210 | test_hexdump_set(rowsize, false); |
@@ -168,13 +213,26 @@ static int __init test_hexdump_init(void) | |||
168 | for (i = 0; i < 16; i++) | 213 | for (i = 0; i < 16; i++) |
169 | test_hexdump_set(rowsize, true); | 214 | test_hexdump_set(rowsize, true); |
170 | 215 | ||
171 | for (i = 0; i < 16; i++) | 216 | for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) |
172 | test_hexdump_overflow(false); | 217 | test_hexdump_overflow_set(i, false); |
173 | 218 | ||
174 | for (i = 0; i < 16; i++) | 219 | for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) |
175 | test_hexdump_overflow(true); | 220 | test_hexdump_overflow_set(i, true); |
221 | |||
222 | if (failed_tests == 0) | ||
223 | pr_info("all %u tests passed\n", total_tests); | ||
224 | else | ||
225 | pr_err("failed %u out of %u tests\n", failed_tests, total_tests); | ||
176 | 226 | ||
177 | return -EINVAL; | 227 | return failed_tests ? -EINVAL : 0; |
178 | } | 228 | } |
179 | module_init(test_hexdump_init); | 229 | module_init(test_hexdump_init); |
230 | |||
231 | static void __exit test_hexdump_exit(void) | ||
232 | { | ||
233 | /* do nothing */ | ||
234 | } | ||
235 | module_exit(test_hexdump_exit); | ||
236 | |||
237 | MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); | ||
180 | MODULE_LICENSE("Dual BSD/GPL"); | 238 | MODULE_LICENSE("Dual BSD/GPL"); |
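A worked instance of the expected-length arithmetic used by test_hexdump_overflow(), taking rowsize 16, groupsize 1 and len 4 as an example:

    he = (1 * 2 + 1) * 4 / 1 - 1 = 11    ("xx xx xx xx", no trailing space)
    ae = 16 * 2 + 16 / 1 + 1 + 4 = 53    (full hex row, spaces, then 4 ASCII chars)

so the test expects hex_dump_to_buffer() to return 11 (or 53 with ASCII) regardless of buflen, while the buffer contents are compared against the reference string truncated to buflen - 1 characters plus the terminating NUL.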
diff --git a/lib/ubsan.c b/lib/ubsan.c new file mode 100644 index 000000000000..8799ae5e2e42 --- /dev/null +++ b/lib/ubsan.c | |||
@@ -0,0 +1,456 @@ | |||
1 | /* | ||
2 | * UBSAN error reporting functions | ||
3 | * | ||
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/bug.h> | ||
15 | #include <linux/ctype.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/sched.h> | ||
20 | |||
21 | #include "ubsan.h" | ||
22 | |||
23 | const char *type_check_kinds[] = { | ||
24 | "load of", | ||
25 | "store to", | ||
26 | "reference binding to", | ||
27 | "member access within", | ||
28 | "member call on", | ||
29 | "constructor call on", | ||
30 | "downcast of", | ||
31 | "downcast of" | ||
32 | }; | ||
33 | |||
34 | #define REPORTED_BIT 31 | ||
35 | |||
36 | #if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) | ||
37 | #define COLUMN_MASK (~(1U << REPORTED_BIT)) | ||
38 | #define LINE_MASK (~0U) | ||
39 | #else | ||
40 | #define COLUMN_MASK (~0U) | ||
41 | #define LINE_MASK (~(1U << REPORTED_BIT)) | ||
42 | #endif | ||
43 | |||
44 | #define VALUE_LENGTH 40 | ||
45 | |||
46 | static bool was_reported(struct source_location *location) | ||
47 | { | ||
48 | return test_and_set_bit(REPORTED_BIT, &location->reported); | ||
49 | } | ||
50 | |||
51 | static void print_source_location(const char *prefix, | ||
52 | struct source_location *loc) | ||
53 | { | ||
54 | pr_err("%s %s:%d:%d\n", prefix, loc->file_name, | ||
55 | loc->line & LINE_MASK, loc->column & COLUMN_MASK); | ||
56 | } | ||
57 | |||
58 | static bool suppress_report(struct source_location *loc) | ||
59 | { | ||
60 | return current->in_ubsan || was_reported(loc); | ||
61 | } | ||
62 | |||
63 | static bool type_is_int(struct type_descriptor *type) | ||
64 | { | ||
65 | return type->type_kind == type_kind_int; | ||
66 | } | ||
67 | |||
68 | static bool type_is_signed(struct type_descriptor *type) | ||
69 | { | ||
70 | WARN_ON(!type_is_int(type)); | ||
71 | return type->type_info & 1; | ||
72 | } | ||
73 | |||
74 | static unsigned type_bit_width(struct type_descriptor *type) | ||
75 | { | ||
76 | return 1 << (type->type_info >> 1); | ||
77 | } | ||
78 | |||
79 | static bool is_inline_int(struct type_descriptor *type) | ||
80 | { | ||
81 | unsigned inline_bits = sizeof(unsigned long)*8; | ||
82 | unsigned bits = type_bit_width(type); | ||
83 | |||
84 | WARN_ON(!type_is_int(type)); | ||
85 | |||
86 | return bits <= inline_bits; | ||
87 | } | ||
88 | |||
89 | static s_max get_signed_val(struct type_descriptor *type, unsigned long val) | ||
90 | { | ||
91 | if (is_inline_int(type)) { | ||
92 | unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); | ||
93 | return ((s_max)val) << extra_bits >> extra_bits; | ||
94 | } | ||
95 | |||
96 | if (type_bit_width(type) == 64) | ||
97 | return *(s64 *)val; | ||
98 | |||
99 | return *(s_max *)val; | ||
100 | } | ||
101 | |||
102 | static bool val_is_negative(struct type_descriptor *type, unsigned long val) | ||
103 | { | ||
104 | return type_is_signed(type) && get_signed_val(type, val) < 0; | ||
105 | } | ||
106 | |||
107 | static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) | ||
108 | { | ||
109 | if (is_inline_int(type)) | ||
110 | return val; | ||
111 | |||
112 | if (type_bit_width(type) == 64) | ||
113 | return *(u64 *)val; | ||
114 | |||
115 | return *(u_max *)val; | ||
116 | } | ||
117 | |||
118 | static void val_to_string(char *str, size_t size, struct type_descriptor *type, | ||
119 | unsigned long value) | ||
120 | { | ||
121 | if (type_is_int(type)) { | ||
122 | if (type_bit_width(type) == 128) { | ||
123 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
124 | u_max val = get_unsigned_val(type, value); | ||
125 | |||
126 | scnprintf(str, size, "0x%08x%08x%08x%08x", | ||
127 | (u32)(val >> 96), | ||
128 | (u32)(val >> 64), | ||
129 | (u32)(val >> 32), | ||
130 | (u32)(val)); | ||
131 | #else | ||
132 | WARN_ON(1); | ||
133 | #endif | ||
134 | } else if (type_is_signed(type)) { | ||
135 | scnprintf(str, size, "%lld", | ||
136 | (s64)get_signed_val(type, value)); | ||
137 | } else { | ||
138 | scnprintf(str, size, "%llu", | ||
139 | (u64)get_unsigned_val(type, value)); | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static bool location_is_valid(struct source_location *loc) | ||
145 | { | ||
146 | return loc->file_name != NULL; | ||
147 | } | ||
148 | |||
149 | static DEFINE_SPINLOCK(report_lock); | ||
150 | |||
151 | static void ubsan_prologue(struct source_location *location, | ||
152 | unsigned long *flags) | ||
153 | { | ||
154 | current->in_ubsan++; | ||
155 | spin_lock_irqsave(&report_lock, *flags); | ||
156 | |||
157 | pr_err("========================================" | ||
158 | "========================================\n"); | ||
159 | print_source_location("UBSAN: Undefined behaviour in", location); | ||
160 | } | ||
161 | |||
162 | static void ubsan_epilogue(unsigned long *flags) | ||
163 | { | ||
164 | dump_stack(); | ||
165 | pr_err("========================================" | ||
166 | "========================================\n"); | ||
167 | spin_unlock_irqrestore(&report_lock, *flags); | ||
168 | current->in_ubsan--; | ||
169 | } | ||
170 | |||
171 | static void handle_overflow(struct overflow_data *data, unsigned long lhs, | ||
172 | unsigned long rhs, char op) | ||
173 | { | ||
174 | |||
175 | struct type_descriptor *type = data->type; | ||
176 | unsigned long flags; | ||
177 | char lhs_val_str[VALUE_LENGTH]; | ||
178 | char rhs_val_str[VALUE_LENGTH]; | ||
179 | |||
180 | if (suppress_report(&data->location)) | ||
181 | return; | ||
182 | |||
183 | ubsan_prologue(&data->location, &flags); | ||
184 | |||
185 | val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); | ||
186 | val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); | ||
187 | pr_err("%s integer overflow:\n", | ||
188 | type_is_signed(type) ? "signed" : "unsigned"); | ||
189 | pr_err("%s %c %s cannot be represented in type %s\n", | ||
190 | lhs_val_str, | ||
191 | op, | ||
192 | rhs_val_str, | ||
193 | type->type_name); | ||
194 | |||
195 | ubsan_epilogue(&flags); | ||
196 | } | ||
197 | |||
198 | void __ubsan_handle_add_overflow(struct overflow_data *data, | ||
199 | unsigned long lhs, | ||
200 | unsigned long rhs) | ||
201 | { | ||
202 | |||
203 | handle_overflow(data, lhs, rhs, '+'); | ||
204 | } | ||
205 | EXPORT_SYMBOL(__ubsan_handle_add_overflow); | ||
206 | |||
207 | void __ubsan_handle_sub_overflow(struct overflow_data *data, | ||
208 | unsigned long lhs, | ||
209 | unsigned long rhs) | ||
210 | { | ||
211 | handle_overflow(data, lhs, rhs, '-'); | ||
212 | } | ||
213 | EXPORT_SYMBOL(__ubsan_handle_sub_overflow); | ||
214 | |||
215 | void __ubsan_handle_mul_overflow(struct overflow_data *data, | ||
216 | unsigned long lhs, | ||
217 | unsigned long rhs) | ||
218 | { | ||
219 | handle_overflow(data, lhs, rhs, '*'); | ||
220 | } | ||
221 | EXPORT_SYMBOL(__ubsan_handle_mul_overflow); | ||
222 | |||
223 | void __ubsan_handle_negate_overflow(struct overflow_data *data, | ||
224 | unsigned long old_val) | ||
225 | { | ||
226 | unsigned long flags; | ||
227 | char old_val_str[VALUE_LENGTH]; | ||
228 | |||
229 | if (suppress_report(&data->location)) | ||
230 | return; | ||
231 | |||
232 | ubsan_prologue(&data->location, &flags); | ||
233 | |||
234 | val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); | ||
235 | |||
236 | pr_err("negation of %s cannot be represented in type %s:\n", | ||
237 | old_val_str, data->type->type_name); | ||
238 | |||
239 | ubsan_epilogue(&flags); | ||
240 | } | ||
241 | EXPORT_SYMBOL(__ubsan_handle_negate_overflow); | ||
242 | |||
243 | |||
244 | void __ubsan_handle_divrem_overflow(struct overflow_data *data, | ||
245 | unsigned long lhs, | ||
246 | unsigned long rhs) | ||
247 | { | ||
248 | unsigned long flags; | ||
249 | char rhs_val_str[VALUE_LENGTH]; | ||
250 | |||
251 | if (suppress_report(&data->location)) | ||
252 | return; | ||
253 | |||
254 | ubsan_prologue(&data->location, &flags); | ||
255 | |||
256 | val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); | ||
257 | |||
258 | if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1) | ||
259 | pr_err("division of %s by -1 cannot be represented in type %s\n", | ||
260 | rhs_val_str, data->type->type_name); | ||
261 | else | ||
262 | pr_err("division by zero\n"); | ||
263 | |||
264 | ubsan_epilogue(&flags); | ||
265 | } | ||
266 | EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); | ||
267 | |||
268 | static void handle_null_ptr_deref(struct type_mismatch_data *data) | ||
269 | { | ||
270 | unsigned long flags; | ||
271 | |||
272 | if (suppress_report(&data->location)) | ||
273 | return; | ||
274 | |||
275 | ubsan_prologue(&data->location, &flags); | ||
276 | |||
277 | pr_err("%s null pointer of type %s\n", | ||
278 | type_check_kinds[data->type_check_kind], | ||
279 | data->type->type_name); | ||
280 | |||
281 | ubsan_epilogue(&flags); | ||
282 | } | ||
283 | |||
284 | static void handle_misaligned_access(struct type_mismatch_data *data, | ||
285 | unsigned long ptr) | ||
286 | { | ||
287 | unsigned long flags; | ||
288 | |||
289 | if (suppress_report(&data->location)) | ||
290 | return; | ||
291 | |||
292 | ubsan_prologue(&data->location, &flags); | ||
293 | |||
294 | pr_err("%s misaligned address %p for type %s\n", | ||
295 | type_check_kinds[data->type_check_kind], | ||
296 | (void *)ptr, data->type->type_name); | ||
297 | pr_err("which requires %ld byte alignment\n", data->alignment); | ||
298 | |||
299 | ubsan_epilogue(&flags); | ||
300 | } | ||
301 | |||
302 | static void handle_object_size_mismatch(struct type_mismatch_data *data, | ||
303 | unsigned long ptr) | ||
304 | { | ||
305 | unsigned long flags; | ||
306 | |||
307 | if (suppress_report(&data->location)) | ||
308 | return; | ||
309 | |||
310 | ubsan_prologue(&data->location, &flags); | ||
311 | pr_err("%s address %pk with insufficient space\n", | ||
312 | type_check_kinds[data->type_check_kind], | ||
313 | (void *) ptr); | ||
314 | pr_err("for an object of type %s\n", data->type->type_name); | ||
315 | ubsan_epilogue(&flags); | ||
316 | } | ||
317 | |||
318 | void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, | ||
319 | unsigned long ptr) | ||
320 | { | ||
321 | |||
322 | if (!ptr) | ||
323 | handle_null_ptr_deref(data); | ||
324 | else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) | ||
325 | handle_misaligned_access(data, ptr); | ||
326 | else | ||
327 | handle_object_size_mismatch(data, ptr); | ||
328 | } | ||
329 | EXPORT_SYMBOL(__ubsan_handle_type_mismatch); | ||
330 | |||
331 | void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | |||
335 | if (suppress_report(&data->location)) | ||
336 | return; | ||
337 | |||
338 | ubsan_prologue(&data->location, &flags); | ||
339 | |||
340 | pr_err("null pointer returned from function declared to never return null\n"); | ||
341 | |||
342 | if (location_is_valid(&data->attr_location)) | ||
343 | print_source_location("returns_nonnull attribute specified in", | ||
344 | &data->attr_location); | ||
345 | |||
346 | ubsan_epilogue(&flags); | ||
347 | } | ||
348 | EXPORT_SYMBOL(__ubsan_handle_nonnull_return); | ||
349 | |||
350 | void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, | ||
351 | unsigned long bound) | ||
352 | { | ||
353 | unsigned long flags; | ||
354 | char bound_str[VALUE_LENGTH]; | ||
355 | |||
356 | if (suppress_report(&data->location)) | ||
357 | return; | ||
358 | |||
359 | ubsan_prologue(&data->location, &flags); | ||
360 | |||
361 | val_to_string(bound_str, sizeof(bound_str), data->type, bound); | ||
362 | pr_err("variable length array bound value %s <= 0\n", bound_str); | ||
363 | |||
364 | ubsan_epilogue(&flags); | ||
365 | } | ||
366 | EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); | ||
367 | |||
368 | void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, | ||
369 | unsigned long index) | ||
370 | { | ||
371 | unsigned long flags; | ||
372 | char index_str[VALUE_LENGTH]; | ||
373 | |||
374 | if (suppress_report(&data->location)) | ||
375 | return; | ||
376 | |||
377 | ubsan_prologue(&data->location, &flags); | ||
378 | |||
379 | val_to_string(index_str, sizeof(index_str), data->index_type, index); | ||
380 | pr_err("index %s is out of range for type %s\n", index_str, | ||
381 | data->array_type->type_name); | ||
382 | ubsan_epilogue(&flags); | ||
383 | } | ||
384 | EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); | ||
385 | |||
386 | void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, | ||
387 | unsigned long lhs, unsigned long rhs) | ||
388 | { | ||
389 | unsigned long flags; | ||
390 | struct type_descriptor *rhs_type = data->rhs_type; | ||
391 | struct type_descriptor *lhs_type = data->lhs_type; | ||
392 | char rhs_str[VALUE_LENGTH]; | ||
393 | char lhs_str[VALUE_LENGTH]; | ||
394 | |||
395 | if (suppress_report(&data->location)) | ||
396 | return; | ||
397 | |||
398 | ubsan_prologue(&data->location, &flags); | ||
399 | |||
400 | val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); | ||
401 | val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); | ||
402 | |||
403 | if (val_is_negative(rhs_type, rhs)) | ||
404 | pr_err("shift exponent %s is negative\n", rhs_str); | ||
405 | |||
406 | else if (get_unsigned_val(rhs_type, rhs) >= | ||
407 | type_bit_width(lhs_type)) | ||
408 | pr_err("shift exponent %s is too large for %u-bit type %s\n", | ||
409 | rhs_str, | ||
410 | type_bit_width(lhs_type), | ||
411 | lhs_type->type_name); | ||
412 | else if (val_is_negative(lhs_type, lhs)) | ||
413 | pr_err("left shift of negative value %s\n", | ||
414 | lhs_str); | ||
415 | else | ||
416 | pr_err("left shift of %s by %s places cannot be" | ||
417 | " represented in type %s\n", | ||
418 | lhs_str, rhs_str, | ||
419 | lhs_type->type_name); | ||
420 | |||
421 | ubsan_epilogue(&flags); | ||
422 | } | ||
423 | EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); | ||
424 | |||
425 | |||
426 | void __noreturn | ||
427 | __ubsan_handle_builtin_unreachable(struct unreachable_data *data) | ||
428 | { | ||
429 | unsigned long flags; | ||
430 | |||
431 | ubsan_prologue(&data->location, &flags); | ||
432 | pr_err("calling __builtin_unreachable()\n"); | ||
433 | ubsan_epilogue(&flags); | ||
434 | panic("can't return from __builtin_unreachable()"); | ||
435 | } | ||
436 | EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); | ||
437 | |||
438 | void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, | ||
439 | unsigned long val) | ||
440 | { | ||
441 | unsigned long flags; | ||
442 | char val_str[VALUE_LENGTH]; | ||
443 | |||
444 | if (suppress_report(&data->location)) | ||
445 | return; | ||
446 | |||
447 | ubsan_prologue(&data->location, &flags); | ||
448 | |||
449 | val_to_string(val_str, sizeof(val_str), data->type, val); | ||
450 | |||
451 | pr_err("load of value %s is not a valid value for type %s\n", | ||
452 | val_str, data->type->type_name); | ||
453 | |||
454 | ubsan_epilogue(&flags); | ||
455 | } | ||
456 | EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); | ||
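For context only (not part of this patch): with CONFIG_UBSAN enabled, the compiler instruments arithmetic such as the sketch below and, when the signed addition wraps, emits a call into __ubsan_handle_add_overflow() above; handle_overflow() then prints both operands and the type name. The helper and variable names in the sketch are hypothetical.

#include <linux/kernel.h>

/* Minimal hedged sketch that provokes a signed-overflow report when the
 * kernel is built with UBSAN; hypothetical helper, not taken from the patch. */
static int ubsan_add_overflow_demo(void)
{
	volatile int lhs = INT_MAX;	/* volatile keeps the compiler from folding the sum */
	volatile int rhs = 1;

	return lhs + rhs;		/* wraps -> __ubsan_handle_add_overflow() fires */
}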
diff --git a/lib/ubsan.h b/lib/ubsan.h new file mode 100644 index 000000000000..b2d18d4a53f5 --- /dev/null +++ b/lib/ubsan.h | |||
@@ -0,0 +1,84 @@ | |||
1 | #ifndef _LIB_UBSAN_H | ||
2 | #define _LIB_UBSAN_H | ||
3 | |||
4 | enum { | ||
5 | type_kind_int = 0, | ||
6 | type_kind_float = 1, | ||
7 | type_unknown = 0xffff | ||
8 | }; | ||
9 | |||
10 | struct type_descriptor { | ||
11 | u16 type_kind; | ||
12 | u16 type_info; | ||
13 | char type_name[1]; | ||
14 | }; | ||
15 | |||
16 | struct source_location { | ||
17 | const char *file_name; | ||
18 | union { | ||
19 | unsigned long reported; | ||
20 | struct { | ||
21 | u32 line; | ||
22 | u32 column; | ||
23 | }; | ||
24 | }; | ||
25 | }; | ||
26 | |||
27 | struct overflow_data { | ||
28 | struct source_location location; | ||
29 | struct type_descriptor *type; | ||
30 | }; | ||
31 | |||
32 | struct type_mismatch_data { | ||
33 | struct source_location location; | ||
34 | struct type_descriptor *type; | ||
35 | unsigned long alignment; | ||
36 | unsigned char type_check_kind; | ||
37 | }; | ||
38 | |||
39 | struct nonnull_arg_data { | ||
40 | struct source_location location; | ||
41 | struct source_location attr_location; | ||
42 | int arg_index; | ||
43 | }; | ||
44 | |||
45 | struct nonnull_return_data { | ||
46 | struct source_location location; | ||
47 | struct source_location attr_location; | ||
48 | }; | ||
49 | |||
50 | struct vla_bound_data { | ||
51 | struct source_location location; | ||
52 | struct type_descriptor *type; | ||
53 | }; | ||
54 | |||
55 | struct out_of_bounds_data { | ||
56 | struct source_location location; | ||
57 | struct type_descriptor *array_type; | ||
58 | struct type_descriptor *index_type; | ||
59 | }; | ||
60 | |||
61 | struct shift_out_of_bounds_data { | ||
62 | struct source_location location; | ||
63 | struct type_descriptor *lhs_type; | ||
64 | struct type_descriptor *rhs_type; | ||
65 | }; | ||
66 | |||
67 | struct unreachable_data { | ||
68 | struct source_location location; | ||
69 | }; | ||
70 | |||
71 | struct invalid_value_data { | ||
72 | struct source_location location; | ||
73 | struct type_descriptor *type; | ||
74 | }; | ||
75 | |||
76 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
77 | typedef __int128 s_max; | ||
78 | typedef unsigned __int128 u_max; | ||
79 | #else | ||
80 | typedef s64 s_max; | ||
81 | typedef u64 u_max; | ||
82 | #endif | ||
83 | |||
84 | #endif | ||
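A note on how the handlers in lib/ubsan.c decode struct type_descriptor: bit 0 of type_info carries signedness and the remaining bits carry log2 of the bit width, which is what is_inline_int(), get_signed_val() and the shift checks rely on. The sketch below mirrors the type_is_signed()/type_bit_width() helpers earlier in lib/ubsan.c; the demo_* names are illustrative and not exported by this header.

/* Hedged sketch of the type_info encoding. */
static inline bool demo_type_is_signed(const struct type_descriptor *type)
{
	return type->type_info & 1;		/* bit 0: signed vs. unsigned */
}

static inline unsigned int demo_type_bit_width(const struct type_descriptor *type)
{
	return 1 << (type->type_info >> 1);	/* remaining bits: log2(width in bits) */
}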
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b1cf73bc3b12..8ad580273521 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -3357,6 +3357,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) | |||
3357 | struct anon_vma *anon_vma; | 3357 | struct anon_vma *anon_vma; |
3358 | int count, mapcount, ret; | 3358 | int count, mapcount, ret; |
3359 | bool mlocked; | 3359 | bool mlocked; |
3360 | unsigned long flags; | ||
3360 | 3361 | ||
3361 | VM_BUG_ON_PAGE(is_huge_zero_page(page), page); | 3362 | VM_BUG_ON_PAGE(is_huge_zero_page(page), page); |
3362 | VM_BUG_ON_PAGE(!PageAnon(page), page); | 3363 | VM_BUG_ON_PAGE(!PageAnon(page), page); |
@@ -3396,7 +3397,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) | |||
3396 | lru_add_drain(); | 3397 | lru_add_drain(); |
3397 | 3398 | ||
3398 | /* Prevent deferred_split_scan() touching ->_count */ | 3399 | /* Prevent deferred_split_scan() touching ->_count */ |
3399 | spin_lock(&split_queue_lock); | 3400 | spin_lock_irqsave(&split_queue_lock, flags); |
3400 | count = page_count(head); | 3401 | count = page_count(head); |
3401 | mapcount = total_mapcount(head); | 3402 | mapcount = total_mapcount(head); |
3402 | if (!mapcount && count == 1) { | 3403 | if (!mapcount && count == 1) { |
@@ -3404,11 +3405,11 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) | |||
3404 | split_queue_len--; | 3405 | split_queue_len--; |
3405 | list_del(page_deferred_list(head)); | 3406 | list_del(page_deferred_list(head)); |
3406 | } | 3407 | } |
3407 | spin_unlock(&split_queue_lock); | 3408 | spin_unlock_irqrestore(&split_queue_lock, flags); |
3408 | __split_huge_page(page, list); | 3409 | __split_huge_page(page, list); |
3409 | ret = 0; | 3410 | ret = 0; |
3410 | } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { | 3411 | } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { |
3411 | spin_unlock(&split_queue_lock); | 3412 | spin_unlock_irqrestore(&split_queue_lock, flags); |
3412 | pr_alert("total_mapcount: %u, page_count(): %u\n", | 3413 | pr_alert("total_mapcount: %u, page_count(): %u\n", |
3413 | mapcount, count); | 3414 | mapcount, count); |
3414 | if (PageTail(page)) | 3415 | if (PageTail(page)) |
@@ -3416,7 +3417,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) | |||
3416 | dump_page(page, "total_mapcount(head) > 0"); | 3417 | dump_page(page, "total_mapcount(head) > 0"); |
3417 | BUG(); | 3418 | BUG(); |
3418 | } else { | 3419 | } else { |
3419 | spin_unlock(&split_queue_lock); | 3420 | spin_unlock_irqrestore(&split_queue_lock, flags); |
3420 | unfreeze_page(anon_vma, head); | 3421 | unfreeze_page(anon_vma, head); |
3421 | ret = -EBUSY; | 3422 | ret = -EBUSY; |
3422 | } | 3423 | } |
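The split_queue_lock conversion above follows the standard pattern for a spinlock that may also be taken in interrupt-sensitive paths: the process-context holder switches to the irqsave variants so an interrupt arriving on the same CPU cannot re-enter the lock and deadlock. A minimal sketch of the idiom, with hypothetical names (it illustrates the pattern, not the THP code itself):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* local interrupts off while held */
	/* ... update data also reachable from interrupt-safe paths ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}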
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 64710148941e..a61460d9f5b0 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | KASAN_SANITIZE := n | 1 | KASAN_SANITIZE := n |
2 | UBSAN_SANITIZE_kasan.o := n | ||
2 | 3 | ||
3 | CFLAGS_REMOVE_kasan.o = -pg | 4 | CFLAGS_REMOVE_kasan.o = -pg |
4 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 | 5 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 |
diff --git a/mm/list_lru.c b/mm/list_lru.c index afc71ea9a381..1d05cb9d363d 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/memcontrol.h> | 13 | #include <linux/memcontrol.h> |
14 | 14 | ||
15 | #ifdef CONFIG_MEMCG_KMEM | 15 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
16 | static LIST_HEAD(list_lrus); | 16 | static LIST_HEAD(list_lrus); |
17 | static DEFINE_MUTEX(list_lrus_mutex); | 17 | static DEFINE_MUTEX(list_lrus_mutex); |
18 | 18 | ||
@@ -37,9 +37,9 @@ static void list_lru_register(struct list_lru *lru) | |||
37 | static void list_lru_unregister(struct list_lru *lru) | 37 | static void list_lru_unregister(struct list_lru *lru) |
38 | { | 38 | { |
39 | } | 39 | } |
40 | #endif /* CONFIG_MEMCG_KMEM */ | 40 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
41 | 41 | ||
42 | #ifdef CONFIG_MEMCG_KMEM | 42 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
43 | static inline bool list_lru_memcg_aware(struct list_lru *lru) | 43 | static inline bool list_lru_memcg_aware(struct list_lru *lru) |
44 | { | 44 | { |
45 | /* | 45 | /* |
@@ -104,7 +104,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) | |||
104 | { | 104 | { |
105 | return &nlru->lru; | 105 | return &nlru->lru; |
106 | } | 106 | } |
107 | #endif /* CONFIG_MEMCG_KMEM */ | 107 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
108 | 108 | ||
109 | bool list_lru_add(struct list_lru *lru, struct list_head *item) | 109 | bool list_lru_add(struct list_lru *lru, struct list_head *item) |
110 | { | 110 | { |
@@ -292,7 +292,7 @@ static void init_one_lru(struct list_lru_one *l) | |||
292 | l->nr_items = 0; | 292 | l->nr_items = 0; |
293 | } | 293 | } |
294 | 294 | ||
295 | #ifdef CONFIG_MEMCG_KMEM | 295 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
296 | static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, | 296 | static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, |
297 | int begin, int end) | 297 | int begin, int end) |
298 | { | 298 | { |
@@ -529,7 +529,7 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) | |||
529 | static void memcg_destroy_list_lru(struct list_lru *lru) | 529 | static void memcg_destroy_list_lru(struct list_lru *lru) |
530 | { | 530 | { |
531 | } | 531 | } |
532 | #endif /* CONFIG_MEMCG_KMEM */ | 532 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
533 | 533 | ||
534 | int __list_lru_init(struct list_lru *lru, bool memcg_aware, | 534 | int __list_lru_init(struct list_lru *lru, bool memcg_aware, |
535 | struct lock_class_key *key) | 535 | struct lock_class_key *key) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eda67376df4..ca052f2a4a0b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -66,7 +66,6 @@ | |||
66 | #include "internal.h" | 66 | #include "internal.h" |
67 | #include <net/sock.h> | 67 | #include <net/sock.h> |
68 | #include <net/ip.h> | 68 | #include <net/ip.h> |
69 | #include <net/tcp_memcontrol.h> | ||
70 | #include "slab.h" | 69 | #include "slab.h" |
71 | 70 | ||
72 | #include <asm/uaccess.h> | 71 | #include <asm/uaccess.h> |
@@ -83,6 +82,9 @@ struct mem_cgroup *root_mem_cgroup __read_mostly; | |||
83 | /* Socket memory accounting disabled? */ | 82 | /* Socket memory accounting disabled? */ |
84 | static bool cgroup_memory_nosocket; | 83 | static bool cgroup_memory_nosocket; |
85 | 84 | ||
85 | /* Kernel memory accounting disabled? */ | ||
86 | static bool cgroup_memory_nokmem; | ||
87 | |||
86 | /* Whether the swap controller is active */ | 88 | /* Whether the swap controller is active */ |
87 | #ifdef CONFIG_MEMCG_SWAP | 89 | #ifdef CONFIG_MEMCG_SWAP |
88 | int do_swap_account __read_mostly; | 90 | int do_swap_account __read_mostly; |
@@ -239,6 +241,7 @@ enum res_type { | |||
239 | _MEMSWAP, | 241 | _MEMSWAP, |
240 | _OOM_TYPE, | 242 | _OOM_TYPE, |
241 | _KMEM, | 243 | _KMEM, |
244 | _TCP, | ||
242 | }; | 245 | }; |
243 | 246 | ||
244 | #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) | 247 | #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) |
@@ -247,13 +250,6 @@ enum res_type { | |||
247 | /* Used for OOM notifier */ | 250 |
248 | #define OOM_CONTROL (0) | 251 | #define OOM_CONTROL (0) |
249 | 252 | ||
250 | /* | ||
251 | * The memcg_create_mutex will be held whenever a new cgroup is created. | ||
252 | * As a consequence, any change that needs to protect against new child cgroups | ||
253 | * appearing has to hold it as well. | ||
254 | */ | ||
255 | static DEFINE_MUTEX(memcg_create_mutex); | ||
256 | |||
257 | /* Some nice accessors for the vmpressure. */ | 253 | /* Some nice accessors for the vmpressure. */ |
258 | struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) | 254 | struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) |
259 | { | 255 | { |
@@ -297,7 +293,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) | |||
297 | return mem_cgroup_from_css(css); | 293 | return mem_cgroup_from_css(css); |
298 | } | 294 | } |
299 | 295 | ||
300 | #ifdef CONFIG_MEMCG_KMEM | 296 | #ifndef CONFIG_SLOB |
301 | /* | 297 | /* |
302 | * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. | 298 | * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. |
303 | * The main reason for not using cgroup id for this: | 299 | * The main reason for not using cgroup id for this: |
@@ -349,7 +345,7 @@ void memcg_put_cache_ids(void) | |||
349 | DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); | 345 | DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); |
350 | EXPORT_SYMBOL(memcg_kmem_enabled_key); | 346 | EXPORT_SYMBOL(memcg_kmem_enabled_key); |
351 | 347 | ||
352 | #endif /* CONFIG_MEMCG_KMEM */ | 348 | #endif /* !CONFIG_SLOB */ |
353 | 349 | ||
354 | static struct mem_cgroup_per_zone * | 350 | static struct mem_cgroup_per_zone * |
355 | mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) | 351 | mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) |
@@ -370,13 +366,6 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) | |||
370 | * | 366 | * |
371 | * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup | 367 | * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup |
372 | * is returned. | 368 | * is returned. |
373 | * | ||
374 | * XXX: The above description of behavior on the default hierarchy isn't | ||
375 | * strictly true yet as replace_page_cache_page() can modify the | ||
376 | * association before @page is released even on the default hierarchy; | ||
377 | * however, the current and planned usages don't mix the the two functions | ||
378 | * and replace_page_cache_page() will soon be updated to make the invariant | ||
379 | * actually true. | ||
380 | */ | 369 | */ |
381 | struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) | 370 | struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) |
382 | { | 371 | { |
@@ -896,17 +885,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, | |||
896 | if (css == &root->css) | 885 | if (css == &root->css) |
897 | break; | 886 | break; |
898 | 887 | ||
899 | if (css_tryget(css)) { | 888 | if (css_tryget(css)) |
900 | /* | 889 | break; |
901 | * Make sure the memcg is initialized: | ||
902 | * mem_cgroup_css_online() orders the the | ||
903 | * initialization against setting the flag. | ||
904 | */ | ||
905 | if (smp_load_acquire(&memcg->initialized)) | ||
906 | break; | ||
907 | |||
908 | css_put(css); | ||
909 | } | ||
910 | 890 | ||
911 | memcg = NULL; | 891 | memcg = NULL; |
912 | } | 892 | } |
@@ -1233,7 +1213,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | |||
1233 | pr_cont(":"); | 1213 | pr_cont(":"); |
1234 | 1214 | ||
1235 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 1215 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
1236 | if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) | 1216 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
1237 | continue; | 1217 | continue; |
1238 | pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], | 1218 | pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], |
1239 | K(mem_cgroup_read_stat(iter, i))); | 1219 | K(mem_cgroup_read_stat(iter, i))); |
@@ -1272,9 +1252,12 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) | |||
1272 | limit = memcg->memory.limit; | 1252 | limit = memcg->memory.limit; |
1273 | if (mem_cgroup_swappiness(memcg)) { | 1253 | if (mem_cgroup_swappiness(memcg)) { |
1274 | unsigned long memsw_limit; | 1254 | unsigned long memsw_limit; |
1255 | unsigned long swap_limit; | ||
1275 | 1256 | ||
1276 | memsw_limit = memcg->memsw.limit; | 1257 | memsw_limit = memcg->memsw.limit; |
1277 | limit = min(limit + total_swap_pages, memsw_limit); | 1258 | swap_limit = memcg->swap.limit; |
1259 | swap_limit = min(swap_limit, (unsigned long)total_swap_pages); | ||
1260 | limit = min(limit + swap_limit, memsw_limit); | ||
1278 | } | 1261 | } |
1279 | return limit; | 1262 | return limit; |
1280 | } | 1263 | } |
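A worked example of the new clamping, with hypothetical numbers: if memory.limit is 1 GiB, the cgroup's swap.limit is 512 MiB, the system has 2 GiB of swap and memsw.limit is unlimited, then swap_limit = min(512 MiB, 2 GiB) = 512 MiB and the OOM sizing becomes min(1 GiB + 512 MiB, unlimited) = 1.5 GiB; previously the full 2 GiB of system swap was added, yielding 3 GiB even though the cgroup could never use that much swap.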
@@ -2203,7 +2186,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, | |||
2203 | unlock_page_lru(page, isolated); | 2186 | unlock_page_lru(page, isolated); |
2204 | } | 2187 | } |
2205 | 2188 | ||
2206 | #ifdef CONFIG_MEMCG_KMEM | 2189 | #ifndef CONFIG_SLOB |
2207 | static int memcg_alloc_cache_id(void) | 2190 | static int memcg_alloc_cache_id(void) |
2208 | { | 2191 | { |
2209 | int id, size; | 2192 | int id, size; |
@@ -2378,16 +2361,17 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | |||
2378 | struct page_counter *counter; | 2361 | struct page_counter *counter; |
2379 | int ret; | 2362 | int ret; |
2380 | 2363 | ||
2381 | if (!memcg_kmem_is_active(memcg)) | 2364 | if (!memcg_kmem_online(memcg)) |
2382 | return 0; | 2365 | return 0; |
2383 | 2366 | ||
2384 | if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) | ||
2385 | return -ENOMEM; | ||
2386 | |||
2387 | ret = try_charge(memcg, gfp, nr_pages); | 2367 | ret = try_charge(memcg, gfp, nr_pages); |
2388 | if (ret) { | 2368 | if (ret) |
2389 | page_counter_uncharge(&memcg->kmem, nr_pages); | ||
2390 | return ret; | 2369 | return ret; |
2370 | |||
2371 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && | ||
2372 | !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { | ||
2373 | cancel_charge(memcg, nr_pages); | ||
2374 | return -ENOMEM; | ||
2391 | } | 2375 | } |
2392 | 2376 | ||
2393 | page->mem_cgroup = memcg; | 2377 | page->mem_cgroup = memcg; |
@@ -2416,7 +2400,9 @@ void __memcg_kmem_uncharge(struct page *page, int order) | |||
2416 | 2400 | ||
2417 | VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); | 2401 | VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); |
2418 | 2402 | ||
2419 | page_counter_uncharge(&memcg->kmem, nr_pages); | 2403 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
2404 | page_counter_uncharge(&memcg->kmem, nr_pages); | ||
2405 | |||
2420 | page_counter_uncharge(&memcg->memory, nr_pages); | 2406 | page_counter_uncharge(&memcg->memory, nr_pages); |
2421 | if (do_memsw_account()) | 2407 | if (do_memsw_account()) |
2422 | page_counter_uncharge(&memcg->memsw, nr_pages); | 2408 | page_counter_uncharge(&memcg->memsw, nr_pages); |
@@ -2424,7 +2410,7 @@ void __memcg_kmem_uncharge(struct page *page, int order) | |||
2424 | page->mem_cgroup = NULL; | 2410 | page->mem_cgroup = NULL; |
2425 | css_put_many(&memcg->css, nr_pages); | 2411 | css_put_many(&memcg->css, nr_pages); |
2426 | } | 2412 | } |
2427 | #endif /* CONFIG_MEMCG_KMEM */ | 2413 | #endif /* !CONFIG_SLOB */ |
2428 | 2414 | ||
2429 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 2415 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
2430 | 2416 | ||
@@ -2684,14 +2670,6 @@ static inline bool memcg_has_children(struct mem_cgroup *memcg) | |||
2684 | { | 2670 | { |
2685 | bool ret; | 2671 | bool ret; |
2686 | 2672 | ||
2687 | /* | ||
2688 | * The lock does not prevent addition or deletion of children, but | ||
2689 | * it prevents a new child from being initialized based on this | ||
2690 | * parent in css_online(), so it's enough to decide whether | ||
2691 | * hierarchically inherited attributes can still be changed or not. | ||
2692 | */ | ||
2693 | lockdep_assert_held(&memcg_create_mutex); | ||
2694 | |||
2695 | rcu_read_lock(); | 2673 | rcu_read_lock(); |
2696 | ret = css_next_child(NULL, &memcg->css); | 2674 | ret = css_next_child(NULL, &memcg->css); |
2697 | rcu_read_unlock(); | 2675 | rcu_read_unlock(); |
@@ -2754,10 +2732,8 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, | |||
2754 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 2732 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
2755 | struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); | 2733 | struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); |
2756 | 2734 | ||
2757 | mutex_lock(&memcg_create_mutex); | ||
2758 | |||
2759 | if (memcg->use_hierarchy == val) | 2735 | if (memcg->use_hierarchy == val) |
2760 | goto out; | 2736 | return 0; |
2761 | 2737 | ||
2762 | /* | 2738 | /* |
2763 | * If parent's use_hierarchy is set, we can't make any modifications | 2739 | * If parent's use_hierarchy is set, we can't make any modifications |
@@ -2776,9 +2752,6 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, | |||
2776 | } else | 2752 | } else |
2777 | retval = -EINVAL; | 2753 | retval = -EINVAL; |
2778 | 2754 | ||
2779 | out: | ||
2780 | mutex_unlock(&memcg_create_mutex); | ||
2781 | |||
2782 | return retval; | 2755 | return retval; |
2783 | } | 2756 | } |
2784 | 2757 | ||
@@ -2794,6 +2767,18 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, | |||
2794 | return val; | 2767 | return val; |
2795 | } | 2768 | } |
2796 | 2769 | ||
2770 | static unsigned long tree_events(struct mem_cgroup *memcg, | ||
2771 | enum mem_cgroup_events_index idx) | ||
2772 | { | ||
2773 | struct mem_cgroup *iter; | ||
2774 | unsigned long val = 0; | ||
2775 | |||
2776 | for_each_mem_cgroup_tree(iter, memcg) | ||
2777 | val += mem_cgroup_read_events(iter, idx); | ||
2778 | |||
2779 | return val; | ||
2780 | } | ||
2781 | |||
2797 | static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) | 2782 | static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) |
2798 | { | 2783 | { |
2799 | unsigned long val; | 2784 | unsigned long val; |
@@ -2836,6 +2821,9 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | |||
2836 | case _KMEM: | 2821 | case _KMEM: |
2837 | counter = &memcg->kmem; | 2822 | counter = &memcg->kmem; |
2838 | break; | 2823 | break; |
2824 | case _TCP: | ||
2825 | counter = &memcg->tcpmem; | ||
2826 | break; | ||
2839 | default: | 2827 | default: |
2840 | BUG(); | 2828 | BUG(); |
2841 | } | 2829 | } |
@@ -2860,103 +2848,180 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | |||
2860 | } | 2848 | } |
2861 | } | 2849 | } |
2862 | 2850 | ||
2863 | #ifdef CONFIG_MEMCG_KMEM | 2851 | #ifndef CONFIG_SLOB |
2864 | static int memcg_activate_kmem(struct mem_cgroup *memcg, | 2852 | static int memcg_online_kmem(struct mem_cgroup *memcg) |
2865 | unsigned long nr_pages) | ||
2866 | { | 2853 | { |
2867 | int err = 0; | ||
2868 | int memcg_id; | 2854 | int memcg_id; |
2869 | 2855 | ||
2870 | BUG_ON(memcg->kmemcg_id >= 0); | 2856 | BUG_ON(memcg->kmemcg_id >= 0); |
2871 | BUG_ON(memcg->kmem_acct_activated); | 2857 | BUG_ON(memcg->kmem_state); |
2872 | BUG_ON(memcg->kmem_acct_active); | ||
2873 | |||
2874 | /* | ||
2875 | * For simplicity, we won't allow this to be disabled. It also can't | ||
2876 | * be changed if the cgroup has children already, or if tasks had | ||
2877 | * already joined. | ||
2878 | * | ||
2879 | * If tasks join before we set the limit, a person looking at | ||
2880 | * kmem.usage_in_bytes will have no way to determine when it took | ||
2881 | * place, which makes the value quite meaningless. | ||
2882 | * | ||
2883 | * After it first became limited, changes in the value of the limit are | ||
2884 | * of course permitted. | ||
2885 | */ | ||
2886 | mutex_lock(&memcg_create_mutex); | ||
2887 | if (cgroup_is_populated(memcg->css.cgroup) || | ||
2888 | (memcg->use_hierarchy && memcg_has_children(memcg))) | ||
2889 | err = -EBUSY; | ||
2890 | mutex_unlock(&memcg_create_mutex); | ||
2891 | if (err) | ||
2892 | goto out; | ||
2893 | 2858 | ||
2894 | memcg_id = memcg_alloc_cache_id(); | 2859 | memcg_id = memcg_alloc_cache_id(); |
2895 | if (memcg_id < 0) { | 2860 | if (memcg_id < 0) |
2896 | err = memcg_id; | 2861 | return memcg_id; |
2897 | goto out; | ||
2898 | } | ||
2899 | |||
2900 | /* | ||
2901 | * We couldn't have accounted to this cgroup, because it hasn't got | ||
2902 | * activated yet, so this should succeed. | ||
2903 | */ | ||
2904 | err = page_counter_limit(&memcg->kmem, nr_pages); | ||
2905 | VM_BUG_ON(err); | ||
2906 | 2862 | ||
2907 | static_branch_inc(&memcg_kmem_enabled_key); | 2863 | static_branch_inc(&memcg_kmem_enabled_key); |
2908 | /* | 2864 | /* |
2909 | * A memory cgroup is considered kmem-active as soon as it gets | 2865 | * A memory cgroup is considered kmem-online as soon as it gets |
2910 | * kmemcg_id. Setting the id after enabling static branching will | 2866 | * kmemcg_id. Setting the id after enabling static branching will |
2911 | * guarantee no one starts accounting before all call sites are | 2867 | * guarantee no one starts accounting before all call sites are |
2912 | * patched. | 2868 | * patched. |
2913 | */ | 2869 | */ |
2914 | memcg->kmemcg_id = memcg_id; | 2870 | memcg->kmemcg_id = memcg_id; |
2915 | memcg->kmem_acct_activated = true; | 2871 | memcg->kmem_state = KMEM_ONLINE; |
2916 | memcg->kmem_acct_active = true; | 2872 | |
2917 | out: | 2873 | return 0; |
2918 | return err; | ||
2919 | } | 2874 | } |
2920 | 2875 | ||
2921 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | 2876 | static int memcg_propagate_kmem(struct mem_cgroup *parent, |
2922 | unsigned long limit) | 2877 | struct mem_cgroup *memcg) |
2923 | { | 2878 | { |
2924 | int ret; | 2879 | int ret = 0; |
2925 | 2880 | ||
2926 | mutex_lock(&memcg_limit_mutex); | 2881 | mutex_lock(&memcg_limit_mutex); |
2927 | if (!memcg_kmem_is_active(memcg)) | 2882 | /* |
2928 | ret = memcg_activate_kmem(memcg, limit); | 2883 | * If the parent cgroup is not kmem-online now, it cannot be |
2929 | else | 2884 | * onlined after this point, because it has at least one child |
2930 | ret = page_counter_limit(&memcg->kmem, limit); | 2885 | * already. |
2886 | */ | ||
2887 | if (memcg_kmem_online(parent) || | ||
2888 | (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nokmem)) | ||
2889 | ret = memcg_online_kmem(memcg); | ||
2931 | mutex_unlock(&memcg_limit_mutex); | 2890 | mutex_unlock(&memcg_limit_mutex); |
2932 | return ret; | 2891 | return ret; |
2933 | } | 2892 | } |
2934 | 2893 | ||
2935 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) | 2894 | static void memcg_offline_kmem(struct mem_cgroup *memcg) |
2936 | { | 2895 | { |
2937 | int ret = 0; | 2896 | struct cgroup_subsys_state *css; |
2938 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); | 2897 | struct mem_cgroup *parent, *child; |
2898 | int kmemcg_id; | ||
2899 | |||
2900 | if (memcg->kmem_state != KMEM_ONLINE) | ||
2901 | return; | ||
2902 | /* | ||
2903 | * Clear the online state before clearing memcg_caches array | ||
2904 | * entries. The slab_mutex in memcg_deactivate_kmem_caches() | ||
2905 | * guarantees that no cache will be created for this cgroup | ||
2906 | * after we are done (see memcg_create_kmem_cache()). | ||
2907 | */ | ||
2908 | memcg->kmem_state = KMEM_ALLOCATED; | ||
2939 | 2909 | ||
2910 | memcg_deactivate_kmem_caches(memcg); | ||
2911 | |||
2912 | kmemcg_id = memcg->kmemcg_id; | ||
2913 | BUG_ON(kmemcg_id < 0); | ||
2914 | |||
2915 | parent = parent_mem_cgroup(memcg); | ||
2940 | if (!parent) | 2916 | if (!parent) |
2941 | return 0; | 2917 | parent = root_mem_cgroup; |
2942 | 2918 | ||
2943 | mutex_lock(&memcg_limit_mutex); | ||
2944 | /* | 2919 | /* |
2945 | * If the parent cgroup is not kmem-active now, it cannot be activated | 2920 | * Change kmemcg_id of this cgroup and all its descendants to the |
2946 | * after this point, because it has at least one child already. | 2921 | * parent's id, and then move all entries from this cgroup's list_lrus |
2922 | * to ones of the parent. After we have finished, all list_lrus | ||
2923 | * corresponding to this cgroup are guaranteed to remain empty. The | ||
2924 | * ordering is imposed by list_lru_node->lock taken by | ||
2925 | * memcg_drain_all_list_lrus(). | ||
2947 | */ | 2926 | */ |
2948 | if (memcg_kmem_is_active(parent)) | 2927 | css_for_each_descendant_pre(css, &memcg->css) { |
2949 | ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); | 2928 | child = mem_cgroup_from_css(css); |
2950 | mutex_unlock(&memcg_limit_mutex); | 2929 | BUG_ON(child->kmemcg_id != kmemcg_id); |
2951 | return ret; | 2930 | child->kmemcg_id = parent->kmemcg_id; |
2931 | if (!memcg->use_hierarchy) | ||
2932 | break; | ||
2933 | } | ||
2934 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); | ||
2935 | |||
2936 | memcg_free_cache_id(kmemcg_id); | ||
2937 | } | ||
2938 | |||
2939 | static void memcg_free_kmem(struct mem_cgroup *memcg) | ||
2940 | { | ||
2941 | /* css_alloc() failed, offlining didn't happen */ | ||
2942 | if (unlikely(memcg->kmem_state == KMEM_ONLINE)) | ||
2943 | memcg_offline_kmem(memcg); | ||
2944 | |||
2945 | if (memcg->kmem_state == KMEM_ALLOCATED) { | ||
2946 | memcg_destroy_kmem_caches(memcg); | ||
2947 | static_branch_dec(&memcg_kmem_enabled_key); | ||
2948 | WARN_ON(page_counter_read(&memcg->kmem)); | ||
2949 | } | ||
2952 | } | 2950 | } |
2953 | #else | 2951 | #else |
2952 | static int memcg_propagate_kmem(struct mem_cgroup *parent, struct mem_cgroup *memcg) | ||
2953 | { | ||
2954 | return 0; | ||
2955 | } | ||
2956 | static int memcg_online_kmem(struct mem_cgroup *memcg) | ||
2957 | { | ||
2958 | return 0; | ||
2959 | } | ||
2960 | static void memcg_offline_kmem(struct mem_cgroup *memcg) | ||
2961 | { | ||
2962 | } | ||
2963 | static void memcg_free_kmem(struct mem_cgroup *memcg) | ||
2964 | { | ||
2965 | } | ||
2966 | #endif /* !CONFIG_SLOB */ | ||
2967 | |||
2954 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | 2968 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, |
2955 | unsigned long limit) | 2969 | unsigned long limit) |
2956 | { | 2970 | { |
2957 | return -EINVAL; | 2971 | int ret = 0; |
2972 | |||
2973 | mutex_lock(&memcg_limit_mutex); | ||
2974 | /* Top-level cgroup doesn't propagate from root */ | ||
2975 | if (!memcg_kmem_online(memcg)) { | ||
2976 | if (cgroup_is_populated(memcg->css.cgroup) || | ||
2977 | (memcg->use_hierarchy && memcg_has_children(memcg))) | ||
2978 | ret = -EBUSY; | ||
2979 | if (ret) | ||
2980 | goto out; | ||
2981 | ret = memcg_online_kmem(memcg); | ||
2982 | if (ret) | ||
2983 | goto out; | ||
2984 | } | ||
2985 | ret = page_counter_limit(&memcg->kmem, limit); | ||
2986 | out: | ||
2987 | mutex_unlock(&memcg_limit_mutex); | ||
2988 | return ret; | ||
2989 | } | ||
2990 | |||
2991 | static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit) | ||
2992 | { | ||
2993 | int ret; | ||
2994 | |||
2995 | mutex_lock(&memcg_limit_mutex); | ||
2996 | |||
2997 | ret = page_counter_limit(&memcg->tcpmem, limit); | ||
2998 | if (ret) | ||
2999 | goto out; | ||
3000 | |||
3001 | if (!memcg->tcpmem_active) { | ||
3002 | /* | ||
3003 | * The active flag needs to be written after the static_key | ||
3004 | * update. This is what guarantees that the socket activation | ||
3005 | * function is the last one to run. See sock_update_memcg() for | ||
3006 | * details, and note that we don't mark any socket as belonging | ||
3007 | * to this memcg until that flag is up. | ||
3008 | * | ||
3009 | * We need to do this, because static_keys will span multiple | ||
3010 | * sites, but we can't control their order. If we mark a socket | ||
3011 | * as accounted, but the accounting functions are not patched in | ||
3012 | * yet, we'll lose accounting. | ||
3013 | * | ||
3014 | * We never race with the readers in sock_update_memcg(), | ||
3015 | * because when this value changes, the code to process it is not | ||
3016 | * patched in yet. | ||
3017 | */ | ||
3018 | static_branch_inc(&memcg_sockets_enabled_key); | ||
3019 | memcg->tcpmem_active = true; | ||
3020 | } | ||
3021 | out: | ||
3022 | mutex_unlock(&memcg_limit_mutex); | ||
3023 | return ret; | ||
2958 | } | 3024 | } |
2959 | #endif /* CONFIG_MEMCG_KMEM */ | ||
2960 | 3025 | ||
2961 | /* | 3026 | /* |
2962 | * The user of this function is... | 3027 | * The user of this function is... |
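The ordering that the tcpmem_active comment above insists on can be condensed into a short sketch (hypothetical demo_* names; this illustrates the idiom, not code from the patch): bump the static key first so every accounting call site is patched in, and only then publish the flag that readers test.

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_enabled_key);
static bool demo_active;

static void demo_enable(void)
{
	static_branch_inc(&demo_enabled_key);	/* patch the call sites first */
	demo_active = true;			/* then start marking new users */
}

static void demo_account(void)
{
	if (!static_branch_unlikely(&demo_enabled_key))
		return;				/* key not patched: nothing marked yet */
	if (demo_active) {
		/* ... charge the resource ... */
	}
}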
@@ -2990,6 +3055,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of, | |||
2990 | case _KMEM: | 3055 | case _KMEM: |
2991 | ret = memcg_update_kmem_limit(memcg, nr_pages); | 3056 | ret = memcg_update_kmem_limit(memcg, nr_pages); |
2992 | break; | 3057 | break; |
3058 | case _TCP: | ||
3059 | ret = memcg_update_tcp_limit(memcg, nr_pages); | ||
3060 | break; | ||
2993 | } | 3061 | } |
2994 | break; | 3062 | break; |
2995 | case RES_SOFT_LIMIT: | 3063 | case RES_SOFT_LIMIT: |
@@ -3016,6 +3084,9 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, | |||
3016 | case _KMEM: | 3084 | case _KMEM: |
3017 | counter = &memcg->kmem; | 3085 | counter = &memcg->kmem; |
3018 | break; | 3086 | break; |
3087 | case _TCP: | ||
3088 | counter = &memcg->tcpmem; | ||
3089 | break; | ||
3019 | default: | 3090 | default: |
3020 | BUG(); | 3091 | BUG(); |
3021 | } | 3092 | } |
@@ -3582,88 +3653,6 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, | |||
3582 | return 0; | 3653 | return 0; |
3583 | } | 3654 | } |
3584 | 3655 | ||
3585 | #ifdef CONFIG_MEMCG_KMEM | ||
3586 | static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | ||
3587 | { | ||
3588 | int ret; | ||
3589 | |||
3590 | ret = memcg_propagate_kmem(memcg); | ||
3591 | if (ret) | ||
3592 | return ret; | ||
3593 | |||
3594 | return tcp_init_cgroup(memcg, ss); | ||
3595 | } | ||
3596 | |||
3597 | static void memcg_deactivate_kmem(struct mem_cgroup *memcg) | ||
3598 | { | ||
3599 | struct cgroup_subsys_state *css; | ||
3600 | struct mem_cgroup *parent, *child; | ||
3601 | int kmemcg_id; | ||
3602 | |||
3603 | if (!memcg->kmem_acct_active) | ||
3604 | return; | ||
3605 | |||
3606 | /* | ||
3607 | * Clear the 'active' flag before clearing memcg_caches arrays entries. | ||
3608 | * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it | ||
3609 | * guarantees no cache will be created for this cgroup after we are | ||
3610 | * done (see memcg_create_kmem_cache()). | ||
3611 | */ | ||
3612 | memcg->kmem_acct_active = false; | ||
3613 | |||
3614 | memcg_deactivate_kmem_caches(memcg); | ||
3615 | |||
3616 | kmemcg_id = memcg->kmemcg_id; | ||
3617 | BUG_ON(kmemcg_id < 0); | ||
3618 | |||
3619 | parent = parent_mem_cgroup(memcg); | ||
3620 | if (!parent) | ||
3621 | parent = root_mem_cgroup; | ||
3622 | |||
3623 | /* | ||
3624 | * Change kmemcg_id of this cgroup and all its descendants to the | ||
3625 | * parent's id, and then move all entries from this cgroup's list_lrus | ||
3626 | * to ones of the parent. After we have finished, all list_lrus | ||
3627 | * corresponding to this cgroup are guaranteed to remain empty. The | ||
3628 | * ordering is imposed by list_lru_node->lock taken by | ||
3629 | * memcg_drain_all_list_lrus(). | ||
3630 | */ | ||
3631 | css_for_each_descendant_pre(css, &memcg->css) { | ||
3632 | child = mem_cgroup_from_css(css); | ||
3633 | BUG_ON(child->kmemcg_id != kmemcg_id); | ||
3634 | child->kmemcg_id = parent->kmemcg_id; | ||
3635 | if (!memcg->use_hierarchy) | ||
3636 | break; | ||
3637 | } | ||
3638 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); | ||
3639 | |||
3640 | memcg_free_cache_id(kmemcg_id); | ||
3641 | } | ||
3642 | |||
3643 | static void memcg_destroy_kmem(struct mem_cgroup *memcg) | ||
3644 | { | ||
3645 | if (memcg->kmem_acct_activated) { | ||
3646 | memcg_destroy_kmem_caches(memcg); | ||
3647 | static_branch_dec(&memcg_kmem_enabled_key); | ||
3648 | WARN_ON(page_counter_read(&memcg->kmem)); | ||
3649 | } | ||
3650 | tcp_destroy_cgroup(memcg); | ||
3651 | } | ||
3652 | #else | ||
3653 | static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | ||
3654 | { | ||
3655 | return 0; | ||
3656 | } | ||
3657 | |||
3658 | static void memcg_deactivate_kmem(struct mem_cgroup *memcg) | ||
3659 | { | ||
3660 | } | ||
3661 | |||
3662 | static void memcg_destroy_kmem(struct mem_cgroup *memcg) | ||
3663 | { | ||
3664 | } | ||
3665 | #endif | ||
3666 | |||
3667 | #ifdef CONFIG_CGROUP_WRITEBACK | 3656 | #ifdef CONFIG_CGROUP_WRITEBACK |
3668 | 3657 | ||
3669 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) | 3658 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) |
@@ -4051,7 +4040,6 @@ static struct cftype mem_cgroup_legacy_files[] = { | |||
4051 | .seq_show = memcg_numa_stat_show, | 4040 | .seq_show = memcg_numa_stat_show, |
4052 | }, | 4041 | }, |
4053 | #endif | 4042 | #endif |
4054 | #ifdef CONFIG_MEMCG_KMEM | ||
4055 | { | 4043 | { |
4056 | .name = "kmem.limit_in_bytes", | 4044 | .name = "kmem.limit_in_bytes", |
4057 | .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), | 4045 | .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), |
@@ -4084,7 +4072,29 @@ static struct cftype mem_cgroup_legacy_files[] = { | |||
4084 | .seq_show = memcg_slab_show, | 4072 | .seq_show = memcg_slab_show, |
4085 | }, | 4073 | }, |
4086 | #endif | 4074 | #endif |
4087 | #endif | 4075 | { |
4076 | .name = "kmem.tcp.limit_in_bytes", | ||
4077 | .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), | ||
4078 | .write = mem_cgroup_write, | ||
4079 | .read_u64 = mem_cgroup_read_u64, | ||
4080 | }, | ||
4081 | { | ||
4082 | .name = "kmem.tcp.usage_in_bytes", | ||
4083 | .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), | ||
4084 | .read_u64 = mem_cgroup_read_u64, | ||
4085 | }, | ||
4086 | { | ||
4087 | .name = "kmem.tcp.failcnt", | ||
4088 | .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), | ||
4089 | .write = mem_cgroup_reset, | ||
4090 | .read_u64 = mem_cgroup_read_u64, | ||
4091 | }, | ||
4092 | { | ||
4093 | .name = "kmem.tcp.max_usage_in_bytes", | ||
4094 | .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), | ||
4095 | .write = mem_cgroup_reset, | ||
4096 | .read_u64 = mem_cgroup_read_u64, | ||
4097 | }, | ||
4088 | { }, /* terminate */ | 4098 | { }, /* terminate */ |
4089 | }; | 4099 | }; |
4090 | 4100 | ||
@@ -4123,147 +4133,92 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) | |||
4123 | kfree(memcg->nodeinfo[node]); | 4133 | kfree(memcg->nodeinfo[node]); |
4124 | } | 4134 | } |
4125 | 4135 | ||
4126 | static struct mem_cgroup *mem_cgroup_alloc(void) | 4136 | static void mem_cgroup_free(struct mem_cgroup *memcg) |
4127 | { | ||
4128 | struct mem_cgroup *memcg; | ||
4129 | size_t size; | ||
4130 | |||
4131 | size = sizeof(struct mem_cgroup); | ||
4132 | size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); | ||
4133 | |||
4134 | memcg = kzalloc(size, GFP_KERNEL); | ||
4135 | if (!memcg) | ||
4136 | return NULL; | ||
4137 | |||
4138 | memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | ||
4139 | if (!memcg->stat) | ||
4140 | goto out_free; | ||
4141 | |||
4142 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) | ||
4143 | goto out_free_stat; | ||
4144 | |||
4145 | return memcg; | ||
4146 | |||
4147 | out_free_stat: | ||
4148 | free_percpu(memcg->stat); | ||
4149 | out_free: | ||
4150 | kfree(memcg); | ||
4151 | return NULL; | ||
4152 | } | ||
4153 | |||
4154 | /* | ||
4155 | * At destroying mem_cgroup, references from swap_cgroup can remain. | ||
4156 | * (scanning all at force_empty is too costly...) | ||
4157 | * | ||
4158 | * Instead of clearing all references at force_empty, we remember | ||
4159 | * the number of reference from swap_cgroup and free mem_cgroup when | ||
4160 | * it goes down to 0. | ||
4161 | * | ||
4162 | * Removal of cgroup itself succeeds regardless of refs from swap. | ||
4163 | */ | ||
4164 | |||
4165 | static void __mem_cgroup_free(struct mem_cgroup *memcg) | ||
4166 | { | 4137 | { |
4167 | int node; | 4138 | int node; |
4168 | 4139 | ||
4169 | cancel_work_sync(&memcg->high_work); | 4140 | memcg_wb_domain_exit(memcg); |
4170 | |||
4171 | mem_cgroup_remove_from_trees(memcg); | ||
4172 | |||
4173 | for_each_node(node) | 4141 | for_each_node(node) |
4174 | free_mem_cgroup_per_zone_info(memcg, node); | 4142 | free_mem_cgroup_per_zone_info(memcg, node); |
4175 | |||
4176 | free_percpu(memcg->stat); | 4143 | free_percpu(memcg->stat); |
4177 | memcg_wb_domain_exit(memcg); | ||
4178 | kfree(memcg); | 4144 | kfree(memcg); |
4179 | } | 4145 | } |
4180 | 4146 | ||
4181 | static struct cgroup_subsys_state * __ref | 4147 | static struct mem_cgroup *mem_cgroup_alloc(void) |
4182 | mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | ||
4183 | { | 4148 | { |
4184 | struct mem_cgroup *memcg; | 4149 | struct mem_cgroup *memcg; |
4185 | long error = -ENOMEM; | 4150 | size_t size; |
4186 | int node; | 4151 | int node; |
4187 | 4152 | ||
4188 | memcg = mem_cgroup_alloc(); | 4153 | size = sizeof(struct mem_cgroup); |
4154 | size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); | ||
4155 | |||
4156 | memcg = kzalloc(size, GFP_KERNEL); | ||
4189 | if (!memcg) | 4157 | if (!memcg) |
4190 | return ERR_PTR(error); | 4158 | return NULL; |
4159 | |||
4160 | memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | ||
4161 | if (!memcg->stat) | ||
4162 | goto fail; | ||
4191 | 4163 | ||
4192 | for_each_node(node) | 4164 | for_each_node(node) |
4193 | if (alloc_mem_cgroup_per_zone_info(memcg, node)) | 4165 | if (alloc_mem_cgroup_per_zone_info(memcg, node)) |
4194 | goto free_out; | 4166 | goto fail; |
4195 | 4167 | ||
4196 | /* root ? */ | 4168 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) |
4197 | if (parent_css == NULL) { | 4169 | goto fail; |
4198 | root_mem_cgroup = memcg; | ||
4199 | page_counter_init(&memcg->memory, NULL); | ||
4200 | memcg->high = PAGE_COUNTER_MAX; | ||
4201 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
4202 | page_counter_init(&memcg->memsw, NULL); | ||
4203 | page_counter_init(&memcg->kmem, NULL); | ||
4204 | } | ||
4205 | 4170 | ||
4206 | INIT_WORK(&memcg->high_work, high_work_func); | 4171 | INIT_WORK(&memcg->high_work, high_work_func); |
4207 | memcg->last_scanned_node = MAX_NUMNODES; | 4172 | memcg->last_scanned_node = MAX_NUMNODES; |
4208 | INIT_LIST_HEAD(&memcg->oom_notify); | 4173 | INIT_LIST_HEAD(&memcg->oom_notify); |
4209 | memcg->move_charge_at_immigrate = 0; | ||
4210 | mutex_init(&memcg->thresholds_lock); | 4174 | mutex_init(&memcg->thresholds_lock); |
4211 | spin_lock_init(&memcg->move_lock); | 4175 | spin_lock_init(&memcg->move_lock); |
4212 | vmpressure_init(&memcg->vmpressure); | 4176 | vmpressure_init(&memcg->vmpressure); |
4213 | INIT_LIST_HEAD(&memcg->event_list); | 4177 | INIT_LIST_HEAD(&memcg->event_list); |
4214 | spin_lock_init(&memcg->event_list_lock); | 4178 | spin_lock_init(&memcg->event_list_lock); |
4215 | #ifdef CONFIG_MEMCG_KMEM | 4179 | memcg->socket_pressure = jiffies; |
4180 | #ifndef CONFIG_SLOB | ||
4216 | memcg->kmemcg_id = -1; | 4181 | memcg->kmemcg_id = -1; |
4217 | #endif | 4182 | #endif |
4218 | #ifdef CONFIG_CGROUP_WRITEBACK | 4183 | #ifdef CONFIG_CGROUP_WRITEBACK |
4219 | INIT_LIST_HEAD(&memcg->cgwb_list); | 4184 | INIT_LIST_HEAD(&memcg->cgwb_list); |
4220 | #endif | 4185 | #endif |
4221 | #ifdef CONFIG_INET | 4186 | return memcg; |
4222 | memcg->socket_pressure = jiffies; | 4187 | fail: |
4223 | #endif | 4188 | mem_cgroup_free(memcg); |
4224 | return &memcg->css; | 4189 | return NULL; |
4225 | |||
4226 | free_out: | ||
4227 | __mem_cgroup_free(memcg); | ||
4228 | return ERR_PTR(error); | ||
4229 | } | 4190 | } |
4230 | 4191 | ||
4231 | static int | 4192 | static struct cgroup_subsys_state * __ref |
4232 | mem_cgroup_css_online(struct cgroup_subsys_state *css) | 4193 | mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) |
4233 | { | 4194 | { |
4234 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 4195 | struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); |
4235 | struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); | 4196 | struct mem_cgroup *memcg; |
4236 | int ret; | 4197 | long error = -ENOMEM; |
4237 | |||
4238 | if (css->id > MEM_CGROUP_ID_MAX) | ||
4239 | return -ENOSPC; | ||
4240 | |||
4241 | if (!parent) | ||
4242 | return 0; | ||
4243 | |||
4244 | mutex_lock(&memcg_create_mutex); | ||
4245 | 4198 | ||
4246 | memcg->use_hierarchy = parent->use_hierarchy; | 4199 | memcg = mem_cgroup_alloc(); |
4247 | memcg->oom_kill_disable = parent->oom_kill_disable; | 4200 | if (!memcg) |
4248 | memcg->swappiness = mem_cgroup_swappiness(parent); | 4201 | return ERR_PTR(error); |
4249 | 4202 | ||
4250 | if (parent->use_hierarchy) { | 4203 | memcg->high = PAGE_COUNTER_MAX; |
4204 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
4205 | if (parent) { | ||
4206 | memcg->swappiness = mem_cgroup_swappiness(parent); | ||
4207 | memcg->oom_kill_disable = parent->oom_kill_disable; | ||
4208 | } | ||
4209 | if (parent && parent->use_hierarchy) { | ||
4210 | memcg->use_hierarchy = true; | ||
4251 | page_counter_init(&memcg->memory, &parent->memory); | 4211 | page_counter_init(&memcg->memory, &parent->memory); |
4252 | memcg->high = PAGE_COUNTER_MAX; | 4212 | page_counter_init(&memcg->swap, &parent->swap); |
4253 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
4254 | page_counter_init(&memcg->memsw, &parent->memsw); | 4213 | page_counter_init(&memcg->memsw, &parent->memsw); |
4255 | page_counter_init(&memcg->kmem, &parent->kmem); | 4214 | page_counter_init(&memcg->kmem, &parent->kmem); |
4256 | 4215 | page_counter_init(&memcg->tcpmem, &parent->tcpmem); | |
4257 | /* | ||
4258 | * No need to take a reference to the parent because cgroup | ||
4259 | * core guarantees its existence. | ||
4260 | */ | ||
4261 | } else { | 4216 | } else { |
4262 | page_counter_init(&memcg->memory, NULL); | 4217 | page_counter_init(&memcg->memory, NULL); |
4263 | memcg->high = PAGE_COUNTER_MAX; | 4218 | page_counter_init(&memcg->swap, NULL); |
4264 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
4265 | page_counter_init(&memcg->memsw, NULL); | 4219 | page_counter_init(&memcg->memsw, NULL); |
4266 | page_counter_init(&memcg->kmem, NULL); | 4220 | page_counter_init(&memcg->kmem, NULL); |
4221 | page_counter_init(&memcg->tcpmem, NULL); | ||
4267 | /* | 4222 | /* |
4268 | * Deeper hierarchy with use_hierarchy == false doesn't make | 4223 | * Deeper hierarchy with use_hierarchy == false doesn't make
4269 | * much sense so let cgroup subsystem know about this | 4224 | * much sense so let cgroup subsystem know about this |
@@ -4272,23 +4227,31 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
4272 | if (parent != root_mem_cgroup) | 4227 | if (parent != root_mem_cgroup) |
4273 | memory_cgrp_subsys.broken_hierarchy = true; | 4228 | memory_cgrp_subsys.broken_hierarchy = true; |
4274 | } | 4229 | } |
4275 | mutex_unlock(&memcg_create_mutex); | ||
4276 | 4230 | ||
4277 | ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); | 4231 | /* The following stuff does not apply to the root */ |
4278 | if (ret) | 4232 | if (!parent) { |
4279 | return ret; | 4233 | root_mem_cgroup = memcg; |
4234 | return &memcg->css; | ||
4235 | } | ||
4236 | |||
4237 | error = memcg_propagate_kmem(parent, memcg); | ||
4238 | if (error) | ||
4239 | goto fail; | ||
4280 | 4240 | ||
4281 | #ifdef CONFIG_INET | ||
4282 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) | 4241 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) |
4283 | static_branch_inc(&memcg_sockets_enabled_key); | 4242 | static_branch_inc(&memcg_sockets_enabled_key); |
4284 | #endif | ||
4285 | 4243 | ||
4286 | /* | 4244 | return &memcg->css; |
4287 | * Make sure the memcg is initialized: mem_cgroup_iter() | 4245 | fail: |
4288 | * orders reading memcg->initialized against its callers | 4246 | mem_cgroup_free(memcg); |
4289 | * reading the memcg members. | 4247 | return NULL; |
4290 | */ | 4248 | } |
4291 | smp_store_release(&memcg->initialized, 1); | 4249 | |
4250 | static int | ||
4251 | mem_cgroup_css_online(struct cgroup_subsys_state *css) | ||
4252 | { | ||
4253 | if (css->id > MEM_CGROUP_ID_MAX) | ||
4254 | return -ENOSPC; | ||
4292 | 4255 | ||
4293 | return 0; | 4256 | return 0; |
4294 | } | 4257 | } |
@@ -4310,10 +4273,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
4310 | } | 4273 | } |
4311 | spin_unlock(&memcg->event_list_lock); | 4274 | spin_unlock(&memcg->event_list_lock); |
4312 | 4275 | ||
4313 | vmpressure_cleanup(&memcg->vmpressure); | 4276 | memcg_offline_kmem(memcg); |
4314 | |||
4315 | memcg_deactivate_kmem(memcg); | ||
4316 | |||
4317 | wb_memcg_offline(memcg); | 4277 | wb_memcg_offline(memcg); |
4318 | } | 4278 | } |
4319 | 4279 | ||
@@ -4328,12 +4288,17 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) | |||
4328 | { | 4288 | { |
4329 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 4289 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
4330 | 4290 | ||
4331 | memcg_destroy_kmem(memcg); | ||
4332 | #ifdef CONFIG_INET | ||
4333 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) | 4291 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) |
4334 | static_branch_dec(&memcg_sockets_enabled_key); | 4292 | static_branch_dec(&memcg_sockets_enabled_key); |
4335 | #endif | 4293 | |
4336 | __mem_cgroup_free(memcg); | 4294 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) |
4295 | static_branch_dec(&memcg_sockets_enabled_key); | ||
4296 | |||
4297 | vmpressure_cleanup(&memcg->vmpressure); | ||
4298 | cancel_work_sync(&memcg->high_work); | ||
4299 | mem_cgroup_remove_from_trees(memcg); | ||
4300 | memcg_free_kmem(memcg); | ||
4301 | mem_cgroup_free(memcg); | ||
4337 | } | 4302 | } |
4338 | 4303 | ||
4339 | /** | 4304 | /** |
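The reworked allocation path above handles the root cgroup inline and funnels every later error (such as kmem propagation failing) through a single fail label that frees the half-built memcg. A minimal userspace sketch of that single-exit unwind pattern; every name below is invented for illustration, none is a kernel symbol:

#include <stdio.h>
#include <stdlib.h>

struct thing { int *buf; };

/* Allocate in steps; any failure jumps to one label that undoes everything. */
static struct thing *thing_alloc(int fail_second_step)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->buf = malloc(128);
	if (!t->buf)
		goto fail;
	if (fail_second_step)		/* stand-in for a later init step failing */
		goto fail;
	return t;
fail:
	free(t->buf);			/* free(NULL) is harmless */
	free(t);
	return NULL;
}

int main(void)
{
	printf("%p\n", (void *)thing_alloc(1));	/* error path: prints a null pointer */
	return 0;
}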
@@ -5143,6 +5108,59 @@ static int memory_events_show(struct seq_file *m, void *v) | |||
5143 | return 0; | 5108 | return 0; |
5144 | } | 5109 | } |
5145 | 5110 | ||
5111 | static int memory_stat_show(struct seq_file *m, void *v) | ||
5112 | { | ||
5113 | struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); | ||
5114 | int i; | ||
5115 | |||
5116 | /* | ||
5117 | * Provide statistics on the state of the memory subsystem as | ||
5118 | * well as cumulative event counters that show past behavior. | ||
5119 | * | ||
5120 | * This list is ordered following a combination of these gradients: | ||
5121 | * 1) generic big picture -> specifics and details | ||
5122 | * 2) reflecting userspace activity -> reflecting kernel heuristics | ||
5123 | * | ||
5124 | * Current memory state: | ||
5125 | */ | ||
5126 | |||
5127 | seq_printf(m, "anon %llu\n", | ||
5128 | (u64)tree_stat(memcg, MEM_CGROUP_STAT_RSS) * PAGE_SIZE); | ||
5129 | seq_printf(m, "file %llu\n", | ||
5130 | (u64)tree_stat(memcg, MEM_CGROUP_STAT_CACHE) * PAGE_SIZE); | ||
5131 | seq_printf(m, "sock %llu\n", | ||
5132 | (u64)tree_stat(memcg, MEMCG_SOCK) * PAGE_SIZE); | ||
5133 | |||
5134 | seq_printf(m, "file_mapped %llu\n", | ||
5135 | (u64)tree_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED) * | ||
5136 | PAGE_SIZE); | ||
5137 | seq_printf(m, "file_dirty %llu\n", | ||
5138 | (u64)tree_stat(memcg, MEM_CGROUP_STAT_DIRTY) * | ||
5139 | PAGE_SIZE); | ||
5140 | seq_printf(m, "file_writeback %llu\n", | ||
5141 | (u64)tree_stat(memcg, MEM_CGROUP_STAT_WRITEBACK) * | ||
5142 | PAGE_SIZE); | ||
5143 | |||
5144 | for (i = 0; i < NR_LRU_LISTS; i++) { | ||
5145 | struct mem_cgroup *mi; | ||
5146 | unsigned long val = 0; | ||
5147 | |||
5148 | for_each_mem_cgroup_tree(mi, memcg) | ||
5149 | val += mem_cgroup_nr_lru_pages(mi, BIT(i)); | ||
5150 | seq_printf(m, "%s %llu\n", | ||
5151 | mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); | ||
5152 | } | ||
5153 | |||
5154 | /* Accumulated memory events */ | ||
5155 | |||
5156 | seq_printf(m, "pgfault %lu\n", | ||
5157 | tree_events(memcg, MEM_CGROUP_EVENTS_PGFAULT)); | ||
5158 | seq_printf(m, "pgmajfault %lu\n", | ||
5159 | tree_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT)); | ||
5160 | |||
5161 | return 0; | ||
5162 | } | ||
5163 | |||
5146 | static struct cftype memory_files[] = { | 5164 | static struct cftype memory_files[] = { |
5147 | { | 5165 | { |
5148 | .name = "current", | 5166 | .name = "current", |
@@ -5173,6 +5191,11 @@ static struct cftype memory_files[] = { | |||
5173 | .file_offset = offsetof(struct mem_cgroup, events_file), | 5191 | .file_offset = offsetof(struct mem_cgroup, events_file), |
5174 | .seq_show = memory_events_show, | 5192 | .seq_show = memory_events_show, |
5175 | }, | 5193 | }, |
5194 | { | ||
5195 | .name = "stat", | ||
5196 | .flags = CFTYPE_NOT_ON_ROOT, | ||
5197 | .seq_show = memory_stat_show, | ||
5198 | }, | ||
5176 | { } /* terminate */ | 5199 | { } /* terminate */ |
5177 | }; | 5200 | }; |
5178 | 5201 | ||
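The new memory.stat file prints one "name value" pair per line, with sizes already scaled to bytes by PAGE_SIZE. A rough userspace reader, assuming cgroup2 is mounted at /sys/fs/cgroup and a child group called "test" exists (both are assumptions, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/test/memory.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f) {
		perror("memory.stat");
		return 1;
	}
	/* pick out a few of the counters added by this series */
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "anon") || !strcmp(key, "file") ||
		    !strcmp(key, "sock"))
			printf("%s = %llu bytes\n", key, val);
	}
	fclose(f);
	return 0;
}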
@@ -5269,7 +5292,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, | |||
5269 | if (page->mem_cgroup) | 5292 | if (page->mem_cgroup) |
5270 | goto out; | 5293 | goto out; |
5271 | 5294 | ||
5272 | if (do_memsw_account()) { | 5295 | if (do_swap_account) { |
5273 | swp_entry_t ent = { .val = page_private(page), }; | 5296 | swp_entry_t ent = { .val = page_private(page), }; |
5274 | unsigned short id = lookup_swap_cgroup_id(ent); | 5297 | unsigned short id = lookup_swap_cgroup_id(ent); |
5275 | 5298 | ||
@@ -5504,7 +5527,8 @@ void mem_cgroup_uncharge_list(struct list_head *page_list) | |||
5504 | void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) | 5527 | void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) |
5505 | { | 5528 | { |
5506 | struct mem_cgroup *memcg; | 5529 | struct mem_cgroup *memcg; |
5507 | int isolated; | 5530 | unsigned int nr_pages; |
5531 | bool compound; | ||
5508 | 5532 | ||
5509 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); | 5533 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); |
5510 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); | 5534 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); |
@@ -5524,14 +5548,22 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) | |||
5524 | if (!memcg) | 5548 | if (!memcg) |
5525 | return; | 5549 | return; |
5526 | 5550 | ||
5527 | lock_page_lru(oldpage, &isolated); | 5551 | /* Force-charge the new page. The old one will be freed soon */ |
5528 | oldpage->mem_cgroup = NULL; | 5552 | compound = PageTransHuge(newpage); |
5529 | unlock_page_lru(oldpage, isolated); | 5553 | nr_pages = compound ? hpage_nr_pages(newpage) : 1; |
5554 | |||
5555 | page_counter_charge(&memcg->memory, nr_pages); | ||
5556 | if (do_memsw_account()) | ||
5557 | page_counter_charge(&memcg->memsw, nr_pages); | ||
5558 | css_get_many(&memcg->css, nr_pages); | ||
5530 | 5559 | ||
5531 | commit_charge(newpage, memcg, true); | 5560 | commit_charge(newpage, memcg, true); |
5532 | } | ||
5533 | 5561 | ||
5534 | #ifdef CONFIG_INET | 5562 | local_irq_disable(); |
5563 | mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); | ||
5564 | memcg_check_events(memcg, newpage); | ||
5565 | local_irq_enable(); | ||
5566 | } | ||
5535 | 5567 | ||
5536 | DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); | 5568 | DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); |
5537 | EXPORT_SYMBOL(memcg_sockets_enabled_key); | 5569 | EXPORT_SYMBOL(memcg_sockets_enabled_key); |
@@ -5558,10 +5590,8 @@ void sock_update_memcg(struct sock *sk) | |||
5558 | memcg = mem_cgroup_from_task(current); | 5590 | memcg = mem_cgroup_from_task(current); |
5559 | if (memcg == root_mem_cgroup) | 5591 | if (memcg == root_mem_cgroup) |
5560 | goto out; | 5592 | goto out; |
5561 | #ifdef CONFIG_MEMCG_KMEM | 5593 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) |
5562 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active) | ||
5563 | goto out; | 5594 | goto out; |
5564 | #endif | ||
5565 | if (css_tryget_online(&memcg->css)) | 5595 | if (css_tryget_online(&memcg->css)) |
5566 | sk->sk_memcg = memcg; | 5596 | sk->sk_memcg = memcg; |
5567 | out: | 5597 | out: |
@@ -5587,24 +5617,24 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | |||
5587 | { | 5617 | { |
5588 | gfp_t gfp_mask = GFP_KERNEL; | 5618 | gfp_t gfp_mask = GFP_KERNEL; |
5589 | 5619 | ||
5590 | #ifdef CONFIG_MEMCG_KMEM | ||
5591 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { | 5620 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { |
5592 | struct page_counter *counter; | 5621 | struct page_counter *fail; |
5593 | 5622 | ||
5594 | if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated, | 5623 | if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { |
5595 | nr_pages, &counter)) { | 5624 | memcg->tcpmem_pressure = 0; |
5596 | memcg->tcp_mem.memory_pressure = 0; | ||
5597 | return true; | 5625 | return true; |
5598 | } | 5626 | } |
5599 | page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages); | 5627 | page_counter_charge(&memcg->tcpmem, nr_pages); |
5600 | memcg->tcp_mem.memory_pressure = 1; | 5628 | memcg->tcpmem_pressure = 1; |
5601 | return false; | 5629 | return false; |
5602 | } | 5630 | } |
5603 | #endif | 5631 | |
5604 | /* Don't block in the packet receive path */ | 5632 | /* Don't block in the packet receive path */ |
5605 | if (in_softirq()) | 5633 | if (in_softirq()) |
5606 | gfp_mask = GFP_NOWAIT; | 5634 | gfp_mask = GFP_NOWAIT; |
5607 | 5635 | ||
5636 | this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages); | ||
5637 | |||
5608 | if (try_charge(memcg, gfp_mask, nr_pages) == 0) | 5638 | if (try_charge(memcg, gfp_mask, nr_pages) == 0) |
5609 | return true; | 5639 | return true; |
5610 | 5640 | ||
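On the legacy hierarchy the socket path still charges the dedicated tcpmem counter: if the try-charge fails, the counter is deliberately overcharged anyway and the pressure flag is raised so the caller throttles. A toy model of that branch; the struct and helpers are simplified stand-ins, not the kernel's page_counter API:

#include <stdbool.h>
#include <stdio.h>

struct counter { unsigned long usage, limit; };

static bool try_charge(struct counter *c, unsigned long n)
{
	if (c->usage + n > c->limit)
		return false;
	c->usage += n;
	return true;
}

/* Mirrors the !cgroup_subsys_on_dfl() branch of mem_cgroup_charge_skmem():
 * charge if there is room, otherwise overcharge but report failure so the
 * socket code enters its memory-pressure handling. */
static bool charge_skmem_like(struct counter *tcpmem, unsigned long nr_pages,
			      int *pressure)
{
	if (try_charge(tcpmem, nr_pages)) {
		*pressure = 0;
		return true;
	}
	tcpmem->usage += nr_pages;	/* force charge above the limit */
	*pressure = 1;
	return false;
}

int main(void)
{
	struct counter c = { .usage = 0, .limit = 4 };
	int pressure = 0;

	printf("%d\n", charge_skmem_like(&c, 3, &pressure));	/* 1: fits */
	printf("%d\n", charge_skmem_like(&c, 3, &pressure));	/* 0: overcharged */
	printf("usage=%lu pressure=%d\n", c.usage, pressure);
	return 0;
}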
@@ -5619,19 +5649,17 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | |||
5619 | */ | 5649 | */ |
5620 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | 5650 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) |
5621 | { | 5651 | { |
5622 | #ifdef CONFIG_MEMCG_KMEM | ||
5623 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { | 5652 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { |
5624 | page_counter_uncharge(&memcg->tcp_mem.memory_allocated, | 5653 | page_counter_uncharge(&memcg->tcpmem, nr_pages); |
5625 | nr_pages); | ||
5626 | return; | 5654 | return; |
5627 | } | 5655 | } |
5628 | #endif | 5656 | |
5657 | this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages); | ||
5658 | |||
5629 | page_counter_uncharge(&memcg->memory, nr_pages); | 5659 | page_counter_uncharge(&memcg->memory, nr_pages); |
5630 | css_put_many(&memcg->css, nr_pages); | 5660 | css_put_many(&memcg->css, nr_pages); |
5631 | } | 5661 | } |
5632 | 5662 | ||
5633 | #endif /* CONFIG_INET */ | ||
5634 | |||
5635 | static int __init cgroup_memory(char *s) | 5663 | static int __init cgroup_memory(char *s) |
5636 | { | 5664 | { |
5637 | char *token; | 5665 | char *token; |
@@ -5641,6 +5669,8 @@ static int __init cgroup_memory(char *s) | |||
5641 | continue; | 5669 | continue; |
5642 | if (!strcmp(token, "nosocket")) | 5670 | if (!strcmp(token, "nosocket")) |
5643 | cgroup_memory_nosocket = true; | 5671 | cgroup_memory_nosocket = true; |
5672 | if (!strcmp(token, "nokmem")) | ||
5673 | cgroup_memory_nokmem = true; | ||
5644 | } | 5674 | } |
5645 | return 0; | 5675 | return 0; |
5646 | } | 5676 | } |
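Booting with cgroup_memory=nokmem (optionally combined with nosocket, e.g. cgroup_memory=nosocket,nokmem) disables kernel memory accounting on the unified hierarchy. The handler simply walks the comma-separated value with strsep(); a userspace mock of that parsing, with invented flag variables standing in for the kernel globals:

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool nosocket, nokmem;

static void parse_cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			nosocket = true;
		if (!strcmp(token, "nokmem"))
			nokmem = true;
	}
}

int main(void)
{
	char arg[] = "nosocket,nokmem";

	parse_cgroup_memory(arg);
	printf("nosocket=%d nokmem=%d\n", nosocket, nokmem);
	return 0;
}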
@@ -5730,32 +5760,107 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | |||
5730 | memcg_check_events(memcg, page); | 5760 | memcg_check_events(memcg, page); |
5731 | } | 5761 | } |
5732 | 5762 | ||
5763 | /* | ||
5764 | * mem_cgroup_try_charge_swap - try charging a swap entry | ||
5765 | * @page: page being added to swap | ||
5766 | * @entry: swap entry to charge | ||
5767 | * | ||
5768 | * Try to charge @entry to the memcg that @page belongs to. | ||
5769 | * | ||
5770 | * Returns 0 on success, -ENOMEM on failure. | ||
5771 | */ | ||
5772 | int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) | ||
5773 | { | ||
5774 | struct mem_cgroup *memcg; | ||
5775 | struct page_counter *counter; | ||
5776 | unsigned short oldid; | ||
5777 | |||
5778 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) | ||
5779 | return 0; | ||
5780 | |||
5781 | memcg = page->mem_cgroup; | ||
5782 | |||
5783 | /* Readahead page, never charged */ | ||
5784 | if (!memcg) | ||
5785 | return 0; | ||
5786 | |||
5787 | if (!mem_cgroup_is_root(memcg) && | ||
5788 | !page_counter_try_charge(&memcg->swap, 1, &counter)) | ||
5789 | return -ENOMEM; | ||
5790 | |||
5791 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); | ||
5792 | VM_BUG_ON_PAGE(oldid, page); | ||
5793 | mem_cgroup_swap_statistics(memcg, true); | ||
5794 | |||
5795 | css_get(&memcg->css); | ||
5796 | return 0; | ||
5797 | } | ||
5798 | |||
5733 | /** | 5799 | /** |
5734 | * mem_cgroup_uncharge_swap - uncharge a swap entry | 5800 | * mem_cgroup_uncharge_swap - uncharge a swap entry |
5735 | * @entry: swap entry to uncharge | 5801 | * @entry: swap entry to uncharge |
5736 | * | 5802 | * |
5737 | * Drop the memsw charge associated with @entry. | 5803 | * Drop the swap charge associated with @entry. |
5738 | */ | 5804 | */ |
5739 | void mem_cgroup_uncharge_swap(swp_entry_t entry) | 5805 | void mem_cgroup_uncharge_swap(swp_entry_t entry) |
5740 | { | 5806 | { |
5741 | struct mem_cgroup *memcg; | 5807 | struct mem_cgroup *memcg; |
5742 | unsigned short id; | 5808 | unsigned short id; |
5743 | 5809 | ||
5744 | if (!do_memsw_account()) | 5810 | if (!do_swap_account) |
5745 | return; | 5811 | return; |
5746 | 5812 | ||
5747 | id = swap_cgroup_record(entry, 0); | 5813 | id = swap_cgroup_record(entry, 0); |
5748 | rcu_read_lock(); | 5814 | rcu_read_lock(); |
5749 | memcg = mem_cgroup_from_id(id); | 5815 | memcg = mem_cgroup_from_id(id); |
5750 | if (memcg) { | 5816 | if (memcg) { |
5751 | if (!mem_cgroup_is_root(memcg)) | 5817 | if (!mem_cgroup_is_root(memcg)) { |
5752 | page_counter_uncharge(&memcg->memsw, 1); | 5818 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
5819 | page_counter_uncharge(&memcg->swap, 1); | ||
5820 | else | ||
5821 | page_counter_uncharge(&memcg->memsw, 1); | ||
5822 | } | ||
5753 | mem_cgroup_swap_statistics(memcg, false); | 5823 | mem_cgroup_swap_statistics(memcg, false); |
5754 | css_put(&memcg->css); | 5824 | css_put(&memcg->css); |
5755 | } | 5825 | } |
5756 | rcu_read_unlock(); | 5826 | rcu_read_unlock(); |
5757 | } | 5827 | } |
5758 | 5828 | ||
5829 | long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) | ||
5830 | { | ||
5831 | long nr_swap_pages = get_nr_swap_pages(); | ||
5832 | |||
5833 | if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) | ||
5834 | return nr_swap_pages; | ||
5835 | for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) | ||
5836 | nr_swap_pages = min_t(long, nr_swap_pages, | ||
5837 | READ_ONCE(memcg->swap.limit) - | ||
5838 | page_counter_read(&memcg->swap)); | ||
5839 | return nr_swap_pages; | ||
5840 | } | ||
5841 | |||
5842 | bool mem_cgroup_swap_full(struct page *page) | ||
5843 | { | ||
5844 | struct mem_cgroup *memcg; | ||
5845 | |||
5846 | VM_BUG_ON_PAGE(!PageLocked(page), page); | ||
5847 | |||
5848 | if (vm_swap_full()) | ||
5849 | return true; | ||
5850 | if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) | ||
5851 | return false; | ||
5852 | |||
5853 | memcg = page->mem_cgroup; | ||
5854 | if (!memcg) | ||
5855 | return false; | ||
5856 | |||
5857 | for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) | ||
5858 | if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit) | ||
5859 | return true; | ||
5860 | |||
5861 | return false; | ||
5862 | } | ||
5863 | |||
5759 | /* for remembering the boot option */ | 5864 | /* for remembering the boot option */ |
5760 | #ifdef CONFIG_MEMCG_SWAP_ENABLED | 5865 | #ifdef CONFIG_MEMCG_SWAP_ENABLED |
5761 | static int really_do_swap_account __initdata = 1; | 5866 | static int really_do_swap_account __initdata = 1; |
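mem_cgroup_get_nr_swap_pages() clamps the globally free swap by the remaining headroom at every ancestor, and mem_cgroup_swap_full() treats a page's cgroup as "swap full" once any ancestor has used at least half of its swap limit. A toy model of both ancestor walks; the types and numbers are illustrative, not the kernel's:

#include <stdio.h>

struct group {
	struct group *parent;
	unsigned long swap_usage;
	unsigned long swap_limit;
};

/* Each level can only lower the headroom inherited from the global pool. */
static long swap_headroom(struct group *g, long global_free)
{
	long room = global_free;

	for (; g; g = g->parent) {
		long local = (long)(g->swap_limit - g->swap_usage);

		if (local < room)
			room = local;
	}
	return room;
}

/* True once usage reaches half the limit anywhere up the hierarchy. */
static int swap_full(struct group *g)
{
	for (; g; g = g->parent)
		if (g->swap_usage * 2 >= g->swap_limit)
			return 1;
	return 0;
}

int main(void)
{
	struct group root = { NULL, 10, 1000 };
	struct group child = { &root, 60, 100 };

	printf("headroom=%ld full=%d\n", swap_headroom(&child, 500),
	       swap_full(&child));	/* headroom=40 full=1 */
	return 0;
}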
@@ -5773,6 +5878,63 @@ static int __init enable_swap_account(char *s) | |||
5773 | } | 5878 | } |
5774 | __setup("swapaccount=", enable_swap_account); | 5879 | __setup("swapaccount=", enable_swap_account); |
5775 | 5880 | ||
5881 | static u64 swap_current_read(struct cgroup_subsys_state *css, | ||
5882 | struct cftype *cft) | ||
5883 | { | ||
5884 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | ||
5885 | |||
5886 | return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; | ||
5887 | } | ||
5888 | |||
5889 | static int swap_max_show(struct seq_file *m, void *v) | ||
5890 | { | ||
5891 | struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); | ||
5892 | unsigned long max = READ_ONCE(memcg->swap.limit); | ||
5893 | |||
5894 | if (max == PAGE_COUNTER_MAX) | ||
5895 | seq_puts(m, "max\n"); | ||
5896 | else | ||
5897 | seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); | ||
5898 | |||
5899 | return 0; | ||
5900 | } | ||
5901 | |||
5902 | static ssize_t swap_max_write(struct kernfs_open_file *of, | ||
5903 | char *buf, size_t nbytes, loff_t off) | ||
5904 | { | ||
5905 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | ||
5906 | unsigned long max; | ||
5907 | int err; | ||
5908 | |||
5909 | buf = strstrip(buf); | ||
5910 | err = page_counter_memparse(buf, "max", &max); | ||
5911 | if (err) | ||
5912 | return err; | ||
5913 | |||
5914 | mutex_lock(&memcg_limit_mutex); | ||
5915 | err = page_counter_limit(&memcg->swap, max); | ||
5916 | mutex_unlock(&memcg_limit_mutex); | ||
5917 | if (err) | ||
5918 | return err; | ||
5919 | |||
5920 | return nbytes; | ||
5921 | } | ||
5922 | |||
5923 | static struct cftype swap_files[] = { | ||
5924 | { | ||
5925 | .name = "swap.current", | ||
5926 | .flags = CFTYPE_NOT_ON_ROOT, | ||
5927 | .read_u64 = swap_current_read, | ||
5928 | }, | ||
5929 | { | ||
5930 | .name = "swap.max", | ||
5931 | .flags = CFTYPE_NOT_ON_ROOT, | ||
5932 | .seq_show = swap_max_show, | ||
5933 | .write = swap_max_write, | ||
5934 | }, | ||
5935 | { } /* terminate */ | ||
5936 | }; | ||
5937 | |||
5776 | static struct cftype memsw_cgroup_files[] = { | 5938 | static struct cftype memsw_cgroup_files[] = { |
5777 | { | 5939 | { |
5778 | .name = "memsw.usage_in_bytes", | 5940 | .name = "memsw.usage_in_bytes", |
@@ -5804,6 +5966,8 @@ static int __init mem_cgroup_swap_init(void) | |||
5804 | { | 5966 | { |
5805 | if (!mem_cgroup_disabled() && really_do_swap_account) { | 5967 | if (!mem_cgroup_disabled() && really_do_swap_account) { |
5806 | do_swap_account = 1; | 5968 | do_swap_account = 1; |
5969 | WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, | ||
5970 | swap_files)); | ||
5807 | WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, | 5971 | WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, |
5808 | memsw_cgroup_files)); | 5972 | memsw_cgroup_files)); |
5809 | } | 5973 | } |
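With swap accounting enabled, the unified hierarchy now also gets memory.swap.current and memory.swap.max; the latter accepts a byte value or the literal string "max", parsed by page_counter_memparse() and stored in pages. A small usage sketch, again assuming cgroup2 is mounted at /sys/fs/cgroup with a child group named "test":

#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned long long cur;

	f = fopen("/sys/fs/cgroup/test/memory.swap.max", "w");
	if (!f) {
		perror("memory.swap.max");
		return 1;
	}
	fprintf(f, "%llu\n", 64ULL << 20);	/* cap this group's swap at 64 MiB */
	fclose(f);

	f = fopen("/sys/fs/cgroup/test/memory.swap.current", "r");
	if (!f) {
		perror("memory.swap.current");
		return 1;
	}
	if (fscanf(f, "%llu", &cur) == 1)
		printf("swap in use: %llu bytes\n", cur);
	fclose(f);
	return 0;
}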
diff --git a/mm/memory.c b/mm/memory.c index ff17850a52d9..30991f83d0bf 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2582,7 +2582,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2582 | } | 2582 | } |
2583 | 2583 | ||
2584 | swap_free(entry); | 2584 | swap_free(entry); |
2585 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) | 2585 | if (mem_cgroup_swap_full(page) || |
2586 | (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) | ||
2586 | try_to_free_swap(page); | 2587 | try_to_free_swap(page); |
2587 | unlock_page(page); | 2588 | unlock_page(page); |
2588 | if (page != swapcache) { | 2589 | if (page != swapcache) { |
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index e88d071648c2..5d453e58ddbf 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c | |||
@@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, | |||
194 | goto free_proc_pages; | 194 | goto free_proc_pages; |
195 | } | 195 | } |
196 | 196 | ||
197 | mm = mm_access(task, PTRACE_MODE_ATTACH); | 197 | mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); |
198 | if (!mm || IS_ERR(mm)) { | 198 | if (!mm || IS_ERR(mm)) { |
199 | rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; | 199 | rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; |
200 | /* | 200 | /* |
diff --git a/mm/shmem.c b/mm/shmem.c index b98e1011858c..fa2ceb2d2655 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -912,6 +912,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
912 | if (!swap.val) | 912 | if (!swap.val) |
913 | goto redirty; | 913 | goto redirty; |
914 | 914 | ||
915 | if (mem_cgroup_try_charge_swap(page, swap)) | ||
916 | goto free_swap; | ||
917 | |||
915 | /* | 918 | /* |
916 | * Add inode to shmem_unuse()'s list of swapped-out inodes, | 919 | * Add inode to shmem_unuse()'s list of swapped-out inodes, |
917 | * if it's not already there. Do it now before the page is | 920 | * if it's not already there. Do it now before the page is |
@@ -940,6 +943,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
940 | } | 943 | } |
941 | 944 | ||
942 | mutex_unlock(&shmem_swaplist_mutex); | 945 | mutex_unlock(&shmem_swaplist_mutex); |
946 | free_swap: | ||
943 | swapcache_free(swap); | 947 | swapcache_free(swap); |
944 | redirty: | 948 | redirty: |
945 | set_page_dirty(page); | 949 | set_page_dirty(page); |
@@ -173,7 +173,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
173 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 173 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
174 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 174 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
175 | 175 | ||
176 | #ifdef CONFIG_MEMCG_KMEM | 176 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
177 | /* | 177 | /* |
178 | * Iterate over all memcg caches of the given root cache. The caller must hold | 178 | * Iterate over all memcg caches of the given root cache. The caller must hold |
179 | * slab_mutex. | 179 | * slab_mutex. |
@@ -251,7 +251,7 @@ static __always_inline int memcg_charge_slab(struct page *page, | |||
251 | 251 | ||
252 | extern void slab_init_memcg_params(struct kmem_cache *); | 252 | extern void slab_init_memcg_params(struct kmem_cache *); |
253 | 253 | ||
254 | #else /* !CONFIG_MEMCG_KMEM */ | 254 | #else /* CONFIG_MEMCG && !CONFIG_SLOB */ |
255 | 255 | ||
256 | #define for_each_memcg_cache(iter, root) \ | 256 | #define for_each_memcg_cache(iter, root) \ |
257 | for ((void)(iter), (void)(root); 0; ) | 257 | for ((void)(iter), (void)(root); 0; ) |
@@ -292,7 +292,7 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order, | |||
292 | static inline void slab_init_memcg_params(struct kmem_cache *s) | 292 | static inline void slab_init_memcg_params(struct kmem_cache *s) |
293 | { | 293 | { |
294 | } | 294 | } |
295 | #endif /* CONFIG_MEMCG_KMEM */ | 295 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
296 | 296 | ||
297 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) | 297 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) |
298 | { | 298 | { |
diff --git a/mm/slab_common.c b/mm/slab_common.c index e016178063e1..b50aef01ccf7 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -128,7 +128,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | |||
128 | return i; | 128 | return i; |
129 | } | 129 | } |
130 | 130 | ||
131 | #ifdef CONFIG_MEMCG_KMEM | 131 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
132 | void slab_init_memcg_params(struct kmem_cache *s) | 132 | void slab_init_memcg_params(struct kmem_cache *s) |
133 | { | 133 | { |
134 | s->memcg_params.is_root_cache = true; | 134 | s->memcg_params.is_root_cache = true; |
@@ -221,7 +221,7 @@ static inline int init_memcg_params(struct kmem_cache *s, | |||
221 | static inline void destroy_memcg_params(struct kmem_cache *s) | 221 | static inline void destroy_memcg_params(struct kmem_cache *s) |
222 | { | 222 | { |
223 | } | 223 | } |
224 | #endif /* CONFIG_MEMCG_KMEM */ | 224 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * Find a mergeable slab cache | 227 | * Find a mergeable slab cache |
@@ -477,7 +477,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier) | |||
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | #ifdef CONFIG_MEMCG_KMEM | 480 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
481 | /* | 481 | /* |
482 | * memcg_create_kmem_cache - Create a cache for a memory cgroup. | 482 | * memcg_create_kmem_cache - Create a cache for a memory cgroup. |
483 | * @memcg: The memory cgroup the new cache is for. | 483 | * @memcg: The memory cgroup the new cache is for. |
@@ -503,10 +503,10 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, | |||
503 | mutex_lock(&slab_mutex); | 503 | mutex_lock(&slab_mutex); |
504 | 504 | ||
505 | /* | 505 | /* |
506 | * The memory cgroup could have been deactivated while the cache | 506 | * The memory cgroup could have been offlined while the cache |
507 | * creation work was pending. | 507 | * creation work was pending. |
508 | */ | 508 | */ |
509 | if (!memcg_kmem_is_active(memcg)) | 509 | if (!memcg_kmem_online(memcg)) |
510 | goto out_unlock; | 510 | goto out_unlock; |
511 | 511 | ||
512 | idx = memcg_cache_id(memcg); | 512 | idx = memcg_cache_id(memcg); |
@@ -689,7 +689,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s, | |||
689 | { | 689 | { |
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | #endif /* CONFIG_MEMCG_KMEM */ | 692 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
693 | 693 | ||
694 | void slab_kmem_cache_release(struct kmem_cache *s) | 694 | void slab_kmem_cache_release(struct kmem_cache *s) |
695 | { | 695 | { |
@@ -1123,7 +1123,7 @@ static int slab_show(struct seq_file *m, void *p) | |||
1123 | return 0; | 1123 | return 0; |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | #ifdef CONFIG_MEMCG_KMEM | 1126 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
1127 | int memcg_slab_show(struct seq_file *m, void *p) | 1127 | int memcg_slab_show(struct seq_file *m, void *p) |
1128 | { | 1128 | { |
1129 | struct kmem_cache *s = list_entry(p, struct kmem_cache, list); | 1129 | struct kmem_cache *s = list_entry(p, struct kmem_cache, list); |
@@ -5207,7 +5207,7 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
5207 | return -EIO; | 5207 | return -EIO; |
5208 | 5208 | ||
5209 | err = attribute->store(s, buf, len); | 5209 | err = attribute->store(s, buf, len); |
5210 | #ifdef CONFIG_MEMCG_KMEM | 5210 | #ifdef CONFIG_MEMCG |
5211 | if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { | 5211 | if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { |
5212 | struct kmem_cache *c; | 5212 | struct kmem_cache *c; |
5213 | 5213 | ||
@@ -5242,7 +5242,7 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
5242 | 5242 | ||
5243 | static void memcg_propagate_slab_attrs(struct kmem_cache *s) | 5243 | static void memcg_propagate_slab_attrs(struct kmem_cache *s) |
5244 | { | 5244 | { |
5245 | #ifdef CONFIG_MEMCG_KMEM | 5245 | #ifdef CONFIG_MEMCG |
5246 | int i; | 5246 | int i; |
5247 | char *buffer = NULL; | 5247 | char *buffer = NULL; |
5248 | struct kmem_cache *root_cache; | 5248 | struct kmem_cache *root_cache; |
@@ -5328,7 +5328,7 @@ static struct kset *slab_kset; | |||
5328 | 5328 | ||
5329 | static inline struct kset *cache_kset(struct kmem_cache *s) | 5329 | static inline struct kset *cache_kset(struct kmem_cache *s) |
5330 | { | 5330 | { |
5331 | #ifdef CONFIG_MEMCG_KMEM | 5331 | #ifdef CONFIG_MEMCG |
5332 | if (!is_root_cache(s)) | 5332 | if (!is_root_cache(s)) |
5333 | return s->memcg_params.root_cache->memcg_kset; | 5333 | return s->memcg_params.root_cache->memcg_kset; |
5334 | #endif | 5334 | #endif |
@@ -5405,7 +5405,7 @@ static int sysfs_slab_add(struct kmem_cache *s) | |||
5405 | if (err) | 5405 | if (err) |
5406 | goto out_del_kobj; | 5406 | goto out_del_kobj; |
5407 | 5407 | ||
5408 | #ifdef CONFIG_MEMCG_KMEM | 5408 | #ifdef CONFIG_MEMCG |
5409 | if (is_root_cache(s)) { | 5409 | if (is_root_cache(s)) { |
5410 | s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); | 5410 | s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); |
5411 | if (!s->memcg_kset) { | 5411 | if (!s->memcg_kset) { |
@@ -5438,7 +5438,7 @@ void sysfs_slab_remove(struct kmem_cache *s) | |||
5438 | */ | 5438 | */ |
5439 | return; | 5439 | return; |
5440 | 5440 | ||
5441 | #ifdef CONFIG_MEMCG_KMEM | 5441 | #ifdef CONFIG_MEMCG |
5442 | kset_unregister(s->memcg_kset); | 5442 | kset_unregister(s->memcg_kset); |
5443 | #endif | 5443 | #endif |
5444 | kobject_uevent(&s->kobj, KOBJ_REMOVE); | 5444 | kobject_uevent(&s->kobj, KOBJ_REMOVE); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 676ff2991380..69cb2464e7dc 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -170,6 +170,11 @@ int add_to_swap(struct page *page, struct list_head *list) | |||
170 | if (!entry.val) | 170 | if (!entry.val) |
171 | return 0; | 171 | return 0; |
172 | 172 | ||
173 | if (mem_cgroup_try_charge_swap(page, entry)) { | ||
174 | swapcache_free(entry); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
173 | if (unlikely(PageTransHuge(page))) | 178 | if (unlikely(PageTransHuge(page))) |
174 | if (unlikely(split_huge_page_to_list(page, list))) { | 179 | if (unlikely(split_huge_page_to_list(page, list))) { |
175 | swapcache_free(entry); | 180 | swapcache_free(entry); |
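add_to_swap() above (and shmem_writepage() before it) now charges the swap entry to the page's cgroup right after allocating the slot, and releases the slot again when the charge fails so the page simply stays in memory. A minimal sketch of that charge-then-roll-back shape; the helper names are invented and the success/failure convention is simplified to a bool:

#include <stdbool.h>
#include <stdio.h>

static bool charge_swap(int entry)
{
	(void)entry;
	return false;			/* pretend the swap limit is hit */
}

static void free_swap_slot(int entry)
{
	printf("released slot %d\n", entry);
}

static int add_to_swap_like(int entry)
{
	if (!charge_swap(entry)) {
		free_swap_slot(entry);	/* roll back the slot allocation */
		return 0;		/* caller keeps the page in memory */
	}
	return 1;
}

int main(void)
{
	printf("swapped out: %d\n", add_to_swap_like(42));
	return 0;
}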
diff --git a/mm/swapfile.c b/mm/swapfile.c index 2bb30aa3a412..c43f654a7b64 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -785,14 +785,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, | |||
785 | count--; | 785 | count--; |
786 | } | 786 | } |
787 | 787 | ||
788 | if (!count) | ||
789 | mem_cgroup_uncharge_swap(entry); | ||
790 | |||
791 | usage = count | has_cache; | 788 | usage = count | has_cache; |
792 | p->swap_map[offset] = usage; | 789 | p->swap_map[offset] = usage; |
793 | 790 | ||
794 | /* free if no reference */ | 791 | /* free if no reference */ |
795 | if (!usage) { | 792 | if (!usage) { |
793 | mem_cgroup_uncharge_swap(entry); | ||
796 | dec_cluster_info_page(p, p->cluster_info, offset); | 794 | dec_cluster_info_page(p, p->cluster_info, offset); |
797 | if (offset < p->lowest_bit) | 795 | if (offset < p->lowest_bit) |
798 | p->lowest_bit = offset; | 796 | p->lowest_bit = offset; |
@@ -1008,7 +1006,7 @@ int free_swap_and_cache(swp_entry_t entry) | |||
1008 | * Also recheck PageSwapCache now page is locked (above). | 1006 | * Also recheck PageSwapCache now page is locked (above). |
1009 | */ | 1007 | */ |
1010 | if (PageSwapCache(page) && !PageWriteback(page) && | 1008 | if (PageSwapCache(page) && !PageWriteback(page) && |
1011 | (!page_mapped(page) || vm_swap_full())) { | 1009 | (!page_mapped(page) || mem_cgroup_swap_full(page))) { |
1012 | delete_from_swap_cache(page); | 1010 | delete_from_swap_cache(page); |
1013 | SetPageDirty(page); | 1011 | SetPageDirty(page); |
1014 | } | 1012 | } |
@@ -476,17 +476,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) | |||
476 | int res = 0; | 476 | int res = 0; |
477 | unsigned int len; | 477 | unsigned int len; |
478 | struct mm_struct *mm = get_task_mm(task); | 478 | struct mm_struct *mm = get_task_mm(task); |
479 | unsigned long arg_start, arg_end, env_start, env_end; | ||
479 | if (!mm) | 480 | if (!mm) |
480 | goto out; | 481 | goto out; |
481 | if (!mm->arg_end) | 482 | if (!mm->arg_end) |
482 | goto out_mm; /* Shh! No looking before we're done */ | 483 | goto out_mm; /* Shh! No looking before we're done */ |
483 | 484 | ||
484 | len = mm->arg_end - mm->arg_start; | 485 | down_read(&mm->mmap_sem); |
486 | arg_start = mm->arg_start; | ||
487 | arg_end = mm->arg_end; | ||
488 | env_start = mm->env_start; | ||
489 | env_end = mm->env_end; | ||
490 | up_read(&mm->mmap_sem); | ||
491 | |||
492 | len = arg_end - arg_start; | ||
485 | 493 | ||
486 | if (len > buflen) | 494 | if (len > buflen) |
487 | len = buflen; | 495 | len = buflen; |
488 | 496 | ||
489 | res = access_process_vm(task, mm->arg_start, buffer, len, 0); | 497 | res = access_process_vm(task, arg_start, buffer, len, 0); |
490 | 498 | ||
491 | /* | 499 | /* |
492 | * If the nul at the end of args has been overwritten, then | 500 | * If the nul at the end of args has been overwritten, then |
@@ -497,10 +505,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) | |||
497 | if (len < res) { | 505 | if (len < res) { |
498 | res = len; | 506 | res = len; |
499 | } else { | 507 | } else { |
500 | len = mm->env_end - mm->env_start; | 508 | len = env_end - env_start; |
501 | if (len > buflen - res) | 509 | if (len > buflen - res) |
502 | len = buflen - res; | 510 | len = buflen - res; |
503 | res += access_process_vm(task, mm->env_start, | 511 | res += access_process_vm(task, env_start, |
504 | buffer+res, len, 0); | 512 | buffer+res, len, 0); |
505 | res = strnlen(buffer, res); | 513 | res = strnlen(buffer, res); |
506 | } | 514 | } |
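get_cmdline() now copies arg_start/arg_end and env_start/env_end while holding mmap_sem for reading and does the slow access_process_vm() calls on the snapshot, so the fields can neither change nor be read torn mid-way. A userspace analog of the same snapshot-under-lock pattern, with a pthread rwlock standing in for mmap_sem:

#include <pthread.h>
#include <stdio.h>

struct mm_like {
	pthread_rwlock_t lock;
	unsigned long arg_start, arg_end;
};

static void use_args(struct mm_like *mm)
{
	unsigned long start, end;

	pthread_rwlock_rdlock(&mm->lock);
	start = mm->arg_start;		/* snapshot under the read lock */
	end = mm->arg_end;
	pthread_rwlock_unlock(&mm->lock);

	/* long-running work only ever sees the consistent copy */
	printf("args span %lu bytes\n", end - start);
}

int main(void)
{
	struct mm_like mm = { .arg_start = 0x1000, .arg_end = 0x1080 };

	pthread_rwlock_init(&mm.lock, NULL);
	use_args(&mm);
	pthread_rwlock_destroy(&mm.lock);
	return 0;
}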
diff --git a/mm/vmscan.c b/mm/vmscan.c index 5ac86956ff9d..bd620b65db52 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -411,7 +411,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, | |||
411 | struct shrinker *shrinker; | 411 | struct shrinker *shrinker; |
412 | unsigned long freed = 0; | 412 | unsigned long freed = 0; |
413 | 413 | ||
414 | if (memcg && !memcg_kmem_is_active(memcg)) | 414 | if (memcg && !memcg_kmem_online(memcg)) |
415 | return 0; | 415 | return 0; |
416 | 416 | ||
417 | if (nr_scanned == 0) | 417 | if (nr_scanned == 0) |
@@ -1214,7 +1214,7 @@ cull_mlocked: | |||
1214 | 1214 | ||
1215 | activate_locked: | 1215 | activate_locked: |
1216 | /* Not a candidate for swapping, so reclaim swap space. */ | 1216 | /* Not a candidate for swapping, so reclaim swap space. */ |
1217 | if (PageSwapCache(page) && vm_swap_full()) | 1217 | if (PageSwapCache(page) && mem_cgroup_swap_full(page)) |
1218 | try_to_free_swap(page); | 1218 | try_to_free_swap(page); |
1219 | VM_BUG_ON_PAGE(PageActive(page), page); | 1219 | VM_BUG_ON_PAGE(PageActive(page), page); |
1220 | SetPageActive(page); | 1220 | SetPageActive(page); |
@@ -1966,10 +1966,11 @@ enum scan_balance { | |||
1966 | * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan | 1966 | * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan |
1967 | * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan | 1967 | * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan |
1968 | */ | 1968 | */ |
1969 | static void get_scan_count(struct lruvec *lruvec, int swappiness, | 1969 | static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, |
1970 | struct scan_control *sc, unsigned long *nr, | 1970 | struct scan_control *sc, unsigned long *nr, |
1971 | unsigned long *lru_pages) | 1971 | unsigned long *lru_pages) |
1972 | { | 1972 | { |
1973 | int swappiness = mem_cgroup_swappiness(memcg); | ||
1973 | struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; | 1974 | struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; |
1974 | u64 fraction[2]; | 1975 | u64 fraction[2]; |
1975 | u64 denominator = 0; /* gcc */ | 1976 | u64 denominator = 0; /* gcc */ |
@@ -1996,14 +1997,14 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness, | |||
1996 | if (current_is_kswapd()) { | 1997 | if (current_is_kswapd()) { |
1997 | if (!zone_reclaimable(zone)) | 1998 | if (!zone_reclaimable(zone)) |
1998 | force_scan = true; | 1999 | force_scan = true; |
1999 | if (!mem_cgroup_lruvec_online(lruvec)) | 2000 | if (!mem_cgroup_online(memcg)) |
2000 | force_scan = true; | 2001 | force_scan = true; |
2001 | } | 2002 | } |
2002 | if (!global_reclaim(sc)) | 2003 | if (!global_reclaim(sc)) |
2003 | force_scan = true; | 2004 | force_scan = true; |
2004 | 2005 | ||
2005 | /* If we have no swap space, do not bother scanning anon pages. */ | 2006 | /* If we have no swap space, do not bother scanning anon pages. */ |
2006 | if (!sc->may_swap || (get_nr_swap_pages() <= 0)) { | 2007 | if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { |
2007 | scan_balance = SCAN_FILE; | 2008 | scan_balance = SCAN_FILE; |
2008 | goto out; | 2009 | goto out; |
2009 | } | 2010 | } |
@@ -2193,9 +2194,10 @@ static inline void init_tlb_ubc(void) | |||
2193 | /* | 2194 | /* |
2194 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. | 2195 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. |
2195 | */ | 2196 | */ |
2196 | static void shrink_lruvec(struct lruvec *lruvec, int swappiness, | 2197 | static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg, |
2197 | struct scan_control *sc, unsigned long *lru_pages) | 2198 | struct scan_control *sc, unsigned long *lru_pages) |
2198 | { | 2199 | { |
2200 | struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); | ||
2199 | unsigned long nr[NR_LRU_LISTS]; | 2201 | unsigned long nr[NR_LRU_LISTS]; |
2200 | unsigned long targets[NR_LRU_LISTS]; | 2202 | unsigned long targets[NR_LRU_LISTS]; |
2201 | unsigned long nr_to_scan; | 2203 | unsigned long nr_to_scan; |
@@ -2205,7 +2207,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness, | |||
2205 | struct blk_plug plug; | 2207 | struct blk_plug plug; |
2206 | bool scan_adjusted; | 2208 | bool scan_adjusted; |
2207 | 2209 | ||
2208 | get_scan_count(lruvec, swappiness, sc, nr, lru_pages); | 2210 | get_scan_count(lruvec, memcg, sc, nr, lru_pages); |
2209 | 2211 | ||
2210 | /* Record the original scan target for proportional adjustments later */ | 2212 | /* Record the original scan target for proportional adjustments later */ |
2211 | memcpy(targets, nr, sizeof(nr)); | 2213 | memcpy(targets, nr, sizeof(nr)); |
@@ -2409,8 +2411,6 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, | |||
2409 | unsigned long lru_pages; | 2411 | unsigned long lru_pages; |
2410 | unsigned long reclaimed; | 2412 | unsigned long reclaimed; |
2411 | unsigned long scanned; | 2413 | unsigned long scanned; |
2412 | struct lruvec *lruvec; | ||
2413 | int swappiness; | ||
2414 | 2414 | ||
2415 | if (mem_cgroup_low(root, memcg)) { | 2415 | if (mem_cgroup_low(root, memcg)) { |
2416 | if (!sc->may_thrash) | 2416 | if (!sc->may_thrash) |
@@ -2418,12 +2418,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, | |||
2418 | mem_cgroup_events(memcg, MEMCG_LOW, 1); | 2418 | mem_cgroup_events(memcg, MEMCG_LOW, 1); |
2419 | } | 2419 | } |
2420 | 2420 | ||
2421 | lruvec = mem_cgroup_zone_lruvec(zone, memcg); | ||
2422 | swappiness = mem_cgroup_swappiness(memcg); | ||
2423 | reclaimed = sc->nr_reclaimed; | 2421 | reclaimed = sc->nr_reclaimed; |
2424 | scanned = sc->nr_scanned; | 2422 | scanned = sc->nr_scanned; |
2425 | 2423 | ||
2426 | shrink_lruvec(lruvec, swappiness, sc, &lru_pages); | 2424 | shrink_zone_memcg(zone, memcg, sc, &lru_pages); |
2427 | zone_lru_pages += lru_pages; | 2425 | zone_lru_pages += lru_pages; |
2428 | 2426 | ||
2429 | if (memcg && is_classzone) | 2427 | if (memcg && is_classzone) |
@@ -2893,8 +2891,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, | |||
2893 | .may_unmap = 1, | 2891 | .may_unmap = 1, |
2894 | .may_swap = !noswap, | 2892 | .may_swap = !noswap, |
2895 | }; | 2893 | }; |
2896 | struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); | ||
2897 | int swappiness = mem_cgroup_swappiness(memcg); | ||
2898 | unsigned long lru_pages; | 2894 | unsigned long lru_pages; |
2899 | 2895 | ||
2900 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 2896 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
@@ -2911,7 +2907,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, | |||
2911 | * will pick up pages from other mem cgroup's as well. We hack | 2907 | * will pick up pages from other mem cgroup's as well. We hack |
2912 | * the priority and make it zero. | 2908 | * the priority and make it zero. |
2913 | */ | 2909 | */ |
2914 | shrink_lruvec(lruvec, swappiness, &sc, &lru_pages); | 2910 | shrink_zone_memcg(zone, memcg, &sc, &lru_pages); |
2915 | 2911 | ||
2916 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 2912 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
2917 | 2913 | ||
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e7414cec220b..2d7c4c11fc63 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c | |||
@@ -309,7 +309,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle) | |||
309 | 309 | ||
310 | static void record_obj(unsigned long handle, unsigned long obj) | 310 | static void record_obj(unsigned long handle, unsigned long obj) |
311 | { | 311 | { |
312 | *(unsigned long *)handle = obj; | 312 | /* |
313 | * The lsb of @obj represents the handle lock, while the other bits | ||
314 | * represent the object value the handle points to, so the store | ||
315 | * must not tear. | ||
316 | */ | ||
317 | WRITE_ONCE(*(unsigned long *)handle, obj); | ||
313 | } | 318 | } |
314 | 319 | ||
315 | /* zpool driver */ | 320 | /* zpool driver */ |
@@ -1635,6 +1640,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, | |||
1635 | free_obj = obj_malloc(d_page, class, handle); | 1640 | free_obj = obj_malloc(d_page, class, handle); |
1636 | zs_object_copy(free_obj, used_obj, class); | 1641 | zs_object_copy(free_obj, used_obj, class); |
1637 | index++; | 1642 | index++; |
1643 | /* | ||
1644 | * record_obj() updates the handle's value to free_obj, which would | ||
1645 | * clear the lock bit (i.e., HANDLE_PIN_BIT) of the handle and break | ||
1646 | * synchronization based on pin_tag() (e.g., in zs_free()), so keep | ||
1647 | * the lock bit set. | ||
1648 | */ | ||
1649 | free_obj |= BIT(HANDLE_PIN_BIT); | ||
1638 | record_obj(handle, free_obj); | 1650 | record_obj(handle, free_obj); |
1639 | unpin_tag(handle); | 1651 | unpin_tag(handle); |
1640 | obj_free(pool, class, used_obj); | 1652 | obj_free(pool, class, used_obj); |
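The zsmalloc fix has two parts: record_obj() now publishes the handle word with WRITE_ONCE() so the store cannot tear, and migrate_zspage() keeps HANDLE_PIN_BIT set in the value it records so the pin held across the copy is not silently dropped. A userspace approximation, emulating WRITE_ONCE() with a volatile store; the bit layout is illustrative only:

#include <stdio.h>

#define PIN_BIT			0x1UL
#define write_once(p, v)	(*(volatile unsigned long *)(p) = (v))

/* The word doubles as data (upper bits) plus a pin/lock flag (bit 0),
 * so it must be updated with one non-torn store. */
static void record_obj_like(unsigned long *handle, unsigned long obj)
{
	write_once(handle, obj);
}

int main(void)
{
	unsigned long handle = 0;
	unsigned long new_obj = 0xabcd00;

	/* keep the pin bit, as migrate_zspage() now does before record_obj() */
	record_obj_like(&handle, new_obj | PIN_BIT);
	printf("handle=%#lx pinned=%lu\n", handle, handle & PIN_BIT);
	return 0;
}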
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index c29809f765dc..62c049b647e9 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile | |||
@@ -56,7 +56,6 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o | |||
56 | obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o | 56 | obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o |
57 | obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o | 57 | obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o |
58 | obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o | 58 | obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o |
59 | obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o | ||
60 | obj-$(CONFIG_NETLABEL) += cipso_ipv4.o | 59 | obj-$(CONFIG_NETLABEL) += cipso_ipv4.o |
61 | 60 | ||
62 | obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ | 61 | obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 46ce410703b1..4d367b4139a3 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <net/cipso_ipv4.h> | 24 | #include <net/cipso_ipv4.h> |
25 | #include <net/inet_frag.h> | 25 | #include <net/inet_frag.h> |
26 | #include <net/ping.h> | 26 | #include <net/ping.h> |
27 | #include <net/tcp_memcontrol.h> | ||
28 | 27 | ||
29 | static int zero; | 28 | static int zero; |
30 | static int one = 1; | 29 | static int one = 1; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c7d1fb50f381..5ced3e4013e3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -73,7 +73,6 @@ | |||
73 | #include <net/timewait_sock.h> | 73 | #include <net/timewait_sock.h> |
74 | #include <net/xfrm.h> | 74 | #include <net/xfrm.h> |
75 | #include <net/secure_seq.h> | 75 | #include <net/secure_seq.h> |
76 | #include <net/tcp_memcontrol.h> | ||
77 | #include <net/busy_poll.h> | 76 | #include <net/busy_poll.h> |
78 | 77 | ||
79 | #include <linux/inet.h> | 78 | #include <linux/inet.h> |
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c deleted file mode 100644 index 18bc7f745e9c..000000000000 --- a/net/ipv4/tcp_memcontrol.c +++ /dev/null | |||
@@ -1,200 +0,0 @@ | |||
1 | #include <net/tcp.h> | ||
2 | #include <net/tcp_memcontrol.h> | ||
3 | #include <net/sock.h> | ||
4 | #include <net/ip.h> | ||
5 | #include <linux/nsproxy.h> | ||
6 | #include <linux/memcontrol.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | ||
10 | { | ||
11 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); | ||
12 | struct page_counter *counter_parent = NULL; | ||
13 | /* | ||
14 | * The root cgroup does not use page_counters, but rather, | ||
15 | * rely on the data already collected by the network | ||
16 | * subsystem | ||
17 | */ | ||
18 | if (memcg == root_mem_cgroup) | ||
19 | return 0; | ||
20 | |||
21 | memcg->tcp_mem.memory_pressure = 0; | ||
22 | |||
23 | if (parent) | ||
24 | counter_parent = &parent->tcp_mem.memory_allocated; | ||
25 | |||
26 | page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent); | ||
27 | |||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | void tcp_destroy_cgroup(struct mem_cgroup *memcg) | ||
32 | { | ||
33 | if (memcg == root_mem_cgroup) | ||
34 | return; | ||
35 | |||
36 | if (memcg->tcp_mem.active) | ||
37 | static_branch_dec(&memcg_sockets_enabled_key); | ||
38 | } | ||
39 | |||
40 | static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) | ||
41 | { | ||
42 | int ret; | ||
43 | |||
44 | if (memcg == root_mem_cgroup) | ||
45 | return -EINVAL; | ||
46 | |||
47 | ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages); | ||
48 | if (ret) | ||
49 | return ret; | ||
50 | |||
51 | if (!memcg->tcp_mem.active) { | ||
52 | /* | ||
53 | * The active flag needs to be written after the static_key | ||
54 | * update. This is what guarantees that the socket activation | ||
55 | * function is the last one to run. See sock_update_memcg() for | ||
56 | * details, and note that we don't mark any socket as belonging | ||
57 | * to this memcg until that flag is up. | ||
58 | * | ||
59 | * We need to do this, because static_keys will span multiple | ||
60 | * sites, but we can't control their order. If we mark a socket | ||
61 | * as accounted, but the accounting functions are not patched in | ||
62 | * yet, we'll lose accounting. | ||
63 | * | ||
64 | * We never race with the readers in sock_update_memcg(), | ||
65 | * because when this value change, the code to process it is not | ||
66 | * patched in yet. | ||
67 | */ | ||
68 | static_branch_inc(&memcg_sockets_enabled_key); | ||
69 | memcg->tcp_mem.active = true; | ||
70 | } | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | enum { | ||
76 | RES_USAGE, | ||
77 | RES_LIMIT, | ||
78 | RES_MAX_USAGE, | ||
79 | RES_FAILCNT, | ||
80 | }; | ||
81 | |||
82 | static DEFINE_MUTEX(tcp_limit_mutex); | ||
83 | |||
84 | static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, | ||
85 | char *buf, size_t nbytes, loff_t off) | ||
86 | { | ||
87 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | ||
88 | unsigned long nr_pages; | ||
89 | int ret = 0; | ||
90 | |||
91 | buf = strstrip(buf); | ||
92 | |||
93 | switch (of_cft(of)->private) { | ||
94 | case RES_LIMIT: | ||
95 | /* see memcontrol.c */ | ||
96 | ret = page_counter_memparse(buf, "-1", &nr_pages); | ||
97 | if (ret) | ||
98 | break; | ||
99 | mutex_lock(&tcp_limit_mutex); | ||
100 | ret = tcp_update_limit(memcg, nr_pages); | ||
101 | mutex_unlock(&tcp_limit_mutex); | ||
102 | break; | ||
103 | default: | ||
104 | ret = -EINVAL; | ||
105 | break; | ||
106 | } | ||
107 | return ret ?: nbytes; | ||
108 | } | ||
109 | |||
110 | static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft) | ||
111 | { | ||
112 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | ||
113 | u64 val; | ||
114 | |||
115 | switch (cft->private) { | ||
116 | case RES_LIMIT: | ||
117 | if (memcg == root_mem_cgroup) | ||
118 | val = PAGE_COUNTER_MAX; | ||
119 | else | ||
120 | val = memcg->tcp_mem.memory_allocated.limit; | ||
121 | val *= PAGE_SIZE; | ||
122 | break; | ||
123 | case RES_USAGE: | ||
124 | if (memcg == root_mem_cgroup) | ||
125 | val = atomic_long_read(&tcp_memory_allocated); | ||
126 | else | ||
127 | val = page_counter_read(&memcg->tcp_mem.memory_allocated); | ||
128 | val *= PAGE_SIZE; | ||
129 | break; | ||
130 | case RES_FAILCNT: | ||
131 | if (memcg == root_mem_cgroup) | ||
132 | return 0; | ||
133 | val = memcg->tcp_mem.memory_allocated.failcnt; | ||
134 | break; | ||
135 | case RES_MAX_USAGE: | ||
136 | if (memcg == root_mem_cgroup) | ||
137 | return 0; | ||
138 | val = memcg->tcp_mem.memory_allocated.watermark; | ||
139 | val *= PAGE_SIZE; | ||
140 | break; | ||
141 | default: | ||
142 | BUG(); | ||
143 | } | ||
144 | return val; | ||
145 | } | ||
146 | |||
147 | static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of, | ||
148 | char *buf, size_t nbytes, loff_t off) | ||
149 | { | ||
150 | struct mem_cgroup *memcg; | ||
151 | |||
152 | memcg = mem_cgroup_from_css(of_css(of)); | ||
153 | if (memcg == root_mem_cgroup) | ||
154 | return nbytes; | ||
155 | |||
156 | switch (of_cft(of)->private) { | ||
157 | case RES_MAX_USAGE: | ||
158 | page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated); | ||
159 | break; | ||
160 | case RES_FAILCNT: | ||
161 | memcg->tcp_mem.memory_allocated.failcnt = 0; | ||
162 | break; | ||
163 | } | ||
164 | |||
165 | return nbytes; | ||
166 | } | ||
167 | |||
168 | static struct cftype tcp_files[] = { | ||
169 | { | ||
170 | .name = "kmem.tcp.limit_in_bytes", | ||
171 | .write = tcp_cgroup_write, | ||
172 | .read_u64 = tcp_cgroup_read, | ||
173 | .private = RES_LIMIT, | ||
174 | }, | ||
175 | { | ||
176 | .name = "kmem.tcp.usage_in_bytes", | ||
177 | .read_u64 = tcp_cgroup_read, | ||
178 | .private = RES_USAGE, | ||
179 | }, | ||
180 | { | ||
181 | .name = "kmem.tcp.failcnt", | ||
182 | .private = RES_FAILCNT, | ||
183 | .write = tcp_cgroup_reset, | ||
184 | .read_u64 = tcp_cgroup_read, | ||
185 | }, | ||
186 | { | ||
187 | .name = "kmem.tcp.max_usage_in_bytes", | ||
188 | .private = RES_MAX_USAGE, | ||
189 | .write = tcp_cgroup_reset, | ||
190 | .read_u64 = tcp_cgroup_read, | ||
191 | }, | ||
192 | { } /* terminate */ | ||
193 | }; | ||
194 | |||
195 | static int __init tcp_memcontrol_init(void) | ||
196 | { | ||
197 | WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, tcp_files)); | ||
198 | return 0; | ||
199 | } | ||
200 | __initcall(tcp_memcontrol_init); | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4ad8edb46f7c..006396e31cb0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -61,7 +61,6 @@ | |||
61 | #include <net/timewait_sock.h> | 61 | #include <net/timewait_sock.h> |
62 | #include <net/inet_common.h> | 62 | #include <net/inet_common.h> |
63 | #include <net/secure_seq.h> | 63 | #include <net/secure_seq.h> |
64 | #include <net/tcp_memcontrol.h> | ||
65 | #include <net/busy_poll.h> | 64 | #include <net/busy_poll.h> |
66 | 65 | ||
67 | #include <linux/proc_fs.h> | 66 | #include <linux/proc_fs.h> |
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index abbdff03ce92..3e24d0ddb51b 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -91,7 +91,7 @@ static const struct file_operations reset_ops = { | |||
91 | }; | 91 | }; |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { | 94 | static const char *hw_flag_names[] = { |
95 | #define FLAG(F) [IEEE80211_HW_##F] = #F | 95 | #define FLAG(F) [IEEE80211_HW_##F] = #F |
96 | FLAG(HAS_RATE_CONTROL), | 96 | FLAG(HAS_RATE_CONTROL), |
97 | FLAG(RX_INCLUDES_FCS), | 97 | FLAG(RX_INCLUDES_FCS), |
@@ -126,9 +126,6 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { | |||
126 | FLAG(SUPPORTS_AMSDU_IN_AMPDU), | 126 | FLAG(SUPPORTS_AMSDU_IN_AMPDU), |
127 | FLAG(BEACON_TX_STATUS), | 127 | FLAG(BEACON_TX_STATUS), |
128 | FLAG(NEEDS_UNIQUE_STA_ADDR), | 128 | FLAG(NEEDS_UNIQUE_STA_ADDR), |
129 | |||
130 | /* keep last for the build bug below */ | ||
131 | (void *)0x1 | ||
132 | #undef FLAG | 129 | #undef FLAG |
133 | }; | 130 | }; |
134 | 131 | ||
@@ -148,7 +145,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, | |||
148 | /* fail compilation if somebody adds or removes | 145 | /* fail compilation if somebody adds or removes |
149 | * a flag without updating the name array above | 146 | * a flag without updating the name array above |
150 | */ | 147 | */ |
151 | BUILD_BUG_ON(hw_flag_names[NUM_IEEE80211_HW_FLAGS] != (void *)0x1); | 148 | BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); |
152 | 149 | ||
153 | for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { | 150 | for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { |
154 | if (test_bit(i, local->hw.flags)) | 151 | if (test_bit(i, local->hw.flags)) |
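Dropping the sentinel entry in favour of ARRAY_SIZE() lets the build break whenever hw_flag_names[] and the flag enum drift apart. The same keep-in-sync trick in plain C11, using _Static_assert in place of BUILD_BUG_ON and an invented three-flag enum:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

enum hw_flag { FLAG_A, FLAG_B, FLAG_C, NUM_FLAGS };

static const char *flag_names[] = { "A", "B", "C" };

/* fails to compile if a flag is added without a matching name */
_Static_assert(ARRAY_SIZE(flag_names) == NUM_FLAGS,
	       "flag_names[] out of sync with enum hw_flag");

int main(void)
{
	for (int i = 0; i < NUM_FLAGS; i++)
		printf("%d: %s\n", i, flag_names[i]);
	return 0;
}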
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 39d6bb18ce76..2edbcadb3d7f 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -130,6 +130,12 @@ _c_flags += $(if $(patsubst n%,, \ | |||
130 | $(CFLAGS_KASAN)) | 130 | $(CFLAGS_KASAN)) |
131 | endif | 131 | endif |
132 | 132 | ||
133 | ifeq ($(CONFIG_UBSAN),y) | ||
134 | _c_flags += $(if $(patsubst n%,, \ | ||
135 | $(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \ | ||
136 | $(CFLAGS_UBSAN)) | ||
137 | endif | ||
138 | |||
133 | # If building the kernel in a separate objtree expand all occurrences | 139 | # If building the kernel in a separate objtree expand all occurrences |
134 | # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). | 140 | # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). |
135 | 141 | ||
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan new file mode 100644 index 000000000000..8ab68679cfb5 --- /dev/null +++ b/scripts/Makefile.ubsan | |||
@@ -0,0 +1,17 @@ | |||
1 | ifdef CONFIG_UBSAN | ||
2 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift) | ||
3 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero) | ||
4 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable) | ||
5 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound) | ||
6 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) | ||
7 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) | ||
8 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) | ||
9 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) | ||
10 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=returns-nonnull-attribute) | ||
11 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool) | ||
12 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum) | ||
13 | |||
14 | ifdef CONFIG_UBSAN_ALIGNMENT | ||
15 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment) | ||
16 | endif | ||
17 | endif | ||
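For a feel of what these -fsanitize options catch: built with something like gcc -fsanitize=bounds,signed-integer-overflow demo.c, the program below reports both operations at runtime instead of silently invoking undefined behaviour. The flag selection and file name are only an illustration of the options listed above:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int a[3] = { 1, 2, 3 };
	volatile int idx = 3;		/* volatile keeps the compiler from folding these away */
	volatile int big = INT_MAX;

	printf("%d\n", a[idx]);		/* out-of-bounds read  -> -fsanitize=bounds */
	printf("%d\n", big + 1);	/* signed overflow     -> -fsanitize=signed-integer-overflow */
	return 0;
}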
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c7bf1aa2eeb3..0147c91fa549 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -433,6 +433,28 @@ our @typeList = ( | |||
433 | qr{${Ident}_handler_fn}, | 433 | qr{${Ident}_handler_fn}, |
434 | @typeListMisordered, | 434 | @typeListMisordered, |
435 | ); | 435 | ); |
436 | |||
437 | our $C90_int_types = qr{(?x: | ||
438 | long\s+long\s+int\s+(?:un)?signed| | ||
439 | long\s+long\s+(?:un)?signed\s+int| | ||
440 | long\s+long\s+(?:un)?signed| | ||
441 | (?:(?:un)?signed\s+)?long\s+long\s+int| | ||
442 | (?:(?:un)?signed\s+)?long\s+long| | ||
443 | int\s+long\s+long\s+(?:un)?signed| | ||
444 | int\s+(?:(?:un)?signed\s+)?long\s+long| | ||
445 | |||
446 | long\s+int\s+(?:un)?signed| | ||
447 | long\s+(?:un)?signed\s+int| | ||
448 | long\s+(?:un)?signed| | ||
449 | (?:(?:un)?signed\s+)?long\s+int| | ||
450 | (?:(?:un)?signed\s+)?long| | ||
451 | int\s+long\s+(?:un)?signed| | ||
452 | int\s+(?:(?:un)?signed\s+)?long| | ||
453 | |||
454 | int\s+(?:un)?signed| | ||
455 | (?:(?:un)?signed\s+)?int | ||
456 | )}; | ||
457 | |||
436 | our @typeListFile = (); | 458 | our @typeListFile = (); |
437 | our @typeListWithAttr = ( | 459 | our @typeListWithAttr = ( |
438 | @typeList, | 460 | @typeList, |
@@ -4517,7 +4539,7 @@ sub process { | |||
4517 | #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n"; | 4539 | #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n"; |
4518 | 4540 | ||
4519 | $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/); | 4541 | $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/); |
4520 | $has_arg_concat = 1 if ($ctx =~ /\#\#/); | 4542 | $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/); |
4521 | 4543 | ||
4522 | $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//; | 4544 | $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//; |
4523 | $dstat =~ s/$;//g; | 4545 | $dstat =~ s/$;//g; |
@@ -4528,7 +4550,7 @@ sub process { | |||
4528 | # Flatten any parentheses and braces | 4550 | # Flatten any parentheses and braces |
4529 | while ($dstat =~ s/\([^\(\)]*\)/1/ || | 4551 | while ($dstat =~ s/\([^\(\)]*\)/1/ || |
4530 | $dstat =~ s/\{[^\{\}]*\}/1/ || | 4552 | $dstat =~ s/\{[^\{\}]*\}/1/ || |
4531 | $dstat =~ s/\[[^\[\]]*\]/1/) | 4553 | $dstat =~ s/.\[[^\[\]]*\]/1/) |
4532 | { | 4554 | { |
4533 | } | 4555 | } |
4534 | 4556 | ||
@@ -4548,7 +4570,8 @@ sub process { | |||
4548 | union| | 4570 | union| |
4549 | struct| | 4571 | struct| |
4550 | \.$Ident\s*=\s*| | 4572 | \.$Ident\s*=\s*| |
4551 | ^\"|\"$ | 4573 | ^\"|\"$| |
4574 | ^\[ | ||
4552 | }x; | 4575 | }x; |
4553 | #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n"; | 4576 | #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n"; |
4554 | if ($dstat ne '' && | 4577 | if ($dstat ne '' && |
@@ -5272,6 +5295,26 @@ sub process { | |||
5272 | } | 5295 | } |
5273 | } | 5296 | } |
5274 | 5297 | ||
5298 | # check for cast of C90 native int or longer types constants | ||
5299 | if ($line =~ /(\(\s*$C90_int_types\s*\)\s*)($Constant)\b/) { | ||
5300 | my $cast = $1; | ||
5301 | my $const = $2; | ||
5302 | if (WARN("TYPECAST_INT_CONSTANT", | ||
5303 | "Unnecessary typecast of c90 int constant\n" . $herecurr) && | ||
5304 | $fix) { | ||
5305 | my $suffix = ""; | ||
5306 | my $newconst = $const; | ||
5307 | $newconst =~ s/${Int_type}$//; | ||
5308 | $suffix .= 'U' if ($cast =~ /\bunsigned\b/); | ||
5309 | if ($cast =~ /\blong\s+long\b/) { | ||
5310 | $suffix .= 'LL'; | ||
5311 | } elsif ($cast =~ /\blong\b/) { | ||
5312 | $suffix .= 'L'; | ||
5313 | } | ||
5314 | $fixed[$fixlinenr] =~ s/\Q$cast\E$const\b/$newconst$suffix/; | ||
5315 | } | ||
5316 | } | ||
5317 | |||
5275 | # check for sizeof(&) | 5318 | # check for sizeof(&) |
5276 | if ($line =~ /\bsizeof\s*\(\s*\&/) { | 5319 | if ($line =~ /\bsizeof\s*\(\s*\&/) { |
5277 | WARN("SIZEOF_ADDRESS", | 5320 | WARN("SIZEOF_ADDRESS", |
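
With --fix, the new TYPECAST_INT_CONSTANT check rewrites a cast applied to a bare constant into the equivalent suffixed constant, building the suffix from the cast's own words: 'U' for unsigned, then 'LL' for long long or 'L' for long. A hedged before/after sketch (variable names are hypothetical):

#include <linux/types.h>

/* Flagged form: the cast of a plain constant only restates a suffix. */
static u64 timeout_before = (unsigned long long)500;
static long offset_before = (long)4096;

/* What the --fix path produces for the same initialisers. */
static u64 timeout_after = 500ULL;
static long offset_after = 4096L;
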
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index cab641a12dd5..1873421f2305 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -16,7 +16,9 @@ my $P = $0; | |||
16 | my $V = '0.26'; | 16 | my $V = '0.26'; |
17 | 17 | ||
18 | use Getopt::Long qw(:config no_auto_abbrev); | 18 | use Getopt::Long qw(:config no_auto_abbrev); |
19 | use Cwd; | ||
19 | 20 | ||
21 | my $cur_path = fastgetcwd() . '/'; | ||
20 | my $lk_path = "./"; | 22 | my $lk_path = "./"; |
21 | my $email = 1; | 23 | my $email = 1; |
22 | my $email_usename = 1; | 24 | my $email_usename = 1; |
@@ -429,6 +431,8 @@ foreach my $file (@ARGV) { | |||
429 | } | 431 | } |
430 | } | 432 | } |
431 | if ($from_filename) { | 433 | if ($from_filename) { |
434 | $file =~ s/^\Q${cur_path}\E//; #strip any absolute path | ||
435 | $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree | ||
432 | push(@files, $file); | 436 | push(@files, $file); |
433 | if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) { | 437 | if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) { |
434 | open(my $f, '<', $file) | 438 | open(my $f, '<', $file) |
diff --git a/security/commoncap.c b/security/commoncap.c index 1832cf701c3d..48071ed7c445 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -137,12 +137,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode) | |||
137 | { | 137 | { |
138 | int ret = 0; | 138 | int ret = 0; |
139 | const struct cred *cred, *child_cred; | 139 | const struct cred *cred, *child_cred; |
140 | const kernel_cap_t *caller_caps; | ||
140 | 141 | ||
141 | rcu_read_lock(); | 142 | rcu_read_lock(); |
142 | cred = current_cred(); | 143 | cred = current_cred(); |
143 | child_cred = __task_cred(child); | 144 | child_cred = __task_cred(child); |
145 | if (mode & PTRACE_MODE_FSCREDS) | ||
146 | caller_caps = &cred->cap_effective; | ||
147 | else | ||
148 | caller_caps = &cred->cap_permitted; | ||
144 | if (cred->user_ns == child_cred->user_ns && | 149 | if (cred->user_ns == child_cred->user_ns && |
145 | cap_issubset(child_cred->cap_permitted, cred->cap_permitted)) | 150 | cap_issubset(child_cred->cap_permitted, *caller_caps)) |
146 | goto out; | 151 | goto out; |
147 | if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE)) | 152 | if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE)) |
148 | goto out; | 153 | goto out; |
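
The commoncap change selects which capability set represents the caller: cap_effective when the check is made on behalf of filesystem credentials (PTRACE_MODE_FSCREDS, as used for /proc-style reads), cap_permitted for the traditional real-credential case. A hedged sketch of the caller side, assuming the PTRACE_MODE_READ_FSCREDS shorthand from the companion ptrace.h change in this series:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* Hypothetical helper: a /proc-style read checks the target with
 * PTRACE_MODE_READ_FSCREDS (assumed to be PTRACE_MODE_READ |
 * PTRACE_MODE_FSCREDS), so cap_ptrace_access_check() above compares
 * the target's cap_permitted against the caller's cap_effective. */
static bool may_read_task_for_proc(struct task_struct *task)
{
	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}
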
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8d85435a45d7..2d6e9bdea398 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead, | |||
398 | */ | 398 | */ |
399 | static inline unsigned int smk_ptrace_mode(unsigned int mode) | 399 | static inline unsigned int smk_ptrace_mode(unsigned int mode) |
400 | { | 400 | { |
401 | switch (mode) { | 401 | if (mode & PTRACE_MODE_ATTACH) |
402 | case PTRACE_MODE_READ: | ||
403 | return MAY_READ; | ||
404 | case PTRACE_MODE_ATTACH: | ||
405 | return MAY_READWRITE; | 402 | return MAY_READWRITE; |
406 | } | 403 | if (mode & PTRACE_MODE_READ) |
404 | return MAY_READ; | ||
407 | 405 | ||
408 | return 0; | 406 | return 0; |
409 | } | 407 | } |
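
Because callers now OR credential-selection bits (and possibly PTRACE_MODE_NOAUDIT) into the mode, an exact switch on PTRACE_MODE_READ or PTRACE_MODE_ATTACH no longer matches, so smk_ptrace_mode() tests the bits instead. A hedged sketch of the resulting mapping, assuming the FSCREDS/REALCREDS flags from the companion ptrace.h change; it would only build inside smack_lsm.c, where the helper is defined:

/* Spot checks of the bit-test behaviour (illustrative only). */
static void smk_ptrace_mode_examples(void)
{
	/* ATTACH wins even with extra flag bits riding along. */
	WARN_ON(smk_ptrace_mode(PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) !=
		MAY_READWRITE);

	/* READ plus FSCREDS would fall through the old exact switch;
	 * the bit test still maps it to MAY_READ. */
	WARN_ON(smk_ptrace_mode(PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) !=
		MAY_READ);
}
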
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index d3c19c970a06..cb6ed10816d4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
281 | int rc = 0; | 281 | int rc = 0; |
282 | 282 | ||
283 | /* require ptrace target be a child of ptracer on attach */ | 283 | /* require ptrace target be a child of ptracer on attach */ |
284 | if (mode == PTRACE_MODE_ATTACH) { | 284 | if (mode & PTRACE_MODE_ATTACH) { |
285 | switch (ptrace_scope) { | 285 | switch (ptrace_scope) { |
286 | case YAMA_SCOPE_DISABLED: | 286 | case YAMA_SCOPE_DISABLED: |
287 | /* No additional restrictions. */ | 287 | /* No additional restrictions. */ |
@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
307 | } | 307 | } |
308 | } | 308 | } |
309 | 309 | ||
310 | if (rc) { | 310 | if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) { |
311 | printk_ratelimited(KERN_NOTICE | 311 | printk_ratelimited(KERN_NOTICE |
312 | "ptrace of pid %d was attempted by: %s (pid %d)\n", | 312 | "ptrace of pid %d was attempted by: %s (pid %d)\n", |
313 | child->pid, current->comm, current->pid); | 313 | child->pid, current->comm, current->pid); |
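
Two behavioural points in the Yama hunks: the attach-scope restrictions now apply whenever the ATTACH bit is set, whatever other flag bits accompany it, and the ratelimited notice is suppressed when the caller passed PTRACE_MODE_NOAUDIT, i.e. asked for a quiet probe. A hedged sketch of such a caller, assuming the REALCREDS flag from the companion ptrace.h change:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* Hypothetical best-effort probe: it must not log a denial, so it sets
 * PTRACE_MODE_NOAUDIT alongside the attach request. */
static bool task_probably_attachable(struct task_struct *task)
{
	return ptrace_may_access(task, PTRACE_MODE_ATTACH |
				       PTRACE_MODE_REALCREDS |
				       PTRACE_MODE_NOAUDIT);
}
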